python_code | repo_name | file_path
---|---|---|
# pylint: disable=no-self-use,invalid-name
import numpy
from numpy.testing import assert_array_almost_equal
from keras.layers import Input, Lambda
from keras.models import Model
from deep_qa.layers.wrappers import TimeDistributed
from deep_qa.testing.test_case import DeepQaTestCase
class TestTimeDistributed(DeepQaTestCase):
def test_handles_multiple_inputs(self):
input_layer_1 = Input(shape=(3, 1), dtype='int32')
input_layer_2 = Input(shape=(3, 1), dtype='int32')
combine_layer = Lambda(lambda x: x[0] ** x[1] + 1,
output_shape=lambda x: (x[0][0], 1),
name="a^b + 1 Layer")
td_combine = TimeDistributed(combine_layer)
output = td_combine([input_layer_1, input_layer_2])
model = Model([input_layer_1, input_layer_2], output)
batch_input_1 = numpy.array([[[4], [5], [6]],
[[3], [3], [3]],
[[0], [1], [2]]], dtype='float32')
batch_input_2 = numpy.array([[[3], [2], [1]],
[[1], [2], [3]],
[[1], [0], [2]]], dtype='float32')
expected_result = (batch_input_1 ** batch_input_2 + 1)
# In TimeDistributed, we reshape tensors whose final dimension is 1, so we need to do that here.
if numpy.shape(expected_result)[-1] == 1:
expected_result = numpy.reshape(expected_result, numpy.shape(expected_result)[:-1])
result = model.predict([batch_input_1, batch_input_2])
assert_array_almost_equal(result, expected_result)
| deep_qa-master | tests/layers/wrappers/time_distributed_test.py |
| deep_qa-master | tests/layers/wrappers/__init__.py |
# pylint: disable=no-self-use,invalid-name
import numpy
from deep_qa.layers.encoders import BOWEncoder
from deep_qa.layers.wrappers import EncoderWrapper, OutputMask
from deep_qa.testing.test_case import DeepQaTestCase
from deep_qa.training.models import DeepQaModel
from keras.layers import Embedding, Input
class TestEncoderWrapper(DeepQaTestCase):
def test_mask_is_computed_correctly(self):
background_input = Input(shape=(3, 3), dtype='int32')
embedding = Embedding(input_dim=3, output_dim=2, mask_zero=True)
embedded_background = embedding(background_input)
encoded_background = EncoderWrapper(BOWEncoder(units=2))(embedded_background)
mask_output = OutputMask()(encoded_background)
model = DeepQaModel(inputs=[background_input], outputs=mask_output)
test_background = numpy.asarray([
[
[0, 0, 0],
[2, 2, 2],
[0, 0, 0],
]
])
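# The first and third background sentences are all padding (index 0), so the embedding masks
# them completely and the wrapped encoder's output mask should be 0 at those positions.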
expected_mask = numpy.asarray([[0, 1, 0]])
actual_mask = model.predict([test_background])
numpy.testing.assert_array_almost_equal(expected_mask, actual_mask)
| deep_qa-master | tests/layers/wrappers/encoder_wrapper_test.py |
# pylint: disable=no-self-use,invalid-name
import numpy
from keras import backend as K
from keras.layers import Input
from keras.models import Model
from deep_qa.layers.attention.masked_softmax import MaskedSoftmax
class TestMaskedSoftmaxLayer:
def test_call_works_with_no_mask(self):
batch_size = 1
num_options = 4
options_input = Input(shape=(num_options,), dtype='float32')
softmax_result = MaskedSoftmax()(options_input)
model = Model(inputs=[options_input], outputs=[softmax_result])
options_tensor = numpy.asarray([[2, 4, 0, 1]])
softmax_tensor = model.predict([options_tensor])
assert softmax_tensor.shape == (batch_size, num_options)
numpy.testing.assert_almost_equal(softmax_tensor,
[[0.112457, 0.830953, 0.015219, 0.041371]],
decimal=5)
def test_call_handles_higher_order_input(self):
batch_size = 1
length_1 = 5
length_2 = 3
num_options = 4
options_input = Input(shape=(length_1, length_2, num_options,), dtype='float32')
softmax_result = MaskedSoftmax()(options_input)
model = Model(inputs=[options_input], outputs=[softmax_result])
options_tensor = numpy.zeros((batch_size, length_1, length_2, num_options))
for i in range(length_1):
for j in range(length_2):
options_tensor[0, i, j] = [2, 4, 0, 1]
softmax_tensor = model.predict([options_tensor])
assert softmax_tensor.shape == (batch_size, length_1, length_2, num_options)
for i in range(length_1):
for j in range(length_2):
numpy.testing.assert_almost_equal(softmax_tensor[0, i, j],
[0.112457, 0.830953, 0.015219, 0.041371],
decimal=5)
def test_call_handles_masking_properly(self):
options = K.variable(numpy.asarray([[2, 4, 0, 1]]))
mask = K.variable(numpy.asarray([[1, 0, 1, 1]]))
softmax = K.eval(MaskedSoftmax().call(options, mask=mask))
assert softmax.shape == (1, 4)
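# The masked (second) entry gets probability 0, and the softmax is renormalised over the
# remaining logits [2, 0, 1].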
numpy.testing.assert_almost_equal(softmax, [[0.66524096, 0, 0.09003057, 0.24472847]])
| deep_qa-master | tests/layers/attention/masked_softmax_test.py |
| deep_qa-master | tests/layers/attention/__init__.py |
# pylint: disable=no-self-use,invalid-name
import numpy
from numpy.testing import assert_almost_equal
import keras.backend as K
from keras.layers import Input
from keras.models import Model
from deep_qa.layers.attention import GatedAttention
class TestGatedAttentionLayer:
def test_multiplication(self):
document_len = 3
question_len = 4
bigru_hidden_dim = 2
document_input = Input(shape=(document_len, bigru_hidden_dim,),
dtype='float32',
name="document_input")
question_input = Input(shape=(question_len, bigru_hidden_dim,),
dtype='float32',
name="question_input")
attention_input = Input(shape=(document_len, question_len,),
dtype='float32',
name="attention_input")
gated_attention = GatedAttention()([document_input, question_input,
attention_input])
model = Model([document_input, question_input, attention_input],
gated_attention)
# Testing general non-batched case.
document = numpy.array([[[0.3, 0.1], [0.4, 0.2], [0.8, 0.1]]])
question = numpy.array([[[0.2, 0.6], [0.4, 0.3], [0.5, 0.7], [0.1, .6]]])
attention = numpy.array([[[0.3, 0.1, 0.5, 0.2],
[0.4, 0.2, 0.8, 0.7],
[0.8, 0.1, 0.6, 0.4]]])
result = model.predict([document, question, attention])
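# The attention-weighted question vector for the first document position is
# 0.3*[0.2, 0.6] + 0.1*[0.4, 0.3] + 0.5*[0.5, 0.7] + 0.2*[0.1, 0.6] = [0.37, 0.68]; with
# multiplicative gating it is multiplied elementwise by the document vector [0.3, 0.1],
# giving [0.111, 0.068].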
assert_almost_equal(result, numpy.array([[[0.111, 0.068],
[0.252, 0.256],
[0.432, 0.117]]]))
def test_masked_multiplication(self):
# test masked batch case
document = K.variable(numpy.array([[[0.3, 0.1], [0.4, 0.2], [0.8, 0.1]]]))
document_mask = K.variable(numpy.array([[1, 1, 0]]))
question = K.variable(numpy.array([[[0.2, 0.6], [0.4, 0.3], [0.5, 0.7],
[0.1, .6]]]))
attention = K.variable(numpy.array([[[0.3, 0.1, 0.5, 0.2],
[0.4, 0.2, 0.8, 0.7],
[0.8, 0.1, 0.6, 0.4]]]))
gated_attention = GatedAttention(gating_function="*")
result = K.eval(gated_attention([document, question, attention],
mask=[document_mask]))
assert_almost_equal(result, numpy.array([[[0.111, 0.068],
[0.252, 0.256],
[0.0, 0.0]]]))
def test_addition(self):
document_len = 3
question_len = 4
bigru_hidden_dim = 2
document_input = Input(shape=(document_len, bigru_hidden_dim,),
dtype='float32',
name="document_input")
question_input = Input(shape=(question_len, bigru_hidden_dim,),
dtype='float32',
name="question_input")
attention_input = Input(shape=(document_len, question_len,),
dtype='float32',
name="attention_input")
gated_attention = GatedAttention(gating_function="+")([document_input, question_input,
attention_input])
model = Model([document_input, question_input, attention_input],
gated_attention)
# Testing general non-batched case.
document = numpy.array([[[0.3, 0.1], [0.4, 0.2], [0.8, 0.1]]])
question = numpy.array([[[0.2, 0.6], [0.4, 0.3], [0.5, 0.7], [0.1, .6]]])
attention = numpy.array([[[0.3, 0.1, 0.5, 0.2],
[0.4, 0.2, 0.8, 0.7],
[0.8, 0.1, 0.6, 0.4]]])
result = model.predict([document, question, attention])
assert_almost_equal(result, numpy.array([[[0.67, 0.78],
[1.03, 1.48],
[1.34, 1.27]]]))
def test_masked_addition(self):
# test masked batch case
document = K.variable(numpy.array([[[0.3, 0.1], [0.4, 0.2], [0.8, 0.1]]]))
document_mask = K.variable(numpy.array([[1, 1, 0]]))
question = K.variable(numpy.array([[[0.2, 0.6], [0.4, 0.3], [0.5, 0.7],
[0.1, .6]]]))
attention = K.variable(numpy.array([[[0.3, 0.1, 0.5, 0.2],
[0.4, 0.2, 0.8, 0.7],
[0.8, 0.1, 0.6, 0.4]]]))
gated_attention = GatedAttention(gating_function="+")
result = K.eval(gated_attention([document, question, attention],
mask=[document_mask]))
assert_almost_equal(result, numpy.array([[[0.67, 0.78],
[1.03, 1.48],
[0.0, 0.0]]]))
def test_concatenation(self):
document_len = 3
question_len = 4
bigru_hidden_dim = 2
document_input = Input(shape=(document_len, bigru_hidden_dim,),
dtype='float32',
name="document_input")
question_input = Input(shape=(question_len, bigru_hidden_dim,),
dtype='float32',
name="question_input")
attention_input = Input(shape=(document_len, question_len,),
dtype='float32',
name="attention_input")
gated_attention = GatedAttention(gating_function="||")([document_input, question_input,
attention_input])
model = Model([document_input, question_input, attention_input],
gated_attention)
# Testing general non-batched case.
document = numpy.array([[[0.3, 0.1], [0.4, 0.2], [0.8, 0.1]]])
question = numpy.array([[[0.2, 0.6], [0.4, 0.3], [0.5, 0.7], [0.1, .6]]])
attention = numpy.array([[[0.3, 0.1, 0.5, 0.2],
[0.4, 0.2, 0.8, 0.7],
[0.8, 0.1, 0.6, 0.4]]])
result = model.predict([document, question, attention])
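# With '||' gating, the attention-weighted question vector (e.g. [0.37, 0.68] for the first
# position) is concatenated with the document vector (e.g. [0.3, 0.1]), doubling the last
# dimension of the output.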
assert_almost_equal(result, numpy.array([[[0.37, 0.68, 0.3, 0.1],
[0.63, 1.28, 0.4, 0.2],
[0.54, 1.17, 0.8, 0.1]]]))
def test_masked_concatenation(self):
# test masked batch case
document = K.variable(numpy.array([[[0.3, 0.1], [0.4, 0.2], [0.8, 0.1]]]))
document_mask = K.variable(numpy.array([[1, 1, 0]]))
question = K.variable(numpy.array([[[0.2, 0.6], [0.4, 0.3], [0.5, 0.7],
[0.1, .6]]]))
attention = K.variable(numpy.array([[[0.3, 0.1, 0.5, 0.2],
[0.4, 0.2, 0.8, 0.7],
[0.8, 0.1, 0.6, 0.4]]]))
gated_attention = GatedAttention(gating_function="||")
result = K.eval(gated_attention([document, question, attention],
mask=[document_mask]))
assert_almost_equal(result, numpy.array([[[0.37, 0.68, 0.3, 0.1],
[0.63, 1.28, 0.4, 0.2],
[0.0, 0.0, 0.0, 0.0]]]))
| deep_qa-master | tests/layers/attention/gated_attention_test.py |
# pylint: disable=no-self-use,invalid-name
import numpy
from keras.layers import Embedding, Input
from keras.models import Model
from deep_qa.layers.attention import WeightedSum
class TestWeightedSumLayer:
def test_call_works_on_simple_input(self):
batch_size = 1
sentence_length = 5
embedding_dim = 4
matrix_input = Input(shape=(sentence_length, embedding_dim), dtype='float32')
attention_input = Input(shape=(sentence_length,), dtype='float32')
aggregated_vector = WeightedSum()([matrix_input, attention_input])
model = Model(inputs=[matrix_input, attention_input], outputs=[aggregated_vector])
sentence_tensor = numpy.random.rand(batch_size, sentence_length, embedding_dim)
attention_tensor = numpy.asarray([[.3, .4, .1, 0, 1.2]])
aggregated_tensor = model.predict([sentence_tensor, attention_tensor])
assert aggregated_tensor.shape == (batch_size, embedding_dim)
expected_tensor = (0.3 * sentence_tensor[0, 0] +
0.4 * sentence_tensor[0, 1] +
0.1 * sentence_tensor[0, 2] +
0.0 * sentence_tensor[0, 3] +
1.2 * sentence_tensor[0, 4])
numpy.testing.assert_almost_equal(aggregated_tensor, [expected_tensor], decimal=5)
def test_call_handles_higher_order_input(self):
batch_size = 1
length_1 = 5
length_2 = 6
length_3 = 2
embedding_dim = 4
matrix_input = Input(shape=(length_1, length_2, length_3, embedding_dim), dtype='float32')
attention_input = Input(shape=(length_1, length_2, length_3,), dtype='float32')
aggregated_vector = WeightedSum()([matrix_input, attention_input])
model = Model(inputs=[matrix_input, attention_input], outputs=[aggregated_vector])
sentence_tensor = numpy.random.rand(batch_size, length_1, length_2, length_3, embedding_dim)
attention_tensor = numpy.random.rand(batch_size, length_1, length_2, length_3)
aggregated_tensor = model.predict([sentence_tensor, attention_tensor])
assert aggregated_tensor.shape == (batch_size, length_1, length_2, embedding_dim)
expected_tensor = (attention_tensor[0, 3, 2, 0] * sentence_tensor[0, 3, 2, 0] +
attention_tensor[0, 3, 2, 1] * sentence_tensor[0, 3, 2, 1])
numpy.testing.assert_almost_equal(aggregated_tensor[0, 3, 2], expected_tensor, decimal=5)
def test_call_handles_uneven_higher_order_input(self):
batch_size = 1
length_1 = 5
length_2 = 6
length_3 = 2
embedding_dim = 4
matrix_input = Input(shape=(length_3, embedding_dim), dtype='float32')
attention_input = Input(shape=(length_1, length_2, length_3,), dtype='float32')
aggregated_vector = WeightedSum()([matrix_input, attention_input])
model = Model(inputs=[matrix_input, attention_input], outputs=[aggregated_vector])
sentence_tensor = numpy.random.rand(batch_size, length_3, embedding_dim)
attention_tensor = numpy.random.rand(batch_size, length_1, length_2, length_3)
aggregated_tensor = model.predict([sentence_tensor, attention_tensor])
assert aggregated_tensor.shape == (batch_size, length_1, length_2, embedding_dim)
for i in range(length_1):
for j in range(length_2):
expected_tensor = (attention_tensor[0, i, j, 0] * sentence_tensor[0, 0] +
attention_tensor[0, i, j, 1] * sentence_tensor[0, 1])
numpy.testing.assert_almost_equal(aggregated_tensor[0, i, j], expected_tensor,
decimal=5)
def test_call_handles_masking_properly(self):
batch_size = 1
vocab_size = 4
sentence_length = 5
embedding_dim = 4
embedding_weights = numpy.random.rand(vocab_size, embedding_dim)
embedding = Embedding(vocab_size, embedding_dim, weights=[embedding_weights], mask_zero=True)
sentence_input = Input(shape=(sentence_length,), dtype='int32')
sentence_embedding = embedding(sentence_input)
attention_input = Input(shape=(sentence_length,), dtype='float32')
aggregated_vector = WeightedSum()([sentence_embedding, attention_input])
model = Model(inputs=[sentence_input, attention_input], outputs=[aggregated_vector])
sentence_tensor = numpy.asarray([[1, 3, 2, 1, 0]])
attention_tensor = numpy.asarray([[.3, .4, .1, 0, 1.2]])
aggregated_tensor = model.predict([sentence_tensor, attention_tensor])
assert aggregated_tensor.shape == (batch_size, embedding_dim)
expected_tensor = (0.3 * embedding_weights[1] +
0.4 * embedding_weights[3] +
0.1 * embedding_weights[2] +
0.0 * embedding_weights[1] +
0.0 * embedding_weights[0]) # this one is 0 because of masking
numpy.testing.assert_almost_equal(aggregated_tensor, [expected_tensor], decimal=5)
| deep_qa-master | tests/layers/attention/weighted_sum_test.py |
# pylint: disable=no-self-use,invalid-name
import numpy
from numpy.testing import assert_allclose
from keras.layers import Dense, Embedding, Input
from keras.models import Model, load_model
from deep_qa.layers.attention import MatrixAttention
from deep_qa.layers.wrappers import OutputMask
from deep_qa.testing.test_case import DeepQaTestCase
class TestMatrixAttentionLayer(DeepQaTestCase):
def test_call_works_on_simple_input(self):
sentence_1_length = 2
sentence_2_length = 3
embedding_dim = 3
sentence_1_embedding = Input(shape=(sentence_1_length, embedding_dim), dtype='float32')
sentence_2_embedding = Input(shape=(sentence_2_length, embedding_dim,), dtype='float32')
attention_layer = MatrixAttention()
attention = attention_layer([sentence_1_embedding, sentence_2_embedding])
model = Model(inputs=[sentence_1_embedding, sentence_2_embedding], outputs=[attention])
sentence_1_tensor = numpy.asarray([[[1, 1, 1], [-1, 0, 1]]])
sentence_2_tensor = numpy.asarray([[[1, 1, 1], [-1, 0, 1], [-1, -1, -1]]])
attention_tensor = model.predict([sentence_1_tensor, sentence_2_tensor])
assert attention_tensor.shape == (1, sentence_1_length, sentence_2_length)
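# Each entry (i, j) here is the dot product of sentence_1[i] with sentence_2[j],
# e.g. [1, 1, 1] . [-1, -1, -1] = -3.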
assert_allclose(attention_tensor, [[[3, 0, -3], [0, 2, 0]]])
def test_model_loads_correctly(self):
sentence_1_length = 2
sentence_2_length = 3
embedding_dim = 3
sentence_1_embedding = Input(shape=(sentence_1_length, embedding_dim), dtype='float32')
sentence_2_embedding = Input(shape=(sentence_2_length, embedding_dim,), dtype='float32')
similarity_function_params = {'type': 'linear', 'combination': 'x,y,x*y'}
attention_layer = MatrixAttention(similarity_function=similarity_function_params)
attention = attention_layer([sentence_1_embedding, sentence_2_embedding])
attention = Dense(2)(attention)
model = Model(inputs=[sentence_1_embedding, sentence_2_embedding], outputs=[attention])
sentence_1_tensor = numpy.asarray([[[1, 1, 1], [-1, 0, 1]]])
sentence_2_tensor = numpy.asarray([[[1, 1, 1], [-1, 0, 1], [-1, -1, -1]]])
model_file = self.TEST_DIR + "model.tmp"
before_loading = model.predict([sentence_1_tensor, sentence_2_tensor])
model.save(model_file)
model = load_model(model_file, # pylint: disable=redefined-variable-type
custom_objects={'MatrixAttention': MatrixAttention})
after_loading = model.predict([sentence_1_tensor, sentence_2_tensor])
assert_allclose(before_loading, after_loading)
def test_call_handles_masking_properly(self):
sentence_length = 4
vocab_size = 4
embedding_dim = 3
embedding_weights = numpy.asarray([[0, 0, 0], [1, 1, 1], [-1, 0, 1], [-1, -1, 0]])
embedding = Embedding(vocab_size, embedding_dim, weights=[embedding_weights], mask_zero=True)
sentence_1_input = Input(shape=(sentence_length,), dtype='int32')
sentence_2_input = Input(shape=(sentence_length,), dtype='int32')
sentence_1_embedding = embedding(sentence_1_input)
sentence_2_embedding = embedding(sentence_2_input)
attention_layer = MatrixAttention()
attention = attention_layer([sentence_1_embedding, sentence_2_embedding])
attention_mask = OutputMask()(attention)
model = Model(inputs=[sentence_1_input, sentence_2_input], outputs=[attention, attention_mask])
sentence_1_tensor = numpy.asarray([[0, 0, 1, 3]])
sentence_2_tensor = numpy.asarray([[0, 1, 0, 2]])
attention_tensor, attention_mask = model.predict([sentence_1_tensor, sentence_2_tensor])
expected_attention = numpy.asarray([[[0, 0, 0, 0],
[0, 0, 0, 0],
[0, 3, 0, 0],
[0, -2, 0, 1]]])
expected_mask = numpy.asarray([[[0, 0, 0, 0],
[0, 0, 0, 0],
[0, 1, 0, 1],
[0, 1, 0, 1]]])
assert_allclose(attention_tensor, expected_attention)
assert_allclose(attention_mask, expected_mask)
| deep_qa-master | tests/layers/attention/matrix_attention_test.py |
# pylint: disable=no-self-use,invalid-name
import numpy
from numpy.testing import assert_almost_equal
from keras.layers import Embedding, Input
from keras.models import Model
import keras.backend as K
from deep_qa.layers.attention import Attention
from deep_qa.layers.wrappers import OutputMask
class TestAttentionLayer:
def test_no_mask(self):
vector_length = 3
matrix_num_rows = 2
vector_input = Input(shape=(vector_length,),
dtype='float32',
name="vector_input")
matrix_input = Input(shape=(matrix_num_rows, vector_length),
dtype='float32',
name="matrix_input")
similarity_softmax = Attention()([vector_input, matrix_input])
model = Model([vector_input, matrix_input],
similarity_softmax)
# Testing general non-batched case.
vector = numpy.array([[0.3, 0.1, 0.5]])
matrix = numpy.array([[[0.6, 0.8, 0.1], [0.15, 0.5, 0.2]]])
result = model.predict([vector, matrix])
assert_almost_equal(result, numpy.array([[0.52871835, 0.47128162]]))
# Testing non-batched case where inputs are all 0s.
vector = numpy.array([[0, 0, 0]])
matrix = numpy.array([[[0, 0, 0], [0, 0, 0]]])
result = model.predict([vector, matrix])
assert_almost_equal(result, numpy.array([[0.5, 0.5]]))
def test_masked(self):
# Testing general masked non-batched case.
vector = K.variable(numpy.array([[0.3, 0.1, 0.5]]))
matrix = K.variable(numpy.array([[[0.6, 0.8, 0.1], [0.15, 0.5, 0.2], [0.1, 0.4, 0.3]]]))
mask = K.variable(numpy.array([[1.0, 0.0, 1.0]]))
result = K.eval(Attention().call([vector, matrix], mask=["_", mask]))
assert_almost_equal(result, numpy.array([[0.52248482, 0.0, 0.47751518]]))
def test_batched_no_mask(self):
vector_length = 3
matrix_num_rows = 2
vector_input = Input(shape=(vector_length,),
dtype='float32',
name="vector_input")
matrix_input = Input(shape=(matrix_num_rows, vector_length),
dtype='float32',
name="matrix_input")
similarity_softmax = Attention()([vector_input, matrix_input])
model = Model([vector_input, matrix_input],
similarity_softmax)
# Testing general batched case.
vector = numpy.array([[0.3, 0.1, 0.5], [0.3, 0.1, 0.5]])
matrix = numpy.array([[[0.6, 0.8, 0.1], [0.15, 0.5, 0.2]],
[[0.6, 0.8, 0.1], [0.15, 0.5, 0.2]]])
result = model.predict([vector, matrix])
assert_almost_equal(result, numpy.array([[0.52871835, 0.47128162],
[0.52871835, 0.47128162]]))
def test_batched_masked(self):
# Testing general masked batched case.
vector = K.variable(numpy.array([[0.3, 0.1, 0.5], [0.3, 0.1, 0.5]]))
matrix = K.variable(numpy.array([[[0.6, 0.8, 0.1], [0.15, 0.5, 0.2], [0.5, 0.3, 0.2]],
[[0.6, 0.8, 0.1], [0.15, 0.5, 0.2], [0.5, 0.3, 0.2]]]))
mask = K.variable(numpy.array([[1.0, 1.0, 0.0], [1.0, 0.0, 1.0]]))
result = K.eval(Attention().call([vector, matrix], mask=["_", mask]))
assert_almost_equal(result, numpy.array([[0.52871835, 0.47128162, 0.0],
[0.50749944, 0.0, 0.49250056]]))
# Test the case where a mask is all 0s and an input is all 0s.
vector = K.variable(numpy.array([[0.0, 0.0, 0.0], [0.3, 0.1, 0.5]]))
matrix = K.variable(numpy.array([[[0.6, 0.8, 0.1], [0.15, 0.5, 0.2], [0.5, 0.3, 0.2]],
[[0.6, 0.8, 0.1], [0.15, 0.5, 0.2], [0.5, 0.3, 0.2]]]))
mask = K.variable(numpy.array([[1.0, 1.0, 0.0], [0.0, 0.0, 0.0]]))
result = K.eval(Attention().call([vector, matrix], mask=["_", mask]))
assert_almost_equal(result, numpy.array([[0.5, 0.5, 0.0],
[0.0, 0.0, 0.0]]))
def test_call_works_on_simple_input(self):
sentence_length = 2
embedding_dim = 3
sentence_embedding = Input(shape=(sentence_length, embedding_dim), dtype='float32')
query_input = Input(shape=(embedding_dim,), dtype='float32')
attention_layer = Attention()
attention = attention_layer([query_input, sentence_embedding])
model = Model(inputs=[query_input, sentence_embedding], outputs=[attention])
sentence_tensor = numpy.asarray([[[1, 1, 1], [-1, 0, 1]]])
query_tensor = numpy.asarray([[.1, .8, .5]])
attention_tensor = model.predict([query_tensor, sentence_tensor])
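# The attention weights are a softmax over the dot products of the query with each sentence
# vector: softmax([1.4, 0.4]) ~= [0.731, 0.269].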
assert_almost_equal(attention_tensor, [[.73105858, .26894142]])
def test_call_handles_masking_properly(self):
sentence_length = 4
vocab_size = 4
embedding_dim = 3
embedding_weights = numpy.asarray([[0, 0, 0], [1, 1, 1], [-1, 0, 1], [-1, -1, 0]])
embedding = Embedding(vocab_size, embedding_dim, weights=[embedding_weights], mask_zero=True)
sentence_input = Input(shape=(sentence_length,), dtype='int32')
sentence_embedding = embedding(sentence_input)
query_input = Input(shape=(embedding_dim,), dtype='float32')
attention_layer = Attention()
attention = attention_layer([query_input, sentence_embedding])
model = Model(inputs=[query_input, sentence_input], outputs=[attention])
sentence_tensor = numpy.asarray([[0, 1, 0, 2]])
query_tensor = numpy.asarray([[.1, .8, .5]])
attention_tensor = model.predict([query_tensor, sentence_tensor])
assert_almost_equal(attention_tensor, [[0, .73105858, 0, .26894142]])
def test_non_normalized_attention_works(self):
sentence_length = 4
vocab_size = 4
embedding_dim = 3
embedding_weights = numpy.asarray([[-1, 0, 4], [1, 1, 1], [-1, 0, -1], [-1, -1, 0]])
embedding = Embedding(vocab_size, embedding_dim, weights=[embedding_weights], mask_zero=True)
sentence_input = Input(shape=(sentence_length,), dtype='int32')
sentence_embedding = embedding(sentence_input)
query_input = Input(shape=(embedding_dim,), dtype='float32')
attention_layer = Attention(normalize=False)
attention = attention_layer([query_input, sentence_embedding])
attention_mask = OutputMask()(attention)
model = Model(inputs=[query_input, sentence_input], outputs=[attention, attention_mask])
sentence_tensor = numpy.asarray([[0, 1, 0, 2]])
query_tensor = numpy.asarray([[.1, .8, .5]])
attention_tensor, mask_tensor = model.predict([query_tensor, sentence_tensor])
assert_almost_equal(attention_tensor, [[1.9, 1.4, 1.9, -.6]])
assert_almost_equal(mask_tensor, [[0, 1, 0, 1]])
| deep_qa-master | tests/layers/attention/attention_test.py |
# pylint: disable=no-self-use,invalid-name
import numpy
from numpy.testing import assert_allclose
from keras.layers import Input, Dense
from keras.models import Model
from deep_qa.layers.backend import CollapseToBatch, ExpandFromBatch, AddMask
class TestCollapseAndExpand:
# We need to test CollapseToBatch and ExpandFromBatch together, because Keras doesn't like it
# if you change the batch size between inputs and outputs. It makes sense to test them
# together, anyway.
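# CollapseToBatch(num_to_collapse=n) folds the first n dimensions after the batch dimension
# into the batch dimension (so downstream layers see a larger batch of lower-rank tensors),
# and ExpandFromBatch reverses this, recovering those dimensions from its second input.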
def test_collapse_and_expand_works_with_dynamic_shape(self):
batch_size = 3
length1 = 5
length2 = 7
length3 = 2
dense_units = 6
input_layer = Input(shape=(length1, None, length3), dtype='float32')
masked_input = AddMask(mask_value=1)(input_layer)
collapsed_1 = CollapseToBatch(num_to_collapse=1)(masked_input)
collapsed_2 = CollapseToBatch(num_to_collapse=2)(masked_input)
dense = Dense(dense_units)(collapsed_2)
expanded_1 = ExpandFromBatch(num_to_expand=1)([collapsed_1, masked_input])
expanded_2 = ExpandFromBatch(num_to_expand=2)([collapsed_2, masked_input])
expanded_dense = ExpandFromBatch(num_to_expand=2)([dense, masked_input])
model = Model(inputs=input_layer, outputs=[expanded_1, expanded_2, expanded_dense])
input_tensor = numpy.random.randint(0, 3, (batch_size, length1, length2, length3))
expanded_1_tensor, expanded_2_tensor, expanded_dense_tensor = model.predict(input_tensor)
assert expanded_1_tensor.shape == input_tensor.shape
assert expanded_2_tensor.shape == input_tensor.shape
assert expanded_dense_tensor.shape == input_tensor.shape[:-1] + (dense_units,)
assert_allclose(expanded_1_tensor, input_tensor)
assert_allclose(expanded_2_tensor, input_tensor)
| deep_qa-master | tests/layers/backend/collapse_and_expand_test.py |
# pylint: disable=no-self-use,invalid-name
import numpy
from numpy.testing import assert_almost_equal
from keras.layers import Input
from keras.models import Model
from deep_qa.layers.backend import AddMask, ReplaceMaskedValues
class TestReplaceMaskedValues:
def test_call_works_on_simple_input(self):
input_length = 3
input_layer = Input(shape=(input_length,), dtype='float32')
masked = AddMask(2)(input_layer)
replaced = ReplaceMaskedValues(4)(masked)
model = Model(inputs=[input_layer], outputs=[replaced])
input_tensor = numpy.asarray([[2, 5, 2], [2, -4, -2]])
replaced_tensor = model.predict([input_tensor])
assert_almost_equal(replaced_tensor, numpy.asarray([[4, 5, 4], [4, -4, -2]]))
| deep_qa-master | tests/layers/backend/replace_masked_values_test.py |
# pylint: disable=no-self-use,invalid-name
import numpy
from keras.layers import Input
from keras.models import Model
from deep_qa.layers.backend.add_mask import AddMask
from deep_qa.layers.backend.multiply import Multiply
from deep_qa.layers.wrappers import OutputMask
class TestMultiply:
def test_call_works_on_simple_input(self):
batch_size = 2
input_length = 5
input_1_layer = Input(shape=(input_length,), dtype='float32')
input_2_layer = Input(shape=(input_length,), dtype='float32')
masking_layer = AddMask()
masked_input_1 = masking_layer(input_1_layer)
masked_input_2 = masking_layer(input_2_layer)
multiply_output = Multiply()([masked_input_1, masked_input_2])
multiply_mask = OutputMask()(multiply_output)
model = Model(inputs=[input_1_layer, input_2_layer], outputs=[multiply_output, multiply_mask])
input_1_tensor = numpy.asarray([[2, 5, 0, 1, -4],
[-1, 0, -2, -10, -4]])
input_2_tensor = numpy.asarray([[3, 2, 1, 0, -2],
[0, 2, 2, 2, 2]])
multiply_tensor, mask_tensor = model.predict([input_1_tensor, input_2_tensor])
assert multiply_tensor.shape == (batch_size, input_length)
numpy.testing.assert_almost_equal(multiply_tensor, [[6, 10, 0, 0, 8],
[0, 0, -4, -20, -8]])
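# AddMask() masks entries equal to its mask value (0, judging by the expected mask below),
# and the product's mask is the elementwise AND of the two input masks, so positions where
# either input is 0 come out masked.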
numpy.testing.assert_almost_equal(mask_tensor, [[1, 1, 0, 0, 1],
[0, 0, 1, 1, 1]])
def test_call_works_with_uneven_dims(self):
batch_size = 1
input_length = 2
input_length_2 = 5
input_1_layer = Input(shape=(input_length, input_length_2), dtype='float32')
input_2_layer = Input(shape=(input_length,), dtype='float32')
masking_layer = AddMask()
masked_input_1 = masking_layer(input_1_layer)
masked_input_2 = masking_layer(input_2_layer)
multiply_output = Multiply()([masked_input_1, masked_input_2])
multiply_mask = OutputMask()(multiply_output)
model = Model(inputs=[input_1_layer, input_2_layer], outputs=[multiply_output, multiply_mask])
input_1_tensor = numpy.asarray([[[2, 5, 0, 1, -4],
[-1, 0, -2, -10, -4]]])
input_2_tensor = numpy.asarray([[2, 1]])
multiply_tensor, mask_tensor = model.predict([input_1_tensor, input_2_tensor])
assert multiply_tensor.shape == (batch_size, input_length, input_length_2)
numpy.testing.assert_almost_equal(multiply_tensor, [[[4, 10, 0, 2, -8],
[-1, 0, -2, -10, -4]]])
numpy.testing.assert_almost_equal(mask_tensor, [[[1, 1, 0, 1, 1],
[1, 0, 1, 1, 1]]])
| deep_qa-master | tests/layers/backend/multiply_test.py |
# pylint: disable=no-self-use,invalid-name
import numpy
from keras.layers import Input
from keras.models import Model
from deep_qa.layers.backend import Permute
class TestPermuteLayer:
def test_call_works_on_simple_input(self):
batch_size = 2
input_length_1 = 2
input_length_2 = 1
input_layer = Input(shape=(input_length_1, input_length_2), dtype='float32')
permute_output = Permute(pattern=[0, 2, 1])(input_layer)
model = Model(inputs=[input_layer], outputs=[permute_output])
input_tensor = numpy.asarray([[[2], [5]], [[-1], [-4]]])
permute_tensor = model.predict([input_tensor])
assert permute_tensor.shape == (batch_size, input_length_2, input_length_1)
numpy.testing.assert_almost_equal(permute_tensor, [[[2, 5]], [[-1, -4]]])
| deep_qa-master | tests/layers/backend/permute_test.py |
| deep_qa-master | tests/layers/backend/__init__.py |
# pylint: disable=no-self-use,invalid-name
import numpy
from keras.layers import Input
from keras.models import Model
from deep_qa.layers.backend import Max
class TestMaxLayer:
def test_call_works_on_simple_input(self):
batch_size = 2
input_length = 5
input_layer = Input(shape=(input_length,), dtype='float32')
max_output = Max()(input_layer)
model = Model(inputs=[input_layer], outputs=[max_output])
input_tensor = numpy.asarray([[2, 5, 3, 1, -4], [-1, -4, -2, -10, -4]])
max_tensor = model.predict([input_tensor])
assert max_tensor.shape == (batch_size,)
numpy.testing.assert_almost_equal(max_tensor, [5, -1])
| deep_qa-master | tests/layers/backend/max_test.py |
# pylint: disable=no-self-use,invalid-name
import numpy
from numpy.testing import assert_almost_equal
import keras.backend as K
from keras.layers import Input, Masking
from keras.models import Model
from deep_qa.layers.backend import BatchDot
from deep_qa.layers.wrappers import OutputMask
from deep_qa.testing.test_case import DeepQaTestCase
class TestBatchDotLayer(DeepQaTestCase):
def test_compute_mask_basic(self):
batch_size = 2
# test the case where both tensors have the same number of dimensions
# tensor_a has shape (2, 3, 2), so mask_a has shape (2, 3)
tensor_a = K.variable(numpy.random.randint(7, size=(batch_size, 3, 2)))
mask_a = K.variable(numpy.array([[1, 0, 1], [1, 1, 0]]))
# tensor_b has shape (2, 4, 2), so mask_b has shape (2, 4)
tensor_b = K.variable(numpy.random.randint(7, size=(batch_size, 4, 2)))
mask_b = K.variable(numpy.array([[0, 1, 1, 1], [1, 0, 1, 1]]))
# a_dot_b would have shape (2, 3, 4), so mask of a_dot_b has shape (2, 3, 4)
calculated_mask = K.eval(BatchDot().compute_mask([tensor_a, tensor_b],
[mask_a, mask_b]))
assert_almost_equal(calculated_mask, numpy.array([[[0.0, 1.0, 1.0, 1.0],
[0.0, 0.0, 0.0, 0.0],
[0.0, 1.0, 1.0, 1.0]],
[[1.0, 0.0, 1.0, 1.0],
[1.0, 0.0, 1.0, 1.0],
[0.0, 0.0, 0.0, 0.0]]]))
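# For same-rank inputs, the combined mask is effectively the outer product
# mask_a[:, :, None] * mask_b[:, None, :]: a position is unmasked only if both its row in a
# and its column in b are unmasked.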
# test the case where tensor_a has fewer dimensions than tensor_b
# tensor_a has shape (2, 4, 2), so mask_a has shape (2, 4)
tensor_a = K.variable(numpy.random.randint(7, size=(batch_size, 4, 2)))
mask_a = K.variable(numpy.array([[1, 0, 1, 0], [1, 1, 0, 0]]))
# tensor_b has shape (2, 4, 3, 2), so mask_b has shape (2, 4, 3)
tensor_b = K.variable(numpy.random.randint(7, size=(batch_size, 4, 3, 2)))
mask_b = K.variable(numpy.array([[[1, 1, 1],
[1, 1, 1],
[1, 1, 0],
[1, 0, 0]],
[[1, 1, 1],
[1, 1, 0],
[1, 0, 0],
[0, 0, 0]]]))
# a_dot_b would have shape (2, 4, 3), so mask of a_dot_b has shape (2, 4, 3)
calculated_mask = K.eval(BatchDot().compute_mask([tensor_a, tensor_b],
[mask_a, mask_b]))
assert calculated_mask.shape == (batch_size, 4, 3)
assert_almost_equal(calculated_mask, numpy.array([[[1.0, 1.0, 1.0],
[0.0, 0.0, 0.0],
[1.0, 1.0, 0.0],
[0.0, 0.0, 0.0]],
[[1.0, 1.0, 1.0],
[1.0, 1.0, 0.0],
[0.0, 0.0, 0.0],
[0.0, 0.0, 0.0]]]))
# test the case where tensor_a has more dimensions than tensor_b
# tensor_a has shape (2, 3, 4, 2), so mask_a has shape (2, 3, 4)
tensor_a = K.variable(numpy.random.randint(7, size=(batch_size, 3, 4, 2)))
mask_a = K.variable(numpy.array([[[1, 1, 1, 1],
[1, 1, 1, 1],
[1, 1, 0, 1]],
[[1, 1, 1, 1],
[1, 1, 0, 1],
[1, 0, 0, 1]]]))
# tensor_b has shape (2, 3, 2), so mask_b has shape (2, 3)
tensor_b = K.variable(numpy.random.randint(7, size=(batch_size, 3, 2)))
mask_b = K.variable(numpy.array([[1, 0, 1], [1, 1, 0]]))
# a_dot_b would have shape (2, 3, 4), so mask of a_dot_b has shape (2, 3, 4)
calculated_mask = K.eval(BatchDot().compute_mask([tensor_a, tensor_b],
[mask_a, mask_b]))
assert calculated_mask.shape == (batch_size, 3, 4)
assert_almost_equal(calculated_mask, numpy.array([[[1.0, 1.0, 1.0, 1.0],
[0.0, 0.0, 0.0, 0.0],
[1.0, 1.0, 0.0, 1.0]],
[[1.0, 1.0, 1.0, 1.0],
[1.0, 1.0, 0.0, 1.0],
[0.0, 0.0, 0.0, 0.0]]]))
def test_a_smaller_than_b(self):
batch_size = 3
tensor_a = numpy.random.randint(7, size=(batch_size, 5))
tensor_b = numpy.random.randint(7, size=(batch_size, 2, 5))
# Manually zero out some rows here; the Masking layers below use mask_value=0,
# so these timesteps will be masked in the output.
tensor_a[0] = 0
tensor_b[0][1] = 0
input_tensor_a = Input(shape=(5,))
masked_tensor_a = Masking(mask_value=0)(input_tensor_a)
input_tensor_b = Input(shape=(2, 5))
masked_tensor_b = Masking(mask_value=0)(input_tensor_b)
a_dot_b = BatchDot()([masked_tensor_a, masked_tensor_b])
a_dot_b_mask = OutputMask()(a_dot_b)
model = Model(inputs=[input_tensor_a, input_tensor_b],
outputs=[a_dot_b, a_dot_b_mask])
# a_dot_b and mask_tensor are of shape (3, 2).
a_dot_b_tensor, mask_tensor = model.predict([tensor_a, tensor_b])
# Test that the dot happened like we expected.
for i in range(batch_size):
# each dot product should be of shape (2,)
assert_almost_equal(a_dot_b_tensor[i],
numpy.einsum("i,mi->m", tensor_a[i], tensor_b[i]))
# Check that the values in the output mask are 0 where the
# input rows were zeroed out above.
assert mask_tensor[0][0] == 0
assert mask_tensor[0][1] == 0
def test_a_larger_than_b(self):
batch_size = 3
tensor_a = numpy.random.randint(7, size=(batch_size, 2, 5))
tensor_b = numpy.random.randint(7, size=(batch_size, 5))
# Manually zero out some rows here; the Masking layers below use mask_value=0,
# so these timesteps will be masked in the output.
tensor_a[0][1] = 0
tensor_b[0] = 0
input_tensor_a = Input(shape=(2, 5))
masked_tensor_a = Masking(mask_value=0)(input_tensor_a)
input_tensor_b = Input(shape=(5,))
masked_tensor_b = Masking(mask_value=0)(input_tensor_b)
a_dot_b = BatchDot()([masked_tensor_a, masked_tensor_b])
a_dot_b_mask = OutputMask()(a_dot_b)
model = Model(inputs=[input_tensor_a, input_tensor_b],
outputs=[a_dot_b, a_dot_b_mask])
# a_dot_b and mask_tensor are of shape (3, 2).
a_dot_b_tensor, mask_tensor = model.predict([tensor_a, tensor_b])
# Test that the dot happened like we expected.
for i in range(batch_size):
# each dot product should be of shape (2,)
assert_almost_equal(a_dot_b_tensor[i],
numpy.einsum("mi,i->m", tensor_a[i], tensor_b[i]))
# Check that the values in the output mask are 0 where the
# input rows were zeroed out above.
assert mask_tensor[0][0] == 0
assert mask_tensor[0][1] == 0
def test_a_smaller_than_b_higher_dimension(self):
batch_size = 3
tensor_a = numpy.random.randint(7, size=(batch_size, 4, 5))
tensor_b = numpy.random.randint(7, size=(batch_size, 4, 2, 5))
# Manually zero out some rows here; the Masking layers below use mask_value=0,
# so these timesteps will be masked in the output.
tensor_a[0][1] = 0
tensor_a[1][3] = 0
tensor_b[0][1][1] = 0
tensor_b[0][2][1] = 0
input_tensor_a = Input(shape=(4, 5))
masked_tensor_a = Masking(mask_value=0)(input_tensor_a)
input_tensor_b = Input(shape=(4, 2, 5))
masked_tensor_b = Masking(mask_value=0)(input_tensor_b)
a_dot_b = BatchDot()([masked_tensor_a, masked_tensor_b])
a_dot_b_mask = OutputMask()(a_dot_b)
model = Model(inputs=[input_tensor_a, input_tensor_b],
outputs=[a_dot_b, a_dot_b_mask])
# a_dot_b and mask_tensor are of shape (3, 4, 2).
a_dot_b_tensor, mask_tensor = model.predict([tensor_a, tensor_b])
# Test that the dot happened like we expected.
for i in range(batch_size):
# each dot product should be of shape (4, 2)
assert_almost_equal(a_dot_b_tensor[i],
numpy.einsum("ij,imj->im", tensor_a[i], tensor_b[i]))
# Check that the values in the output mask are 0 where the
# input rows were zeroed out above.
assert mask_tensor[0][1][0] == 0
assert mask_tensor[0][1][1] == 0
assert mask_tensor[0][2][1] == 0
assert mask_tensor[1][3][0] == 0
assert mask_tensor[1][3][1] == 0
def test_a_larger_than_b_higher_dimension(self):
batch_size = 3
tensor_a = numpy.random.randint(7, size=(batch_size, 4, 2, 5))
tensor_b = numpy.random.randint(7, size=(batch_size, 4, 5))
# Manually zero out some rows here; the Masking layers below use mask_value=0,
# so these timesteps will be masked in the output.
tensor_a[0][1][1] = 0
tensor_a[0][2][1] = 0
tensor_b[0][1] = 0
tensor_b[1][3] = 0
input_tensor_a = Input(shape=(4, 2, 5))
masked_tensor_a = Masking(mask_value=0)(input_tensor_a)
input_tensor_b = Input(shape=(4, 5))
masked_tensor_b = Masking(mask_value=0)(input_tensor_b)
a_dot_b = BatchDot()([masked_tensor_a, masked_tensor_b])
a_dot_b_mask = OutputMask()(a_dot_b)
model = Model(inputs=[input_tensor_a, input_tensor_b],
outputs=[a_dot_b, a_dot_b_mask])
# a_dot_b and mask_tensor are of shape (3, 4, 2).
a_dot_b_tensor, mask_tensor = model.predict([tensor_a, tensor_b])
# Test that the dot happened like we expected.
for i in range(batch_size):
# each dot product should be of shape (4, 2)
assert_almost_equal(a_dot_b_tensor[i],
numpy.einsum("imj,ij->im", tensor_a[i], tensor_b[i]))
# Check that the values in the output mask are 0 where the
# input rows were zeroed out above.
assert mask_tensor[0][1][0] == 0
assert mask_tensor[0][1][1] == 0
assert mask_tensor[0][2][1] == 0
assert mask_tensor[1][3][0] == 0
assert mask_tensor[1][3][1] == 0
def test_output_shapes(self):
bd = BatchDot()
a_shapes = [(5, 10), (1, 1, 1), (1, 5, 3), (1, 5, 4, 3), (1, 5, 3)]
b_shapes = [(5, 10), (1, 1, 1), (1, 2, 3), (1, 5, 3), (1, 5, 4, 3)]
expected_shapes = [(5, 1), (1, 1, 1), (1, 5, 2), (1, 5, 4), (1, 5, 4)]
for a_shape, b_shape, expected_shape in zip(a_shapes, b_shapes, expected_shapes):
assert K.eval(bd([K.ones(shape=a_shape), K.ones(shape=b_shape)])).shape == expected_shape
assert bd.compute_output_shape([a_shape, b_shape]) == expected_shape
| deep_qa-master | tests/layers/backend/batch_dot_test.py |
# pylint: disable=no-self-use,invalid-name
import numpy
from keras.layers import Input
from keras.models import Model
from deep_qa.layers.backend import Envelope
class TestEnvelopeLayer:
def test_call_works_on_simple_input(self):
batch_size = 1
sequence_length = 5
span_begin_input = Input(shape=(sequence_length,), dtype='float32')
span_end_input = Input(shape=(sequence_length,), dtype='float32')
envelope = Envelope()([span_begin_input, span_end_input])
model = Model(inputs=[span_begin_input, span_end_input], outputs=[envelope])
span_begin_tensor = numpy.asarray([[0.01, 0.1, 0.8, 0.05, 0.04]])
span_end_tensor = numpy.asarray([[0.01, 0.04, 0.05, 0.2, 0.7]])
envelope_tensor = model.predict([span_begin_tensor, span_end_tensor])
assert envelope_tensor.shape == (batch_size, sequence_length)
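# Each envelope value is cumsum(span_begin)[i] * (1 - cumsum(span_end)[i]): roughly, the
# probability that position i falls inside the predicted span.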
expected_envelope = [[0.01 * 0.99, 0.11 * 0.95, 0.91 * 0.9, 0.96 * 0.7, 1.0 * 0.0]]
numpy.testing.assert_almost_equal(envelope_tensor, expected_envelope)
| deep_qa-master | tests/layers/backend/envelope_test.py |
# pylint: disable=no-self-use,invalid-name
import numpy
from keras.layers import Input
from keras.models import Model
from deep_qa.layers.backend import Repeat
class TestRepeatLayer:
def test_call_works_on_simple_input(self):
batch_size = 2
input_length = 3
repetitions = 4
input_layer = Input(shape=(input_length,), dtype='float32')
repeat_output = Repeat(axis=1, repetitions=repetitions)(input_layer)
model = Model(inputs=[input_layer], outputs=[repeat_output])
input_tensor = numpy.asarray([[2, 5, 3], [-1, -4, -2]])
repeat_tensor = model.predict([input_tensor])
assert repeat_tensor.shape == (batch_size, repetitions, input_length)
for i in range(repetitions):
numpy.testing.assert_almost_equal(repeat_tensor[:, i, :], [[2, 5, 3], [-1, -4, -2]])
| deep_qa-master | tests/layers/backend/repeat_test.py |
# pylint: disable=no-self-use,invalid-name
import numpy
from keras.layers import Input
from keras.models import Model
from deep_qa.layers.backend import RepeatLike
class TestRepeatLikeLayer:
def test_call_works_on_simple_input(self):
batch_size = 2
input_length = 3
repetitions = 4
input_layer = Input(shape=(input_length,), dtype='float32')
input_layer_2 = Input(shape=(None,), dtype='float32')
repeat_output = RepeatLike(axis=1, copy_from_axis=1)([input_layer, input_layer_2])
model = Model(inputs=[input_layer, input_layer_2], outputs=[repeat_output])
input_tensor = numpy.asarray([[2, 5, 3], [-1, -4, -2]])
input_tensor_2 = numpy.ones((batch_size, repetitions))
repeat_tensor = model.predict([input_tensor, input_tensor_2])
assert repeat_tensor.shape == (batch_size, repetitions, input_length)
for i in range(repetitions):
numpy.testing.assert_almost_equal(repeat_tensor[:, i, :], [[2, 5, 3], [-1, -4, -2]])
| deep_qa-master | tests/layers/backend/repeat_like_test.py |
| deep_qa-master | tests/layers/encoders/__init__.py |
# pylint: disable=no-self-use,invalid-name
import numpy
from keras.layers import Input, Embedding
from keras.models import Model
from deep_qa.layers.encoders import BOWEncoder
class TestBOWEncoder:
def test_on_unmasked_input(self):
sentence_length = 5
embedding_dim = 10
vocabulary_size = 15
input_layer = Input(shape=(sentence_length,), dtype='int32')
# Embedding does not mask zeros
embedding = Embedding(input_dim=vocabulary_size, output_dim=embedding_dim)
encoder = BOWEncoder()
embedded_input = embedding(input_layer)
encoded_input = encoder(embedded_input)
model = Model(inputs=input_layer, outputs=encoded_input)
model.compile(loss="mse", optimizer="sgd") # Will not train this model
test_input = numpy.asarray([[0, 3, 1, 7, 10]], dtype='int32')
embedding_weights = embedding.get_weights()[0] # get_weights returns a list with one element.
expected_output = numpy.mean(embedding_weights[test_input], axis=1)
actual_output = model.predict(test_input)
numpy.testing.assert_array_almost_equal(expected_output, actual_output)
def test_on_masked_input(self):
sentence_length = 5
embedding_dim = 10
vocabulary_size = 15
input_layer = Input(shape=(sentence_length,), dtype='int32')
# Embedding masks zeros
embedding = Embedding(input_dim=vocabulary_size, output_dim=embedding_dim, mask_zero=True)
encoder = BOWEncoder()
embedded_input = embedding(input_layer)
encoded_input = encoder(embedded_input)
model = Model(inputs=input_layer, outputs=encoded_input)
model.compile(loss="mse", optimizer="sgd") # Will not train this model
test_input = numpy.asarray([[0, 3, 1, 7, 10]], dtype='int32')
embedding_weights = embedding.get_weights()[0] # get_weights returns a list with one element.
# Omitting the first element (0), because that is supposed to be masked in the model.
expected_output = numpy.mean(embedding_weights[test_input[:, 1:]], axis=1)
actual_output = model.predict(test_input)
# The following comparison is accurate to six decimal places.
numpy.testing.assert_array_almost_equal(expected_output, actual_output)
def test_on_all_zeros(self):
sentence_length = 5
embedding_dim = 10
vocabulary_size = 15
input_layer = Input(shape=(sentence_length,), dtype='int32')
# Embedding masks zeros
embedding = Embedding(input_dim=vocabulary_size, output_dim=embedding_dim, mask_zero=True)
encoder = BOWEncoder()
embedded_input = embedding(input_layer)
encoded_input = encoder(embedded_input)
model = Model(inputs=input_layer, outputs=encoded_input)
model.compile(loss="mse", optimizer="sgd") # Will not train this model
test_input = numpy.asarray([[0, 0, 0, 0, 0]], dtype='int32')
# All indices are 0 and therefore masked, so the encoder should output all zeros.
expected_output = numpy.zeros((1, embedding_dim))
actual_output = model.predict(test_input)
# The following comparison is accurate to six decimal places.
numpy.testing.assert_array_almost_equal(expected_output, actual_output)
| deep_qa-master | tests/layers/encoders/bow_encoder_test.py |
| deep_qa-master | tests/training/__init__.py |
# pylint: disable=no-self-use,invalid-name
from copy import deepcopy
import keras.backend as K
from deep_qa.common.params import Params
from deep_qa.models.text_classification import ClassificationModel
from deep_qa.testing.test_case import DeepQaTestCase
class TestMultiGpu(DeepQaTestCase):
def setUp(self):
super(TestMultiGpu, self).setUp()
self.write_true_false_model_files()
self.args = Params({
'num_gpus': 2,
})
def test_model_can_train_and_load(self):
self.ensure_model_trains_and_loads(ClassificationModel, self.args)
def test_model_can_train_and_load_with_generator(self):
args = self.args
args["data_generator"] = {"dynamic_batching": True, "padding_noise": 0.4}
self.ensure_model_trains_and_loads(ClassificationModel, args)
def test_variables_live_on_cpu(self):
model = self.get_model(ClassificationModel, self.args)
model.train()
trainable_variables = model.model.trainable_weights
for variable in trainable_variables:
# This is an odd quirk of tensorflow - devices are named slightly differently from their
# scopes, so the device string may be "" rather than "/cpu:0".
assert variable.device == "/cpu:0" or variable.device == ""
def test_multi_gpu_shares_variables(self):
multi_gpu_model = self.get_model(ClassificationModel, self.args)
single_gpu_args = deepcopy(self.args)
single_gpu_args["num_gpus"] = 1
single_gpu_model = self.get_model(ClassificationModel, single_gpu_args)
multi_gpu_model.train()
multi_gpu_variables = [x.name for x in multi_gpu_model.model.trainable_weights]
K.clear_session()
single_gpu_model.train()
single_gpu_variables = ["tower_0/" + x.name for x in single_gpu_model.model.trainable_weights]
assert single_gpu_variables == multi_gpu_variables
| deep_qa-master | tests/training/multi_gpu_test.py |
# pylint: disable=no-self-use,invalid-name
import numpy
import tensorflow
from deep_qa.common.params import Params
from deep_qa.testing.test_case import DeepQaTestCase
from deep_qa.training.train_utils import _get_dense_gradient_average, _get_sparse_gradient_average
from deep_qa.training.train_utils import pin_variable_device_scope, slice_batch, average_gradients
class TestTrainUtils(DeepQaTestCase):
def setUp(self):
super(TestTrainUtils, self).setUp()
self.write_true_false_model_files()
self.args = Params({
'num_gpus': 2,
})
def test_pinned_scope_correctly_allocates_ops(self):
scope_function = pin_variable_device_scope(device="/gpu:0", variable_device="/cpu:0")
# Should have a cpu scope.
variable = tensorflow.Variable([])
# Should have a gpu scope.
add_op = tensorflow.add(variable, 1.0)
assert scope_function(variable.op) == "/cpu:0"
assert scope_function(add_op.op) == "/gpu:0" # pylint: disable=no-member
def test_gradient_average(self):
tensors = [tensorflow.ones([10, 20]) for _ in range(5)]
average = _get_dense_gradient_average(tensors)
session = tensorflow.Session()
numpy.testing.assert_array_equal(session.run(average), session.run(tensors[0]))
def test_sparse_gradient_average(self):
tensors = [tensorflow.IndexedSlices(values=tensorflow.ones([5, 20]),
indices=tensorflow.constant([1, 2, 3, 4, 5])) for _ in range(5)]
average = _get_sparse_gradient_average(tensors)
session = tensorflow.Session()
# The indices are unique, so the returned values should simply be the average of the values at those indices.
numpy.testing.assert_array_almost_equal(session.run(average.values), session.run(tensorflow.ones([5, 20])))
tensors = [tensorflow.IndexedSlices(values=tensorflow.ones([5, 20]),
indices=tensorflow.constant([1, 1, 1, 2, 1])) for _ in range(5)]
average = _get_sparse_gradient_average(tensors)
# Now we have duplicate indices, so the values for those indices should be summed within each
# tensor before averaging across the 5 tensors. Index 1 appears 4 times with value ones(1, 20),
# so the first row of the result should be an array of fours; index 2 appears once, so the
# second row should be ones, as in the unique-index case above. This checks that the slices
# are being correctly de-duplicated.
expected_returned_tensor = numpy.concatenate([numpy.ones([1, 20]) * 4., numpy.ones([1, 20])], 0)
numpy.testing.assert_array_almost_equal(session.run(average.values), expected_returned_tensor)
def test_tower_gradient_average(self):
grad1 = [tensorflow.constant(numpy.random.random([10, 20])) for _ in range(3)]
variable1 = tensorflow.ones([10, 20])
grad2 = [tensorflow.constant(numpy.random.random([10, 3, 4])) for _ in range(3)]
variable2 = tensorflow.ones([10, 3, 4])
sparse_variable = tensorflow.ones([20, 20])
sparse_grads = [tensorflow.IndexedSlices(values=tensorflow.constant(numpy.random.random([5, 20])),
indices=tensorflow.constant([1, 2, 3, 4, 5]),
dense_shape=tensorflow.shape(sparse_variable))
for _ in range(3)]
tower1 = [(grad1[0], variable1), (grad2[0], variable2), (sparse_grads[0], sparse_variable)]
tower2 = [(grad1[1], variable1), (grad2[1], variable2), (sparse_grads[1], sparse_variable)]
tower3 = [(grad1[2], variable1), (grad2[2], variable2), (sparse_grads[2], sparse_variable)]
averages = average_gradients([tower1, tower2, tower3])
session = tensorflow.Session()
expected_grad1_mean = numpy.mean(session.run(grad1), 0)
expected_grad2_mean = numpy.mean(session.run(grad2), 0)
expected_grad3_mean = numpy.mean(session.run([x.values for x in sparse_grads]), 0)
actual_grad1_mean = session.run(averages[0][0])
actual_grad2_mean = session.run(averages[1][0])
actual_grad3_mean = session.run(averages[2][0].values)
numpy.testing.assert_array_almost_equal(expected_grad1_mean, actual_grad1_mean)
numpy.testing.assert_array_almost_equal(expected_grad2_mean, actual_grad2_mean)
numpy.testing.assert_array_almost_equal(expected_grad3_mean, actual_grad3_mean)
def test_slice_batch(self):
tensor1 = tensorflow.get_variable("tensor1", shape=[32, 10, 4])
tensor2 = tensorflow.get_variable("tensor2", shape=[32, 12])
tensor3 = tensorflow.get_variable("tensor3", shape=[32])
split_tensors = slice_batch([tensor1, tensor2, tensor3], num_gpus=4)
session = tensorflow.Session()
session.run(tensorflow.global_variables_initializer())
returned_arrays = session.run(split_tensors)
expected_tensor1 = numpy.reshape(session.run(tensor1), [4, 8, 10, 4])
expected_tensor2 = numpy.reshape(session.run(tensor2), [4, 8, 12])
expected_tensor3 = numpy.reshape(session.run(tensor3), [4, 8])
numpy.testing.assert_array_equal(returned_arrays[0], expected_tensor1)
numpy.testing.assert_array_equal(returned_arrays[1], expected_tensor2)
numpy.testing.assert_array_equal(returned_arrays[2], expected_tensor3)
| deep_qa-master | tests/training/train_utils_test.py |
# pylint: disable=invalid-name,no-self-use
import numpy
from numpy.testing import assert_almost_equal
from keras import backend as K
from deep_qa.testing.test_case import DeepQaTestCase
from deep_qa.training.losses import ranking_loss, ranking_loss_with_margin
class TestLosses(DeepQaTestCase):
def test_ranking_loss_is_computed_correctly(self):
predictions = numpy.asarray([[.1, .4, .8], [-.1, -.2, .1]])
labels = numpy.asarray([[0, 0, 1], [1, 0, 0]])
sigmoid = lambda x: 1.0 / (1.0 + numpy.exp(-x))
expected_result = numpy.mean(-sigmoid(numpy.asarray([.8 - .4, -.1 - .1])))
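# ranking_loss pushes the score of the correct answer (.8 and -.1 here) above the best
# incorrect score (.4 and .1) through a sigmoid.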
result = K.eval(ranking_loss(K.variable(predictions), K.variable(labels)))
assert_almost_equal(expected_result, result)
def test_ranking_loss_with_margin_is_computed_correctly(self):
predictions = numpy.asarray([[.1, .4, .8], [-.1, -.2, .1]])
labels = numpy.asarray([[0, 0, 1], [1, 0, 0]])
expected_result = numpy.mean(numpy.maximum(0, numpy.asarray([1 + .4 - .8, 1 + .1 - -.1])))
result = K.eval(ranking_loss_with_margin(K.variable(predictions), K.variable(labels)))
assert_almost_equal(expected_result, result)
| deep_qa-master | tests/training/losses_test.py |
# pylint: disable=no-self-use,invalid-name
from unittest import mock
from deep_qa.common.params import Params, pop_choice
from deep_qa.data.datasets import Dataset, SnliDataset
from deep_qa.layers.encoders import encoders
from deep_qa.models.text_classification import ClassificationModel
from deep_qa.testing.test_case import DeepQaTestCase
class TestTextTrainer(DeepQaTestCase):
# pylint: disable=protected-access
def test_get_encoder_works_without_params(self):
self.write_true_false_model_files()
model = self.get_model(ClassificationModel, {'encoder': {}})
model._build_model()
encoder = model._get_encoder()
encoder_type = pop_choice({}, "type", list(encoders.keys()), default_to_first_choice=True)
expected_encoder = encoders[encoder_type](**{})
assert isinstance(encoder, expected_encoder.__class__)
@mock.patch.object(ClassificationModel, '_output_debug_info')
def test_padding_works_correctly(self, _output_debug_info):
self.write_true_false_model_files()
args = Params({
'embeddings': {'words': {'dimension': 2}, 'characters': {'dimension': 2}},
'tokenizer': {'type': 'words and characters'},
'show_summary_with_masking_info': True,
'debug': {
'data': 'training',
'layer_names': [
'combined_word_embedding_for_sentence_input',
],
'masks': [
'combined_word_embedding_for_sentence_input',
],
}
})
model = self.get_model(ClassificationModel, args)
def new_debug(output_dict, epoch): # pylint: disable=unused-argument
# We're going to check two things in here: that the shape of combined word embedding is
# as expected, and that the mask is computed correctly.
# TODO(matt): actually, from this test, it looks like the mask is returned as
# output_dict['combined_word_embedding'][1]. Maybe this means we can simplify the
# logic in Trainer._debug()? I need to look into this more to be sure that's
# consistently happening, though.
word_embeddings = output_dict['combined_word_embedding_for_sentence_input'][0]
assert len(word_embeddings) == 6
assert word_embeddings[0].shape == (3, 4)
word_masks = output_dict['combined_word_embedding_for_sentence_input'][1]
# Zeros are added to sentences _from the left_.
assert word_masks[0][0] == 0
assert word_masks[0][1] == 0
assert word_masks[0][2] == 1
assert word_masks[1][0] == 1
assert word_masks[1][1] == 1
assert word_masks[1][2] == 1
assert word_masks[2][0] == 0
assert word_masks[2][1] == 1
assert word_masks[2][2] == 1
assert word_masks[3][0] == 0
assert word_masks[3][1] == 0
assert word_masks[3][2] == 1
_output_debug_info.side_effect = new_debug
model.train()
def test_load_model_and_fit(self):
args = Params({
'test_files': [self.TEST_FILE],
'embeddings': {'words': {'dimension': 4}, 'characters': {'dimension': 2}},
'save_models': True,
'tokenizer': {'type': 'words and characters'},
'show_summary_with_masking_info': True,
})
self.write_true_false_model_files()
model, loaded_model = self.ensure_model_trains_and_loads(ClassificationModel, args)
# now fit both models on some more data, and ensure that we get the same results.
self.write_additional_true_false_model_files()
_, training_arrays = loaded_model.load_data_arrays(loaded_model.train_files)
model.model.fit(training_arrays[0], training_arrays[1], shuffle=False, nb_epoch=1)
loaded_model.model.fit(training_arrays[0], training_arrays[1], shuffle=False, nb_epoch=1)
# _, validation_arrays = loaded_model.load_data_arrays(loaded_model.validation_files)
# verify that original model and the loaded model predict the same outputs
# TODO(matt): fix the randomness that occurs here.
# assert_allclose(model.model.predict(validation_arrays[0]),
# loaded_model.model.predict(validation_arrays[0]))
def test_data_generator_works(self):
args = Params({
'test_files': [self.TEST_FILE],
'embeddings': {'words': {'dimension': 4}, 'characters': {'dimension': 2}},
'save_models': True,
'tokenizer': {'type': 'words and characters'},
'data_generator': {},
'show_summary_with_masking_info': True,
})
self.write_true_false_model_files()
self.ensure_model_trains_and_loads(ClassificationModel, args)
def test_dynamic_padding_works(self):
args = Params({
'test_files': [self.TEST_FILE],
'embeddings': {'words': {'dimension': 4}, 'characters': {'dimension': 2}},
'save_models': True,
'tokenizer': {'type': 'words and characters'},
'data_generator': {'dynamic_padding': True},
'batch_size': 2,
})
self.write_true_false_model_files()
self.ensure_model_trains_and_loads(ClassificationModel, args)
def test_pretrained_embeddings_works_correctly(self):
self.write_true_false_model_files()
self.write_pretrained_vector_files()
args = Params({
'embeddings': {
'words': {
'dimension': 8,
'pretrained_file': self.PRETRAINED_VECTORS_GZIP,
'project': True
},
'characters': {'dimension': 8}},
})
model = self.get_model(ClassificationModel, args)
model.train()
def test_reading_two_datasets_return_identical_types(self):
self.write_true_false_model_files()
model = self.get_model(ClassificationModel)
train_dataset = model.load_dataset_from_files([self.TRAIN_FILE])
validation_dataset = model.load_dataset_from_files([self.VALIDATION_FILE])
assert isinstance(train_dataset, Dataset)
assert isinstance(validation_dataset, Dataset)
def test_reading_two_non_default_datasets_return_identical_types(self):
self.write_original_snli_data()
model = self.get_model(ClassificationModel, {"dataset": {"type": "snli"}})
train_dataset = model.load_dataset_from_files([self.TRAIN_FILE])
validation_dataset = model.load_dataset_from_files([self.TRAIN_FILE])
assert isinstance(train_dataset, SnliDataset)
assert isinstance(validation_dataset, SnliDataset)
| deep_qa-master | tests/training/text_trainer_test.py |
deep_qa-master | tests/tensors/__init__.py |
|
# pylint: disable=no-self-use,invalid-name
import numpy
from numpy.testing import assert_almost_equal, assert_array_almost_equal
import keras.backend as K
from deep_qa.tensors.backend import l1_normalize
from deep_qa.tensors.masked_operations import masked_batch_dot, masked_softmax
class TestMaskedOperations:
def test_masked_batch_dot_masks_properly(self):
embedding_dim = 3
a_length = 4
b_length = 5
batch_size = 2
tensor_a = numpy.random.rand(batch_size, a_length, embedding_dim)
tensor_b = numpy.random.rand(batch_size, b_length, embedding_dim)
mask_a = numpy.ones((batch_size, a_length))
mask_a[1, 3] = 0
mask_b = numpy.ones((batch_size, b_length))
mask_b[1, 2] = 0
result = K.eval(masked_batch_dot(K.variable(tensor_a),
K.variable(tensor_b),
K.variable(mask_a),
K.variable(mask_b)))
assert numpy.all(result[0, :, :] != numpy.zeros((a_length, b_length)))
assert numpy.any(result[1, 0, :] != numpy.zeros((b_length)))
assert numpy.any(result[1, 1, :] != numpy.zeros((b_length)))
assert numpy.any(result[1, 2, :] != numpy.zeros((b_length)))
assert numpy.all(result[1, 3, :] == numpy.zeros((b_length)))
assert numpy.any(result[1, :, 0] != numpy.zeros((a_length)))
assert numpy.any(result[1, :, 1] != numpy.zeros((a_length)))
assert numpy.all(result[1, :, 2] == numpy.zeros((a_length)))
assert numpy.any(result[1, :, 3] != numpy.zeros((a_length)))
assert numpy.any(result[1, :, 4] != numpy.zeros((a_length)))
result = K.eval(masked_batch_dot(K.variable(tensor_a),
K.variable(tensor_b),
None,
None))
assert numpy.all(result[0, :, :] != numpy.zeros((a_length, b_length)))
assert numpy.all(result[1, :, :] != numpy.zeros((a_length, b_length)))
result = K.eval(masked_batch_dot(K.variable(tensor_a),
K.variable(tensor_b),
K.variable(mask_a),
None))
assert numpy.all(result[0, :, :] != numpy.zeros((a_length, b_length)))
assert numpy.any(result[1, 0, :] != numpy.zeros((b_length)))
assert numpy.any(result[1, 1, :] != numpy.zeros((b_length)))
assert numpy.any(result[1, 2, :] != numpy.zeros((b_length)))
assert numpy.all(result[1, 3, :] == numpy.zeros((b_length)))
assert numpy.any(result[1, :, 0] != numpy.zeros((a_length)))
assert numpy.any(result[1, :, 1] != numpy.zeros((a_length)))
assert numpy.any(result[1, :, 2] != numpy.zeros((a_length)))
assert numpy.any(result[1, :, 3] != numpy.zeros((a_length)))
assert numpy.any(result[1, :, 4] != numpy.zeros((a_length)))
result = K.eval(masked_batch_dot(K.variable(tensor_a),
K.variable(tensor_b),
None,
K.variable(mask_b)))
assert numpy.all(result[0, :, :] != numpy.zeros((a_length, b_length)))
assert numpy.any(result[1, 0, :] != numpy.zeros((b_length)))
assert numpy.any(result[1, 1, :] != numpy.zeros((b_length)))
assert numpy.any(result[1, 2, :] != numpy.zeros((b_length)))
assert numpy.any(result[1, 3, :] != numpy.zeros((b_length)))
assert numpy.any(result[1, :, 0] != numpy.zeros((a_length)))
assert numpy.any(result[1, :, 1] != numpy.zeros((a_length)))
assert numpy.all(result[1, :, 2] == numpy.zeros((a_length)))
assert numpy.any(result[1, :, 3] != numpy.zeros((a_length)))
assert numpy.any(result[1, :, 4] != numpy.zeros((a_length)))
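# Illustrative sketch (not part of the original test): the masking behaviour asserted
# above is consistent with zeroing out the masked rows of each input and then taking a
# batched dot product over the embedding dimension, e.g. in plain numpy:
#     masked_a = tensor_a * numpy.expand_dims(mask_a, -1)
#     masked_b = tensor_b * numpy.expand_dims(mask_b, -1)
#     reference = numpy.einsum('bik,bjk->bij', masked_a, masked_b)
# which makes reference[1, 3, :] and reference[1, :, 2] exactly zero, as asserted.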
def test_masked_batch_dot_handles_uneven_tensors(self):
# We're going to test masked_batch_dot with tensors of shape (batch_size, a_length,
# embedding_dim) and (batch_size, embedding_dim). The result should have shape
# (batch_size, a_length).
embedding_dim = 3
a_length = 5
batch_size = 2
tensor_a = numpy.random.rand(batch_size, a_length, embedding_dim)
tensor_b = numpy.random.rand(batch_size, embedding_dim)
mask_a = numpy.ones((batch_size, a_length))
mask_a[0, 3] = 0
mask_b = numpy.ones((batch_size,))
mask_b[1] = 0
result = K.eval(masked_batch_dot(K.variable(tensor_a),
K.variable(tensor_b),
K.variable(mask_a),
K.variable(mask_b)))
assert result[0, 0] != 0
assert result[0, 1] != 0
assert result[0, 2] != 0
assert result[0, 3] == 0
assert result[0, 4] != 0
assert numpy.all(result[1, :] == numpy.zeros((a_length)))
# We should get the same result if we flip the order of the tensors.
flipped_result = K.eval(masked_batch_dot(K.variable(tensor_b),
K.variable(tensor_a),
K.variable(mask_b),
K.variable(mask_a)))
assert numpy.all(result == flipped_result)
def test_masked_batch_dot_handles_uneven_higher_order_tensors(self):
# We're going to test masked_batch_dot with tensors of shape (batch_size, common,
# a_length, embedding_dim) and (batch_size, common, embedding_dim). The result should have
# shape (batch_size, common, a_length).
embedding_dim = 3
common_length = 4
a_length = 5
batch_size = 2
tensor_a = numpy.random.rand(batch_size, common_length, a_length, embedding_dim)
tensor_b = numpy.random.rand(batch_size, common_length, embedding_dim)
mask_a = numpy.ones((batch_size, common_length, a_length))
mask_a[1, 1, 3] = 0
mask_b = numpy.ones((batch_size, common_length))
mask_b[1, 2] = 0
result = K.eval(masked_batch_dot(K.variable(tensor_a),
K.variable(tensor_b),
K.variable(mask_a),
K.variable(mask_b)))
assert numpy.all(result[0, :, :] != numpy.zeros((common_length, a_length)))
assert numpy.all(result[1, 0, :] != numpy.zeros((a_length)))
assert result[1, 1, 0] != 0
assert result[1, 1, 1] != 0
assert result[1, 1, 2] != 0
assert result[1, 1, 3] == 0
assert result[1, 1, 4] != 0
assert numpy.all(result[1, 2, :] == numpy.zeros((a_length)))
assert numpy.all(result[1, 3, :] != numpy.zeros((a_length)))
# We should get the same result if we pass the smaller tensor in first.
flipped_result = K.eval(masked_batch_dot(K.variable(tensor_b),
K.variable(tensor_a),
K.variable(mask_b),
K.variable(mask_a)))
assert numpy.all(result == flipped_result)
def test_l1_normalize_no_mask(self):
# Testing the general unmasked 1D case.
vector_1d = K.variable(numpy.array([[2, 1, 5, 7]]))
vector_1d_normalized = K.eval(l1_normalize(vector_1d))
assert_almost_equal(vector_1d_normalized,
numpy.array([[0.13333333, 0.06666666,
0.33333333, 0.46666666]]))
assert_almost_equal(1.0, numpy.sum(vector_1d_normalized), decimal=6)
# Testing the unmasked 1D case with all 0s.
vector_1d_zeros = K.variable(numpy.array([[0, 0, 0, 0]]))
vector_1d_zeros_normalized = K.eval(l1_normalize(vector_1d_zeros))
assert_array_almost_equal(vector_1d_zeros_normalized,
numpy.array([[0.25, 0.25, 0.25, 0.25]]))
# Testing the general unmasked batched case when
# inputs are not all 0's
matrix = K.variable(numpy.array([[2, 1, 5, 7], [2, 2, 2, 2]]))
matrix_normalized = K.eval(l1_normalize(matrix))
assert_array_almost_equal(matrix_normalized,
numpy.array([[0.13333333, 0.06666666,
0.33333333, 0.46666666],
[0.25, 0.25,
0.25, 0.25]]))
assert_almost_equal(numpy.array([1.0, 1.0]),
numpy.sum(matrix_normalized, axis=1), decimal=6)
# Testing the general unmasked batched case when
# one row is all 0's
matrix = K.variable(numpy.array([[2, 1, 5, 7], [0, 0, 0, 0]]))
matrix_normalized = K.eval(l1_normalize(matrix))
assert_array_almost_equal(matrix_normalized,
numpy.array([[0.13333333, 0.06666666,
0.33333333, 0.46666666],
[0.25, 0.25,
0.25, 0.25]]))
assert_almost_equal(numpy.array([1.0, 1.0]),
numpy.sum(matrix_normalized, axis=1), decimal=6)
def test_l1_normalize_masked(self):
# Testing the general masked 1D case.
vector_1d = K.variable(numpy.array([[2, 1, 5, 7]]))
vector_1d_mask = K.variable(numpy.array([[1, 1, 0, 1]]))
vector_1d_normalized = K.eval(l1_normalize(vector_1d,
vector_1d_mask))
assert_array_almost_equal(vector_1d_normalized,
numpy.array([[0.2, 0.1,
0.0, 0.7]]))
assert_almost_equal(1.0, numpy.sum(vector_1d_normalized), decimal=6)
vector_1d = K.variable(numpy.array([[1.0, 2.0, 3.0, 4.0]]))
vector_1d_mask = K.variable(numpy.array([[1, 1, 0, 1]]))
vector_1d_normalized = K.eval(l1_normalize(vector_1d,
vector_1d_mask))
assert_array_almost_equal(vector_1d_normalized,
numpy.array([[0.14285715, 0.2857143,
0, 0.5714286]]))
assert_almost_equal(1.0, numpy.sum(vector_1d_normalized), decimal=6)
# Testing the masked 1D case where the mask is
# not all zero and the input is all zero.
vector_1d_zeros = K.variable(numpy.array([[0, 0, 0, 0]]))
vector_1d_zeros_mask = K.variable(numpy.array([[1, 1, 0, 1]]))
vector_1d_zeros_normalized = K.eval(l1_normalize(vector_1d_zeros,
vector_1d_zeros_mask))
assert_array_almost_equal(vector_1d_zeros_normalized,
numpy.array([[0.3333333, 0.3333333,
0.0, 0.3333333]]))
vector_1d_zeros = K.variable(numpy.array([[0, 0, 0, 0]]))
vector_1d_zeros_mask = K.variable(numpy.array([[0, 0, 0, 0]]))
vector_1d_zeros_normalized = K.eval(l1_normalize(vector_1d_zeros,
vector_1d_zeros_mask))
assert_array_almost_equal(vector_1d_zeros_normalized,
numpy.array([[0.25, 0.25,
0.25, 0.25]]))
# Testing the general batched masked case when the input is not
# all 0's and the masks are not all 0's.
matrix = K.variable(numpy.array([[2, 1, 5, 7], [2, 2, 2, 2]]))
matrix_mask = K.variable(numpy.array([[1, 1, 0, 1], [1, 1, 1, 1]]))
matrix_normalized = K.eval(l1_normalize(matrix, matrix_mask))
assert_array_almost_equal(matrix_normalized,
numpy.array([[0.2, 0.1,
0.0, 0.7],
[0.25, 0.25,
0.25, 0.25]]))
assert_almost_equal(numpy.array([1.0, 1.0]),
numpy.sum(matrix_normalized, axis=1), decimal=6)
# Testing the batched masked case when the masks are all 0's
# and one of the input rows is all 0's.
matrix = K.variable(numpy.array([[2, 1, 5, 7], [0, 0, 0, 0]]))
matrix_mask = K.variable(numpy.array([[0, 0, 0, 0], [0, 0, 0, 0]]))
matrix_normalized = K.eval(l1_normalize(matrix, matrix_mask))
assert_array_almost_equal(matrix_normalized,
numpy.array([[0.25, 0.25,
0.25, 0.25],
[0.25, 0.25,
0.25, 0.25]]))
assert_almost_equal(numpy.array([1.0, 1.0]),
numpy.sum(matrix_normalized, axis=1), decimal=6)
def test_l1_normalize_special_cases(self):
# Testing the special masked 1D case where the mask is
# all zero and the input is all zero as well.
vector_1d_zeros = K.variable(numpy.array([[0.0, 0.0, 0.0, 0.0]]))
vector_1d_zeros_mask = K.variable(numpy.array([[0, 0, 0, 0]]))
vector_1d_zeros_normalized = K.eval(l1_normalize(vector_1d_zeros,
vector_1d_zeros_mask))
assert_array_almost_equal(vector_1d_zeros_normalized,
numpy.array([[0.25, 0.25, 0.25, 0.25]]))
# Testing the special masked 1D case where the mask is
# all zero and the input is not all zero.
vector_1d_zeros = K.variable(numpy.array([[2, 1, 5, 7]]))
vector_1d_zeros_mask = K.variable(numpy.array([[0, 0, 0, 0]]))
vector_1d_zeros_normalized = K.eval(l1_normalize(vector_1d_zeros,
vector_1d_zeros_mask))
assert_array_almost_equal(vector_1d_zeros_normalized,
numpy.array([[0.25, 0.25, 0.25, 0.25]]))
def test_masked_softmax_no_mask(self):
# Testing the general unmasked 1D case.
vector_1d = K.variable(numpy.array([[1.0, 2.0, 3.0]]))
vector_1d_softmaxed = K.eval(masked_softmax(vector_1d, None))
assert_array_almost_equal(vector_1d_softmaxed,
numpy.array([[0.090031, 0.244728, 0.665241]]))
assert_almost_equal(1.0, numpy.sum(vector_1d_softmaxed), decimal=6)
vector_1d = K.variable(numpy.array([[1.0, 2.0, 5.0]]))
vector_1d_softmaxed = K.eval(masked_softmax(vector_1d, None))
assert_array_almost_equal(vector_1d_softmaxed,
numpy.array([[0.017148, 0.046613, 0.93624]]))
# Testing the unmasked 1D case where the input is all 0s.
vector_zero = K.variable(numpy.array([[0.0, 0.0, 0.0]]))
vector_zero_softmaxed = K.eval(masked_softmax(vector_zero, None))
assert_array_almost_equal(vector_zero_softmaxed,
numpy.array([[0.33333334, 0.33333334, 0.33333334]]))
# Testing the general unmasked batched case.
matrix = K.variable(numpy.array([[1.0, 2.0, 5.0], [1.0, 2.0, 3.0]]))
masked_matrix_softmaxed = K.eval(masked_softmax(matrix, None))
assert_array_almost_equal(masked_matrix_softmaxed,
numpy.array([[0.01714783, 0.04661262, 0.93623955],
[0.09003057, 0.24472847, 0.66524096]]))
# Testing the unmasked batched case where one of the inputs is all 0s.
matrix = K.variable(numpy.array([[1.0, 2.0, 5.0], [0.0, 0.0, 0.0]]))
masked_matrix_softmaxed = K.eval(masked_softmax(matrix, None))
assert_array_almost_equal(masked_matrix_softmaxed,
numpy.array([[0.01714783, 0.04661262, 0.93623955],
[0.33333334, 0.33333334, 0.33333334]]))
def test_masked_softmax_masked(self):
# Testing the general masked 1D case.
vector_1d = K.variable(numpy.array([[1.0, 2.0, 5.0]]))
mask_1d = K.variable(numpy.array([[1.0, 0.0, 1.0]]))
vector_1d_softmaxed = K.eval(masked_softmax(vector_1d, mask_1d))
assert_array_almost_equal(vector_1d_softmaxed,
numpy.array([[0.01798621, 0.0, 0.98201382]]))
vector_1d = K.variable(numpy.array([[0.0, 2.0, 3.0, 4.0]]))
mask_1d = K.variable(numpy.array([[1.0, 0.0, 1.0, 1.0]]))
vector_1d_softmaxed = K.eval(masked_softmax(vector_1d, mask_1d))
assert_array_almost_equal(vector_1d_softmaxed,
numpy.array([[0.01321289, 0.0,
0.26538793, 0.72139918]]))
# Testing the masked 1D case where the input is all 0s and the mask
# is not all 0s.
vector_1d = K.variable(numpy.array([[0.0, 0.0, 0.0, 0.0]]))
mask_1d = K.variable(numpy.array([[0.0, 0.0, 0.0, 1.0]]))
vector_1d_softmaxed = K.eval(masked_softmax(vector_1d, mask_1d))
assert_array_almost_equal(vector_1d_softmaxed,
numpy.array([[0, 0, 0, 1]]))
# Testing the masked 1D case where the input is not all 0s
# and the mask is all 0s.
vector_1d = K.variable(numpy.array([[0.0, 2.0, 3.0, 4.0]]))
mask_1d = K.variable(numpy.array([[0.0, 0.0, 0.0, 0.0]]))
vector_1d_softmaxed = K.eval(masked_softmax(vector_1d, mask_1d))
assert_array_almost_equal(vector_1d_softmaxed,
numpy.array([[0.0, 0.0,
0.0, 0.0]]))
# Testing the masked 1D case where the input is all 0s and
# the mask is all 0s.
vector_1d = K.variable(numpy.array([[0.0, 0.0, 0.0, 0.0]]))
mask_1d = K.variable(numpy.array([[0.0, 0.0, 0.0, 0.0]]))
vector_1d_softmaxed = K.eval(masked_softmax(vector_1d, mask_1d))
assert_array_almost_equal(vector_1d_softmaxed,
numpy.array([[0.0, 0.0,
0.0, 0.0]]))
# Testing the general masked batched case.
matrix = K.variable(numpy.array([[1.0, 2.0, 5.0], [1.0, 2.0, 3.0]]))
mask = K.variable(numpy.array([[1.0, 0.0, 1.0], [1.0, 1.0, 1.0]]))
masked_matrix_softmaxed = K.eval(masked_softmax(matrix, mask))
assert_array_almost_equal(masked_matrix_softmaxed,
numpy.array([[0.01798621, 0.0, 0.98201382],
[0.090031, 0.244728, 0.665241]]))
# Testing the masked batch case where one of the inputs is all 0s but
# none of the masks are all 0.
matrix = K.variable(numpy.array([[0.0, 0.0, 0.0], [1.0, 2.0, 3.0]]))
mask = K.variable(numpy.array([[1.0, 0.0, 1.0], [1.0, 1.0, 1.0]]))
masked_matrix_softmaxed = K.eval(masked_softmax(matrix, mask))
assert_array_almost_equal(masked_matrix_softmaxed,
numpy.array([[0.5, 0.0, 0.5],
[0.090031, 0.244728, 0.665241]]))
# Testing the masked batch case where one of the inputs is all 0s and
# one of the masks is all 0s.
matrix = K.variable(numpy.array([[0.0, 0.0, 0.0], [1.0, 2.0, 3.0]]))
mask = K.variable(numpy.array([[1.0, 0.0, 1.0], [0.0, 0.0, 0.0]]))
masked_matrix_softmaxed = K.eval(masked_softmax(matrix, mask))
assert_array_almost_equal(masked_matrix_softmaxed,
numpy.array([[0.5, 0.0, 0.5],
[0.0, 0.0, 0.0]]))
matrix = K.variable(numpy.array([[0.0, 0.0, 0.0], [1.0, 2.0, 3.0]]))
mask = K.variable(numpy.array([[0.0, 0.0, 0.0], [1.0, 0.0, 1.0]]))
masked_matrix_softmaxed = K.eval(masked_softmax(matrix, mask))
assert_array_almost_equal(masked_matrix_softmaxed,
numpy.array([[0.0, 0.0, 0.0],
[0.11920292, 0.0, 0.88079708]]))
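# A minimal plain-numpy reference for masked_softmax, written only to make the expected
# values above easy to verify by hand. It is an assumption about the semantics, not the
# library's implementation, and it skips the max-subtraction trick a real implementation
# would need for numerical stability with large inputs.
def _reference_masked_softmax(vector, mask):
    if mask is None:
        mask = numpy.ones_like(vector)
    # Exponentiate, zero out masked positions, then renormalize.  Rows whose masked
    # exponentials sum to zero (i.e. an all-zero mask) come back as all zeros.
    exps = numpy.exp(vector) * mask
    totals = numpy.sum(exps, axis=-1, keepdims=True)
    return numpy.where(totals > 0, exps / numpy.maximum(totals, 1e-13), 0.0)
# e.g. _reference_masked_softmax(numpy.array([[1.0, 2.0, 5.0]]), numpy.array([[1.0, 0.0, 1.0]]))
# gives approximately [[0.018, 0.0, 0.982]], matching the expected values above.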
| deep_qa-master | tests/tensors/masked_operations_test.py |
# pylint: disable=no-self-use,invalid-name
import numpy
from deep_qa.tensors.backend import hardmax
from deep_qa.testing.test_case import DeepQaTestCase
from keras import backend as K
class TestBackendTensorFunctions(DeepQaTestCase):
def test_hardmax(self):
batch_size = 3
knowledge_length = 5
unnormalized_attention = K.variable(numpy.random.rand(batch_size, knowledge_length))
hardmax_output = hardmax(unnormalized_attention, knowledge_length)
input_value = K.eval(unnormalized_attention)
output_value = K.eval(hardmax_output)
assert output_value.shape == (batch_size, knowledge_length) # pylint: disable=no-member
# Assert all elements other than the ones are zeros
assert numpy.count_nonzero(output_value) == batch_size
# Assert the max values in all rows are ones
assert numpy.all(numpy.equal(numpy.max(output_value, axis=1),
numpy.ones((batch_size,))))
# Assert ones are in the right places
assert numpy.all(numpy.equal(numpy.argmax(output_value, axis=1),
numpy.argmax(input_value, axis=1)))
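# For intuition, hardmax turns each row into a one-hot vector at its argmax; assuming the
# rows have unique maxima (true with probability 1 for the random input above), it is
# equivalent to the numpy expression
#     (input_value == input_value.max(axis=1, keepdims=True)).astype('float32')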
| deep_qa-master | tests/tensors/backend_test.py |
# pylint: disable=no-self-use,invalid-name
import numpy
from numpy.testing import assert_almost_equal
import keras.backend as K
from deep_qa.tensors.similarity_functions.dot_product import DotProduct
class TestDotProductSimilarityFunction:
dot_product = DotProduct(name='dot_product')
def test_initialize_weights_returns_empty(self):
weights = self.dot_product.initialize_weights(3, 3)
assert isinstance(weights, list) and len(weights) == 0
def test_compute_similarity_does_a_dot_product(self):
a_vectors = numpy.asarray([[1, 1, 1], [-1, -1, -1]])
b_vectors = numpy.asarray([[1, 0, 1], [1, 0, 0]])
result = K.eval(self.dot_product.compute_similarity(K.variable(a_vectors), K.variable(b_vectors)))
assert result.shape == (2,)
assert numpy.all(result == [2, -1])
def test_compute_similarity_works_with_higher_order_tensors(self):
a_vectors = numpy.random.rand(5, 4, 3, 6, 7)
b_vectors = numpy.random.rand(5, 4, 3, 6, 7)
result = K.eval(self.dot_product.compute_similarity(K.variable(a_vectors), K.variable(b_vectors)))
assert result.shape == (5, 4, 3, 6)
assert_almost_equal(result[3, 2, 1, 3],
numpy.dot(a_vectors[3, 2, 1, 3], b_vectors[3, 2, 1, 3]),
decimal=6)
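# For reference, the full higher-order result should agree with the plain numpy expression
# numpy.sum(a_vectors * b_vectors, axis=-1) (an assumption consistent with the single
# element checked above), since the dot product is taken over the last axis only.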
| deep_qa-master | tests/tensors/similarity_functions/dot_product_test.py |
# pylint: disable=no-self-use,invalid-name
import numpy
from numpy.testing import assert_almost_equal
import keras.backend as K
from deep_qa.tensors.similarity_functions.linear import Linear
class TestLinearSimilarityFunction:
def test_initialize_weights_returns_correct_weight_sizes(self):
linear = Linear(name='linear', combination='x,y')
weights = linear.initialize_weights(3, 6)
assert isinstance(weights, list) and len(weights) == 2
weight_vector, bias = weights
assert K.int_shape(weight_vector) == (9, 1)
assert K.int_shape(bias) == (1,)
def test_compute_similarity_does_a_weighted_product(self):
linear = Linear(name='linear', combination='x,y')
linear.weight_vector = K.variable(numpy.asarray([[-.3], [.5], [2.0], [-1.0]]))
linear.bias = K.variable(numpy.asarray([.1]))
a_vectors = numpy.asarray([[[1, 1, 1], [-1, -1, 0]]])
b_vectors = numpy.asarray([[[0], [1]]])
result = K.eval(linear.compute_similarity(K.variable(a_vectors), K.variable(b_vectors)))
assert result.shape == (1, 2,)
assert_almost_equal(result, [[2.3, -1.1]])
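# Worked arithmetic for the expected values above: with combination 'x,y' the inputs are
# concatenated along the last axis, so the first pair becomes [1, 1, 1, 0], giving
# -0.3 + 0.5 + 2.0 + 0.0 + 0.1 (bias) = 2.3, and the second pair becomes [-1, -1, 0, 1],
# giving 0.3 - 0.5 + 0.0 - 1.0 + 0.1 = -1.1.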
def test_compute_similarity_works_with_higher_order_tensors(self):
linear = Linear(name='linear', combination='x,y')
weights = numpy.random.rand(14, 1)
linear.weight_vector = K.variable(weights)
linear.bias = K.variable(numpy.asarray([0]))
a_vectors = numpy.random.rand(5, 4, 3, 6, 7)
b_vectors = numpy.random.rand(5, 4, 3, 6, 7)
result = K.eval(linear.compute_similarity(K.variable(a_vectors), K.variable(b_vectors)))
assert result.shape == (5, 4, 3, 6)
combined_vectors = numpy.concatenate([a_vectors[3, 2, 1, 3, :], b_vectors[3, 2, 1, 3, :]])
expected_result = numpy.dot(combined_vectors, weights)
assert_almost_equal(result[3, 2, 1, 3], expected_result, decimal=6)
def test_compute_similarity_works_with_multiply_combinations(self):
linear = Linear(name='linear', combination='x*y')
linear.weight_vector = K.variable(numpy.asarray([[-.3], [.5]]))
linear.bias = K.variable(numpy.asarray([0]))
a_vectors = numpy.asarray([[1, 1], [-1, -1]])
b_vectors = numpy.asarray([[1, 0], [0, 1]])
result = K.eval(linear.compute_similarity(K.variable(a_vectors), K.variable(b_vectors)))
assert result.shape == (2,)
assert_almost_equal(result, [-.3, -.5])
def test_compute_similarity_works_with_divide_combinations(self):
linear = Linear(name='linear', combination='x/y')
linear.weight_vector = K.variable(numpy.asarray([[-.3], [.5]]))
linear.bias = K.variable(numpy.asarray([0]))
a_vectors = numpy.asarray([[1, 1], [-1, -1]])
b_vectors = numpy.asarray([[1, 2], [2, 1]])
result = K.eval(linear.compute_similarity(K.variable(a_vectors), K.variable(b_vectors)))
assert result.shape == (2,)
assert_almost_equal(result, [-.05, -.35])
def test_compute_similarity_works_with_add_combinations(self):
linear = Linear(name='linear', combination='x+y')
linear.weight_vector = K.variable(numpy.asarray([[-.3], [.5]]))
linear.bias = K.variable(numpy.asarray([0]))
a_vectors = numpy.asarray([[1, 1], [-1, -1]])
b_vectors = numpy.asarray([[1, 0], [0, 1]])
result = K.eval(linear.compute_similarity(K.variable(a_vectors), K.variable(b_vectors)))
assert result.shape == (2,)
assert_almost_equal(result, [-.1, .3])
def test_compute_similarity_works_with_subtract_combinations(self):
linear = Linear(name='linear', combination='x-y')
linear.weight_vector = K.variable(numpy.asarray([[-.3], [.5]]))
linear.bias = K.variable(numpy.asarray([0]))
a_vectors = numpy.asarray([[1, 1], [-1, -1]])
b_vectors = numpy.asarray([[1, 0], [0, 1]])
result = K.eval(linear.compute_similarity(K.variable(a_vectors), K.variable(b_vectors)))
assert result.shape == (2,)
assert_almost_equal(result, [.5, -.7])
| deep_qa-master | tests/tensors/similarity_functions/linear_test.py |
deep_qa-master | tests/tensors/similarity_functions/__init__.py |
|
# pylint: disable=no-self-use,invalid-name
import numpy
from numpy.testing import assert_almost_equal
import keras.backend as K
from deep_qa.tensors.similarity_functions.cosine_similarity import CosineSimilarity
from deep_qa.tensors.similarity_functions.dot_product import DotProduct
class TestCosineSimilarityFunction:
cosine_similarity = CosineSimilarity(name='cosine_similarity')
dot_product = DotProduct(name="dot_product")
def test_initialize_weights_returns_empty(self):
weights = self.cosine_similarity.initialize_weights(3, 3)
assert isinstance(weights, list) and len(weights) == 0
def test_compute_similarity_does_a_cosine_similarity(self):
a_vectors = numpy.asarray([[numpy.random.random(3) for _ in range(2)]], dtype="float32")
b_vectors = numpy.asarray([[numpy.random.random(3) for _ in range(2)]], dtype="float32")
normed_a = K.l2_normalize(K.variable(a_vectors), axis=-1)
normed_b = K.l2_normalize(K.variable(b_vectors), axis=-1)
desired_result = K.eval(self.dot_product.compute_similarity(normed_a, normed_b))
result = K.eval(self.cosine_similarity.compute_similarity(K.variable(a_vectors), K.variable(b_vectors)))
assert result.shape == (1, 2) # batch_size = 1
assert numpy.all(result == desired_result)
def test_compute_similarity_works_with_higher_order_tensors(self):
a_vectors = numpy.random.rand(5, 4, 3, 6, 7)
b_vectors = numpy.random.rand(5, 4, 3, 6, 7)
normed_a = K.eval(K.l2_normalize(K.variable(a_vectors), axis=-1))
normed_b = K.eval(K.l2_normalize(K.variable(b_vectors), axis=-1))
result = K.eval(self.cosine_similarity.compute_similarity(K.variable(a_vectors), K.variable(b_vectors)))
assert result.shape == (5, 4, 3, 6)
assert_almost_equal(result[3, 2, 1, 3],
numpy.dot(normed_a[3, 2, 1, 3], normed_b[3, 2, 1, 3]),
decimal=6)
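# In other words, cosine similarity is just the dot product of l2-normalized vectors,
# which is exactly the reference computed in both tests above; as a quick sanity check,
# cosine([1, 0], [1, 1]) = 1 / sqrt(2), or roughly 0.707.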
| deep_qa-master | tests/tensors/similarity_functions/cosine_similarity_test.py |
# pylint: disable=no-self-use,invalid-name
import numpy
from numpy.testing import assert_almost_equal
import keras.backend as K
from deep_qa.tensors.similarity_functions.bilinear import Bilinear
class TestBilinearSimilarityFunction:
def test_initialize_weights_returns_correct_weight_sizes(self):
bilinear = Bilinear(name='bilinear')
weights = bilinear.initialize_weights(3, 3)
assert isinstance(weights, list) and len(weights) == 2
weight_vector, bias = weights
assert K.int_shape(weight_vector) == (3, 3)
assert K.int_shape(bias) == (1,)
weights = bilinear.initialize_weights(2, 5)
assert isinstance(weights, list) and len(weights) == 2
weight_vector, bias = weights
assert K.int_shape(weight_vector) == (2, 5)
assert K.int_shape(bias) == (1,)
def test_compute_similarity_does_a_bilinear_product(self):
bilinear = Bilinear(name='bilinear')
weights = numpy.asarray([[-.3, .5], [2.0, -1.0]])
bilinear.weight_matrix = K.variable(weights)
bilinear.bias = K.variable(numpy.asarray([.1]))
a_vectors = numpy.asarray([[1, 1], [-1, -1]])
b_vectors = numpy.asarray([[1, 0], [0, 1]])
result = K.eval(bilinear.compute_similarity(K.variable(a_vectors), K.variable(b_vectors)))
assert result.shape == (2,)
assert_almost_equal(result, [1.8, .6])
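# Worked arithmetic for the expected values above, assuming the similarity is
# a^T W b + bias: [1, 1]^T W = [1.7, -0.5], dotted with [1, 0] gives 1.7, plus 0.1 = 1.8;
# [-1, -1]^T W = [-1.7, 0.5], dotted with [0, 1] gives 0.5, plus 0.1 = 0.6.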
def test_compute_similarity_works_with_higher_order_tensors(self):
bilinear = Bilinear(name='bilinear')
weights = numpy.random.rand(4, 7)
bilinear.weight_matrix = K.variable(weights)
bilinear.bias = K.variable(numpy.asarray([0]))
a_vectors = numpy.random.rand(5, 4, 3, 6, 4)
b_vectors = numpy.random.rand(5, 4, 3, 6, 7)
result = K.eval(bilinear.compute_similarity(K.variable(a_vectors), K.variable(b_vectors)))
assert result.shape == (5, 4, 3, 6)
expected_result = numpy.dot(numpy.dot(numpy.transpose(a_vectors[3, 2, 1, 3]), weights),
b_vectors[3, 2, 1, 3])
assert_almost_equal(result[3, 2, 1, 3], expected_result, decimal=5)
| deep_qa-master | tests/tensors/similarity_functions/bilinear_test.py |
deep_qa-master | tests/models/__init__.py |
|
deep_qa-master | tests/models/sequence_tagging/__init__.py |
|
# pylint: disable=no-self-use,invalid-name
import numpy
from deep_qa.common.params import Params
from deep_qa.models.sequence_tagging import SimpleTagger
from deep_qa.testing.test_case import DeepQaTestCase
class TestSimpleTagger(DeepQaTestCase):
def test_trains_and_loads_correctly(self):
self.write_sequence_tagging_files()
args = Params({
'save_models': True,
'show_summary_with_masking_info': True,
'instance_type': 'PreTokenizedTaggingInstance',
'tokenizer': {'processor': {'word_splitter': 'no_op'}},
})
self.ensure_model_trains_and_loads(SimpleTagger, args)
def test_loss_function_uses_mask(self):
# We're going to make sure that the loss and accuracy computations are the same for any
# permutation of labels on padded tokens. If not, the loss/accuracy function is paying
# attention to the labels when it shouldn't be. We're not going to test for any particular
# accuracy value, just that all of them are the same - I ran this a few times by hand to be
# sure that we're getting different accuracy values, depending on the initialization.
self.write_sequence_tagging_files()
args = Params({
'show_summary_with_masking_info': True,
'instance_type': 'PreTokenizedTaggingInstance',
'tokenizer': {'processor': {'word_splitter': 'no_op'}},
})
model = self.get_model(SimpleTagger, args)
model.train()
input_indices = [3, 2, 0, 0]
labels = [[[0, 1], [1, 0], [1, 0], [1, 0]],
[[0, 1], [1, 0], [1, 0], [0, 1]],
[[0, 1], [1, 0], [0, 1], [1, 0]],
[[0, 1], [1, 0], [0, 1], [0, 1]]]
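# Note that input_indices ends with two 0s (padding), so the four label sequences above
# differ only on the padded positions; a correctly masked loss must therefore score all
# of them identically.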
results = [model.model.evaluate(numpy.asarray([input_indices]), numpy.asarray([label]))
for label in labels]
loss, accuracy = zip(*results)
assert len(set(loss)) == 1
assert len(set(accuracy)) == 1
| deep_qa-master | tests/models/sequence_tagging/simple_tagger_test.py |
deep_qa-master | tests/models/entailment/__init__.py |
|
# pylint: disable=no-self-use,invalid-name
from deep_qa.common.params import Params
from deep_qa.models.entailment import DecomposableAttention
from deep_qa.testing.test_case import DeepQaTestCase
class TestDecomposableAttentionModel(DeepQaTestCase):
def test_trains_and_loads_correctly(self):
self.write_snli_files()
args = Params({
'num_seq2seq_layers': 1,
})
self.ensure_model_trains_and_loads(DecomposableAttention, args)
| deep_qa-master | tests/models/entailment/decomposable_attention_test.py |
deep_qa-master | tests/models/reading_comprehension/__init__.py |
|
# pylint: disable=no-self-use,invalid-name
from deep_qa.common.params import Params
from deep_qa.models.reading_comprehension import AttentionSumReader
from deep_qa.testing.test_case import DeepQaTestCase
class TestAttentionSumReader(DeepQaTestCase):
def test_train_does_not_crash_and_load_works(self):
self.write_who_did_what_files()
args = Params({
"encoder": {
"default": {
"type": "bi_gru",
"units": 7
}
},
"seq2seq_encoder": {
"default": {
"type": "bi_gru",
"encoder_params": {
"units": 7
},
"wrapper_params": {}
}
},
})
self.ensure_model_trains_and_loads(AttentionSumReader, args)
| deep_qa-master | tests/models/reading_comprehension/attention_sum_reader_test.py |
# pylint: disable=no-self-use,invalid-name
from deep_qa.common.params import Params
from deep_qa.models.reading_comprehension import GatedAttentionReader
from deep_qa.testing.test_case import DeepQaTestCase
class TestGatedAttention(DeepQaTestCase):
def test_cloze_train_does_not_crash(self):
self.write_who_did_what_files()
args = Params({
"qd_common_feature": True,
"gating_function": "+",
"cloze_token": "xxxxx",
"num_gated_attention_layers": 2,
"tokenizer": {
"type": "words and characters"
},
"encoder": {
"word": {
"type": "bi_gru",
"units": 2,
}
},
"seq2seq_encoder": {
"question_0": {
"type": "bi_gru",
"encoder_params": {
"units": 3
},
"wrapper_params": {}
},
"document_0": {
"type": "bi_gru",
"encoder_params": {
"units": 3
},
"wrapper_params": {}
},
"document_final": {
"type": "bi_gru",
"encoder_params": {
"units": 3
},
"wrapper_params": {}
},
"question_final": {
"type": "bi_gru",
"encoder_params": {
"units": 3
},
"wrapper_params": {
"merge_mode": None
}
}
},
})
model, loaded_model = self.ensure_model_trains_and_loads(GatedAttentionReader, args)
# verify that the gated attention function was set properly
assert model.gating_function == "+"
assert model.gating_function == model.model.get_layer("gated_attention_0").gating_function
# verify that the gated attention function was set properly in the loaded model
assert loaded_model.gating_function == "+"
assert loaded_model.gating_function == loaded_model.model.get_layer("gated_attention_0").gating_function
def test_non_cloze_train_does_not_crash(self):
self.write_who_did_what_files()
args = Params({
"qd_common_feature": True,
"num_gated_attention_layers": 2,
"gating_function": "+",
"tokenizer": {
"type": "words and characters"
},
"encoder": {
"word": {
"type": "bi_gru",
"units": 2,
},
"question_final": {
"type": "bi_gru",
"units": 3
}
},
"seq2seq_encoder": {
"question_0": {
"type": "bi_gru",
"encoder_params": {
"units": 3
},
"wrapper_params": {}
},
"document_0": {
"type": "bi_gru",
"encoder_params": {
"units": 3
},
"wrapper_params": {}
},
"document_final": {
"type": "bi_gru",
"encoder_params": {
"units": 3
},
"wrapper_params": {}
}
},
})
model, loaded_model = self.ensure_model_trains_and_loads(GatedAttentionReader, args)
# verify that the gated attention function was set properly
assert model.gating_function == "+"
assert model.gating_function == model.model.get_layer("gated_attention_0").gating_function
# verify that the gated attention function was set properly in the loaded model
assert loaded_model.gating_function == "+"
assert loaded_model.gating_function == loaded_model.model.get_layer("gated_attention_0").gating_function
| deep_qa-master | tests/models/reading_comprehension/gated_attention_reader_test.py |
# pylint: disable=no-self-use,invalid-name
import numpy
from deep_qa.common.params import Params
from deep_qa.models.reading_comprehension import BidirectionalAttentionFlow
from deep_qa.testing.test_case import DeepQaTestCase
from flaky import flaky
class TestBidirectionalAttentionFlow(DeepQaTestCase):
@flaky
def test_trains_and_loads_correctly(self):
self.write_span_prediction_files()
args = Params({
'embeddings': {'words': {'dimension': 8}, 'characters': {'dimension': 4}},
'save_models': True,
'tokenizer': {'type': 'words and characters'},
'show_summary_with_masking_info': True,
})
model, _ = self.ensure_model_trains_and_loads(BidirectionalAttentionFlow, args)
for layer in model.model.layers:
if layer.name == 'characters_embedding':
assert layer.get_output_shape_at(0)[-1] == 4
break
else:
assert False, "couldn't find character embedding layer"
def test_get_best_span(self):
# Note that the best span cannot be (1, 0) since even though 0.3 * 0.5 is the greatest
# value, the end span index is constrained to occur after the begin span index.
span_begin_probs = numpy.array([0.1, 0.3, 0.05, 0.3, 0.25])
span_end_probs = numpy.array([0.5, 0.1, 0.2, 0.05, 0.15])
begin_end_idxs = BidirectionalAttentionFlow.get_best_span(span_begin_probs,
span_end_probs)
assert begin_end_idxs == (1, 2)
# Testing an edge case of the dynamic program here, for the order of when you update the
# best previous span position. We should not get (1, 1), because that's an empty span.
span_begin_probs = numpy.array([0.4, 0.5, 0.1])
span_end_probs = numpy.array([0.3, 0.6, 0.1])
begin_end_idxs = BidirectionalAttentionFlow.get_best_span(span_begin_probs,
span_end_probs)
assert begin_end_idxs == (0, 1)
# test higher-order input
# Note that the best span cannot be (1, 1) since even though 0.3 * 0.5 is the greatest
# value, the end span index is constrained to occur after the begin span index.
span_begin_probs = numpy.array([[0.1, 0.3, 0.05, 0.3, 0.25]])
span_end_probs = numpy.array([[0.1, 0.5, 0.2, 0.05, 0.15]])
begin_end_idxs = BidirectionalAttentionFlow.get_best_span(span_begin_probs,
span_end_probs)
assert begin_end_idxs == (1, 2)
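# Illustrative sketch, not part of the original model: a brute-force O(length^2) reference
# for the span search exercised above. It assumes the convention implied by these tests,
# namely that a span (begin, end) requires end > begin (so (1, 1) is an empty span) and
# that the best span maximizes span_begin_probs[begin] * span_end_probs[end].
def _naive_best_span(span_begin_probs, span_end_probs):
    best_span, best_score = None, -1.0
    for begin, begin_prob in enumerate(span_begin_probs):
        for end, end_prob in enumerate(span_end_probs):
            if end > begin and begin_prob * end_prob > best_score:
                best_span, best_score = (begin, end), begin_prob * end_prob
    return best_span
# On the first (1D) case above this returns (1, 2), agreeing with get_best_span; the
# dynamic program in get_best_span finds the same maximum in a single pass.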
| deep_qa-master | tests/models/reading_comprehension/bidirectional_attention_test.py |
deep_qa-master | tests/common/__init__.py |
|
from deep_qa.common.checks import ensure_pythonhashseed_set
def test_pythonhashseed():
ensure_pythonhashseed_set()
| deep_qa-master | tests/common/pythonhashseed_test.py |
# pylint: disable=no-self-use,invalid-name
from deep_qa.common import util
from deep_qa.testing.test_case import DeepQaTestCase
class TestCommonUtils(DeepQaTestCase):
def test_group_by_count(self):
assert util.group_by_count([1, 2, 3, 4, 5, 6, 7], 3, 20) == [[1, 2, 3], [4, 5, 6], [7, 20, 20]]
| deep_qa-master | tests/common/test_util.py |
# pylint: disable=no-self-use,invalid-name
import gzip
import numpy
import pytest
from deep_qa.common.checks import ConfigurationError
from deep_qa.data.data_indexer import DataIndexer
from deep_qa.data.embeddings import PretrainedEmbeddings
from deep_qa.models.text_classification import ClassificationModel
from deep_qa.testing.test_case import DeepQaTestCase
class TestPretrainedEmbeddings(DeepQaTestCase):
# pylint: disable=protected-access
def test_get_embedding_layer_uses_correct_embedding_dim(self):
data_indexer = DataIndexer()
embeddings_filename = self.TEST_DIR + "embeddings.gz"
with gzip.open(embeddings_filename, 'wb') as embeddings_file:
embeddings_file.write("word1 1.0 2.3 -1.0\n".encode('utf-8'))
embeddings_file.write("word2 0.1 0.4 -4.0\n".encode('utf-8'))
embedding_layer = PretrainedEmbeddings.get_embedding_layer(embeddings_filename, data_indexer)
assert embedding_layer.output_dim == 3
with gzip.open(embeddings_filename, 'wb') as embeddings_file:
embeddings_file.write("word1 1.0 2.3 -1.0 3.1\n".encode('utf-8'))
embeddings_file.write("word2 0.1 0.4 -4.0 -1.2\n".encode('utf-8'))
embedding_layer = PretrainedEmbeddings.get_embedding_layer(embeddings_filename, data_indexer)
assert embedding_layer.output_dim == 4
def test_get_embedding_layer_crashes_when_embedding_dim_is_one(self):
data_indexer = DataIndexer()
embeddings_filename = self.TEST_DIR + "embeddings.gz"
with gzip.open(embeddings_filename, 'wb') as embeddings_file:
embeddings_file.write("dimensionality 3\n".encode('utf-8'))
embeddings_file.write("word1 1.0 2.3 -1.0\n".encode('utf-8'))
embeddings_file.write("word2 0.1 0.4 -4.0\n".encode('utf-8'))
with pytest.raises(Exception):
PretrainedEmbeddings.get_embedding_layer(embeddings_filename, data_indexer)
def test_get_embedding_layer_skips_inconsistent_lines(self):
data_indexer = DataIndexer()
data_indexer.add_word_to_index("word1")
data_indexer.add_word_to_index("word2")
embeddings_filename = self.TEST_DIR + "embeddings.gz"
with gzip.open(embeddings_filename, 'wb') as embeddings_file:
embeddings_file.write("word1 1.0 2.3 -1.0\n".encode('utf-8'))
embeddings_file.write("word2 0.1 0.4 \n".encode('utf-8'))
embedding_layer = PretrainedEmbeddings.get_embedding_layer(embeddings_filename, data_indexer)
print(embedding_layer.weights)
word_vector = embedding_layer._initial_weights[0][data_indexer.get_word_index("word2")]
assert not numpy.allclose(word_vector[:2], numpy.asarray([0.1, 0.4]))
def test_get_embedding_layer_actually_initializes_word_vectors_correctly(self):
data_indexer = DataIndexer()
data_indexer.add_word_to_index("word")
embeddings_filename = self.TEST_DIR + "embeddings.gz"
with gzip.open(embeddings_filename, 'wb') as embeddings_file:
embeddings_file.write("word 1.0 2.3 -1.0\n".encode('utf-8'))
embedding_layer = PretrainedEmbeddings.get_embedding_layer(embeddings_filename, data_indexer)
word_vector = embedding_layer._initial_weights[0][data_indexer.get_word_index("word")]
assert numpy.allclose(word_vector, numpy.asarray([1.0, 2.3, -1.0]))
def test_get_embedding_layer_initializes_unseen_words_randomly_not_zero(self):
data_indexer = DataIndexer()
data_indexer.add_word_to_index("word2")
embeddings_filename = self.TEST_DIR + "embeddings.gz"
with gzip.open(embeddings_filename, 'wb') as embeddings_file:
embeddings_file.write("word 1.0 2.3 -1.0\n".encode('utf-8'))
embedding_layer = PretrainedEmbeddings.get_embedding_layer(embeddings_filename, data_indexer)
word_vector = embedding_layer._initial_weights[0][data_indexer.get_word_index("word2")]
assert not numpy.allclose(word_vector, numpy.asarray([0.0, 0.0, 0.0]))
def test_embedding_will_not_project_random_embeddings(self):
self.write_pretrained_vector_files()
self.write_true_false_model_files()
with pytest.raises(ConfigurationError):
args = {
"embeddings": {
"words": {
"dimension": 5,
"project": True,
"fine_tune": True,
"dropout": 0.2
}
}
}
model = self.get_model(ClassificationModel, args)
model.train()
def test_projection_dim_not_equal_to_pretrained_dim_with_no_projection_flag_raises_error(self):
self.write_pretrained_vector_files()
self.write_true_false_model_files()
with pytest.raises(ConfigurationError):
args = {
"embeddings": {
"words": {
"dimension": 13,
"pretrained_file": self.PRETRAINED_VECTORS_GZIP,
"project": False,
"fine_tune": False,
"dropout": 0.2
}
}
}
model = self.get_model(ClassificationModel, args)
model.train()
| deep_qa-master | tests/data/embeddings_test.py |
# pylint: disable=no-self-use,invalid-name
import numpy
from deep_qa.common.params import Params
from deep_qa.data import DataGenerator, IndexedDataset
from deep_qa.testing.test_case import DeepQaTestCase
class TestDataGenerator(DeepQaTestCase):
def setUp(self):
super(TestDataGenerator, self).setUp()
self.text_trainer = FakeTextTrainer()
self.instances = [
FakeInstance(0, 5, 3, 2),
FakeInstance(1, 4, 3, 2),
FakeInstance(2, 4, 1, 2),
FakeInstance(3, 9, 3, 2),
FakeInstance(4, 8, 3, 2),
FakeInstance(5, 2, 1, 2),
FakeInstance(6, 3, 3, 2),
FakeInstance(7, 3, 3, 3),
FakeInstance(8, 1, 1, 2),
FakeInstance(9, 1, 1, 3),
]
def test_instances_are_sorted_by_sorting_keys(self):
params = Params({
'dynamic_padding': True,
'padding_noise': 0.0,
})
generator = DataGenerator(self.text_trainer, params)
batches = generator.create_generator(IndexedDataset(self.instances))
assert generator.last_num_batches == 4
one_epoch_arrays = [next(batches) for _ in range(4)]
one_epoch_arrays.sort(key=lambda x: x[0][0])
assert self.as_list(one_epoch_arrays[0][0]) == [1, 0, 4]
assert self.as_list(one_epoch_arrays[1][0]) == [3]
assert self.as_list(one_epoch_arrays[2][0]) == [6, 7, 2]
assert self.as_list(one_epoch_arrays[3][0]) == [8, 9, 5]
def test_batches_are_consistent_with_no_repermuting(self):
params = Params({
'padding_noise': 0.0,
'sort_every_epoch': False,
'dynamic_padding': True,
})
generator = DataGenerator(self.text_trainer, params)
batches = generator.create_generator(IndexedDataset(self.instances))
assert generator.last_num_batches == 4
first_epoch_arrays = [next(batches) for _ in range(4)]
second_epoch_arrays = [next(batches) for _ in range(4)]
first_epoch_arrays.sort(key=lambda x: x[0][0])
second_epoch_arrays.sort(key=lambda x: x[0][0])
first_epoch = [self.as_list(x[0]) for x in first_epoch_arrays]
second_epoch = [self.as_list(x[0]) for x in second_epoch_arrays]
assert first_epoch == second_epoch
def test_biggest_batch_first(self):
params = Params({
'padding_noise': 0.0,
'dynamic_padding': True,
'biggest_batch_first': True,
})
generator = DataGenerator(self.text_trainer, params)
batches = generator.create_generator(IndexedDataset(self.instances))
biggest_batches = [next(batches) for _ in range(2)]
assert self.as_list(biggest_batches[0][0]) == [3]
assert self.as_list(biggest_batches[1][0]) == [1, 0, 4]
def test_adaptive_grouping(self):
params = Params({
'padding_noise': 0.0,
'dynamic_padding': True,
'adaptive_batch_sizes': True,
'adaptive_memory_usage_constant': 130,
})
generator = DataGenerator(self.text_trainer, params)
batches = generator.create_generator(IndexedDataset(self.instances))
assert generator.last_num_batches == 4
one_epoch_arrays = [next(batches) for _ in range(4)]
one_epoch_arrays.sort(key=lambda x: x[0][0])
assert self.as_list(one_epoch_arrays[0][0]) == [0, 4]
assert self.as_list(one_epoch_arrays[1][0]) == [3]
assert self.as_list(one_epoch_arrays[2][0]) == [7, 2, 1]
assert self.as_list(one_epoch_arrays[3][0]) == [8, 9, 5, 6]
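# One consistent reading of these groupings, assuming batches are filled greedily over the
# instances sorted by padding length and closed once batch_size * (padded a * b * c) would
# exceed the adaptive_memory_usage_constant of 130:
#   [8, 9, 5, 6] pads to (3, 3, 3): 4 * 27 = 108 <= 130, but adding instance 7 gives 5 * 27 = 135
#   [7, 2, 1]    pads to (4, 3, 3): 3 * 36 = 108 <= 130, but adding instance 0 gives 4 * 45 = 180
#   [0, 4]       pads to (8, 3, 2): 2 * 48 = 96 <= 130, but adding instance 3 gives 3 * 54 = 162
#   [3]          pads to (9, 3, 2): 1 * 54 = 54 <= 130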
def test_sort_every_batch_actually_adds_noise_every_batch(self):
# We're just going to get two epochs' worth of batches, and make sure that they're
# different.
params = Params({
'padding_noise': 0.8,
'sort_every_epoch': True,
'dynamic_padding': True,
})
generator = DataGenerator(self.text_trainer, params)
batches = generator.create_generator(IndexedDataset(self.instances))
assert generator.last_num_batches == 4
first_epoch_arrays = [next(batches) for _ in range(4)]
second_epoch_arrays = [next(batches) for _ in range(4)]
first_epoch_arrays.sort(key=lambda x: x[0][0])
second_epoch_arrays.sort(key=lambda x: x[0][0])
first_epoch = [self.as_list(x[0]) for x in first_epoch_arrays]
second_epoch = [self.as_list(x[0]) for x in second_epoch_arrays]
assert first_epoch != second_epoch
def test_maximum_batch_size_is_actually_a_maximum(self):
params = Params({
'padding_noise': 0.0,
'dynamic_padding': True,
'adaptive_batch_sizes': True,
'adaptive_memory_usage_constant': 50,
'maximum_batch_size': 2,
})
generator = DataGenerator(self.text_trainer, params)
batches = generator.create_generator(IndexedDataset(self.instances))
assert generator.last_num_batches == 7
one_epoch_arrays = [next(batches) for _ in range(7)]
one_epoch_arrays.sort(key=lambda x: x[0][0])
print([self.as_list(x[0]) for x in one_epoch_arrays])
assert self.as_list(one_epoch_arrays[0][0]) == [0]
assert self.as_list(one_epoch_arrays[1][0]) == [2, 1]
assert self.as_list(one_epoch_arrays[2][0]) == [3]
assert self.as_list(one_epoch_arrays[3][0]) == [4]
assert self.as_list(one_epoch_arrays[4][0]) == [5, 6]
assert self.as_list(one_epoch_arrays[5][0]) == [7]
assert self.as_list(one_epoch_arrays[6][0]) == [8, 9]
def as_list(self, array):
return list(numpy.squeeze(array, axis=-1))
class FakeInstance:
def __init__(self, index, a_length, b_length, c_length):
self.index = index
self.a_length = a_length
self.b_length = b_length
self.c_length = c_length
def get_padding_lengths(self):
return {'a': self.a_length, 'b': self.b_length, 'c': self.c_length}
def pad(self, lengths):
pass
def as_training_data(self):
return numpy.asarray([self.index]), numpy.asarray([self.index])
class FakeTextTrainer:
batch_size = 3
a_length = None
b_length = None
c_length = None
def get_instance_sorting_keys(self):
return ['a', 'b', 'c']
def get_padding_lengths(self):
return {'a': self.a_length, 'b': self.b_length, 'c': self.c_length}
def get_padding_memory_scaling(self, lengths):
return lengths['a'] * lengths['b'] * lengths['c']
| deep_qa-master | tests/data/data_generator_test.py |
deep_qa-master | tests/data/__init__.py |
|
# pylint: disable=no-self-use,invalid-name
import codecs
from deep_qa.data.data_indexer import DataIndexer
from deep_qa.data.datasets import TextDataset
from deep_qa.data.instances.text_classification.text_classification_instance import TextClassificationInstance
from deep_qa.testing.test_case import DeepQaTestCase
class TestDataIndexer(DeepQaTestCase):
def test_fit_word_dictionary_respects_min_count(self):
instance = TextClassificationInstance("a a a a b b c c c", True)
dataset = TextDataset([instance])
data_indexer = DataIndexer()
data_indexer.fit_word_dictionary(dataset, min_count=4)
assert 'a' in data_indexer.words_in_index()
assert 'b' not in data_indexer.words_in_index()
assert 'c' not in data_indexer.words_in_index()
data_indexer = DataIndexer()
data_indexer.fit_word_dictionary(dataset, min_count=1)
assert 'a' in data_indexer.words_in_index()
assert 'b' in data_indexer.words_in_index()
assert 'c' in data_indexer.words_in_index()
def test_add_word_to_index_gives_consistent_results(self):
data_indexer = DataIndexer()
initial_vocab_size = data_indexer.get_vocab_size()
word_index = data_indexer.add_word_to_index("word")
assert "word" in data_indexer.words_in_index()
assert data_indexer.get_word_index("word") == word_index
assert data_indexer.get_word_from_index(word_index) == "word"
assert data_indexer.get_vocab_size() == initial_vocab_size + 1
# Now add it again, and make sure nothing changes.
data_indexer.add_word_to_index("word")
assert "word" in data_indexer.words_in_index()
assert data_indexer.get_word_index("word") == word_index
assert data_indexer.get_word_from_index(word_index) == "word"
assert data_indexer.get_vocab_size() == initial_vocab_size + 1
def test_namespaces(self):
data_indexer = DataIndexer()
initial_vocab_size = data_indexer.get_vocab_size()
word_index = data_indexer.add_word_to_index("word", namespace='1')
assert "word" in data_indexer.words_in_index(namespace='1')
assert data_indexer.get_word_index("word", namespace='1') == word_index
assert data_indexer.get_word_from_index(word_index, namespace='1') == "word"
assert data_indexer.get_vocab_size(namespace='1') == initial_vocab_size + 1
# Now add the same word (and a new one) in a different namespace, and make sure the
# new namespace starts from scratch.
word2_index = data_indexer.add_word_to_index("word2", namespace='2')
word_index = data_indexer.add_word_to_index("word", namespace='2')
assert "word" in data_indexer.words_in_index(namespace='2')
assert "word2" in data_indexer.words_in_index(namespace='2')
assert data_indexer.get_word_index("word", namespace='2') == word_index
assert data_indexer.get_word_index("word2", namespace='2') == word2_index
assert data_indexer.get_word_from_index(word_index, namespace='2') == "word"
assert data_indexer.get_word_from_index(word2_index, namespace='2') == "word2"
assert data_indexer.get_vocab_size(namespace='2') == initial_vocab_size + 2
def test_unknown_token(self):
# pylint: disable=protected-access
# We're putting this behavior in a test so that the behavior is documented. There is
# solver code that depends in a small way on how we treat the unknown token, so any
# breaking change to this behavior should break a test, so you know you've done something
# that needs more consideration.
data_indexer = DataIndexer()
oov_token = data_indexer._oov_token
oov_index = data_indexer.get_word_index(oov_token)
assert oov_index == 1
assert data_indexer.get_word_index("unseen word") == oov_index
def test_set_from_file(self):
# pylint: disable=protected-access
vocab_filename = self.TEST_DIR + 'vocab_file'
with codecs.open(vocab_filename, 'w', 'utf-8') as vocab_file:
vocab_file.write('<S>\n')
vocab_file.write('</S>\n')
vocab_file.write('<UNK>\n')
vocab_file.write('a\n')
vocab_file.write('word\n')
vocab_file.write('another\n')
data_indexer = DataIndexer()
data_indexer.set_from_file(vocab_filename, oov_token="<UNK>")
assert data_indexer._oov_token == "<UNK>"
assert data_indexer.get_word_index("random string") == 3
assert data_indexer.get_word_index("<S>") == 1
assert data_indexer.get_word_index("</S>") == 2
assert data_indexer.get_word_index("<UNK>") == 3
assert data_indexer.get_word_index("a") == 4
assert data_indexer.get_word_index("word") == 5
assert data_indexer.get_word_index("another") == 6
assert data_indexer.get_word_from_index(0) == data_indexer._padding_token
assert data_indexer.get_word_from_index(1) == "<S>"
assert data_indexer.get_word_from_index(2) == "</S>"
assert data_indexer.get_word_from_index(3) == "<UNK>"
assert data_indexer.get_word_from_index(4) == "a"
assert data_indexer.get_word_from_index(5) == "word"
assert data_indexer.get_word_from_index(6) == "another"
| deep_qa-master | tests/data/data_indexer_test.py |
# pylint: disable=no-self-use,invalid-name
from deep_qa.data.tokenizers.word_processor import WordProcessor
from deep_qa.common.params import Params
class TestWordProcessor:
def test_passes_through_correctly(self):
word_processor = WordProcessor(Params({}))
sentence = "this (sentence) has 'crazy' \"punctuation\"."
tokens = word_processor.get_tokens(sentence)
expected_tokens = ["this", "(", "sentence", ")", "has", "'", "crazy", "'", "\"",
"punctuation", "\"", "."]
assert tokens == expected_tokens
def test_stems_and_filters_correctly(self):
word_processor = WordProcessor(Params({'word_stemmer': 'porter', 'word_filter': 'stopwords'}))
sentence = "this (sentence) has 'crazy' \"punctuation\"."
expected_tokens = ["sentenc", "ha", "crazi", "punctuat"]
tokens = word_processor.get_tokens(sentence)
assert tokens == expected_tokens
| deep_qa-master | tests/data/tokenizers/word_processor_test.py |
# pylint: disable=no-self-use,invalid-name
from deep_qa.data.tokenizers.word_tokenizer import WordTokenizer
from deep_qa.common.params import Params
class TestTokenizer:
tokenizer = WordTokenizer(Params({}))
passage = "On January 7, 2012, Beyoncé gave birth to her first child, a daughter, Blue Ivy " +\
"Carter, at Lenox Hill Hospital in New York. Five months later, she performed for four " +\
"nights at Revel Atlantic City's Ovation Hall to celebrate the resort's opening, her " +\
"first performances since giving birth to Blue Ivy."
def test_char_span_to_token_span_handles_easy_cases(self):
# "January 7, 2012"
token_span = self.tokenizer.char_span_to_token_span(self.passage, (3, 18))
assert token_span == (1, 5)
# "Lenox Hill Hospital"
token_span = self.tokenizer.char_span_to_token_span(self.passage, (91, 110))
assert token_span == (22, 25)
# "Lenox Hill Hospital in New York."
token_span = self.tokenizer.char_span_to_token_span(self.passage, (91, 123))
assert token_span == (22, 29)
| deep_qa-master | tests/data/tokenizers/tokenizer_test.py |
# pylint: disable=no-self-use,invalid-name
from deep_qa.data.tokenizers.word_splitter import SimpleWordSplitter
from deep_qa.data.tokenizers.word_splitter import SpacyWordSplitter
class TestSimpleWordSplitter:
word_splitter = SimpleWordSplitter()
def test_tokenize_handles_complex_punctuation(self):
sentence = "this (sentence) has 'crazy' \"punctuation\"."
expected_tokens = ["this", "(", "sentence", ")", "has", "'", "crazy", "'", '"',
"punctuation", '"', "."]
tokens = self.word_splitter.split_words(sentence)
assert tokens == expected_tokens
def test_tokenize_handles_contraction(self):
sentence = "it ain't joe's problem; would've been yesterday"
expected_tokens = ["it", "ai", "n't", "joe", "'s", "problem", ";", "would", "'ve", "been",
"yesterday"]
tokens = self.word_splitter.split_words(sentence)
assert tokens == expected_tokens
def test_tokenize_handles_multiple_contraction(self):
sentence = "wouldn't've"
expected_tokens = ["would", "n't", "'ve"]
tokens = self.word_splitter.split_words(sentence)
assert tokens == expected_tokens
def test_tokenize_handles_final_apostrophe(self):
sentence = "the jones' house"
expected_tokens = ["the", "jones", "'", "house"]
tokens = self.word_splitter.split_words(sentence)
assert tokens == expected_tokens
def test_tokenize_handles_special_cases(self):
sentence = "mr. and mrs. jones, etc., went to, e.g., the store"
expected_tokens = ["mr.", "and", "mrs.", "jones", ",", "etc.", ",", "went", "to", ",",
"e.g.", ",", "the", "store"]
tokens = self.word_splitter.split_words(sentence)
assert tokens == expected_tokens
class TestSpacyWordSplitter:
word_splitter = SpacyWordSplitter()
def test_tokenize_handles_complex_punctuation(self):
sentence = "this (sentence) has 'crazy' \"punctuation\"."
expected_tokens = ["this", "(", "sentence", ")", "has", "'", "crazy", "'", '"',
"punctuation", '"', "."]
tokens = self.word_splitter.split_words(sentence)
assert tokens == expected_tokens
def test_tokenize_handles_contraction(self):
# note that "would've" is kept together, while "ain't" is not.
sentence = "it ain't joe's problem; would've been yesterday"
expected_tokens = ["it", "ai", "n't", "joe", "'s", "problem", ";", "would've", "been",
"yesterday"]
tokens = self.word_splitter.split_words(sentence)
assert tokens == expected_tokens
def test_tokenize_handles_multiple_contraction(self):
sentence = "wouldn't've"
expected_tokens = ["would", "n't", "'ve"]
tokens = self.word_splitter.split_words(sentence)
assert tokens == expected_tokens
def test_tokenize_handles_final_apostrophe(self):
sentence = "the jones' house"
expected_tokens = ["the", "jones", "'", "house"]
tokens = self.word_splitter.split_words(sentence)
assert tokens == expected_tokens
def test_tokenize_handles_special_cases(self):
# note that the etc. doesn't quite work --- we can special case this if we want.
sentence = "Mr. and Mrs. Jones, etc., went to, e.g., the store"
expected_tokens = ["mr.", "and", "mrs.", "jones", ",", "etc", ".", ",", "went", "to", ",",
"e.g.", ",", "the", "store"]
tokens = self.word_splitter.split_words(sentence)
assert tokens == expected_tokens
| deep_qa-master | tests/data/tokenizers/word_splitter_test.py |
deep_qa-master | tests/data/dataset_readers/__init__.py |
|
# pylint: disable=no-self-use,invalid-name
import random
from os.path import join
import numpy
from deep_qa.data.dataset_readers.squad_sentence_selection_reader import SquadSentenceSelectionReader
from deep_qa.testing.test_case import DeepQaTestCase
from overrides import overrides
class TestSquadSentenceSelectionReader(DeepQaTestCase):
@overrides
def setUp(self):
super(TestSquadSentenceSelectionReader, self).setUp()
# write a SQuAD json file.
# pylint: disable=bad-continuation
self.sentences = [
"Architecturally, the school has a Catholic character.",
"Atop the Main Building's gold dome is a golden statue of the Virgin Mary.",
"Immediately in front of the Main Building and facing it, is a copper statue of "
"Christ with arms upraised with the legend \\\"Venite Ad Me Omnes\\\".",
"Next to the Main Building is the Basilica of the Sacred Heart.",
"Immediately behind the basilica is the Grotto, a Marian place of prayer and reflection.",
"It is a replica of the grotto at Lourdes, France where the Virgin Mary reputedly "
"appeared to Saint Bernadette Soubirous in 1858.",
"At the end of the main drive (and in a direct line that connects through 3 "
"statues and the Gold Dome), is a simple, modern stone statue of Mary.",
"This is another sentence.",
"And another one.",
"Yet another sentence 1.",
"Yet another sentence 2.",
"Yet another sentence 3.",
"Yet another sentence 4.",
"Yet another sentence 5.",
]
# pylint: enable=bad-continuation
self.passage1 = " ".join(self.sentences[:7])
self.passage2 = " ".join(self.sentences[7:])
self.question0 = "To whom did the Virgin Mary allegedly appear in 1858 in Lourdes France?"
self.question1 = "What is in front of the Notre Dame Main Building?"
self.questions = [self.question0, self.question1]
json_string = """
{
"data":[
{
"title":"University_of_Notre_Dame",
"paragraphs":[
{
"context":"%s",
"qas":[
{
"answers":[
{
"answer_start":515,
"text":"Saint Bernadette Soubirous"
}
],
"question":"%s",
"id":"5733be284776f41900661182"
},
{
"answers":[
{
"answer_start":188,
"text":"a copper statue of Christ"
}
],
"question":"%s",
"id":"5733be284776f4190066117f"
}
]
},
{
"context":"%s",
"qas":[ ]
}
]
}
]
}
""" % (self.passage1, self.question0, self.question1, self.passage2)
with open(self.TEST_DIR + "squad_data.json", "w") as f:
f.write(json_string)
random.seed(1337)
numpy.random.seed(1337)
def test_reader_should_shuffle_consistently_with_the_same_seed(self):
random.seed(1337)
numpy.random.seed(1337)
reader = SquadSentenceSelectionReader()
output_filepath = reader.read_file(join(self.TEST_DIR, "squad_data.json"))
with open(output_filepath, "r") as generated_file:
lines = []
for line in generated_file:
lines.append(line.strip())
random.seed(1337)
numpy.random.seed(1337)
reader = SquadSentenceSelectionReader()
output_filepath = reader.read_file(join(self.TEST_DIR, "squad_data.json"))
with open(output_filepath, "r") as generated_file:
lines2 = []
for line in generated_file:
lines2.append(line.strip())
assert lines == lines2
def test_default_squad_sentence_selection_reader(self):
        # Note that the ordering of these sentences depends on a particular shuffling of the
# data (and thus the random seed set above), and could change if you change the number of
# shuffles done in the code. Sorry.
context0 = "###".join(self.sentences[i] for i in [2, 4, 1, 3, 0, 5, 6]).replace("\\\"", "\"")
index0 = "5"
expected_line0 = self.question0 + "\t" + context0 + "\t" + index0
context1 = "###".join(self.sentences[i] for i in [0, 3, 4, 6, 2, 1, 5]).replace("\\\"", "\"")
index1 = "4"
expected_line1 = self.question1 + "\t" + context1 + "\t" + index1
reader = SquadSentenceSelectionReader()
output_filepath = reader.read_file(join(self.TEST_DIR, "squad_data.json"))
with open(output_filepath, "r") as generated_file:
lines = []
for line in generated_file:
lines.append(line.strip())
assert expected_line0 == lines[0]
assert expected_line1 == lines[1]
def test_negative_sentence_choices_all_work(self):
# We're going to make sure that the other negative sentence selection methods don't crash
# here; that's about it.
        # Note that the ordering of these sentences depends on a particular shuffling of the
# data (and thus the random seed set above), and could change if you change the number of
# shuffles done in the code.
context0 = "###".join(self.sentences[i] for i in [3, 4, 0, 13, 5, 9]).replace("\\\"", "\"")
index0 = "4"
expected_line0 = self.question0 + "\t" + context0 + "\t" + index0
context1 = "###".join(self.sentences[i] for i in [4, 1, 9, 2, 7, 12]).replace("\\\"", "\"")
index1 = "3"
expected_line1 = self.question1 + "\t" + context1 + "\t" + index1
reader = SquadSentenceSelectionReader(negative_sentence_selection="random-2,pad-to-5")
output_filepath = reader.read_file(join(self.TEST_DIR, "squad_data.json"))
with open(output_filepath, "r") as generated_file:
lines = []
for line in generated_file:
lines.append(line.strip())
assert expected_line0 == lines[0]
assert expected_line1 == lines[1]
def test_negative_question_choice_works(self):
# We're going to make sure that the other negative sentence selection methods don't crash
# here; that's about it.
context0 = "###".join([self.question0, self.sentences[5]]).replace("\\\"", "\"")
index0 = "1"
expected_line0 = self.question0 + "\t" + context0 + "\t" + index0
context1 = "###".join([self.sentences[2], self.question1]).replace("\\\"", "\"")
index1 = "0"
expected_line1 = self.question1 + "\t" + context1 + "\t" + index1
reader = SquadSentenceSelectionReader(negative_sentence_selection="question")
output_filepath = reader.read_file(join(self.TEST_DIR, "squad_data.json"))
with open(output_filepath, "r") as generated_file:
lines = []
for line in generated_file:
lines.append(line.strip())
assert expected_line0 == lines[0]
assert expected_line1 == lines[1]
def test_negative_random_question_choice_works(self):
# We're going to make sure that the other negative sentence selection methods don't crash
# here; that's about it.
context0 = "###".join([self.question0, self.question1, self.sentences[5]]).replace("\\\"", "\"")
index0 = "2"
expected_line0 = self.question0 + "\t" + context0 + "\t" + index0
context1 = "###".join([self.question1, self.question0, self.sentences[2]]).replace("\\\"", "\"")
index1 = "2"
expected_line1 = self.question1 + "\t" + context1 + "\t" + index1
reader = SquadSentenceSelectionReader(negative_sentence_selection="questions-random-2")
output_filepath = reader.read_file(join(self.TEST_DIR, "squad_data.json"))
with open(output_filepath, "r") as generated_file:
lines = []
for line in generated_file:
lines.append(line.strip())
assert expected_line0 == lines[0]
assert expected_line1 == lines[1]
| deep_qa-master | tests/data/dataset_readers/squad_sentence_selection_reader_test.py |
# pylint: disable=no-self-use,invalid-name
from deep_qa.data.datasets import SnliDataset
from deep_qa.data.instances.entailment.snli_instance import SnliInstance
from deep_qa.testing.test_case import DeepQaTestCase
class TestSnliDataset(DeepQaTestCase):
def setUp(self):
super(TestSnliDataset, self).setUp()
self.write_original_snli_data()
def test_read_from_file(self):
dataset = SnliDataset.read_from_file(self.TRAIN_FILE, SnliInstance)
instance1 = SnliInstance("A person on a horse jumps over a broken down airplane.",
"A person is training his horse for a competition.",
"neutral")
instance2 = SnliInstance("A person on a horse jumps over a broken down airplane.",
"A person is at a diner, ordering an omelette.",
"contradicts")
instance3 = SnliInstance("A person on a horse jumps over a broken down airplane.",
"A person is outdoors, on a horse.",
"entails")
assert len(dataset.instances) == 3
instance = dataset.instances[0]
assert instance.index == instance1.index
assert instance.first_sentence == instance1.first_sentence
assert instance.second_sentence == instance1.second_sentence
assert instance.label == instance1.label
instance = dataset.instances[1]
assert instance.index == instance2.index
assert instance.first_sentence == instance2.first_sentence
assert instance.second_sentence == instance2.second_sentence
assert instance.label == instance2.label
instance = dataset.instances[2]
assert instance.index == instance3.index
assert instance.first_sentence == instance3.first_sentence
assert instance.second_sentence == instance3.second_sentence
assert instance.label == instance3.label
| deep_qa-master | tests/data/datasets/snli_dataset_test.py |
deep_qa-master | tests/data/datasets/__init__.py |
|
# pylint: disable=no-self-use,invalid-name
from deep_qa.data.datasets.dataset import Dataset, TextDataset
from deep_qa.data.instances.text_classification.text_classification_instance import TextClassificationInstance
from deep_qa.testing.test_case import DeepQaTestCase
class TestDataset:
def test_merge(self):
instances = [TextClassificationInstance("testing", None, None),
TextClassificationInstance("testing1", None, None)]
dataset1 = Dataset(instances[:1])
dataset2 = Dataset(instances[1:])
merged = dataset1.merge(dataset2)
assert merged.instances == instances
class TestTextDataset(DeepQaTestCase):
def test_read_from_file_with_no_default_label(self):
filename = self.TEST_DIR + 'test_dataset_file'
with open(filename, 'w') as datafile:
datafile.write("1\tinstance1\t0\n")
datafile.write("2\tinstance2\t1\n")
datafile.write("3\tinstance3\n")
dataset = TextDataset.read_from_file(filename, TextClassificationInstance)
assert len(dataset.instances) == 3
instance = dataset.instances[0]
assert instance.index == 1
assert instance.text == "instance1"
assert instance.label is False
instance = dataset.instances[1]
assert instance.index == 2
assert instance.text == "instance2"
assert instance.label is True
instance = dataset.instances[2]
assert instance.index == 3
assert instance.text == "instance3"
assert instance.label is None
| deep_qa-master | tests/data/datasets/dataset_test.py |
# pylint: disable=no-self-use,invalid-name
from deep_qa.common.params import Params
from deep_qa.data.datasets import LanguageModelingDataset
from deep_qa.data.instances.language_modeling.sentence_instance import SentenceInstance
from deep_qa.testing.test_case import DeepQaTestCase
class TestLanguageModellingDataset(DeepQaTestCase):
def setUp(self):
super(TestLanguageModellingDataset, self).setUp()
self.write_sentence_data()
def test_read_from_file(self):
args = Params({"sequence_length": 4})
dataset = LanguageModelingDataset.read_from_file(self.TRAIN_FILE, SentenceInstance, args)
instances = dataset.instances
assert instances[0].text == "This is a sentence"
assert instances[1].text == "for language modelling. Here's"
assert instances[2].text == "another one for language"
| deep_qa-master | tests/data/datasets/language_modeling_dataset_test.py |
# pylint: disable=no-self-use,invalid-name
from deep_qa.common.params import Params
from deep_qa.data.data_indexer import DataIndexer
from deep_qa.data.instances.instance import TextInstance
from deep_qa.data.instances.text_classification import IndexedTextClassificationInstance
from deep_qa.data.instances.text_classification import TextClassificationInstance
from deep_qa.data.tokenizers import tokenizers
from deep_qa.testing.test_case import DeepQaTestCase
class TestTextInstance(DeepQaTestCase):
"""
    The point of this test class is to test the tokenizer used by TextInstance, to be sure
    that we get what we expect when using character tokenizers, or word-and-character tokenizers.
"""
def tearDown(self):
super(TestTextInstance, self).tearDown()
TextInstance.tokenizer = tokenizers['words'](Params({}))
def test_words_tokenizes_the_sentence_correctly(self):
t = TextClassificationInstance("This is a sentence.", None)
assert t.words() == {'words': ['this', 'is', 'a', 'sentence', '.']}
TextInstance.tokenizer = tokenizers['characters'](Params({}))
assert t.words() == {'words': ['T', 'h', 'i', 's', ' ', 'i', 's', ' ', 'a', ' ', 's',
'e', 'n', 't', 'e', 'n', 'c', 'e', '.']}
TextInstance.tokenizer = tokenizers['words and characters'](Params({}))
assert t.words() == {'words': ['this', 'is', 'a', 'sentence', '.'],
'characters': ['t', 'h', 'i', 's', 'i', 's', 'a', 's', 'e', 'n', 't',
'e', 'n', 'c', 'e', '.']}
def test_to_indexed_instance_converts_correctly(self):
data_indexer = DataIndexer()
sentence_index = data_indexer.add_word_to_index("sentence", namespace='words')
capital_a_index = data_indexer.add_word_to_index("A", namespace='words')
space_index = data_indexer.add_word_to_index(" ", namespace='words')
a_index = data_indexer.add_word_to_index("a", namespace='words')
s_index = data_indexer.add_word_to_index("s", namespace='words')
e_index = data_indexer.add_word_to_index("e", namespace='words')
n_index = data_indexer.add_word_to_index("n", namespace='words')
t_index = data_indexer.add_word_to_index("t", namespace='words')
c_index = data_indexer.add_word_to_index("c", namespace='words')
a_char_index = data_indexer.add_word_to_index("a", namespace='characters')
s_char_index = data_indexer.add_word_to_index("s", namespace='characters')
e_char_index = data_indexer.add_word_to_index("e", namespace='characters')
n_char_index = data_indexer.add_word_to_index("n", namespace='characters')
t_char_index = data_indexer.add_word_to_index("t", namespace='characters')
c_char_index = data_indexer.add_word_to_index("c", namespace='characters')
instance = TextClassificationInstance("A sentence", None).to_indexed_instance(data_indexer)
assert instance.word_indices == [a_index, sentence_index]
TextInstance.tokenizer = tokenizers['characters'](Params({}))
instance = TextClassificationInstance("A sentence", None).to_indexed_instance(data_indexer)
assert instance.word_indices == [capital_a_index, space_index, s_index, e_index, n_index, t_index,
e_index, n_index, c_index, e_index]
TextInstance.tokenizer = tokenizers['words and characters'](Params({}))
instance = TextClassificationInstance("A sentence", None).to_indexed_instance(data_indexer)
assert instance.word_indices == [[a_index, a_char_index],
[sentence_index, s_char_index, e_char_index, n_char_index,
t_char_index, e_char_index, n_char_index, c_char_index,
e_char_index]]
class TestIndexedInstance(DeepQaTestCase):
def test_get_padding_lengths_works_with_words_and_characters(self):
instance = IndexedTextClassificationInstance([[1, 2], [3, 1, 2]], True)
assert instance.get_padding_lengths() == {'num_sentence_words': 2, 'num_word_characters': 3}
def test_pad_word_sequence_handles_words_and_characters_less(self):
instance = IndexedTextClassificationInstance([[1, 2], [3, 1, 2]], True)
padded = instance.pad_word_sequence(instance.word_indices,
{'num_sentence_words': 3, 'num_word_characters': 4})
assert padded == [[0, 0, 0, 0], [1, 2, 0, 0], [3, 1, 2, 0]]
def test_pad_word_sequence_handles_words_and_characters_greater(self):
instance = IndexedTextClassificationInstance([[1, 2], [3, 1, 2]], True)
padded = instance.pad_word_sequence(instance.word_indices,
{'num_sentence_words': 5, 'num_word_characters': 4})
assert padded == [[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [1, 2, 0, 0], [3, 1, 2, 0]]
| deep_qa-master | tests/data/instances/text_instance_test.py |
deep_qa-master | tests/data/instances/__init__.py |
|
deep_qa-master | tests/data/instances/sequence_tagging/__init__.py |
|
# pylint: disable=no-self-use,invalid-name
from deep_qa.data.instances.sequence_tagging.tagging_instance import IndexedTaggingInstance
from deep_qa.testing.test_case import DeepQaTestCase
from numpy.testing import assert_array_almost_equal
class TestIndexedTaggingInstance(DeepQaTestCase):
def setUp(self):
super(TestIndexedTaggingInstance, self).setUp()
self.instance = IndexedTaggingInstance([1, 2, 3, 4], [4, 5, 6])
def test_get_padding_lengths_returns_correct_lengths(self):
assert self.instance.get_padding_lengths() == {'num_sentence_words': 4}
def test_pad_truncates_correctly(self):
self.instance.pad({'num_sentence_words': 2})
assert self.instance.text_indices == [1, 2]
def test_pad_adds_padding_correctly(self):
self.instance.pad({'num_sentence_words': 6})
assert self.instance.text_indices == [1, 2, 3, 4, 0, 0]
def test_as_training_data_produces_correct_arrays(self):
text_array, label_array = self.instance.as_training_data()
assert_array_almost_equal(text_array, [1, 2, 3, 4])
assert_array_almost_equal(label_array, [4, 5, 6])
| deep_qa-master | tests/data/instances/sequence_tagging/test_tagging_instance.py |
# pylint: disable=no-self-use,invalid-name
from typing import List
from deep_qa.common.params import Params
from deep_qa.data.data_indexer import DataIndexer
from deep_qa.data.instances.instance import TextInstance
from deep_qa.data.instances.sequence_tagging.pretokenized_tagging_instance import PreTokenizedTaggingInstance
from deep_qa.data.tokenizers import tokenizers
from deep_qa.testing.test_case import DeepQaTestCase
from numpy.testing import assert_array_almost_equal
class TestPreTokenizedTaggingInstance(DeepQaTestCase):
def setUp(self):
super(TestPreTokenizedTaggingInstance, self).setUp()
tokens = ["cats", "are", "animals", "."]
tags = ["N", "V", "N", "."]
self.instance = PreTokenizedTaggingInstance(tokens, tags)
TextInstance.tokenizer = tokenizers['words'](Params({'processor': {'word_splitter': 'no_op'}}))
def tearDown(self):
super(TestPreTokenizedTaggingInstance, self).tearDown()
TextInstance.tokenizer = tokenizers['words'](Params({}))
@staticmethod
def instance_to_line(tokens: List[str], tags: List[str], index=None):
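        # Builds lines in the format "token1###tag1\ttoken2###tag2\t...", with an optional
        # leading "index\t" column; this is the format read_from_line parses below.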
line = ''
if index is not None:
line += str(index) + '\t'
tagged_tokens = [token + '###' + tag for token, tag in zip(tokens, tags)]
line += '\t'.join(tagged_tokens)
return line
def test_read_from_line_handles_example_indices(self):
tokens = ["cats", "are", "animals", "."]
tags = ["N", "V", "N", "."]
index = 4
line = self.instance_to_line(tokens, tags, index)
instance = PreTokenizedTaggingInstance.read_from_line(line)
assert instance.text == tokens
assert instance.label == tags
assert instance.index == index
def test_read_from_line_handles_no_indices(self):
tokens = ["cats", "are", "animals", "."]
tags = ["N", "V", "N", "."]
index = None
line = self.instance_to_line(tokens, tags, index)
instance = PreTokenizedTaggingInstance.read_from_line(line)
assert instance.text == tokens
assert instance.label == tags
assert instance.index == index
def test_read_from_line_handles_hashes_in_words(self):
tokens = ["######", "#", "###", "#"]
tags = ["A", "B", "C", "D"]
index = None
line = self.instance_to_line(tokens, tags, index)
instance = PreTokenizedTaggingInstance.read_from_line(line)
assert instance.text == tokens
assert instance.label == tags
assert instance.index == index
def test_to_indexed_instance_converts_correctly(self):
data_indexer = DataIndexer()
cats_index = data_indexer.add_word_to_index("cats")
are_index = data_indexer.add_word_to_index("are")
animals_index = data_indexer.add_word_to_index("animals")
period_index = data_indexer.add_word_to_index(".")
n_tag_index = data_indexer.add_word_to_index("N", namespace="tags")
v_tag_index = data_indexer.add_word_to_index("V", namespace="tags")
period_tag_index = data_indexer.add_word_to_index(".", namespace="tags")
indexed_instance = self.instance.to_indexed_instance(data_indexer)
expected_indices = [cats_index, are_index, animals_index, period_index]
assert indexed_instance.text_indices == expected_indices
expected_label = [self.one_hot(n_tag_index - 2, 3),
self.one_hot(v_tag_index - 2, 3),
self.one_hot(n_tag_index - 2, 3),
self.one_hot(period_tag_index - 2, 3)]
assert_array_almost_equal(indexed_instance.label, expected_label)
train_inputs, train_labels = indexed_instance.as_training_data()
assert_array_almost_equal(train_labels, expected_label)
assert_array_almost_equal(train_inputs, expected_indices)
def test_words_returns_correct_dictionary(self):
assert self.instance.words() == {'words': ['cats', 'are', 'animals', '.'],
'tags': ['N', 'V', 'N', '.']}
| deep_qa-master | tests/data/instances/sequence_tagging/pretokenized_tagging_instance_test.py |
deep_qa-master | tests/data/instances/entailment/__init__.py |
|
# pylint: disable=no-self-use,invalid-name
import pytest
from deep_qa.data.data_indexer import DataIndexer
from deep_qa.data.instances.entailment.snli_instance import SnliInstance
class TestSnliInstance:
@staticmethod
def instance_to_line(text: str, hypothesis: str, label: str, index=None):
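        # Builds lines in the format "text\thypothesis\tlabel", with an optional
        # leading "index\t" column.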
line = ''
if index is not None:
line += str(index) + '\t'
line += text + '\t' + hypothesis + '\t' + label
return line
def test_read_from_line_handles_three_column(self):
text = "dogs eat cats"
hypothesis = "animals eat animals"
label = "contradicts"
line = self.instance_to_line(text, hypothesis, label)
instance = SnliInstance.read_from_line(line)
assert instance.first_sentence == text
assert instance.second_sentence == hypothesis
assert instance.label == [0, 1, 0]
assert instance.index is None
def test_read_from_line_handles_four_column(self):
text = "dogs eat cats"
hypothesis = "animals eat animals"
label = "entails"
index = 23
line = self.instance_to_line(text, hypothesis, label, index)
instance = SnliInstance.read_from_line(line)
assert instance.first_sentence == text
assert instance.second_sentence == hypothesis
assert instance.label == [1, 0, 0]
assert instance.index == index
def test_words_includes_text_and_hypothesis(self):
instance = SnliInstance("a b c", "d a", "entails")
assert instance.words() == {'words': ['a', 'b', 'c', 'd', 'a']}
def test_labels_are_mapped_correctly(self):
assert SnliInstance("", "", "entails").label == [1, 0, 0]
assert SnliInstance("", "", "contradicts").label == [0, 1, 0]
assert SnliInstance("", "", "neutral").label == [0, 0, 1]
assert SnliInstance("", "", "entails_softmax").label == [0, 1]
assert SnliInstance("", "", "not_entails_softmax").label == [1, 0]
assert SnliInstance("", "", "entails_sigmoid").label == [1]
assert SnliInstance("", "", "not_entails_sigmoid").label == [0]
assert SnliInstance("", "", "attention_true").label == [1]
assert SnliInstance("", "", "attention_false").label == [0]
def test_to_attention_instance_maps_label_correctly(self):
assert SnliInstance("", "", "entails").to_attention_instance().label == [1]
assert SnliInstance("", "", "contradicts").to_attention_instance().label == [1]
assert SnliInstance("", "", "neutral").to_attention_instance().label == [0]
with pytest.raises(Exception):
SnliInstance("", "", True).to_attention_instance()
with pytest.raises(Exception):
SnliInstance("", "", False).to_attention_instance()
def test_to_entails_instance_maps_label_correctly(self):
assert SnliInstance("", "", "entails").to_entails_instance("softmax").label == [0, 1]
assert SnliInstance("", "", "contradicts").to_entails_instance("softmax").label == [1, 0]
assert SnliInstance("", "", "neutral").to_entails_instance("softmax").label == [1, 0]
for label in SnliInstance.label_mapping:
if label not in ["entails", "contradicts", "neutral"]:
with pytest.raises(Exception):
SnliInstance("", "", label).to_entails_instance("softmax")
assert SnliInstance("", "", "entails").to_entails_instance("sigmoid").label == [1]
assert SnliInstance("", "", "contradicts").to_entails_instance("sigmoid").label == [0]
assert SnliInstance("", "", "neutral").to_entails_instance("sigmoid").label == [0]
for label in SnliInstance.label_mapping:
if label not in ["entails", "contradicts", "neutral"]:
with pytest.raises(Exception):
SnliInstance("", "", label).to_entails_instance("sigmoid")
def test_to_indexed_instance_converts_correctly(self):
instance = SnliInstance("a b", "d e f", "entails")
data_indexer = DataIndexer()
a_index = data_indexer.add_word_to_index("a")
d_index = data_indexer.add_word_to_index("d")
oov_index = data_indexer.get_word_index(data_indexer._oov_token) # pylint: disable=protected-access
indexed_instance = instance.to_indexed_instance(data_indexer)
assert indexed_instance.first_sentence_indices == [a_index, oov_index]
assert indexed_instance.second_sentence_indices == [d_index, oov_index, oov_index]
assert indexed_instance.label == instance.label
| deep_qa-master | tests/data/instances/entailment/snli_instance_test.py |
# pylint: disable=no-self-use,invalid-name
import numpy
from deep_qa.data.instances.entailment.sentence_pair_instance import IndexedSentencePairInstance
from deep_qa.testing.test_case import DeepQaTestCase
class TestIndexedSentencePairInstance(DeepQaTestCase):
def test_get_padding_lengths_returns_max_of_both_sentences(self):
instance = IndexedSentencePairInstance([1, 2, 3], [1], True)
assert instance.get_padding_lengths() == {'num_sentence_words': 3}
instance = IndexedSentencePairInstance([1, 2, 3], [1, 2, 3, 4], True)
assert instance.get_padding_lengths() == {'num_sentence_words': 4}
def test_pad_pads_both_sentences(self):
instance = IndexedSentencePairInstance([1, 2], [3, 4], True)
instance.pad({'num_sentence_words': 3})
assert instance.first_sentence_indices == [0, 1, 2]
assert instance.second_sentence_indices == [0, 3, 4]
def test_as_training_data_produces_correct_numpy_arrays(self):
# pylint: disable=redefined-variable-type
instance = IndexedSentencePairInstance([1, 2], [3, 4], [0, 1, 0])
inputs, label = instance.as_training_data()
assert isinstance(inputs, tuple)
assert len(inputs) == 2
assert numpy.all(inputs[0] == numpy.asarray([1, 2]))
assert numpy.all(inputs[1] == numpy.asarray([3, 4]))
assert numpy.all(label == numpy.asarray([0, 1, 0]))
| deep_qa-master | tests/data/instances/entailment/sentence_pair_instance_test.py |
# pylint: disable=no-self-use,invalid-name
import numpy as np
from deep_qa.data.data_indexer import DataIndexer
# pylint: disable=line-too-long
from deep_qa.data.instances.reading_comprehension.mc_question_passage_instance import IndexedMcQuestionPassageInstance
from deep_qa.data.instances.reading_comprehension.mc_question_passage_instance import McQuestionPassageInstance
# pylint: enable=line-too-long
from deep_qa.testing.test_case import DeepQaTestCase
class TestMcQuestionPassageInstance:
@staticmethod
def instance_to_line(passage: str, question: str,
options: str, label: int, index=None):
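        # Builds lines in the format "passage\tquestion\toption1###option2\tlabel",
        # with an optional leading "index\t" column.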
line = ''
if index is not None:
line += str(index) + '\t'
line += passage + '\t' + question + '\t' + options + '\t'+ str(label)
return line
def test_words_has_question_passage_options(self):
instance = McQuestionPassageInstance("Cats from Nevada are eaten by dogs in XXX .",
"Dogs eat cats from Nevada in Washington .",
["Nevada", "Washington"], 1)
assert instance.words() == {'words': ['cats', 'from', 'nevada', 'are', 'eaten', 'by',
'dogs', 'in', 'xxx', '.', 'dogs', 'eat', 'cats',
'from', 'nevada', 'in', 'washington', '.', 'nevada',
'washington']}
def test_read_from_line_handles_five_column(self):
passage = "Dogs eat cats from Nevada in Washington ."
question = "Cats from Nevada are eaten by dogs in XXX ."
options_str = "Nevada###Washington"
label = 1
line = self.instance_to_line(passage, question, options_str, label)
instance = McQuestionPassageInstance.read_from_line(line)
assert instance.question_text == question
assert instance.passage_text == passage
options = ["Nevada", "Washington"]
assert instance.answer_options == options
assert instance.label == label
assert instance.index is None
def test_read_from_line_handles_six_column(self):
passage = "Dogs eat cats from Nevada in Washington ."
question = "Cats from Nevada are eaten by dogs in XXX ."
options_str = "Nevada###Washington"
label = 1
index = 42
line = self.instance_to_line(passage, question, options_str, label, index)
instance = McQuestionPassageInstance.read_from_line(line)
assert instance.question_text == question
assert instance.passage_text == passage
options = ["Nevada", "Washington"]
assert instance.answer_options == options
assert instance.label == label
assert instance.index == index
def test_to_indexed_instance_converts_correctly(self):
instance = McQuestionPassageInstance("Cats from Nevada are eaten by dogs in XXX .",
"Dogs eat cats from Nevada in Washington .",
["Nevada", "Washington"], 1)
data_indexer = DataIndexer()
cats_index = data_indexer.add_word_to_index("cats")
are_index = data_indexer.add_word_to_index("are")
eaten_index = data_indexer.add_word_to_index("eaten")
by_index = data_indexer.add_word_to_index("by")
dogs_index = data_indexer.add_word_to_index("dogs")
in_index = data_indexer.add_word_to_index("in")
XXX_index = data_indexer.add_word_to_index("xxx")
period_index = data_indexer.add_word_to_index(".")
eat_index = data_indexer.add_word_to_index("eat")
from_index = data_indexer.add_word_to_index("from")
nevada_index = data_indexer.add_word_to_index("nevada")
washington_index = data_indexer.add_word_to_index("washington")
indexed_instance = instance.to_indexed_instance(data_indexer)
assert indexed_instance.question_indices == [cats_index, from_index,
nevada_index, are_index,
eaten_index, by_index,
dogs_index, in_index,
XXX_index, period_index]
assert indexed_instance.passage_indices == [dogs_index, eat_index, cats_index,
from_index, nevada_index, in_index,
washington_index, period_index]
assert len(indexed_instance.option_indices) == 2
assert indexed_instance.option_indices[0] == [nevada_index]
assert indexed_instance.option_indices[1] == [washington_index]
assert indexed_instance.label == 1
class TestIndexedMcQuestionPassageInstance(DeepQaTestCase):
def setUp(self):
super(TestIndexedMcQuestionPassageInstance, self).setUp()
self.instance = IndexedMcQuestionPassageInstance([1, 2, 3, 5, 6],
[2, 3, 4, 5, 6, 7],
[[2], [3, 5], [6]],
1)
def test_get_padding_lengths_returns_three_correct_lengths(self):
assert self.instance.get_padding_lengths() == {
'num_question_words': 5,
'num_passage_words': 6,
'num_option_words': 2,
'num_options': 3
}
    def test_pad_calls_pad_on_all_options(self):
self.instance.pad({'num_question_words': 7, 'num_passage_words': 9,
'num_option_words': 2, 'num_options': 3})
assert self.instance.question_indices == [0, 0, 1, 2, 3, 5, 6]
assert self.instance.passage_indices == [2, 3, 4, 5, 6, 7, 0, 0, 0]
assert self.instance.option_indices[0] == [0, 2]
assert self.instance.option_indices[1] == [3, 5]
assert self.instance.option_indices[2] == [0, 6]
def test_pad_adds_empty_options_when_necessary(self):
self.instance.pad({'num_question_words': 3, 'num_passage_words': 4,
'num_option_words': 1, 'num_options': 4})
assert self.instance.question_indices == [3, 5, 6]
assert self.instance.passage_indices == [2, 3, 4, 5]
assert self.instance.option_indices[0] == [2]
assert self.instance.option_indices[1] == [5]
assert self.instance.option_indices[2] == [6]
assert self.instance.option_indices[3] == [0]
assert len(self.instance.option_indices) == 4
def test_pad_removes_options_when_necessary(self):
self.instance.pad({'num_question_words': 3, 'num_passage_words': 4,
'num_option_words': 1, 'num_options': 1})
assert self.instance.question_indices == [3, 5, 6]
assert self.instance.passage_indices == [2, 3, 4, 5]
assert self.instance.option_indices[0] == [2]
assert len(self.instance.option_indices) == 1
def test_as_training_data_produces_correct_numpy_arrays(self):
self.instance.pad({'num_question_words': 7, 'num_passage_words': 4,
'num_option_words': 2, 'num_options': 4})
inputs, label = self.instance.as_training_data()
assert np.all(label == np.asarray([0, 1, 0, 0]))
assert np.all(inputs[0] == np.asarray([0, 0, 1, 2, 3, 5, 6]))
assert np.all(inputs[1] == np.asarray([2, 3, 4, 5]))
assert np.all(inputs[2] == np.asarray([[0, 2], [3, 5], [0, 6], [0, 0]]))
| deep_qa-master | tests/data/instances/reading_comprehension/mc_question_passage_instance_test.py |
deep_qa-master | tests/data/instances/reading_comprehension/__init__.py |
|
# pylint: disable=no-self-use,invalid-name
from typing import Tuple
import numpy
from deep_qa.data.data_indexer import DataIndexer
from deep_qa.data.instances.reading_comprehension.character_span_instance import CharacterSpanInstance
from deep_qa.testing.test_case import DeepQaTestCase
class TestCharacterSpanInstance(DeepQaTestCase):
@staticmethod
def instance_to_line(question: str, passage: str, label: Tuple[int, int],
index=None):
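        # Builds lines in the format "question\tpassage\tstart,end" (character offsets
        # of the answer span), with an optional leading "index\t" column.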
line = ''
if index is not None:
line += str(index) + '\t'
line += (question + '\t' + passage + '\t' +
str(label[0]) + ',' + str(label[1]))
return line
def test_read_from_line_handles_three_column(self):
question = "What do dogs eat?"
passage = "Dogs eat cats."
label = (9, 13)
line = self.instance_to_line(question, passage, label)
instance = CharacterSpanInstance.read_from_line(line)
assert instance.question_text == question
assert instance.passage_text == passage
assert instance.label == label
assert instance.index is None
def test_read_from_line_handles_four_column(self):
question = "What do dogs eat?"
passage = "Dogs eat cats."
label = (9, 13)
index = 23
line = self.instance_to_line(question, passage, label, index)
instance = CharacterSpanInstance.read_from_line(line)
assert instance.question_text == question
assert instance.passage_text == passage
assert instance.label == label
assert instance.index == index
def test_to_indexed_instance_converts_correctly(self):
instance = CharacterSpanInstance("What do dogs eat?", "Dogs eat cats.",
(9, 13))
data_indexer = DataIndexer()
what_index = data_indexer.add_word_to_index("what")
do_index = data_indexer.add_word_to_index("do")
dogs_index = data_indexer.add_word_to_index("dogs")
eat_index = data_indexer.add_word_to_index("eat")
cats_index = data_indexer.add_word_to_index("cats")
period_index = data_indexer.add_word_to_index(".")
question_index = data_indexer.add_word_to_index("?")
stop_index = data_indexer.add_word_to_index(CharacterSpanInstance.stop_token)
indexed_instance = instance.to_indexed_instance(data_indexer)
assert indexed_instance.question_indices == [what_index, do_index,
dogs_index, eat_index,
question_index]
assert indexed_instance.passage_indices == [dogs_index, eat_index,
cats_index, period_index, stop_index]
assert indexed_instance.label == (2, 3)
        # I put this test in here, instead of its own `test_as_training_data` test, to be sure that
        # the conversion to IndexedCharacterSpanInstance was performed correctly.
indexed_instance.pad({'num_question_words': 3, 'num_passage_words': 6})
(question_array, passage_array), label = indexed_instance.as_training_data()
assert isinstance(label, tuple)
assert numpy.all(label[0] == numpy.asarray([0, 0, 1, 0, 0, 0]))
assert numpy.all(label[1] == numpy.asarray([0, 0, 0, 1, 0, 0]))
assert numpy.all(question_array == numpy.asarray([dogs_index, eat_index, question_index]))
assert numpy.all(passage_array == numpy.asarray([dogs_index, eat_index, cats_index,
period_index, stop_index, 0]))
| deep_qa-master | tests/data/instances/reading_comprehension/character_span_instance_test.py |
deep_qa-master | tests/data/instances/text_classification/__init__.py |
|
# pylint: disable=no-self-use,invalid-name
import numpy
from deep_qa.data.instances.text_classification import IndexedTextClassificationInstance
from deep_qa.data.instances.text_classification import TextClassificationInstance
from deep_qa.testing.test_case import DeepQaTestCase
class TestTextClassificationInstance:
@staticmethod
def instance_to_line(text, label=None, index=None):
line = ''
if index is not None:
line += str(index) + '\t'
line += text
if label is not None:
label_str = '1' if label else '0'
line += '\t' + label_str
return line
def test_read_from_line_handles_one_column(self):
text = "this is a sentence"
instance = TextClassificationInstance.read_from_line(text)
assert instance.text == text
assert instance.label is None
assert instance.index is None
def test_read_from_line_handles_three_column(self):
index = 23
text = "this is a sentence"
label = True
line = self.instance_to_line(text, label, index)
instance = TextClassificationInstance.read_from_line(line)
assert instance.text == text
assert instance.label is label
assert instance.index == index
def test_read_from_line_handles_two_column_with_label(self):
index = None
text = "this is a sentence"
label = True
line = self.instance_to_line(text, label, index)
instance = TextClassificationInstance.read_from_line(line)
assert instance.text == text
assert instance.label is label
assert instance.index == index
def test_read_from_line_handles_two_column_with_index(self):
index = 23
text = "this is a sentence"
label = None
line = self.instance_to_line(text, label, index)
instance = TextClassificationInstance.read_from_line(line)
assert instance.text == text
assert instance.label is label
assert instance.index == index
def test_words_tokenizes_the_sentence_correctly(self):
t = TextClassificationInstance("This is a sentence.", None)
assert t.words() == {'words': ['this', 'is', 'a', 'sentence', '.']}
t = TextClassificationInstance("This isn't a sentence.", None)
assert t.words() == {'words': ['this', 'is', "n't", 'a', 'sentence', '.']}
t = TextClassificationInstance("And, I have commas.", None)
assert t.words() == {'words': ['and', ',', 'i', 'have', 'commas', '.']}
class TestIndexedTextClassificationInstance(DeepQaTestCase):
def test_get_padding_lengths_returns_length_of_word_indices(self):
instance = IndexedTextClassificationInstance([1, 2, 3, 4], True)
assert instance.get_padding_lengths() == {'num_sentence_words': 4}
def test_pad_adds_zeros_on_left(self):
instance = IndexedTextClassificationInstance([1, 2, 3, 4], True)
instance.pad({'num_sentence_words': 5})
assert instance.word_indices == [0, 1, 2, 3, 4]
def test_pad_truncates_from_right(self):
instance = IndexedTextClassificationInstance([1, 2, 3, 4], True)
instance.pad({'num_sentence_words': 3})
assert instance.word_indices == [2, 3, 4]
def test_as_training_data_produces_correct_numpy_arrays(self):
instance = IndexedTextClassificationInstance([1, 2, 3, 4], True)
inputs, label = instance.as_training_data()
assert numpy.all(label == numpy.asarray([0, 1]))
assert numpy.all(inputs == numpy.asarray([1, 2, 3, 4]))
instance.label = False
_, label = instance.as_training_data()
assert numpy.all(label == numpy.asarray([1, 0]))
| deep_qa-master | tests/data/instances/text_classification/text_classification_instance_test.py |
deep_qa-master | tests/data/instances/language_modeling/__init__.py |
|
# pylint: disable=no-self-use,invalid-name
from deep_qa.common.params import Params
from deep_qa.data.data_indexer import DataIndexer
from deep_qa.data.instances import TextInstance
from deep_qa.data.instances.language_modeling import IndexedSentenceInstance
from deep_qa.data.instances.language_modeling import SentenceInstance
from deep_qa.data.tokenizers import tokenizers
from deep_qa.testing.test_case import DeepQaTestCase
from numpy.testing import assert_array_equal
class TestSentenceInstance(DeepQaTestCase):
def setUp(self):
super(TestSentenceInstance, self).setUp()
self.data_indexer = DataIndexer()
self.this_index = self.data_indexer.add_word_to_index("this")
self.is_index = self.data_indexer.add_word_to_index("is")
self.a_index = self.data_indexer.add_word_to_index("a")
self.sentence_index = self.data_indexer.add_word_to_index("sentence")
self.start_index = self.data_indexer.add_word_to_index("<S>")
self.end_index = self.data_indexer.add_word_to_index("</S>")
self.space_index = self.data_indexer.add_word_to_index(' ')
self.c_index = self.data_indexer.add_word_to_index('c')
self.e_index = self.data_indexer.add_word_to_index('e')
self.h_index = self.data_indexer.add_word_to_index('h')
self.i_index = self.data_indexer.add_word_to_index('i')
self.n_index = self.data_indexer.add_word_to_index('n')
self.s_index = self.data_indexer.add_word_to_index('s')
self.t_index = self.data_indexer.add_word_to_index('t')
self.a_char_index = self.data_indexer.add_word_to_index('a', namespace='characters')
self.c_char_index = self.data_indexer.add_word_to_index('c', namespace='characters')
self.e_char_index = self.data_indexer.add_word_to_index('e', namespace='characters')
self.h_char_index = self.data_indexer.add_word_to_index('h', namespace='characters')
self.i_char_index = self.data_indexer.add_word_to_index('i', namespace='characters')
self.n_char_index = self.data_indexer.add_word_to_index('n', namespace='characters')
self.s_char_index = self.data_indexer.add_word_to_index('s', namespace='characters')
self.t_char_index = self.data_indexer.add_word_to_index('t', namespace='characters')
def tearDown(self):
super(TestSentenceInstance, self).tearDown()
TextInstance.tokenizer = tokenizers['words'](Params({}))
@staticmethod
def instance_to_line(text, index=None):
index_str = '' if index is None else str(index) + '\t'
return index_str + text
def test_read_from_line_handles_one_column(self):
text = "this is a sentence"
instance = SentenceInstance.read_from_line(text)
assert instance.text == text
assert instance.label is None
assert instance.index is None
def test_read_from_line_handles_two_column(self):
index = 23
text = "this is a sentence"
line = self.instance_to_line(text, index)
instance = SentenceInstance.read_from_line(line)
assert instance.text == text
assert instance.index == index
assert instance.label is None
def test_end_to_end_conversion_to_arrays(self):
instance = SentenceInstance("this is a sentence")
indexed_instance = instance.to_indexed_instance(self.data_indexer)
indexed_instance.pad({'num_sentence_words': 7})
word_array, label_array = indexed_instance.as_training_data()
assert_array_equal(word_array, [0, 0, self.start_index, self.this_index, self.is_index,
self.a_index, self.sentence_index])
assert_array_equal(label_array, [[0], [0], [self.this_index], [self.is_index],
[self.a_index], [self.sentence_index], [self.end_index]])
def test_end_to_end_conversion_to_arrays_with_character_tokenizer(self):
TextInstance.tokenizer = tokenizers['characters'](Params({}))
instance = SentenceInstance("a sentence")
indexed_instance = instance.to_indexed_instance(self.data_indexer)
indexed_instance.pad({'num_sentence_words': 13})
word_array, label_array = indexed_instance.as_training_data()
assert_array_equal(word_array, [0, 0, self.start_index, self.a_index, self.space_index,
self.s_index, self.e_index, self.n_index, self.t_index,
self.e_index, self.n_index, self.c_index, self.e_index])
assert_array_equal(label_array, [[0], [0], [self.a_index], [self.space_index],
[self.s_index], [self.e_index], [self.n_index],
[self.t_index], [self.e_index], [self.n_index],
[self.c_index], [self.e_index], [self.end_index]])
def test_end_to_end_conversion_to_arrays_with_word_and_character_tokenizer(self):
TextInstance.tokenizer = tokenizers['words and characters'](Params({}))
instance = SentenceInstance("this is a sentence")
indexed_instance = instance.to_indexed_instance(self.data_indexer)
indexed_instance.pad({'num_sentence_words': 6, 'num_word_characters': 5})
word_array, label_array = indexed_instance.as_training_data()
assert_array_equal(word_array, [[0, 0, 0, 0, 0],
[self.start_index, 0, 0, 0, 0],
[self.this_index, self.t_char_index, self.h_char_index,
self.i_char_index, self.s_char_index],
[self.is_index, self.i_char_index, self.s_char_index, 0, 0],
[self.a_index, self.a_char_index, 0, 0, 0],
[self.sentence_index, self.s_char_index, self.e_char_index,
self.n_char_index, self.t_char_index],
])
assert_array_equal(label_array, [[0], [self.this_index], [self.is_index], [self.a_index],
[self.sentence_index], [self.end_index]])
class TestIndexedSentenceInstance(DeepQaTestCase):
def test_get_padding_lengths_returns_length_of_word_indices(self):
instance = IndexedSentenceInstance([1, 2, 3, 4], [2, 3, 4, 5])
assert instance.get_padding_lengths() == {'num_sentence_words': 4}
def test_pad_adds_zeros_on_left(self):
instance = IndexedSentenceInstance([1, 2, 3, 4], [2, 3, 4, 5])
instance.pad({'num_sentence_words': 5})
assert instance.word_indices == [0, 1, 2, 3, 4]
assert instance.label == [0, 2, 3, 4, 5]
def test_pad_truncates_from_right(self):
instance = IndexedSentenceInstance([1, 2, 3, 4], [2, 3, 4, 5])
instance.pad({'num_sentence_words': 3})
assert instance.word_indices == [2, 3, 4]
assert instance.label == [3, 4, 5]
def test_as_training_data_produces_correct_numpy_arrays(self):
instance = IndexedSentenceInstance([1, 2, 3, 4], [2, 3, 4, 5])
inputs, label = instance.as_training_data()
assert_array_equal(inputs, [1, 2, 3, 4])
assert_array_equal(label, [[2], [3], [4], [5]])
def test_as_training_data_produces_correct_numpy_arrays_with_character_tokenization(self):
instance = IndexedSentenceInstance([[1, 2], [3, 1, 2]], [3, 4])
instance.pad({'num_sentence_words': 3, 'num_word_characters': 4})
inputs, label = instance.as_training_data()
assert_array_equal(inputs, [[0, 0, 0, 0], [1, 2, 0, 0], [3, 1, 2, 0]])
assert_array_equal(label, [[0], [3], [4]])
| deep_qa-master | tests/data/instances/language_modeling/sentence_instance_test.py |
# -*- coding: utf-8 -*-
"""
This script takes as input raw TSV files from the Omnibus dataset and
preprocesses them to be compatible with the deep_qa pipeline.
"""
import logging
import os
import csv
from argparse import ArgumentParser
import pandas
logger = logging.getLogger(__name__) # pylint: disable=invalid-name
def main():
log_format = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
logging.basicConfig(level=logging.INFO, format=log_format)
parser = ArgumentParser(description=("Transform a raw Omnibus TSV "
"to the format that the pipeline "
"expects."))
parser.add_argument('input_csv', nargs='+',
metavar="<input_csv>", type=str,
help=("Path of TSV files to clean up. Pass in "
"as many as you want, and the output "
"will be a concatenation of them "
"written to <last_input_csv>.clean"))
arguments = parser.parse_args()
all_clean_file_rows = []
for omnibus_file in arguments.input_csv:
all_clean_file_rows.extend(clean_omnibus_csv(omnibus_file))
# turn the list of rows into a dataframe, and write to TSV
dataframe = pandas.DataFrame(all_clean_file_rows)
folder, filename = os.path.split(arguments.input_csv[-1])
outdirectory = folder + "/cleaned/"
os.makedirs(outdirectory, exist_ok=True)
outpath = outdirectory + filename + ".clean"
logger.info("Saving cleaned file to %s", outpath)
dataframe.to_csv(outpath, encoding="utf-8", index=False,
sep="\t", header=False,
quoting=csv.QUOTE_NONE)
def clean_omnibus_csv(omnibus_file_path):
logger.info("cleaning up %s", omnibus_file_path)
# open the file as a csv
dataframe = pandas.read_csv(omnibus_file_path, sep="\t",
encoding='utf-8', header=None,
quoting=csv.QUOTE_NONE)
dataframe_trimmed = dataframe[[3, 9]]
clean_rows = dataframe_trimmed.values.tolist()
return clean_rows
if __name__ == '__main__':
main()
| deep_qa-master | scripts/clean_raw_omnibus.py |
import logging
import os
import sys
# pylint: disable=wrong-import-position
sys.path.append(os.path.join(os.path.dirname(__file__), ".."))
from deep_qa import run_model_from_file, evaluate_model
from deep_qa.common.checks import ensure_pythonhashseed_set
logger = logging.getLogger(__name__) # pylint: disable=invalid-name
def main():
usage = 'USAGE: run_model.py [param_file] [train|test]'
if len(sys.argv) == 2:
run_model_from_file(sys.argv[1])
elif len(sys.argv) == 3:
mode = sys.argv[2]
if mode == 'train':
run_model_from_file(sys.argv[1])
elif mode == 'test':
evaluate_model(sys.argv[1])
else:
print(usage)
sys.exit(-1)
else:
print(usage)
sys.exit(-1)
if __name__ == "__main__":
ensure_pythonhashseed_set()
logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',
level=logging.INFO)
main()
| deep_qa-master | scripts/run_model.py |
# -*- coding: utf-8 -*-
"""
This script takes as input CSV files from the Maluuba NewsQA dataset.
The dataset is quite dirty by default, so this script does some preprocessing
and extracts the relevant information we need in the deep_qa library.
"""
import json
import logging
import os
import re
from argparse import ArgumentParser
import pandas
from tqdm import tqdm
from scipy.stats import mode
logger = logging.getLogger(__name__) # pylint: disable=invalid-name
def main():
log_format = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
logging.basicConfig(level=logging.INFO, format=log_format)
parser = ArgumentParser(description=("Clean up a CSV file in "
"the NewsQA dataset."))
parser.add_argument('input_csv', nargs='+',
metavar="<input_csv>", type=str,
help=("Path to CSV files to clean up. Pass in "
"as many as you want, and the output "
"will be written to <input_csv>.clean"))
arguments = parser.parse_args()
for newsqa_file in arguments.input_csv:
clean_newsqa_csv(newsqa_file)
def isfloat(value):
try:
float(value)
return True
except ValueError:
return False
def clean_newsqa_csv(newsqa_file_path):
logger.info("cleaning up %s", newsqa_file_path)
# open the file as a csv
dataframe = pandas.read_csv(newsqa_file_path, encoding='utf-8')
dirty_rows = dataframe.values.tolist() # pylint: disable=no-member
clean_rows = []
clean_headers = ["question_text", "label", "answer_string", "passage"]
for row in tqdm(dirty_rows):
clean_row = []
# clean up dirty file
candidate_answers = re.split(r"\||,", row[2])
answer_absent_prob = float(row[3]) if isfloat(row[3]) else 1.0
passage_bad_prob = float(row[4]) if isfloat(row[4]) else 1.0
validated_answers = row[5]
raw_passage_text = row[6]
# figure out the label span (answer_span)
if validated_answers and not pandas.isnull(validated_answers):
# pick the validated answer with the most votes
# in case of tie, pick the longest one
validated_answers_dict = json.loads(validated_answers)
answer_span = max(validated_answers_dict,
key=validated_answers_dict.get)
else:
# fall back and pick the candidate answer that
# occurs most frequently.
answer_span = mode(candidate_answers)[0][0]
if (answer_span.lower() == "none" or answer_span.lower() == "bad_question" or
answer_absent_prob >= 0.5 or passage_bad_prob >= 0.5):
continue
initial_span_start, initial_span_end = [int(x) for x in
answer_span.split(":")]
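        # drop a trailing non-alphanumeric character (e.g. punctuation) from the answer span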
if not raw_passage_text[initial_span_start:initial_span_end][-1].isalnum():
initial_span_end -= 1
raw_answer_snippet = raw_passage_text[:initial_span_start]
# count the number of spaces to add before the answer (newlines following non-newline)
num_spaces_added = len(re.findall("(?<=[^\\n|\\r])(\\n|\\r)",
raw_answer_snippet))
# count the number of newlines that we're going to remove
# before the answer (all newlines before the answer)
num_newlines_removed = len(re.findall("(\\r|\\n)", raw_answer_snippet))
# offset refers to how much to shift the span by
        offset = num_newlines_removed - num_spaces_added
# remove consecutive newlines with spaces in the raw passage text
# to get a clean version with no linebreaks
processed_passage_text = re.sub("(\\r|\\n)+", " ", raw_passage_text)
        # calculate the new span indices by subtracting the previously calculated offset
final_span_start = initial_span_start - offset
final_span_end = initial_span_end - offset
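        # Worked example (hypothetical text): for a passage starting "Title\r\n\r\nBody",
        # 4 newline characters are removed and 1 space is added (only the "\r" after
        # "Title" matches the lookbehind), so offset = 4 - 1 = 3 and an answer starting
        # at raw index 9 ("Body") starts at index 6 in the cleaned text.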
# build the new row of the dataset
# question text
clean_row.append(row[1])
# label
clean_row.append(str(final_span_start) + ":" + str(final_span_end))
# answer as a string
clean_row.append(processed_passage_text[final_span_start:final_span_end])
# passage text
clean_row.append(processed_passage_text)
clean_rows.append(clean_row)
# turn the list of rows into a dataframe, and write to CSV
dataframe = pandas.DataFrame(clean_rows, columns=clean_headers)
folder, filename = os.path.split(newsqa_file_path)
outdirectory = folder + "/cleaned/"
os.makedirs(outdirectory, exist_ok=True)
outpath = outdirectory + filename + ".clean"
logger.info("Saving cleaned file to %s", outpath)
dataframe.to_csv(outpath, encoding="utf-8", index=False)
if __name__ == '__main__':
main()
| deep_qa-master | scripts/clean_newsqa.py |
import logging
import os
import sys
# pylint: disable=wrong-import-position
sys.path.append(os.path.join(os.path.dirname(__file__), ".."))
from deep_qa import score_dataset_with_ensemble, compute_accuracy
from deep_qa.common.checks import ensure_pythonhashseed_set
logger = logging.getLogger(__name__) # pylint: disable=invalid-name
def main():
usage = 'USAGE: run_ensemble.py [param_file]+ -- [data_file]+'
try:
separator_index = sys.argv.index('--')
except ValueError:
print(usage)
sys.exit(-1)
param_files = sys.argv[1:separator_index]
dataset_files = sys.argv[separator_index + 1:]
predictions, labels = score_dataset_with_ensemble(param_files, dataset_files)
compute_accuracy(predictions, labels)
if __name__ == "__main__":
ensure_pythonhashseed_set()
logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',
level=logging.INFO)
main()
| deep_qa-master | scripts/run_ensemble.py |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# deep_qa documentation build configuration file, created by
# sphinx-quickstart on Wed Jan 25 11:35:07 2017.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import sphinx
from distutils.version import LooseVersion
import sphinx_rtd_theme
import os
import sys
import inspect
sys.path.insert(0, os.path.abspath('../'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = ['sphinx.ext.autodoc',
'sphinx.ext.doctest',
'sphinx.ext.linkcode',
'numpydoc',
'sphinx.ext.autosummary'
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'deep_qa'
copyright = '2017, Allen Institute for Artificial Intelligence'
author = 'Allen Institute for Artificial Intelligence'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.0'
# The full version, including alpha/beta/rc tags.
release = '0.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = 'en'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
numpydoc_show_class_members = False
autodoc_default_flags = ['private-members']
def maybe_skip_member(app, what, name, obj, skip, options):
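    # Skip dunder members and `call` methods (likely the Keras layer entry point) in autodoc.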
if '__' in name:
return True
if name == 'call':
return True
return skip
def setup(app):
app.connect('autodoc-skip-member', maybe_skip_member)
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = "sphinx_rtd_theme"
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# -- Options for HTMLHelp output ------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'deep_qadoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'deep_qa.tex', 'deep\\_qa Documentation',
'AI2', 'manual'),
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'deep_qa', 'deep_qa Documentation',
[author], 1)
]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'deep_qa', 'deep_qa Documentation',
author, 'deep_qa', 'One line description of project.',
'Miscellaneous'),
]
# -- Options for Epub output ----------------------------------------------
# Bibliographic Dublin Core info.
epub_title = project
epub_author = author
epub_publisher = author
epub_copyright = copyright
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#
# epub_identifier = ''
# A unique identification for the text.
#
# epub_uid = ''
# A list of files that should not be packed into the epub file.
epub_exclude_files = ['search.html']
# -- Miscellaneous Extra Tweaks -------------------------------------------
# pngmath / imgmath compatibility layer for different sphinx versions
if LooseVersion(sphinx.__version__) < LooseVersion('1.4'):
extensions.append('sphinx.ext.pngmath')
else:
extensions.append('sphinx.ext.imgmath')
# make github links resolve
def linkcode_resolve(domain, info):
"""
Determine the URL corresponding to Python object
This code is from
https://github.com/numpy/numpy/blob/master/doc/source/conf.py#L290
and https://github.com/Lasagne/Lasagne/pull/262
"""
if domain != 'py':
return None
modname = info['module']
fullname = info['fullname']
submod = sys.modules.get(modname)
if submod is None:
return None
obj = submod
for part in fullname.split('.'):
try:
obj = getattr(obj, part)
except:
return None
try:
fn = inspect.getsourcefile(obj)
except:
fn = None
if not fn:
return None
try:
source, lineno = inspect.getsourcelines(obj)
except:
lineno = None
if lineno:
linespec = "#L%d-L%d" % (lineno, lineno + len(source) - 1)
else:
linespec = ""
filename = info['module'].replace('.', '/')
return "http://github.com/allenai/deep_qa/blob/master/%s.py%s" % (filename, linespec)
| deep_qa-master | doc/conf.py |
import logging
import os
import openai
from diskcache import Cache
logger = logging.getLogger(__name__)
cache = Cache(os.path.expanduser("~/.cache/gpt3calls"))
@cache.memoize()
def cached_openai_call( # kwargs doesn't work with caching.
prompt, engine, temperature, max_tokens, top_p,
frequency_penalty, presence_penalty, stop,
n, best_of, logprobs,
):
return openai.Completion.create(
prompt=prompt, engine=engine, temperature=temperature, max_tokens=max_tokens,
top_p=top_p, frequency_penalty=frequency_penalty, presence_penalty=presence_penalty,
stop=stop, n=n, best_of=best_of, logprobs=logprobs
)
def openai_call(
prompt, engine, temperature, max_tokens, top_p,
frequency_penalty, presence_penalty, stop,
n, best_of, logprobs,
):
function = cached_openai_call if temperature == 0 else openai.Completion.create
return function(
prompt=prompt, engine=engine, temperature=temperature, max_tokens=max_tokens,
top_p=top_p, frequency_penalty=frequency_penalty, presence_penalty=presence_penalty,
stop=stop, n=n, best_of=best_of, logprobs=logprobs
)
class GPT3Generator:
def __init__(self, engine="text-davinci-002", temperature=0, max_tokens=100,
top_p=1, frequency_penalty=0, presence_penalty=0, stop=["\n"],
n=1, best_of=1, logprobs=0):
self.engine = engine
self.logprobs = logprobs
self.n = n
self.best_of = best_of
self.presence_penalty = presence_penalty
self.frequency_penalty = frequency_penalty
self.max_tokens = max_tokens
self.top_p = top_p
self.stop = stop
self.temperature = temperature
def generate_text_sequence(self, prompt):
"""
        :param prompt: the text prompt to complete (trailing whitespace is stripped)
:return: returns a sequence of tuples (string, score) where lower score is better
"""
# GPT3 can't handle trailing white-space
prompt = prompt.rstrip()
if self.best_of is None:
response = openai.Completion.create(
engine=self.engine,
prompt=prompt,
temperature=self.temperature,
max_tokens=self.max_tokens,
top_p=self.top_p,
n=self.n,
logprobs=self.logprobs,
frequency_penalty=self.frequency_penalty,
presence_penalty=self.presence_penalty,
stop=self.stop
)
else:
response = openai_call(
engine=self.engine,
prompt=prompt,
temperature=self.temperature,
max_tokens=self.max_tokens,
top_p=self.top_p,
n=self.n,
best_of=self.best_of,
logprobs=self.logprobs,
frequency_penalty=self.frequency_penalty,
presence_penalty=self.presence_penalty,
stop=self.stop
)
output_seq_score = []
for index, choice in enumerate(response["choices"]):
# print(choice)
if "logprobs" in choice and "token_logprobs" in choice["logprobs"]:
# get probs of the tokens used in text (i.e. till the stop token)
probs = []
# selected_toks = []
for prob, tok in zip(choice["logprobs"]["token_logprobs"],
choice["logprobs"]["tokens"]):
if tok not in self.stop and tok != "<|endoftext|>":
probs.append(prob)
# selected_toks.append(tok)
else:
# include the probability of the stop character too. This will also
# ensure that an empty string (i.e. first predicted character being a stop
# character) also has a reasonable probability measure
# selected_toks.append(tok)
probs.append(prob)
break
# average the logits and negate to make them +ve scores where lower is better
# set a high +ve score if no predictions
# print(probs, selected_toks)
score = -sum(probs) / len(probs) if len(probs) else 100.0
output_seq_score.append((choice["text"], score))
else:
# no score, just use index
output_seq_score.append((choice["text"], index))
# Ensure sorted output
return sorted(output_seq_score, key=lambda x: x[1])
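# Minimal usage sketch (illustrative only): assumes OpenAI credentials are configured
# (e.g. the OPENAI_API_KEY environment variable) and that the chosen engine is available.
if __name__ == "__main__":
    generator = GPT3Generator(engine="text-davinci-002", temperature=0, max_tokens=50)
    for text, score in generator.generate_text_sequence("Q: What is 2 + 2?\nA:"):
        print(repr(text), score)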
| DecomP-main | src/decomp/models/gpt3generator.py |
import requests
class LLMClientGenerator:
# Instructions to start the LLM Server are in the README here:
# https://github.com/harshTrivedi/llm_server
def __init__(self, model_name, host, port,
max_input=None, max_tokens=100,
min_length=1, do_sample=False, stop=["\n"],
temperature=1.0, top_k=50, top_p=1.0,
num_return_sequences=1, repetition_penalty=None,
length_penalty=None):
valid_model_names = ["gpt-j-6B", "opt-66b", "gpt-neox-20b", "T0pp"]
assert model_name in valid_model_names, \
f"Model name {model_name} not in {valid_model_names}"
self.model_name = model_name
self.host = host
self.port = port
self.max_input = max_input
self.max_length = max_tokens
self.min_length = min_length
self.do_sample = do_sample
self.temperature = temperature
self.top_k = top_k
self.top_p = top_p
self.stop = stop
self.num_return_sequences = num_return_sequences
self.repetition_penalty = repetition_penalty
self.length_penalty = length_penalty
def generate_text_sequence(self, prompt):
"""
        :param prompt: the text prompt to complete (trailing whitespace is stripped)
:return: returns a sequence of tuples (string, score) where lower score is better
"""
prompt = prompt.rstrip()
params = {
"prompt": prompt,
"max_input": self.max_input,
"max_length": self.max_length,
"min_length": self.min_length,
"do_sample": self.do_sample,
"temperature": self.temperature,
"top_k": self.top_k,
"top_p": self.top_p,
"num_return_sequences": self.num_return_sequences,
"repetition_penalty": self.repetition_penalty,
"length_penalty": self.length_penalty,
}
response = requests.get(self.host + ":" + str(self.port) + "/generate", params=params)
if response.status_code != 200:
raise Exception("LLM Generation request failed!")
result = response.json()
generated_texts = result.get("generated_texts", "")
modified_texts = []
for text in generated_texts:
# remove the prompt
if text.startswith(prompt):
text = text[len(prompt):]
print(text)
if self.stop:
for stop_str in self.stop:
if stop_str in text:
text = text[:text.index(stop_str)]
print(text)
modified_texts.append(text)
generated_texts = modified_texts
model_name = result.get("model_name",
"") # To assure that response is from the right model.
if model_name != self.model_name:
raise Exception(
f"Looks like incorrect LLM server is ON: {model_name} != {self.model_name}.")
output_seq_score = [(text, 1 / (index + 1)) for index, text in enumerate(generated_texts)]
# TODO: Deal with output-probabilities if needed.
return sorted(output_seq_score, key=lambda x: x[1])
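# Minimal usage sketch (illustrative only): host and port are placeholders and assume an
# LLM server (https://github.com/harshTrivedi/llm_server) is running and serving gpt-j-6B.
if __name__ == "__main__":
    generator = LLMClientGenerator(model_name="gpt-j-6B", host="http://localhost", port=8000)
    print(generator.generate_text_sequence("Q: What is the capital of France?\nA:"))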
| DecomP-main | src/decomp/models/llm_client_generator.py |
import json
import re
from decomp.models.gpt3generator import GPT3Generator
from decomp.models.llm_client_generator import LLMClientGenerator
class LLMQAModel:
def __init__(self, prompt_file="", regex_extract=None, add_context=True, gen_model="gpt3",
**kwargs):
if prompt_file:
with open(prompt_file, "r") as input_fp:
self.prompt = "".join(input_fp.readlines())
else:
self.prompt = None
if gen_model == "gpt3":
self.generator = GPT3Generator(**kwargs)
elif gen_model == "llm_api":
self.generator = LLMClientGenerator(**kwargs)
else:
raise ValueError("Unknown gen_model: " + gen_model)
self.num_calls = 0
self.regex_extract = regex_extract
self.add_context = add_context
def ask_question(self, input_question, context):
question_prompt = self.prompt
if context and self.add_context:
# TODO Hack!! Needs a better fix
m = re.match(" *PARA_([0-9]+) (.*)", input_question)
if m:
assert isinstance(context, list)
context = context[int(m.group(1))]
input_question = m.group(2)
elif isinstance(context, list):
context = "\n".join(context)
if context:
question_prompt += "\n\n" + context
question_prompt += "\n\nQ: " + input_question + "\nA:"
# print("<QA>: ... %s" % question_prompt[-500:])
output_text_scores = self.generator.generate_text_sequence(question_prompt)
self.num_calls += 1
if len(output_text_scores) > 1:
print("Can not handle more than one answer for QA model yet" +
"\n" + str(output_text_scores))
output_text_scores = [output_text_scores[0]]
# print(input_question)
# print(output_text_scores)
# only answer string
answer_str = output_text_scores[0][0].strip()
if self.regex_extract:
m = re.match(self.regex_extract, answer_str)
if m:
answer_str = m.group(1).strip()
else:
# No match
print("Did not find a match for input regex: {} in {}".format(self.regex_extract,
answer_str))
return "", []
try:
json_answer = json.loads(answer_str)
return json_answer, []
except ValueError:
# Not a valid json ignore
pass
return answer_str, []
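# Minimal usage sketch (illustrative only): the prompt file path is a hypothetical
# placeholder, and OpenAI API access is required when gen_model="gpt3".
if __name__ == "__main__":
    qa_model = LLMQAModel(prompt_file="prompts/qa_prompt.txt", gen_model="gpt3")
    answer, _ = qa_model.ask_question("What is the capital of France?", context="")
    print(answer)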
| DecomP-main | src/decomp/models/llm_qa_model.py |
import torch
from transformers import AutoConfig, AutoTokenizer, AutoModelWithLMHead
from transformers.generation_utils import SampleEncoderDecoderOutput
import logging
logger = logging.getLogger(__name__)
class LMGenerator:
    def __init__(self, model_path, device=None,
                 generation_args=None, encoder_args=None, decoder_args=None):
        if device is None:
            self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
        else:
            self.device = device
        self.config = AutoConfig.from_pretrained(model_path)
        self.tokenizer = AutoTokenizer.from_pretrained(model_path)
        self.model = AutoModelWithLMHead.from_pretrained(model_path, config=self.config).to(
            self.device)
        # copy the passed dicts (or use fresh empty ones) so shared default arguments are never mutated
        self.generation_args = dict(generation_args) if generation_args else {}
        # always generate output with scores
        self.generation_args["output_scores"] = True
        self.generation_args["return_dict_in_generate"] = True
        self.encoder_args = encoder_args if encoder_args is not None else {}
        self.decoder_args = decoder_args if decoder_args is not None else {}
def generate_text_sequence(self, input_text):
"""
:param input_text:
:return: returns a sequence of tuples (string, score) where lower score is better
"""
encoded_prompt = self.tokenizer.encode(input_text, **self.encoder_args)
encoded_prompt = encoded_prompt.to(self.device)
generated_dict = self.model.generate(input_ids=encoded_prompt, **self.generation_args)
generated_seqs = generated_dict.sequences
if isinstance(generated_dict, SampleEncoderDecoderOutput):
logger.warning("No scores generated when sampled sequences")
generated_scores = [0] * len(generated_seqs)
else:
generated_scores = generated_dict.sequences_scores.tolist()
if len(generated_seqs.shape) > 2:
generated_seqs.squeeze_()
output_seq_score = []
for generated_sequence_idx, generated_seq in enumerate(generated_seqs):
generated_output = generated_seq.tolist()
text = self.tokenizer.decode(generated_output, **self.decoder_args)
# flip the negative logit so that sequence with lowest scores is best
output_seq_score.append((text, -generated_scores[generated_sequence_idx]))
# Ensure sorted output
return sorted(output_seq_score, key=lambda x: x[1])
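# Minimal usage sketch (illustrative only): the checkpoint name and generation arguments
# are placeholders; beam search (num_beams > 1) is assumed so that sequence scores exist.
if __name__ == "__main__":
    generator = LMGenerator("t5-small",
                            generation_args={"num_beams": 2, "num_return_sequences": 2,
                                             "max_length": 20},
                            encoder_args={"return_tensors": "pt"},
                            decoder_args={"skip_special_tokens": True})
    print(generator.generate_text_sequence("translate English to German: Hello world"))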
| DecomP-main | src/decomp/models/generator.py |
"""
Script to build dataset for concatenating letters from words.
E.g.
Take the letters at position 3 of the words in "Nestor Geng Duran" and concatenate them.
Sample usage:
python -m decomp.datasets_utils.build_letter_cat_dataset \
--input_first_names configs/letter_datasets/first_names.txt \
--input_last_names configs/letter_datasets/last_names.txt \
--output datasets/letter_cat/n3_e20_pos3.txt \
    --num_words 3 --num_examples 20 --position 3 --delim " "
"""
import argparse
import json
import math
import random
def parse_arguments():
arg_parser = argparse.ArgumentParser(
description='Create dataset for letter concatenation')
arg_parser.add_argument('--input_first_names', type=str, required=True,
help="Input file of first names")
arg_parser.add_argument('--input_last_names', type=str, required=True,
help="Input file of last names")
arg_parser.add_argument('--output', type=str, required=True, help="Output JSON file")
arg_parser.add_argument('--num_words', type=int, default=3, required=False,
help="Number of words")
arg_parser.add_argument('--num_examples', type=int, default=20, required=False,
help="Number of examples in dataset")
arg_parser.add_argument('--position', type=int, default=1, required=False,
help="Position of letter to concatenate(-1 used for last letter, "
"-100 for random sampling between 1 and 7 and -1.")
arg_parser.add_argument("--delim", default="")
return arg_parser.parse_args()
def words_with_min_length(words, length):
output = []
for w in words:
if len(w) > length >= 0:
output.append(w)
elif length < 0 and len(w) >= abs(length):
# e.g. if length is -1, word should have at least one letter
output.append(w)
return output
def create_word_concat_question(pos, name, delim):
if pos == -1:
pos_str = "the last letters"
elif pos == -2:
pos_str = "the second last letters"
else:
pos_str = "the letters at position {}".format(pos + 1)
if delim == "":
delim_str = ""
elif delim == " ":
delim_str = " using a space"
elif delim == ",":
delim_str = " using a comma"
elif delim == ";":
delim_str = " using a semi-colon"
else:
raise ValueError("Cannot handle delim: {} to create question".format(delim))
return "Take {} of the words in \"{}\" and concatenate them{}.".format(pos_str, name, delim_str)
def create_single_word_question(pos, name):
if pos == -1:
pos_str = "the last letter"
elif pos == -2:
pos_str = "the second last letter"
else:
pos_str = "the letter at position {}".format(pos + 1)
return "Return {} of the word \"{}\".".format(pos_str, name)
def accept_word(word):
if " " in word:
return False
if "-" in word:
return False
if not word.isascii():
return False
return True
if __name__ == '__main__':
args = parse_arguments()
with open(args.input_first_names, "r") as input_fp:
first_names = [f.strip() for f in input_fp.readlines()
if accept_word(f.strip())]
with open(args.input_last_names, "r") as input_fp:
last_names = [f.strip() for f in input_fp.readlines()
if accept_word(f.strip())]
qa_pairs = []
for eg_idx in range(args.num_examples):
# construct name
if args.position == -100:
pos = random.choice(list(range(7)) + [-1])
else:
pos = args.position
valid_first_names = words_with_min_length(first_names, pos)
valid_last_names = words_with_min_length(last_names, pos)
if len(valid_first_names) == 0 or len(valid_last_names) == 0:
raise ValueError("No names with length exceeding {}. Choose a different value for"
" position".format(pos))
words = []
for n_idx in range(args.num_words):
if n_idx < math.floor(args.num_words / 2):
words.append(random.choice(valid_first_names))
else:
words.append(random.choice(valid_last_names))
name = " ".join(words)
answer = args.delim.join([w[pos] for w in words])
drop_answer = {
"number": "",
"date": {
"day": "",
"month": "",
"year": ""
},
"spans": [answer]
}
if args.num_words == 1:
question = create_single_word_question(pos, name)
else:
question = create_word_concat_question(pos, name, args.delim)
qa_pairs.append({
"question": question,
"answer": drop_answer,
"query_id": str(eg_idx),
"name": name,
"words": words
})
output_json = {
"1": {
"passage": "",
"qa_pairs": qa_pairs
}
}
with open(args.output, "w") as output_fp:
output_fp.write(json.dumps(output_json, indent=2))
| DecomP-main | src/decomp/datasets_utils/build_letter_cat_dataset.py |
# Reverse the sequence "card, stamp, book, water, glasses".
import argparse
import logging
import json
import math
import os
import random
import string
log = logging.getLogger(__name__)
questions = {
("reversed", "words"): 'Reverse the sequence "{}".',
("reversed", "letters"): 'Reverse the sequence "{}".',
("reversed", "numbers"): 'Reverse the sequence "{}".',
("sorted", "words"): 'Sort alphabetically the sequence "{}".',
("sorted", "letters"): 'Sort alphabetically the sequence "{}".',
("sorted", "numbers"): 'Sort the sequence "{}".',
("sort_last", "words"): 'Sort alphabetically by last letter the sequence "{}".',
("sort_last", "numbers"): 'Sort by last digit the sequence "{}".',
}
def main():
random.seed(0)
parser = argparse.ArgumentParser()
parser.add_argument("--elts", choices=["words", "letters", "numbers"])
parser.add_argument("--listop", choices=["reversed", "sorted", "sort_last"])
parser.add_argument("--num_examples", type=int, default=100)
parser.add_argument("--train_size", type=int, default=10)
parser.add_argument("--out_dir", default=".")
parser.add_argument("--word_list", default="configs/data/wordlist.txt")
parser.add_argument("--hard", action="store_true")
parser.add_argument("--length", default=[4], nargs="+", type=int)
parser.add_argument("--cot", action="store_true")
parser.add_argument("--delim", default=", ")
args = parser.parse_args()
with open(args.word_list, "r") as f:
wordlist = list(map(str.strip, f))
lst = (
string.ascii_letters
if args.elts == "letters"
else wordlist
if args.elts == "words"
else list(range(1_000))
)
question = questions[args.listop, args.elts]
if args.hard and args.elts == "words":
sublists = get_hard_sublists(
lst, args.length, args.num_examples, args.listop
)
else:
try:
sublists = get_sublists(lst, args.length, args.num_examples)
except OverflowError:
sublists = repeated_sample(lst, args.length, args.num_examples)
    func = {"reversed": reversed, "sorted": sorted, "sort_last": sort_last}[args.listop]
qa_pairs = [listop(func, args.delim, question, sublist) for sublist in sublists]
qa_pairs = [
{
"question": question,
"answer": {
"number": "",
"date": {"day": "", "month": "", "year": ""},
"spans": [answer],
},
"query_id": str(i),
"validated_answers": [],
}
for i, (question, answer) in enumerate(qa_pairs)
]
qa_pairs = random.sample(qa_pairs, len(qa_pairs))
test = {
"alg_qa": {
"passage": "",
"qa_pairs": qa_pairs[: -args.train_size],
}
}
train = {
"alg_qa": {
"passage": "",
"qa_pairs": qa_pairs[-args.train_size :],
}
}
filename = "{}_{}_{}.json".format(
str.join("_", map(str, args.length)),
"hard" if args.hard else "normal",
args.elts,
args.num_examples,
)
if args.cot:
cot_examples = map(get_cot, random.sample(train["alg_qa"]["qa_pairs"], 5))
with open("cot.txt", "w") as f:
f.write(str.join("\n\n", cot_examples))
with open(os.path.join(args.out_dir, "test_" + filename), "w") as f:
json.dump(test, f, indent=2)
with open(os.path.join(args.out_dir, "train_" + filename), "w") as f:
json.dump(train, f, indent=2)
def get_cot(qa_pair: dict) -> str:
answer = qa_pair["answer"]["spans"][0]
lines = [
"QC: {}".format(qa_pair["question"]),
'QS: The answer is "{}".'.format(answer),
"A: {}".format(answer),
"QS: [EOQ]",
]
return str.join("\n", lines)
def listop(func, sep, question, sublist):
sequence = sep.join(map(str, sublist))
question = question.format(sequence)
answer = sep.join(map(str, func(sublist)))
return question, answer
def repeated_sample(lst, lengths: list, num_examples):
samples = num_examples // len(lengths)
log.info("sampling {} instances".format(samples * len(lengths)))
return (random.sample(lst, l) for _ in range(samples) for l in lengths)
def get_hard_sublists(lst, lengths: list, num_examples, listop):
if len(lengths) > 1:
log.warning("--hard not implemented for multiple lengths. Only the first length will be used.")
length = lengths[0]
if listop == "reversed":
return get_sublists(lst, [length], num_examples)
elif listop == "sorted" or listop == "sort_last":
pos = 0 if listop == "sorted" else -1
output_list = []
valid_list = lst.copy()
while len(output_list) < num_examples:
seed_word = random.choice(valid_list)
wordlist = [x for x in valid_list if x[pos] == seed_word[pos]]
if len(wordlist) > length:
output_list.append(random.sample(wordlist, length))
else:
for x in wordlist:
valid_list.remove(x)
return output_list
else:
raise ValueError("Hard examples not implemented for {}".format(listop))
def get_sublists(lst, lengths: list, num_examples):
sublists = list()
samples = num_examples // len(lengths)
log.info("sampling {} instances".format(samples * len(lengths)))
for l in lengths:
permutation_count = math.perm(len(lst), l)
permutation_idxs = random.sample(range(permutation_count), samples)
sublists.extend(get_permutation(i, lst, l) for i in permutation_idxs)
return sublists
def get_permutation(i, lst, length):
permutation = list()
for _ in range(length):
i, idx = divmod(i, len(lst))
permutation.append(lst[idx])
# remove to prevent duplicates
lst = lst[:idx] + lst[idx + 1 :]
return permutation
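# Illustrative example: get_permutation(0, ["a", "b", "c"], 2) -> ["a", "b"]
# (each index in range(perm(len(lst), length)) maps to a distinct duplicate-free permutation)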
def sort_last(sublist):
    # key on the reversed string form so that both strings and numbers can be sorted by last character/digit
    return sorted(sublist, key=lambda x: str(x)[::-1])
if __name__ == "__main__":
main()
| DecomP-main | src/decomp/datasets_utils/build_reverse_dataset.py |
#!/usr/bin/python
from collections import defaultdict
from typing import Any, Dict, List, Set, Tuple, Union, Optional
import json
import argparse
import string
import re
import sys
import numpy as np
from scipy.optimize import linear_sum_assignment
# From here through _normalize_answer was originally copied from:
# https://worksheets.codalab.org/rest/bundles/0x6b567e1cf2e041ec80d7098f031c5c9e/contents/blob/
# Then cleaned up and modified a bit.
def _remove_articles(text: str) -> str:
regex = re.compile(r"\b(a|an|the)\b", re.UNICODE)
return re.sub(regex, " ", text)
def _white_space_fix(text: str) -> str:
return " ".join(text.split())
EXCLUDE = set(string.punctuation)
def _remove_punc(text: str) -> str:
if not _is_number(text):
return "".join(ch for ch in text if ch not in EXCLUDE)
else:
return text
def _lower(text: str) -> str:
return text.lower()
def _tokenize(text: str) -> List[str]:
return re.split(" |-", text)
def _normalize_answer(text: str) -> str:
"""Lower text and remove punctuation, articles and extra whitespace."""
parts = [
_white_space_fix(_remove_articles(_normalize_number(_remove_punc(_lower(token)))))
for token in _tokenize(text)
]
parts = [part for part in parts if part.strip()]
normalized = " ".join(parts).strip()
return normalized
def _is_number(text: str) -> bool:
try:
float(text)
return True
except ValueError:
return False
def _normalize_number(text: str) -> str:
if _is_number(text):
return str(float(text))
else:
return text
def _answer_to_bags(
answer: Union[str, List[str], Tuple[str, ...]]
) -> Tuple[List[str], List[Set[str]]]:
if isinstance(answer, (list, tuple)):
raw_spans = answer
else:
raw_spans = [answer]
normalized_spans: List[str] = []
token_bags = []
for raw_span in raw_spans:
if not isinstance(raw_span, str):
return None
normalized_span = _normalize_answer(raw_span)
normalized_spans.append(normalized_span)
token_bags.append(set(normalized_span.split()))
return normalized_spans, token_bags
def _align_bags(predicted: List[Set[str]], gold: List[Set[str]]) -> List[float]:
"""
Takes gold and predicted answer sets and first finds the optimal 1-1 alignment
between them and gets maximum metric values over all the answers.
"""
scores = np.zeros([len(gold), len(predicted)])
for gold_index, gold_item in enumerate(gold):
for pred_index, pred_item in enumerate(predicted):
if _match_numbers_if_present(gold_item, pred_item):
scores[gold_index, pred_index] = _compute_f1(pred_item, gold_item)
row_ind, col_ind = linear_sum_assignment(-scores)
max_scores = np.zeros([max(len(gold), len(predicted))])
for row, column in zip(row_ind, col_ind):
max_scores[row] = max(max_scores[row], scores[row, column])
return max_scores
def _compute_f1(predicted_bag: Set[str], gold_bag: Set[str]) -> float:
intersection = len(gold_bag.intersection(predicted_bag))
if not predicted_bag:
precision = 1.0
else:
precision = intersection / float(len(predicted_bag))
if not gold_bag:
recall = 1.0
else:
recall = intersection / float(len(gold_bag))
f1 = (
(2 * precision * recall) / (precision + recall)
if not (precision == 0.0 and recall == 0.0)
else 0.0
)
return f1
def _match_numbers_if_present(gold_bag: Set[str], predicted_bag: Set[str]) -> bool:
gold_numbers = set()
predicted_numbers = set()
for word in gold_bag:
if _is_number(word):
gold_numbers.add(word)
for word in predicted_bag:
if _is_number(word):
predicted_numbers.add(word)
if (not gold_numbers) or gold_numbers.intersection(predicted_numbers):
return True
return False
def get_metrics(
predicted: Union[str, List[str], Tuple[str, ...]], gold: Union[str, List[str], Tuple[str, ...]]
) -> Tuple[float, float]:
"""
Takes a predicted answer and a gold answer (that are both either a string or a list of
strings), and returns exact match and the DROP F1 metric for the prediction. If you are
writing a script for evaluating objects in memory (say, the output of predictions during
validation, or while training), this is the function you want to call, after using
:func:`answer_json_to_strings` when reading the gold answer from the released data file.
"""
predicted_bags = _answer_to_bags(predicted)
gold_bags = _answer_to_bags(gold)
if predicted_bags is None:
print("Could not parse answer: {}".format(predicted))
return 0.0, 0.0
if set(predicted_bags[0]) == set(gold_bags[0]) and len(predicted_bags[0]) == len(gold_bags[0]):
exact_match = 1.0
else:
exact_match = 0.0
f1_per_bag = _align_bags(predicted_bags[1], gold_bags[1])
f1 = np.mean(f1_per_bag)
f1 = round(f1, 2)
return exact_match, f1
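# Illustrative example of get_metrics:
#   get_metrics("Barack Obama", ["Barack Hussein Obama"])  ->  (0.0, 0.8)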
def answer_json_to_strings(answer: Dict[str, Any]) -> Tuple[Tuple[str, ...], str]:
"""
Takes an answer JSON blob from the DROP data release and converts it into strings used for
evaluation.
"""
if "number" in answer and answer["number"]:
return tuple([str(answer["number"])]), "number"
elif "spans" in answer and answer["spans"]:
return tuple(answer["spans"]), "span" if len(answer["spans"]) == 1 else "spans"
elif "date" in answer:
return (
tuple(
[
"{0} {1} {2}".format(
answer["date"]["day"], answer["date"]["month"], answer["date"]["year"]
)
]
),
"date",
)
else:
raise ValueError(
f"Answer type not found, should be one of number, spans or date at: {json.dumps(answer)}"
)
def evaluate_json(
annotations: Dict[str, Any], predicted_answers: Dict[str, Any], quiet: bool
) -> Tuple[float, float]:
"""
Takes gold annotations and predicted answers and evaluates the predictions for each question
in the gold annotations. Both JSON dictionaries must have query_id keys, which are used to
match predictions to gold annotations (note that these are somewhat deep in the JSON for the
gold annotations, but must be top-level keys in the predicted answers).
The ``annotations`` are assumed to have the format of the dev set in the DROP data release.
The ``predicted_answers`` JSON must be a dictionary keyed by query id, where the value is a string
(or list of strings) that is the answer.
"""
instance_exact_match = []
instance_f1 = []
# for each type as well
type_to_em: Dict[str, List[float]] = defaultdict(list)
type_to_f1: Dict[str, List[float]] = defaultdict(list)
for _, annotation in annotations.items():
for qa_pair in annotation["qa_pairs"]:
query_id = qa_pair["query_id"]
max_em_score = 0.0
max_f1_score = 0.0
max_type = None
if query_id in predicted_answers:
predicted = predicted_answers[query_id]
# if predicted == "":
# print(query_id + "\n" + qa_pair["question"])
candidate_answers = [qa_pair["answer"]]
if "validated_answers" in qa_pair and qa_pair["validated_answers"]:
candidate_answers += qa_pair["validated_answers"]
for answer in candidate_answers:
gold_answer, gold_type = answer_json_to_strings(answer)
em_score, f1_score = get_metrics(predicted, gold_answer)
if gold_answer[0].strip() != "":
max_em_score = max(max_em_score, em_score)
max_f1_score = max(max_f1_score, f1_score)
if max_em_score == em_score and max_f1_score == f1_score:
max_type = gold_type
else:
print("Missing prediction for question: {}".format(query_id))
if qa_pair and qa_pair["answer"]:
max_type = answer_json_to_strings(qa_pair["answer"])[1]
else:
max_type = "number"
max_em_score = 0.0
max_f1_score = 0.0
            if max_em_score < 0.5 and not quiet and query_id in predicted_answers:
print(query_id + "\nQuestion:" + qa_pair["question"])
print("Gold: " + str(candidate_answers))
print("Pred: " + str(predicted))
print("Score: {} / {}".format(max_em_score, max_f1_score))
instance_exact_match.append(max_em_score)
instance_f1.append(max_f1_score)
type_to_em[max_type].append(max_em_score)
type_to_f1[max_type].append(max_f1_score)
global_em = np.mean(instance_exact_match)
global_f1 = np.mean(instance_f1)
print("Exact-match accuracy {0:.2f}".format(global_em * 100))
print("F1 score {0:.2f}".format(global_f1 * 100))
print("{0:.2f} & {1:.2f}".format(global_em * 100, global_f1 * 100))
print("----")
total = np.sum([len(v) for v in type_to_em.values()])
for typ in sorted(type_to_em.keys()):
print(
"{0}: {1} ({2:.2f}%)".format(
typ, len(type_to_em[typ]), 100.0 * len(type_to_em[typ]) / total
)
)
print(" Exact-match accuracy {0:.3f}".format(100.0 * np.mean(type_to_em[typ])))
print(" F1 score {0:.3f}".format(100.0 * np.mean(type_to_f1[typ])))
return global_em, global_f1
def evaluate_prediction_file(
prediction_path: str, gold_path: str, output_path: Optional[str] = None,
quiet: bool = False
) -> Tuple[float, float]:
"""
Takes a prediction file and a gold file and evaluates the predictions for each question in the
gold file. Both files must be json formatted and must have query_id keys, which are used to
match predictions to gold annotations. The gold file is assumed to have the format of the dev
set in the DROP data release. The prediction file must be a JSON dictionary keyed by query id,
where the value is either a JSON dictionary with an "answer" key, or just a string (or list of
strings) that is the answer. Writes a json with global_em and global_f1 metrics to file at
the specified output path, unless None is passed as output path.
"""
predicted_answers = json.load(open(prediction_path, encoding="utf-8"))
annotations = json.load(open(gold_path, encoding="utf-8"))
global_em, global_f1 = evaluate_json(annotations, predicted_answers, quiet)
# Output predictions to file if an output path is given
if output_path is not None:
output_dict = {"global_em": global_em, "global_f1": global_f1}
with open(output_path, "w", encoding="utf8") as outfile:
json.dump(output_dict, outfile)
return (global_em, global_f1)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="evaluate on drop dataset")
parser.add_argument(
"--gold_path",
type=str,
required=False,
default="drop_dataset_test.gold.json",
help="location of the gold file",
)
parser.add_argument(
"--prediction_path",
type=str,
required=False,
default="sample_predictions.json",
help="location of the prediction file",
)
parser.add_argument(
"--output_path",
type=str,
required=False,
default=None,
help="location of the output metrics file",
)
parser.add_argument(
"--quiet",
action='store_true',
default=False,
help="Do not print debug messages"
)
args = parser.parse_args()
evaluate_prediction_file(args.prediction_path, args.gold_path, args.output_path, args.quiet)
| DecomP-main | src/decomp/datasets_utils/drop_eval.py |
from __future__ import annotations
import json
from dataclasses import dataclass
from typing import Any
class BasicDataInstance(dict):
_REQUIRED_ATTRS = set([])
def __init__(self, input_data):
dict.__init__({})
self.update(input_data)
for item in type(self)._REQUIRED_ATTRS:
if item not in self:
self[item] = []
class QuestionGeneratorData(BasicDataInstance):
_REQUIRED_ATTRS = set([
"question_seq",
"subquestion_seq",
"answer_seq",
"command_seq",
"model_seq",
"operation_seq",
"score_seq",
"para_seq"
])
@dataclass
class InferenceStep:
score: float
participant: str
@dataclass
class QuestionGenerationStep(InferenceStep):
question: str
@dataclass
class QuestionParsingStep(InferenceStep):
operation: str
model: str
subquestion: str
@dataclass
class QuestionAnsweringStep(InferenceStep):
answer: str
@dataclass
class AnswerSubOperationStep(InferenceStep):
sub_operation: str
input_answer: Any
output_answer: Any
@dataclass
class Task:
task_question: QuestionGenerationStep
task_participant: str
class StructuredDataInstance(BasicDataInstance):
_REQUIRED_ATTRS = set(["inference_seq"])
def __init__(self, input_data):
super().__init__(input_data)
self.inference_ref_stack = [self]
self.task_stack = []
def add_answer(self, qastep: QuestionAnsweringStep):
self.get_current_inference_seq().append(qastep)
def add_qgen(self, qgenstep: QuestionGenerationStep):
self.get_current_inference_seq().append(qgenstep)
def add_qparse(self, qgenstep: QuestionParsingStep):
self.get_current_inference_seq().append(qgenstep)
def add_suboperation_step(self, subop_step: AnswerSubOperationStep):
self.get_current_inference_seq().append(subop_step)
def add_subdecomp(self, subdata: StructuredDataInstance):
self.get_current_inference_seq().append(subdata)
self.inference_ref_stack.append(subdata)
def add_task(self, task: Task):
self.task_stack.append(task)
def pop_task(self):
return self.task_stack.pop()
def has_tasks(self):
return len(self.task_stack) > 0
def get_last_step(self):
return self.get_current_inference_seq()[-1]
def get_last_generator(self):
for step in reversed(self.get_current_inference_seq()):
if isinstance(step, QuestionGenerationStep):
return step.participant
def popup_decomp_level(self):
if len(self.inference_ref_stack) == 1:
raise ValueError("Will pop up to an empty inference stack!\n{}".format(
self.get_printable_reasoning_chain()))
self.inference_ref_stack.pop()
def at_root_level(self):
return len(self.inference_ref_stack) == 1
def get_current_inference_seq(self):
return self.inference_ref_stack[-1]["inference_seq"]
def get_current_inference_data(self):
return self.inference_ref_stack[-1]
def get_current_aseq(self):
aseq = []
for step in self.get_current_inference_seq():
if isinstance(step, QuestionAnsweringStep):
aseq.append(step.answer)
return aseq
def get_current_qseq(self):
qseq = []
for step in self.get_current_inference_seq():
if isinstance(step, QuestionGenerationStep):
qseq.append(step.question)
return qseq
def get_current_subqseq(self):
qseq = []
for step in self.get_current_inference_seq():
if isinstance(step, QuestionParsingStep):
qseq.append(step.subquestion)
return qseq
def get_last_question(self):
for step in reversed(self.get_current_inference_seq()):
if isinstance(step, QuestionGenerationStep):
return step.question
raise ValueError("No last question! No question generated yet")
def get_last_answer(self):
for step in reversed(self.get_current_inference_seq()):
if isinstance(step, QuestionAnsweringStep):
return step.answer
raise ValueError("No last answer! No answer generated yet")
def get_printable_reasoning_chain(self, indent=""):
# chain = ": {}".format(self["qid"], self["question"])
chain = ""
for step in self["inference_seq"]:
if isinstance(step, AnswerSubOperationStep):
if chain:
chain += "\n"
chain += indent + "O: {}({}) ==> {}".format(step.sub_operation,
json.dumps(step.input_answer),
json.dumps(step.output_answer))
if isinstance(step, QuestionGenerationStep):
if chain:
chain += "\n"
chain += indent + "Q: " + step.question
if isinstance(step, QuestionAnsweringStep):
if chain:
chain += "\n"
chain += indent + "A: " + step.answer
if isinstance(step, StructuredDataInstance):
if chain:
chain += "\n"
chain += step.get_printable_reasoning_chain(indent=indent + " ")
return chain
def get_last_question_generator(self):
for step in reversed(self["inference_seq"]):
if isinstance(step, QuestionGenerationStep):
return step.participant
raise ValueError("No last question! No question generated yet")
| DecomP-main | src/decomp/inference/data_instances.py |
import json
from datetime import datetime
from dateutil.parser import parse
from decomp.models.llm_qa_model import LLMQAModel
from decomp.inference.data_instances import QuestionAnsweringStep, StructuredDataInstance
from decomp.inference.model_search import ParticipantModel
from decomp.inference.participant_qgen import QuestionGenParticipant
class LLMQAParticipantModel(ParticipantModel):
def __init__(self, next_model=None, end_state="[EOQ]", allow_empty_answers=False, **kwargs):
self.qa_model = LLMQAModel(**kwargs)
self.next_model = next_model
self.end_state = end_state
self.num_calls = 0
self.allow_empty_answers = allow_empty_answers
def return_model_calls(self):
return {"llm_qa": self.num_calls}
def update_state(self, answer, state):
if not self.allow_empty_answers and answer == "":
return []
new_state = state.copy()
new_state.data.add_answer(QuestionAnsweringStep(
answer=json.dumps(answer),
score=0,
participant=state.next
))
new_state.next = self.next_model if self.next_model else self.end_state
return new_state
def query(self, state, debug=False):
question = state.data.get_last_question()
context = state.data["paras"] if "paras" in state.data else ""
self.num_calls += 1
answer, facts_used = self.qa_model.ask_question(input_question=question, context=context)
return self.update_state(answer=answer, state=state)
class LLMQADecompParticipantModel(QuestionGenParticipant):
def __init__(self, **kwargs):
super().__init__(**kwargs)
def query(self, state, debug=False):
# Is this being called to generate a question?
if len(state.data.get_current_inference_seq()) == 0 or \
isinstance(state.data.get_last_step(), QuestionAnsweringStep):
# if there is no previous question or the last step was a QA Step
new_states = super().query(state=state, debug=debug)
else:
# or answer a question
new_state = state.copy()
question = state.data.get_last_question()
# take the last question and a decomposition level
new_state.data.add_subdecomp(StructuredDataInstance(input_data={
"qid": state.data["qid"],
"query": question,
"question": question
}))
# then generate the decomposition
new_states = super().query(state=new_state, debug=debug)
if not isinstance(new_states, list):
new_states = [new_states]
for new_state in new_states:
# if [EOQ] was generated, i.e. the module is done answering this question
if new_state.next == self.end_state and not new_state.data.at_root_level():
last_answer = new_state.data.get_last_answer()
new_state.data.popup_decomp_level()
new_state.data.add_answer(QuestionAnsweringStep(
answer=last_answer,
score=0,
participant=state.next
))
return new_states
def date_difference(date1: str, date2: str, units: str = "years"):
default_date = datetime(3000, 1, 1)
try:
date1_datetime = parse(date1, default=default_date)
date2_datetime = parse(date2, default=default_date)
except Exception:
# couldn't parse date
return None
# if one doesn't have month set, not usable
if date1_datetime.year == default_date.year and date1_datetime.month == default_date.month:
return None
if date2_datetime.year == default_date.year and date2_datetime.month == default_date.month:
return None
if date1_datetime.year == default_date.year and date2_datetime.year != default_date.year:
# one date is relative and other is not
date1_datetime = date1_datetime.replace(year=date2_datetime.year)
elif date2_datetime.year == default_date.year and date1_datetime.year != default_date.year:
# one date is relative and other is not
date2_datetime = date2_datetime.replace(year=date1_datetime.year)
if units == "days":
return (date1_datetime - date2_datetime).days
if units == "months":
return (date1_datetime.year - date2_datetime.year) * 12 + (
date1_datetime.month - date2_datetime.month)
if units == "years":
# human annotations are often on just the year value
return date1_datetime.year - date2_datetime.year
print("Unknown unit:" + units)
return None
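# Illustrative examples (assuming dateutil can parse both strings):
#   date_difference("25 December 2008", "4 July 1776")              -> 232  (years)
#   date_difference("March 2020", "January 2020", units="months")   -> 2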
def sort_without_duplicates(arr):
last_val = None
output_arr = []
for (key, val) in sorted(arr, key=lambda x: x[1]):
if val == last_val:
continue
else:
output_arr.append((key, val))
last_val = val
return output_arr
def sorted_key(arr):
return [x[0] for x in sort_without_duplicates(arr)]
def sorted_value(arr):
return [x[1] for x in sort_without_duplicates(arr)]
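# Illustrative example: sorted_key([("b", 2), ("a", 1), ("c", 2)]) -> ["a", "b"]
# (entries whose value duplicates an already-seen value are dropped after sorting by value)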
class ExprEvalQAParticipantModel(ParticipantModel):
def __init__(self, next_model=None, end_state="[EOQ]",
**kwargs):
"""
:param kwargs: Unused. Only here for ease of JSON specification
"""
self.next_model = next_model
self.end_state = end_state
self.num_calls = 0
def return_model_calls(self):
return {"expr_eval": self.num_calls}
def query(self, state, debug=False):
question = state.data.get_last_question()
try:
answer = eval(question)
if isinstance(answer, float):
answer = round(answer, 3)
elif isinstance(answer, set):
answer = list(answer)
except Exception:
# could not evaluate question
answer = None
self.num_calls += 1
new_state = state.copy()
new_state.data.add_answer(QuestionAnsweringStep(
answer=json.dumps(answer),
score=0,
participant=state.next
))
if answer is None:
return []
else:
new_state.next = self.next_model if self.next_model else self.end_state
return new_state
| DecomP-main | src/decomp/inference/participant_qa.py |
import json
import logging
import re
from json import JSONDecodeError
from decomp.inference.utils import flatten_list, get_answer_indices
from decomp.inference.data_instances import Task, QuestionGenerationStep, AnswerSubOperationStep, \
QuestionAnsweringStep, StructuredDataInstance, QuestionParsingStep
from decomp.inference.model_search import ParticipantModel
logger = logging.getLogger(__name__)
class RoutedExecutionParticipant(ParticipantModel):
def __init__(self, next_model=None, end_state="[EOQ]"):
self.next_model = next_model
self.end_state = end_state
self.per_model_calls = {"executer": 0, "op_executer": 0}
        self.operation_model_question_regex = re.compile(r"\((.+)\) \[([^\]]+)\] (.*)")
        self.model_question_regex = re.compile(r"\[([^\]]+)\] (.*)")
        self.operation_only_regex = re.compile(r"\((.+)\)(.*)")
def return_model_calls(self):
return self.per_model_calls
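    # Illustrative examples of the decomposition syntax routed by query() below
    # (model names such as "simpqa" are placeholders, not required model names):
    #   '[simpqa] Who founded Apple?'                        -> implicit select via one model call
    #   '(project_flat) [simpqa] Who is the parent of #1?'   -> project the question over the answers in #1
    #   '(project_flat) ["a", "b"]'                          -> operation-only merge of collected answers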
def query(self, state, debug=False):
"""The main function that interfaces with the overall search and
model controller, and manipulates the incoming data.
:param state: the state of controller and model flow.
:type state: launchpadqa.question_search.model_search.SearchState
:rtype: list
"""
## the data
data = state._data
question = data.get_last_question()
# if only model question matches, assume the operation to be select
model_question_match = self.model_question_regex.match(question)
operation = None
model = None
sub_question = None
arguments = None
if model_question_match:
operation = "select"
model = model_question_match.group(1)
sub_question = model_question_match.group(2)
else:
operation_model_question_match = self.operation_model_question_regex.match(question)
if operation_model_question_match:
operation = operation_model_question_match.group(1)
model = operation_model_question_match.group(2)
sub_question = operation_model_question_match.group(3)
            else:
                operation_only_match = self.operation_only_regex.match(question)
                if operation_only_match:
                    operation = operation_only_match.group(1)
                    arguments = operation_only_match.group(2)
if model is not None:
# If you have a model, pass the question to the model
try:
return self.add_model_questions(operation=operation,
model=model,
sub_question=sub_question,
state=state)
except ValueError as e:
logger.debug("Failed execution {}".format(data.get_printable_reasoning_chain()))
if debug:
raise e
return []
elif operation is not None:
# if you have an operation only, execute the operation
try:
return self.execute_operation(operation=operation,
arguments=arguments,
state=state)
except Exception as e:
logger.debug("Failed execution {}".format(data.get_printable_reasoning_chain()))
if debug:
raise e
return []
else:
logger.debug("No match for {}".format(question))
return []
def add_model_questions(self, operation, model, sub_question, state):
self.per_model_calls["executer"] += 1
data = state.data
question = data.get_last_question()
assignment = {}
for ans_idx, ans in enumerate(data.get_current_aseq()):
assignment["#" + str(ans_idx + 1)] = json.loads(ans)
if "paras" in data and len(data["paras"]) > 0:
assignment["#C"] = ["PARA_" + str(x) for x in range(len(data["paras"]))]
new_state = state.copy()
new_state.data.add_qparse(QuestionParsingStep(score=0,
participant=state.next,
operation=operation,
model=model,
subquestion=sub_question))
new_state.data.add_subdecomp(StructuredDataInstance(input_data={
"qid": state.data["qid"],
"query": question,
"question": question
}))
if operation.startswith("select"):
new_state = self.get_select_state(question=sub_question,
model=model,
operation=operation,
assignments=assignment,
state=new_state)
elif operation.startswith("project"):
new_state = self.get_project_state(question=sub_question,
model=model,
operation=operation,
assignments=assignment,
state=new_state)
elif operation.startswith("filter"):
new_state = self.get_filter_state(question=sub_question,
model=model,
operation=operation,
assignments=assignment,
state=new_state)
else:
raise ValueError("Not implemented! " + operation)
new_state.next = self.end_state
return new_state
def get_select_state(self, question, model, operation, assignments, state):
indices = get_answer_indices(question)
for index in indices:
idx_str = "#" + str(index)
if idx_str not in assignments:
raise ValueError("SELECT: Can not perform select operation with input arg: {}"
" No assignments yet!".format(idx_str))
question = question.replace(idx_str, json.dumps(assignments[idx_str]))
# add tasks in reverse order so they are popped correctly
# merge
merge_question = "(" + operation + ")"
state.data.add_task(Task(task_question=QuestionGenerationStep(
score=0, participant=state.next, question=merge_question),
task_participant=state.next))
state.data.add_task(Task(task_question=QuestionGenerationStep(
score=0, participant=state.next, question=question),
task_participant=model))
return state
def get_project_state(self, question, model, operation, assignments, state):
indices = get_answer_indices(question)
if "#C" in question and len(indices) == 0:
indices = ["C"]
if len(indices) > 1:
raise ValueError("PROJECT: Can not handle more than one answer idx: {} "
"for project: {}".format(indices, question))
if len(indices) == 0:
raise ValueError("PROJECT: Did not find any indices to project on " + str(question))
idx_str = "#" + str(indices[0])
if idx_str not in assignments:
raise ValueError("PROJECT: Can not perform project operation with input arg: {}"
" No assignments yet!".format(idx_str))
operation_seq = operation.split("_")
first_op = operation_seq[0]
if not isinstance(assignments[idx_str], list):
if isinstance(assignments[idx_str], str):
# make a list of single element
assignments[idx_str] = [assignments[idx_str]]
else:
raise ValueError("PROJECT: Can not perform project operation on a non-list input: {}"
" Operation: {} Question: {}".format(assignments[idx_str],
operation, question))
new_questions = []
for item in assignments[idx_str]:
if isinstance(item, list) and len(item) == 2:
item = tuple(item)
if first_op == "projectValues":
# item should be a tuple
if not isinstance(item, tuple):
raise ValueError("PROJECT: Item: {} is not a tuple in assignments: {}. "
"Expected for projectValues".format(item,
assignments[idx_str]))
new_question = question.replace(idx_str, json.dumps(item[1]))
elif first_op == "projectKeys":
# item should be a tuple
if not isinstance(item, tuple):
raise ValueError("PROJECT: Item: {} is not a tuple in assignments: {}. "
"Expected for projectKeys".format(item,
assignments[idx_str]))
new_question = question.replace(idx_str, json.dumps(item[0]))
else:
if isinstance(item, int) or isinstance(item, float):
item = str(item)
if not isinstance(item, str):
raise ValueError("PROJECT: Item: {} is not a string in assignments: {}. "
"Expected for project".format(item, assignments[idx_str]))
new_question = question.replace(idx_str, item)
new_questions.append(new_question)
# add tasks in reverse order so they are popped correctly
merge_question = "(" + operation + ") " + json.dumps(assignments[idx_str])
state.data.add_task(Task(task_question=QuestionGenerationStep(
score=0, participant=state.next, question=merge_question),
task_participant=state.next))
for q in reversed(new_questions):
state.data.add_task(Task(task_question=QuestionGenerationStep(
score=0, participant=state.next, question=q),
task_participant=model))
return state
def get_filter_state(self, question, model, operation, assignments, state):
q_answer_indices = get_answer_indices(question)
if len(q_answer_indices) > 1:
# more than one answer index in the question, use the operation to identify the
# answer idx to operate over
op_answer_indices = get_answer_indices(operation)
if len(op_answer_indices) != 1:
raise ValueError("Need one answer idx to be specified in filter operation since "
"multiple specified in the question! "
"Operation: {} Question: {}".format(operation, question))
else:
operation_idx = op_answer_indices[0]
for idx in q_answer_indices:
if idx != operation_idx:
# modify question directly to include the non-operated answer id
idx_str = "#" + str(idx)
if idx_str not in assignments:
raise ValueError(
"FILTER: Can not perform filter operation with input arg: {} "
"No assignments yet!".format(idx_str))
# print(question, idx_str, assignments)
question = question.replace(idx_str, json.dumps(assignments[idx_str]))
elif len(q_answer_indices) == 1:
operation_idx = q_answer_indices[0]
else:
raise ValueError("FILTER: No answer index in question for filter"
"Operation: {} Question: {}".format(operation, question))
idx_str = "#" + str(operation_idx)
if idx_str not in assignments:
raise ValueError("FILTER: Can not perform filter operation with input arg: {}"
" No assignments yet!".format(idx_str))
if not isinstance(assignments[idx_str], list):
raise ValueError("FILTER: Can not perform filter operation on a non-list input: {}"
" Operation: {} Question: {}".format(assignments[idx_str],
operation, question))
operation_seq = operation.split("_")
first_op = operation_seq[0]
new_questions = []
for item in assignments[idx_str]:
if isinstance(item, list) and len(item) == 2:
item = tuple(item)
if first_op.startswith("filterValues"):
# item should be a tuple
if not isinstance(item, tuple):
raise ValueError("FILTER: Item: {} is not a tuple in assignments: {}. "
"Expected for filterValues".format(item, assignments[idx_str]))
(key, value) = item
new_question = question.replace(idx_str, json.dumps(value))
elif first_op.startswith("filterKeys"):
if not isinstance(item, tuple):
raise ValueError("FILTER: Item: {} is not a tuple in assignments: {}. "
"Expected for filterKeys".format(item,
assignments[idx_str]))
(key, value) = item
new_question = question.replace(idx_str, json.dumps(key))
else:
if isinstance(item, int) or isinstance(item, float):
item = str(item)
if not isinstance(item, str):
raise ValueError("FILTER: Item: {} is not a string in assigments: {}. "
"Expected for filter".format(item, assignments[idx_str]))
new_question = question.replace(idx_str, item)
new_questions.append(new_question)
            logger.debug(new_question)
# add tasks in reverse order so they are popped correctly
merge_question = "(" + operation + ") " + json.dumps(assignments[idx_str])
state.data.add_task(Task(task_question=QuestionGenerationStep(
score=0, participant=state.next, question=merge_question),
task_participant=state.next))
for q in reversed(new_questions):
state.data.add_task(Task(task_question=QuestionGenerationStep(
score=0, participant=state.next, question=q),
task_participant=model))
return state
def is_true(self, ans):
if isinstance(ans, bool):
return ans
elif isinstance(ans, str):
answer = ans.lower()
return answer == "yes" or answer == "1" or answer == "true"
else:
raise ValueError("Can't verify truth value for {}".format(ans))
def execute_operation(self, operation, arguments, state):
self.per_model_calls["op_executer"] += 1
operation_seq = operation.split("_")
# ignore any argument e.g. filter(#2)
first_op = operation_seq[0].split("(")[0]
new_state = state.copy()
answer_seq = []
for a in new_state.data.get_current_aseq():
try:
answer_json = json.loads(a)
answer_seq.append(answer_json)
except JSONDecodeError:
answer_seq.append(a)
if arguments:
parsed_arguments = json.loads(arguments)
assert len(answer_seq) == len(parsed_arguments), \
"Answer_seq is not of the same length as parsed_args\n{}\n{}".format(
answer_seq, parsed_arguments
)
if first_op == "projectValues":
answers = list(zip([x[0] for x in parsed_arguments], answer_seq))
elif first_op == "projectKeys":
answers = list(zip(answer_seq, [x[1] for x in parsed_arguments]))
elif first_op == "project":
answers = list(zip(parsed_arguments, answer_seq))
elif first_op == "select":
assert len(answer_seq) == 1
answers = answer_seq[0]
elif first_op == "filter":
answers = [x[0] for x in zip(parsed_arguments, answer_seq) if self.is_true(x[1])]
else:
raise ValueError("Not implemented! " + first_op)
new_answers = self.execute_sub_operations(answers=answers, operation=operation)
new_state.data.add_suboperation_step(AnswerSubOperationStep(score=0,
participant=new_state.next,
sub_operation=operation,
input_answer=answers,
output_answer=new_answers))
new_state.data.add_answer(QuestionAnsweringStep(
answer=json.dumps(new_answers),
score=0,
participant=new_state.next
))
new_state.data.popup_decomp_level()
new_state.data.add_answer(QuestionAnsweringStep(
answer=json.dumps(new_answers),
score=0,
participant=new_state.next
))
new_state.next = self.end_state
return new_state
def execute_sub_operations(self, answers, operation):
operation_seq = operation.split("_")
for op in operation_seq[1:]:
if op == "flat":
answers = flatten_list(answers)
elif op == "unique":
if not isinstance(answers, list):
raise ValueError("SUBOP: unique can only be applied to list. "
"Input: {}".format(answers))
seen_objs = set()
output_answers = []
for item in answers:
# handle any structure: convert to str
item_str = json.dumps(item)
if item_str not in seen_objs:
output_answers.append(item)
seen_objs.add(item_str)
answers = output_answers
elif op == "keys":
answers = [x[0] for x in answers]
elif op == "values":
answers = [x[1] for x in answers]
else:
raise ValueError("SUBOP: Unknown sub-operation: {}".format(op))
return answers
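    # Illustrative example of the sub-operation chain (assuming flatten_list flattens a
    # list of lists): execute_sub_operations([["a", "b"], ["b", "c"]], "project_flat_unique")
    # would return ["a", "b", "c"].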
| DecomP-main | src/decomp/inference/participant_execution_routed.py |
from typing import Dict
from decomp.inference.dataset_readers import HotpotQAReader, DatasetReader, DropReader
from decomp.inference.participant_execution_routed import RoutedExecutionParticipant
from decomp.inference.participant_qa import ExprEvalQAParticipantModel, LLMQAParticipantModel, \
LLMQADecompParticipantModel
from decomp.inference.participant_qgen import QuestionGenParticipant, RandomGenParticipant
from decomp.inference.participant_util import DumpChainsParticipant, AnswerExtractor
MODEL_NAME_CLASS = {
"lmgen": QuestionGenParticipant, # for backward-compatibility
"randgen": RandomGenParticipant,
"dump_chains": DumpChainsParticipant,
"execute_router": RoutedExecutionParticipant,
"answer_extractor": AnswerExtractor,
"llmqa": LLMQAParticipantModel,
"llmqadecomp": LLMQADecompParticipantModel,
"expr_eval": ExprEvalQAParticipantModel
}
READER_NAME_CLASS: Dict[str, DatasetReader] = {
"hotpot": HotpotQAReader,
"drop": DropReader
}
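# Illustrative lookup (participant names and their kwargs normally come from the
# experiment config files; the keys used here are just examples):
#   participant_cls = MODEL_NAME_CLASS["llmqa"]
#   reader_cls = READER_NAME_CLASS["hotpot"]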
| DecomP-main | src/decomp/inference/constants.py |
DecomP-main | src/decomp/inference/__init__.py |
|
import logging
import math
import random
import re
from itertools import product, permutations
from decomp.inference.data_instances import QuestionGenerationStep, Task
from decomp.inference.model_search import ParticipantModel
from decomp.inference.utils import get_sequence_representation, stem_filter_tokenization, BLANK, \
stop_words_set
from decomp.models.generator import LMGenerator
from decomp.models.gpt3generator import GPT3Generator
from decomp.models.llm_client_generator import LLMClientGenerator
logger = logging.getLogger(__name__)
class QuestionGenParticipant(ParticipantModel):
def __init__(self, scale_by_step=1, add_eos=False, prompt_file="", next_model="execute",
end_state="[EOQ]", use_special_number_format=False, add_context=False,
max_steps=10, gen_model="lm", **kwargs):
self.scale_by_step = scale_by_step
self.add_eos = add_eos
if prompt_file:
with open(prompt_file, "r") as input_fp:
self.prompt = "".join(input_fp.readlines())
else:
self.prompt = None
self.next_model = next_model
self.end_state = end_state
self.num_calls = 0
self.use_special_number_format = use_special_number_format
self.add_context = add_context
self.max_steps = max_steps
self.gen_model = gen_model
if gen_model == "lm":
self.generator = LMGenerator(**kwargs)
elif gen_model == "gpt3":
self.generator = GPT3Generator(**kwargs)
elif gen_model == "llm_api":
self.generator = LLMClientGenerator(**kwargs)
else:
raise ValueError("Unknown gen_model: " + gen_model)
def return_model_calls(self):
return {self.gen_model + "gen": self.num_calls}
def query(self, state, debug=False):
"""The main function that interfaces with the overall search and
model controller, and manipulates the incoming data.
:param state: the state of controller and model flow.
:type state: launchpadqa.question_search.model_search.SearchState
:rtype: list
:raises: ValueError
"""
## first checks state of `json_input` to figure out how to format things
## the first question
data = state.data
question_seq = data.get_current_qseq()
answer_seq = data.get_current_aseq()
# avoid long chains since this is costly and most likely an error
if len(question_seq) >= self.max_steps:
new_state = state.copy()
output = self.end_state
new_state.next = self.end_state
new_state.data.add_qgen(QuestionGenerationStep(
question=output,
score=0,
participant=state.next
))
return new_state
if self.use_special_number_format:
gen_seq = "QC: " + data.get_current_inference_data()["query"]
for qidx, (ques, ans) in enumerate(zip(question_seq, answer_seq)):
gen_seq += "\nQ{}: {}\n#{}: {}".format(qidx + 1, ques, qidx + 1, ans)
gen_seq += "\nQ{}:".format(len(question_seq) + 1)
else:
gen_seq = get_sequence_representation(origq=data.get_current_inference_data()["query"],
question_seq=question_seq,
answer_seq=answer_seq, compq_marker="QC: ",
simpq_marker="\nQS: ", answer_marker="\nA: ",
interq_marker="\nQS: ")
if self.add_context and "paras" in state.data:
gen_seq = "\n".join(state.data["paras"]) + "\n\n" + gen_seq
if self.prompt:
gen_seq = self.prompt + "\n\n" + gen_seq
## eventual output
new_states = []
## go through generated questions
output_seq_scores = self.generator.generate_text_sequence(gen_seq)
self.num_calls += 1
observed_outputs = set()
for (output_seq, score) in output_seq_scores:
if debug:
print("--> " + output_seq + " : " + str(score))
output = output_seq.strip()
# catch potentially spurious duplicates
if output in observed_outputs:
continue
else:
observed_outputs.add(output)
# copy state
new_state = state.copy()
# lower is better, same as the scores returned by generate_text_sequence
assert score >= 0, "Score from generation assumed to be +ve. Got: {}! Needs to be " \
"+ve to ensure monotonically increasing scores as expected by the" \
" search.".format(score)
new_state._score += score
new_state.data.add_qgen(QuestionGenerationStep(
question=output,
score=score,
participant=state.next
))
if output == self.end_state:
new_state.next = self.end_state
else:
new_state.data.add_task(Task(task_question=None,
task_participant=new_state.next))
new_state.next = self.next_model
new_states.append(new_state)
return new_states
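# A hedged usage sketch (the prompt path and generator kwargs below are assumptions and
# are simply forwarded to LMGenerator/GPT3Generator/LLMClientGenerator; in practice the
# ModelController drives query() during search):
#
#   qgen = QuestionGenParticipant(prompt_file="prompts/hotpotqa_qgen.txt",  # hypothetical path
#                                 next_model="execute", gen_model="lm")
#   new_states = qgen.query(state)  # one SearchState per generated sub-question, with
#                                   # the generation score added onto the state's score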
class RandomGenParticipant(ParticipantModel):
def __init__(self, operations_file, model_questions_file, sample_operations, sample_questions,
max_steps=6, next_model="execute", topk_questions=True, end_state="[EOQ]"):
self.operations = self.load_operations(operations_file)
self.model_questions = self.load_model_questions(model_questions_file)
self.sample_operations = sample_operations
self.sample_questions = sample_questions
self.end_state = end_state
self.next_model = next_model
self.max_steps = max_steps
self.num_calls = 0
self.topk_questions = topk_questions
def return_model_calls(self):
return {"randomgen": self.num_calls}
def load_operations(self, operations_file):
with open(operations_file, "r") as input_fp:
ops = [x.strip() for x in input_fp.readlines()]
return ops
def load_model_questions(self, model_questions_file):
model_question_list = {}
with open(model_questions_file, "r") as input_fp:
for line in input_fp:
fields = line.strip().split("\t")
model = fields[0]
if model not in model_question_list:
model_question_list[model] = []
for question in fields[1:]:
question_entities = self.find_question_entities(question)
for q_ent in question_entities:
question = question.replace(q_ent, BLANK)
model_question_list[model].append(question)
# get unique questions
output_model_questions = []
for model_key, question_list in model_question_list.items():
unique_questions = list(set(question_list))
for q in unique_questions:
output_model_questions.append((model_key, q))
print(model_key, q)
logger.info("{} Questions in {} language".format(len(unique_questions),
model_key))
return output_model_questions
def select(self, population, sample_size_or_prop, samplek=True):
if sample_size_or_prop >= 1:
if samplek:
return random.sample(population, k=sample_size_or_prop)
else:
return population[:sample_size_or_prop]
else:
if samplek:
return random.sample(population, k=math.ceil(sample_size_or_prop * len(population)))
else:
return population[:math.ceil(sample_size_or_prop * len(population))]
def build_end_state(self, state):
new_state = state.copy()
new_state.next = self.end_state
output = self.end_state
new_state.data.add_qgen(QuestionGenerationStep(
question=output,
score=0,
participant=state.next
))
return new_state
def score_question(self, sub_question, complex_question):
sub_question_tokens = set(stem_filter_tokenization(sub_question))
if len(sub_question_tokens) == 0:
logger.debug("No tokens found in sub_question: {}!!".format(sub_question))
return 0.0
complex_question_tokens = set(stem_filter_tokenization(complex_question))
overlap = sub_question_tokens.intersection(complex_question_tokens)
# only penalized for sub-question length
return len(overlap) / len(sub_question_tokens)
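    # Hedged illustration (inputs are assumptions): for
    # sub_question="Who directed Titanic?" and
    # complex_question="Who directed the film Titanic and when was it released?",
    # every stemmed, stopword-filtered sub-question token also occurs in the complex
    # question, so the score is 1.0; unrelated sub-questions score closer to 0.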
def find_question_entities(self, origq):
entities = []
for m in re.finditer("\\b([A-Z]\w+)", origq):
if m.group(1).lower() not in stop_words_set:
entities.append(m.group(1))
for m in re.finditer("([0-9\.]+)", origq):
entities.append(m.group(1))
return entities
def replace_blanks(self, blanked_str, fillers):
num_blanks = blanked_str.count(BLANK)
output_strings = []
if num_blanks > 0:
filler_permutations = permutations(fillers, num_blanks)
for permutation in filler_permutations:
new_str = blanked_str
for filler_val in permutation:
new_str = new_str.replace(BLANK, filler_val, 1)
output_strings.append(new_str)
else:
output_strings = [blanked_str]
return output_strings
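    # Hedged example (inputs are assumptions): with BLANK == "__",
    # replace_blanks("the population of __ in __", ["#1", "France"]) enumerates the
    # ordered 2-permutations of the fillers and returns
    # ["the population of #1 in France", "the population of France in #1"].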
def query(self, state, debug=False):
data = state.data
num_steps = len(data.get_current_qseq())
# push for one extra step so that all shorter chains have been explored
if num_steps > self.max_steps:
return [self.build_end_state(state)]
origq = data["query"]
answer_strs = []
if num_steps == 0:
# hard-coded to only consider select in the first step
ops = ["select"]
else:
for x in range(num_steps):
answer_strs.append("#" + str(x + 1))
operations_pool = []
for op in self.operations:
operations_pool.extend(self.replace_blanks(op, answer_strs))
ops = self.select(operations_pool, self.sample_operations)
question_entities = self.find_question_entities(origq)
# hack to only use a filler in one of the steps
potential_fillers = question_entities + answer_strs
filler_pool = []
for filler in potential_fillers:
found_match = False
for question in state.data.get_current_subqseq():
if filler in question:
found_match = True
break
if not found_match:
filler_pool.append(filler)
questions_pool = [(m, newq) for (m, q) in self.model_questions
for newq in self.replace_blanks(q, filler_pool)]
if self.topk_questions:
sorted_model_questions = sorted(questions_pool, reverse=True,
key=lambda x: self.score_question(x[1], origq))
model_questions = self.select(sorted_model_questions, self.sample_questions,
samplek=False)
else:
model_questions = self.select(questions_pool, self.sample_questions, samplek=True)
op_model_qs_prod = product(ops, model_questions)
## eventual output
new_states = []
self.num_calls += 1
for (op, model_qs) in op_model_qs_prod:
(model, question) = model_qs
# no point repeating the exact same question
if question in state.data.get_current_subqseq():
continue
# copy state
new_state = state.copy()
new_state.next = self.next_model
new_state._score += 1
output = "({}) [{}] {}".format(op, model, question)
## add new question to question_seq
new_state.data.add_qgen(QuestionGenerationStep(
question=output,
score=1,
participant=state.next
))
# new_state.data["question_seq"].append(output)
# new_state.data["score_seq"].append(1)
# new_state.data["command_seq"].append("gen")
## mark the last output
# new_state.last_output = output
new_states.append(new_state)
##
# if len(data["question_seq"]) > 0:
# new_states.append(self.build_end_state(state))
return new_states
| DecomP-main | src/decomp/inference/participant_qgen.py |
import os
import re
from typing import List, Dict
from nltk import word_tokenize
from nltk.corpus import stopwords
from nltk.stem.porter import PorterStemmer
stemmer = PorterStemmer()
stop_words_set = set(stopwords.words('english'))
QUESTION_MARKER = " Q: "
COMPQ_MARKER = " QC: "
SIMPQ_MARKER = " QS: "
INTERQ_MARKER = " QI: "
ANSWER_MARKER = " A: "
EOQ_MARKER = "[EOQ]"
LIST_JOINER = " + "
BLANK = "__"
WH_WORDS = set(["who", "what", "where", "how", "why", "when", "which"])
def get_sequence_representation(origq: str, question_seq: List[str], answer_seq: List[str],
compq_marker: str = COMPQ_MARKER,
interq_marker: str = INTERQ_MARKER,
answer_marker: str = ANSWER_MARKER,
simpq_marker:str = SIMPQ_MARKER):
ret_seq = compq_marker + origq
if len(question_seq) != len(answer_seq):
raise ValueError("Number of generated questions and answers should match before"
"question generation. Qs: {} As: {}".format(question_seq, answer_seq))
for aidx in range(len(answer_seq)):
ret_seq += interq_marker
ret_seq += question_seq[aidx]
ret_seq += answer_marker + answer_seq[aidx]
ret_seq += simpq_marker
return ret_seq
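# Hedged example (question and answers are assumptions): with the default markers,
#   get_sequence_representation(
#       origq="Who directed the 1994 Best Picture winner?",
#       question_seq=["Which film won Best Picture in 1994?"],
#       answer_seq=["Forrest Gump"])
# returns
#   " QC: Who directed the 1994 Best Picture winner? QI: Which film won Best Picture in 1994? A: Forrest Gump QS: "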
def tokenize_str(input_str):
return word_tokenize(input_str)
def stem_tokens(token_arr):
return [stemmer.stem(token) for token in token_arr]
def filter_stop_tokens(token_arr):
return [token for token in token_arr if token not in stop_words_set]
def stem_filter_tokenization(input_str):
return stem_tokens(filter_stop_tokens(tokenize_str(input_str.lower())))
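# Hedged example (input is an assumption): stem_filter_tokenization("Who directed the films?")
# lowercases, tokenizes, drops stopwords ("who", "the") and Porter-stems the rest,
# yielding roughly ["direct", "film", "?"].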
# functions borrowed from AllenNLP to parse JSONNET with env vars
def get_environment_variables() -> Dict[str, str]:
"""
Wraps `os.environ` to filter out non-encodable values.
"""
return {key: value for key, value in os.environ.items() if _is_encodable(value)}
def _is_encodable(value: str) -> bool:
"""
We need to filter out environment variables that can't
be unicode-encoded to avoid a "surrogates not allowed"
error in jsonnet.
"""
# Idiomatically you'd like to not check the != b""
# but mypy doesn't like that.
return (value == "") or (value.encode("utf-8", "ignore") != b"")
def flatten_list(input_list):
output_list = []
for item in input_list:
if isinstance(item, list):
output_list.extend(flatten_list(item))
else:
output_list.append(item)
return output_list
def get_answer_indices(question_str):
    return [int(m.group(1)) for m in re.finditer(r"#(\d)", question_str)]
| DecomP-main | src/decomp/inference/utils.py
import argparse
import json
import logging
import os
import _jsonnet
from decomp.inference.constants import MODEL_NAME_CLASS, READER_NAME_CLASS
from decomp.inference.dataset_readers import DatasetReader
from decomp.inference.model_search import (
ModelController,
BestFirstDecomposer)
from decomp.inference.data_instances import StructuredDataInstance
from decomp.inference.utils import get_environment_variables
logger = logging.getLogger(__name__)
def parse_arguments():
    arg_parser = argparse.ArgumentParser(description='Run decomposed-prompting inference over a QA dataset')
arg_parser.add_argument('--input', type=str, required=False, help="Input QA file")
arg_parser.add_argument('--output', type=str, required=False, help="Output file")
arg_parser.add_argument('--config', type=str, required=True, help="Model configs")
arg_parser.add_argument('--reader', type=str, required=False, help="Dataset reader",
choices=READER_NAME_CLASS.keys())
arg_parser.add_argument('--debug', action='store_true', default=False,
help="Debug output")
arg_parser.add_argument('--demo', action='store_true', default=False,
help="Demo mode")
arg_parser.add_argument('--threads', default=1, type=int,
help="Number of threads (use MP if set to >1)")
arg_parser.add_argument('--n-examples', type=int)
return arg_parser.parse_args()
def build_decomposer_and_models(config_map):
print("loading participant models (might take a while)...")
model_map = {}
for key, value in config_map["models"].items():
class_name = value.pop("name")
if class_name not in MODEL_NAME_CLASS:
raise ValueError("No class mapped to model name: {} in MODEL_NAME_CLASS:{}".format(
class_name, MODEL_NAME_CLASS))
model = MODEL_NAME_CLASS[class_name](**value)
if key in config_map:
raise ValueError("Overriding key: {} with value: {} using instantiated model of type:"
" {}".format(key, config_map[key], class_name))
config_map[key] = model.query
model_map[key] = model
## instantiating
controller = ModelController(config_map, data_class=StructuredDataInstance)
decomposer = BestFirstDecomposer(controller)
return decomposer, model_map
def load_config(config_file):
if config_file.endswith(".jsonnet"):
ext_vars = get_environment_variables()
logger.info("Parsing config with external variables: {}".format(ext_vars))
config_map = json.loads(_jsonnet.evaluate_file(config_file, ext_vars=ext_vars))
else:
with open(config_file, "r") as input_fp:
config_map = json.load(input_fp)
return config_map
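# Hedged sketch of the config shape this loader expects (participant keys, class names
# and kwargs are assumptions; the real names come from MODEL_NAME_CLASS / READER_NAME_CLASS
# and the participants' constructors):
#
#   {
#       "start_state": "gen",
#       "end_state": "[EOQ]",
#       "models": {
#           "gen":     {"name": "<qgen model key>", "...": "..."},
#           "execute": {"name": "<executer model key>", "...": "..."}
#       },
#       "reader": {"name": "<reader key>", "add_paras": true}
#   }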
def load_reader(args, config_map):
if "reader" in config_map:
reader_config = config_map["reader"]
reader_name = reader_config.pop("name")
reader: DatasetReader = READER_NAME_CLASS[reader_name](**reader_config)
else:
reader: DatasetReader = READER_NAME_CLASS[args.example_reader]()
return reader
def demo_mode(args, reader, decomposer):
qid_example_map = {}
if args.input:
for eg in reader.read_examples(args.input):
qid = eg["qid"]
question = eg["query"]
answer = eg["answer"]
output_eg = {
"qid": qid,
"query": question,
"question": question,
}
if "paras" in eg:
output_eg["paras"] = eg["paras"]
qid_example_map[qid] = answer, output_eg
while True:
qid = input("QID: ")
if qid in qid_example_map:
answer, example = qid_example_map[qid]
print("using example from input file: " + json.dumps(example, indent=2))
else:
question = input("Question: ")
answer = None
example = {
"qid": qid,
"query": question,
"question": question,
}
final_state, other_states = decomposer.find_answer_decomp(example, debug=args.debug)
if final_state is None:
print("FAILED!")
else:
if args.debug:
for other_state in other_states:
data = other_state.data
print(data.get_printable_reasoning_chain())
print("Score: " + str(other_state._score))
data = final_state._data
chain = example["question"]
chain += "\n" + data.get_printable_reasoning_chain()
chain += " S: " + str(final_state._score)
chain += "\nG: {}".format(answer)
print(chain)
def inference_mode(args, reader, decomposer, model_map):
print("Running decomposer on examples")
qid_answer_chains = []
if not args.input:
raise ValueError("Input file must be specified when run in non-demo mode")
examples = reader.read_n_examples(args.input, args.n_examples)
if args.threads > 1:
import multiprocessing as mp
mp.set_start_method("spawn")
with mp.Pool(args.threads) as p:
qid_answer_chains = p.map(decomposer.return_qid_prediction, examples)
else:
for example in examples:
qid_answer_chains.append(
decomposer.return_qid_prediction(example, debug=args.debug))
num_call_metrics = {}
for pname, participant in model_map.items():
for model, num_calls in participant.return_model_calls().items():
print("Number of calls to {}: {}".format(pname + "." + model, num_calls))
num_call_metrics[pname + "." + model] = num_calls
metrics_json = {
"num_calls": num_call_metrics
}
metrics_file = os.path.join(os.path.dirname(args.output), "metrics.json")
with open(metrics_file, "w") as output_fp:
json.dump(metrics_json, output_fp)
predictions = {x[0]: x[1] for x in qid_answer_chains}
with open(args.output, "w") as output_fp:
json.dump(predictions, output_fp)
chains = [x[2] for x in qid_answer_chains]
ext_index = args.output.rfind(".")
chain_tsv = args.output[:ext_index] + "_chains.tsv"
with open(chain_tsv, "w") as output_fp:
for chain in chains:
output_fp.write(chain + "\n")
if __name__ == "__main__":
parsed_args = parse_arguments()
if parsed_args.debug:
logging.basicConfig(level=logging.DEBUG)
else:
logging.basicConfig(level=logging.ERROR)
config_map = load_config(parsed_args.config)
decomposer, model_map = build_decomposer_and_models(config_map)
example_reader = load_reader(args=parsed_args, config_map=config_map)
if parsed_args.demo:
demo_mode(args=parsed_args, reader=example_reader, decomposer=decomposer)
else:
inference_mode(args=parsed_args, reader=example_reader,
decomposer=decomposer, model_map=model_map)
| DecomP-main | src/decomp/inference/configurable_inference.py |
import copy
import heapq
import json
import logging
from decomp.inference.data_instances import BasicDataInstance
class ParticipantModel(object):
"""Base model in this case for coordinating different models. Provides a general
class to structure all contributing models (in this case, by defining a single
function `query`, which is the single method that is called for each model).
"""
def query(self, state, debug=False):
"""The main function that interfaces with the overall search and
model controller, and manipulates the incoming data.
:param state: the state of controller and model flow.
        :type state: decomp.inference.model_search.SearchState
:rtype: list
"""
raise NotImplementedError("Must implement to work inside of controller!")
def return_model_calls(self):
"""
:return: a dict of <model_name, number of calls> made by this participant
"""
raise NotImplementedError("Must implement to work inside of controller!")
class ModelController(object):
"""This class is a `ModelController` that takes multiple (arbitrary)
models and a control specification of how to interface the different
models (which can be thought of as a kind of state graph). For example
"""
def __init__(self, model_list,
data_class=BasicDataInstance):
"""Create an instance of a ComplexModel
:param model_list: a list of models with identifiers and
control flow.
:type model_list: dict
"""
if "start_state" not in model_list:
raise ValueError('Must specify start state')
if "end_state" not in model_list:
raise ValueError('Must specify end state')
self.model_list = model_list
self.data_class = data_class
def execute(self, state, debug=False):
"""Executes a command and query
:param state: a given state in search
:type state: SearchState (defined here)
:returns: a list of output
:rtype: list
"""
if state.next not in self.model_list:
self.logger.error("Can not handle next state: " + state.next)
return []
try:
model_func = self.model_list[state.next]
model_output = model_func(state, debug=debug)
if not isinstance(model_output, list):
return [model_output]
return model_output
except RecursionError:
return []
except Exception as e:
self.logger.error(e, exc_info=True)
raise ValueError('Error caught during model execution: %s' % e)
def init_data(self, data_instance):
"""Create an initialized version of the data object
        that will get passed around.
:param data_instance: any arbitrary piece of data.
:rtype: self.data_class
"""
return self.data_class(data_instance)
@property
def start_state(self):
return self.model_list["start_state"]
@property
def end_state(self):
return self.model_list["end_state"]
@property
def logger(self):
"""Returns a logger instance
"""
level = '.'.join([__name__, type(self).__name__])
return logging.getLogger(level)
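# Hedged illustration of what `model_list` looks like once configurable_inference has
# replaced each model config with the instantiated participant's query method
# (participant names are assumptions):
#
#   model_list = {
#       "start_state": "gen",
#       "end_state": "[EOQ]",
#       "gen": qgen_participant.query,
#       "execute": executer_participant.query,
#   }
#   controller = ModelController(model_list, data_class=StructuredDataInstance)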
## utility class for controlling and recording search state
class SearchState(object):
"""Tracks and records the state of a given search.
"""
def __init__(self, json_data,
command,
score=0.0):
"""Keep track of different stages in the state
        :param json_data: some basic, json representation of data
"""
self._data = json_data
self._score = score
self._next = command
def copy(self):
"""Does a deep copy of the state
:returns: new search state
"""
new_data = copy.deepcopy(self._data)
new_score = copy.deepcopy(self._score)
new_next = copy.deepcopy(self._next)
return SearchState(
new_data,
new_next,
new_score
)
## important to implement to work
## with the heap datastructures
def __lt__(self, other):
if self.score < other.score:
return True
return False
def __eq__(self, other):
if self.score == other.score:
return True
return False
@property
def data(self):
return self._data
@property
def score(self):
return self._score
@property
def next(self):
return self._next
@next.setter
def next(self, value):
self._next = value
@data.setter
def data(self, value):
self._data = value
class QuestionSearchBase(object):
def __init__(self, model_controller):
"""Create a `QuestionDecomposer instance`
:param model_ensemble: a collection of models with control instructions
"""
self.controller = model_controller
def find_answer_decomp(self, json_input, debug=False):
"""Main question decomposition function
:param json_input: the input to all of the models.
"""
raise NotImplementedError
def return_qid_prediction(self, example, debug=False):
final_state, other_states = self.find_answer_decomp(example, debug=debug)
if final_state is None:
print(example["question"] + " FAILED!")
chain = example["qid"] + "\t" + example["question"]
return (example["qid"], "", chain)
else:
data = final_state._data
chain = example["qid"] + "\t" + example["question"]
chain += "\n" + data.get_printable_reasoning_chain()
chain += "\nS: " + str(final_state._score)
print(chain)
final_answer = data.get_last_answer()
try:
json_answer = json.loads(final_answer)
# use this only if list (ignore numbers, etc)
if isinstance(json_answer, list) or isinstance(json_answer, str):
final_answer = json_answer
except ValueError:
# Not a valid json ignore
pass
return (example["qid"], final_answer, chain)
class BestFirstDecomposer(QuestionSearchBase):
def find_answer_decomp(self, json_input, debug=False):
"""Run the question decomposer. The main function here is to use
the controller to pass around inputs to the different models, then
        keep track of the search state and terminate when the shortest path
has been found.
:param json_input: some input to the model
"""
## start state of controller : e.g., generate
start_command = self.controller.start_state
start_data = self.controller.init_data(json_input)
## min-heap
heap = []
init_input = json_input["question"] if json_input["question"] else "UNKNOWN"
if debug:
print("[START QUERY] : %s" % init_input)
init_state = SearchState(start_data, ## initial input
start_command, ## starting point
score=0.0, ## starting score
)
## push it to heap
heapq.heappush(heap, init_state)
## start the main search
while True:
if len(heap) == 0:
if debug: print("[FAILED]: %s" % init_input)
return None, []
## pop from heap
current_state = heapq.heappop(heap)
if debug:
print("[MIN_STATE] command=%s" % (current_state.next))
# if current_state.next is None:
# print(current_state.data.get_printable_reasoning_chain())
# current_state.next = current_state.data.get_last_generator()
## end state
if current_state.next == self.controller.end_state:
if current_state.data.has_tasks():
new_task = current_state.data.pop_task()
# print("popped task!")
# print(new_task)
new_state = current_state.copy()
if new_task.task_question:
new_state.data.add_qgen(new_task.task_question)
new_state.next = new_task.task_participant
heapq.heappush(heap, new_state)
continue
else:
if debug:
print("[TERMINATED]")
return current_state, heap
## generate output and new stated
for new_state in self.controller.execute(current_state, debug=debug):
## push onto heap
heapq.heappush(heap, new_state)
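# Hedged sketch of the example dict expected by find_answer_decomp / return_qid_prediction
# (field values are assumptions; the readers in dataset_readers.py produce this shape):
#
#   example = {
#       "qid": "example-0",                                       # hypothetical id
#       "question": "Who directed the 1994 Best Picture winner?",
#       "query": "Who directed the 1994 Best Picture winner?",
#   }
#   qid, answer, chain = decomposer.return_qid_prediction(example)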
| DecomP-main | src/decomp/inference/model_search.py |
import re
from decomp.inference.data_instances import QuestionAnsweringStep
from decomp.inference.model_search import ParticipantModel
from decomp.inference.utils import get_sequence_representation
class DumpChainsParticipant(ParticipantModel):
def __init__(self, output_file, next_model="gen"):
self.output_file = output_file
self.next_model = next_model
self.num_calls = 0
def return_model_calls(self):
return {"dumpchains": self.num_calls}
def dump_chain(self, state):
data = state.data
origq = data["query"]
qchain = data.get_current_qseq()
achain = data.get_current_aseq()
sequence = get_sequence_representation(origq=origq, question_seq=qchain, answer_seq=achain)
ans = achain[-1]
with open(self.output_file, 'a') as chains_fp:
chains_fp.write(data["qid"] + "\t" + sequence + "\t" + ans + "\n")
def query(self, state, debug=False):
self.num_calls += 1
if len(state.data["question_seq"]) > 0:
self.dump_chain(state)
new_state = state.copy()
new_state.next = self.next_model
return new_state
class AnswerExtractor(ParticipantModel):
def __init__(self, regex, next_model="[EOQ]"):
self.regex = re.compile(regex)
self.next_model = next_model
self.num_calls = 0
def return_model_calls(self):
return {"extract": self.num_calls}
def query(self, state, debug=False):
self.num_calls += 1
new_state = state.copy()
question = new_state.data.get_last_question()
m = self.regex.match(question)
if m:
answer = m.group(1)
if debug:
print("EXT: " + answer)
new_state.data.add_answer(QuestionAnsweringStep(
answer=answer,
score=0,
participant=state.next
))
# new_state.data["answer_seq"].append(answer)
# new_state.data["para_seq"].append("")
# new_state.data["command_seq"].append("qa")
# new_state.data["model_seq"].append("extractor")
# new_state.data["operation_seq"].append("")
# new_state.data["subquestion_seq"].append(question)
## change output
new_state.last_output = answer
new_state.next = self.next_model
return new_state
else:
# No match
print("Answer Extractor did not find a match for input regex in {}".format(question))
return []
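# Hedged usage sketch (the regex below is an assumption; the real pattern comes from the
# inference config):
#
#   extractor = AnswerExtractor(regex=r".*answer is (.*)")
#   # If the last generated question matches, group(1) is recorded as a
#   # QuestionAnsweringStep and the state moves on to next_model ("[EOQ]" by default).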
| DecomP-main | src/decomp/inference/participant_util.py |
import json
class DatasetReader:
def __init__(self, add_paras=False, add_gold_paras=False):
self.add_paras = add_paras
self.add_gold_paras = add_gold_paras
def read_examples(self, file):
return NotImplementedError("read_examples not implemented by " + self.__class__.__name__)
class HotpotQAReader(DatasetReader):
def read_examples(self, file):
with open(file, 'r') as input_fp:
input_json = json.load(input_fp)
for entry in input_json:
output = {
"qid": entry["_id"],
"query": entry["question"],
# metadata
"answer": entry["answer"],
"question": entry["question"],
"type": entry.get("type", ""),
"level": entry.get("level", "")
}
if self.add_paras:
title_doc_map = self.get_paras(entry)
if self.add_gold_paras:
output_paras = self.get_gold_paras(entry, title_doc_map)
else:
output_paras = title_doc_map
output["paras"] = [title + "||" + " ".join(sentences)
for title, sentences in output_paras.items()]
yield output
def read_n_examples(self, file, n):
with open(file, 'r') as input_fp:
input_json = json.load(input_fp)
read = 0
for entry in input_json:
output = {
"qid": entry["_id"],
"query": entry["question"],
# metadata
"answer": entry["answer"],
"question": entry["question"],
"type": entry.get("type", ""),
"level": entry.get("level", "")
}
if self.add_paras:
title_doc_map = self.get_paras(entry)
if self.add_gold_paras:
output_paras = self.get_gold_paras(entry, title_doc_map)
else:
output_paras = title_doc_map
output["paras"] = [title + "||" + " ".join(sentences)
for title, sentences in output_paras.items()]
if n is None or read < n:
yield output
read += 1
else:
break
def get_gold_paras(self, entry, title_doc_map):
supporting_facts = entry["supporting_facts"]
collected_facts = {}
for (doc, idx) in supporting_facts:
if doc not in collected_facts:
collected_facts[doc] = title_doc_map[doc]
return collected_facts
def get_paras(self, entry):
# collect title->doc map
title_doc_map = {}
for title, document in entry["context"]:
if title in title_doc_map:
                # Don't raise exception. Expected behavior with 2WikiMultihopQA :(
print("Two documents with same title: {} in {}".format(title, entry["_id"]))
continue
title_doc_map[title] = [doc.strip() for doc in document]
return title_doc_map
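# Hedged illustration of the "paras" entries built above (title and sentences are
# assumptions): each paragraph is flattened to "<title>||<space-joined sentences>", e.g.
#   "Forrest Gump||Forrest Gump is a 1994 American film. It was directed by Robert Zemeckis."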
def format_drop_answer(answer_json):
if answer_json["number"]:
return answer_json["number"]
if len(answer_json["spans"]):
return answer_json["spans"]
# only date possible
date_json = answer_json["date"]
if not (date_json["day"] or date_json["month"] or date_json["year"]):
print("Number, Span or Date not set in {}".format(answer_json))
return None
return date_json["day"] + "-" + date_json["month"] + "-" + date_json["year"]
class DropReader(DatasetReader):
def read_examples(self, file):
with open(file, 'r') as input_fp:
input_json = json.load(input_fp)
for paraid, item in input_json.items():
para = item["passage"]
for qa_pair in item["qa_pairs"]:
question = qa_pair["question"]
qid = qa_pair["query_id"]
answer = format_drop_answer(qa_pair["answer"])
output = {
"qid": qid,
"query": question,
# metadata
"answer": answer,
"question": question
}
if self.add_paras:
output["paras"] = [para]
yield output
def read_n_examples(self, file, n):
with open(file, 'r') as input_fp:
input_json = json.load(input_fp)
read = 0
for paraid, item in input_json.items():
para = item["passage"]
for qa_pair in item["qa_pairs"]:
question = qa_pair["question"]
qid = qa_pair["query_id"]
answer = format_drop_answer(qa_pair["answer"])
output = {
"qid": qid,
"query": question,
# metadata
"answer": answer,
"question": question
}
if self.add_paras:
output["paras"] = [para]
if n is None or read < n:
yield output
read += 1
else:
break
| DecomP-main | src/decomp/inference/dataset_readers.py |
from setuptools import setup, find_packages
import os
# PEP0440 compatible formatted version, see:
# https://www.python.org/dev/peps/pep-0440/
#
# release markers:
# X.Y
# X.Y.Z # For bugfix releases
#
# pre-release markers:
# X.YaN # Alpha release
# X.YbN # Beta release
# X.YrcN # Release Candidate
# X.Y # Final release
# version.py defines the VERSION and VERSION_SHORT variables.
# We use exec here so we don't import allennlp_models whilst setting up.
VERSION = {}
with open("allennlp_models/version.py") as version_file:
exec(version_file.read(), VERSION)
# Load requirements.txt with a special case for allennlp so we can handle
# cross-library integration testing.
with open("requirements.txt") as requirements_file:
import re
def requirement_is_allennlp(req: str) -> bool:
if req == "allennlp":
return True
if re.match(r"^allennlp[>=<]", req):
return True
if re.match(r"^(git\+)?(https|ssh)://(git@)?github\.com/.*/allennlp\.git", req):
return True
if re.match(
r"^allennlp\[.*\] @ (git\+)?(https|ssh)://(git@)?github\.com/.*/allennlp\.git", req
):
return True
return False
def fix_url_dependencies(req: str) -> str:
"""Pip and setuptools disagree about how URL dependencies should be handled."""
m = re.match(
r"^(git\+)?(https|ssh)://(git@)?github\.com/([\w-]+)/(?P<name>[\w-]+)\.git", req
)
if m is None:
return req
else:
return f"{m.group('name')} @ {req}"
install_requirements = []
allennlp_requirements = []
for line in requirements_file:
line = line.strip()
if line.startswith("#") or len(line) <= 0:
continue
if requirement_is_allennlp(line):
allennlp_requirements.append(line)
else:
install_requirements.append(line)
assert len(allennlp_requirements) == 1
allennlp_override = os.environ.get("ALLENNLP_VERSION_OVERRIDE")
if allennlp_override is not None:
if len(allennlp_override) > 0:
allennlp_requirements = [allennlp_override]
else:
allennlp_requirements = []
install_requirements.extend(allennlp_requirements)
install_requirements = [fix_url_dependencies(req) for req in install_requirements]
setup(
name="allennlp_models",
version=VERSION["VERSION"],
description=("Officially supported models for the AllenNLP framework"),
long_description=open("README.md").read(),
long_description_content_type="text/markdown",
classifiers=[
"Intended Audience :: Science/Research",
"Development Status :: 3 - Alpha",
"License :: OSI Approved :: Apache Software License",
"Programming Language :: Python :: 3",
"Topic :: Scientific/Engineering :: Artificial Intelligence",
],
keywords="allennlp NLP deep learning machine reading semantic parsing parsers",
url="https://github.com/allenai/allennlp-models",
author="Allen Institute for Artificial Intelligence",
author_email="[email protected]",
license="Apache",
packages=find_packages(
exclude=["*.tests", "*.tests.*", "tests.*", "tests"],
),
install_requires=install_requirements,
include_package_data=True,
package_data={
"allennlp_models": [
"modelcards/*.json",
"taskcards/*.json",
"structured_prediction/tools/srl-eval.pl",
]
},
python_requires=">=3.7.1",
zip_safe=False,
)
| allennlp-models-main | setup.py |
import os
import glob
from typing import Dict, Union, Any
from allennlp.common import Params
from allennlp.predictors import Predictor
from allennlp.common.model_card import ModelCard
from allennlp.common.task_card import TaskCard
from allennlp.common.plugins import import_plugins
def get_tasks() -> Dict[str, TaskCard]:
"""
Returns a mapping of [`TaskCard`](/models/common/task_card#taskcard)s for all
tasks.
"""
tasks = {}
task_card_paths = os.path.join(
os.path.dirname(os.path.realpath(__file__)), "taskcards", "*.json"
)
for task_card_path in glob.glob(task_card_paths):
if "template" not in task_card_path:
task_card = TaskCard.from_params(params=Params.from_file(task_card_path))
tasks[task_card.id] = task_card
return tasks
def get_pretrained_models() -> Dict[str, ModelCard]:
"""
Returns a mapping of [`ModelCard`](/models/common/model_card#modelcard)s for all
available pretrained models.
"""
import_plugins()
pretrained_models = {}
model_card_paths = os.path.join(
os.path.dirname(os.path.realpath(__file__)), "modelcards", "*.json"
)
for model_card_path in glob.glob(model_card_paths):
if "template" not in model_card_path:
model_card = ModelCard.from_params(params=Params.from_file(model_card_path))
pretrained_models[model_card.id] = model_card
return pretrained_models
def load_predictor(
model_id: str,
pretrained_models: Dict[str, ModelCard] = None,
cuda_device: int = -1,
overrides: Union[str, Dict[str, Any]] = None,
) -> Predictor:
"""
Returns the `Predictor` corresponding to the given `model_id`.
The `model_id` should be key present in the mapping returned by
[`get_pretrained_models`](#get_pretrained_models).
"""
pretrained_models = pretrained_models or get_pretrained_models()
model_card = pretrained_models[model_id]
if model_card.model_usage.archive_file is None:
raise ValueError(f"archive_file is required in the {model_card}")
return Predictor.from_path(
model_card.model_usage.archive_file,
predictor_name=model_card.registered_predictor_name,
cuda_device=cuda_device,
overrides=overrides,
)
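# Hedged usage sketch (the model id is an assumption; valid ids are the keys returned
# by get_pretrained_models()):
#
#   from allennlp_models.pretrained import load_predictor
#   predictor = load_predictor("rc-bidaf")
#   predictor.predict(question="...", passage="...")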
| allennlp-models-main | allennlp_models/pretrained.py |
import os
_MAJOR = "2"
_MINOR = "10"
_PATCH = "1"
# This is mainly for nightly builds which have the suffix ".dev$DATE". See
# https://semver.org/#is-v123-a-semantic-version for the semantics.
_SUFFIX = os.environ.get("ALLENNLP_MODELS_VERSION_SUFFIX", "")
VERSION_SHORT = "{0}.{1}".format(_MAJOR, _MINOR)
VERSION = "{0}.{1}.{2}{3}".format(_MAJOR, _MINOR, _PATCH, _SUFFIX)
| allennlp-models-main | allennlp_models/version.py |