python_code | repo_name | file_path
---|---|---
from typing import Dict, Iterable, List, Mapping, Optional, Set, Tuple, Union
import pytest
import torch
from allennlp.common import Lazy, Params, Registrable
from allennlp.common.from_params import FromParams, takes_arg, remove_optional, create_kwargs
from allennlp.common.testing import AllenNlpTestCase
from allennlp.data import DataLoader, DatasetReader, Tokenizer
from allennlp.models import Model
from allennlp.models.archival import load_archive
from allennlp.common.checks import ConfigurationError
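# MyClass is the minimal FromParams fixture reused throughout these tests: it takes a
# required `my_int` and an optional `my_bool`, so it exercises both required and
# defaulted constructor arguments.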
class MyClass(FromParams):
def __init__(self, my_int: int, my_bool: bool = False) -> None:
self.my_int = my_int
self.my_bool = my_bool
class TestFromParams(AllenNlpTestCase):
def test_takes_arg(self):
def bare_function(some_input: int) -> int:
return some_input + 1
assert takes_arg(bare_function, "some_input")
assert not takes_arg(bare_function, "some_other_input")
class SomeClass:
total = 0
def __init__(self, constructor_param: str) -> None:
self.constructor_param = constructor_param
def check_param(self, check: str) -> bool:
return self.constructor_param == check
@classmethod
def set_total(cls, new_total: int) -> None:
cls.total = new_total
assert takes_arg(SomeClass, "self")
assert takes_arg(SomeClass, "constructor_param")
assert not takes_arg(SomeClass, "check")
assert takes_arg(SomeClass.check_param, "check")
assert not takes_arg(SomeClass.check_param, "other_check")
assert takes_arg(SomeClass.set_total, "new_total")
assert not takes_arg(SomeClass.set_total, "total")
def test_remove_optional(self):
optional_type = Optional[Dict[str, str]]
bare_type = remove_optional(optional_type) # type: ignore
bare_bare_type = remove_optional(bare_type)
assert bare_type == Dict[str, str]
assert bare_bare_type == Dict[str, str]
assert remove_optional(Optional[str]) == str
assert remove_optional(str) == str
def test_from_params(self):
my_class = MyClass.from_params(Params({"my_int": 10}), my_bool=True)
assert isinstance(my_class, MyClass)
assert my_class.my_int == 10
assert my_class.my_bool
def test_good_error_message_when_passing_non_params(self):
from allennlp.nn import InitializerApplicator
# This was how we used to take initializer params. We want to be sure we give a reasonable
# error message when something like this is passed to FromParams.
params = Params({"initializer": [["regex1", "uniform"], ["regex2", "orthogonal"]]})
with pytest.raises(ConfigurationError, match="dictionary.*InitializerApplicator"):
InitializerApplicator.from_params(params=params.pop("initializer"))
def test_create_kwargs(self):
kwargs = create_kwargs(MyClass, MyClass, Params({"my_int": 5}), my_bool=True, my_float=4.4)
# my_float should not be included because it's not a param of the MyClass constructor
assert kwargs == {"my_int": 5, "my_bool": True}
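# The next test covers **extras handling: keyword arguments passed to `from_params`
# are forwarded to constructors (and to custom `from_params` methods) that accept
# them, while unrecognized extras such as `unwanted` are dropped rather than raising.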
def test_extras(self):
from allennlp.common.registrable import Registrable
class A(Registrable):
pass
@A.register("b")
class B(A):
def __init__(self, size: int, name: str) -> None:
self.size = size
self.name = name
@A.register("c")
class C(A):
def __init__(self, size: int, name: str) -> None:
self.size = size
self.name = name
# custom from params
@classmethod
def from_params(cls, params: Params, size: int, **extras) -> "C": # type: ignore
name = params.pop("name")
return cls(size=size, name=name)
# Check that extras get passed, even though A doesn't need them.
params = Params({"type": "b", "size": 10})
b = A.from_params(params, name="extra")
assert b.name == "extra"
assert b.size == 10
# Check that extra extras don't get passed.
params = Params({"type": "b", "size": 10})
b = A.from_params(params, name="extra", unwanted=True)
assert b.name == "extra"
assert b.size == 10
# Now the same with a custom from_params.
params = Params({"type": "c", "name": "extra_c"})
c = A.from_params(params, size=20)
assert c.name == "extra_c"
assert c.size == 20
# Check that extra extras don't get passed.
params = Params({"type": "c", "name": "extra_c"})
c = A.from_params(params, size=20, unwanted=True)
assert c.name == "extra_c"
assert c.size == 20
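# Extras should also flow through nested, annotated containers: D below takes
# List/Tuple/Dict/Set arguments of Registrable types, and the extras given to the
# outer `from_params` call (a, c, n) are expected to reach each element's custom
# `from_params`.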
def test_extras_for_custom_classes(self):
from allennlp.common.registrable import Registrable
class BaseClass(Registrable):
pass
class BaseClass2(Registrable):
pass
@BaseClass.register("A")
class A(BaseClass):
def __init__(self, a: int, b: int, val: str) -> None:
self.a = a
self.b = b
self.val = val
def __hash__(self):
return self.b
def __eq__(self, other):
return self.b == other.b
@classmethod
def from_params(cls, params: Params, a: int, **extras) -> "A": # type: ignore
# A custom from params
b = params.pop_int("b")
val = params.pop("val", "C")
params.assert_empty(cls.__name__)
return cls(a=a, b=b, val=val)
@BaseClass2.register("B")
class B(BaseClass2):
def __init__(self, c: int, b: int) -> None:
self.c = c
self.b = b
@classmethod
def from_params(cls, params: Params, c: int, **extras) -> "B": # type: ignore
b = params.pop_int("b")
params.assert_empty(cls.__name__)
return cls(c=c, b=b)
@BaseClass.register("E")
class E(BaseClass):
def __init__(self, m: int, n: int) -> None:
self.m = m
self.n = n
@classmethod
def from_params(cls, params: Params, **extras2) -> "E": # type: ignore
m = params.pop_int("m")
params.assert_empty(cls.__name__)
n = extras2["n"]
return cls(m=m, n=n)
class C:
pass
@BaseClass.register("D")
class D(BaseClass):
def __init__(
self,
arg1: List[BaseClass],
arg2: Tuple[BaseClass, BaseClass2],
arg3: Dict[str, BaseClass],
arg4: Set[BaseClass],
arg5: List[BaseClass],
) -> None:
self.arg1 = arg1
self.arg2 = arg2
self.arg3 = arg3
self.arg4 = arg4
self.arg5 = arg5
vals = [1, 2, 3]
params = Params(
{
"type": "D",
"arg1": [
{"type": "A", "b": vals[0]},
{"type": "A", "b": vals[1]},
{"type": "A", "b": vals[2]},
],
"arg2": [{"type": "A", "b": vals[0]}, {"type": "B", "b": vals[0]}],
"arg3": {
"class_1": {"type": "A", "b": vals[0]},
"class_2": {"type": "A", "b": vals[1]},
},
"arg4": [
{"type": "A", "b": vals[0], "val": "M"},
{"type": "A", "b": vals[1], "val": "N"},
{"type": "A", "b": vals[1], "val": "N"},
],
"arg5": [{"type": "E", "m": 9}],
}
)
extra = C()
tval1 = 5
tval2 = 6
d = BaseClass.from_params(params=params, extra=extra, a=tval1, c=tval2, n=10)
# Tests for List
assert len(d.arg1) == len(vals)
assert isinstance(d.arg1, list)
assert isinstance(d.arg1[0], A)
assert all(x.b == y for x, y in zip(d.arg1, vals))
assert all(x.a == tval1 for x in d.arg1)
# Tests for Tuple
assert isinstance(d.arg2, tuple)
assert isinstance(d.arg2[0], A)
assert isinstance(d.arg2[1], B)
assert d.arg2[0].a == tval1
assert d.arg2[1].c == tval2
assert d.arg2[0].b == d.arg2[1].b == vals[0]
# Tests for Dict
assert isinstance(d.arg3, dict)
assert isinstance(d.arg3["class_1"], A)
assert d.arg3["class_1"].a == d.arg3["class_2"].a == tval1
assert d.arg3["class_1"].b == vals[0]
assert d.arg3["class_2"].b == vals[1]
# Tests for Set
assert isinstance(d.arg4, set)
assert len(d.arg4) == 2
assert any(x.val == "M" for x in d.arg4)
assert any(x.val == "N" for x in d.arg4)
# Tests for custom extras parameters
assert isinstance(d.arg5, list)
assert isinstance(d.arg5[0], E)
assert d.arg5[0].m == 9
assert d.arg5[0].n == 10
def test_no_constructor(self):
params = Params({"type": "just_spaces"})
Tokenizer.from_params(params)
def test_union(self):
class A(FromParams):
def __init__(self, a: Union[int, List[int]]) -> None:
self.a = a
class B(FromParams):
def __init__(self, b: Union[A, List[A]]) -> None:
# Really you would want to be sure that `self.b` has a consistent type, but for
# this test we'll ignore that.
self.b = b
params = Params({"a": 3})
a = A.from_params(params)
assert a.a == 3
params = Params({"a": [3, 4, 5]})
a = A.from_params(params)
assert a.a == [3, 4, 5]
params = Params({"b": {"a": 3}})
b = B.from_params(params)
assert isinstance(b.b, A)
assert b.b.a == 3
params = Params({"b": [{"a": 3}, {"a": [4, 5]}]})
b = B.from_params(params)
assert isinstance(b.b, list)
assert b.b[0].a == 3
assert b.b[1].a == [4, 5]
def test_crazy_nested_union(self):
class A(FromParams):
def __init__(self, a: Union[int, List[int]]) -> None:
self.a = a
class B(FromParams):
def __init__(self, b: Union[A, List[A]]) -> None:
# Really you would want to be sure that `self.b` has a consistent type, but for
# this test we'll ignore that.
self.b = b
class C(FromParams):
def __init__(self, c: Union[A, B, Dict[str, A]]) -> None:
# Really you would want to be sure that `self.c` has a consistent type, but for
# this test we'll ignore that.
self.c = c
# This is a contrived, ugly example (why would you want to duplicate names in a nested
# structure like this??), but it demonstrates a potential bug when dealing with mutable
# parameters. If you're not careful about keeping the parameters un-mutated in two
# separate places, you'll end up with a B, or with a dict that's missing the 'b' key.
params = Params({"c": {"a": {"a": 3}, "b": {"a": [4, 5]}}})
c = C.from_params(params)
assert isinstance(c.c, dict)
assert c.c["a"].a == 3
assert c.c["b"].a == [4, 5]
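# With a Union of mutually castable primitives (int and float), the concrete type
# found in the params should be preserved rather than coerced to whichever member
# is listed first in the annotation.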
def test_union_of_castable_types(self):
class IntFloat(FromParams):
def __init__(self, a: Union[int, float]) -> None:
self.a = a
class FloatInt(FromParams):
def __init__(self, a: Union[float, int]) -> None:
self.a = a
float_param_str = '{"a": 1.0}'
int_param_str = '{"a": 1}'
import json
for expected_type, param_str in [(int, int_param_str), (float, float_param_str)]:
for cls in [IntFloat, FloatInt]:
c = cls.from_params(Params(json.loads(param_str)))
assert type(c.a) == expected_type
def test_invalid_type_conversions(self):
class A(FromParams):
def __init__(self, a: int) -> None:
self.a = a
with pytest.raises(TypeError):
A.from_params(Params({"a": "1"}))
with pytest.raises(TypeError):
A.from_params(Params({"a": 1.0}))
def test_dict(self):
from allennlp.common.registrable import Registrable
class A(Registrable):
pass
@A.register("b")
class B(A):
def __init__(self, size: int) -> None:
self.size = size
class C(Registrable):
pass
@C.register("d")
class D(C):
def __init__(self, items: Dict[str, A]) -> None:
self.items = items
params = Params(
{
"type": "d",
"items": {"first": {"type": "b", "size": 1}, "second": {"type": "b", "size": 2}},
}
)
d = C.from_params(params)
assert isinstance(d.items, dict)
assert len(d.items) == 2
assert all(isinstance(key, str) for key in d.items.keys())
assert all(isinstance(value, B) for value in d.items.values())
assert d.items["first"].size == 1
assert d.items["second"].size == 2
def test_dict_not_params(self):
class A(FromParams):
def __init__(self, counts: Dict[str, int]) -> None:
self.counts = counts
params = Params({"counts": {"a": 10, "b": 20}})
a = A.from_params(params)
assert isinstance(a.counts, dict)
assert not isinstance(a.counts, Params)
def test_list(self):
from allennlp.common.registrable import Registrable
class A(Registrable):
pass
@A.register("b")
class B(A):
def __init__(self, size: int) -> None:
self.size = size
class C(Registrable):
pass
@C.register("d")
class D(C):
def __init__(self, items: List[A]) -> None:
self.items = items
params = Params(
{"type": "d", "items": [{"type": "b", "size": 1}, {"type": "b", "size": 2}]}
)
d = C.from_params(params)
assert isinstance(d.items, list)
assert len(d.items) == 2
assert all(isinstance(item, B) for item in d.items)
assert d.items[0].size == 1
assert d.items[1].size == 2
def test_tuple(self):
from allennlp.common.registrable import Registrable
class A(Registrable):
pass
@A.register("b")
class B(A):
def __init__(self, size: int) -> None:
self.size = size
class C(Registrable):
pass
@C.register("d")
class D(C):
def __init__(self, name: str) -> None:
self.name = name
class E(Registrable):
pass
@E.register("f")
class F(E):
def __init__(self, items: Tuple[A, C]) -> None:
self.items = items
params = Params(
{"type": "f", "items": [{"type": "b", "size": 1}, {"type": "d", "name": "item2"}]}
)
f = E.from_params(params)
assert isinstance(f.items, tuple)
assert len(f.items) == 2
assert isinstance(f.items[0], B)
assert isinstance(f.items[1], D)
assert f.items[0].size == 1
assert f.items[1].name == "item2"
def test_set(self):
from allennlp.common.registrable import Registrable
class A(Registrable):
def __init__(self, name: str) -> None:
self.name = name
def __eq__(self, other):
return self.name == other.name
def __hash__(self):
return hash(self.name)
@A.register("b")
class B(A):
pass
class C(Registrable):
pass
@C.register("d")
class D(C):
def __init__(self, items: Set[A]) -> None:
self.items = items
params = Params(
{
"type": "d",
"items": [
{"type": "b", "name": "item1"},
{"type": "b", "name": "item2"},
{"type": "b", "name": "item2"},
],
}
)
d = C.from_params(params)
assert isinstance(d.items, set)
assert len(d.items) == 2
assert all(isinstance(item, B) for item in d.items)
assert any(item.name == "item1" for item in d.items)
assert any(item.name == "item2" for item in d.items)
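# The two tests below exercise the special `_pretrained` key: a submodule spec in a
# model config can point at an archive_file and module_path, in which case the weights
# are copied from the trained archive and `freeze` controls requires_grad; pointing at
# a module of the wrong type should fail with a ConfigurationError.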
def test_transferring_of_modules(self):
model_archive = str(
self.FIXTURES_ROOT / "basic_classifier" / "serialization" / "model.tar.gz"
)
trained_model = load_archive(model_archive).model
config_file = str(self.FIXTURES_ROOT / "basic_classifier" / "experiment_seq2seq.jsonnet")
model_params = Params.from_file(config_file).pop("model").as_dict(quiet=True)
# Override only text_field_embedder (freeze) and seq2seq_encoder params (tunable)
model_params["text_field_embedder"] = {
"_pretrained": {
"archive_file": model_archive,
"module_path": "_text_field_embedder",
"freeze": True,
}
}
model_params["seq2seq_encoder"] = {
"_pretrained": {
"archive_file": model_archive,
"module_path": "_seq2seq_encoder",
"freeze": False,
}
}
transfer_model = Model.from_params(vocab=trained_model.vocab, params=Params(model_params))
# TextFieldEmbedder and Seq2SeqEncoder parameters should be transferred
for trained_parameter, transfer_parameter in zip(
trained_model._text_field_embedder.parameters(),
transfer_model._text_field_embedder.parameters(),
):
assert torch.all(trained_parameter == transfer_parameter)
for trained_parameter, transfer_parameter in zip(
trained_model._seq2seq_encoder.parameters(),
transfer_model._seq2seq_encoder.parameters(),
):
assert torch.all(trained_parameter == transfer_parameter)
# Any other module's parameters shouldn't be the same (e.g. _feedforward)
for trained_parameter, transfer_parameter in zip(
trained_model._feedforward.parameters(),
transfer_model._feedforward.parameters(),
):
assert torch.all(trained_parameter != transfer_parameter)
# TextFieldEmbedder should have requires_grad Off
for parameter in transfer_model._text_field_embedder.parameters():
assert not parameter.requires_grad
# Seq2SeqEncoder should have requires_grad On
for parameter in transfer_model._seq2seq_encoder.parameters():
assert parameter.requires_grad
def test_transferring_of_modules_ensures_type_consistency(self):
model_archive = str(
self.FIXTURES_ROOT / "basic_classifier" / "serialization" / "model.tar.gz"
)
trained_model = load_archive(model_archive).model
config_file = str(self.FIXTURES_ROOT / "basic_classifier" / "experiment_seq2seq.jsonnet")
model_params = Params.from_file(config_file).pop("model").as_dict(quiet=True)
# Override only text_field_embedder and make it load Seq2SeqEncoder
model_params["text_field_embedder"] = {
"_pretrained": {
"archive_file": model_archive,
"module_path": "_seq2seq_encoder._module",
}
}
with pytest.raises(ConfigurationError):
Model.from_params(vocab=trained_model.vocab, params=Params(model_params))
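# A bare string ("random") where a nested object is expected should be treated as that
# object's registered type name, here producing a RandomSampler for the batch
# sampler's `sampler` argument.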
def test_bare_string_params(self):
dataset = [1]
class TestLoader(Registrable):
@classmethod
def from_partial_objects(cls, data_loader: Lazy[DataLoader]) -> DataLoader:
return data_loader.construct(dataset=dataset)
TestLoader.register("test", constructor="from_partial_objects")(TestLoader)
data_loader = TestLoader.from_params(
Params(
{
"type": "test",
"data_loader": {
"batch_sampler": {
"type": "basic",
"batch_size": 2,
"drop_last": True,
"sampler": "random",
}
},
}
)
)
assert data_loader.batch_sampler.sampler.__class__.__name__ == "RandomSampler"
assert data_loader.batch_sampler.sampler.data_source is dataset
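# Params that are only handled by the superclass constructor (`lazy` and
# `cache_directory` here) should still be accepted when building a registered
# subclass that forwards **kwargs.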
def test_kwargs_are_passed_to_superclass(self):
params = Params(
{"type": "text_classification_json", "lazy": True, "cache_directory": "tmp"}
)
reader = DatasetReader.from_params(params)
assert reader.lazy is True
assert str(reader._cache_directory) == "tmp"
def test_kwargs_with_multiple_inheritance(self):
# Basic idea: have two identical classes, differing only in the order of their multiple
# inheritance, and make sure that passing kwargs up to the super class works in both cases.
class A(Registrable):
def __init__(self, a: int):
self.a = a
from numbers import Number
@A.register("b1")
class B1(A, Number):
def __init__(self, b: float, **kwargs):
super().__init__(**kwargs)
self.b = b
@A.register("b2")
class B2(Number, A):
def __init__(self, b: float, **kwargs):
super().__init__(**kwargs)
self.b = b
b = B1.from_params(params=Params({"a": 4, "b": 5}))
assert b.b == 5
assert b.a == 4
b = B2.from_params(params=Params({"a": 4, "b": 5}))
assert b.b == 5
assert b.a == 4
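# When a subclass redefines a parameter (B's `a: str`, plus a default for `x`),
# from_params should use the subclass signature rather than falling back to the
# superclass annotation: `x` becomes optional and `a` is parsed as a string.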
def test_only_infer_superclass_params_if_unknown(self):
from allennlp.common.registrable import Registrable
class BaseClass(Registrable):
def __init__(self):
self.x = None
self.a = None
self.rest = None
@BaseClass.register("a")
class A(BaseClass):
def __init__(self, a: int, x: int, **kwargs):
super().__init__()
self.x = x
self.a = a
self.rest = kwargs
@BaseClass.register("b")
class B(A):
def __init__(self, a: str, x: int = 42, **kwargs):
super().__init__(x=x, a=-1, raw_a=a, **kwargs)
params = Params({"type": "b", "a": "123"})
# The param `x` should not be required as it has a default value in `B`
# The correct type of the param `a` should be inferred from `B` as well.
instance = BaseClass.from_params(params)
assert instance.x == 42
assert instance.a == -1
assert len(instance.rest) == 1
assert type(instance.rest["raw_a"]) == str
assert instance.rest["raw_a"] == "123"
def test_kwargs_are_passed_to_deeper_superclasses(self):
from allennlp.common.registrable import Registrable
class BaseClass(Registrable):
def __init__(self):
self.a = None
self.b = None
self.c = None
@BaseClass.register("a")
class A(BaseClass):
def __init__(self, a: str):
super().__init__()
self.a = a
@BaseClass.register("b")
class B(A):
def __init__(self, b: str, **kwargs):
super().__init__(**kwargs)
self.b = b
@BaseClass.register("c")
class C(B):
def __init__(self, c, **kwargs):
super().__init__(**kwargs)
self.c = c
params = Params({"type": "c", "a": "a_value", "b": "b_value", "c": "c_value"})
instance = BaseClass.from_params(params)
assert instance.a == "a_value"
assert instance.b == "b_value"
assert instance.c == "c_value"
def test_lazy_construction_can_happen_multiple_times(self):
test_string = "this is a test"
extra_string = "extra string"
class ConstructedObject(FromParams):
def __init__(self, string: str, extra: str):
self.string = string
self.extra = extra
class Testing(FromParams):
def __init__(self, lazy_object: Lazy[ConstructedObject]):
first_time = lazy_object.construct(extra=extra_string)
second_time = lazy_object.construct(extra=extra_string)
assert first_time.string == test_string
assert first_time.extra == extra_string
assert second_time.string == test_string
assert second_time.extra == extra_string
Testing.from_params(Params({"lazy_object": {"string": test_string}}))
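# Lazy[...] arguments: a plain Lazy annotation is required (a missing key raises), a
# default of Lazy(ConstructedObject) lets construct() supply the remaining arguments,
# a default of None stays None unless params are given, and an explicit null in the
# params keeps an otherwise-defaulted Lazy at None.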
def test_optional_vs_required_lazy_objects(self):
class ConstructedObject(FromParams):
def __init__(self, a: int):
self.a = a
class Testing(FromParams):
def __init__(
self,
lazy1: Lazy[ConstructedObject],
lazy2: Lazy[ConstructedObject] = Lazy(ConstructedObject),
lazy3: Lazy[ConstructedObject] = None,
lazy4: Optional[Lazy[ConstructedObject]] = Lazy(ConstructedObject),
) -> None:
self.lazy1 = lazy1.construct()
self.lazy2 = lazy2.construct(a=2)
self.lazy3 = None if lazy3 is None else lazy3.construct()
self.lazy4 = None if lazy4 is None else lazy4.construct(a=1)
test1 = Testing.from_params(Params({"lazy1": {"a": 1}}))
assert test1.lazy1.a == 1
assert test1.lazy2.a == 2
assert test1.lazy3 is None
assert test1.lazy4 is not None
test2 = Testing.from_params(Params({"lazy1": {"a": 1}, "lazy2": {"a": 3}}))
assert test2.lazy1.a == 1
assert test2.lazy2.a == 3
assert test2.lazy3 is None
assert test2.lazy4 is not None
test3 = Testing.from_params(Params({"lazy1": {"a": 1}, "lazy3": {"a": 3}, "lazy4": None}))
assert test3.lazy1.a == 1
assert test3.lazy2.a == 2
assert test3.lazy3 is not None
assert test3.lazy3.a == 3
assert test3.lazy4 is None
with pytest.raises(ConfigurationError, match='key "lazy1" is required'):
Testing.from_params(Params({}))
def test_iterable(self):
from allennlp.common.registrable import Registrable
class A(Registrable):
pass
@A.register("b")
class B(A):
def __init__(self, size: int) -> None:
self.size = size
class C(Registrable):
pass
@C.register("d")
class D(C):
def __init__(self, items: Iterable[A]) -> None:
self.items = items
params = Params(
{"type": "d", "items": [{"type": "b", "size": 1}, {"type": "b", "size": 2}]}
)
d = C.from_params(params)
assert isinstance(d.items, Iterable)
items = list(d.items)
assert len(items) == 2
assert all(isinstance(item, B) for item in items)
assert items[0].size == 1
assert items[1].size == 2
def test_mapping(self):
from allennlp.common.registrable import Registrable
class A(Registrable):
pass
@A.register("b")
class B(A):
def __init__(self, size: int) -> None:
self.size = size
class C(Registrable):
pass
@C.register("d")
class D(C):
def __init__(self, items: Mapping[str, A]) -> None:
self.items = items
params = Params(
{
"type": "d",
"items": {"first": {"type": "b", "size": 1}, "second": {"type": "b", "size": 2}},
}
)
d = C.from_params(params)
assert isinstance(d.items, Mapping)
assert len(d.items) == 2
assert all(isinstance(key, str) for key in d.items.keys())
assert all(isinstance(value, B) for value in d.items.values())
assert d.items["first"].size == 1
assert d.items["second"].size == 2
def test_extra_parameters_are_not_allowed_when_there_is_no_constructor(self):
class A(FromParams):
pass
with pytest.raises(ConfigurationError, match="Extra parameters"):
A.from_params(Params({"some_spurious": "key", "value": "pairs"}))
def test_explicit_kwargs_always_passed_to_constructor(self):
class Base(FromParams):
def __init__(self, lazy: bool = False, x: int = 0) -> None:
self.lazy = lazy
self.x = x
class A(Base):
def __init__(self, **kwargs) -> None:
assert "lazy" in kwargs
super().__init__(**kwargs)
A.from_params(Params({"lazy": False}))
class B(Base):
def __init__(self, **kwargs) -> None:
super().__init__(lazy=True, **kwargs)
b = B.from_params(Params({}))
assert b.lazy is True
def test_raises_when_there_are_no_implementations(self):
class A(Registrable):
pass
with pytest.raises(ConfigurationError, match="no registered concrete types"):
A.from_params("nonexistent_class")
with pytest.raises(ConfigurationError, match="no registered concrete types"):
A.from_params(Params({"some_spurious": "key", "value": "pairs"}))
with pytest.raises(ConfigurationError, match="no registered concrete types"):
A.from_params(Params({}))
# Some paths through the code are different if there is a constructor here versus not. We
# don't actually go through this logic anymore, but it's here as a regression test.
class B(Registrable):
def __init__(self):
pass
with pytest.raises(ConfigurationError, match="no registered concrete types"):
B.from_params("nonexistent_class")
with pytest.raises(ConfigurationError, match="no registered concrete types"):
B.from_params(Params({"some_spurious": "key", "value": "pairs"}))
with pytest.raises(ConfigurationError, match="no registered concrete types"):
B.from_params(Params({}))
def test_from_params_raises_error_on_wrong_parameter_name_in_optional_union(self):
class NestedClass(FromParams):
def __init__(self, varname: Optional[str] = None):
self.varname = varname
class WrapperClass(FromParams):
def __init__(self, nested_class: Optional[Union[str, NestedClass]] = None):
if isinstance(nested_class, str):
nested_class = NestedClass(varname=nested_class)
self.nested_class = nested_class
with pytest.raises(ConfigurationError):
WrapperClass.from_params(
params=Params({"nested_class": {"wrong_varname": "varstring"}})
)
def test_from_params_handles_base_class_kwargs(self):
class Foo(FromParams):
def __init__(self, a: int, b: str = None, **kwargs) -> None:
self.a = a
self.b = b
for key, value in kwargs.items():
setattr(self, key, value)
foo = Foo.from_params(Params({"a": 2, "b": "hi"}))
assert foo.a == 2
assert foo.b == "hi"
foo = Foo.from_params(Params({"a": 2, "b": "hi", "c": {"2": "3"}}))
assert foo.a == 2
assert foo.b == "hi"
assert foo.c == {"2": "3"}
class Bar(Foo):
def __init__(self, a: int, b: str, d: int, **kwargs) -> None:
super().__init__(a, b=b, **kwargs)
self.d = d
bar = Bar.from_params(Params({"a": 2, "b": "hi", "c": {"2": "3"}, "d": 0}))
assert bar.a == 2
assert bar.b == "hi"
assert bar.c == {"2": "3"}
assert bar.d == 0
class Baz(Foo):
def __init__(self, a: int, b: Optional[str] = "a", **kwargs) -> None:
super().__init__(a, b=b, **kwargs)
baz = Baz.from_params(Params({"a": 2, "b": None}))
assert baz.b is None
baz = Baz.from_params(Params({"a": 2}))
assert baz.b == "a"
def test_from_params_base_class_kwargs_crashes_if_params_not_handled(self):
class Bar(FromParams):
def __init__(self, c: str = None) -> None:
self.c = c
class Foo(Bar):
def __init__(self, a: int, b: str = None, **kwargs) -> None:
super().__init__(**kwargs)
self.a = a
self.b = b
foo = Foo.from_params(Params({"a": 2, "b": "hi", "c": "some value"}))
assert foo.a == 2
assert foo.b == "hi"
assert foo.c == "some value"
with pytest.raises(TypeError, match="invalid_key"):
Foo.from_params(Params({"a": 2, "b": "hi", "invalid_key": "some value"}))
def test_from_params_handles_kwargs_in_non_from_params_registered_class(self):
class Bar(Registrable):
pass
class Baz:
def __init__(self, a: int) -> None:
self.a = a
@Bar.register("foo")
class Foo(Baz):
def __init__(self, a: int, b: str = None, **kwargs) -> None:
super().__init__(a)
self.b = b
for key, value in kwargs.items():
setattr(self, key, value)
foo = Bar.from_params(Params({"type": "foo", "a": 2, "b": "hi"}))
assert foo.a == 2
assert foo.b == "hi"
foo = Bar.from_params(Params({"type": "foo", "a": 2, "b": "hi", "c": {"2": "3"}}))
assert foo.a == 2
assert foo.b == "hi"
assert foo.c == {"2": "3"}
def test_from_params_does_not_pass_extras_to_non_from_params_registered_class(self):
class Bar(Registrable):
pass
class Baz:
def __init__(self, a: int, c: Dict[str, str] = None) -> None:
self.a = a
self.c = c
@Bar.register("foo")
class Foo(Baz):
def __init__(self, a: int, b: str = None, **kwargs) -> None:
super().__init__(a, **kwargs)
self.b = b
foo = Bar.from_params(Params({"type": "foo", "a": 2, "b": "hi"}))
assert foo.a == 2
assert foo.b == "hi"
assert foo.c is None
foo = Bar.from_params(
params=Params({"type": "foo", "a": 2, "b": "hi", "c": {"2": "3"}}), extra="4"
)
assert foo.a == 2
assert foo.b == "hi"
assert foo.c == {"2": "3"}
def test_from_params_child_has_kwargs_base_implicit_constructor(self):
class Foo(FromParams):
pass
class Bar(Foo):
def __init__(self, a: int, **kwargs) -> None:
self.a = a
bar = Bar.from_params(Params({"a": 2}))
assert bar.a == 2
def test_from_params_has_args(self):
class Foo(FromParams):
def __init__(self, a: int, *args) -> None:
self.a = a
foo = Foo.from_params(Params({"a": 2}))
assert foo.a == 2
| allennlp-master | tests/common/from_params_test.py |
from collections import Counter
import os
import pathlib
import json
import time
import shutil
import pytest
import responses
from requests.exceptions import ConnectionError
from allennlp.common import file_utils
from allennlp.common.file_utils import (
_resource_to_filename,
filename_to_url,
get_from_cache,
cached_path,
_split_s3_path,
open_compressed,
CacheFile,
_Meta,
_find_entries,
inspect_cache,
remove_cache_entries,
)
from allennlp.common.testing import AllenNlpTestCase
def set_up_glove(url: str, byt: bytes, change_etag_every: int = 1000):
# Mock response for the datastore url that returns glove vectors
responses.add(
responses.GET,
url,
body=byt,
status=200,
content_type="application/gzip",
stream=True,
headers={"Content-Length": str(len(byt))},
)
etags_left = change_etag_every
etag = "0"
def head_callback(_):
"""
Writing this as a callback allows different responses to different HEAD requests.
In our case, we're going to change the ETag header every `change_etag_every`
requests, which will allow us to simulate having a new version of the file.
"""
nonlocal etags_left, etag
headers = {"ETag": etag}
# countdown and change ETag
etags_left -= 1
if etags_left <= 0:
etags_left = change_etag_every
etag = str(int(etag) + 1)
return (200, headers, "")
responses.add_callback(responses.HEAD, url, callback=head_callback)
class TestFileUtils(AllenNlpTestCase):
def setup_method(self):
super().setup_method()
self.glove_file = self.FIXTURES_ROOT / "embeddings/glove.6B.100d.sample.txt.gz"
with open(self.glove_file, "rb") as glove:
self.glove_bytes = glove.read()
def test_cached_path_offline(self, monkeypatch):
# Ensures `cached_path` just returns the path to the latest cached version
# of the resource when there's no internet connection.
# First we mock the `_http_etag` method so that it raises a `ConnectionError`,
# like it would if there was no internet connection.
def mocked_http_etag(url: str):
raise ConnectionError
monkeypatch.setattr(file_utils, "_http_etag", mocked_http_etag)
url = "https://github.com/allenai/allennlp/blob/master/some-fake-resource"
# We'll create two cached versions of this fake resource using two different etags.
etags = ['W/"3e5885bfcbf4c47bc4ee9e2f6e5ea916"', 'W/"3e5885bfcbf4c47bc4ee9e2f6e5ea918"']
filenames = [
os.path.join(self.TEST_DIR, _resource_to_filename(url, etag)) for etag in etags
]
for filename, etag in zip(filenames, etags):
meta = _Meta(
resource=url, cached_path=filename, creation_time=time.time(), etag=etag, size=2341
)
meta.to_file()
with open(filename, "w") as f:
f.write("some random data")
# os.path.getmtime is only accurate to the second.
time.sleep(1.1)
# Should know to ignore lock files and extraction directories.
with open(filenames[-1] + ".lock", "w") as f:
f.write("")
os.mkdir(filenames[-1] + "-extracted")
# The version corresponding to the last etag should be returned, since
# that one has the latest "last modified" time.
assert get_from_cache(url, cache_dir=self.TEST_DIR) == filenames[-1]
# We also want to make sure this works when the latest cached version doesn't
# have a corresponding etag.
filename = os.path.join(self.TEST_DIR, _resource_to_filename(url))
meta = _Meta(resource=url, cached_path=filename, creation_time=time.time(), size=2341)
with open(filename, "w") as f:
f.write("some random data")
assert get_from_cache(url, cache_dir=self.TEST_DIR) == filename
def test_resource_to_filename(self):
for url in [
"http://allenai.org",
"http://allennlp.org",
"https://www.google.com",
"http://pytorch.org",
"https://allennlp.s3.amazonaws.com" + "/long" * 20 + "/url",
]:
filename = _resource_to_filename(url)
assert "http" not in filename
with pytest.raises(FileNotFoundError):
filename_to_url(filename, cache_dir=self.TEST_DIR)
pathlib.Path(os.path.join(self.TEST_DIR, filename)).touch()
with pytest.raises(FileNotFoundError):
filename_to_url(filename, cache_dir=self.TEST_DIR)
json.dump(
{"url": url, "etag": None},
open(os.path.join(self.TEST_DIR, filename + ".json"), "w"),
)
back_to_url, etag = filename_to_url(filename, cache_dir=self.TEST_DIR)
assert back_to_url == url
assert etag is None
def test_resource_to_filename_with_etags(self):
for url in [
"http://allenai.org",
"http://allennlp.org",
"https://www.google.com",
"http://pytorch.org",
]:
filename = _resource_to_filename(url, etag="mytag")
assert "http" not in filename
pathlib.Path(os.path.join(self.TEST_DIR, filename)).touch()
json.dump(
{"url": url, "etag": "mytag"},
open(os.path.join(self.TEST_DIR, filename + ".json"), "w"),
)
back_to_url, etag = filename_to_url(filename, cache_dir=self.TEST_DIR)
assert back_to_url == url
assert etag == "mytag"
baseurl = "http://allenai.org/"
assert _resource_to_filename(baseurl + "1") != _resource_to_filename(baseurl, etag="1")
def test_resource_to_filename_with_etags_eliminates_quotes(self):
for url in [
"http://allenai.org",
"http://allennlp.org",
"https://www.google.com",
"http://pytorch.org",
]:
filename = _resource_to_filename(url, etag='"mytag"')
assert "http" not in filename
pathlib.Path(os.path.join(self.TEST_DIR, filename)).touch()
json.dump(
{"url": url, "etag": "mytag"},
open(os.path.join(self.TEST_DIR, filename + ".json"), "w"),
)
back_to_url, etag = filename_to_url(filename, cache_dir=self.TEST_DIR)
assert back_to_url == url
assert etag == "mytag"
def test_split_s3_path(self):
# Test splitting good urls.
assert _split_s3_path("s3://my-bucket/subdir/file.txt") == ("my-bucket", "subdir/file.txt")
assert _split_s3_path("s3://my-bucket/file.txt") == ("my-bucket", "file.txt")
# Test splitting bad urls.
with pytest.raises(ValueError):
_split_s3_path("s3://")
with pytest.raises(ValueError):
_split_s3_path("s3://myfile.txt")
with pytest.raises(ValueError):
_split_s3_path("myfile.txt")
@responses.activate
def test_get_from_cache(self):
url = "http://fake.datastore.com/glove.txt.gz"
set_up_glove(url, self.glove_bytes, change_etag_every=2)
filename = get_from_cache(url, cache_dir=self.TEST_DIR)
assert filename == os.path.join(self.TEST_DIR, _resource_to_filename(url, etag="0"))
assert os.path.exists(filename + ".json")
meta = _Meta.from_path(filename + ".json")
assert meta.resource == url
# We should have made one HEAD request and one GET request.
method_counts = Counter(call.request.method for call in responses.calls)
assert len(method_counts) == 2
assert method_counts["HEAD"] == 1
assert method_counts["GET"] == 1
# And the cached file should have the correct contents
with open(filename, "rb") as cached_file:
assert cached_file.read() == self.glove_bytes
# A second call to `get_from_cache` should make another HEAD call
# but not another GET call.
filename2 = get_from_cache(url, cache_dir=self.TEST_DIR)
assert filename2 == filename
method_counts = Counter(call.request.method for call in responses.calls)
assert len(method_counts) == 2
assert method_counts["HEAD"] == 2
assert method_counts["GET"] == 1
with open(filename2, "rb") as cached_file:
assert cached_file.read() == self.glove_bytes
# A third call should have a different ETag and should force a new download,
# which means another HEAD call and another GET call.
filename3 = get_from_cache(url, cache_dir=self.TEST_DIR)
assert filename3 == os.path.join(self.TEST_DIR, _resource_to_filename(url, etag="1"))
method_counts = Counter(call.request.method for call in responses.calls)
assert len(method_counts) == 2
assert method_counts["HEAD"] == 3
assert method_counts["GET"] == 2
with open(filename3, "rb") as cached_file:
assert cached_file.read() == self.glove_bytes
@responses.activate
def test_cached_path(self):
url = "http://fake.datastore.com/glove.txt.gz"
set_up_glove(url, self.glove_bytes)
# non-existent file
with pytest.raises(FileNotFoundError):
filename = cached_path(self.FIXTURES_ROOT / "does_not_exist" / "fake_file.tar.gz")
# unparsable URI
with pytest.raises(ValueError):
filename = cached_path("fakescheme://path/to/fake/file.tar.gz")
# existing file as path
assert cached_path(self.glove_file) == str(self.glove_file)
# caches urls
filename = cached_path(url, cache_dir=self.TEST_DIR)
assert len(responses.calls) == 2
assert filename == os.path.join(self.TEST_DIR, _resource_to_filename(url, etag="0"))
with open(filename, "rb") as cached_file:
assert cached_file.read() == self.glove_bytes
# archives
filename = cached_path(
self.FIXTURES_ROOT / "common" / "quote.tar.gz!quote.txt",
extract_archive=True,
cache_dir=self.TEST_DIR,
)
with open(filename, "r") as f:
assert f.read().startswith("I mean, ")
def test_extract_with_external_symlink(self):
dangerous_file = self.FIXTURES_ROOT / "common" / "external_symlink.tar.gz"
with pytest.raises(ValueError):
cached_path(dangerous_file, extract_archive=True)
def test_open_compressed(self):
uncompressed_file = self.FIXTURES_ROOT / "embeddings/fake_embeddings.5d.txt"
with open_compressed(uncompressed_file) as f:
uncompressed_lines = [line.strip() for line in f]
for suffix in ["bz2", "gz"]:
compressed_file = f"{uncompressed_file}.{suffix}"
with open_compressed(compressed_file) as f:
compressed_lines = [line.strip() for line in f]
assert compressed_lines == uncompressed_lines
def test_meta_backwards_compatible(self):
url = "http://fake.datastore.com/glove.txt.gz"
etag = "some-fake-etag"
filename = os.path.join(self.TEST_DIR, _resource_to_filename(url, etag))
with open(filename, "wb") as f:
f.write(self.glove_bytes)
with open(filename + ".json", "w") as meta_file:
json.dump({"url": url, "etag": etag}, meta_file)
meta = _Meta.from_path(filename + ".json")
assert meta.resource == url
assert meta.etag == etag
assert meta.creation_time is not None
assert meta.size == len(self.glove_bytes)
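# Helper for the inspect/remove tests below: fabricates a cache entry for `url` with
# the given etag, either as a regular cached file or as an "-extracted" directory,
# along with the matching .lock file and _Meta sidecar.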
def create_cache_entry(self, url: str, etag: str, as_extraction_dir: bool = False):
filename = os.path.join(self.TEST_DIR, _resource_to_filename(url, etag))
cache_path = filename
if as_extraction_dir:
cache_path = filename + "-extracted"
filename = filename + "-extracted/glove.txt"
os.mkdir(cache_path)
with open(filename, "wb") as f:
f.write(self.glove_bytes)
open(cache_path + ".lock", "a").close()
meta = _Meta(
resource=url,
cached_path=cache_path,
etag=etag,
creation_time=time.time(),
size=len(self.glove_bytes),
extraction_dir=as_extraction_dir,
)
meta.to_file()
def test_inspect(self, capsys):
self.create_cache_entry("http://fake.datastore.com/glove.txt.gz", "etag-1")
self.create_cache_entry("http://fake.datastore.com/glove.txt.gz", "etag-2")
self.create_cache_entry(
"http://fake.datastore.com/glove.txt.gz", "etag-3", as_extraction_dir=True
)
inspect_cache(cache_dir=self.TEST_DIR)
captured = capsys.readouterr()
assert "http://fake.datastore.com/glove.txt.gz" in captured.out
assert "2 versions cached" in captured.out
assert "1 version extracted" in captured.out
def test_inspect_with_patterns(self, capsys):
self.create_cache_entry("http://fake.datastore.com/glove.txt.gz", "etag-1")
self.create_cache_entry("http://fake.datastore.com/glove.txt.gz", "etag-2")
self.create_cache_entry("http://other.fake.datastore.com/glove.txt.gz", "etag-4")
inspect_cache(cache_dir=self.TEST_DIR, patterns=["http://fake.*"])
captured = capsys.readouterr()
assert "http://fake.datastore.com/glove.txt.gz" in captured.out
assert "2 versions" in captured.out
assert "http://other.fake.datastore.com/glove.txt.gz" not in captured.out
def test_remove_entries(self):
self.create_cache_entry("http://fake.datastore.com/glove.txt.gz", "etag-1")
self.create_cache_entry("http://fake.datastore.com/glove.txt.gz", "etag-2")
self.create_cache_entry(
"http://fake.datastore.com/glove.txt.gz", "etag-3", as_extraction_dir=True
)
self.create_cache_entry("http://other.fake.datastore.com/glove.txt.gz", "etag-4")
self.create_cache_entry(
"http://other.fake.datastore.com/glove.txt.gz", "etag-5", as_extraction_dir=True
)
reclaimed_space = remove_cache_entries(["http://fake.*"], cache_dir=self.TEST_DIR)
assert reclaimed_space == 3 * len(self.glove_bytes)
size_left, entries_left = _find_entries(cache_dir=self.TEST_DIR)
assert size_left == 2 * len(self.glove_bytes)
assert len(entries_left) == 1
entry_left = list(entries_left.values())[0]
# one regular cache file and one extraction dir
assert len(entry_left[0]) == 1
assert len(entry_left[1]) == 1
# Now remove everything.
remove_cache_entries(["*"], cache_dir=self.TEST_DIR)
assert len(os.listdir(self.TEST_DIR)) == 0
class TestCachedPathWithArchive(AllenNlpTestCase):
def setup_method(self):
super().setup_method()
self.tar_file = self.TEST_DIR / "utf-8.tar.gz"
shutil.copyfile(
self.FIXTURES_ROOT / "utf-8_sample" / "archives" / "utf-8.tar.gz", self.tar_file
)
self.zip_file = self.TEST_DIR / "utf-8.zip"
shutil.copyfile(
self.FIXTURES_ROOT / "utf-8_sample" / "archives" / "utf-8.zip", self.zip_file
)
def check_extracted(self, extracted: str):
assert os.path.isdir(extracted)
assert pathlib.Path(extracted).parent == self.TEST_DIR
assert os.path.exists(os.path.join(extracted, "dummy.txt"))
assert os.path.exists(os.path.join(extracted, "folder/utf-8_sample.txt"))
assert os.path.exists(extracted + ".json")
def test_cached_path_extract_local_tar(self):
extracted = cached_path(self.tar_file, cache_dir=self.TEST_DIR, extract_archive=True)
self.check_extracted(extracted)
def test_cached_path_extract_local_zip(self):
extracted = cached_path(self.zip_file, cache_dir=self.TEST_DIR, extract_archive=True)
self.check_extracted(extracted)
@responses.activate
def test_cached_path_extract_remote_tar(self):
url = "http://fake.datastore.com/utf-8.tar.gz"
byt = open(self.tar_file, "rb").read()
responses.add(
responses.GET,
url,
body=byt,
status=200,
content_type="application/tar+gzip",
stream=True,
headers={"Content-Length": str(len(byt))},
)
responses.add(
responses.HEAD,
url,
status=200,
headers={"ETag": "fake-etag"},
)
extracted = cached_path(url, cache_dir=self.TEST_DIR, extract_archive=True)
assert extracted.endswith("-extracted")
self.check_extracted(extracted)
@responses.activate
def test_cached_path_extract_remote_zip(self):
url = "http://fake.datastore.com/utf-8.zip"
byt = open(self.zip_file, "rb").read()
responses.add(
responses.GET,
url,
body=byt,
status=200,
content_type="application/zip",
stream=True,
headers={"Content-Length": str(len(byt))},
)
responses.add(
responses.HEAD,
url,
status=200,
headers={"ETag": "fake-etag"},
)
extracted = cached_path(url, cache_dir=self.TEST_DIR, extract_archive=True)
assert extracted.endswith("-extracted")
self.check_extracted(extracted)
class TestCacheFile(AllenNlpTestCase):
def test_temp_file_removed_on_error(self):
cache_filename = self.TEST_DIR / "cache_file"
with pytest.raises(IOError, match="I made this up"):
with CacheFile(cache_filename) as handle:
raise IOError("I made this up")
assert not os.path.exists(handle.name)
assert not os.path.exists(cache_filename)
| allennlp-master | tests/common/file_utils_test.py |
import os
import logging
import random
from allennlp.common.logging import AllenNlpLogger
from allennlp.common.testing import AllenNlpTestCase
class TestLogging(AllenNlpTestCase):
def setup_method(self):
super().setup_method()
logger = logging.getLogger(str(random.random()))
self.test_log_file = os.path.join(self.TEST_DIR, "test.log")
logger.addHandler(logging.FileHandler(self.test_log_file))
logger.setLevel(logging.DEBUG)
self.logger = logger
self._msg = "test message"
def test_debug_once(self):
self.logger.debug_once(self._msg)
self.logger.debug_once(self._msg)
with open(self.test_log_file, "r") as f:
assert len(f.readlines()) == 1
def test_info_once(self):
self.logger.info_once(self._msg)
self.logger.info_once(self._msg)
with open(self.test_log_file, "r") as f:
assert len(f.readlines()) == 1
def test_warning_once(self):
self.logger.warning_once(self._msg)
self.logger.warning_once(self._msg)
with open(self.test_log_file, "r") as f:
assert len(f.readlines()) == 1
def test_error_once(self):
self.logger.error_once(self._msg)
self.logger.error_once(self._msg)
with open(self.test_log_file, "r") as f:
assert len(f.readlines()) == 1
def test_critical_once(self):
self.logger.critical_once(self._msg)
self.logger.critical_once(self._msg)
with open(self.test_log_file, "r") as f:
assert len(f.readlines()) == 1
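# The *_once helpers should deduplicate on the raw format string, not on the
# interpolated arguments, so logging the same template with different values still
# produces a single line (and a single entry in _seen_msgs).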
def test_debug_once_different_args(self):
self.logger.debug_once("There are %d lights.", 4)
self.logger.debug_once("There are %d lights.", 5)
with open(self.test_log_file, "r") as f:
assert len(f.readlines()) == 1
assert len(self.logger._seen_msgs) == 1
def test_getLogger(self):
logger = logging.getLogger("test_logger")
assert isinstance(logger, AllenNlpLogger)
| allennlp-master | tests/common/logging_test.py |
import pytest
from allennlp.common import cached_transformers
from allennlp.common.testing import AllenNlpTestCase
class TestCachedTransformers(AllenNlpTestCase):
def test_get_missing_from_cache_local_files_only(self):
with pytest.raises(ValueError) as execinfo:
cached_transformers.get(
"bert-base-uncased",
True,
cache_dir=self.TEST_DIR,
local_files_only=True,
)
assert str(execinfo.value) == (
"Cannot find the requested files in the cached path and "
"outgoing traffic has been disabled. To enable model "
"look-ups and downloads online, set 'local_files_only' "
"to False."
)
def test_get_tokenizer_missing_from_cache_local_files_only(self):
with pytest.raises(ValueError) as execinfo:
cached_transformers.get_tokenizer(
"bert-base-uncased",
cache_dir=self.TEST_DIR,
local_files_only=True,
)
assert str(execinfo.value) == (
"Cannot find the requested files in the cached path and "
"outgoing traffic has been disabled. To enable model "
"look-ups and downloads online, set 'local_files_only' "
"to False."
)
| allennlp-master | tests/common/cached_transformers_test.py |
| allennlp-master | tests/common/__init__.py |
from overrides import overrides
from allennlp.commands import Subcommand
from allennlp.common.plugins import (
discover_plugins,
import_plugins,
)
from allennlp.common.testing import AllenNlpTestCase
from allennlp.common.util import pushd
class TestPlugins(AllenNlpTestCase):
@overrides
def setup_method(self):
super().setup_method()
self.plugins_root = self.FIXTURES_ROOT / "plugins"
def test_no_plugins(self):
available_plugins = set(discover_plugins())
assert available_plugins == set()
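# Plugin discovery is relative to the current working directory: outside the fixtures
# plugins directory nothing is found, but after pushd-ing into it the local plugin is
# discovered and import_plugins() makes its "d" subcommand available.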
def test_file_plugin(self):
available_plugins = set(discover_plugins())
assert available_plugins == set()
with pushd(self.plugins_root):
available_plugins = set(discover_plugins())
assert available_plugins == {"d"}
import_plugins()
subcommands_available = Subcommand.list_available()
assert "d" in subcommands_available
| allennlp-master | tests/common/plugins_test.py |
from datetime import timedelta
import sys
from collections import OrderedDict
import pytest
import torch
from allennlp.common import util
from allennlp.common.testing import AllenNlpTestCase
from allennlp.common.util import push_python_path
class Unsanitizable:
pass
class Sanitizable:
def to_json(self):
return {"sanitizable": True}
class TestCommonUtils(AllenNlpTestCase):
def test_group_by_count(self):
assert util.group_by_count([1, 2, 3, 4, 5, 6, 7], 3, 20) == [
[1, 2, 3],
[4, 5, 6],
[7, 20, 20],
]
def test_lazy_groups_of(self):
xs = [1, 2, 3, 4, 5, 6, 7]
groups = util.lazy_groups_of(iter(xs), group_size=3)
assert next(groups) == [1, 2, 3]
assert next(groups) == [4, 5, 6]
assert next(groups) == [7]
with pytest.raises(StopIteration):
_ = next(groups)
def test_pad_sequence_to_length(self):
assert util.pad_sequence_to_length([1, 2, 3], 5) == [1, 2, 3, 0, 0]
assert util.pad_sequence_to_length([1, 2, 3], 5, default_value=lambda: 2) == [1, 2, 3, 2, 2]
assert util.pad_sequence_to_length([1, 2, 3], 5, padding_on_right=False) == [0, 0, 1, 2, 3]
def test_namespace_match(self):
assert util.namespace_match("*tags", "tags")
assert util.namespace_match("*tags", "passage_tags")
assert util.namespace_match("*tags", "question_tags")
assert util.namespace_match("tokens", "tokens")
assert not util.namespace_match("tokens", "stemmed_tokens")
def test_sanitize(self):
assert util.sanitize(torch.Tensor([1, 2])) == [1, 2]
assert util.sanitize(torch.LongTensor([1, 2])) == [1, 2]
with pytest.raises(ValueError):
util.sanitize(Unsanitizable())
assert util.sanitize(Sanitizable()) == {"sanitizable": True}
def test_import_submodules(self):
(self.TEST_DIR / "mymodule").mkdir()
(self.TEST_DIR / "mymodule" / "__init__.py").touch()
(self.TEST_DIR / "mymodule" / "submodule").mkdir()
(self.TEST_DIR / "mymodule" / "submodule" / "__init__.py").touch()
(self.TEST_DIR / "mymodule" / "submodule" / "subsubmodule.py").touch()
with push_python_path(self.TEST_DIR):
assert "mymodule" not in sys.modules
assert "mymodule.submodule" not in sys.modules
util.import_module_and_submodules("mymodule")
assert "mymodule" in sys.modules
assert "mymodule.submodule" in sys.modules
assert "mymodule.submodule.subsubmodule" in sys.modules
def test_get_frozen_and_tunable_parameter_names(self):
model = torch.nn.Sequential(
OrderedDict([("conv", torch.nn.Conv1d(5, 5, 5)), ("linear", torch.nn.Linear(5, 10))])
)
named_parameters = dict(model.named_parameters())
named_parameters["linear.weight"].requires_grad_(False)
named_parameters["linear.bias"].requires_grad_(False)
(
frozen_parameter_names,
tunable_parameter_names,
) = util.get_frozen_and_tunable_parameter_names(model)
assert set(frozen_parameter_names) == {"linear.weight", "linear.bias"}
assert set(tunable_parameter_names) == {"conv.weight", "conv.bias"}
def test_sanitize_ptb_tokenized_string(self):
def create_surrounding_test_case(start_ptb_token, end_ptb_token, start_token, end_token):
return (
"a {} b c {} d".format(start_ptb_token, end_ptb_token),
"a {}b c{} d".format(start_token, end_token),
)
def create_fwd_token_test_case(fwd_token):
return "a {} b".format(fwd_token), "a {}b".format(fwd_token)
def create_backward_token_test_case(backward_token):
return "a {} b".format(backward_token), "a{} b".format(backward_token)
punct_forward = {"`", "$", "#"}
punct_backward = {".", ",", "!", "?", ":", ";", "%", "'"}
test_cases = [
# Parentheses
create_surrounding_test_case("-lrb-", "-rrb-", "(", ")"),
create_surrounding_test_case("-lsb-", "-rsb-", "[", "]"),
create_surrounding_test_case("-lcb-", "-rcb-", "{", "}"),
# Parentheses don't have to match
create_surrounding_test_case("-lsb-", "-rcb-", "[", "}"),
# Also check that casing doesn't matter
create_surrounding_test_case("-LsB-", "-rcB-", "[", "}"),
# Quotes
create_surrounding_test_case("``", "''", '"', '"'),
# Start/end tokens
create_surrounding_test_case("<s>", "</s>", "", ""),
# Tokens that merge forward
*[create_fwd_token_test_case(t) for t in punct_forward],
# Tokens that merge backward
*[create_backward_token_test_case(t) for t in punct_backward],
# Merge tokens starting with ' backwards
("I 'm", "I'm"),
# Merge tokens backwards when matching (n't or na) (special cases, parentheses behave in the same way)
("I do n't", "I don't"),
("gon na", "gonna"),
# Also make sure casing is preserved
("gon NA", "gonNA"),
# This is a no op
("A b C d", "A b C d"),
]
for ptb_string, expected in test_cases:
actual = util.sanitize_ptb_tokenized_string(ptb_string)
assert actual == expected
@pytest.mark.parametrize(
"size, result",
[
(12, "12B"),
(int(1.2 * 1024), "1.2K"),
(12 * 1024, "12K"),
(120 * 1024, "120K"),
(int(1.2 * 1024 * 1024), "1.2M"),
(12 * 1024 * 1024, "12M"),
(120 * 1024 * 1024, "120M"),
(int(1.2 * 1024 * 1024 * 1024), "1.2G"),
(12 * 1024 * 1024 * 1024, "12G"),
],
)
def test_format_size(size: int, result: str):
assert util.format_size(size) == result
@pytest.mark.parametrize(
"td, result",
[
(timedelta(days=2, hours=3), "2 days"),
(timedelta(days=1, hours=3), "1 day"),
(timedelta(hours=3, minutes=12), "3 hours"),
(timedelta(hours=1, minutes=12), "1 hour, 12 mins"),
(timedelta(minutes=12), "12 mins"),
],
)
def test_format_timedelta(td: timedelta, result: str):
assert util.format_timedelta(td) == result
| allennlp-master | tests/common/util_test.py |
import json
import os
import re
from collections import OrderedDict
import pytest
from allennlp.common.checks import ConfigurationError
from allennlp.common.params import (
infer_and_cast,
Params,
parse_overrides,
unflatten,
with_fallback,
remove_keys_from_params,
)
from allennlp.common.testing import AllenNlpTestCase
class TestParams(AllenNlpTestCase):
def test_load_from_file(self):
filename = self.FIXTURES_ROOT / "simple_tagger" / "experiment.json"
params = Params.from_file(filename)
assert "dataset_reader" in params
assert "trainer" in params
model_params = params.pop("model")
assert model_params.pop("type") == "simple_tagger"
def test_replace_none(self):
params = Params({"a": "None", "b": [1.0, "None", 2], "c": {"d": "None"}})
assert params["a"] is None
assert params["b"][1] is None
assert params["c"]["d"] is None
def test_bad_unicode_environment_variables(self):
filename = self.FIXTURES_ROOT / "simple_tagger" / "experiment.json"
os.environ["BAD_ENVIRONMENT_VARIABLE"] = "\udce2"
Params.from_file(filename)
del os.environ["BAD_ENVIRONMENT_VARIABLE"]
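# Overrides can be given as a nested dict or as a JSON string, and keys may use dotted
# paths, including numeric indices into lists (e.g.
# "data_loader.batch_sampler.sorting_keys.0").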
@pytest.mark.parametrize("input_type", [dict, str])
def test_overrides(self, input_type):
filename = self.FIXTURES_ROOT / "simple_tagger" / "experiment.json"
overrides = {
"train_data_path": "FOO",
"model": {"type": "BAR"},
"model.text_field_embedder.tokens.type": "BAZ",
"data_loader.batch_sampler.sorting_keys.0": "question",
}
params = Params.from_file(
filename, overrides if input_type == dict else json.dumps(overrides)
)
assert "dataset_reader" in params
assert "trainer" in params
assert params["train_data_path"] == "FOO"
assert params["data_loader"]["batch_sampler"]["sorting_keys"][0] == "question"
model_params = params.pop("model")
assert model_params.pop("type") == "BAR"
assert model_params["text_field_embedder"]["tokens"]["type"] == "BAZ"
def test_unflatten(self):
flattened = {"a.b.c": 1, "a.b.d": 0, "a.e.f.g.h": 2, "b": 3}
unflattened = unflatten(flattened)
assert unflattened == {"a": {"b": {"c": 1, "d": 0}, "e": {"f": {"g": {"h": 2}}}}, "b": 3}
# should do nothing to a non-flat dictionary
assert unflatten(unflattened) == unflattened
def test_with_fallback(self):
preferred = {"a": 1}
fallback = {"a": 0, "b": 2}
merged = with_fallback(preferred=preferred, fallback=fallback)
assert merged == {"a": 1, "b": 2}
# incompatibility is ok
preferred = {"a": {"c": 3}}
fallback = {"a": 0, "b": 2}
merged = with_fallback(preferred=preferred, fallback=fallback)
assert merged == {"a": {"c": 3}, "b": 2}
# goes deep
preferred = {"deep": {"a": 1}}
fallback = {"deep": {"a": 0, "b": 2}}
merged = with_fallback(preferred=preferred, fallback=fallback)
assert merged == {"deep": {"a": 1, "b": 2}}
def test_parse_overrides(self):
assert parse_overrides("") == {}
assert parse_overrides("{}") == {}
override_dict = parse_overrides('{"train_data": "/train", "trainer.num_epochs": 10}')
assert override_dict == {"train_data": "/train", "trainer": {"num_epochs": 10}}
params = with_fallback(
preferred=override_dict,
fallback={
"train_data": "/test",
"model": "simple_tagger",
"trainer": {"num_epochs": 100, "optimizer": "sgd"},
},
)
assert params == {
"train_data": "/train",
"model": "simple_tagger",
"trainer": {"num_epochs": 10, "optimizer": "sgd"},
}
def test_as_flat_dict(self):
params = Params({"a": 10, "b": {"c": 20, "d": "stuff"}}).as_flat_dict()
assert params == {"a": 10, "b.c": 20, "b.d": "stuff"}
def test_jsonnet_features(self):
config_file = self.TEST_DIR / "config.jsonnet"
with open(config_file, "w") as f:
f.write(
"""{
// This example is copied straight from the jsonnet docs
person1: {
name: "Alice",
welcome: "Hello " + self.name + "!",
},
person2: self.person1 { name: "Bob" },
}"""
)
params = Params.from_file(config_file)
alice = params.pop("person1")
bob = params.pop("person2")
assert alice.as_dict() == {"name": "Alice", "welcome": "Hello Alice!"}
assert bob.as_dict() == {"name": "Bob", "welcome": "Hello Bob!"}
params.assert_empty("TestParams")
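# Backslashes in jsonnet config values must be escaped ("a\\.b"); a single backslash
# ("a\.b") should make Params.from_file fail, and the escaped form should round-trip
# through as_dict and back.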
def test_regexes_with_backslashes(self):
bad_regex = self.TEST_DIR / "bad_regex.jsonnet"
good_regex = self.TEST_DIR / "good_regex.jsonnet"
with open(bad_regex, "w") as f:
f.write(r'{"myRegex": "a\.b"}')
with open(good_regex, "w") as f:
f.write(r'{"myRegex": "a\\.b"}')
with pytest.raises(RuntimeError):
Params.from_file(bad_regex)
params = Params.from_file(good_regex)
regex = params["myRegex"]
assert re.match(regex, "a.b")
assert not re.match(regex, "a-b")
# Check roundtripping
good_regex2 = self.TEST_DIR / "good_regex2.jsonnet"
with open(good_regex2, "w") as f:
f.write(json.dumps(params.as_dict()))
params2 = Params.from_file(good_regex2)
assert params.as_dict() == params2.as_dict()
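# std.extVar in a config reads from the process environment: loading the file without
# the variable set should raise, and setting it substitutes the value.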
def test_env_var_substitution(self):
substitutor = self.TEST_DIR / "substitutor.jsonnet"
key = "TEST_ENV_VAR_SUBSTITUTION"
assert os.environ.get(key) is None
with open(substitutor, "w") as f:
f.write(f'{{"path": std.extVar("{key}")}}')
# raises without environment variable set
with pytest.raises(RuntimeError):
Params.from_file(substitutor)
os.environ[key] = "PERFECT"
params = Params.from_file(substitutor)
assert params["path"] == "PERFECT"
del os.environ[key]
@pytest.mark.xfail(
not os.path.exists(AllenNlpTestCase.PROJECT_ROOT / "training_config"),
reason="Training configs not installed with pip",
)
def test_known_configs(self):
configs = os.listdir(self.PROJECT_ROOT / "training_config")
# Our configs use environment variable substitution, and the _jsonnet parser
# will fail if we don't pass it correct environment variables.
forced_variables = [
# constituency parser
"PTB_TRAIN_PATH",
"PTB_DEV_PATH",
"PTB_TEST_PATH",
# dependency parser
"PTB_DEPENDENCIES_TRAIN",
"PTB_DEPENDENCIES_VAL",
# multilingual dependency parser
"TRAIN_PATHNAME",
"DEV_PATHNAME",
"TEST_PATHNAME",
# srl_elmo_5.5B
"SRL_TRAIN_DATA_PATH",
"SRL_VALIDATION_DATA_PATH",
# coref
"COREF_TRAIN_DATA_PATH",
"COREF_DEV_DATA_PATH",
"COREF_TEST_DATA_PATH",
# ner
"NER_TRAIN_DATA_PATH",
"NER_TEST_A_PATH",
"NER_TEST_B_PATH",
# bidirectional lm
"BIDIRECTIONAL_LM_TRAIN_PATH",
"BIDIRECTIONAL_LM_VOCAB_PATH",
"BIDIRECTIONAL_LM_ARCHIVE_PATH",
]
for var in forced_variables:
os.environ[var] = os.environ.get(var) or str(self.TEST_DIR)
for config in configs:
try:
Params.from_file(self.PROJECT_ROOT / "training_config" / config)
except Exception as e:
raise AssertionError(f"unable to load params for {config}, because {e}")
for var in forced_variables:
if os.environ[var] == str(self.TEST_DIR):
del os.environ[var]
def test_as_ordered_dict(self):
        # keyD > keyC > keyE; keyDA > keyDB; then all other keys alphabetically
preference_orders = [["keyD", "keyC", "keyE"], ["keyDA", "keyDB"]]
params = Params(
{
"keyC": "valC",
"keyB": "valB",
"keyA": "valA",
"keyE": "valE",
"keyD": {"keyDB": "valDB", "keyDA": "valDA"},
}
)
ordered_params_dict = params.as_ordered_dict(preference_orders)
expected_ordered_params_dict = OrderedDict(
{
"keyD": {"keyDA": "valDA", "keyDB": "valDB"},
"keyC": "valC",
"keyE": "valE",
"keyA": "valA",
"keyB": "valB",
}
)
assert json.dumps(ordered_params_dict) == json.dumps(expected_ordered_params_dict)
def test_to_file(self):
# Test to_file works with or without preference orders
params_dict = {"keyA": "valA", "keyB": "valB"}
expected_ordered_params_dict = OrderedDict({"keyB": "valB", "keyA": "valA"})
params = Params(params_dict)
file_path = self.TEST_DIR / "config.jsonnet"
# check with preference orders
params.to_file(file_path, [["keyB", "keyA"]])
with open(file_path, "r") as handle:
ordered_params_dict = OrderedDict(json.load(handle))
assert json.dumps(expected_ordered_params_dict) == json.dumps(ordered_params_dict)
# check without preference orders doesn't give error
params.to_file(file_path)
def test_infer_and_cast(self):
lots_of_strings = {
"a": ["10", "1.3", "true"],
"b": {"x": 10, "y": "20.1", "z": "other things"},
"c": "just a string",
}
casted = {
"a": [10, 1.3, True],
"b": {"x": 10, "y": 20.1, "z": "other things"},
"c": "just a string",
}
assert infer_and_cast(lots_of_strings) == casted
contains_bad_data = {"x": 10, "y": int}
with pytest.raises(ValueError, match="cannot infer type"):
infer_and_cast(contains_bad_data)
params = Params(lots_of_strings)
assert params.as_dict() == lots_of_strings
assert params.as_dict(infer_type_and_cast=True) == casted
def test_pop_choice(self):
choices = ["my_model", "other_model"]
params = Params({"model": "my_model"})
assert params.pop_choice("model", choices) == "my_model"
params = Params({"model": "non_existent_model"})
with pytest.raises(ConfigurationError):
params.pop_choice("model", choices)
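        # Fully qualified class names are accepted even when they are not in the choices list,
        # unless allow_class_names=False.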
params = Params({"model": "module.submodule.ModelName"})
        assert params.pop_choice("model", choices) == "module.submodule.ModelName"
params = Params({"model": "module.submodule.ModelName"})
with pytest.raises(ConfigurationError):
params.pop_choice("model", choices, allow_class_names=False)
def test_remove_keys_from_params(self):
filename = self.FIXTURES_ROOT / "simple_tagger" / "experiment.json"
params = Params.from_file(filename)
assert params["data_loader"]["batch_sampler"]["type"] == "bucket"
assert params["data_loader"]["batch_sampler"]["batch_size"] == 80
remove_keys_from_params(params, keys=["batch_size"])
assert "batch_size" not in params["data_loader"]["batch_sampler"]
remove_keys_from_params(params, keys=["type", "batch_size"])
assert "type" not in params["data_loader"]["batch_sampler"]
remove_keys_from_params(params, keys=["data_loader"])
assert "data_loader" not in params
| allennlp-master | tests/common/params_test.py |
import torch
from allennlp.common.testing import AllenNlpTestCase, multi_device
actual_devices = set()
class TestTesting(AllenNlpTestCase):
@multi_device
def test_multi_device(self, device: str):
actual_devices.add(device)
def test_devices_accounted_for(self):
expected_devices = {"cpu", "cuda"} if torch.cuda.is_available() else {"cpu"}
assert expected_devices == actual_devices
| allennlp-master | tests/common/testing.py |
from pytest import raises
import torch
from allennlp.common.testing import AllenNlpTestCase
from allennlp.data.token_indexers import TokenCharactersIndexer
from allennlp.interpret.attackers import Hotflip
from allennlp.models.archival import load_archive
from allennlp.modules.token_embedders import EmptyEmbedder
from allennlp.predictors import Predictor, TextClassifierPredictor
from allennlp.data.dataset_readers import TextClassificationJsonReader
from allennlp.data.vocabulary import Vocabulary
from allennlp.common.testing.interpret_test import (
FakeModelForTestingInterpret,
FakePredictorForTestingInterpret,
)
class TestHotflip(AllenNlpTestCase):
def test_hotflip(self):
inputs = {"sentence": "I always write unit tests for my code."}
archive = load_archive(
self.FIXTURES_ROOT / "basic_classifier" / "serialization" / "model.tar.gz"
)
predictor = Predictor.from_archive(archive)
hotflipper = Hotflip(predictor)
hotflipper.initialize()
attack = hotflipper.attack_from_json(inputs, "tokens", "grad_input_1")
assert attack is not None
assert "final" in attack
assert "original" in attack
assert "outputs" in attack
assert len(attack["final"][0]) == len(
attack["original"]
) # hotflip replaces words without removing
def test_with_token_characters_indexer(self):
inputs = {"sentence": "I always write unit tests for my code."}
archive = load_archive(
self.FIXTURES_ROOT / "basic_classifier" / "serialization" / "model.tar.gz"
)
predictor = Predictor.from_archive(archive)
predictor._dataset_reader._token_indexers["chars"] = TokenCharactersIndexer(
min_padding_length=1
)
predictor._model._text_field_embedder._token_embedders["chars"] = EmptyEmbedder()
hotflipper = Hotflip(predictor)
hotflipper.initialize()
attack = hotflipper.attack_from_json(inputs, "tokens", "grad_input_1")
assert attack is not None
assert "final" in attack
assert "original" in attack
assert "outputs" in attack
assert len(attack["final"][0]) == len(
attack["original"]
) # hotflip replaces words without removing
# This checks for a bug that arose with a change in the pytorch API. We want to be sure we
# can handle the case where we have to re-encode a vocab item because we didn't save it in
# our fake embedding matrix (see Hotflip docstring for more info).
hotflipper = Hotflip(predictor, max_tokens=50)
hotflipper.initialize()
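        # token_idx 60 is outside the 50-token fake embedding matrix, so Hotflip has to
        # re-encode that vocabulary item on the fly rather than look it up.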
hotflipper._first_order_taylor(
grad=torch.rand((10,)).numpy(), token_idx=torch.tensor(60), sign=1
)
def test_interpret_fails_when_embedding_layer_not_found(self):
inputs = {"sentence": "I always write unit tests for my code."}
vocab = Vocabulary()
vocab.add_tokens_to_namespace([w for w in inputs["sentence"].split(" ")])
model = FakeModelForTestingInterpret(vocab, max_tokens=len(inputs["sentence"].split(" ")))
predictor = TextClassifierPredictor(model, TextClassificationJsonReader())
hotflipper = Hotflip(predictor)
with raises(RuntimeError):
hotflipper.initialize()
def test_interpret_works_with_custom_embedding_layer(self):
inputs = {"sentence": "I always write unit tests for my code"}
vocab = Vocabulary()
vocab.add_tokens_to_namespace([w for w in inputs["sentence"].split(" ")])
model = FakeModelForTestingInterpret(vocab, max_tokens=len(inputs["sentence"].split(" ")))
predictor = FakePredictorForTestingInterpret(model, TextClassificationJsonReader())
hotflipper = Hotflip(predictor)
hotflipper.initialize()
attack = hotflipper.attack_from_json(inputs, "tokens", "grad_input_1")
assert attack is not None
assert "final" in attack
assert "original" in attack
assert "outputs" in attack
assert len(attack["final"][0]) == len(
attack["original"]
) # hotflip replaces words without removing
| allennlp-master | tests/interpret/hotflip_test.py |
from pytest import approx, raises
from allennlp.common.testing import AllenNlpTestCase
from allennlp.interpret.saliency_interpreters import SimpleGradient
from allennlp.models.archival import load_archive
from allennlp.predictors import Predictor, TextClassifierPredictor
from allennlp.data.dataset_readers import TextClassificationJsonReader
from allennlp.data.vocabulary import Vocabulary
from allennlp.common.testing.interpret_test import (
FakeModelForTestingInterpret,
FakePredictorForTestingInterpret,
)
class TestSimpleGradient(AllenNlpTestCase):
def test_simple_gradient_basic_text(self):
inputs = {"sentence": "It was the ending that I hated"}
archive = load_archive(
self.FIXTURES_ROOT / "basic_classifier" / "serialization" / "model.tar.gz"
)
predictor = Predictor.from_archive(archive, "text_classifier")
interpreter = SimpleGradient(predictor)
interpretation = interpreter.saliency_interpret_from_json(inputs)
assert interpretation is not None
assert "instance_1" in interpretation
assert "grad_input_1" in interpretation["instance_1"]
grad_input_1 = interpretation["instance_1"]["grad_input_1"]
assert len(grad_input_1) == 7 # 7 words in input
# two interpretations should be identical for gradient
repeat_interpretation = interpreter.saliency_interpret_from_json(inputs)
repeat_grad_input_1 = repeat_interpretation["instance_1"]["grad_input_1"]
for grad, repeat_grad in zip(grad_input_1, repeat_grad_input_1):
assert grad == approx(repeat_grad)
def test_interpret_fails_when_embedding_layer_not_found(self):
inputs = {"sentence": "It was the ending that I hated"}
vocab = Vocabulary()
vocab.add_tokens_to_namespace([w for w in inputs["sentence"].split(" ")])
model = FakeModelForTestingInterpret(vocab, max_tokens=len(inputs["sentence"].split(" ")))
predictor = TextClassifierPredictor(model, TextClassificationJsonReader())
interpreter = SimpleGradient(predictor)
with raises(RuntimeError):
interpreter.saliency_interpret_from_json(inputs)
def test_interpret_works_with_custom_embedding_layer(self):
inputs = {"sentence": "It was the ending that I hated"}
vocab = Vocabulary()
vocab.add_tokens_to_namespace([w for w in inputs["sentence"].split(" ")])
model = FakeModelForTestingInterpret(vocab, max_tokens=len(inputs["sentence"].split(" ")))
predictor = FakePredictorForTestingInterpret(model, TextClassificationJsonReader())
interpreter = SimpleGradient(predictor)
interpretation = interpreter.saliency_interpret_from_json(inputs)
assert interpretation is not None
assert "instance_1" in interpretation
assert "grad_input_1" in interpretation["instance_1"]
grad_input_1 = interpretation["instance_1"]["grad_input_1"]
assert len(grad_input_1) == 7 # 7 words in input
| allennlp-master | tests/interpret/simple_gradient_test.py |
from pytest import approx, raises
from allennlp.common.testing import AllenNlpTestCase
from allennlp.models.archival import load_archive
from allennlp.interpret.saliency_interpreters import IntegratedGradient
from allennlp.predictors import Predictor, TextClassifierPredictor
from allennlp.data.dataset_readers import TextClassificationJsonReader
from allennlp.data.vocabulary import Vocabulary
from allennlp.common.testing.interpret_test import (
FakeModelForTestingInterpret,
FakePredictorForTestingInterpret,
)
class TestIntegratedGradient(AllenNlpTestCase):
def test_integrated_gradient(self):
inputs = {"sentence": "It was the ending that I hated"}
archive = load_archive(
self.FIXTURES_ROOT / "basic_classifier" / "serialization" / "model.tar.gz"
)
predictor = Predictor.from_archive(archive, "text_classifier")
interpreter = IntegratedGradient(predictor)
interpretation = interpreter.saliency_interpret_from_json(inputs)
assert interpretation is not None
assert "instance_1" in interpretation
assert "grad_input_1" in interpretation["instance_1"]
grad_input_1 = interpretation["instance_1"]["grad_input_1"]
assert len(grad_input_1) == 7 # 7 words in input
# two interpretations should be identical for integrated gradients
repeat_interpretation = interpreter.saliency_interpret_from_json(inputs)
repeat_grad_input_1 = repeat_interpretation["instance_1"]["grad_input_1"]
for grad, repeat_grad in zip(grad_input_1, repeat_grad_input_1):
assert grad == approx(repeat_grad)
def test_interpret_fails_when_embedding_layer_not_found(self):
inputs = {"sentence": "It was the ending that I hated"}
vocab = Vocabulary()
vocab.add_tokens_to_namespace([w for w in inputs["sentence"].split(" ")])
model = FakeModelForTestingInterpret(vocab, max_tokens=len(inputs["sentence"].split(" ")))
predictor = TextClassifierPredictor(model, TextClassificationJsonReader())
interpreter = IntegratedGradient(predictor)
with raises(RuntimeError):
interpreter.saliency_interpret_from_json(inputs)
def test_interpret_works_with_custom_embedding_layer(self):
inputs = {"sentence": "It was the ending that I hated"}
vocab = Vocabulary()
vocab.add_tokens_to_namespace([w for w in inputs["sentence"].split(" ")])
model = FakeModelForTestingInterpret(vocab, max_tokens=len(inputs["sentence"].split(" ")))
predictor = FakePredictorForTestingInterpret(model, TextClassificationJsonReader())
interpreter = IntegratedGradient(predictor)
interpretation = interpreter.saliency_interpret_from_json(inputs)
assert interpretation is not None
assert "instance_1" in interpretation
assert "grad_input_1" in interpretation["instance_1"]
grad_input_1 = interpretation["instance_1"]["grad_input_1"]
assert len(grad_input_1) == 7 # 7 words in input
| allennlp-master | tests/interpret/integrated_gradient_test.py |
allennlp-master | tests/interpret/__init__.py |
|
from pytest import raises
from allennlp.common.testing import AllenNlpTestCase
from allennlp.models.archival import load_archive
from allennlp.interpret.saliency_interpreters import SmoothGradient
from allennlp.predictors import Predictor, TextClassifierPredictor
from allennlp.data.dataset_readers import TextClassificationJsonReader
from allennlp.data.vocabulary import Vocabulary
from allennlp.common.testing.interpret_test import (
FakeModelForTestingInterpret,
FakePredictorForTestingInterpret,
)
class TestSmoothGradient(AllenNlpTestCase):
def test_smooth_gradient(self):
inputs = {"sentence": "It was the ending that I hated"}
archive = load_archive(
self.FIXTURES_ROOT / "basic_classifier" / "serialization" / "model.tar.gz"
)
predictor = Predictor.from_archive(archive, "text_classifier")
interpreter = SmoothGradient(predictor)
interpretation = interpreter.saliency_interpret_from_json(inputs)
assert interpretation is not None
assert "instance_1" in interpretation
assert "grad_input_1" in interpretation["instance_1"]
assert len(interpretation["instance_1"]["grad_input_1"]) == 7 # 7 words in input
def test_interpret_fails_when_embedding_layer_not_found(self):
inputs = {"sentence": "It was the ending that I hated"}
vocab = Vocabulary()
vocab.add_tokens_to_namespace([w for w in inputs["sentence"].split(" ")])
model = FakeModelForTestingInterpret(vocab, max_tokens=len(inputs["sentence"].split(" ")))
predictor = TextClassifierPredictor(model, TextClassificationJsonReader())
interpreter = SmoothGradient(predictor)
with raises(RuntimeError):
interpreter.saliency_interpret_from_json(inputs)
def test_interpret_works_with_custom_embedding_layer(self):
inputs = {"sentence": "It was the ending that I hated"}
vocab = Vocabulary()
vocab.add_tokens_to_namespace([w for w in inputs["sentence"].split(" ")])
model = FakeModelForTestingInterpret(vocab, max_tokens=len(inputs["sentence"].split(" ")))
predictor = FakePredictorForTestingInterpret(model, TextClassificationJsonReader())
interpreter = SmoothGradient(predictor)
interpretation = interpreter.saliency_interpret_from_json(inputs)
assert interpretation is not None
assert "instance_1" in interpretation
assert "grad_input_1" in interpretation["instance_1"]
grad_input_1 = interpretation["instance_1"]["grad_input_1"]
assert len(grad_input_1) == 7 # 7 words in input
| allennlp-master | tests/interpret/smooth_gradient_test.py |
from allennlp.common.testing import AllenNlpTestCase
from allennlp.models.archival import load_archive
from allennlp.predictors import Predictor
from allennlp.interpret.attackers import InputReduction
class TestInputReduction(AllenNlpTestCase):
def test_input_reduction(self):
# test using classification model
inputs = {"sentence": "I always write unit tests for my code."}
archive = load_archive(
self.FIXTURES_ROOT / "basic_classifier" / "serialization" / "model.tar.gz"
)
predictor = Predictor.from_archive(archive)
reducer = InputReduction(predictor)
reduced = reducer.attack_from_json(inputs, "tokens", "grad_input_1")
assert reduced is not None
assert "final" in reduced
assert "original" in reduced
assert reduced["final"][0] # always at least one token
assert len(reduced["final"][0]) <= len(
reduced["original"]
) # input reduction removes tokens
for word in reduced["final"][0]: # no new words entered
assert word in reduced["original"]
# test using NER model (tests different underlying logic)
inputs = {"sentence": "Eric Wallace was an intern at AI2"}
archive = load_archive(
self.FIXTURES_ROOT / "simple_tagger" / "serialization" / "model.tar.gz"
)
predictor = Predictor.from_archive(archive, "sentence_tagger")
reducer = InputReduction(predictor)
reduced = reducer.attack_from_json(inputs, "tokens", "grad_input_1")
assert reduced is not None
assert "final" in reduced
assert "original" in reduced
for reduced_input in reduced["final"]:
assert reduced_input # always at least one token
assert len(reduced_input) <= len(reduced["original"]) # input reduction removes tokens
for word in reduced_input: # no new words entered
assert word in reduced["original"]
| allennlp-master | tests/interpret/input_reduction_test.py |
allennlp-master | tests/tutorials/__init__.py |
|
allennlp-master | tests/tutorials/tagger/__init__.py |
|
import pytest
from allennlp.common.testing import AllenNlpTestCase
@pytest.mark.skip("makes test-install fail (and also takes 30 seconds)")
class TestBasicAllenNlp(AllenNlpTestCase):
@classmethod
def test_run_as_script(cls):
# Just ensure the tutorial runs without throwing an exception.
import tutorials.tagger.basic_allennlp # noqa
| allennlp-master | tests/tutorials/tagger/basic_allennlp_test.py |
import argparse
import csv
import io
import json
import os
import pathlib
import shutil
import sys
import tempfile
import pytest
from allennlp.commands import main
from allennlp.commands.predict import Predict
from allennlp.common.checks import ConfigurationError
from allennlp.common.testing import AllenNlpTestCase
from allennlp.common.util import JsonDict, push_python_path
from allennlp.data.dataset_readers import DatasetReader, TextClassificationJsonReader
from allennlp.models.archival import load_archive
from allennlp.predictors import Predictor, TextClassifierPredictor
class TestPredict(AllenNlpTestCase):
def setup_method(self):
super().setup_method()
self.classifier_model_path = (
self.FIXTURES_ROOT / "basic_classifier" / "serialization" / "model.tar.gz"
)
self.classifier_data_path = (
self.FIXTURES_ROOT / "data" / "text_classification_json" / "imdb_corpus.jsonl"
)
self.tempdir = pathlib.Path(tempfile.mkdtemp())
self.infile = self.tempdir / "inputs.txt"
self.outfile = self.tempdir / "outputs.txt"
def test_add_predict_subparser(self):
parser = argparse.ArgumentParser(description="Testing")
subparsers = parser.add_subparsers(title="Commands", metavar="")
Predict().add_subparser(subparsers)
kebab_args = [
"predict", # command
"/path/to/archive", # archive
"/dev/null", # input_file
"--output-file",
"/dev/null",
"--batch-size",
"10",
"--cuda-device",
"0",
"--silent",
]
args = parser.parse_args(kebab_args)
assert args.func.__name__ == "_predict"
assert args.archive_file == "/path/to/archive"
assert args.output_file == "/dev/null"
assert args.batch_size == 10
assert args.cuda_device == 0
assert args.silent
def test_works_with_known_model(self):
with open(self.infile, "w") as f:
f.write("""{"sentence": "the seahawks won the super bowl in 2016"}\n""")
f.write("""{"sentence": "the mariners won the super bowl in 2037"}\n""")
sys.argv = [
"__main__.py", # executable
"predict", # command
str(self.classifier_model_path),
str(self.infile), # input_file
"--output-file",
str(self.outfile),
"--silent",
]
main()
assert os.path.exists(self.outfile)
with open(self.outfile, "r") as f:
results = [json.loads(line) for line in f]
assert len(results) == 2
for result in results:
assert set(result.keys()) == {"label", "logits", "probs", "tokens", "token_ids"}
shutil.rmtree(self.tempdir)
def test_using_dataset_reader_works_with_known_model(self):
sys.argv = [
"__main__.py", # executable
"predict", # command
str(self.classifier_model_path),
str(self.classifier_data_path), # input_file
"--output-file",
str(self.outfile),
"--silent",
"--use-dataset-reader",
]
main()
assert os.path.exists(self.outfile)
with open(self.outfile, "r") as f:
results = [json.loads(line) for line in f]
assert len(results) == 3
for result in results:
assert set(result.keys()) == {"label", "logits", "loss", "probs", "tokens", "token_ids"}
shutil.rmtree(self.tempdir)
def test_uses_correct_dataset_reader(self):
# We're going to use a fake predictor for this test, just checking that we loaded the
# correct dataset reader. We'll also create a fake dataset reader that subclasses the
# expected one, and specify that one for validation.
@Predictor.register("test-predictor")
class _TestPredictor(Predictor):
def dump_line(self, outputs: JsonDict) -> str:
data = {"dataset_reader_type": type(self._dataset_reader).__name__} # type: ignore
return json.dumps(data) + "\n"
def load_line(self, line: str) -> JsonDict:
raise NotImplementedError
@DatasetReader.register("fake-reader")
class FakeDatasetReader(TextClassificationJsonReader):
pass
# --use-dataset-reader argument only should use validation
sys.argv = [
"__main__.py", # executable
"predict", # command
str(self.classifier_model_path),
str(self.classifier_data_path), # input_file
"--output-file",
str(self.outfile),
"--overrides",
'{"validation_dataset_reader": {"type": "fake-reader"}}',
"--silent",
"--predictor",
"test-predictor",
"--use-dataset-reader",
]
main()
assert os.path.exists(self.outfile)
with open(self.outfile, "r") as f:
results = [json.loads(line) for line in f]
assert results[0]["dataset_reader_type"] == "FakeDatasetReader"
# --use-dataset-reader, override with train
sys.argv = [
"__main__.py", # executable
"predict", # command
str(self.classifier_model_path),
str(self.classifier_data_path), # input_file
"--output-file",
str(self.outfile),
"--overrides",
'{"validation_dataset_reader": {"type": "fake-reader"}}',
"--silent",
"--predictor",
"test-predictor",
"--use-dataset-reader",
"--dataset-reader-choice",
"train",
]
main()
assert os.path.exists(self.outfile)
with open(self.outfile, "r") as f:
results = [json.loads(line) for line in f]
assert results[0]["dataset_reader_type"] == "TextClassificationJsonReader"
# --use-dataset-reader, override with validation
sys.argv = [
"__main__.py", # executable
"predict", # command
str(self.classifier_model_path),
str(self.classifier_data_path), # input_file
"--output-file",
str(self.outfile),
"--overrides",
'{"validation_dataset_reader": {"type": "fake-reader"}}',
"--silent",
"--predictor",
"test-predictor",
"--use-dataset-reader",
"--dataset-reader-choice",
"validation",
]
main()
assert os.path.exists(self.outfile)
with open(self.outfile, "r") as f:
results = [json.loads(line) for line in f]
assert results[0]["dataset_reader_type"] == "FakeDatasetReader"
# No --use-dataset-reader flag, fails because the loading logic
# is not implemented in the testing predictor
sys.argv = [
"__main__.py", # executable
"predict", # command
str(self.classifier_model_path),
str(self.classifier_data_path), # input_file
"--output-file",
str(self.outfile),
"--overrides",
'{"validation_dataset_reader": {"type": "fake-reader"}}',
"--silent",
"--predictor",
"test-predictor",
]
with pytest.raises(NotImplementedError):
main()
def test_base_predictor(self):
# Tests when no Predictor is found and the base class implementation is used
model_path = str(self.classifier_model_path)
archive = load_archive(model_path)
model_type = archive.config.get("model").get("type")
# Makes sure that we don't have a default_predictor for it. Otherwise the base class
# implementation wouldn't be used
from allennlp.models import Model
model_class, _ = Model.resolve_class_name(model_type)
saved_default_predictor = model_class.default_predictor
model_class.default_predictor = None
try:
# Doesn't use a --predictor
sys.argv = [
"__main__.py", # executable
"predict", # command
model_path,
str(self.classifier_data_path), # input_file
"--output-file",
str(self.outfile),
"--silent",
"--use-dataset-reader",
]
main()
assert os.path.exists(self.outfile)
with open(self.outfile, "r") as f:
results = [json.loads(line) for line in f]
assert len(results) == 3
for result in results:
assert set(result.keys()) == {
"logits",
"probs",
"label",
"loss",
"tokens",
"token_ids",
}
finally:
model_class.default_predictor = saved_default_predictor
def test_batch_prediction_works_with_known_model(self):
with open(self.infile, "w") as f:
f.write("""{"sentence": "the seahawks won the super bowl in 2016"}\n""")
f.write("""{"sentence": "the mariners won the super bowl in 2037"}\n""")
sys.argv = [
"__main__.py", # executable
"predict", # command
str(self.classifier_model_path),
str(self.infile), # input_file
"--output-file",
str(self.outfile),
"--silent",
"--batch-size",
"2",
]
main()
assert os.path.exists(self.outfile)
with open(self.outfile, "r") as f:
results = [json.loads(line) for line in f]
assert len(results) == 2
for result in results:
assert set(result.keys()) == {"label", "logits", "probs", "tokens", "token_ids"}
shutil.rmtree(self.tempdir)
def test_fails_without_required_args(self):
        sys.argv = [
            "__main__.py",  # executable
            "predict",  # command
            "/path/to/archive",  # archive, but no input file
        ]
with pytest.raises(SystemExit) as cm:
main()
assert cm.value.code == 2 # argparse code for incorrect usage
def test_can_specify_predictor(self):
@Predictor.register("classification-explicit")
class ExplicitPredictor(TextClassifierPredictor):
"""same as classifier predictor but with an extra field"""
def predict_json(self, inputs: JsonDict) -> JsonDict:
result = super().predict_json(inputs)
result["explicit"] = True
return result
with open(self.infile, "w") as f:
f.write("""{"sentence": "the seahawks won the super bowl in 2016"}\n""")
f.write("""{"sentence": "the mariners won the super bowl in 2037"}\n""")
sys.argv = [
"__main__.py", # executable
"predict", # command
str(self.classifier_model_path),
str(self.infile), # input_file
"--output-file",
str(self.outfile),
"--predictor",
"classification-explicit",
"--silent",
]
main()
assert os.path.exists(self.outfile)
with open(self.outfile, "r") as f:
results = [json.loads(line) for line in f]
assert len(results) == 2
# Overridden predictor should output extra field
for result in results:
assert set(result.keys()) == {
"label",
"logits",
"explicit",
"probs",
"tokens",
"token_ids",
}
shutil.rmtree(self.tempdir)
def test_other_modules(self):
# Create a new package in a temporary dir
packagedir = self.TEST_DIR / "testpackage"
packagedir.mkdir()
(packagedir / "__init__.py").touch()
# And add that directory to the path
with push_python_path(self.TEST_DIR):
# Write out a duplicate predictor there, but registered under a different name.
from allennlp.predictors import text_classifier
with open(text_classifier.__file__) as f:
code = f.read().replace(
"""@Predictor.register("text_classifier")""",
"""@Predictor.register("duplicate-test-predictor")""",
)
with open(os.path.join(packagedir, "predictor.py"), "w") as f:
f.write(code)
self.infile = os.path.join(self.TEST_DIR, "inputs.txt")
self.outfile = os.path.join(self.TEST_DIR, "outputs.txt")
with open(self.infile, "w") as f:
f.write("""{"sentence": "the seahawks won the super bowl in 2016"}\n""")
f.write("""{"sentence": "the mariners won the super bowl in 2037"}\n""")
sys.argv = [
"__main__.py", # executable
"predict", # command
str(self.classifier_model_path),
str(self.infile), # input_file
"--output-file",
str(self.outfile),
"--predictor",
"duplicate-test-predictor",
"--silent",
]
# Should raise ConfigurationError, because predictor is unknown
with pytest.raises(ConfigurationError):
main()
# But once we include testpackage, it should be known
sys.argv.extend(["--include-package", "testpackage"])
main()
assert os.path.exists(self.outfile)
with open(self.outfile, "r") as f:
results = [json.loads(line) for line in f]
assert len(results) == 2
# Overridden predictor should output extra field
for result in results:
assert set(result.keys()) == {"label", "logits", "probs", "tokens", "token_ids"}
def test_alternative_file_formats(self):
@Predictor.register("classification-csv")
class CsvPredictor(TextClassifierPredictor):
"""same as classification predictor but using CSV inputs and outputs"""
def load_line(self, line: str) -> JsonDict:
reader = csv.reader([line])
sentence, label = next(reader)
return {"sentence": sentence, "label": label}
def dump_line(self, outputs: JsonDict) -> str:
output = io.StringIO()
writer = csv.writer(output)
row = [outputs["label"], *outputs["probs"]]
writer.writerow(row)
return output.getvalue()
with open(self.infile, "w") as f:
writer = csv.writer(f)
writer.writerow(["the seahawks won the super bowl in 2016", "pos"])
writer.writerow(["the mariners won the super bowl in 2037", "neg"])
sys.argv = [
"__main__.py", # executable
"predict", # command
str(self.classifier_model_path),
str(self.infile), # input_file
"--output-file",
str(self.outfile),
"--predictor",
"classification-csv",
"--silent",
]
main()
assert os.path.exists(self.outfile)
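        # Each output row is the predicted label followed by one probability per class.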
with open(self.outfile) as f:
reader = csv.reader(f)
results = [row for row in reader]
assert len(results) == 2
for row in results:
assert len(row) == 3 # label and 2 class probabilities
label, *probs = row
for prob in probs:
assert 0 <= float(prob) <= 1
assert label != ""
shutil.rmtree(self.tempdir)
| allennlp-master | tests/commands/predict_test.py |
import argparse
import os
import pytest
from allennlp.common import Params
from allennlp.data import Vocabulary
from allennlp.data import DataLoader
from allennlp.models import Model
from allennlp.common.checks import ConfigurationError
from allennlp.common.testing import AllenNlpTestCase, requires_multi_gpu
from allennlp.commands.find_learning_rate import (
search_learning_rate,
find_learning_rate_from_args,
find_learning_rate_model,
FindLearningRate,
)
from allennlp.training import Trainer
from allennlp.training.util import datasets_from_params
def is_matplotlib_installed():
try:
import matplotlib # noqa: F401 - Matplotlib is optional.
except: # noqa: E722. Any exception means we don't have a working matplotlib.
return False
return True
class TestFindLearningRate(AllenNlpTestCase):
def setup_method(self):
super().setup_method()
self.params = lambda: Params(
{
"model": {
"type": "simple_tagger",
"text_field_embedder": {
"token_embedders": {"tokens": {"type": "embedding", "embedding_dim": 5}}
},
"encoder": {"type": "lstm", "input_size": 5, "hidden_size": 7, "num_layers": 2},
},
"dataset_reader": {"type": "sequence_tagging"},
"train_data_path": str(self.FIXTURES_ROOT / "data" / "sequence_tagging.tsv"),
"validation_data_path": str(self.FIXTURES_ROOT / "data" / "sequence_tagging.tsv"),
"data_loader": {"batch_size": 2},
"trainer": {"cuda_device": -1, "num_epochs": 2, "optimizer": "adam"},
}
)
@pytest.mark.skipif(not is_matplotlib_installed(), reason="matplotlib dependency is optional")
def test_find_learning_rate(self):
find_learning_rate_model(
self.params(),
os.path.join(self.TEST_DIR, "test_find_learning_rate"),
start_lr=1e-5,
end_lr=1,
num_batches=100,
linear_steps=True,
stopping_factor=None,
force=False,
)
# It's OK if serialization dir exists but is empty:
serialization_dir2 = os.path.join(self.TEST_DIR, "empty_directory")
assert not os.path.exists(serialization_dir2)
os.makedirs(serialization_dir2)
find_learning_rate_model(
self.params(),
serialization_dir2,
start_lr=1e-5,
end_lr=1,
num_batches=100,
linear_steps=True,
stopping_factor=None,
force=False,
)
        # It's not OK if the serialization dir exists and already has junk in it (i.e. is non-empty):
serialization_dir3 = os.path.join(self.TEST_DIR, "non_empty_directory")
assert not os.path.exists(serialization_dir3)
os.makedirs(serialization_dir3)
with open(os.path.join(serialization_dir3, "README.md"), "w") as f:
f.write("TEST")
with pytest.raises(ConfigurationError):
find_learning_rate_model(
self.params(),
serialization_dir3,
start_lr=1e-5,
end_lr=1,
num_batches=100,
linear_steps=True,
stopping_factor=None,
force=False,
)
# ... unless you use the --force flag.
find_learning_rate_model(
self.params(),
serialization_dir3,
start_lr=1e-5,
end_lr=1,
num_batches=100,
linear_steps=True,
stopping_factor=None,
force=True,
)
def test_find_learning_rate_args(self):
parser = argparse.ArgumentParser(description="Testing")
subparsers = parser.add_subparsers(title="Commands", metavar="")
FindLearningRate().add_subparser(subparsers)
for serialization_arg in ["-s", "--serialization-dir"]:
raw_args = ["find-lr", "path/to/params", serialization_arg, "serialization_dir"]
args = parser.parse_args(raw_args)
assert args.func == find_learning_rate_from_args
assert args.param_path == "path/to/params"
assert args.serialization_dir == "serialization_dir"
# config is required
with pytest.raises(SystemExit) as cm:
parser.parse_args(["find-lr", "-s", "serialization_dir"])
        assert cm.value.code == 2  # argparse code for incorrect usage
# serialization dir is required
with pytest.raises(SystemExit) as cm:
parser.parse_args(["find-lr", "path/to/params"])
        assert cm.value.code == 2  # argparse code for incorrect usage
@requires_multi_gpu
def test_find_learning_rate_multi_gpu(self):
params = self.params()
del params["trainer"]["cuda_device"]
params["distributed"] = Params({})
params["distributed"]["cuda_devices"] = [0, 1]
with pytest.raises(AssertionError) as execinfo:
find_learning_rate_model(
params,
os.path.join(self.TEST_DIR, "test_find_learning_rate_multi_gpu"),
start_lr=1e-5,
end_lr=1,
num_batches=100,
linear_steps=True,
stopping_factor=None,
force=False,
)
assert "DistributedDataParallel" in str(execinfo.value)
class TestSearchLearningRate(AllenNlpTestCase):
def setup_method(self):
super().setup_method()
params = Params(
{
"model": {
"type": "simple_tagger",
"text_field_embedder": {
"token_embedders": {"tokens": {"type": "embedding", "embedding_dim": 5}}
},
"encoder": {"type": "lstm", "input_size": 5, "hidden_size": 7, "num_layers": 2},
},
"dataset_reader": {"type": "sequence_tagging"},
"train_data_path": str(self.FIXTURES_ROOT / "data" / "sequence_tagging.tsv"),
"validation_data_path": str(self.FIXTURES_ROOT / "data" / "sequence_tagging.tsv"),
"data_loader": {"batch_size": 2},
"trainer": {"cuda_device": -1, "num_epochs": 2, "optimizer": "adam"},
}
)
all_datasets = datasets_from_params(params)
vocab = Vocabulary.from_params(
params.pop("vocabulary", {}),
instances=(instance for dataset in all_datasets.values() for instance in dataset),
)
model = Model.from_params(vocab=vocab, params=params.pop("model"))
train_data = all_datasets["train"]
train_data.index_with(vocab)
data_loader = DataLoader.from_params(dataset=train_data, params=params.pop("data_loader"))
trainer_params = params.pop("trainer")
serialization_dir = os.path.join(self.TEST_DIR, "test_search_learning_rate")
self.trainer = Trainer.from_params(
model=model,
serialization_dir=serialization_dir,
data_loader=data_loader,
train_data=train_data,
params=trainer_params,
validation_data=None,
validation_iterator=None,
)
def test_search_learning_rate_with_num_batches_less_than_ten(self):
with pytest.raises(ConfigurationError):
search_learning_rate(self.trainer, num_batches=9)
def test_search_learning_rate_linear_steps(self):
learning_rates_losses = search_learning_rate(self.trainer, linear_steps=True)
assert len(learning_rates_losses) > 1
def test_search_learning_rate_without_stopping_factor(self):
learning_rates, losses = search_learning_rate(
self.trainer, num_batches=100, stopping_factor=None
)
assert len(learning_rates) == 101
assert len(losses) == 101
| allennlp-master | tests/commands/find_learning_rate_test.py |
import argparse
import copy
import json
import logging
import math
import os
import re
import shutil
from collections import OrderedDict, Counter
from typing import Iterable, Optional, List, Dict, Any
import pytest
import torch
from allennlp.commands.train import Train, train_model, train_model_from_args, TrainModel
from allennlp.common import Params
from allennlp.common.checks import ConfigurationError
from allennlp.common.testing import AllenNlpTestCase, cpu_or_gpu
from allennlp.data import DatasetReader, Instance, Vocabulary
from allennlp.data.dataloader import TensorDict
from allennlp.models import load_archive, Model
from allennlp.models.archival import CONFIG_NAME
from allennlp.training import BatchCallback, GradientDescentTrainer
from allennlp.training.learning_rate_schedulers import (
ExponentialLearningRateScheduler,
LearningRateScheduler,
)
SEQUENCE_TAGGING_DATA_PATH = str(AllenNlpTestCase.FIXTURES_ROOT / "data" / "sequence_tagging.tsv")
SEQUENCE_TAGGING_SHARDS_PATH = str(AllenNlpTestCase.FIXTURES_ROOT / "data" / "shards" / "*")
@BatchCallback.register("training_data_logger")
class TrainingDataLoggerBatchCallback(BatchCallback):
def __call__( # type: ignore
self,
trainer: "GradientDescentTrainer",
batch_inputs: List[TensorDict],
batch_outputs: List[Dict[str, Any]],
batch_metrics: Dict[str, Any],
epoch: int,
batch_number: int,
is_training: bool,
is_master: bool,
) -> None:
if is_training:
logger = logging.getLogger(__name__)
for batch in batch_inputs:
for metadata in batch["metadata"]:
logger.info(f"First word from training data: '{metadata['words'][0]}'") # type: ignore
_seen_training_devices = set()
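# _seen_training_devices is module-level so the callback below can record which devices the
# model parameters were on; the GPU/CPU detection tests inspect it after training.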
@BatchCallback.register("training_device_logger")
class TrainingDeviceLoggerBatchCallback(BatchCallback):
def __call__( # type: ignore
self,
trainer: "GradientDescentTrainer",
batch_inputs: List[TensorDict],
batch_outputs: List[Dict[str, Any]],
batch_metrics: Dict[str, Any],
epoch: int,
batch_number: int,
is_training: bool,
is_master: bool,
) -> None:
global _seen_training_devices
for tensor in trainer.model.parameters():
_seen_training_devices.add(tensor.device)
class TestTrain(AllenNlpTestCase):
DEFAULT_PARAMS = Params(
{
"model": {
"type": "simple_tagger",
"text_field_embedder": {
"token_embedders": {"tokens": {"type": "embedding", "embedding_dim": 5}}
},
"encoder": {"type": "lstm", "input_size": 5, "hidden_size": 7, "num_layers": 2},
},
"dataset_reader": {"type": "sequence_tagging"},
"train_data_path": SEQUENCE_TAGGING_DATA_PATH,
"validation_data_path": SEQUENCE_TAGGING_DATA_PATH,
"data_loader": {"batch_size": 2},
"trainer": {"num_epochs": 2, "optimizer": "adam"},
}
)
def test_train_model(self):
params = lambda: copy.deepcopy(self.DEFAULT_PARAMS)
train_model(params(), serialization_dir=os.path.join(self.TEST_DIR, "test_train_model"))
# It's OK if serialization dir exists but is empty:
serialization_dir2 = os.path.join(self.TEST_DIR, "empty_directory")
assert not os.path.exists(serialization_dir2)
os.makedirs(serialization_dir2)
train_model(params(), serialization_dir=serialization_dir2)
        # It's not OK if the serialization dir exists and already has junk in it (i.e. is non-empty):
serialization_dir3 = os.path.join(self.TEST_DIR, "non_empty_directory")
assert not os.path.exists(serialization_dir3)
os.makedirs(serialization_dir3)
with open(os.path.join(serialization_dir3, "README.md"), "w") as f:
f.write("TEST")
with pytest.raises(ConfigurationError):
train_model(params(), serialization_dir=serialization_dir3)
# It's also not OK if serialization dir is a real serialization dir:
with pytest.raises(ConfigurationError):
train_model(params(), serialization_dir=os.path.join(self.TEST_DIR, "test_train_model"))
# But it's OK if serialization dir exists and --recover is specified:
train_model(
params(),
serialization_dir=os.path.join(self.TEST_DIR, "test_train_model"),
recover=True,
)
        # It's OK if the serialization dir exists when --force is specified (it will be deleted):
train_model(
params(), serialization_dir=os.path.join(self.TEST_DIR, "test_train_model"), force=True
)
# But --force and --recover cannot both be specified
with pytest.raises(ConfigurationError):
train_model(
params(),
serialization_dir=os.path.join(self.TEST_DIR, "test_train_model"),
force=True,
recover=True,
)
@cpu_or_gpu
def test_detect_gpu(self):
import copy
params = copy.deepcopy(self.DEFAULT_PARAMS)
params["trainer"]["batch_callbacks"] = ["training_device_logger"]
global _seen_training_devices
_seen_training_devices.clear()
train_model(params, serialization_dir=os.path.join(self.TEST_DIR, "test_detect_gpu"))
assert len(_seen_training_devices) == 1
seen_training_device = next(iter(_seen_training_devices))
if torch.cuda.device_count() == 0:
assert seen_training_device.type == "cpu"
else:
assert seen_training_device.type == "cuda"
@cpu_or_gpu
def test_force_gpu(self):
import copy
params = copy.deepcopy(self.DEFAULT_PARAMS)
params["trainer"]["batch_callbacks"] = ["training_device_logger"]
params["trainer"]["cuda_device"] = 0
global _seen_training_devices
_seen_training_devices.clear()
if torch.cuda.device_count() == 0:
with pytest.raises(ConfigurationError):
train_model(params, serialization_dir=os.path.join(self.TEST_DIR, "test_force_gpu"))
else:
train_model(params, serialization_dir=os.path.join(self.TEST_DIR, "test_force_gpu"))
assert len(_seen_training_devices) == 1
seen_training_device = next(iter(_seen_training_devices))
assert seen_training_device.type == "cuda"
@cpu_or_gpu
def test_force_cpu(self):
import copy
params = copy.deepcopy(self.DEFAULT_PARAMS)
params["trainer"]["batch_callbacks"] = ["training_device_logger"]
params["trainer"]["cuda_device"] = -1
global _seen_training_devices
_seen_training_devices.clear()
train_model(params, serialization_dir=os.path.join(self.TEST_DIR, "test_force_cpu"))
assert len(_seen_training_devices) == 1
seen_training_device = next(iter(_seen_training_devices))
assert seen_training_device.type == "cpu"
@cpu_or_gpu
def test_train_model_distributed(self):
if torch.cuda.device_count() >= 2:
devices = [0, 1]
else:
devices = [-1, -1]
params = lambda: Params(
{
"model": {
"type": "simple_tagger",
"text_field_embedder": {
"token_embedders": {"tokens": {"type": "embedding", "embedding_dim": 5}}
},
"encoder": {"type": "lstm", "input_size": 5, "hidden_size": 7, "num_layers": 2},
},
"dataset_reader": {"type": "sequence_tagging"},
"train_data_path": SEQUENCE_TAGGING_DATA_PATH,
"validation_data_path": SEQUENCE_TAGGING_DATA_PATH,
"data_loader": {"batch_size": 2},
"trainer": {"num_epochs": 2, "optimizer": "adam"},
"distributed": {"cuda_devices": devices},
}
)
out_dir = os.path.join(self.TEST_DIR, "test_distributed_train")
train_model(params(), serialization_dir=out_dir)
# Check that some logs specific to distributed
# training are where we expect.
serialized_files = os.listdir(out_dir)
assert "out_worker0.log" in serialized_files
assert "out_worker1.log" in serialized_files
assert "model.tar.gz" in serialized_files
assert "metrics.json" in serialized_files
# Make sure the metrics look right.
with open(os.path.join(out_dir, "metrics.json")) as f:
metrics = json.load(f)
assert metrics["peak_worker_0_memory_MB"] > 0
assert metrics["peak_worker_1_memory_MB"] > 0
if torch.cuda.device_count() >= 2:
assert metrics["peak_gpu_0_memory_MB"] > 0
assert metrics["peak_gpu_1_memory_MB"] > 0
# Check we can load the serialized model
assert load_archive(out_dir).model
@cpu_or_gpu
@pytest.mark.parametrize("lazy", [True, False])
def test_train_model_distributed_with_sharded_reader(self, lazy):
if torch.cuda.device_count() >= 2:
devices = [0, 1]
else:
devices = [-1, -1]
params = lambda: Params(
{
"model": {
"type": "simple_tagger",
"text_field_embedder": {
"token_embedders": {"tokens": {"type": "embedding", "embedding_dim": 5}}
},
"encoder": {"type": "lstm", "input_size": 5, "hidden_size": 7, "num_layers": 2},
},
"dataset_reader": {
"type": "sharded",
"base_reader": {"type": "sequence_tagging"},
"lazy": lazy,
},
"train_data_path": SEQUENCE_TAGGING_SHARDS_PATH,
"validation_data_path": SEQUENCE_TAGGING_SHARDS_PATH,
"data_loader": {"batch_size": 2},
"trainer": {"num_epochs": 2, "optimizer": "adam"},
"distributed": {"cuda_devices": devices},
}
)
out_dir = os.path.join(self.TEST_DIR, "test_distributed_train")
train_model(params(), serialization_dir=out_dir)
# Check that some logs specific to distributed
# training are where we expect.
serialized_files = os.listdir(out_dir)
assert "out_worker0.log" in serialized_files
assert "out_worker1.log" in serialized_files
assert "model.tar.gz" in serialized_files
# Check we can load the serialized model
archive = load_archive(out_dir)
assert archive.model
# Check that we created a vocab from all the shards.
tokens = archive.model.vocab._token_to_index["tokens"].keys()
assert tokens == {
"@@PADDING@@",
"@@UNKNOWN@@",
"are",
".",
"animals",
"plants",
"vehicles",
"cats",
"dogs",
"snakes",
"birds",
"ferns",
"trees",
"flowers",
"vegetables",
"cars",
"buses",
"planes",
"rockets",
}
# TODO: This is somewhat brittle. Make these constants in trainer.py.
train_early = "finishing training early!"
validation_early = "finishing validation early!"
train_complete = "completed its entire epoch (training)."
validation_complete = "completed its entire epoch (validation)."
# There are three shards, but only two workers, so the first worker will have to discard some data.
with open(os.path.join(out_dir, "out_worker0.log")) as f:
worker0_log = f.read()
assert train_early in worker0_log
assert validation_early in worker0_log
assert train_complete not in worker0_log
assert validation_complete not in worker0_log
with open(os.path.join(out_dir, "out_worker1.log")) as f:
worker1_log = f.read()
assert train_early not in worker1_log
assert validation_early not in worker1_log
assert train_complete in worker1_log
assert validation_complete in worker1_log
@cpu_or_gpu
@pytest.mark.parametrize("lazy", [True, False])
def test_train_model_distributed_without_sharded_reader(self, lazy: bool):
if torch.cuda.device_count() >= 2:
devices = [0, 1]
else:
devices = [-1, -1]
num_epochs = 2
params = lambda: Params(
{
"model": {
"type": "simple_tagger",
"text_field_embedder": {
"token_embedders": {"tokens": {"type": "embedding", "embedding_dim": 5}}
},
"encoder": {"type": "lstm", "input_size": 5, "hidden_size": 7, "num_layers": 2},
},
"dataset_reader": {"type": "sequence_tagging", "lazy": lazy},
"train_data_path": SEQUENCE_TAGGING_DATA_PATH,
"validation_data_path": SEQUENCE_TAGGING_DATA_PATH,
"data_loader": {"batch_size": 1},
"trainer": {
"num_epochs": num_epochs,
"optimizer": "adam",
"batch_callbacks": [
"tests.commands.train_test.TrainingDataLoggerBatchCallback"
],
},
"distributed": {"cuda_devices": devices},
}
)
out_dir = os.path.join(self.TEST_DIR, "test_distributed_train")
train_model(params(), serialization_dir=out_dir)
# Check that some logs specific to distributed
# training are where we expect.
serialized_files = os.listdir(out_dir)
assert "out_worker0.log" in serialized_files
assert "out_worker1.log" in serialized_files
assert "model.tar.gz" in serialized_files
# Check we can load the serialized model
archive = load_archive(out_dir)
assert archive.model
# Check that we created a vocab from all the shards.
tokens = set(archive.model.vocab._token_to_index["tokens"].keys())
assert tokens == {
"@@PADDING@@",
"@@UNKNOWN@@",
"are",
".",
"animals",
"cats",
"dogs",
"snakes",
"birds",
}
train_complete = "completed its entire epoch (training)."
validation_complete = "completed its entire epoch (validation)."
import re
pattern = re.compile(r"First word from training data: '([^']*)'")
first_word_counts = Counter() # type: ignore
with open(os.path.join(out_dir, "out_worker0.log")) as f:
worker0_log = f.read()
assert train_complete in worker0_log
assert validation_complete in worker0_log
for first_word in pattern.findall(worker0_log):
first_word_counts[first_word] += 1
with open(os.path.join(out_dir, "out_worker1.log")) as f:
worker1_log = f.read()
assert train_complete in worker1_log
assert validation_complete in worker1_log
for first_word in pattern.findall(worker1_log):
first_word_counts[first_word] += 1
assert first_word_counts == {
"cats": num_epochs,
"dogs": num_epochs,
"snakes": num_epochs,
"birds": num_epochs,
}
def test_distributed_raises_error_with_no_gpus(self):
params = Params(
{
"model": {
"type": "simple_tagger",
"text_field_embedder": {
"token_embedders": {"tokens": {"type": "embedding", "embedding_dim": 5}}
},
"encoder": {"type": "lstm", "input_size": 5, "hidden_size": 7, "num_layers": 2},
},
"dataset_reader": {"type": "sequence_tagging"},
"train_data_path": SEQUENCE_TAGGING_DATA_PATH,
"validation_data_path": SEQUENCE_TAGGING_DATA_PATH,
"data_loader": {"batch_size": 2},
"trainer": {"num_epochs": 2, "optimizer": "adam"},
"distributed": {},
}
)
with pytest.raises(ConfigurationError):
train_model(params, serialization_dir=os.path.join(self.TEST_DIR, "test_train_model"))
def test_train_saves_all_keys_in_config(self):
params = Params(
{
"model": {
"type": "simple_tagger",
"text_field_embedder": {
"token_embedders": {"tokens": {"type": "embedding", "embedding_dim": 5}}
},
"encoder": {"type": "lstm", "input_size": 5, "hidden_size": 7, "num_layers": 2},
},
"pytorch_seed": 42,
"numpy_seed": 42,
"random_seed": 42,
"dataset_reader": {"type": "sequence_tagging"},
"train_data_path": SEQUENCE_TAGGING_DATA_PATH,
"validation_data_path": SEQUENCE_TAGGING_DATA_PATH,
"data_loader": {"batch_size": 2},
"trainer": {"num_epochs": 2, "optimizer": "adam"},
}
)
serialization_dir = os.path.join(self.TEST_DIR, "test_train_model")
params_as_dict = (
params.as_ordered_dict()
) # Do it here as train_model will pop all the values.
train_model(params, serialization_dir=serialization_dir)
config_path = os.path.join(serialization_dir, CONFIG_NAME)
with open(config_path) as config:
saved_config_as_dict = OrderedDict(json.load(config))
assert params_as_dict == saved_config_as_dict
    def test_error_is_thrown_when_cuda_device_is_not_available(self):
params = Params(
{
"model": {
"type": "simple_tagger",
"text_field_embedder": {
"token_embedders": {"tokens": {"type": "embedding", "embedding_dim": 5}}
},
"encoder": {"type": "lstm", "input_size": 5, "hidden_size": 7, "num_layers": 2},
},
"dataset_reader": {"type": "sequence_tagging"},
"train_data_path": "test_fixtures/data/sequence_tagging.tsv",
"validation_data_path": "test_fixtures/data/sequence_tagging.tsv",
"data_loader": {"batch_size": 2},
"trainer": {
"num_epochs": 2,
"cuda_device": torch.cuda.device_count(),
"optimizer": "adam",
},
}
)
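        # torch.cuda.device_count() is always one past the last valid device index,
        # so this cuda_device can never be available.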
with pytest.raises(ConfigurationError, match="Experiment specified"):
train_model(params, serialization_dir=os.path.join(self.TEST_DIR, "test_train_model"))
def test_train_with_test_set(self):
params = Params(
{
"model": {
"type": "simple_tagger",
"text_field_embedder": {
"token_embedders": {"tokens": {"type": "embedding", "embedding_dim": 5}}
},
"encoder": {"type": "lstm", "input_size": 5, "hidden_size": 7, "num_layers": 2},
},
"dataset_reader": {"type": "sequence_tagging"},
"train_data_path": SEQUENCE_TAGGING_DATA_PATH,
"test_data_path": SEQUENCE_TAGGING_DATA_PATH,
"validation_data_path": SEQUENCE_TAGGING_DATA_PATH,
"evaluate_on_test": True,
"data_loader": {"batch_size": 2},
"trainer": {"num_epochs": 2, "optimizer": "adam"},
}
)
train_model(params, serialization_dir=os.path.join(self.TEST_DIR, "train_with_test_set"))
def test_train_number_of_steps(self):
number_of_epochs = 2
last_num_steps_per_epoch: Optional[int] = None
@LearningRateScheduler.register("mock")
class MockLRScheduler(ExponentialLearningRateScheduler):
def __init__(self, optimizer: torch.optim.Optimizer, num_steps_per_epoch: int):
super().__init__(optimizer)
nonlocal last_num_steps_per_epoch
last_num_steps_per_epoch = num_steps_per_epoch
batch_callback_counter = 0
@BatchCallback.register("counter")
class CounterBatchCallback(BatchCallback):
def __call__(
self,
trainer: GradientDescentTrainer,
batch_inputs: List[List[TensorDict]],
batch_outputs: List[Dict[str, Any]],
batch_metrics: Dict[str, Any],
epoch: int,
batch_number: int,
is_training: bool,
is_master: bool,
) -> None:
nonlocal batch_callback_counter
if is_training:
batch_callback_counter += 1
params = Params(
{
"model": {
"type": "simple_tagger",
"text_field_embedder": {
"token_embedders": {"tokens": {"type": "embedding", "embedding_dim": 5}}
},
"encoder": {"type": "lstm", "input_size": 5, "hidden_size": 7, "num_layers": 2},
},
"dataset_reader": {"type": "sequence_tagging"},
"train_data_path": SEQUENCE_TAGGING_DATA_PATH,
"test_data_path": SEQUENCE_TAGGING_DATA_PATH,
"validation_data_path": SEQUENCE_TAGGING_DATA_PATH,
"evaluate_on_test": True,
"data_loader": {"batch_size": 2},
"trainer": {
"num_epochs": number_of_epochs,
"optimizer": "adam",
"learning_rate_scheduler": {"type": "mock"},
"batch_callbacks": ["counter"],
},
}
)
train_model(
params.duplicate(), serialization_dir=os.path.join(self.TEST_DIR, "train_normal")
)
assert batch_callback_counter == last_num_steps_per_epoch * number_of_epochs
batch_callback_counter = 0
normal_steps_per_epoch = last_num_steps_per_epoch
original_batch_size = params["data_loader"]["batch_size"]
params["data_loader"]["batch_size"] = 1
train_model(
params.duplicate(), serialization_dir=os.path.join(self.TEST_DIR, "train_with_bs1")
)
assert batch_callback_counter == last_num_steps_per_epoch * number_of_epochs
batch_callback_counter = 0
assert normal_steps_per_epoch == math.ceil(last_num_steps_per_epoch / original_batch_size)
params["data_loader"]["batch_size"] = original_batch_size
params["trainer"]["num_gradient_accumulation_steps"] = 3
train_model(params, serialization_dir=os.path.join(self.TEST_DIR, "train_with_ga"))
assert batch_callback_counter == last_num_steps_per_epoch * number_of_epochs
batch_callback_counter = 0
assert math.ceil(normal_steps_per_epoch / 3) == last_num_steps_per_epoch
def test_train_args(self):
parser = argparse.ArgumentParser(description="Testing")
subparsers = parser.add_subparsers(title="Commands", metavar="")
Train().add_subparser(subparsers)
for serialization_arg in ["-s", "--serialization-dir"]:
raw_args = ["train", "path/to/params", serialization_arg, "serialization_dir"]
args = parser.parse_args(raw_args)
assert args.func == train_model_from_args
assert args.param_path == "path/to/params"
assert args.serialization_dir == "serialization_dir"
# config is required
with pytest.raises(SystemExit) as cm:
args = parser.parse_args(["train", "-s", "serialization_dir"])
        assert cm.value.code == 2  # argparse code for incorrect usage
# serialization dir is required
with pytest.raises(SystemExit) as cm:
args = parser.parse_args(["train", "path/to/params"])
        assert cm.value.code == 2  # argparse code for incorrect usage
def test_train_model_can_instantiate_from_params(self):
params = Params.from_file(self.FIXTURES_ROOT / "simple_tagger" / "experiment.json")
# Can instantiate from base class params
TrainModel.from_params(
params=params, serialization_dir=self.TEST_DIR, local_rank=0, batch_weight_key=""
)
def test_train_can_fine_tune_model_from_archive(self):
params = Params.from_file(
self.FIXTURES_ROOT / "basic_classifier" / "experiment_from_archive.jsonnet"
)
train_loop = TrainModel.from_params(
params=params, serialization_dir=self.TEST_DIR, local_rank=0, batch_weight_key=""
)
train_loop.run()
model = Model.from_archive(
self.FIXTURES_ROOT / "basic_classifier" / "serialization" / "model.tar.gz"
)
# This is checking that the vocabulary actually got extended. The data that we're using for
# training is different from the data we used to produce the model archive, and we set
# parameters such that the vocab should have been extended.
assert train_loop.model.vocab.get_vocab_size() > model.vocab.get_vocab_size()
@DatasetReader.register("lazy-test")
class LazyFakeReader(DatasetReader):
def __init__(self) -> None:
super().__init__(lazy=True)
self.reader = DatasetReader.from_params(Params({"type": "sequence_tagging", "lazy": True}))
def _read(self, file_path: str) -> Iterable[Instance]:
"""
Reads some data from the `file_path` and returns the instances.
"""
return self.reader.read(file_path)
class TestTrainOnLazyDataset(AllenNlpTestCase):
def test_train_model(self):
params = Params(
{
"model": {
"type": "simple_tagger",
"text_field_embedder": {
"token_embedders": {"tokens": {"type": "embedding", "embedding_dim": 5}}
},
"encoder": {"type": "lstm", "input_size": 5, "hidden_size": 7, "num_layers": 2},
},
"dataset_reader": {"type": "lazy-test"},
"train_data_path": SEQUENCE_TAGGING_DATA_PATH,
"validation_data_path": SEQUENCE_TAGGING_DATA_PATH,
"data_loader": {"batch_size": 2},
"trainer": {"num_epochs": 2, "optimizer": "adam"},
}
)
train_model(params, serialization_dir=os.path.join(self.TEST_DIR, "train_lazy_model"))
def test_train_with_test_set(self):
params = Params(
{
"model": {
"type": "simple_tagger",
"text_field_embedder": {
"token_embedders": {"tokens": {"type": "embedding", "embedding_dim": 5}}
},
"encoder": {"type": "lstm", "input_size": 5, "hidden_size": 7, "num_layers": 2},
},
"dataset_reader": {"type": "lazy-test"},
"train_data_path": SEQUENCE_TAGGING_DATA_PATH,
"test_data_path": SEQUENCE_TAGGING_DATA_PATH,
"validation_data_path": SEQUENCE_TAGGING_DATA_PATH,
"evaluate_on_test": True,
"data_loader": {"batch_size": 2},
"trainer": {"num_epochs": 2, "optimizer": "adam"},
}
)
train_model(params, serialization_dir=os.path.join(self.TEST_DIR, "lazy_test_set"))
def test_train_nograd_regex(self):
params_get = lambda: Params(
{
"model": {
"type": "simple_tagger",
"text_field_embedder": {
"token_embedders": {"tokens": {"type": "embedding", "embedding_dim": 5}}
},
"encoder": {"type": "lstm", "input_size": 5, "hidden_size": 7, "num_layers": 2},
},
"dataset_reader": {"type": "sequence_tagging"},
"train_data_path": SEQUENCE_TAGGING_DATA_PATH,
"validation_data_path": SEQUENCE_TAGGING_DATA_PATH,
"data_loader": {"batch_size": 2},
"trainer": {"num_epochs": 2, "optimizer": "adam"},
}
)
serialization_dir = os.path.join(self.TEST_DIR, "test_train_nograd")
regex_lists = [[], [".*text_field_embedder.*"], [".*text_field_embedder.*", ".*encoder.*"]]
for regex_list in regex_lists:
params = params_get()
params["trainer"]["no_grad"] = regex_list
shutil.rmtree(serialization_dir, ignore_errors=True)
model = train_model(params, serialization_dir=serialization_dir)
# If a regex matches a parameter name, that parameter should have requires_grad=False;
# otherwise it should have requires_grad=True.
for name, parameter in model.named_parameters():
if any(re.search(regex, name) for regex in regex_list):
assert not parameter.requires_grad
else:
assert parameter.requires_grad
# If all parameters have requires_grad=False, training should raise an error.
params = params_get()
params["trainer"]["no_grad"] = ["*"]
shutil.rmtree(serialization_dir, ignore_errors=True)
with pytest.raises(Exception):
train_model(params, serialization_dir=serialization_dir)
class TestDryRun(AllenNlpTestCase):
def setup_method(self):
super().setup_method()
self.params = Params(
{
"model": {
"type": "simple_tagger",
"text_field_embedder": {
"token_embedders": {"tokens": {"type": "embedding", "embedding_dim": 5}}
},
"encoder": {"type": "lstm", "input_size": 5, "hidden_size": 7, "num_layers": 2},
},
"dataset_reader": {"type": "sequence_tagging"},
"train_data_path": str(self.FIXTURES_ROOT / "data" / "sequence_tagging.tsv"),
"validation_data_path": str(self.FIXTURES_ROOT / "data" / "sequence_tagging.tsv"),
"data_loader": {"batch_size": 2},
"trainer": {"num_epochs": 2, "optimizer": "adam"},
}
)
def test_dry_run_doesnt_overwrite_vocab(self):
vocab_path = self.TEST_DIR / "vocabulary"
os.mkdir(vocab_path)
# Put something in the vocab directory
with open(vocab_path / "test.txt", "a+") as open_file:
open_file.write("test")
# It should raise an error if the vocab dir is non-empty
with pytest.raises(ConfigurationError):
train_model(self.params, self.TEST_DIR, dry_run=True)
def test_dry_run_makes_vocab(self):
vocab_path = self.TEST_DIR / "vocabulary"
train_model(self.params, self.TEST_DIR, dry_run=True)
vocab_files = os.listdir(vocab_path)
assert set(vocab_files) == {
".lock",
"labels.txt",
"non_padded_namespaces.txt",
"tokens.txt",
}
with open(vocab_path / "tokens.txt") as f:
tokens = [line.strip() for line in f]
tokens.sort()
assert tokens == [".", "@@UNKNOWN@@", "animals", "are", "birds", "cats", "dogs", "snakes"]
with open(vocab_path / "labels.txt") as f:
labels = [line.strip() for line in f]
labels.sort()
assert labels == ["N", "V"]
def test_dry_run_with_extension(self):
existing_serialization_dir = self.TEST_DIR / "existing"
extended_serialization_dir = self.TEST_DIR / "extended"
existing_vocab_path = existing_serialization_dir / "vocabulary"
extended_vocab_path = extended_serialization_dir / "vocabulary"
vocab = Vocabulary()
vocab.add_token_to_namespace("some_weird_token_1", namespace="tokens")
vocab.add_token_to_namespace("some_weird_token_2", namespace="tokens")
os.makedirs(existing_serialization_dir, exist_ok=True)
vocab.save_to_files(existing_vocab_path)
self.params["vocabulary"] = {}
self.params["vocabulary"]["type"] = "extend"
self.params["vocabulary"]["directory"] = str(existing_vocab_path)
self.params["vocabulary"]["min_count"] = {"tokens": 3}
train_model(self.params, extended_serialization_dir, dry_run=True)
vocab_files = os.listdir(extended_vocab_path)
assert set(vocab_files) == {
".lock",
"labels.txt",
"non_padded_namespaces.txt",
"tokens.txt",
}
with open(extended_vocab_path / "tokens.txt") as f:
tokens = [line.strip() for line in f]
assert tokens[0] == "@@UNKNOWN@@"
assert tokens[1] == "some_weird_token_1"
assert tokens[2] == "some_weird_token_2"
tokens.sort()
assert tokens == [
".",
"@@UNKNOWN@@",
"animals",
"are",
"some_weird_token_1",
"some_weird_token_2",
]
with open(extended_vocab_path / "labels.txt") as f:
labels = [line.strip() for line in f]
labels.sort()
assert labels == ["N", "V"]
def test_dry_run_without_extension(self):
existing_serialization_dir = self.TEST_DIR / "existing"
extended_serialization_dir = self.TEST_DIR / "extended"
existing_vocab_path = existing_serialization_dir / "vocabulary"
extended_vocab_path = extended_serialization_dir / "vocabulary"
vocab = Vocabulary()
# If extend is False, it is the user's responsibility to make sure that dataset instances
# will be indexable by the provided vocabulary. At least @@UNKNOWN@@ should be present in
# any namespace for which OOV entries could be seen in the dataset during indexing.
# For the `tokens` namespace, new words will be seen, but `tokens` has the @@UNKNOWN@@ token;
# for the 'labels' namespace, there is no @@UNKNOWN@@, so 'N' and 'V' have to be added upfront.
vocab.add_token_to_namespace("some_weird_token_1", namespace="tokens")
vocab.add_token_to_namespace("some_weird_token_2", namespace="tokens")
vocab.add_token_to_namespace("N", namespace="labels")
vocab.add_token_to_namespace("V", namespace="labels")
os.makedirs(existing_serialization_dir, exist_ok=True)
vocab.save_to_files(existing_vocab_path)
self.params["vocabulary"] = {}
self.params["vocabulary"]["type"] = "from_files"
self.params["vocabulary"]["directory"] = str(existing_vocab_path)
train_model(self.params, extended_serialization_dir, dry_run=True)
with open(extended_vocab_path / "tokens.txt") as f:
tokens = [line.strip() for line in f]
assert tokens[0] == "@@UNKNOWN@@"
assert tokens[1] == "some_weird_token_1"
assert tokens[2] == "some_weird_token_2"
assert len(tokens) == 3
def test_make_vocab_args(self):
parser = argparse.ArgumentParser(description="Testing")
subparsers = parser.add_subparsers(title="Commands", metavar="")
Train().add_subparser(subparsers)
for serialization_arg in ["-s", "--serialization-dir"]:
raw_args = [
"train",
"path/to/params",
serialization_arg,
"serialization_dir",
"--dry-run",
]
args = parser.parse_args(raw_args)
assert args.func == train_model_from_args
assert args.param_path == "path/to/params"
assert args.serialization_dir == "serialization_dir"
assert args.dry_run
def test_warn_validation_loader_batches_per_epoch(self):
self.params["data_loader"]["batches_per_epoch"] = 3
with pytest.warns(UserWarning, match="batches_per_epoch"):
train_model(self.params, self.TEST_DIR, dry_run=True)
| allennlp-master | tests/commands/train_test.py |
import os
import sys
import pytest
from allennlp.commands import main
from allennlp.common.testing import AllenNlpTestCase
from allennlp.data import Vocabulary
class TestBuildVocabCommand(AllenNlpTestCase):
def test_build_vocab(self):
output_path = self.TEST_DIR / "vocab.tar.gz"
sys.argv = [
"allennlp",
"build-vocab",
str(self.FIXTURES_ROOT / "basic_classifier" / "experiment_seq2seq.jsonnet"),
str(output_path),
]
main()
assert os.path.exists(output_path)
vocab = Vocabulary.from_files(output_path)
assert vocab.get_token_index("neg", "labels") == 0
# If we try again, this time we should get a RuntimeError because the vocab archive
# already exists at the output path.
with pytest.raises(RuntimeError, match="already exists"):
main()
# But if we now add the '--force' argument, it will overwrite the file.
sys.argv.append("--force")
main()
| allennlp-master | tests/commands/build_vocab_test.py |
import argparse
import json
from typing import Iterator, List, Dict
import torch
from flaky import flaky
import pytest
from allennlp.commands.evaluate import evaluate_from_args, Evaluate, evaluate
from allennlp.common.testing import AllenNlpTestCase
from allennlp.data.dataloader import TensorDict
from allennlp.models import Model
class DummyDataLoader:
def __init__(self, outputs: List[TensorDict]) -> None:
super().__init__()
self._outputs = outputs
def __iter__(self) -> Iterator[TensorDict]:
yield from self._outputs
def __len__(self):
return len(self._outputs)
class DummyModel(Model):
def __init__(self) -> None:
super().__init__(None) # type: ignore
def forward(self, **kwargs) -> Dict[str, torch.Tensor]: # type: ignore
return kwargs
class TestEvaluate(AllenNlpTestCase):
def setup_method(self):
super().setup_method()
self.parser = argparse.ArgumentParser(description="Testing")
subparsers = self.parser.add_subparsers(title="Commands", metavar="")
Evaluate().add_subparser(subparsers)
def test_evaluate_calculates_average_loss(self):
losses = [7.0, 9.0, 8.0]
outputs = [{"loss": torch.Tensor([loss])} for loss in losses]
data_loader = DummyDataLoader(outputs)
metrics = evaluate(DummyModel(), data_loader, -1, "")
assert metrics["loss"] == pytest.approx(8.0)
def test_evaluate_calculates_average_loss_with_weights(self):
losses = [7.0, 9.0, 8.0]
weights = [10, 2, 1.5]
inputs = zip(losses, weights)
outputs = [
{"loss": torch.Tensor([loss]), "batch_weight": torch.Tensor([weight])}
for loss, weight in inputs
]
data_loader = DummyDataLoader(outputs)
metrics = evaluate(DummyModel(), data_loader, -1, "batch_weight")
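# The expected value below is the batch-weight-weighted mean of the losses:
# (7.0 * 10 + 9.0 * 2 + 8.0 * 1.5) / (10 + 2 + 1.5) = (70 + 18 + 12) / 13.5 ≈ 7.41.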
assert metrics["loss"] == pytest.approx((70 + 18 + 12) / 13.5)
@flaky
def test_evaluate_from_args(self):
kebab_args = [
"evaluate",
str(
self.FIXTURES_ROOT / "simple_tagger_with_span_f1" / "serialization" / "model.tar.gz"
),
str(self.FIXTURES_ROOT / "data" / "conll2003.txt"),
"--cuda-device",
"-1",
]
args = self.parser.parse_args(kebab_args)
metrics = evaluate_from_args(args)
assert metrics.keys() == {
"accuracy",
"accuracy3",
"precision-overall",
"recall-overall",
"f1-measure-overall",
"loss",
}
def test_output_file_evaluate_from_args(self):
output_file = str(self.TEST_DIR / "metrics.json")
predictions_output_file = str(self.TEST_DIR / "predictions.jsonl")
kebab_args = [
"evaluate",
str(
self.FIXTURES_ROOT / "simple_tagger_with_span_f1" / "serialization" / "model.tar.gz"
),
str(self.FIXTURES_ROOT / "data" / "conll2003.txt"),
"--cuda-device",
"-1",
"--output-file",
output_file,
"--predictions-output-file",
predictions_output_file,
]
args = self.parser.parse_args(kebab_args)
computed_metrics = evaluate_from_args(args)
with open(output_file, "r") as file:
saved_metrics = json.load(file)
assert computed_metrics == saved_metrics
with open(predictions_output_file, "r") as file:
for line in file:
prediction = json.loads(line.strip())
assert "tags" in prediction
def test_evaluate_works_with_vocab_expansion(self):
archive_path = str(
self.FIXTURES_ROOT / "basic_classifier" / "serialization" / "model.tar.gz"
)
# snli2 has an extra token ("seahorse") in it.
evaluate_data_path = str(
self.FIXTURES_ROOT / "data" / "text_classification_json" / "imdb_corpus2.jsonl"
)
embeddings_filename = str(
self.FIXTURES_ROOT / "data" / "unawarded_embeddings.gz"
) # has only unawarded vector
embedding_sources_mapping = json.dumps(
{"_text_field_embedder.token_embedder_tokens": embeddings_filename}
)
kebab_args = ["evaluate", archive_path, evaluate_data_path, "--cuda-device", "-1"]
# TODO(mattg): the unawarded_embeddings.gz file above doesn't exist, but this test still
# passes. This suggests that vocab extension in evaluate isn't currently doing anything,
# and so it is broken.
# Evaluate 1 with no vocab expansion,
# Evaluate 2 with vocab expansion with no pretrained embedding file.
# Evaluate 3 with vocab expansion with given pretrained embedding file.
metrics_1 = evaluate_from_args(self.parser.parse_args(kebab_args))
metrics_2 = evaluate_from_args(self.parser.parse_args(kebab_args + ["--extend-vocab"]))
metrics_3 = evaluate_from_args(
self.parser.parse_args(
kebab_args + ["--embedding-sources-mapping", embedding_sources_mapping]
)
)
assert metrics_1 != metrics_2
assert metrics_2 != metrics_3
| allennlp-master | tests/commands/evaluate_test.py |
| allennlp-master | tests/commands/__init__.py |
from typing import Dict
import torch
from allennlp.commands.train import train_model
from allennlp.common import Params
from allennlp.common.testing import AllenNlpTestCase
from allennlp.models import load_archive, Model
SEQUENCE_TAGGING_DATA_PATH = str(AllenNlpTestCase.FIXTURES_ROOT / "data" / "sequence_tagging.tsv")
@Model.register("constant")
class ConstantModel(Model):
def forward(self, *inputs) -> Dict[str, torch.Tensor]:
return {"class": torch.tensor(98)}
class TestTrain(AllenNlpTestCase):
def test_train_model(self):
params = lambda: Params(
{
"model": {"type": "constant"},
"dataset_reader": {"type": "sequence_tagging"},
"train_data_path": SEQUENCE_TAGGING_DATA_PATH,
"validation_data_path": SEQUENCE_TAGGING_DATA_PATH,
"data_loader": {"batch_size": 2},
"trainer": {"type": "no_op"},
}
)
serialization_dir = self.TEST_DIR / "serialization_directory"
train_model(params(), serialization_dir=serialization_dir)
archive = load_archive(str(serialization_dir / "model.tar.gz"))
model = archive.model
assert model.forward(torch.tensor([1, 2, 3]))["class"] == torch.tensor(98)
assert model.vocab.get_vocab_size() == 9
| allennlp-master | tests/commands/no_op_train_test.py |
import os
from allennlp.common.testing import AllenNlpTestCase
from allennlp.commands.test_install import _get_module_root
class TestTestInstall(AllenNlpTestCase):
def test_get_module_root(self):
"""
When a user runs `allennlp test-install`, we have no idea where
they're running it from, so we do an `os.chdir` to the _module_
root in order to get all the paths in the fixtures to resolve properly.
The logic within `allennlp test-install` is pretty hard to test in
its entirety, so this test just verifies that the `os.chdir` component
works properly by checking that we correctly find the path to
`os.chdir` to.
"""
project_root = _get_module_root()
assert os.path.exists(os.path.join(project_root, "__main__.py"))
| allennlp-master | tests/commands/test_install_test.py |
import sys
import pytest
from allennlp.commands import main
from allennlp.common.testing import AllenNlpTestCase
class TestCachedPathCommand(AllenNlpTestCase):
def test_local_file(self, capsys):
sys.argv = ["allennlp", "cached-path", "--cache-dir", str(self.TEST_DIR), "README.md"]
main()
captured = capsys.readouterr()
assert "README.md" in captured.out
def test_inspect_empty_cache(self, capsys):
sys.argv = ["allennlp", "cached-path", "--cache-dir", str(self.TEST_DIR), "--inspect"]
main()
captured = capsys.readouterr()
assert "Cached resources:" in captured.out
assert "Total size: 0B" in captured.out
def test_inspect_with_bad_options(self, capsys):
sys.argv = [
"allennlp",
"cached-path",
"--cache-dir",
str(self.TEST_DIR),
"--inspect",
"--extract-archive",
]
with pytest.raises(RuntimeError, match="--extract-archive"):
main()
def test_remove_with_bad_options(self, capsys):
sys.argv = [
"allennlp",
"cached-path",
"--cache-dir",
str(self.TEST_DIR),
"--remove",
"--extract-archive",
"*",
]
with pytest.raises(RuntimeError, match="--extract-archive"):
main()
def test_remove_with_missing_positionals(self, capsys):
sys.argv = [
"allennlp",
"cached-path",
"--cache-dir",
str(self.TEST_DIR),
"--remove",
]
with pytest.raises(RuntimeError, match="Missing positional"):
main()
def test_remove_empty_cache(self, capsys):
sys.argv = [
"allennlp",
"cached-path",
"--cache-dir",
str(self.TEST_DIR),
"--remove",
"*",
]
main()
captured = capsys.readouterr()
assert "Reclaimed 0B of space" in captured.out
| allennlp-master | tests/commands/cached_path_test.py |
import shutil
import sys
import pytest
from overrides import overrides
from allennlp.commands import main
from allennlp.commands.subcommand import Subcommand
from allennlp.common.checks import ConfigurationError
from allennlp.common.plugins import discover_plugins
from allennlp.common.testing import AllenNlpTestCase
from allennlp.common.util import push_python_path, pushd
class TestMain(AllenNlpTestCase):
def test_fails_on_unknown_command(self):
sys.argv = [
"bogus", # command
"unknown_model", # model_name
"bogus file", # input_file
"--output-file",
"bogus out file",
"--silent",
]
with pytest.raises(SystemExit) as cm:
main()
assert cm.value.code == 2 # argparse code for incorrect usage
def test_subcommand_overrides(self):
called = False
def do_nothing(_):
nonlocal called
called = True
@Subcommand.register("evaluate", exist_ok=True)
class FakeEvaluate(Subcommand): # noqa
@overrides
def add_subparser(self, parser):
subparser = parser.add_parser(self.name, description="fake", help="fake help")
subparser.set_defaults(func=do_nothing)
return subparser
sys.argv = ["allennlp", "evaluate"]
main()
assert called
def test_other_modules(self):
# Create a new package in a temporary dir
packagedir = self.TEST_DIR / "testpackage"
packagedir.mkdir()
(packagedir / "__init__.py").touch()
# And add that directory to the path
with push_python_path(self.TEST_DIR):
# Write out a duplicate model there, but registered under a different name.
from allennlp.models import simple_tagger
with open(simple_tagger.__file__) as model_file:
code = model_file.read().replace(
"""@Model.register("simple_tagger")""",
"""@Model.register("duplicate-test-tagger")""",
)
with open(packagedir / "model.py", "w") as new_model_file:
new_model_file.write(code)
# Copy fixture there too.
shutil.copy(self.FIXTURES_ROOT / "data" / "sequence_tagging.tsv", self.TEST_DIR)
data_path = str(self.TEST_DIR / "sequence_tagging.tsv")
# Write out config file
config_path = self.TEST_DIR / "config.json"
config_json = """{
"model": {
"type": "duplicate-test-tagger",
"text_field_embedder": {
"token_embedders": {
"tokens": {
"type": "embedding",
"embedding_dim": 5
}
}
},
"encoder": {
"type": "lstm",
"input_size": 5,
"hidden_size": 7,
"num_layers": 2
}
},
"dataset_reader": {"type": "sequence_tagging"},
"train_data_path": "$$$",
"validation_data_path": "$$$",
"data_loader": {"batch_size": 2},
"trainer": {
"num_epochs": 2,
"optimizer": "adam"
}
}""".replace(
"$$$", data_path
)
with open(config_path, "w") as config_file:
config_file.write(config_json)
serialization_dir = self.TEST_DIR / "serialization"
# Run train with using the non-allennlp module.
sys.argv = ["allennlp", "train", str(config_path), "-s", str(serialization_dir)]
# Shouldn't be able to find the model.
with pytest.raises(ConfigurationError):
main()
# Now add the --include-package flag and it should work.
# We also need to add --recover since the output directory already exists.
sys.argv.extend(["--recover", "--include-package", "testpackage"])
main()
# Rewrite out config file, but change a value.
with open(config_path, "w") as new_config_file:
new_config_file.write(config_json.replace('"num_epochs": 2,', '"num_epochs": 4,'))
# This should fail because the config.json does not match that in the serialization directory.
with pytest.raises(ConfigurationError):
main()
def test_file_plugin_loaded(self):
plugins_root = self.FIXTURES_ROOT / "plugins"
sys.argv = ["allennlp"]
available_plugins = set(discover_plugins())
assert available_plugins == set()
with pushd(plugins_root):
main()
subcommands_available = Subcommand.list_available()
assert "d" in subcommands_available
| allennlp-master | tests/commands/main_test.py |
import os
import json
import sys
import pathlib
import tempfile
import io
from contextlib import redirect_stdout
from allennlp.commands import main
from allennlp.common.testing import AllenNlpTestCase
class TestPrintResults(AllenNlpTestCase):
def setup_method(self):
super().setup_method()
self.out_dir1 = pathlib.Path(tempfile.mkdtemp(prefix="hi"))
self.out_dir2 = pathlib.Path(tempfile.mkdtemp(prefix="hi"))
self.directory1 = self.TEST_DIR / "results1"
self.directory2 = self.TEST_DIR / "results2"
self.directory3 = self.TEST_DIR / "results3"
os.makedirs(self.directory1)
os.makedirs(self.directory2)
os.makedirs(self.directory3)
json.dump(
{"train": 1, "test": 2, "dev": 3},
open(os.path.join(self.directory1 / "metrics.json"), "w+"),
)
json.dump(
{"train": 4, "dev": 5}, open(os.path.join(self.directory2 / "metrics.json"), "w+")
)
json.dump(
{"train": 6, "dev": 7}, open(os.path.join(self.directory3 / "cool_metrics.json"), "w+")
)
def test_print_results(self):
kebab_args = [
"__main__.py",
"print-results",
str(self.TEST_DIR),
"--keys",
"train",
"dev",
"test",
]
sys.argv = kebab_args
with io.StringIO() as buf, redirect_stdout(buf):
main()
output = buf.getvalue()
lines = output.strip().split("\n")
assert lines[0] == "model_run, train, dev, test"
expected_results = {
(str(self.directory1) + "/metrics.json", "1", "3", "2"),
(str(self.directory2) + "/metrics.json", "4", "5", "N/A"),
}
results = {tuple(line.split(", ")) for line in lines[1:]}
assert results == expected_results
def test_print_results_with_metrics_filename(self):
kebab_args = [
"__main__.py",
"print-results",
str(self.TEST_DIR),
"--keys",
"train",
"dev",
"test",
"--metrics-filename",
"cool_metrics.json",
]
sys.argv = kebab_args
with io.StringIO() as buf, redirect_stdout(buf):
main()
output = buf.getvalue()
lines = output.strip().split("\n")
assert lines[0] == "model_run, train, dev, test"
expected_results = {(str(self.directory3) + "/cool_metrics.json", "6", "7", "N/A")}
results = {tuple(line.split(", ")) for line in lines[1:]}
assert results == expected_results
| allennlp-master | tests/commands/print_results_test.py |
from numpy.testing import assert_almost_equal
import pytest
import torch
from allennlp.common import Params
from allennlp.common.checks import ConfigurationError
from allennlp.modules import Maxout
from allennlp.nn import InitializerApplicator, Initializer
from allennlp.common.testing import AllenNlpTestCase
class TestMaxout(AllenNlpTestCase):
def test_init_checks_output_dims_consistency(self):
with pytest.raises(ConfigurationError):
Maxout(input_dim=2, num_layers=2, output_dims=[5, 4, 3], pool_sizes=4, dropout=0.0)
def test_init_checks_pool_sizes_consistency(self):
with pytest.raises(ConfigurationError):
Maxout(input_dim=2, num_layers=2, output_dims=5, pool_sizes=[4, 5, 2], dropout=0.0)
def test_init_checks_dropout_consistency(self):
with pytest.raises(ConfigurationError):
Maxout(input_dim=2, num_layers=3, output_dims=5, pool_sizes=4, dropout=[0.2, 0.3])
def test_forward_gives_correct_output(self):
params = Params(
{"input_dim": 2, "output_dims": 3, "pool_sizes": 4, "dropout": 0.0, "num_layers": 2}
)
maxout = Maxout.from_params(params)
constant_init = Initializer.from_params(Params({"type": "constant", "val": 1.0}))
initializer = InitializerApplicator([(".*", constant_init)])
initializer(maxout)
input_tensor = torch.FloatTensor([[-3, 1]])
output = maxout(input_tensor).data.numpy()
assert output.shape == (1, 3)
# This output was checked by hand
# The output of the first maxout layer is [-1, -1, -1], since the
# matrix multiply gives us [-2]*12. Reshaping and maxing
# produces [-2, -2, -2] and the bias increments these values.
# The second layer output is [-2, -2, -2], since the matrix
# multiply gives us [-3]*12. Reshaping and maxing
# produces [-3, -3, -3] and the bias increments these values.
assert_almost_equal(output, [[-2, -2, -2]])
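# A sketch of the per-unit arithmetic, assuming the constant initializer above set every
# weight and bias to 1.0: the first layer's pre-activations are 1 * (-3) + 1 * 1 + 1 = -1 for
# each of the 12 units, and maxing over pools of 4 equal values gives [-1, -1, -1]; the second
# layer then gives 1 * (-1) * 3 + 1 = -2 per unit, hence the expected [-2, -2, -2].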
| allennlp-master | tests/modules/maxout_test.py |
import json
import os
import warnings
from typing import List
import numpy
import torch
from allennlp.common.testing import AllenNlpTestCase
from allennlp.data import Instance, Token, Vocabulary
from allennlp.data.batch import Batch
from allennlp.data.fields import TextField
from allennlp.data.token_indexers.elmo_indexer import ELMoTokenCharactersIndexer
from allennlp.data.token_indexers.single_id_token_indexer import SingleIdTokenIndexer
from allennlp.data.dataset_readers.dataset_reader import AllennlpDataset
from allennlp.data.dataloader import PyTorchDataLoader
from allennlp.modules.elmo import _ElmoBiLm, _ElmoCharacterEncoder, Elmo
from allennlp.modules.token_embedders import ElmoTokenEmbedder
from allennlp.nn.util import remove_sentence_boundaries
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=FutureWarning)
import h5py
class ElmoTestCase(AllenNlpTestCase):
def setup_method(self):
super().setup_method()
self.elmo_fixtures_path = self.FIXTURES_ROOT / "elmo"
self.options_file = str(self.elmo_fixtures_path / "options.json")
self.weight_file = str(self.elmo_fixtures_path / "lm_weights.hdf5")
self.sentences_json_file = str(self.elmo_fixtures_path / "sentences.json")
self.sentences_txt_file = str(self.elmo_fixtures_path / "sentences.txt")
def _load_sentences_embeddings(self):
"""
Load the test sentences and the expected LM embeddings.
The files loaded in this method were created with a batch size of 3.
Due to idiosyncrasies with TensorFlow, the 30 sentences in sentences.json are split into 3 files in which
the k-th sentence in each is from batch k.
This method returns a (sentences, embeddings) pair where each is a list of length batch_size.
Each of its elements is a sublist with total_sentence_count / batch_size elements. As with the original files,
the i-th element of each sublist belongs to batch i.
"""
with open(self.sentences_json_file) as fin:
sentences = json.load(fin)
# the expected embeddings
expected_lm_embeddings = []
for k in range(len(sentences)):
embed_fname = os.path.join(self.elmo_fixtures_path, "lm_embeddings_{}.hdf5".format(k))
expected_lm_embeddings.append([])
with h5py.File(embed_fname, "r") as fin:
for i in range(10):
sent_embeds = fin["%s" % i][...]
sent_embeds_concat = numpy.concatenate(
(sent_embeds[0, :, :], sent_embeds[1, :, :]), axis=-1
)
expected_lm_embeddings[-1].append(sent_embeds_concat)
return sentences, expected_lm_embeddings
@staticmethod
def get_vocab_and_both_elmo_indexed_ids(batch: List[List[str]]):
instances = []
indexer = ELMoTokenCharactersIndexer()
indexer2 = SingleIdTokenIndexer()
for sentence in batch:
tokens = [Token(token) for token in sentence]
field = TextField(tokens, {"character_ids": indexer, "tokens": indexer2})
instance = Instance({"elmo": field})
instances.append(instance)
dataset = Batch(instances)
vocab = Vocabulary.from_instances(instances)
dataset.index_instances(vocab)
return vocab, dataset.as_tensor_dict()["elmo"]
class TestElmoBiLm(ElmoTestCase):
def test_elmo_bilm(self):
# get the raw data
sentences, expected_lm_embeddings = self._load_sentences_embeddings()
# load the test model
elmo_bilm = _ElmoBiLm(self.options_file, self.weight_file)
# Deal with the data.
indexer = ELMoTokenCharactersIndexer()
# For each sentence, first create a TextField, then create an instance
instances = []
for batch in zip(*sentences):
for sentence in batch:
tokens = [Token(token) for token in sentence.split()]
field = TextField(tokens, {"character_ids": indexer})
instance = Instance({"elmo": field})
instances.append(instance)
vocab = Vocabulary()
dataset = AllennlpDataset(instances, vocab)
# Now finally we can iterate through batches.
loader = PyTorchDataLoader(dataset, 3)
for i, batch in enumerate(loader):
lm_embeddings = elmo_bilm(batch["elmo"]["character_ids"]["elmo_tokens"])
top_layer_embeddings, mask = remove_sentence_boundaries(
lm_embeddings["activations"][2], lm_embeddings["mask"]
)
# check the mask lengths
lengths = mask.data.numpy().sum(axis=1)
batch_sentences = [sentences[k][i] for k in range(3)]
expected_lengths = [len(sentence.split()) for sentence in batch_sentences]
assert lengths.tolist() == expected_lengths
# get the expected embeddings and compare!
expected_top_layer = [expected_lm_embeddings[k][i] for k in range(3)]
for k in range(3):
assert numpy.allclose(
top_layer_embeddings[k, : lengths[k], :].data.numpy(),
expected_top_layer[k],
atol=1.0e-6,
)
def test_elmo_char_cnn_cache_does_not_raise_error_for_uncached_words(self):
sentences = [["This", "is", "OOV"], ["so", "is", "this"]]
in_vocab_sentences = [["here", "is"], ["a", "vocab"]]
oov_tensor = self.get_vocab_and_both_elmo_indexed_ids(sentences)[1]
vocab, in_vocab_tensor = self.get_vocab_and_both_elmo_indexed_ids(in_vocab_sentences)
words_to_cache = list(vocab.get_token_to_index_vocabulary("tokens").keys())
elmo_bilm = _ElmoBiLm(self.options_file, self.weight_file, vocab_to_cache=words_to_cache)
elmo_bilm(
in_vocab_tensor["character_ids"]["elmo_tokens"], in_vocab_tensor["tokens"]["tokens"]
)
elmo_bilm(oov_tensor["character_ids"]["elmo_tokens"], oov_tensor["tokens"]["tokens"])
def test_elmo_bilm_can_cache_char_cnn_embeddings(self):
sentences = [["This", "is", "a", "sentence"], ["Here", "'s", "one"], ["Another", "one"]]
vocab, tensor = self.get_vocab_and_both_elmo_indexed_ids(sentences)
words_to_cache = list(vocab.get_token_to_index_vocabulary("tokens").keys())
elmo_bilm = _ElmoBiLm(self.options_file, self.weight_file)
elmo_bilm.eval()
no_cache = elmo_bilm(
tensor["character_ids"]["elmo_tokens"], tensor["character_ids"]["elmo_tokens"]
)
# ELMo is stateful, so we need to actually re-initialise it for this comparison to work.
elmo_bilm = _ElmoBiLm(self.options_file, self.weight_file, vocab_to_cache=words_to_cache)
elmo_bilm.eval()
cached = elmo_bilm(tensor["character_ids"]["elmo_tokens"], tensor["tokens"]["tokens"])
numpy.testing.assert_array_almost_equal(
no_cache["mask"].data.cpu().numpy(), cached["mask"].data.cpu().numpy()
)
for activation_cached, activation in zip(cached["activations"], no_cache["activations"]):
numpy.testing.assert_array_almost_equal(
activation_cached.data.cpu().numpy(), activation.data.cpu().numpy(), decimal=6
)
class TestElmo(ElmoTestCase):
def setup_method(self):
super().setup_method()
self.elmo = Elmo(self.options_file, self.weight_file, 2, dropout=0.0)
def _sentences_to_ids(self, sentences):
indexer = ELMoTokenCharactersIndexer()
# For each sentence, first create a TextField, then create an instance
instances = []
for sentence in sentences:
tokens = [Token(token) for token in sentence]
field = TextField(tokens, {"character_ids": indexer})
instance = Instance({"elmo": field})
instances.append(instance)
dataset = Batch(instances)
vocab = Vocabulary()
dataset.index_instances(vocab)
return dataset.as_tensor_dict()["elmo"]["character_ids"]["elmo_tokens"]
def test_elmo(self):
# Correctness checks are in ElmoBiLm and ScalarMix; here we just add a shallow test
# to ensure things execute.
sentences = [
["The", "sentence", "."],
["ELMo", "helps", "disambiguate", "ELMo", "from", "Elmo", "."],
]
character_ids = self._sentences_to_ids(sentences)
output = self.elmo(character_ids)
elmo_representations = output["elmo_representations"]
mask = output["mask"]
assert len(elmo_representations) == 2
assert list(elmo_representations[0].size()) == [2, 7, 32]
assert list(elmo_representations[1].size()) == [2, 7, 32]
assert list(mask.size()) == [2, 7]
def test_elmo_keep_sentence_boundaries(self):
sentences = [
["The", "sentence", "."],
["ELMo", "helps", "disambiguate", "ELMo", "from", "Elmo", "."],
]
elmo = Elmo(
self.options_file, self.weight_file, 2, dropout=0.0, keep_sentence_boundaries=True
)
character_ids = self._sentences_to_ids(sentences)
output = elmo(character_ids)
elmo_representations = output["elmo_representations"]
mask = output["mask"]
assert len(elmo_representations) == 2
# Add 2 to the lengths because we're keeping the start and end of sentence tokens.
assert list(elmo_representations[0].size()) == [2, 7 + 2, 32]
assert list(elmo_representations[1].size()) == [2, 7 + 2, 32]
assert list(mask.size()) == [2, 7 + 2]
def test_elmo_4D_input(self):
sentences = [
[
["The", "sentence", "."],
["ELMo", "helps", "disambiguate", "ELMo", "from", "Elmo", "."],
],
[["1", "2"], ["1", "2", "3", "4", "5", "6", "7"]],
[["1", "2", "3", "4", "50", "60", "70"], ["The"]],
]
all_character_ids = []
for batch_sentences in sentences:
all_character_ids.append(self._sentences_to_ids(batch_sentences))
# character_ids has shape (2, 3, 7, 50): (2 sentences per group, 3 groups, 7 tokens, 50 characters)
character_ids = torch.cat([ids.unsqueeze(1) for ids in all_character_ids], dim=1)
embeddings_4d = self.elmo(character_ids)
# Run the individual batches.
embeddings_3d = []
for char_ids in all_character_ids:
self.elmo._elmo_lstm._elmo_lstm.reset_states()
embeddings_3d.append(self.elmo(char_ids))
for k in range(3):
numpy.testing.assert_array_almost_equal(
embeddings_4d["elmo_representations"][0][:, k, :, :].data.numpy(),
embeddings_3d[k]["elmo_representations"][0].data.numpy(),
)
def test_elmo_with_module(self):
# We will create the _ElmoBiLm class and pass it in as a module.
sentences = [
["The", "sentence", "."],
["ELMo", "helps", "disambiguate", "ELMo", "from", "Elmo", "."],
]
character_ids = self._sentences_to_ids(sentences)
elmo_bilm = _ElmoBiLm(self.options_file, self.weight_file)
elmo = Elmo(None, None, 2, dropout=0.0, module=elmo_bilm)
output = elmo(character_ids)
elmo_representations = output["elmo_representations"]
assert len(elmo_representations) == 2
for k in range(2):
assert list(elmo_representations[k].size()) == [2, 7, 32]
def test_elmo_bilm_can_handle_higher_dimensional_input_with_cache(self):
sentences = [["This", "is", "a", "sentence"], ["Here", "'s", "one"], ["Another", "one"]]
vocab, tensor = self.get_vocab_and_both_elmo_indexed_ids(sentences)
words_to_cache = list(vocab.get_token_to_index_vocabulary("tokens").keys())
elmo_bilm = Elmo(self.options_file, self.weight_file, 1, vocab_to_cache=words_to_cache)
elmo_bilm.eval()
individual_dim = elmo_bilm(
tensor["character_ids"]["elmo_tokens"], tensor["tokens"]["tokens"]
)
elmo_bilm = Elmo(self.options_file, self.weight_file, 1, vocab_to_cache=words_to_cache)
elmo_bilm.eval()
expanded_word_ids = torch.stack([tensor["tokens"]["tokens"] for _ in range(4)], dim=1)
expanded_char_ids = torch.stack(
[tensor["character_ids"]["elmo_tokens"] for _ in range(4)], dim=1
)
expanded_result = elmo_bilm(expanded_char_ids, expanded_word_ids)
split_result = [
x.squeeze(1) for x in torch.split(expanded_result["elmo_representations"][0], 1, dim=1)
]
for expanded in split_result:
numpy.testing.assert_array_almost_equal(
expanded.data.cpu().numpy(),
individual_dim["elmo_representations"][0].data.cpu().numpy(),
)
class TestElmoRequiresGrad(ElmoTestCase):
def _run_test(self, requires_grad):
embedder = ElmoTokenEmbedder(
self.options_file, self.weight_file, requires_grad=requires_grad
)
batch_size = 3
seq_len = 4
char_ids = torch.from_numpy(numpy.random.randint(0, 262, (batch_size, seq_len, 50)))
embeddings = embedder(char_ids)
loss = embeddings.sum()
loss.backward()
elmo_grads = [
param.grad for name, param in embedder.named_parameters() if "_elmo_lstm" in name
]
if requires_grad:
# None of the elmo grads should be None.
assert all(grad is not None for grad in elmo_grads)
else:
# All of the elmo grads should be None.
assert all(grad is None for grad in elmo_grads)
def test_elmo_requires_grad(self):
self._run_test(True)
def test_elmo_does_not_require_grad(self):
self._run_test(False)
class TestElmoTokenRepresentation(ElmoTestCase):
def test_elmo_token_representation(self):
# Load the test words and convert to char ids
with open(os.path.join(self.elmo_fixtures_path, "vocab_test.txt"), "r") as fin:
words = fin.read().strip().split("\n")
vocab = Vocabulary()
indexer = ELMoTokenCharactersIndexer()
tokens = [Token(word) for word in words]
indices = indexer.tokens_to_indices(tokens, vocab)
# There are 457 tokens. Reshape into 10 batches of 50 tokens.
sentences = []
for k in range(10):
char_indices = indices["elmo_tokens"][(k * 50) : ((k + 1) * 50)]
sentences.append(
indexer.as_padded_tensor_dict(
{"elmo_tokens": char_indices}, padding_lengths={"elmo_tokens": 50}
)["elmo_tokens"]
)
batch = torch.stack(sentences)
elmo_token_embedder = _ElmoCharacterEncoder(self.options_file, self.weight_file)
elmo_token_embedder_output = elmo_token_embedder(batch)
# Reshape back to a list of words and compare with ground truth. Need to also
# remove <S>, </S>
actual_embeddings = remove_sentence_boundaries(
elmo_token_embedder_output["token_embedding"], elmo_token_embedder_output["mask"]
)[0].data.numpy()
actual_embeddings = actual_embeddings.reshape(-1, actual_embeddings.shape[-1])
embedding_file = os.path.join(self.elmo_fixtures_path, "elmo_token_embeddings.hdf5")
with h5py.File(embedding_file, "r") as fin:
expected_embeddings = fin["embedding"][...]
assert numpy.allclose(actual_embeddings[: len(tokens)], expected_embeddings, atol=1e-6)
def test_elmo_token_representation_bos_eos(self):
# The additional <S> and </S> embeddings added by the embedder should be as expected.
indexer = ELMoTokenCharactersIndexer()
elmo_token_embedder = _ElmoCharacterEncoder(self.options_file, self.weight_file)
for correct_index, token in [[0, "<S>"], [2, "</S>"]]:
indices = indexer.tokens_to_indices([Token(token)], Vocabulary())
indices = torch.from_numpy(numpy.array(indices["elmo_tokens"])).view(1, 1, -1)
embeddings = elmo_token_embedder(indices)["token_embedding"]
assert numpy.allclose(
embeddings[0, correct_index, :].data.numpy(), embeddings[0, 1, :].data.numpy()
)
| allennlp-master | tests/modules/elmo_test.py |
import numpy
import torch
from torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence
from allennlp.modules.stacked_alternating_lstm import StackedAlternatingLstm
from allennlp.common.testing import AllenNlpTestCase
class TestStackedAlternatingLstm(AllenNlpTestCase):
def test_stacked_alternating_lstm_completes_forward_pass(self):
input_tensor = torch.rand(4, 5, 3)
input_tensor[1, 4:, :] = 0.0
input_tensor[2, 2:, :] = 0.0
input_tensor[3, 1:, :] = 0.0
input_tensor = pack_padded_sequence(input_tensor, [5, 4, 2, 1], batch_first=True)
lstm = StackedAlternatingLstm(3, 7, 3)
output, _ = lstm(input_tensor)
output_sequence, _ = pad_packed_sequence(output, batch_first=True)
numpy.testing.assert_array_equal(output_sequence.data[1, 4:, :].numpy(), 0.0)
numpy.testing.assert_array_equal(output_sequence.data[2, 2:, :].numpy(), 0.0)
numpy.testing.assert_array_equal(output_sequence.data[3, 1:, :].numpy(), 0.0)
def test_lstms_are_interleaved(self):
lstm = StackedAlternatingLstm(3, 7, 8)
for i, layer in enumerate(lstm.lstm_layers):
if i % 2 == 0:
assert layer.go_forward
else:
assert not layer.go_forward
| allennlp-master | tests/modules/stacked_alternating_lstm_test.py |
import pytest
import numpy
import torch
import torch.nn.init
from torch.nn.modules.rnn import LSTM
from torch.nn.utils.rnn import pad_packed_sequence, pack_padded_sequence
from allennlp.common.checks import ConfigurationError
from allennlp.common.params import Params
from allennlp.common.testing import AllenNlpTestCase
from allennlp.modules.augmented_lstm import AugmentedLstm, AugmentedLSTMCell, BiAugmentedLstm
from allennlp.nn import InitializerApplicator, Initializer
from allennlp.nn.util import sort_batch_by_length
class TestAugmentedLSTM(AllenNlpTestCase):
def setup_method(self):
super().setup_method()
tensor = torch.rand([5, 7, 10])
tensor[0, 3:, :] = 0
tensor[1, 4:, :] = 0
tensor[2, 2:, :] = 0
tensor[3, 6:, :] = 0
sequence_lengths = torch.LongTensor([3, 4, 2, 6, 7])
self.random_tensor = tensor
self.sequence_lengths = sequence_lengths
def test_variable_length_sequences_return_correctly_padded_outputs(self):
sorted_tensor, sorted_sequence, _, _ = sort_batch_by_length(
self.random_tensor, self.sequence_lengths
)
tensor = pack_padded_sequence(
sorted_tensor, sorted_sequence.data.tolist(), batch_first=True
)
lstm = AugmentedLstm(10, 11)
output, _ = lstm(tensor)
output_sequence, _ = pad_packed_sequence(output, batch_first=True)
numpy.testing.assert_array_equal(output_sequence.data[1, 6:, :].numpy(), 0.0)
numpy.testing.assert_array_equal(output_sequence.data[2, 4:, :].numpy(), 0.0)
numpy.testing.assert_array_equal(output_sequence.data[3, 3:, :].numpy(), 0.0)
numpy.testing.assert_array_equal(output_sequence.data[4, 2:, :].numpy(), 0.0)
def test_variable_length_sequences_run_backward_return_correctly_padded_outputs(self):
sorted_tensor, sorted_sequence, _, _ = sort_batch_by_length(
self.random_tensor, self.sequence_lengths
)
tensor = pack_padded_sequence(
sorted_tensor, sorted_sequence.data.tolist(), batch_first=True
)
lstm = AugmentedLstm(10, 11, go_forward=False)
output, _ = lstm(tensor)
output_sequence, _ = pad_packed_sequence(output, batch_first=True)
numpy.testing.assert_array_equal(output_sequence.data[1, 6:, :].numpy(), 0.0)
numpy.testing.assert_array_equal(output_sequence.data[2, 4:, :].numpy(), 0.0)
numpy.testing.assert_array_equal(output_sequence.data[3, 3:, :].numpy(), 0.0)
numpy.testing.assert_array_equal(output_sequence.data[4, 2:, :].numpy(), 0.0)
def test_augmented_lstm_computes_same_function_as_pytorch_lstm(self):
augmented_lstm = AugmentedLstm(10, 11)
pytorch_lstm = LSTM(10, 11, num_layers=1, batch_first=True)
# Initialize all weights to be == 1.
constant_init = Initializer.from_params(Params({"type": "constant", "val": 1.0}))
initializer = InitializerApplicator([(".*", constant_init)])
initializer(augmented_lstm)
initializer(pytorch_lstm)
initial_state = torch.zeros([1, 5, 11])
initial_memory = torch.zeros([1, 5, 11])
# Use bigger numbers to avoid floating point instability.
sorted_tensor, sorted_sequence, _, _ = sort_batch_by_length(
self.random_tensor * 5.0, self.sequence_lengths
)
lstm_input = pack_padded_sequence(
sorted_tensor, sorted_sequence.data.tolist(), batch_first=True
)
augmented_output, augmented_state = augmented_lstm(
lstm_input, (initial_state, initial_memory)
)
pytorch_output, pytorch_state = pytorch_lstm(lstm_input, (initial_state, initial_memory))
pytorch_output_sequence, _ = pad_packed_sequence(pytorch_output, batch_first=True)
augmented_output_sequence, _ = pad_packed_sequence(augmented_output, batch_first=True)
numpy.testing.assert_array_almost_equal(
pytorch_output_sequence.data.numpy(), augmented_output_sequence.data.numpy(), decimal=4
)
numpy.testing.assert_array_almost_equal(
pytorch_state[0].data.numpy(), augmented_state[0].data.numpy(), decimal=4
)
numpy.testing.assert_array_almost_equal(
pytorch_state[1].data.numpy(), augmented_state[1].data.numpy(), decimal=4
)
def test_augmented_lstm_works_with_highway_connections(self):
augmented_lstm = AugmentedLstm(10, 11, use_highway=True)
sorted_tensor, sorted_sequence, _, _ = sort_batch_by_length(
self.random_tensor, self.sequence_lengths
)
lstm_input = pack_padded_sequence(
sorted_tensor, sorted_sequence.data.tolist(), batch_first=True
)
augmented_lstm(lstm_input)
def test_augmented_lstm_throws_error_on_non_packed_sequence_input(self):
lstm = AugmentedLstm(3, 5)
tensor = torch.rand([5, 7, 9])
with pytest.raises(ConfigurationError):
lstm(tensor)
def test_augmented_lstm_is_initialized_with_correct_biases(self):
lstm = AugmentedLSTMCell(2, 3)
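# A hedged note on the expected layout below: the state linearity's bias is organized in
# blocks of hidden_size (3 here), and only the second block, which is expected to correspond
# to the forget gate, is initialized to 1.0 (a common trick to encourage remembering early
# in training); with use_highway=True there are 5 blocks, without it there are 4.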
true_state_bias = numpy.array([0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0])
numpy.testing.assert_array_equal(lstm.state_linearity.bias.data.numpy(), true_state_bias)
# Non-highway case.
lstm = AugmentedLSTMCell(2, 3, use_highway=False)
true_state_bias = numpy.array([0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0])
numpy.testing.assert_array_equal(lstm.state_linearity.bias.data.numpy(), true_state_bias)
def test_dropout_is_not_applied_to_output_or_returned_hidden_states(self):
sorted_tensor, sorted_sequence, _, _ = sort_batch_by_length(
self.random_tensor, self.sequence_lengths
)
tensor = pack_padded_sequence(
sorted_tensor, sorted_sequence.data.tolist(), batch_first=True
)
lstm = AugmentedLstm(10, 11, recurrent_dropout_probability=0.5)
output, (hidden_state, _) = lstm(tensor)
output_sequence, _ = pad_packed_sequence(output, batch_first=True)
# Test returned output sequence
num_hidden_dims_zero_across_timesteps = ((output_sequence.sum(1) == 0).sum()).item()
# If this count is nonzero, dropout has been applied to the output of the LSTM
assert not num_hidden_dims_zero_across_timesteps
# Dropout should not be applied to the last hidden state, as it is not used
# within the LSTM. Not applying it is more consistent with `torch.nn.LSTM`, where
# dropout is not applied to any of its output, and also more consistent with the
# Keras LSTM implementation.
hidden_state = hidden_state.squeeze()
num_hidden_dims_zero_across_timesteps = ((hidden_state == 0).sum()).item()
assert not num_hidden_dims_zero_across_timesteps
def test_dropout_version_is_different_to_no_dropout(self):
augmented_lstm = AugmentedLstm(10, 11)
dropped_augmented_lstm = AugmentedLstm(10, 11, recurrent_dropout_probability=0.9)
# Initialize all weights to be == 0.5.
constant_init = Initializer.from_params(Params({"type": "constant", "val": 0.5}))
initializer = InitializerApplicator([(".*", constant_init)])
initializer(augmented_lstm)
initializer(dropped_augmented_lstm)
initial_state = torch.randn([1, 5, 11])
initial_memory = torch.randn([1, 5, 11])
# If we use numbers that are too big, as in the PyTorch test, the dropout has no effect
sorted_tensor, sorted_sequence, _, _ = sort_batch_by_length(
self.random_tensor, self.sequence_lengths
)
lstm_input = pack_padded_sequence(
sorted_tensor, sorted_sequence.data.tolist(), batch_first=True
)
augmented_output, augmented_state = augmented_lstm(
lstm_input, (initial_state, initial_memory)
)
dropped_output, dropped_state = dropped_augmented_lstm(
lstm_input, (initial_state, initial_memory)
)
dropped_output_sequence, _ = pad_packed_sequence(dropped_output, batch_first=True)
augmented_output_sequence, _ = pad_packed_sequence(augmented_output, batch_first=True)
with pytest.raises(AssertionError):
numpy.testing.assert_array_almost_equal(
dropped_output_sequence.data.numpy(),
augmented_output_sequence.data.numpy(),
decimal=4,
)
with pytest.raises(AssertionError):
numpy.testing.assert_array_almost_equal(
dropped_state[0].data.numpy(), augmented_state[0].data.numpy(), decimal=4
)
with pytest.raises(AssertionError):
numpy.testing.assert_array_almost_equal(
dropped_state[1].data.numpy(), augmented_state[1].data.numpy(), decimal=4
)
def test_biaugmented_lstm(self):
for bidirectional in [True, False]:
bi_augmented_lstm = BiAugmentedLstm(
10, 11, 3, recurrent_dropout_probability=0.1, bidirectional=bidirectional
)
sorted_tensor, sorted_sequence, _, _ = sort_batch_by_length(
self.random_tensor, self.sequence_lengths
)
lstm_input = pack_padded_sequence(
sorted_tensor, sorted_sequence.data.tolist(), batch_first=True
)
bi_augmented_lstm(lstm_input)
| allennlp-master | tests/modules/augmented_lstm_test.py |
import torch
from allennlp.common import Params
from allennlp.modules import BiMpmMatching
from allennlp.common.testing import AllenNlpTestCase
class TestBiMPMMatching(AllenNlpTestCase):
def test_forward(self):
batch = 16
len1, len2 = 21, 24
seq_len1 = torch.randint(low=len1 - 10, high=len1 + 1, size=(batch,)).long()
seq_len2 = torch.randint(low=len2 - 10, high=len2 + 1, size=(batch,)).long()
mask1 = []
for w in seq_len1:
mask1.append([1] * w.item() + [0] * (len1 - w.item()))
mask1 = torch.tensor(mask1, dtype=torch.bool)
mask2 = []
for w in seq_len2:
mask2.append([1] * w.item() + [0] * (len2 - w.item()))
mask2 = torch.tensor(mask2, dtype=torch.bool)
d = 200 # hidden dimension
n = 20 # number of perspective
test1 = torch.randn(batch, len1, d)
test2 = torch.randn(batch, len2, d)
test1 = test1 * mask1.view(-1, len1, 1).expand(-1, len1, d)
test2 = test2 * mask2.view(-1, len2, 1).expand(-1, len2, d)
test1_fw, test1_bw = torch.split(test1, d // 2, dim=-1)
test2_fw, test2_bw = torch.split(test2, d // 2, dim=-1)
ml_fw = BiMpmMatching.from_params(Params({"is_forward": True, "num_perspectives": n}))
ml_bw = BiMpmMatching.from_params(Params({"is_forward": False, "num_perspectives": n}))
vecs_p_fw, vecs_h_fw = ml_fw(test1_fw, mask1, test2_fw, mask2)
vecs_p_bw, vecs_h_bw = ml_bw(test1_bw, mask1, test2_bw, mask2)
vecs_p, vecs_h = (
torch.cat(vecs_p_fw + vecs_p_bw, dim=2),
torch.cat(vecs_h_fw + vecs_h_bw, dim=2),
)
assert vecs_p.size() == torch.Size([batch, len1, 10 + 10 * n])
assert vecs_h.size() == torch.Size([batch, len2, 10 + 10 * n])
assert (
ml_fw.get_output_dim()
== ml_bw.get_output_dim()
== vecs_p.size(2) // 2
== vecs_h.size(2) // 2
)
| allennlp-master | tests/modules/bimpm_matching_test.py |
from numpy.testing import assert_almost_equal
import inspect
import pytest
import torch
from allennlp.common import Params
from allennlp.common.checks import ConfigurationError
from allennlp.modules import FeedForward
from allennlp.nn import InitializerApplicator, Initializer, Activation
from allennlp.common.testing import AllenNlpTestCase
class TestFeedForward(AllenNlpTestCase):
def test_can_construct_from_params(self):
params = Params({"input_dim": 2, "hidden_dims": 3, "activations": "relu", "num_layers": 2})
feedforward = FeedForward.from_params(params)
assert len(feedforward._activations) == 2
assert [isinstance(a, torch.nn.ReLU) for a in feedforward._activations]
assert len(feedforward._linear_layers) == 2
assert [layer.weight.size(-1) == 3 for layer in feedforward._linear_layers]
params = Params(
{
"input_dim": 2,
"hidden_dims": [3, 4, 5],
"activations": ["relu", "relu", "linear"],
"dropout": 0.2,
"num_layers": 3,
}
)
feedforward = FeedForward.from_params(params)
assert len(feedforward._activations) == 3
assert isinstance(feedforward._activations[0], torch.nn.ReLU)
assert isinstance(feedforward._activations[1], torch.nn.ReLU)
# It's hard to check that the last activation is the lambda function we use for `linear`,
# so this is good enough.
assert not isinstance(feedforward._activations[2], torch.nn.ReLU)
assert len(feedforward._linear_layers) == 3
assert feedforward._linear_layers[0].weight.size(0) == 3
assert feedforward._linear_layers[1].weight.size(0) == 4
assert feedforward._linear_layers[2].weight.size(0) == 5
assert len(feedforward._dropout) == 3
assert [d.p == 0.2 for d in feedforward._dropout]
def test_init_checks_hidden_dim_consistency(self):
with pytest.raises(ConfigurationError):
FeedForward(2, 4, [5, 5], Activation.by_name("relu")())
def test_init_checks_activation_consistency(self):
with pytest.raises(ConfigurationError):
FeedForward(2, 4, 5, [Activation.by_name("relu")(), Activation.by_name("relu")()])
def test_forward_gives_correct_output(self):
params = Params({"input_dim": 2, "hidden_dims": 3, "activations": "relu", "num_layers": 2})
feedforward = FeedForward.from_params(params)
constant_init = Initializer.from_params(Params({"type": "constant", "val": 1.0}))
initializer = InitializerApplicator([(".*", constant_init)])
initializer(feedforward)
input_tensor = torch.FloatTensor([[-3, 1]])
output = feedforward(input_tensor).data.numpy()
assert output.shape == (1, 3)
# This output was checked by hand - ReLU makes output after first hidden layer [0, 0, 0],
# which then gets a bias added in the second layer to be [1, 1, 1].
assert_almost_equal(output, [[1, 1, 1]])
def test_textual_representation_contains_activations(self):
params = Params(
{
"input_dim": 2,
"hidden_dims": 3,
"activations": ["linear", "relu", "swish"],
"num_layers": 3,
}
)
feedforward = FeedForward.from_params(params)
expected_text_representation = inspect.cleandoc(
"""
FeedForward(
(_activations): ModuleList(
(0): Linear()
(1): ReLU()
(2): Swish()
)
(_linear_layers): ModuleList(
(0): Linear(in_features=2, out_features=3, bias=True)
(1): Linear(in_features=3, out_features=3, bias=True)
(2): Linear(in_features=3, out_features=3, bias=True)
)
(_dropout): ModuleList(
(0): Dropout(p=0.0, inplace=False)
(1): Dropout(p=0.0, inplace=False)
(2): Dropout(p=0.0, inplace=False)
)
)
"""
)
actual_text_representation = str(feedforward)
assert actual_text_representation == expected_text_representation
| allennlp-master | tests/modules/feedforward_test.py |
import torch
import pytest
import numpy
from allennlp.common.testing import AllenNlpTestCase
from allennlp.common.checks import ConfigurationError
from allennlp.modules import ScalarMix
from allennlp.nn import util
class TestScalarMix(AllenNlpTestCase):
def test_scalar_mix_can_run_forward(self):
mixture = ScalarMix(3)
tensors = [torch.randn([3, 4, 5]) for _ in range(3)]
for k in range(3):
mixture.scalar_parameters[k].data[0] = 0.1 * (k + 1)
mixture.gamma.data[0] = 0.5
result = mixture(tensors)
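# The expected result computed below follows the scalar mix formula this test checks:
# gamma * sum_k softmax(scalar_parameters)_k * tensors[k], with gamma = 0.5 as set above.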
weights = [0.1, 0.2, 0.3]
normed_weights = numpy.exp(weights) / numpy.sum(numpy.exp(weights))
expected_result = sum(normed_weights[k] * tensors[k].data.numpy() for k in range(3))
expected_result *= 0.5
numpy.testing.assert_almost_equal(expected_result, result.data.numpy())
def test_scalar_mix_throws_error_on_incorrect_number_of_inputs(self):
mixture = ScalarMix(3)
tensors = [torch.randn([3, 4, 5]) for _ in range(5)]
with pytest.raises(ConfigurationError):
_ = mixture(tensors)
def test_scalar_mix_throws_error_on_incorrect_initial_scalar_parameters_length(self):
with pytest.raises(ConfigurationError):
ScalarMix(3, initial_scalar_parameters=[0.0, 0.0])
def test_scalar_mix_trainable_with_initial_scalar_parameters(self):
initial_scalar_parameters = [1.0, 2.0, 3.0]
mixture = ScalarMix(3, initial_scalar_parameters=initial_scalar_parameters, trainable=False)
for i, scalar_mix_parameter in enumerate(mixture.scalar_parameters):
assert scalar_mix_parameter.requires_grad is False
assert scalar_mix_parameter.item() == initial_scalar_parameters[i]
def test_scalar_mix_layer_norm(self):
mixture = ScalarMix(3, do_layer_norm="scalar_norm_reg")
tensors = [torch.randn([3, 4, 5]) for _ in range(3)]
numpy_mask = numpy.ones((3, 4), dtype="int32")
numpy_mask[1, 2:] = 0
mask = torch.from_numpy(numpy_mask).bool()
weights = [0.1, 0.2, 0.3]
for k in range(3):
mixture.scalar_parameters[k].data[0] = weights[k]
mixture.gamma.data[0] = 0.5
result = mixture(tensors, mask)
normed_weights = numpy.exp(weights) / numpy.sum(numpy.exp(weights))
expected_result = numpy.zeros((3, 4, 5))
for k in range(3):
mean = numpy.mean(tensors[k].data.numpy()[numpy_mask == 1])
std = numpy.std(tensors[k].data.numpy()[numpy_mask == 1])
normed_tensor = (tensors[k].data.numpy() - mean) / (
std + util.tiny_value_of_dtype(torch.float)
)
expected_result += normed_tensor * normed_weights[k]
expected_result *= 0.5
numpy.testing.assert_almost_equal(expected_result, result.data.numpy(), decimal=6)
| allennlp-master | tests/modules/scalar_mix_test.py |
import numpy
import pytest
import torch
from torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence
from allennlp.modules.stacked_bidirectional_lstm import StackedBidirectionalLstm
from allennlp.modules.seq2seq_encoders import Seq2SeqEncoder
from allennlp.modules.seq2vec_encoders import Seq2VecEncoder
from allennlp.common.params import Params
from allennlp.nn import InitializerApplicator, Initializer
from allennlp.nn.util import sort_batch_by_length
class TestStackedBidirectionalLstm:
def test_stacked_bidirectional_lstm_completes_forward_pass(self):
input_tensor = torch.rand(4, 5, 3)
input_tensor[1, 4:, :] = 0.0
input_tensor[2, 2:, :] = 0.0
input_tensor[3, 1:, :] = 0.0
input_tensor = pack_padded_sequence(input_tensor, [5, 4, 2, 1], batch_first=True)
lstm = StackedBidirectionalLstm(3, 7, 3)
output, _ = lstm(input_tensor)
output_sequence, _ = pad_packed_sequence(output, batch_first=True)
numpy.testing.assert_array_equal(output_sequence.data[1, 4:, :].numpy(), 0.0)
numpy.testing.assert_array_equal(output_sequence.data[2, 2:, :].numpy(), 0.0)
numpy.testing.assert_array_equal(output_sequence.data[3, 1:, :].numpy(), 0.0)
def test_stacked_bidirectional_lstm_can_build_from_params(self):
params = Params(
{
"type": "stacked_bidirectional_lstm",
"input_size": 5,
"hidden_size": 9,
"num_layers": 3,
}
)
encoder = Seq2SeqEncoder.from_params(params)
assert encoder.get_input_dim() == 5
assert encoder.get_output_dim() == 18
assert encoder.is_bidirectional
def test_stacked_bidirectional_lstm_can_build_from_params_seq2vec(self):
params = Params(
{
"type": "stacked_bidirectional_lstm",
"input_size": 5,
"hidden_size": 9,
"num_layers": 3,
}
)
encoder = Seq2VecEncoder.from_params(params)
assert encoder.get_input_dim() == 5
assert encoder.get_output_dim() == 18
def test_stacked_bidirectional_lstm_can_complete_forward_pass_seq2vec(self):
params = Params(
{
"type": "stacked_bidirectional_lstm",
"input_size": 3,
"hidden_size": 9,
"num_layers": 3,
}
)
encoder = Seq2VecEncoder.from_params(params)
input_tensor = torch.rand(4, 5, 3)
mask = torch.ones(4, 5).bool()
output = encoder(input_tensor, mask)
assert output.detach().numpy().shape == (4, 18)
@pytest.mark.parametrize(
"dropout_name", ("layer_dropout_probability", "recurrent_dropout_probability")
)
def test_stacked_bidirectional_lstm_dropout_version_is_different(self, dropout_name: str):
stacked_lstm = StackedBidirectionalLstm(input_size=10, hidden_size=11, num_layers=3)
if dropout_name == "layer_dropout_probability":
dropped_stacked_lstm = StackedBidirectionalLstm(
input_size=10, hidden_size=11, num_layers=3, layer_dropout_probability=0.9
)
elif dropout_name == "recurrent_dropout_probability":
dropped_stacked_lstm = StackedBidirectionalLstm(
input_size=10, hidden_size=11, num_layers=3, recurrent_dropout_probability=0.9
)
else:
raise ValueError("Do not recognise the following dropout name " f"{dropout_name}")
# Initialize all weights to be == 0.5.
constant_init = Initializer.from_params(Params({"type": "constant", "val": 0.5}))
initializer = InitializerApplicator([(".*", constant_init)])
initializer(stacked_lstm)
initializer(dropped_stacked_lstm)
initial_state = torch.randn([3, 5, 11])
initial_memory = torch.randn([3, 5, 11])
tensor = torch.rand([5, 7, 10])
sequence_lengths = torch.LongTensor([7, 7, 7, 7, 7])
sorted_tensor, sorted_sequence, _, _ = sort_batch_by_length(tensor, sequence_lengths)
lstm_input = pack_padded_sequence(
sorted_tensor, sorted_sequence.data.tolist(), batch_first=True
)
stacked_output, stacked_state = stacked_lstm(lstm_input, (initial_state, initial_memory))
dropped_output, dropped_state = dropped_stacked_lstm(
lstm_input, (initial_state, initial_memory)
)
dropped_output_sequence, _ = pad_packed_sequence(dropped_output, batch_first=True)
stacked_output_sequence, _ = pad_packed_sequence(stacked_output, batch_first=True)
if dropout_name == "layer_dropout_probability":
with pytest.raises(AssertionError):
numpy.testing.assert_array_almost_equal(
dropped_output_sequence.data.numpy(),
stacked_output_sequence.data.numpy(),
decimal=4,
)
if dropout_name == "recurrent_dropout_probability":
with pytest.raises(AssertionError):
numpy.testing.assert_array_almost_equal(
dropped_state[0].data.numpy(), stacked_state[0].data.numpy(), decimal=4
)
with pytest.raises(AssertionError):
numpy.testing.assert_array_almost_equal(
dropped_state[1].data.numpy(), stacked_state[1].data.numpy(), decimal=4
)
| allennlp-master | tests/modules/stacked_bidirectional_lstm_test.py |
import numpy as np
import torch
from allennlp.common.testing import AllenNlpTestCase
from allennlp.modules.masked_layer_norm import MaskedLayerNorm
from allennlp.nn import util
class TestMaskedLayerNorm(AllenNlpTestCase):
def test_masked_layer_norm(self):
x_n = np.random.rand(2, 3, 7)
mask_n = np.array([[1, 1, 0], [1, 1, 1]])
x = torch.from_numpy(x_n).float()
mask = torch.from_numpy(mask_n).bool()
layer_norm = MaskedLayerNorm(7, gamma0=0.2)
normed_x = layer_norm(x, mask)
N = 7 * 5
mean = (x_n * np.expand_dims(mask_n, axis=-1)).sum() / N
std = np.sqrt(
(((x_n - mean) * np.expand_dims(mask_n, axis=-1)) ** 2).sum() / N
+ util.tiny_value_of_dtype(torch.float)
)
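        # The expected output follows the usual layer-norm form, restricted to unmasked
        # positions:  normed = gamma * (x - mean) / (std + tiny),
        # with mean and std computed over the N = 7 * 5 unmasked values and gamma = 0.2.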
expected = 0.2 * (x_n - mean) / (std + util.tiny_value_of_dtype(torch.float))
assert np.allclose(normed_x.data.numpy(), expected)
| allennlp-master | tests/modules/masked_layer_norm_test.py |
import pytest
from allennlp.common import Params
from allennlp.common.checks import ConfigurationError
from allennlp.modules import Seq2VecEncoder
from allennlp.common.testing import AllenNlpTestCase
class TestSeq2VecEncoder(AllenNlpTestCase):
    def test_from_params_builds_encoder_correctly(self):
# We're just making sure parameters get passed through correctly here, and that the basic
# API works.
params = Params(
{
"type": "lstm",
"bidirectional": True,
"num_layers": 3,
"input_size": 5,
"hidden_size": 7,
}
)
encoder = Seq2VecEncoder.from_params(params)
assert encoder.__class__.__name__ == "LstmSeq2VecEncoder"
assert encoder._module.__class__.__name__ == "LSTM"
assert encoder._module.num_layers == 3
assert encoder._module.input_size == 5
assert encoder._module.hidden_size == 7
assert encoder._module.bidirectional is True
assert encoder._module.batch_first is True
def test_from_params_requires_batch_first(self):
params = Params({"type": "lstm", "batch_first": False})
with pytest.raises(ConfigurationError):
Seq2VecEncoder.from_params(params)
| allennlp-master | tests/modules/seq2vec_encoder_test.py |
import numpy
import torch
from allennlp.modules.elmo_lstm import ElmoLstm
from allennlp.common.testing import AllenNlpTestCase
class TestElmoLstmCell(AllenNlpTestCase):
def test_elmo_lstm(self):
input_tensor = torch.rand(4, 5, 3)
input_tensor[1, 4:, :] = 0.0
input_tensor[2, 2:, :] = 0.0
input_tensor[3, 1:, :] = 0.0
mask = torch.ones([4, 5]).bool()
mask[1, 4:] = False
mask[2, 2:] = False
mask[3, 1:] = False
lstm = ElmoLstm(
num_layers=2,
input_size=3,
hidden_size=5,
cell_size=7,
memory_cell_clip_value=2,
state_projection_clip_value=1,
)
output_sequence = lstm(input_tensor, mask)
# Check all the layer outputs are masked properly.
numpy.testing.assert_array_equal(output_sequence.data[:, 1, 4:, :].numpy(), 0.0)
numpy.testing.assert_array_equal(output_sequence.data[:, 2, 2:, :].numpy(), 0.0)
numpy.testing.assert_array_equal(output_sequence.data[:, 3, 1:, :].numpy(), 0.0)
        # The LSTM state should be (num_layers, batch_size, 2 * hidden_size),
        # since the forward and backward directions are concatenated.
        assert list(lstm._states[0].size()) == [2, 4, 10]
        # The LSTM memory cell should be (num_layers, batch_size, 2 * cell_size).
        assert list(lstm._states[1].size()) == [2, 4, 14]
| allennlp-master | tests/modules/stacked_elmo_lstm_test.py |
from typing import Tuple
import torch
import numpy as np
from allennlp.common.testing import AllenNlpTestCase
from allennlp.modules.sampled_softmax_loss import _choice, SampledSoftmaxLoss
from allennlp.modules import SoftmaxLoss
class TestSampledSoftmaxLoss(AllenNlpTestCase):
def test_choice(self):
sample, num_tries = _choice(num_words=1000, num_samples=50)
assert len(set(sample)) == 50
assert all(0 <= x < 1000 for x in sample)
assert num_tries >= 50
def test_sampled_softmax_can_run(self):
softmax = SampledSoftmaxLoss(num_words=1000, embedding_dim=12, num_samples=50)
# sequence_length, embedding_dim
embedding = torch.rand(100, 12)
targets = torch.randint(0, 1000, (100,)).long()
_ = softmax(embedding, targets)
def test_sampled_equals_unsampled_during_eval(self):
sampled_softmax = SampledSoftmaxLoss(num_words=10000, embedding_dim=12, num_samples=40)
unsampled_softmax = SoftmaxLoss(num_words=10000, embedding_dim=12)
sampled_softmax.eval()
unsampled_softmax.eval()
# set weights equal, use transpose because opposite shapes
sampled_softmax.softmax_w.data = unsampled_softmax.softmax_w.t()
sampled_softmax.softmax_b.data = unsampled_softmax.softmax_b
# sequence_length, embedding_dim
embedding = torch.rand(100, 12)
targets = torch.randint(0, 1000, (100,)).long()
full_loss = unsampled_softmax(embedding, targets).item()
sampled_loss = sampled_softmax(embedding, targets).item()
# Should be really close
np.testing.assert_almost_equal(sampled_loss, full_loss)
def test_sampled_softmax_has_greater_loss_in_train_mode(self):
sampled_softmax = SampledSoftmaxLoss(num_words=10000, embedding_dim=12, num_samples=10)
# sequence_length, embedding_dim
embedding = torch.rand(100, 12)
targets = torch.randint(0, 1000, (100,)).long()
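        # In training mode the loss is normalized over only the sampled subset of words,
        # so it should underestimate the full-softmax loss computed in eval mode.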
sampled_softmax.train()
train_loss = sampled_softmax(embedding, targets).item()
sampled_softmax.eval()
eval_loss = sampled_softmax(embedding, targets).item()
assert eval_loss > train_loss
def test_sampled_equals_unsampled_when_biased_against_non_sampled_positions(self):
sampled_softmax = SampledSoftmaxLoss(num_words=10000, embedding_dim=12, num_samples=10)
unsampled_softmax = SoftmaxLoss(num_words=10000, embedding_dim=12)
# fake out choice function
FAKE_SAMPLES = [100, 200, 300, 400, 500, 600, 700, 800, 900, 9999]
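        # Idea of this test: force the sampler to always pick FAKE_SAMPLES, and push the
        # unsampled words' biases so low that the full softmax effectively only "sees"
        # those same words -- the two losses should then be close.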
def fake_choice(num_words: int, num_samples: int) -> Tuple[np.ndarray, int]:
assert (num_words, num_samples) == (10000, 10)
return np.array(FAKE_SAMPLES), 12
sampled_softmax.choice_func = fake_choice
# bias out the unsampled terms:
for i in range(10000):
if i not in FAKE_SAMPLES:
unsampled_softmax.softmax_b[i] = -10000
# set weights equal, use transpose because opposite shapes
sampled_softmax.softmax_w.data = unsampled_softmax.softmax_w.t()
sampled_softmax.softmax_b.data = unsampled_softmax.softmax_b
sampled_softmax.train()
unsampled_softmax.train()
# sequence_length, embedding_dim
embedding = torch.rand(100, 12)
targets = torch.randint(0, 1000, (100,)).long()
full_loss = unsampled_softmax(embedding, targets).item()
sampled_loss = sampled_softmax(embedding, targets).item()
# Should be close
pct_error = (sampled_loss - full_loss) / full_loss
assert abs(pct_error) < 0.001
| allennlp-master | tests/modules/sampled_softmax_loss_test.py |
from numpy.testing import assert_almost_equal
from overrides import overrides
import torch
from torch.nn import Embedding, Module, Parameter
from allennlp.common.testing import AllenNlpTestCase
from allennlp.modules import TimeDistributed
class TestTimeDistributed(AllenNlpTestCase):
def test_time_distributed_reshapes_named_arg_correctly(self):
char_embedding = Embedding(2, 2)
char_embedding.weight = Parameter(torch.FloatTensor([[0.4, 0.4], [0.5, 0.5]]))
distributed_embedding = TimeDistributed(char_embedding)
char_input = torch.LongTensor([[[1, 0], [1, 1]]])
output = distributed_embedding(char_input)
assert_almost_equal(
output.data.numpy(), [[[[0.5, 0.5], [0.4, 0.4]], [[0.5, 0.5], [0.5, 0.5]]]]
)
def test_time_distributed_reshapes_positional_kwarg_correctly(self):
char_embedding = Embedding(2, 2)
char_embedding.weight = Parameter(torch.FloatTensor([[0.4, 0.4], [0.5, 0.5]]))
distributed_embedding = TimeDistributed(char_embedding)
char_input = torch.LongTensor([[[1, 0], [1, 1]]])
output = distributed_embedding(input=char_input)
assert_almost_equal(
output.data.numpy(), [[[[0.5, 0.5], [0.4, 0.4]], [[0.5, 0.5], [0.5, 0.5]]]]
)
def test_time_distributed_works_with_multiple_inputs(self):
module = lambda x, y: x + y
distributed = TimeDistributed(module)
x_input = torch.LongTensor([[[1, 2], [3, 4]]])
y_input = torch.LongTensor([[[4, 2], [9, 1]]])
output = distributed(x_input, y_input)
assert_almost_equal(output.data.numpy(), [[[5, 4], [12, 5]]])
def test_time_distributed_reshapes_multiple_inputs_with_pass_through_tensor_correctly(self):
class FakeModule(Module):
@overrides
def forward(self, input_tensor, tensor_to_pass_through=None, another_tensor=None):
return input_tensor + tensor_to_pass_through + another_tensor
module = FakeModule()
distributed_module = TimeDistributed(module)
input_tensor1 = torch.LongTensor([[[1, 2], [3, 4]]])
input_to_pass_through = torch.LongTensor([3, 7])
input_tensor2 = torch.LongTensor([[[4, 2], [9, 1]]])
output = distributed_module(
input_tensor1,
tensor_to_pass_through=input_to_pass_through,
another_tensor=input_tensor2,
pass_through=["tensor_to_pass_through"],
)
assert_almost_equal(output.data.numpy(), [[[8, 11], [15, 12]]])
def test_time_distributed_reshapes_multiple_inputs_with_pass_through_non_tensor_correctly(self):
class FakeModule(Module):
@overrides
def forward(self, input_tensor, number=0, another_tensor=None):
return input_tensor + number + another_tensor
module = FakeModule()
distributed_module = TimeDistributed(module)
input_tensor1 = torch.LongTensor([[[1, 2], [3, 4]]])
input_number = 5
input_tensor2 = torch.LongTensor([[[4, 2], [9, 1]]])
output = distributed_module(
input_tensor1,
number=input_number,
another_tensor=input_tensor2,
pass_through=["number"],
)
assert_almost_equal(output.data.numpy(), [[[10, 9], [17, 10]]])
| allennlp-master | tests/modules/time_distributed_test.py |
import numpy
import pytest
import torch
from torch.nn import LSTM, RNN
from allennlp.modules.encoder_base import _EncoderBase
from allennlp.common.testing import AllenNlpTestCase, requires_gpu
from allennlp.nn.util import sort_batch_by_length, get_lengths_from_binary_sequence_mask
class TestEncoderBase(AllenNlpTestCase):
def setup_method(self):
super().setup_method()
self.lstm = LSTM(
bidirectional=True, num_layers=3, input_size=3, hidden_size=7, batch_first=True
)
self.rnn = RNN(
bidirectional=True, num_layers=3, input_size=3, hidden_size=7, batch_first=True
)
self.encoder_base = _EncoderBase(stateful=True)
tensor = torch.rand([5, 7, 3])
tensor[1, 6:, :] = 0
tensor[3, 2:, :] = 0
self.tensor = tensor
mask = torch.ones(5, 7).bool()
mask[1, 6:] = False
mask[2, :] = False # <= completely masked
mask[3, 2:] = False
mask[4, :] = False # <= completely masked
self.mask = mask
self.batch_size = 5
self.num_valid = 3
sequence_lengths = get_lengths_from_binary_sequence_mask(mask)
_, _, restoration_indices, sorting_indices = sort_batch_by_length(tensor, sequence_lengths)
self.sorting_indices = sorting_indices
self.restoration_indices = restoration_indices
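        # With the mask above, the sequence lengths are [7, 6, 0, 2, 0], so sorting by
        # decreasing length gives sorting indices [0, 1, 3, 2, 4] (as the hard-coded
        # comments in the tests below assume).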
def test_non_stateful_states_are_sorted_correctly(self):
encoder_base = _EncoderBase(stateful=False)
initial_states = (torch.randn(6, 5, 7), torch.randn(6, 5, 7))
# Check that we sort the state for non-stateful encoders. To test
# we'll just use a "pass through" encoder, as we aren't actually testing
# the functionality of the encoder here anyway.
_, states, restoration_indices = encoder_base.sort_and_run_forward(
lambda *x: x, self.tensor, self.mask, initial_states
)
# Our input tensor had 2 zero length sequences, so we need
# to concat a tensor of shape
# (num_layers * num_directions, batch_size - num_valid, hidden_dim),
# to the output before unsorting it.
zeros = torch.zeros([6, 2, 7])
# sort_and_run_forward strips fully-padded instances from the batch;
# in order to use the restoration_indices we need to add back the two
# that got stripped. What we get back should match what we started with.
for state, original in zip(states, initial_states):
assert list(state.size()) == [6, 3, 7]
state_with_zeros = torch.cat([state, zeros], 1)
unsorted_state = state_with_zeros.index_select(1, restoration_indices)
for index in [0, 1, 3]:
numpy.testing.assert_array_equal(
unsorted_state[:, index, :].data.numpy(), original[:, index, :].data.numpy()
)
def test_get_initial_states(self):
# First time we call it, there should be no state, so we should return None.
assert (
self.encoder_base._get_initial_states(
self.batch_size, self.num_valid, self.sorting_indices
)
is None
)
# First test the case that the previous state is _smaller_ than the current state input.
initial_states = (torch.randn([1, 3, 7]), torch.randn([1, 3, 7]))
self.encoder_base._states = initial_states
# sorting indices are: [0, 1, 3, 2, 4]
returned_states = self.encoder_base._get_initial_states(
self.batch_size, self.num_valid, self.sorting_indices
)
correct_expanded_states = [
torch.cat([state, torch.zeros([1, 2, 7])], 1) for state in initial_states
]
# State should have been expanded with zeros to have shape (1, batch_size, hidden_size).
numpy.testing.assert_array_equal(
self.encoder_base._states[0].data.numpy(), correct_expanded_states[0].data.numpy()
)
numpy.testing.assert_array_equal(
self.encoder_base._states[1].data.numpy(), correct_expanded_states[1].data.numpy()
)
# The returned states should be of shape (1, num_valid, hidden_size) and
# they also should have been sorted with respect to the indices.
# sorting indices are: [0, 1, 3, 2, 4]
correct_returned_states = [
state.index_select(1, self.sorting_indices)[:, : self.num_valid, :]
for state in correct_expanded_states
]
numpy.testing.assert_array_equal(
returned_states[0].data.numpy(), correct_returned_states[0].data.numpy()
)
numpy.testing.assert_array_equal(
returned_states[1].data.numpy(), correct_returned_states[1].data.numpy()
)
# Now test the case that the previous state is larger:
original_states = (torch.randn([1, 10, 7]), torch.randn([1, 10, 7]))
self.encoder_base._states = original_states
# sorting indices are: [0, 1, 3, 2, 4]
returned_states = self.encoder_base._get_initial_states(
self.batch_size, self.num_valid, self.sorting_indices
)
# State should not have changed, as they were larger
# than the batch size of the requested states.
numpy.testing.assert_array_equal(
self.encoder_base._states[0].data.numpy(), original_states[0].data.numpy()
)
numpy.testing.assert_array_equal(
self.encoder_base._states[1].data.numpy(), original_states[1].data.numpy()
)
# The returned states should be of shape (1, num_valid, hidden_size) and they
# also should have been sorted with respect to the indices.
correct_returned_state = [
x.index_select(1, self.sorting_indices)[:, : self.num_valid, :] for x in original_states
]
numpy.testing.assert_array_equal(
returned_states[0].data.numpy(), correct_returned_state[0].data.numpy()
)
numpy.testing.assert_array_equal(
returned_states[1].data.numpy(), correct_returned_state[1].data.numpy()
)
def test_update_states(self):
assert self.encoder_base._states is None
initial_states = torch.randn([1, 5, 7]), torch.randn([1, 5, 7])
index_selected_initial_states = (
initial_states[0].index_select(1, self.restoration_indices),
initial_states[1].index_select(1, self.restoration_indices),
)
self.encoder_base._update_states(initial_states, self.restoration_indices)
# State was None, so the updated state should just be the sorted given state.
numpy.testing.assert_array_equal(
self.encoder_base._states[0].data.numpy(), index_selected_initial_states[0].data.numpy()
)
numpy.testing.assert_array_equal(
self.encoder_base._states[1].data.numpy(), index_selected_initial_states[1].data.numpy()
)
new_states = torch.randn([1, 5, 7]), torch.randn([1, 5, 7])
# tensor has 2 completely masked rows, so the last 2 rows of the _sorted_ states
# will be completely zero, having been appended after calling the respective encoder.
new_states[0][:, -2:, :] = 0
new_states[1][:, -2:, :] = 0
index_selected_new_states = (
new_states[0].index_select(1, self.restoration_indices),
new_states[1].index_select(1, self.restoration_indices),
)
self.encoder_base._update_states(new_states, self.restoration_indices)
# Check that the update _preserved_ the state for the rows which were
# completely masked (2 and 4):
for index in [2, 4]:
numpy.testing.assert_array_equal(
self.encoder_base._states[0][:, index, :].data.numpy(),
index_selected_initial_states[0][:, index, :].data.numpy(),
)
numpy.testing.assert_array_equal(
self.encoder_base._states[1][:, index, :].data.numpy(),
index_selected_initial_states[1][:, index, :].data.numpy(),
)
# Now the states which were updated:
for index in [0, 1, 3]:
numpy.testing.assert_array_equal(
self.encoder_base._states[0][:, index, :].data.numpy(),
index_selected_new_states[0][:, index, :].data.numpy(),
)
numpy.testing.assert_array_equal(
self.encoder_base._states[1][:, index, :].data.numpy(),
index_selected_new_states[1][:, index, :].data.numpy(),
)
# Now test the case that the new state is smaller:
small_new_states = torch.randn([1, 3, 7]), torch.randn([1, 3, 7])
# pretend the 2nd sequence in the batch was fully masked.
small_restoration_indices = torch.LongTensor([2, 0, 1])
small_new_states[0][:, 0, :] = 0
small_new_states[1][:, 0, :] = 0
index_selected_small_states = (
small_new_states[0].index_select(1, small_restoration_indices),
small_new_states[1].index_select(1, small_restoration_indices),
)
self.encoder_base._update_states(small_new_states, small_restoration_indices)
        # Check that the rows we did not update are the same as after the previous step:
for index in [1, 3]:
numpy.testing.assert_array_equal(
self.encoder_base._states[0][:, index, :].data.numpy(),
index_selected_new_states[0][:, index, :].data.numpy(),
)
numpy.testing.assert_array_equal(
self.encoder_base._states[1][:, index, :].data.numpy(),
index_selected_new_states[1][:, index, :].data.numpy(),
)
# Indices we did update:
for index in [0, 2]:
numpy.testing.assert_array_equal(
self.encoder_base._states[0][:, index, :].data.numpy(),
index_selected_small_states[0][:, index, :].data.numpy(),
)
numpy.testing.assert_array_equal(
self.encoder_base._states[1][:, index, :].data.numpy(),
index_selected_small_states[1][:, index, :].data.numpy(),
)
# We didn't update index 4 in the previous step either, so it should be equal to the
# 4th index of initial states.
numpy.testing.assert_array_equal(
self.encoder_base._states[0][:, 4, :].data.numpy(),
index_selected_initial_states[0][:, 4, :].data.numpy(),
)
numpy.testing.assert_array_equal(
self.encoder_base._states[1][:, 4, :].data.numpy(),
index_selected_initial_states[1][:, 4, :].data.numpy(),
)
def test_reset_states(self):
# Initialize the encoder states.
assert self.encoder_base._states is None
initial_states = torch.randn([1, 5, 7]), torch.randn([1, 5, 7])
index_selected_initial_states = (
initial_states[0].index_select(1, self.restoration_indices),
initial_states[1].index_select(1, self.restoration_indices),
)
self.encoder_base._update_states(initial_states, self.restoration_indices)
# Check that only some of the states are reset when a mask is provided.
mask = torch.tensor([True, True, False, False, False])
self.encoder_base.reset_states(mask)
# First two states should be zeros
numpy.testing.assert_array_equal(
self.encoder_base._states[0][:, :2, :].data.numpy(),
torch.zeros_like(initial_states[0])[:, :2, :].data.numpy(),
)
numpy.testing.assert_array_equal(
self.encoder_base._states[1][:, :2, :].data.numpy(),
torch.zeros_like(initial_states[1])[:, :2, :].data.numpy(),
)
# Remaining states should be the same
numpy.testing.assert_array_equal(
self.encoder_base._states[0][:, 2:, :].data.numpy(),
index_selected_initial_states[0][:, 2:, :].data.numpy(),
)
numpy.testing.assert_array_equal(
self.encoder_base._states[1][:, 2:, :].data.numpy(),
index_selected_initial_states[1][:, 2:, :].data.numpy(),
)
# Check that error is raised if mask has wrong batch size.
bad_mask = torch.tensor([True, True, False])
with pytest.raises(ValueError):
self.encoder_base.reset_states(bad_mask)
# Check that states are reset to None if no mask is provided.
self.encoder_base.reset_states()
assert self.encoder_base._states is None
def test_non_contiguous_initial_states_handled(self):
# Check that the encoder is robust to non-contiguous initial states.
# Case 1: Encoder is not stateful
        # A transposition will make the tensors non-contiguous, so we start them off with the
        # wrong shape and permute them into the right one.
encoder_base = _EncoderBase(stateful=False)
initial_states = (
torch.randn(5, 6, 7).permute(1, 0, 2),
torch.randn(5, 6, 7).permute(1, 0, 2),
)
assert not initial_states[0].is_contiguous() and not initial_states[1].is_contiguous()
assert initial_states[0].size() == torch.Size([6, 5, 7])
assert initial_states[1].size() == torch.Size([6, 5, 7])
# We'll pass them through an LSTM encoder and a vanilla RNN encoder to make sure it works
# whether the initial states are a tuple of tensors or just a single tensor.
encoder_base.sort_and_run_forward(self.lstm, self.tensor, self.mask, initial_states)
encoder_base.sort_and_run_forward(self.rnn, self.tensor, self.mask, initial_states[0])
# Case 2: Encoder is stateful
# For stateful encoders, the initial state may be non-contiguous if its state was
# previously updated with non-contiguous tensors. As in the non-stateful tests, we check
# that the encoder still works on initial states for RNNs and LSTMs.
final_states = initial_states
# Check LSTM
encoder_base = _EncoderBase(stateful=True)
encoder_base._update_states(final_states, self.restoration_indices)
encoder_base.sort_and_run_forward(self.lstm, self.tensor, self.mask)
# Check RNN
encoder_base.reset_states()
encoder_base._update_states([final_states[0]], self.restoration_indices)
encoder_base.sort_and_run_forward(self.rnn, self.tensor, self.mask)
@requires_gpu
def test_non_contiguous_initial_states_handled_on_gpu(self):
# Some PyTorch operations which produce contiguous tensors on the CPU produce
# non-contiguous tensors on the GPU (e.g. forward pass of an RNN when batch_first=True).
# Accordingly, we perform the same checks from previous test on the GPU to ensure the
# encoder is not affected by which device it is on.
# Case 1: Encoder is not stateful
        # A transposition will make the tensors non-contiguous, so we start them off with the
        # wrong shape and permute them into the right one.
encoder_base = _EncoderBase(stateful=False).cuda()
initial_states = (
torch.randn(5, 6, 7).cuda().permute(1, 0, 2),
torch.randn(5, 6, 7).cuda().permute(1, 0, 2),
)
assert not initial_states[0].is_contiguous() and not initial_states[1].is_contiguous()
assert initial_states[0].size() == torch.Size([6, 5, 7])
assert initial_states[1].size() == torch.Size([6, 5, 7])
# We'll pass them through an LSTM encoder and a vanilla RNN encoder to make sure it works
# whether the initial states are a tuple of tensors or just a single tensor.
encoder_base.sort_and_run_forward(
self.lstm.cuda(), self.tensor.cuda(), self.mask.cuda(), initial_states
)
encoder_base.sort_and_run_forward(
self.rnn.cuda(), self.tensor.cuda(), self.mask.cuda(), initial_states[0]
)
# Case 2: Encoder is stateful
# For stateful encoders, the initial state may be non-contiguous if its state was
# previously updated with non-contiguous tensors. As in the non-stateful tests, we check
# that the encoder still works on initial states for RNNs and LSTMs.
final_states = initial_states
# Check LSTM
encoder_base = _EncoderBase(stateful=True).cuda()
encoder_base._update_states(final_states, self.restoration_indices.cuda())
encoder_base.sort_and_run_forward(self.lstm.cuda(), self.tensor.cuda(), self.mask.cuda())
# Check RNN
encoder_base.reset_states()
encoder_base._update_states([final_states[0]], self.restoration_indices.cuda())
encoder_base.sort_and_run_forward(self.rnn.cuda(), self.tensor.cuda(), self.mask.cuda())
| allennlp-master | tests/modules/encoder_base_test.py |
import numpy
import torch
from allennlp.modules.lstm_cell_with_projection import LstmCellWithProjection
from allennlp.common.testing import AllenNlpTestCase
class TestLstmCellWithProjection(AllenNlpTestCase):
def test_elmo_lstm_cell_completes_forward_pass(self):
input_tensor = torch.rand(4, 5, 3)
input_tensor[1, 4:, :] = 0.0
input_tensor[2, 2:, :] = 0.0
input_tensor[3, 1:, :] = 0.0
initial_hidden_state = torch.ones([1, 4, 5])
initial_memory_state = torch.ones([1, 4, 7])
lstm = LstmCellWithProjection(
input_size=3,
hidden_size=5,
cell_size=7,
memory_cell_clip_value=2,
state_projection_clip_value=1,
)
output_sequence, lstm_state = lstm(
input_tensor, [5, 4, 2, 1], (initial_hidden_state, initial_memory_state)
)
numpy.testing.assert_array_equal(output_sequence.data[1, 4:, :].numpy(), 0.0)
numpy.testing.assert_array_equal(output_sequence.data[2, 2:, :].numpy(), 0.0)
numpy.testing.assert_array_equal(output_sequence.data[3, 1:, :].numpy(), 0.0)
# Test the state clipping.
numpy.testing.assert_array_less(output_sequence.data.numpy(), 1.0)
numpy.testing.assert_array_less(-output_sequence.data.numpy(), 1.0)
# LSTM state should be (num_layers, batch_size, hidden_size)
assert list(lstm_state[0].size()) == [1, 4, 5]
# LSTM memory cell should be (num_layers, batch_size, cell_size)
        assert list(lstm_state[1].size()) == [1, 4, 7]
        # Test the memory cell clipping. The memory cell is the second element of the state
        # tuple; clipping may saturate it at exactly +/- 2, so use a non-strict bound.
        assert numpy.all(numpy.abs(lstm_state[1].data.numpy()) <= 2.0)
| allennlp-master | tests/modules/lstm_cell_with_projection_test.py |
import pytest
from allennlp.common import Params
from allennlp.common.checks import ConfigurationError
from allennlp.modules import Seq2SeqEncoder
from allennlp.common.testing import AllenNlpTestCase
class TestSeq2SeqEncoder(AllenNlpTestCase):
    def test_from_params_builds_encoder_correctly(self):
# We're just making sure parameters get passed through correctly here, and that the basic
# API works.
params = Params(
{
"type": "lstm",
"bidirectional": True,
"num_layers": 3,
"input_size": 5,
"hidden_size": 7,
"stateful": True,
}
)
encoder = Seq2SeqEncoder.from_params(params)
assert encoder.__class__.__name__ == "LstmSeq2SeqEncoder"
assert encoder._module.__class__.__name__ == "LSTM"
assert encoder._module.num_layers == 3
assert encoder._module.input_size == 5
assert encoder._module.hidden_size == 7
assert encoder._module.bidirectional is True
assert encoder._module.batch_first is True
assert encoder.stateful is True
def test_from_params_requires_batch_first(self):
params = Params({"type": "lstm", "batch_first": False})
with pytest.raises(ConfigurationError):
Seq2SeqEncoder.from_params(params)
| allennlp-master | tests/modules/seq2seq_encoder_test.py |
from numpy.testing import assert_almost_equal
import torch
from allennlp.modules import ResidualWithLayerDropout
from allennlp.common.testing import AllenNlpTestCase
class TestResidualWithLayerDropout(AllenNlpTestCase):
def test_dropout_works_for_training(self):
layer_input_tensor = torch.FloatTensor([[2, 1], [-3, -2]])
layer_output_tensor = torch.FloatTensor([[1, 3], [2, -1]])
# The layer output should be dropped
residual_with_layer_dropout = ResidualWithLayerDropout(1)
residual_with_layer_dropout.train()
result = residual_with_layer_dropout(layer_input_tensor, layer_output_tensor).data.numpy()
assert result.shape == (2, 2)
assert_almost_equal(result, [[2, 1], [-3, -2]])
result = residual_with_layer_dropout(
layer_input_tensor, layer_output_tensor, 1, 1
).data.numpy()
assert result.shape == (2, 2)
assert_almost_equal(result, [[2, 1], [-3, -2]])
# The layer output should not be dropped
residual_with_layer_dropout = ResidualWithLayerDropout(0.0)
residual_with_layer_dropout.train()
result = residual_with_layer_dropout(layer_input_tensor, layer_output_tensor).data.numpy()
assert result.shape == (2, 2)
assert_almost_equal(result, [[2 + 1, 1 + 3], [-3 + 2, -2 - 1]])
def test_dropout_works_for_testing(self):
layer_input_tensor = torch.FloatTensor([[2, 1], [-3, -2]])
layer_output_tensor = torch.FloatTensor([[1, 3], [2, -1]])
# During testing, the layer output is re-calibrated according to the survival probability,
# and then added to the input.
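        # Concretely, with a layer dropout probability of 0.2 this means
        # output = input + (1 - 0.2) * layer_output, which is what the expected values below use.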
residual_with_layer_dropout = ResidualWithLayerDropout(0.2)
residual_with_layer_dropout.eval()
result = residual_with_layer_dropout(layer_input_tensor, layer_output_tensor).data.numpy()
assert result.shape == (2, 2)
assert_almost_equal(result, [[2 + 1 * 0.8, 1 + 3 * 0.8], [-3 + 2 * 0.8, -2 - 1 * 0.8]])
| allennlp-master | tests/modules/residual_with_layer_dropout_test.py |
import pytest
import torch
import numpy
from allennlp.common.testing import AllenNlpTestCase
from allennlp.modules import GatedSum
class TestGatedSum(AllenNlpTestCase):
def test_gated_sum_can_run_forward(self):
a = torch.FloatTensor([1, 2, 3, 4, 5])
b = -a + 0.1
weight_value = 2
gate_value = torch.sigmoid(torch.FloatTensor([1]))
expected = gate_value * a + (1 - gate_value) * b
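        # Why sigmoid(1): with every gate weight equal to 2, zero bias, and (a + b) == 0.1
        # element-wise, the gate pre-activation is 2 * 5 * 0.1 = 1. (This assumes the gate is
        # computed from the element-wise sum of the two inputs.)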
with torch.no_grad(): # because we want to change the weight
gated_sum = GatedSum(a.size(-1))
gated_sum._gate.weight *= 0
gated_sum._gate.weight += weight_value
gated_sum._gate.bias *= 0
out = gated_sum(a, b)
numpy.testing.assert_almost_equal(expected.data.numpy(), out.data.numpy(), decimal=5)
with pytest.raises(ValueError):
GatedSum(a.size(-1))(a, b.unsqueeze(0))
with pytest.raises(ValueError):
GatedSum(100)(a, b)
def test_input_output_dim(self):
dim = 77
gated_sum = GatedSum(dim)
numpy.testing.assert_equal(gated_sum.get_input_dim(), dim)
numpy.testing.assert_equal(gated_sum.get_output_dim(), dim)
| allennlp-master | tests/modules/gated_sum_test.py |
from numpy.testing import assert_almost_equal
import torch
from allennlp.modules import Highway
from allennlp.common.testing import AllenNlpTestCase
class TestHighway(AllenNlpTestCase):
def test_forward_works_on_simple_input(self):
highway = Highway(2, 2)
highway._layers[0].weight.data.fill_(1)
highway._layers[0].bias.data.fill_(0)
highway._layers[1].weight.data.fill_(2)
highway._layers[1].bias.data.fill_(-2)
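        # A highway layer mixes a nonlinear transform of its input with the input itself via a
        # learned sigmoid gate; with the weights fixed above, the expected outputs can be worked
        # out by hand (see the hand-checked values asserted below).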
input_tensor = torch.FloatTensor([[-2, 1], [3, -2]])
result = highway(input_tensor).data.numpy()
assert result.shape == (2, 2)
# This was checked by hand.
assert_almost_equal(result, [[-0.0394, 0.0197], [1.7527, -0.5550]], decimal=4)
def test_forward_works_on_nd_input(self):
highway = Highway(2, 2)
input_tensor = torch.ones(2, 2, 2)
output = highway(input_tensor)
assert output.size() == (2, 2, 2)
| allennlp-master | tests/modules/highway_test.py |
import itertools
import math
from pytest import approx, raises
import torch
from numpy.testing import assert_allclose
from allennlp.modules import ConditionalRandomField
from allennlp.modules.conditional_random_field import allowed_transitions
from allennlp.common.checks import ConfigurationError
from allennlp.common.testing import AllenNlpTestCase
class TestConditionalRandomField(AllenNlpTestCase):
def setup_method(self):
super().setup_method()
self.logits = torch.Tensor(
[
[[0, 0, 0.5, 0.5, 0.2], [0, 0, 0.3, 0.3, 0.1], [0, 0, 0.9, 10, 1]],
[[0, 0, 0.2, 0.5, 0.2], [0, 0, 3, 0.3, 0.1], [0, 0, 0.9, 1, 1]],
]
)
self.tags = torch.LongTensor([[2, 3, 4], [3, 2, 2]])
self.transitions = torch.Tensor(
[
[0.1, 0.2, 0.3, 0.4, 0.5],
[0.8, 0.3, 0.1, 0.7, 0.9],
[-0.3, 2.1, -5.6, 3.4, 4.0],
[0.2, 0.4, 0.6, -0.3, -0.4],
[1.0, 1.0, 1.0, 1.0, 1.0],
]
)
self.transitions_from_start = torch.Tensor([0.1, 0.2, 0.3, 0.4, 0.6])
self.transitions_to_end = torch.Tensor([-0.1, -0.2, 0.3, -0.4, -0.4])
# Use the CRF Module with fixed transitions to compute the log_likelihood
self.crf = ConditionalRandomField(5)
self.crf.transitions = torch.nn.Parameter(self.transitions)
self.crf.start_transitions = torch.nn.Parameter(self.transitions_from_start)
self.crf.end_transitions = torch.nn.Parameter(self.transitions_to_end)
def score(self, logits, tags):
"""
        Computes the unnormalized score for the given sequence of tags,
        given the provided logits (and the transition weights in the CRF model).
"""
# Start with transitions from START and to END
total = self.transitions_from_start[tags[0]] + self.transitions_to_end[tags[-1]]
# Add in all the intermediate transitions
for tag, next_tag in zip(tags, tags[1:]):
total += self.transitions[tag, next_tag]
# Add in the logits for the observed tags
for logit, tag in zip(logits, tags):
total += logit[tag]
return total
def naive_most_likely_sequence(self, logits, mask):
# We iterate over all possible tag sequences and use self.score
# to check the likelihood of each. The most likely sequence should be the
# same as what we get from viterbi_tags.
most_likely_tags = []
best_scores = []
for logit, mas in zip(logits, mask):
mask_indices = mas.nonzero(as_tuple=False).squeeze()
logit = torch.index_select(logit, 0, mask_indices)
sequence_length = logit.shape[0]
most_likely, most_likelihood = None, -float("inf")
for tags in itertools.product(range(5), repeat=sequence_length):
score = self.score(logit.data, tags)
if score > most_likelihood:
most_likely, most_likelihood = tags, score
# Convert tuple to list; otherwise == complains.
most_likely_tags.append(list(most_likely))
best_scores.append(most_likelihood)
return most_likely_tags, best_scores
def test_forward_works_without_mask(self):
log_likelihood = self.crf(self.logits, self.tags).item()
# Now compute the log-likelihood manually
manual_log_likelihood = 0.0
# For each instance, manually compute the numerator
# (which is just the score for the logits and actual tags)
# and the denominator
# (which is the log-sum-exp of the scores for the logits across all possible tags)
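        # That is, for each instance:
        #   log P(tags | logits) = score(logits, tags) - log(sum over all tag sequences of exp(score))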
for logits_i, tags_i in zip(self.logits, self.tags):
numerator = self.score(logits_i.detach(), tags_i.detach())
all_scores = [
self.score(logits_i.detach(), tags_j)
for tags_j in itertools.product(range(5), repeat=3)
]
denominator = math.log(sum(math.exp(score) for score in all_scores))
# And include them in the manual calculation.
manual_log_likelihood += numerator - denominator
# The manually computed log likelihood should equal the result of crf.forward.
assert manual_log_likelihood.item() == approx(log_likelihood)
def test_forward_works_with_mask(self):
# Use a non-trivial mask
mask = torch.tensor([[True, True, True], [True, True, False]])
log_likelihood = self.crf(self.logits, self.tags, mask).item()
# Now compute the log-likelihood manually
manual_log_likelihood = 0.0
# For each instance, manually compute the numerator
# (which is just the score for the logits and actual tags)
# and the denominator
# (which is the log-sum-exp of the scores for the logits across all possible tags)
for logits_i, tags_i, mask_i in zip(self.logits, self.tags, mask):
# Find the sequence length for this input and only look at that much of each sequence.
sequence_length = torch.sum(mask_i.detach())
logits_i = logits_i.data[:sequence_length]
tags_i = tags_i.data[:sequence_length]
numerator = self.score(logits_i, tags_i)
all_scores = [
self.score(logits_i, tags_j)
for tags_j in itertools.product(range(5), repeat=sequence_length)
]
denominator = math.log(sum(math.exp(score) for score in all_scores))
# And include them in the manual calculation.
manual_log_likelihood += numerator - denominator
# The manually computed log likelihood should equal the result of crf.forward.
assert manual_log_likelihood.item() == approx(log_likelihood)
def test_viterbi_tags(self):
mask = torch.tensor([[True, True, True], [True, False, True]])
viterbi_path = self.crf.viterbi_tags(self.logits, mask)
# Separate the tags and scores.
viterbi_tags = [x for x, y in viterbi_path]
viterbi_scores = [y for x, y in viterbi_path]
most_likely_tags, best_scores = self.naive_most_likely_sequence(self.logits, mask)
assert viterbi_tags == most_likely_tags
assert_allclose(viterbi_scores, best_scores, rtol=1e-5)
def test_viterbi_tags_no_mask(self):
viterbi_path = self.crf.viterbi_tags(self.logits)
# Separate the tags and scores.
viterbi_tags = [x for x, y in viterbi_path]
viterbi_scores = [y for x, y in viterbi_path]
mask = torch.tensor([[True, True, True], [True, True, True]])
most_likely_tags, best_scores = self.naive_most_likely_sequence(self.logits, mask)
assert viterbi_tags == most_likely_tags
assert_allclose(viterbi_scores, best_scores, rtol=1e-5)
def test_viterbi_tags_top_k(self):
mask = torch.tensor([[True, True, True], [True, True, False]])
best_paths = self.crf.viterbi_tags(self.logits, mask, top_k=2)
        # Ensure the top path matches the result of not passing top_k.
top_path_and_score = [top_k_paths[0] for top_k_paths in best_paths]
assert top_path_and_score == self.crf.viterbi_tags(self.logits, mask)
next_path_and_score = [top_k_paths[1] for top_k_paths in best_paths]
next_viterbi_tags = [x for x, _ in next_path_and_score]
# Check that the next best viterbi tags are what I think they should be.
assert next_viterbi_tags == [[4, 2, 3], [3, 2]]
def test_constrained_viterbi_tags(self):
constraints = {
(0, 0),
(0, 1),
(1, 1),
(1, 2),
(2, 2),
(2, 3),
(3, 3),
(3, 4),
(4, 4),
(4, 0),
}
# Add the transitions to the end tag
# and from the start tag.
for i in range(5):
constraints.add((5, i))
constraints.add((i, 6))
crf = ConditionalRandomField(num_tags=5, constraints=constraints)
crf.transitions = torch.nn.Parameter(self.transitions)
crf.start_transitions = torch.nn.Parameter(self.transitions_from_start)
crf.end_transitions = torch.nn.Parameter(self.transitions_to_end)
mask = torch.tensor([[True, True, True], [True, True, False]])
viterbi_path = crf.viterbi_tags(self.logits, mask)
# Get just the tags from each tuple of (tags, score).
viterbi_tags = [x for x, y in viterbi_path]
# Now the tags should respect the constraints
assert viterbi_tags == [[2, 3, 3], [2, 3]]
def test_allowed_transitions(self):
bio_labels = ["O", "B-X", "I-X", "B-Y", "I-Y"] # start tag, end tag
# 0 1 2 3 4 5 6
allowed = allowed_transitions("BIO", dict(enumerate(bio_labels)))
# The empty spaces in this matrix indicate disallowed transitions.
assert set(allowed) == { # Extra column for end tag.
(0, 0),
(0, 1),
(0, 3),
(0, 6),
(1, 0),
(1, 1),
(1, 2),
(1, 3),
(1, 6),
(2, 0),
(2, 1),
(2, 2),
(2, 3),
(2, 6),
(3, 0),
(3, 1),
(3, 3),
(3, 4),
(3, 6),
(4, 0),
(4, 1),
(4, 3),
(4, 4),
(4, 6),
(5, 0),
(5, 1),
(5, 3), # Extra row for start tag
}
bioul_labels = [
"O",
"B-X",
"I-X",
"L-X",
"U-X",
"B-Y",
"I-Y",
"L-Y",
"U-Y",
] # start tag, end tag
# 0 1 2 3 4 5 6 7 8 9 10
allowed = allowed_transitions("BIOUL", dict(enumerate(bioul_labels)))
# The empty spaces in this matrix indicate disallowed transitions.
assert set(allowed) == { # Extra column for end tag.
(0, 0),
(0, 1),
(0, 4),
(0, 5),
(0, 8),
(0, 10),
(1, 2),
(1, 3), # noqa
(2, 2),
(2, 3),
(3, 0),
(3, 1),
(3, 4),
(3, 5),
(3, 8),
(3, 10),
(4, 0),
(4, 1),
(4, 4),
(4, 5),
(4, 8),
(4, 10),
(5, 6),
(5, 7),
(6, 6),
(6, 7),
(7, 0),
(7, 1),
(7, 4),
(7, 5),
(7, 8),
(7, 10),
(8, 0),
(8, 1),
(8, 4),
(8, 5),
(8, 8),
(8, 10),
# Extra row for start tag.
(9, 0),
(9, 1),
(9, 4),
(9, 5),
(9, 8),
}
iob1_labels = ["O", "B-X", "I-X", "B-Y", "I-Y"] # start tag, end tag
# 0 1 2 3 4 5 6
allowed = allowed_transitions("IOB1", dict(enumerate(iob1_labels)))
# The empty spaces in this matrix indicate disallowed transitions.
assert set(allowed) == { # Extra column for end tag.
(0, 0),
(0, 2),
(0, 4),
(0, 6),
(1, 0),
(1, 1),
(1, 2),
(1, 4),
(1, 6),
(2, 0),
(2, 1),
(2, 2),
(2, 4),
(2, 6),
(3, 0),
(3, 2),
(3, 3),
(3, 4),
(3, 6),
(4, 0),
(4, 2),
(4, 3),
(4, 4),
(4, 6),
(5, 0),
(5, 2),
(5, 4), # Extra row for start tag
}
with raises(ConfigurationError):
allowed_transitions("allennlp", {})
bmes_labels = ["B-X", "M-X", "E-X", "S-X", "B-Y", "M-Y", "E-Y", "S-Y"] # start tag, end tag
# 0 1 2 3 4 5 6 7 8 9
allowed = allowed_transitions("BMES", dict(enumerate(bmes_labels)))
assert set(allowed) == {
(0, 1),
(0, 2),
(1, 1),
(1, 2), # Extra column for end tag.
(2, 0),
(2, 3),
(2, 4),
(2, 7),
(2, 9), # noqa
(3, 0),
(3, 3),
(3, 4),
(3, 7),
(3, 9),
(4, 5),
(4, 6),
(5, 5),
(5, 6),
(6, 0),
(6, 3),
(6, 4),
(6, 7),
(6, 9),
(7, 0),
(7, 3),
(7, 4),
(7, 7),
(7, 9),
(8, 0),
(8, 3),
(8, 4),
(8, 7), # Extra row for start tag
}
| allennlp-master | tests/modules/conditional_random_field_test.py |
import torch
from allennlp.common.testing import AllenNlpTestCase
from allennlp.modules.seq2seq_encoders.gated_cnn_encoder import GatedCnnEncoder
class TestGatedCnnEncoder(AllenNlpTestCase):
def test_gated_cnn_encoder(self):
cnn_encoder = GatedCnnEncoder(
input_dim=32,
layers=[[[4, 32]], [[1, 16], [5, 16], [1, 32]], [[1, 64], [5, 64], [1, 32]]],
)
token_embeddings = torch.rand(5, 10, 32)
mask = torch.ones(5, 10).bool()
mask[0, 7:] = False
mask[1, 5:] = False
output = cnn_encoder(token_embeddings, mask)
assert list(output.size()) == [5, 10, 64]
def test_gated_cnn_encoder_dilations(self):
cnn_encoder = GatedCnnEncoder(
input_dim=32, layers=[[[2, 32, 1]], [[2, 32, 2]], [[2, 32, 4]], [[2, 32, 8]]]
)
token_embeddings = torch.rand(5, 10, 32)
mask = torch.ones(5, 10).bool()
mask[0, 7:] = False
mask[1, 5:] = False
output = cnn_encoder(token_embeddings, mask)
assert list(output.size()) == [5, 10, 64]
def test_gated_cnn_encoder_layers(self):
cnn_encoder = GatedCnnEncoder(
input_dim=32,
layers=[[[4, 32]], [[1, 16], [5, 16], [1, 32]], [[1, 64], [5, 64], [1, 32]]],
return_all_layers=True,
)
token_embeddings = torch.rand(5, 10, 32)
mask = torch.ones(5, 10).bool()
mask[0, 7:] = False
mask[1, 5:] = False
output = cnn_encoder(token_embeddings, mask)
assert len(output) == 3
concat_layers = torch.cat([layer.unsqueeze(1) for layer in output], dim=1)
assert list(concat_layers.size()) == [5, 3, 10, 64]
| allennlp-master | tests/modules/seq2seq_encoders/gated_cnn_encoder_test.py |
import torch
import numpy
from allennlp.common.testing import AllenNlpTestCase
from allennlp.modules.seq2seq_encoders import PassThroughEncoder
class TestPassThroughEncoder(AllenNlpTestCase):
def test_get_dimension_is_correct(self):
encoder = PassThroughEncoder(input_dim=9)
assert encoder.get_input_dim() == 9
assert encoder.get_output_dim() == 9
def test_pass_through_encoder_passes_through(self):
encoder = PassThroughEncoder(input_dim=9)
tensor = torch.randn([2, 3, 9])
output = encoder(tensor)
numpy.testing.assert_array_almost_equal(
tensor.detach().cpu().numpy(), output.detach().cpu().numpy()
)
def test_pass_through_encoder_with_mask(self):
encoder = PassThroughEncoder(input_dim=9)
tensor = torch.randn([2, 3, 9])
mask = torch.tensor([[True, True, True], [True, False, False]])
output = encoder(tensor, mask)
target = tensor * mask.unsqueeze(dim=-1).float()
numpy.testing.assert_array_almost_equal(
output.detach().cpu().numpy(), target.detach().cpu().numpy()
)
| allennlp-master | tests/modules/seq2seq_encoders/pass_through_encoder_test.py |
import numpy
from numpy.testing import assert_almost_equal
import pytest
import torch
from torch.nn import LSTM, GRU
from torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence
from allennlp.common.checks import ConfigurationError
from allennlp.common.testing import AllenNlpTestCase
from allennlp.modules.seq2seq_encoders import PytorchSeq2SeqWrapper
from allennlp.nn.util import sort_batch_by_length, get_lengths_from_binary_sequence_mask
class TestPytorchSeq2SeqWrapper(AllenNlpTestCase):
def test_get_dimension_is_correct(self):
lstm = LSTM(bidirectional=True, num_layers=3, input_size=2, hidden_size=7, batch_first=True)
encoder = PytorchSeq2SeqWrapper(lstm)
assert encoder.get_output_dim() == 14
assert encoder.get_input_dim() == 2
lstm = LSTM(
bidirectional=False, num_layers=3, input_size=2, hidden_size=7, batch_first=True
)
encoder = PytorchSeq2SeqWrapper(lstm)
assert encoder.get_output_dim() == 7
assert encoder.get_input_dim() == 2
def test_forward_works_even_with_empty_sequences(self):
lstm = LSTM(bidirectional=True, num_layers=3, input_size=3, hidden_size=7, batch_first=True)
encoder = PytorchSeq2SeqWrapper(lstm)
tensor = torch.rand([5, 7, 3])
tensor[1, 6:, :] = 0
tensor[2, :, :] = 0
tensor[3, 2:, :] = 0
tensor[4, :, :] = 0
mask = torch.ones(5, 7).bool()
mask[1, 6:] = False
mask[2, :] = False
mask[3, 2:] = False
mask[4, :] = False
results = encoder(tensor, mask)
for i in (0, 1, 3):
assert not (results[i] == 0.0).data.all()
for i in (2, 4):
assert (results[i] == 0.0).data.all()
def test_forward_pulls_out_correct_tensor_without_sequence_lengths(self):
lstm = LSTM(bidirectional=True, num_layers=3, input_size=2, hidden_size=7, batch_first=True)
encoder = PytorchSeq2SeqWrapper(lstm)
input_tensor = torch.FloatTensor([[[0.7, 0.8], [0.1, 1.5]]])
lstm_output = lstm(input_tensor)
encoder_output = encoder(input_tensor, None)
assert_almost_equal(encoder_output.data.numpy(), lstm_output[0].data.numpy())
def test_forward_pulls_out_correct_tensor_with_sequence_lengths(self):
lstm = LSTM(bidirectional=True, num_layers=3, input_size=3, hidden_size=7, batch_first=True)
encoder = PytorchSeq2SeqWrapper(lstm)
input_tensor = torch.rand([5, 7, 3])
input_tensor[1, 6:, :] = 0
input_tensor[2, 4:, :] = 0
input_tensor[3, 2:, :] = 0
input_tensor[4, 1:, :] = 0
mask = torch.ones(5, 7).bool()
mask[1, 6:] = False
mask[2, 4:] = False
mask[3, 2:] = False
mask[4, 1:] = False
sequence_lengths = get_lengths_from_binary_sequence_mask(mask)
packed_sequence = pack_padded_sequence(
input_tensor, sequence_lengths.data.tolist(), batch_first=True
)
lstm_output, _ = lstm(packed_sequence)
encoder_output = encoder(input_tensor, mask)
lstm_tensor, _ = pad_packed_sequence(lstm_output, batch_first=True)
assert_almost_equal(encoder_output.data.numpy(), lstm_tensor.data.numpy())
def test_forward_pulls_out_correct_tensor_for_unsorted_batches(self):
lstm = LSTM(bidirectional=True, num_layers=3, input_size=3, hidden_size=7, batch_first=True)
encoder = PytorchSeq2SeqWrapper(lstm)
input_tensor = torch.rand([5, 7, 3])
input_tensor[0, 3:, :] = 0
input_tensor[1, 4:, :] = 0
input_tensor[2, 2:, :] = 0
input_tensor[3, 6:, :] = 0
mask = torch.ones(5, 7).bool()
mask[0, 3:] = False
mask[1, 4:] = False
mask[2, 2:] = False
mask[3, 6:] = False
sequence_lengths = get_lengths_from_binary_sequence_mask(mask)
sorted_inputs, sorted_sequence_lengths, restoration_indices, _ = sort_batch_by_length(
input_tensor, sequence_lengths
)
packed_sequence = pack_padded_sequence(
sorted_inputs, sorted_sequence_lengths.data.tolist(), batch_first=True
)
lstm_output, _ = lstm(packed_sequence)
encoder_output = encoder(input_tensor, mask)
lstm_tensor, _ = pad_packed_sequence(lstm_output, batch_first=True)
assert_almost_equal(
encoder_output.data.numpy(),
lstm_tensor.index_select(0, restoration_indices).data.numpy(),
)
def test_forward_does_not_compress_tensors_padded_to_greater_than_the_max_sequence_length(self):
lstm = LSTM(bidirectional=True, num_layers=3, input_size=3, hidden_size=7, batch_first=True)
encoder = PytorchSeq2SeqWrapper(lstm)
input_tensor = torch.rand([5, 8, 3])
input_tensor[:, 7, :] = 0
mask = torch.ones(5, 8).bool()
mask[:, 7] = False
encoder_output = encoder(input_tensor, mask)
assert encoder_output.size(1) == 8
def test_wrapper_raises_if_batch_first_is_false(self):
with pytest.raises(ConfigurationError):
lstm = LSTM(bidirectional=True, num_layers=3, input_size=3, hidden_size=7)
_ = PytorchSeq2SeqWrapper(lstm)
def test_wrapper_works_when_passed_state_with_zero_length_sequences(self):
lstm = LSTM(bidirectional=True, num_layers=3, input_size=3, hidden_size=7, batch_first=True)
encoder = PytorchSeq2SeqWrapper(lstm)
input_tensor = torch.rand([5, 7, 3])
mask = torch.ones(5, 7).bool()
mask[0, 3:] = False
mask[1, 4:] = False
mask[2, 0:] = False
mask[3, 6:] = False
# Initial states are of shape (num_layers * num_directions, batch_size, hidden_dim)
initial_states = torch.randn(6, 5, 7), torch.randn(6, 5, 7)
_ = encoder(input_tensor, mask, initial_states)
def test_wrapper_can_call_backward_with_zero_length_sequences(self):
lstm = LSTM(bidirectional=True, num_layers=3, input_size=3, hidden_size=7, batch_first=True)
encoder = PytorchSeq2SeqWrapper(lstm)
input_tensor = torch.rand([5, 7, 3])
mask = torch.ones(5, 7).bool()
mask[0, 3:] = False
mask[1, 4:] = False
        mask[2, 0:] = False  # zero length sequence
mask[3, 6:] = False
output = encoder(input_tensor, mask)
output.sum().backward()
def test_wrapper_stateful(self):
lstm = LSTM(bidirectional=True, num_layers=2, input_size=3, hidden_size=7, batch_first=True)
encoder = PytorchSeq2SeqWrapper(lstm, stateful=True)
# To test the stateful functionality we need to call the encoder multiple times.
# Different batch sizes further tests some of the logic.
batch_sizes = [5, 10, 8]
sequence_lengths = [4, 6, 7]
states = []
for batch_size, sequence_length in zip(batch_sizes, sequence_lengths):
tensor = torch.rand([batch_size, sequence_length, 3])
mask = torch.ones(batch_size, sequence_length).bool()
mask.data[0, 3:] = 0
encoder_output = encoder(tensor, mask)
states.append(encoder._states)
# Check that the output is masked properly.
assert_almost_equal(encoder_output[0, 3:, :].data.numpy(), numpy.zeros((4, 14)))
for k in range(2):
assert_almost_equal(
states[-1][k][:, -2:, :].data.numpy(), states[-2][k][:, -2:, :].data.numpy()
)
def test_wrapper_stateful_single_state_gru(self):
gru = GRU(bidirectional=True, num_layers=2, input_size=3, hidden_size=7, batch_first=True)
encoder = PytorchSeq2SeqWrapper(gru, stateful=True)
batch_sizes = [10, 5]
states = []
for batch_size in batch_sizes:
tensor = torch.rand([batch_size, 5, 3])
mask = torch.ones(batch_size, 5).bool()
mask.data[0, 3:] = 0
encoder_output = encoder(tensor, mask)
states.append(encoder._states)
assert_almost_equal(encoder_output[0, 3:, :].data.numpy(), numpy.zeros((2, 14)))
assert_almost_equal(
states[-1][0][:, -5:, :].data.numpy(), states[-2][0][:, -5:, :].data.numpy()
)
| allennlp-master | tests/modules/seq2seq_encoders/pytorch_seq2seq_wrapper_test.py |
import torch
import numpy
from allennlp.common.testing import AllenNlpTestCase
from allennlp.modules import FeedForward
from allennlp.modules.seq2seq_encoders.feedforward_encoder import FeedForwardEncoder
from allennlp.nn import Activation
class TestFeedforwardEncoder(AllenNlpTestCase):
def test_get_dimension_is_correct(self):
feedforward = FeedForward(
input_dim=10, num_layers=1, hidden_dims=10, activations=Activation.by_name("linear")()
)
encoder = FeedForwardEncoder(feedforward)
assert encoder.get_input_dim() == feedforward.get_input_dim()
assert encoder.get_output_dim() == feedforward.get_output_dim()
def test_feedforward_encoder_exactly_match_feedforward_each_item(self):
feedforward = FeedForward(
input_dim=10, num_layers=1, hidden_dims=10, activations=Activation.by_name("linear")()
)
encoder = FeedForwardEncoder(feedforward)
tensor = torch.randn([2, 3, 10])
output = encoder(tensor)
target = feedforward(tensor)
numpy.testing.assert_array_almost_equal(
target.detach().cpu().numpy(), output.detach().cpu().numpy()
)
# mask should work
mask = torch.tensor([[True, True, True], [True, False, False]])
output = encoder(tensor, mask)
target = feedforward(tensor) * mask.unsqueeze(dim=-1).float()
numpy.testing.assert_array_almost_equal(
target.detach().cpu().numpy(), output.detach().cpu().numpy()
)
| allennlp-master | tests/modules/seq2seq_encoders/feedforward_encoder_test.py |
 | allennlp-master | tests/modules/seq2seq_encoders/__init__.py |
from typing import Optional
import torch
import pytest
from allennlp.modules.seq2seq_encoders import PytorchTransformer
@pytest.mark.parametrize("positional_encoding", [None, "sinusoidal", "embedding"])
def test_positional_embeddings(positional_encoding: Optional[str]):
# All sizes are prime, making them easy to find during debugging.
batch_size = 7
max_seq_len = 101
n_head = 5
dims = 11 * n_head
transformer = PytorchTransformer(
dims, 3, positional_encoding=positional_encoding, num_attention_heads=n_head
)
transformer.eval()
with torch.no_grad():
inputs = torch.randn(batch_size, max_seq_len, dims)
mask = torch.ones(batch_size, max_seq_len, dtype=torch.bool)
for b in range(batch_size):
mask[b, max_seq_len - b :] = False
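        # After this loop, batch element b has its last b positions marked as padding.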
assert not torch.isnan(inputs).any()
assert torch.isfinite(inputs).all()
outputs = transformer(inputs, mask)
assert outputs.size() == inputs.size()
assert not torch.isnan(outputs).any()
assert torch.isfinite(outputs).all()
@pytest.mark.parametrize("positional_encoding", [None, "sinusoidal", "embedding"])
def test_mask_works(positional_encoding: Optional[str]):
# All sizes are prime, making them easy to find during debugging.
batch_size = 3
max_seq_len = 11
n_head = 2
dims = 7 * n_head
transformer = PytorchTransformer(
dims, 2, positional_encoding=positional_encoding, num_attention_heads=n_head
)
transformer.eval()
with torch.no_grad():
# Construct inputs and masks
inputs = torch.randn(batch_size, max_seq_len, dims)
all_ones_mask = torch.ones(batch_size, max_seq_len, dtype=torch.bool)
mask = all_ones_mask.clone()
for b in range(batch_size):
mask[b, max_seq_len - b :] = False
altered_inputs = inputs + (~mask).unsqueeze(2) * 10.0
# Make sure there is a difference without the mask
assert not torch.allclose(
transformer(inputs, all_ones_mask), transformer(altered_inputs, all_ones_mask)
)
# Make sure there is no difference with the mask
assert torch.allclose(
torch.masked_select(transformer(inputs, mask), mask.unsqueeze(2)),
torch.masked_select(transformer(altered_inputs, mask), mask.unsqueeze(2)),
)
@pytest.mark.parametrize("positional_encoding", [None, "sinusoidal", "embedding"])
def test_positional_encodings(positional_encoding: Optional[str]):
# All sizes are prime, making them easy to find during debugging.
batch_size = 3
max_seq_len = 11
n_head = 2
dims = 7 * n_head
transformer = PytorchTransformer(
dims, 2, positional_encoding=positional_encoding, num_attention_heads=n_head
)
transformer.eval()
with torch.no_grad():
# We test this by running it twice, once with a shuffled sequence. The results should be the same if there
# is no positional encoding, and different otherwise.
inputs = torch.randn(batch_size, max_seq_len, dims)
mask = torch.ones(batch_size, max_seq_len, dtype=torch.bool)
for b in range(batch_size):
mask[b, max_seq_len - b :] = False
unshuffled_output = transformer(inputs, mask)
shuffle = torch.arange(0, max_seq_len).unsqueeze(0).expand_as(mask).clone()
for b in range(batch_size):
# Take care not to shuffle the masked values
perm = torch.randperm(max_seq_len - b)
shuffle[b, : max_seq_len - b] = shuffle[b, perm]
shuffle = shuffle.unsqueeze(2).expand_as(inputs)
shuffled_input = torch.gather(inputs, 1, shuffle)
shuffled_output = transformer(shuffled_input, mask)
if positional_encoding is None:
assert torch.allclose(
torch.gather(unshuffled_output, 1, shuffle), shuffled_output, atol=2e-7
)
else:
assert not torch.allclose(
torch.gather(unshuffled_output, 1, shuffle), shuffled_output, atol=2e-7
)
| allennlp-master | tests/modules/seq2seq_encoders/pytorch_transformer_wrapper_test.py |
import torch
import numpy
from overrides import overrides
import pytest
from allennlp.common.testing import AllenNlpTestCase
from allennlp.modules.seq2seq_encoders import ComposeEncoder, FeedForwardEncoder, Seq2SeqEncoder
from allennlp.modules import FeedForward
class MockSeq2SeqEncoder(Seq2SeqEncoder):
def __init__(self, input_dim: int, output_dim: int, bidirectional: bool = False):
super().__init__()
self.input_dim = input_dim
self.output_dim = output_dim
self.bidirectional = bidirectional
@overrides
def forward(self, inputs, mask):
pass
@overrides
def get_input_dim(self) -> int:
return self.input_dim
@overrides
def get_output_dim(self) -> int:
return self.output_dim
@overrides
def is_bidirectional(self) -> bool:
return self.bidirectional
def _make_feedforward(input_dim, output_dim):
return FeedForwardEncoder(
FeedForward(
input_dim=input_dim, num_layers=1, activations=torch.nn.ReLU(), hidden_dims=output_dim
)
)
class TestComposeEncoder(AllenNlpTestCase):
def setup_method(self):
super().setup_method()
self.encoder = ComposeEncoder(
[_make_feedforward(9, 5), _make_feedforward(5, 10), _make_feedforward(10, 3)]
)
def test_get_dimension_is_correct(self):
assert self.encoder.get_input_dim() == 9
assert self.encoder.get_output_dim() == 3
def test_composes(self):
tensor = torch.zeros(2, 10, 9)
output = self.encoder(tensor)
for encoder in self.encoder.encoders:
tensor = encoder(tensor)
numpy.testing.assert_array_almost_equal(
output.detach().cpu().numpy(), tensor.detach().cpu().numpy()
)
    def test_compose_encoder_with_mask(self):
tensor = torch.randn([2, 3, 9])
mask = torch.tensor([[True, True, True], [True, False, False]])
output = self.encoder(tensor, mask)
for encoder in self.encoder.encoders:
tensor = encoder(tensor, mask)
numpy.testing.assert_array_almost_equal(
output.detach().cpu().numpy(), tensor.detach().cpu().numpy()
)
def test_empty(self):
with pytest.raises(ValueError):
ComposeEncoder([])
def test_mismatched_size(self):
with pytest.raises(ValueError):
ComposeEncoder(
[
MockSeq2SeqEncoder(input_dim=9, output_dim=5),
MockSeq2SeqEncoder(input_dim=1, output_dim=2),
]
)
def test_mismatched_bidirectionality(self):
with pytest.raises(ValueError):
ComposeEncoder(
[
MockSeq2SeqEncoder(input_dim=9, output_dim=5),
MockSeq2SeqEncoder(input_dim=5, output_dim=2, bidirectional=True),
]
)
def test_all_bidirectional(self):
ComposeEncoder(
[
MockSeq2SeqEncoder(input_dim=9, output_dim=5, bidirectional=True),
MockSeq2SeqEncoder(input_dim=5, output_dim=2, bidirectional=True),
]
)
| allennlp-master | tests/modules/seq2seq_encoders/compose_encoder_test.py |
import torch
from numpy.testing import assert_almost_equal
import numpy
from allennlp.common import Params
from allennlp.common.testing.test_case import AllenNlpTestCase
from allennlp.modules.attention.attention import Attention
from allennlp.modules.attention.dot_product_attention import DotProductAttention
class TestDotProductAttention(AllenNlpTestCase):
def test_can_init_dot(self):
legacy_attention = Attention.from_params(Params({"type": "dot_product"}))
        assert isinstance(legacy_attention, DotProductAttention)
def test_dot_product_similarity(self):
linear = DotProductAttention(normalize=False)
output = linear(
torch.FloatTensor([[0, 0, 0], [1, 1, 1]]),
torch.FloatTensor([[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 11, 12]]]),
)
assert_almost_equal(output.numpy(), numpy.array([[0.0, 0.0], [24.0, 33.0]]), decimal=2)
| allennlp-master | tests/modules/attention/dot_product_attention_test.py |
allennlp-master | tests/modules/attention/__init__.py |
|
from numpy.testing import assert_almost_equal
import torch
from torch.nn.parameter import Parameter
from allennlp.common import Params
from allennlp.modules.attention import BilinearAttention
from allennlp.common.testing import AllenNlpTestCase
class TestBilinearAttention(AllenNlpTestCase):
def test_forward_does_a_bilinear_product(self):
params = Params({"vector_dim": 2, "matrix_dim": 2, "normalize": False})
bilinear = BilinearAttention.from_params(params)
bilinear._weight_matrix = Parameter(torch.FloatTensor([[-0.3, 0.5], [2.0, -1.0]]))
bilinear._bias = Parameter(torch.FloatTensor([0.1]))
a_vectors = torch.FloatTensor([[1, 1]])
b_vectors = torch.FloatTensor([[[1, 0], [0, 1]]])
result = bilinear(a_vectors, b_vectors).detach().numpy()
assert result.shape == (1, 2)
assert_almost_equal(result, [[1.8, -0.4]])
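# --- Illustrative sketch (not part of the original test file) ---
# The expected values above follow from the bilinear form x^T W y + b evaluated by
# hand. The helper below is our own reproduction with plain tensor ops, not AllenNLP
# code; it can be run directly to confirm the numbers.
def manual_bilinear_attention_sketch() -> None:
    import torch
    weight = torch.tensor([[-0.3, 0.5], [2.0, -1.0]])
    bias = 0.1
    x = torch.tensor([1.0, 1.0])
    ys = torch.tensor([[1.0, 0.0], [0.0, 1.0]])
    scores = ys @ (weight.t() @ x) + bias  # x^T W y + b for each row y
    assert torch.allclose(scores, torch.tensor([1.8, -0.4]))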
| allennlp-master | tests/modules/attention/bilinear_attention_test.py |
from numpy.testing import assert_almost_equal
import numpy
import torch
from torch.autograd import Variable
from torch.nn import Parameter
from allennlp.common import Params
from allennlp.common.testing.test_case import AllenNlpTestCase
from allennlp.modules.attention import LinearAttention
from allennlp.modules.attention.attention import Attention
class LinearAttentionTest(AllenNlpTestCase):
def test_can_init_linear(self):
legacy_attention = Attention.from_params(
Params({"type": "linear", "tensor_1_dim": 3, "tensor_2_dim": 3})
)
        assert isinstance(legacy_attention, LinearAttention)
def test_linear_similarity(self):
linear = LinearAttention(3, 3, normalize=True)
linear._weight_vector = Parameter(torch.FloatTensor([-0.3, 0.5, 2.0, -1.0, 1, 1]))
linear._bias = Parameter(torch.FloatTensor([0.1]))
output = linear(
Variable(torch.FloatTensor([[-7, -8, -9]])),
Variable(torch.FloatTensor([[[1, 2, 3], [4, 5, 6]]])),
)
assert_almost_equal(output.data.numpy(), numpy.array([[0.0474, 0.9526]]), decimal=2)
def test_bidaf_trilinear_similarity(self):
linear = LinearAttention(2, 2, combination="x,y,x*y", normalize=False)
linear._weight_vector = Parameter(torch.FloatTensor([-0.3, 0.5, 2.0, -1.0, 1, 1]))
linear._bias = Parameter(torch.FloatTensor([0.0]))
output = linear(
torch.FloatTensor([[4, 5]]), torch.FloatTensor([[[1, 2], [4, 5], [7, 8], [10, 11]]])
)
assert_almost_equal(
output.data.numpy(),
numpy.array(
[
[
-1.2 + 2.5 + 2 + -2 + 4 + 10,
-1.2 + 2.5 + 8 + -5 + 16 + 25,
-1.2 + 2.5 + 14 + -8 + 28 + 40,
-1.2 + 2.5 + 20 + -11 + 40 + 55,
]
]
),
decimal=2,
)
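# --- Illustrative sketch (not part of the original test file) ---
# With combination="x,y,x*y" the score for a (vector, matrix-row) pair is the dot
# product of the concatenation [x; y; x*y] with the six-dimensional weight vector,
# plus the bias (zero in the trilinear test above). The helper below is our own
# reproduction of the first expected value, not AllenNLP code.
def manual_trilinear_similarity_sketch() -> None:
    import torch
    weight = torch.tensor([-0.3, 0.5, 2.0, -1.0, 1.0, 1.0])
    x = torch.tensor([4.0, 5.0])
    y = torch.tensor([1.0, 2.0])
    features = torch.cat([x, y, x * y])  # [4, 5, 1, 2, 4, 10]
    score = torch.dot(features, weight)
    assert torch.allclose(score, torch.tensor(-1.2 + 2.5 + 2 + -2 + 4 + 10))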
| allennlp-master | tests/modules/attention/linear_attention_test.py |
from numpy.testing import assert_almost_equal
import torch
from torch.nn.parameter import Parameter
from allennlp.common import Params
from allennlp.modules.attention import AdditiveAttention
from allennlp.common.testing import AllenNlpTestCase
class TestAdditiveAttention(AllenNlpTestCase):
def test_forward_does_an_additive_product(self):
params = Params({"vector_dim": 2, "matrix_dim": 3, "normalize": False})
additive = AdditiveAttention.from_params(params)
additive._w_matrix = Parameter(torch.Tensor([[-0.2, 0.3], [-0.5, 0.5]]))
additive._u_matrix = Parameter(torch.Tensor([[0.0, 1.0], [1.0, 1.0], [1.0, -1.0]]))
additive._v_vector = Parameter(torch.Tensor([[1.0], [-1.0]]))
vectors = torch.FloatTensor([[0.7, -0.8], [0.4, 0.9]])
matrices = torch.FloatTensor(
[
[[1.0, -1.0, 3.0], [0.5, -0.3, 0.0], [0.2, -1.0, 1.0], [0.7, 0.8, -1.0]],
[[-2.0, 3.0, -3.0], [0.6, 0.2, 2.0], [0.5, -0.4, -1.0], [0.2, 0.2, 0.0]],
]
)
result = additive(vectors, matrices).detach().numpy()
assert result.shape == (2, 4)
assert_almost_equal(
result,
[
[1.975072, -0.04997836, 1.2176098, -0.9205586],
[-1.4851665, 1.489604, -1.890285, -1.0672251],
],
)
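# --- Illustrative sketch (not part of the original test file) ---
# The expected values above follow the additive (Bahdanau-style) form
# v^T tanh(x W + m_j U) for each matrix row m_j. The helper below is our own
# reproduction of the first expected value, not AllenNLP code.
def manual_additive_attention_sketch() -> None:
    import torch
    w = torch.tensor([[-0.2, 0.3], [-0.5, 0.5]])
    u = torch.tensor([[0.0, 1.0], [1.0, 1.0], [1.0, -1.0]])
    v = torch.tensor([1.0, -1.0])
    x = torch.tensor([0.7, -0.8])
    m_0 = torch.tensor([1.0, -1.0, 3.0])
    score = torch.tanh(x @ w + m_0 @ u) @ v
    assert torch.allclose(score, torch.tensor(1.975072), atol=1e-4)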
| allennlp-master | tests/modules/attention/additive_attention_test.py |
import torch
from numpy.testing import assert_almost_equal
import numpy
from allennlp.common import Params
from allennlp.common.testing.test_case import AllenNlpTestCase
from allennlp.modules.attention.attention import Attention
from allennlp.modules.attention.cosine_attention import CosineAttention
class TestCosineAttention(AllenNlpTestCase):
def test_can_init_cosine(self):
legacy_attention = Attention.from_params(Params({"type": "cosine"}))
        assert isinstance(legacy_attention, CosineAttention)
def test_cosine_similarity(self):
linear = CosineAttention(normalize=False)
output = linear(
torch.FloatTensor([[0, 0, 0], [1, 1, 1]]),
torch.FloatTensor([[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 11, 12]]]),
)
assert_almost_equal(output.numpy(), numpy.array([[0.0, 0.0], [0.9948, 0.9973]]), decimal=2)
| allennlp-master | tests/modules/attention/cosine_attention_test.py |
import pytest
import torch
from allennlp.common import Params
from allennlp.common.checks import ConfigurationError
from allennlp.data import Vocabulary
from allennlp.modules.text_field_embedders import BasicTextFieldEmbedder
from allennlp.common.testing import AllenNlpTestCase
class TestBasicTextFieldEmbedder(AllenNlpTestCase):
def setup_method(self):
super().setup_method()
self.vocab = Vocabulary()
self.vocab.add_token_to_namespace("1")
self.vocab.add_token_to_namespace("2")
self.vocab.add_token_to_namespace("3")
self.vocab.add_token_to_namespace("4")
params = Params(
{
"token_embedders": {
"words1": {"type": "embedding", "embedding_dim": 2},
"words2": {"type": "embedding", "embedding_dim": 5},
"words3": {"type": "embedding", "embedding_dim": 3},
}
}
)
self.token_embedder = BasicTextFieldEmbedder.from_params(vocab=self.vocab, params=params)
self.inputs = {
"words1": {"tokens": torch.LongTensor([[0, 2, 3, 5]])},
"words2": {"tokens": torch.LongTensor([[1, 4, 3, 2]])},
"words3": {"tokens": torch.LongTensor([[1, 5, 1, 2]])},
}
def test_get_output_dim_aggregates_dimension_from_each_embedding(self):
assert self.token_embedder.get_output_dim() == 10
def test_forward_asserts_input_field_match(self):
# Total mismatch
self.inputs["words4"] = self.inputs["words3"]
del self.inputs["words3"]
with pytest.raises(ConfigurationError) as exc:
self.token_embedder(self.inputs)
assert exc.match("Mismatched token keys")
self.inputs["words3"] = self.inputs["words4"]
# Text field has too many inputs
with pytest.raises(ConfigurationError) as exc:
self.token_embedder(self.inputs)
assert exc.match("Mismatched token keys")
del self.inputs["words4"]
def test_forward_concats_resultant_embeddings(self):
assert self.token_embedder(self.inputs).size() == (1, 4, 10)
def test_forward_works_on_higher_order_input(self):
params = Params(
{
"token_embedders": {
"words": {"type": "embedding", "num_embeddings": 20, "embedding_dim": 2},
"characters": {
"type": "character_encoding",
"embedding": {"embedding_dim": 4, "num_embeddings": 15},
"encoder": {
"type": "cnn",
"embedding_dim": 4,
"num_filters": 10,
"ngram_filter_sizes": [3],
},
},
}
}
)
token_embedder = BasicTextFieldEmbedder.from_params(vocab=self.vocab, params=params)
inputs = {
"words": {"tokens": (torch.rand(3, 4, 5, 6) * 20).long()},
"characters": {"token_characters": (torch.rand(3, 4, 5, 6, 7) * 15).long()},
}
assert token_embedder(inputs, num_wrapping_dims=2).size() == (3, 4, 5, 6, 12)
def test_forward_runs_with_forward_params(self):
class FakeEmbedder(torch.nn.Module):
def __init__(self):
super().__init__()
def forward(self, tokens: torch.Tensor, extra_arg: int = None):
assert tokens is not None
assert extra_arg is not None
return tokens
token_embedder = BasicTextFieldEmbedder({"elmo": FakeEmbedder()})
inputs = {"elmo": {"elmo_tokens": (torch.rand(3, 6, 5) * 2).long()}}
kwargs = {"extra_arg": 1}
token_embedder(inputs, **kwargs)
def test_forward_runs_with_non_bijective_mapping(self):
elmo_fixtures_path = self.FIXTURES_ROOT / "elmo"
options_file = str(elmo_fixtures_path / "options.json")
weight_file = str(elmo_fixtures_path / "lm_weights.hdf5")
params = Params(
{
"token_embedders": {
"words": {"type": "embedding", "num_embeddings": 20, "embedding_dim": 2},
"elmo": {
"type": "elmo_token_embedder",
"options_file": options_file,
"weight_file": weight_file,
},
}
}
)
token_embedder = BasicTextFieldEmbedder.from_params(vocab=self.vocab, params=params)
inputs = {
"words": {"tokens": (torch.rand(3, 6) * 20).long()},
"elmo": {"elmo_tokens": (torch.rand(3, 6, 50) * 15).long()},
}
token_embedder(inputs)
def test_forward_runs_with_non_bijective_mapping_with_null(self):
elmo_fixtures_path = self.FIXTURES_ROOT / "elmo"
options_file = str(elmo_fixtures_path / "options.json")
weight_file = str(elmo_fixtures_path / "lm_weights.hdf5")
params = Params(
{
"token_embedders": {
"elmo": {
"type": "elmo_token_embedder",
"options_file": options_file,
"weight_file": weight_file,
}
}
}
)
token_embedder = BasicTextFieldEmbedder.from_params(vocab=self.vocab, params=params)
inputs = {"elmo": {"elmo_tokens": (torch.rand(3, 6, 50) * 15).long()}}
token_embedder(inputs)
def test_forward_runs_with_non_bijective_mapping_with_dict(self):
elmo_fixtures_path = self.FIXTURES_ROOT / "elmo"
options_file = str(elmo_fixtures_path / "options.json")
weight_file = str(elmo_fixtures_path / "lm_weights.hdf5")
params = Params(
{
"token_embedders": {
"words": {"type": "embedding", "num_embeddings": 20, "embedding_dim": 2},
"elmo": {
"type": "elmo_token_embedder",
"options_file": options_file,
"weight_file": weight_file,
},
}
}
)
token_embedder = BasicTextFieldEmbedder.from_params(vocab=self.vocab, params=params)
inputs = {
"words": {"tokens": (torch.rand(3, 6) * 20).long()},
"elmo": {"elmo_tokens": (torch.rand(3, 6, 50) * 15).long()},
}
token_embedder(inputs)
def test_forward_runs_with_bijective_and_non_bijective_mapping(self):
params = Params(
{
"token_embedders": {
"bert": {"type": "pretrained_transformer", "model_name": "bert-base-uncased"},
"token_characters": {
"type": "character_encoding",
"embedding": {"embedding_dim": 5},
"encoder": {
"type": "cnn",
"embedding_dim": 5,
"num_filters": 5,
"ngram_filter_sizes": [5],
},
},
}
}
)
token_embedder = BasicTextFieldEmbedder.from_params(vocab=self.vocab, params=params)
inputs = {
"bert": {
"token_ids": (torch.rand(3, 5) * 10).long(),
"mask": (torch.rand(3, 5) * 1).bool(),
},
"token_characters": {"token_characters": (torch.rand(3, 5, 5) * 1).long()},
}
token_embedder(inputs)
| allennlp-master | tests/modules/text_field_embedders/basic_text_field_embedder_test.py |
allennlp-master | tests/modules/text_field_embedders/__init__.py |
|
import numpy
from numpy.testing import assert_almost_equal
import torch
from allennlp.common import Params
from allennlp.modules.seq2vec_encoders import CnnEncoder
from allennlp.nn import InitializerApplicator, Initializer
from allennlp.common.testing import AllenNlpTestCase
class TestCnnEncoder(AllenNlpTestCase):
def test_get_dimension_is_correct(self):
encoder = CnnEncoder(embedding_dim=5, num_filters=4, ngram_filter_sizes=(3, 5))
assert encoder.get_output_dim() == 8
assert encoder.get_input_dim() == 5
encoder = CnnEncoder(
embedding_dim=5, num_filters=4, ngram_filter_sizes=(3, 5), output_dim=7
)
assert encoder.get_output_dim() == 7
assert encoder.get_input_dim() == 5
def test_can_construct_from_params(self):
params = Params({"embedding_dim": 5, "num_filters": 4, "ngram_filter_sizes": [3, 5]})
encoder = CnnEncoder.from_params(params)
assert encoder.get_output_dim() == 8
params = Params(
{"embedding_dim": 5, "num_filters": 4, "ngram_filter_sizes": [3, 5], "output_dim": 7}
)
encoder = CnnEncoder.from_params(params)
assert encoder.get_output_dim() == 7
def test_forward_does_correct_computation(self):
encoder = CnnEncoder(embedding_dim=2, num_filters=1, ngram_filter_sizes=(1, 2))
constant_init = Initializer.from_params(Params({"type": "constant", "val": 1.0}))
initializer = InitializerApplicator([(".*", constant_init)])
initializer(encoder)
input_tensor = torch.FloatTensor([[[0.7, 0.8], [0.1, 1.5]]])
encoder_output = encoder(input_tensor, None)
assert_almost_equal(
encoder_output.data.numpy(), numpy.asarray([[1.6 + 1.0, 3.1 + 1.0]]), decimal=6
)
def test_forward_runs_with_larger_input(self):
encoder = CnnEncoder(
embedding_dim=7, num_filters=13, ngram_filter_sizes=(1, 2, 3, 4, 5), output_dim=30
)
tensor = torch.rand(4, 8, 7)
assert encoder(tensor, None).size() == (4, 30)
def test_forward_respects_masking(self):
# seed 1 fails on the old cnn encoder code
torch.manual_seed(1)
encoder = CnnEncoder(embedding_dim=7, num_filters=13, ngram_filter_sizes=(1, 2, 3, 4, 5))
init = Initializer.from_params(Params({"type": "normal", "mean": 0.0, "std": 10}))
initializer = InitializerApplicator([(".*", init)])
initializer(encoder)
tokens = torch.ones(4, 8, 7)
padded_tokens = torch.nn.functional.pad(tokens.transpose(1, 2), (0, 2), value=5).transpose(
1, 2
)
mask = (
torch.where(
padded_tokens == 5, torch.zeros_like(padded_tokens), torch.ones_like(padded_tokens)
)
.bool()
.any(dim=2)
)
regular_output = encoder.forward(tokens=tokens, mask=None)
masked_output = encoder.forward(tokens=padded_tokens, mask=mask)
assert_almost_equal(regular_output.data.numpy(), masked_output.data.numpy(), decimal=6)
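# --- Illustrative sketch (not part of the original test file) ---
# In test_forward_does_correct_computation above, every parameter is initialised to
# 1.0, so each CNN filter just sums the embedding values in its n-gram window, adds
# the bias of 1.0, and max-pools over positions (the ReLU is a no-op because all
# values are positive). The helper below is our own reproduction, not AllenNLP code.
def manual_cnn_encoder_sketch() -> None:
    import torch
    tokens = torch.tensor([[0.7, 0.8], [0.1, 1.5]])  # (num_tokens, embedding_dim)
    unigram = (tokens.sum(dim=1) + 1.0).max()  # best 1-gram window plus bias
    bigram = tokens.sum() + 1.0  # the single 2-gram window plus bias
    assert torch.allclose(
        torch.stack([unigram, bigram]), torch.tensor([1.6 + 1.0, 3.1 + 1.0])
    )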
| allennlp-master | tests/modules/seq2vec_encoders/cnn_encoder_test.py |
import pytest
from numpy.testing import assert_almost_equal
import torch
from torch.nn import LSTM
from torch.nn.utils.rnn import pack_padded_sequence
from allennlp.common.checks import ConfigurationError
from allennlp.common.testing import AllenNlpTestCase
from allennlp.modules.seq2vec_encoders import PytorchSeq2VecWrapper
from allennlp.nn.util import sort_batch_by_length, get_lengths_from_binary_sequence_mask
from allennlp.modules.stacked_alternating_lstm import StackedAlternatingLstm
class TestPytorchSeq2VecWrapper(AllenNlpTestCase):
def test_get_dimensions_is_correct(self):
lstm = LSTM(bidirectional=True, num_layers=3, input_size=2, hidden_size=7, batch_first=True)
encoder = PytorchSeq2VecWrapper(lstm)
assert encoder.get_output_dim() == 14
assert encoder.get_input_dim() == 2
lstm = LSTM(
bidirectional=False, num_layers=3, input_size=2, hidden_size=7, batch_first=True
)
encoder = PytorchSeq2VecWrapper(lstm)
assert encoder.get_output_dim() == 7
assert encoder.get_input_dim() == 2
def test_forward_pulls_out_correct_tensor_without_sequence_lengths(self):
lstm = LSTM(bidirectional=True, num_layers=3, input_size=2, hidden_size=7, batch_first=True)
encoder = PytorchSeq2VecWrapper(lstm)
input_tensor = torch.FloatTensor([[[0.7, 0.8], [0.1, 1.5]]])
lstm_output = lstm(input_tensor)
encoder_output = encoder(input_tensor, None)
assert_almost_equal(encoder_output.data.numpy(), lstm_output[0].data.numpy()[:, -1, :])
def test_forward_pulls_out_correct_tensor_with_sequence_lengths(self):
lstm = LSTM(bidirectional=True, num_layers=3, input_size=3, hidden_size=7, batch_first=True)
encoder = PytorchSeq2VecWrapper(lstm)
input_tensor = torch.rand([5, 7, 3])
input_tensor[1, 6:, :] = 0
input_tensor[2, 4:, :] = 0
input_tensor[3, 2:, :] = 0
input_tensor[4, 1:, :] = 0
mask = torch.ones(5, 7).bool()
mask[1, 6:] = False
mask[2, 4:] = False
mask[3, 2:] = False
mask[4, 1:] = False
sequence_lengths = get_lengths_from_binary_sequence_mask(mask)
packed_sequence = pack_padded_sequence(
input_tensor, sequence_lengths.tolist(), batch_first=True
)
_, state = lstm(packed_sequence)
# Transpose output state, extract the last forward and backward states and
# reshape to be of dimension (batch_size, 2 * hidden_size).
reshaped_state = state[0].transpose(0, 1)[:, -2:, :].contiguous()
explicitly_concatenated_state = torch.cat(
[reshaped_state[:, 0, :].squeeze(1), reshaped_state[:, 1, :].squeeze(1)], -1
)
encoder_output = encoder(input_tensor, mask)
assert_almost_equal(encoder_output.data.numpy(), explicitly_concatenated_state.data.numpy())
def test_forward_works_even_with_empty_sequences(self):
lstm = LSTM(
bidirectional=True, num_layers=3, input_size=3, hidden_size=11, batch_first=True
)
encoder = PytorchSeq2VecWrapper(lstm)
tensor = torch.rand([5, 7, 3])
tensor[1, 6:, :] = 0
tensor[2, :, :] = 0
tensor[3, 2:, :] = 0
tensor[4, :, :] = 0
mask = torch.ones(5, 7).bool()
mask[1, 6:] = False
mask[2, :] = False
mask[3, 2:] = False
mask[4, :] = False
results = encoder(tensor, mask)
for i in (0, 1, 3):
assert not (results[i] == 0.0).data.all()
for i in (2, 4):
assert (results[i] == 0.0).data.all()
def test_forward_pulls_out_correct_tensor_with_unsorted_batches(self):
lstm = LSTM(bidirectional=True, num_layers=3, input_size=3, hidden_size=7, batch_first=True)
encoder = PytorchSeq2VecWrapper(lstm)
input_tensor = torch.rand([5, 7, 3])
input_tensor[0, 3:, :] = 0
input_tensor[1, 4:, :] = 0
input_tensor[2, 2:, :] = 0
input_tensor[3, 6:, :] = 0
mask = torch.ones(5, 7).bool()
mask[0, 3:] = False
mask[1, 4:] = False
mask[2, 2:] = False
mask[3, 6:] = False
sequence_lengths = get_lengths_from_binary_sequence_mask(mask)
sorted_inputs, sorted_sequence_lengths, restoration_indices, _ = sort_batch_by_length(
input_tensor, sequence_lengths
)
packed_sequence = pack_padded_sequence(
sorted_inputs, sorted_sequence_lengths.tolist(), batch_first=True
)
_, state = lstm(packed_sequence)
# Transpose output state, extract the last forward and backward states and
# reshape to be of dimension (batch_size, 2 * hidden_size).
sorted_transposed_state = state[0].transpose(0, 1).index_select(0, restoration_indices)
reshaped_state = sorted_transposed_state[:, -2:, :].contiguous()
explicitly_concatenated_state = torch.cat(
[reshaped_state[:, 0, :].squeeze(1), reshaped_state[:, 1, :].squeeze(1)], -1
)
encoder_output = encoder(input_tensor, mask)
assert_almost_equal(encoder_output.data.numpy(), explicitly_concatenated_state.data.numpy())
def test_wrapper_raises_if_batch_first_is_false(self):
with pytest.raises(ConfigurationError):
lstm = LSTM(bidirectional=True, num_layers=3, input_size=3, hidden_size=7)
_ = PytorchSeq2VecWrapper(lstm)
def test_wrapper_works_with_alternating_lstm(self):
model = PytorchSeq2VecWrapper(
StackedAlternatingLstm(input_size=4, hidden_size=5, num_layers=3)
)
input_tensor = torch.randn(2, 3, 4)
mask = torch.ones(2, 3).bool()
output = model(input_tensor, mask)
assert tuple(output.size()) == (2, 5)
| allennlp-master | tests/modules/seq2vec_encoders/pytorch_seq2vec_wrapper_test.py |
import numpy
import torch
from allennlp.common.testing import AllenNlpTestCase
from allennlp.modules.seq2vec_encoders.cls_pooler import ClsPooler
class TestClsPooler(AllenNlpTestCase):
def test_encoder(self):
embedding = torch.rand(5, 50, 7)
encoder = ClsPooler(embedding_dim=7)
pooled = encoder(embedding, mask=None)
assert list(pooled.size()) == [5, 7]
numpy.testing.assert_array_almost_equal(embedding[:, 0], pooled)
def test_cls_at_end(self):
embedding = torch.arange(20).reshape(5, 4).unsqueeze(-1).expand(5, 4, 7)
mask = torch.tensor(
[
[True, True, True, True],
[True, True, True, False],
[True, True, True, True],
[True, False, False, False],
[True, True, False, False],
]
)
expected = torch.LongTensor([3, 6, 11, 12, 17]).unsqueeze(-1).expand(5, 7)
encoder = ClsPooler(embedding_dim=7, cls_is_last_token=True)
pooled = encoder(embedding, mask=mask)
assert list(pooled.size()) == [5, 7]
numpy.testing.assert_array_almost_equal(expected, pooled)
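# --- Illustrative sketch (not part of the original test file) ---
# In test_cls_at_end above, embedding[i, j] == 4 * i + j, and with
# cls_is_last_token=True the pooler picks the embedding at the last unmasked position
# of each row, which is where the expected values [3, 6, 11, 12, 17] come from. The
# helper below is our own reproduction, not AllenNLP code.
def manual_cls_at_end_sketch() -> None:
    import torch
    mask = torch.tensor(
        [
            [True, True, True, True],
            [True, True, True, False],
            [True, True, True, True],
            [True, False, False, False],
            [True, True, False, False],
        ]
    )
    last_indices = mask.sum(dim=1) - 1  # index of the last True entry in each row
    expected = 4 * torch.arange(5) + last_indices  # embedding[i, j] == 4 * i + j
    assert expected.tolist() == [3, 6, 11, 12, 17]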
| allennlp-master | tests/modules/seq2vec_encoders/cls_pooler_test.py |
allennlp-master | tests/modules/seq2vec_encoders/__init__.py |
|
import numpy
from numpy.testing import assert_almost_equal
import torch
from allennlp.common import Params
from allennlp.modules.seq2vec_encoders import BagOfEmbeddingsEncoder
from allennlp.common.testing import AllenNlpTestCase
class TestBagOfEmbeddingsEncoder(AllenNlpTestCase):
def test_get_dimension_is_correct(self):
encoder = BagOfEmbeddingsEncoder(embedding_dim=5)
assert encoder.get_input_dim() == 5
assert encoder.get_output_dim() == 5
encoder = BagOfEmbeddingsEncoder(embedding_dim=12)
assert encoder.get_input_dim() == 12
assert encoder.get_output_dim() == 12
def test_can_construct_from_params(self):
params = Params({"embedding_dim": 5})
encoder = BagOfEmbeddingsEncoder.from_params(params)
assert encoder.get_input_dim() == 5
assert encoder.get_output_dim() == 5
params = Params({"embedding_dim": 12, "averaged": True})
encoder = BagOfEmbeddingsEncoder.from_params(params)
assert encoder.get_input_dim() == 12
assert encoder.get_output_dim() == 12
def test_forward_does_correct_computation(self):
encoder = BagOfEmbeddingsEncoder(embedding_dim=2)
input_tensor = torch.FloatTensor(
[[[0.7, 0.8], [0.1, 1.5], [0.3, 0.6]], [[0.5, 0.3], [1.4, 1.1], [0.3, 0.9]]]
)
mask = torch.ByteTensor([[1, 1, 1], [1, 1, 0]])
encoder_output = encoder(input_tensor, mask)
assert_almost_equal(
encoder_output.data.numpy(),
numpy.asarray([[0.7 + 0.1 + 0.3, 0.8 + 1.5 + 0.6], [0.5 + 1.4, 0.3 + 1.1]]),
)
def test_forward_does_correct_computation_with_average(self):
encoder = BagOfEmbeddingsEncoder(embedding_dim=2, averaged=True)
input_tensor = torch.FloatTensor(
[
[[0.7, 0.8], [0.1, 1.5], [0.3, 0.6]],
[[0.5, 0.3], [1.4, 1.1], [0.3, 0.9]],
[[0.4, 0.3], [0.4, 0.3], [1.4, 1.7]],
]
)
mask = torch.ByteTensor([[1, 1, 1], [1, 1, 0], [0, 0, 0]])
encoder_output = encoder(input_tensor, mask)
assert_almost_equal(
encoder_output.data.numpy(),
numpy.asarray(
[
[(0.7 + 0.1 + 0.3) / 3, (0.8 + 1.5 + 0.6) / 3],
[(0.5 + 1.4) / 2, (0.3 + 1.1) / 2],
[0.0, 0.0],
]
),
)
def test_forward_does_correct_computation_with_average_no_mask(self):
encoder = BagOfEmbeddingsEncoder(embedding_dim=2, averaged=True)
input_tensor = torch.FloatTensor(
[[[0.7, 0.8], [0.1, 1.5], [0.3, 0.6]], [[0.5, 0.3], [1.4, 1.1], [0.3, 0.9]]]
)
encoder_output = encoder(input_tensor)
assert_almost_equal(
encoder_output.data.numpy(),
numpy.asarray(
[
[(0.7 + 0.1 + 0.3) / 3, (0.8 + 1.5 + 0.6) / 3],
[(0.5 + 1.4 + 0.3) / 3, (0.3 + 1.1 + 0.9) / 3],
]
),
)
| allennlp-master | tests/modules/seq2vec_encoders/boe_encoder_test.py |
import numpy as np
import torch
from allennlp.common.testing import AllenNlpTestCase
from allennlp.modules.seq2vec_encoders.cnn_highway_encoder import CnnHighwayEncoder
from allennlp.modules.time_distributed import TimeDistributed
class TestCnnHighwayEncoder(AllenNlpTestCase):
def run_encoder_against_random_embeddings(self, do_layer_norm):
encoder = CnnHighwayEncoder(
activation="relu",
embedding_dim=4,
filters=[[1, 4], [2, 8], [3, 16], [4, 32], [5, 64]],
num_highway=2,
projection_dim=16,
projection_location="after_cnn",
do_layer_norm=do_layer_norm,
)
encoder = TimeDistributed(encoder)
embedding = torch.from_numpy(np.random.randn(5, 6, 50, 4)).float()
mask = torch.ones(5, 6, 50).bool()
token_embedding = encoder(embedding, mask)
assert list(token_embedding.size()) == [5, 6, 16]
def test_cnn_highway_encoder(self):
self.run_encoder_against_random_embeddings(do_layer_norm=False)
def test_cnn_highway_encoder_with_layer_norm(self):
self.run_encoder_against_random_embeddings(do_layer_norm=True)
| allennlp-master | tests/modules/seq2vec_encoders/cnn_highway_encoder_test.py |
import numpy
import torch
from allennlp.common.testing import AllenNlpTestCase
from allennlp.modules.seq2vec_encoders import BertPooler
class TestBertPooler(AllenNlpTestCase):
def test_encoder(self):
encoder = BertPooler("bert-base-uncased")
assert encoder.get_input_dim() == encoder.get_output_dim()
embedding = torch.rand(8, 24, encoder.get_input_dim())
pooled1 = encoder(embedding)
assert pooled1.size() == (8, encoder.get_input_dim())
embedding[:, 1:, :] = 0
pooled2 = encoder(embedding)
numpy.testing.assert_array_almost_equal(pooled1.detach().numpy(), pooled2.detach().numpy())
| allennlp-master | tests/modules/seq2vec_encoders/bert_pooler_test.py |
import numpy
import torch
from allennlp.modules.span_extractors import SpanExtractor, SelfAttentiveSpanExtractor
from allennlp.common.params import Params
class TestSelfAttentiveSpanExtractor:
def test_locally_normalised_span_extractor_can_build_from_params(self):
params = Params({"type": "self_attentive", "input_dim": 5})
extractor = SpanExtractor.from_params(params)
assert isinstance(extractor, SelfAttentiveSpanExtractor)
def test_attention_is_normalised_correctly(self):
input_dim = 7
sequence_tensor = torch.randn([2, 5, input_dim])
extractor = SelfAttentiveSpanExtractor(input_dim=input_dim)
assert extractor.get_output_dim() == input_dim
assert extractor.get_input_dim() == input_dim
        # In order to test the attention, we set the weight that computes the logits to
        # zero, so the attention distribution is uniform over the sentence. This lets us
        # check that each computed span representation is just the average of the
        # representations it covers (see the standalone sketch at the end of this file).
extractor._global_attention._module.weight.data.fill_(0.0)
extractor._global_attention._module.bias.data.fill_(0.0)
indices = torch.LongTensor(
[[[1, 3], [2, 4]], [[0, 2], [3, 4]]]
) # smaller span tests masking.
span_representations = extractor(sequence_tensor, indices)
assert list(span_representations.size()) == [2, 2, input_dim]
# First element in the batch.
batch_element = 0
spans = span_representations[batch_element]
# First span.
mean_embeddings = sequence_tensor[batch_element, 1:4, :].mean(0)
numpy.testing.assert_array_almost_equal(spans[0].data.numpy(), mean_embeddings.data.numpy())
# Second span.
mean_embeddings = sequence_tensor[batch_element, 2:5, :].mean(0)
numpy.testing.assert_array_almost_equal(spans[1].data.numpy(), mean_embeddings.data.numpy())
# Now the second element in the batch.
batch_element = 1
spans = span_representations[batch_element]
# First span.
mean_embeddings = sequence_tensor[batch_element, 0:3, :].mean(0)
numpy.testing.assert_array_almost_equal(spans[0].data.numpy(), mean_embeddings.data.numpy())
# Second span.
mean_embeddings = sequence_tensor[batch_element, 3:5, :].mean(0)
numpy.testing.assert_array_almost_equal(spans[1].data.numpy(), mean_embeddings.data.numpy())
# Now test the case in which we have some masked spans in our indices.
indices_mask = torch.tensor([[True, True], [True, False]])
span_representations = extractor(sequence_tensor, indices, span_indices_mask=indices_mask)
# First element in the batch.
batch_element = 0
spans = span_representations[batch_element]
# First span.
mean_embeddings = sequence_tensor[batch_element, 1:4, :].mean(0)
numpy.testing.assert_array_almost_equal(spans[0].data.numpy(), mean_embeddings.data.numpy())
# Second span.
mean_embeddings = sequence_tensor[batch_element, 2:5, :].mean(0)
numpy.testing.assert_array_almost_equal(spans[1].data.numpy(), mean_embeddings.data.numpy())
# Now the second element in the batch.
batch_element = 1
spans = span_representations[batch_element]
# First span.
mean_embeddings = sequence_tensor[batch_element, 0:3, :].mean(0)
numpy.testing.assert_array_almost_equal(spans[0].data.numpy(), mean_embeddings.data.numpy())
# Second span was masked, so should be completely zero.
numpy.testing.assert_array_almost_equal(spans[1].data.numpy(), numpy.zeros([input_dim]))
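# --- Illustrative sketch (not part of the original test file) ---
# The test above zeroes the attention scorer, so the softmax over each span is
# uniform and the attended representation collapses to the mean of the span's
# timesteps. The helper below demonstrates that fact with plain tensor ops; it is
# our own sketch, not AllenNLP code.
def uniform_attention_is_mean_sketch() -> None:
    import torch
    span = torch.randn(3, 7)  # 3 timesteps, 7 dimensions
    weights = torch.softmax(torch.zeros(3), dim=0)  # zero logits -> uniform weights
    attended = weights @ span
    assert torch.allclose(attended, span.mean(dim=0), atol=1e-6)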
| allennlp-master | tests/modules/span_extractors/self_attentive_span_extractor_test.py |
allennlp-master | tests/modules/span_extractors/__init__.py |
|
import numpy
import torch
from allennlp.modules.span_extractors import SpanExtractor, EndpointSpanExtractor
from allennlp.common.params import Params
from allennlp.nn.util import batched_index_select
class TestEndpointSpanExtractor:
def test_endpoint_span_extractor_can_build_from_params(self):
params = Params(
{
"type": "endpoint",
"input_dim": 7,
"num_width_embeddings": 5,
"span_width_embedding_dim": 3,
}
)
extractor = SpanExtractor.from_params(params)
assert isinstance(extractor, EndpointSpanExtractor)
assert extractor.get_output_dim() == 17 # 2 * input_dim + span_width_embedding_dim
def test_correct_sequence_elements_are_embedded(self):
sequence_tensor = torch.randn([2, 5, 7])
        # Concatenate start and end points together to form our representation.
extractor = EndpointSpanExtractor(7, "x,y")
indices = torch.LongTensor([[[1, 3], [2, 4]], [[0, 2], [3, 4]]])
span_representations = extractor(sequence_tensor, indices)
assert list(span_representations.size()) == [2, 2, 14]
assert extractor.get_output_dim() == 14
assert extractor.get_input_dim() == 7
start_indices, end_indices = indices.split(1, -1)
# We just concatenated the start and end embeddings together, so
# we can check they match the original indices if we split them apart.
start_embeddings, end_embeddings = span_representations.split(7, -1)
correct_start_embeddings = batched_index_select(sequence_tensor, start_indices.squeeze())
correct_end_embeddings = batched_index_select(sequence_tensor, end_indices.squeeze())
numpy.testing.assert_array_equal(
start_embeddings.data.numpy(), correct_start_embeddings.data.numpy()
)
numpy.testing.assert_array_equal(
end_embeddings.data.numpy(), correct_end_embeddings.data.numpy()
)
def test_masked_indices_are_handled_correctly(self):
sequence_tensor = torch.randn([2, 5, 7])
        # Concatenate start and end points together to form our representation.
extractor = EndpointSpanExtractor(7, "x,y")
indices = torch.LongTensor([[[1, 3], [2, 4]], [[0, 2], [3, 4]]])
span_representations = extractor(sequence_tensor, indices)
# Make a mask with the second batch element completely masked.
indices_mask = torch.tensor([[True, True], [False, False]])
span_representations = extractor(sequence_tensor, indices, span_indices_mask=indices_mask)
start_embeddings, end_embeddings = span_representations.split(7, -1)
start_indices, end_indices = indices.split(1, -1)
correct_start_embeddings = batched_index_select(
sequence_tensor, start_indices.squeeze()
).data
# Completely masked second batch element, so it should all be zero.
correct_start_embeddings[1, :, :].fill_(0)
correct_end_embeddings = batched_index_select(sequence_tensor, end_indices.squeeze()).data
correct_end_embeddings[1, :, :].fill_(0)
numpy.testing.assert_array_equal(
start_embeddings.data.numpy(), correct_start_embeddings.numpy()
)
numpy.testing.assert_array_equal(
end_embeddings.data.numpy(), correct_end_embeddings.numpy()
)
def test_masked_indices_are_handled_correctly_with_exclusive_indices(self):
sequence_tensor = torch.randn([2, 5, 8])
        # Concatenate start and end points together to form our representation,
        # this time using exclusive start indices.
extractor = EndpointSpanExtractor(8, "x,y", use_exclusive_start_indices=True)
indices = torch.LongTensor([[[1, 3], [2, 4]], [[0, 2], [0, 1]]])
sequence_mask = torch.tensor(
[[True, True, True, True, True], [True, True, True, False, False]]
)
span_representations = extractor(sequence_tensor, indices, sequence_mask=sequence_mask)
# We just concatenated the start and end embeddings together, so
# we can check they match the original indices if we split them apart.
start_embeddings, end_embeddings = span_representations.split(8, -1)
correct_start_indices = torch.LongTensor([[0, 1], [-1, -1]])
# These indices should be -1, so they'll be replaced with a sentinel. Here,
# we'll set them to a value other than -1 so we can index select the indices and
# replace them later.
correct_start_indices[1, 0] = 1
correct_start_indices[1, 1] = 1
correct_end_indices = torch.LongTensor([[3, 4], [2, 1]])
correct_start_embeddings = batched_index_select(
sequence_tensor.contiguous(), correct_start_indices
)
        # These elements had a sequence_tensor index of 0, so their exclusive indices are the start sentinel.
correct_start_embeddings[1, 0] = extractor._start_sentinel.data
correct_start_embeddings[1, 1] = extractor._start_sentinel.data
numpy.testing.assert_array_equal(
start_embeddings.data.numpy(), correct_start_embeddings.data.numpy()
)
correct_end_embeddings = batched_index_select(
sequence_tensor.contiguous(), correct_end_indices
)
numpy.testing.assert_array_equal(
end_embeddings.data.numpy(), correct_end_embeddings.data.numpy()
)
| allennlp-master | tests/modules/span_extractors/endpoint_span_extractor_test.py |
import numpy
import pytest
import torch
from allennlp.common.checks import ConfigurationError
from allennlp.common.params import Params
from allennlp.modules.span_extractors import BidirectionalEndpointSpanExtractor, SpanExtractor
from allennlp.nn.util import batched_index_select
class TestBidirectonalEndpointSpanExtractor:
def test_bidirectional_endpoint_span_extractor_can_build_from_params(self):
params = Params(
{
"type": "bidirectional_endpoint",
"input_dim": 4,
"num_width_embeddings": 5,
"span_width_embedding_dim": 3,
}
)
extractor = SpanExtractor.from_params(params)
assert isinstance(extractor, BidirectionalEndpointSpanExtractor)
assert extractor.get_output_dim() == 2 + 2 + 3
def test_raises_on_odd_input_dimension(self):
with pytest.raises(ConfigurationError):
_ = BidirectionalEndpointSpanExtractor(7)
def test_correct_sequence_elements_are_embedded(self):
sequence_tensor = torch.randn([2, 5, 8])
        # Concatenate start and end points together to form our representation
        # for both the forward and backward directions.
extractor = BidirectionalEndpointSpanExtractor(
input_dim=8, forward_combination="x,y", backward_combination="x,y"
)
indices = torch.LongTensor([[[1, 3], [2, 4]], [[0, 2], [3, 4]]])
span_representations = extractor(sequence_tensor, indices)
assert list(span_representations.size()) == [2, 2, 16]
assert extractor.get_output_dim() == 16
assert extractor.get_input_dim() == 8
# We just concatenated the start and end embeddings together, so
# we can check they match the original indices if we split them apart.
(
forward_start_embeddings,
forward_end_embeddings,
backward_start_embeddings,
backward_end_embeddings,
) = span_representations.split(4, -1)
forward_sequence_tensor, backward_sequence_tensor = sequence_tensor.split(4, -1)
        # Forward direction => subtract 1 from start indices to make them exclusive.
correct_forward_start_indices = torch.LongTensor([[0, 1], [-1, 2]])
# This index should be -1, so it will be replaced with a sentinel. Here,
# we'll set it to a value other than -1 so we can index select the indices and
# replace it later.
correct_forward_start_indices[1, 0] = 1
# Forward direction => end indices are the same.
correct_forward_end_indices = torch.LongTensor([[3, 4], [2, 4]])
# Backward direction => start indices are exclusive, so add 1 to the end indices.
correct_backward_start_indices = torch.LongTensor([[4, 5], [3, 5]])
        # These exclusive backward start indices are outside the tensor, so they will be replaced
        # with the end sentinel. Here we replace them with ones so we can index select using
        # these indices without torch complaining.
correct_backward_start_indices[0, 1] = 1
correct_backward_start_indices[1, 1] = 1
# Backward direction => end indices are inclusive and equal to the forward start indices.
correct_backward_end_indices = torch.LongTensor([[1, 2], [0, 3]])
correct_forward_start_embeddings = batched_index_select(
forward_sequence_tensor.contiguous(), correct_forward_start_indices
)
        # This element had a sequence_tensor index of 0, so its exclusive index is the start sentinel.
correct_forward_start_embeddings[1, 0] = extractor._start_sentinel.data
numpy.testing.assert_array_equal(
forward_start_embeddings.data.numpy(), correct_forward_start_embeddings.data.numpy()
)
correct_forward_end_embeddings = batched_index_select(
forward_sequence_tensor.contiguous(), correct_forward_end_indices
)
numpy.testing.assert_array_equal(
forward_end_embeddings.data.numpy(), correct_forward_end_embeddings.data.numpy()
)
correct_backward_end_embeddings = batched_index_select(
backward_sequence_tensor.contiguous(), correct_backward_end_indices
)
numpy.testing.assert_array_equal(
backward_end_embeddings.data.numpy(), correct_backward_end_embeddings.data.numpy()
)
correct_backward_start_embeddings = batched_index_select(
backward_sequence_tensor.contiguous(), correct_backward_start_indices
)
        # These elements had sequence_tensor index == sequence_tensor.size(1),
        # so their exclusive indices are the end sentinel.
correct_backward_start_embeddings[0, 1] = extractor._end_sentinel.data
correct_backward_start_embeddings[1, 1] = extractor._end_sentinel.data
numpy.testing.assert_array_equal(
backward_start_embeddings.data.numpy(), correct_backward_start_embeddings.data.numpy()
)
def test_correct_sequence_elements_are_embedded_with_a_masked_sequence(self):
sequence_tensor = torch.randn([2, 5, 8])
        # Concatenate start and end points together to form our representation
        # for both the forward and backward directions.
extractor = BidirectionalEndpointSpanExtractor(
input_dim=8, forward_combination="x,y", backward_combination="x,y"
)
indices = torch.LongTensor(
[
[[1, 3], [2, 4]],
# This span has an end index at the
# end of the padded sequence.
[[0, 2], [0, 1]],
]
)
sequence_mask = torch.tensor(
[[True, True, True, True, True], [True, True, True, False, False]]
)
span_representations = extractor(sequence_tensor, indices, sequence_mask=sequence_mask)
# We just concatenated the start and end embeddings together, so
# we can check they match the original indices if we split them apart.
(
forward_start_embeddings,
forward_end_embeddings,
backward_start_embeddings,
backward_end_embeddings,
) = span_representations.split(4, -1)
forward_sequence_tensor, backward_sequence_tensor = sequence_tensor.split(4, -1)
        # Forward direction => subtract 1 from start indices to make them exclusive.
correct_forward_start_indices = torch.LongTensor([[0, 1], [-1, -1]])
# These indices should be -1, so they'll be replaced with a sentinel. Here,
# we'll set them to a value other than -1 so we can index select the indices and
# replace them later.
correct_forward_start_indices[1, 0] = 1
correct_forward_start_indices[1, 1] = 1
# Forward direction => end indices are the same.
correct_forward_end_indices = torch.LongTensor([[3, 4], [2, 1]])
# Backward direction => start indices are exclusive, so add 1 to the end indices.
correct_backward_start_indices = torch.LongTensor([[4, 5], [3, 2]])
# These exclusive backward start indices are outside the tensor, so will be replaced
# with the end sentinel. Here we replace them with ones so we can index select using
# these indices without torch complaining.
correct_backward_start_indices[0, 1] = 1
# Backward direction => end indices are inclusive and equal to the forward start indices.
correct_backward_end_indices = torch.LongTensor([[1, 2], [0, 0]])
correct_forward_start_embeddings = batched_index_select(
forward_sequence_tensor.contiguous(), correct_forward_start_indices
)
        # These elements had a sequence_tensor index of 0, so their exclusive indices are the start sentinel.
correct_forward_start_embeddings[1, 0] = extractor._start_sentinel.data
correct_forward_start_embeddings[1, 1] = extractor._start_sentinel.data
numpy.testing.assert_array_equal(
forward_start_embeddings.data.numpy(), correct_forward_start_embeddings.data.numpy()
)
correct_forward_end_embeddings = batched_index_select(
forward_sequence_tensor.contiguous(), correct_forward_end_indices
)
numpy.testing.assert_array_equal(
forward_end_embeddings.data.numpy(), correct_forward_end_embeddings.data.numpy()
)
correct_backward_end_embeddings = batched_index_select(
backward_sequence_tensor.contiguous(), correct_backward_end_indices
)
numpy.testing.assert_array_equal(
backward_end_embeddings.data.numpy(), correct_backward_end_embeddings.data.numpy()
)
correct_backward_start_embeddings = batched_index_select(
backward_sequence_tensor.contiguous(), correct_backward_start_indices
)
        # This element had sequence_tensor index == sequence_tensor.size(1),
        # so its exclusive index is the end sentinel.
correct_backward_start_embeddings[0, 1] = extractor._end_sentinel.data
# This element has sequence_tensor index == the masked length of the batch element,
# so it should be the end_sentinel even though it isn't greater than sequence_tensor.size(1).
correct_backward_start_embeddings[1, 0] = extractor._end_sentinel.data
numpy.testing.assert_array_equal(
backward_start_embeddings.data.numpy(), correct_backward_start_embeddings.data.numpy()
)
def test_forward_doesnt_raise_with_empty_sequence(self):
# size: (batch_size=1, sequence_length=2, emb_dim=2)
sequence_tensor = torch.FloatTensor([[[0.0, 0.0], [0.0, 0.0]]])
# size: (batch_size=1, sequence_length=2)
sequence_mask = torch.tensor([[False, False]])
# size: (batch_size=1, spans_count=1, 2)
span_indices = torch.LongTensor([[[-1, -1]]])
# size: (batch_size=1, spans_count=1)
span_indices_mask = torch.tensor([[False]])
extractor = BidirectionalEndpointSpanExtractor(
input_dim=2, forward_combination="x,y", backward_combination="x,y"
)
span_representations = extractor(
sequence_tensor,
span_indices,
sequence_mask=sequence_mask,
span_indices_mask=span_indices_mask,
)
numpy.testing.assert_array_equal(
span_representations.detach(), torch.FloatTensor([[[0.0, 0.0, 0.0, 0.0]]])
)
def test_forward_raises_with_invalid_indices(self):
sequence_tensor = torch.randn([2, 5, 8])
extractor = BidirectionalEndpointSpanExtractor(input_dim=8)
indices = torch.LongTensor([[[-1, 3], [7, 4]], [[0, 12], [0, -1]]])
with pytest.raises(ValueError):
_ = extractor(sequence_tensor, indices)
| allennlp-master | tests/modules/span_extractors/bidirectional_endpoint_span_extractor_test.py |
import numpy
from numpy.testing import assert_almost_equal
import torch
from torch.nn import Parameter
from allennlp.common import Params
from allennlp.common.testing.test_case import AllenNlpTestCase
from allennlp.modules.matrix_attention import LinearMatrixAttention
from allennlp.modules.matrix_attention.matrix_attention import MatrixAttention
class TestLinearMatrixAttention(AllenNlpTestCase):
def test_can_init_dot(self):
legacy_attention = MatrixAttention.from_params(
Params({"type": "linear", "tensor_1_dim": 3, "tensor_2_dim": 3})
)
        assert isinstance(legacy_attention, LinearMatrixAttention)
def test_linear_similarity(self):
linear = LinearMatrixAttention(3, 3)
linear._weight_vector = Parameter(torch.FloatTensor([-0.3, 0.5, 2.0, -1.0, 1, 1]))
linear._bias = Parameter(torch.FloatTensor([0.1]))
output = linear(
torch.FloatTensor([[[0, 0, 0], [4, 5, 6]], [[-7, -8, -9], [10, 11, 12]]]),
torch.FloatTensor([[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 11, 12]]]),
)
assert_almost_equal(
output.data.numpy(),
numpy.array(
[[[4.1000, 7.1000], [17.4000, 20.4000]], [[-9.8000, -6.8000], [36.6000, 39.6000]]]
),
decimal=2,
)
def test_bidaf_trilinear_similarity(self):
linear = LinearMatrixAttention(2, 2, combination="x,y,x*y")
linear._weight_vector = Parameter(torch.FloatTensor([-0.3, 0.5, 2.0, -1.0, 1, 1]))
linear._bias = Parameter(torch.FloatTensor([0.0]))
output = linear(
torch.FloatTensor([[[0, 0], [4, 5]], [[-7, -8], [10, 11]]]),
torch.FloatTensor([[[1, 2], [4, 5]], [[7, 8], [10, 11]]]),
)
assert_almost_equal(
output.data.numpy(),
numpy.array(
[
[
[0 + 0 + 2 + -2 + 0 + 0, 0 + 0 + 8 + -5 + 0 + 0],
[-1.2 + 2.5 + 2 + -2 + 4 + 10, -1.2 + 2.5 + 8 + -5 + 16 + 25],
],
[
[2.1 + -4 + 14 + -8 + -49 + -64, 2.1 + -4 + 20 + -11 + -70 + -88],
[-3 + 5.5 + 14 + -8 + 70 + 88, -3 + 5.5 + 20 + -11 + 100 + 121],
],
]
),
decimal=2,
)
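# --- Illustrative sketch (not part of the original test file) ---
# In test_linear_similarity above, the default "x,y" combination means each score is
# the dot product of the concatenation [x; y] with the six-dimensional weight vector,
# plus the bias. The helper below is our own reproduction of the first expected value
# (4.1), not AllenNLP code.
def manual_linear_matrix_attention_sketch() -> None:
    import torch
    weight = torch.tensor([-0.3, 0.5, 2.0, -1.0, 1.0, 1.0])
    bias = 0.1
    x = torch.tensor([0.0, 0.0, 0.0])
    y = torch.tensor([1.0, 2.0, 3.0])
    score = torch.dot(torch.cat([x, y]), weight) + bias
    assert torch.allclose(score, torch.tensor(4.1))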
| allennlp-master | tests/modules/matrix_attention/linear_matrix_attention_test.py |
from numpy.testing import assert_almost_equal
import torch
from torch.nn.parameter import Parameter
from allennlp.common import Params
from allennlp.modules.matrix_attention import BilinearMatrixAttention
from allennlp.common.testing import AllenNlpTestCase
class TestBilinearMatrixAttention(AllenNlpTestCase):
def test_forward_does_a_bilinear_product(self):
params = Params({"matrix_1_dim": 2, "matrix_2_dim": 2})
bilinear = BilinearMatrixAttention.from_params(params)
bilinear._weight_matrix = Parameter(torch.FloatTensor([[-0.3, 0.5], [2.0, -1.0]]))
bilinear._bias = Parameter(torch.FloatTensor([0.1]))
a_vectors = torch.FloatTensor([[[1, 1], [2, 2]]])
b_vectors = torch.FloatTensor([[[1, 0], [0, 1]]])
result = bilinear(a_vectors, b_vectors).detach().numpy()
assert result.shape == (1, 2, 2)
assert_almost_equal(result, [[[1.8, -0.4], [3.5, -0.9]]])
def test_forward_does_a_bilinear_product_when_using_biases(self):
params = Params({"matrix_1_dim": 2, "matrix_2_dim": 2, "use_input_biases": True})
bilinear = BilinearMatrixAttention.from_params(params)
bilinear._weight_matrix = Parameter(
torch.FloatTensor([[-0.3, 0.5, 1.0], [2.0, -1.0, -1.0], [1.0, 0.5, 1.0]])
)
bilinear._bias = Parameter(torch.FloatTensor([0.1]))
a_vectors = torch.FloatTensor([[[1, 1], [2, 2]]])
b_vectors = torch.FloatTensor([[[1, 0], [0, 1]]])
result = bilinear(a_vectors, b_vectors).detach().numpy()
assert result.shape == (1, 2, 2)
assert_almost_equal(result, [[[3.8, 1.1], [5.5, 0.6]]])
| allennlp-master | tests/modules/matrix_attention/bilinear_matrix_attention_test.py |
allennlp-master | tests/modules/matrix_attention/__init__.py |
|
import torch
from numpy.testing import assert_almost_equal
import numpy
from allennlp.common import Params
from allennlp.common.testing.test_case import AllenNlpTestCase
from allennlp.modules.matrix_attention import DotProductMatrixAttention
from allennlp.modules.matrix_attention.matrix_attention import MatrixAttention
class TestDotProductMatrixAttention(AllenNlpTestCase):
def test_can_init_dot(self):
legacy_attention = MatrixAttention.from_params(Params({"type": "dot_product"}))
        assert isinstance(legacy_attention, DotProductMatrixAttention)
def test_dot_product_similarity(self):
        # Example use case: a batch of size 2 with a time dimension (e.g. sentences of
        # length 2), where each word is a vector of length 3. The input is compared
        # against another input of the same shape.
output = DotProductMatrixAttention()(
torch.FloatTensor([[[0, 0, 0], [4, 5, 6]], [[-7, -8, -9], [10, 11, 12]]]),
torch.FloatTensor([[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 11, 12]]]),
)
        # For the first batch there is no correlation between the first words of the two
        # inputs, but perfect correlation for the second words. For the second batch there
        # is negative correlation for the first words and positive correlation for the
        # second words.
assert_almost_equal(
output.numpy(), numpy.array([[[0, 0], [32, 77]], [[-194, -266], [266, 365]]]), decimal=2
)
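# --- Illustrative sketch (not part of the original test file) ---
# The scores above are exactly a batched matrix product between the first input and
# the transpose of the second. The helper below reproduces the expected values with
# plain tensor ops; it is our own sketch, not AllenNLP code.
def manual_dot_product_matrix_attention_sketch() -> None:
    import torch
    x = torch.tensor([[[0.0, 0, 0], [4, 5, 6]], [[-7, -8, -9], [10, 11, 12]]])
    y = torch.tensor([[[1.0, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 11, 12]]])
    scores = torch.bmm(x, y.transpose(1, 2))  # (batch, rows_of_x, rows_of_y)
    expected = torch.tensor([[[0.0, 0], [32, 77]], [[-194, -266], [266, 365]]])
    assert torch.allclose(scores, expected)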
| allennlp-master | tests/modules/matrix_attention/dot_product_matrix_attention_test.py |
import torch
from numpy.testing import assert_almost_equal
import numpy
from allennlp.common import Params
from allennlp.common.testing.test_case import AllenNlpTestCase
from allennlp.modules.matrix_attention import CosineMatrixAttention
from allennlp.modules.matrix_attention.matrix_attention import MatrixAttention
class TestCosineMatrixAttention(AllenNlpTestCase):
def test_can_init_cosine(self):
legacy_attention = MatrixAttention.from_params(Params({"type": "cosine"}))
        assert isinstance(legacy_attention, CosineMatrixAttention)
def test_cosine_similarity(self):
        # Example use case: a batch of size 2 with a time dimension (e.g. sentences of
        # length 2), where each word is a vector of length 3. The input is compared
        # against another input of the same shape.
output = CosineMatrixAttention()(
torch.FloatTensor([[[0, 0, 0], [4, 5, 6]], [[-7, -8, -9], [10, 11, 12]]]),
torch.FloatTensor([[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 11, 12]]]),
)
        # For the first batch there is no correlation between the first words of the two
        # inputs, but perfect correlation for the second words. For the second batch there
        # is negative correlation for the first words and positive correlation for the
        # second words.
assert_almost_equal(
output.numpy(), numpy.array([[[0, 0], [0.97, 1]], [[-1, -0.99], [0.99, 1]]]), decimal=2
)
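# --- Illustrative sketch (not part of the original test file) ---
# Cosine attention is the dot product of L2-normalised rows. The helper below
# reproduces the ~0.97 entry above (row [4, 5, 6] against [1, 2, 3]); it is our own
# sketch, not AllenNLP code.
def manual_cosine_matrix_attention_sketch() -> None:
    import torch
    x = torch.tensor([4.0, 5.0, 6.0])
    y = torch.tensor([1.0, 2.0, 3.0])
    cosine = torch.dot(x / x.norm(), y / y.norm())
    assert abs(cosine.item() - 0.97) < 0.01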
| allennlp-master | tests/modules/matrix_attention/cosine_matrix_attention_test.py |
import numpy
import torch
from allennlp.modules.token_embedders import PassThroughTokenEmbedder
from allennlp.common.testing import AllenNlpTestCase
class TestPassThroughTokenEmbedder(AllenNlpTestCase):
def test_pass_through_embedder(self):
embedder = PassThroughTokenEmbedder(3)
tensor = torch.randn([4, 3])
numpy.testing.assert_equal(tensor.numpy(), embedder(tensor).numpy())
assert embedder.get_output_dim() == 3
| allennlp-master | tests/modules/token_embedders/pass_through_embedder_test.py |
import pytest
import torch
from allennlp.common import Params
from allennlp.data import Token, Vocabulary
from allennlp.data.batch import Batch
from allennlp.data.fields import TextField
from allennlp.data.instance import Instance
from allennlp.data.token_indexers import PretrainedTransformerMismatchedIndexer
from allennlp.modules.text_field_embedders import BasicTextFieldEmbedder
from allennlp.modules.token_embedders import PretrainedTransformerMismatchedEmbedder
from allennlp.common.testing import AllenNlpTestCase
class TestPretrainedTransformerMismatchedEmbedder(AllenNlpTestCase):
@pytest.mark.parametrize("train_parameters", [True, False])
def test_end_to_end(self, train_parameters: bool):
token_indexer = PretrainedTransformerMismatchedIndexer("bert-base-uncased")
sentence1 = ["A", ",", "AllenNLP", "sentence", "."]
sentence2 = ["AllenNLP", "is", "great"]
tokens1 = [Token(word) for word in sentence1]
tokens2 = [Token(word) for word in sentence2]
vocab = Vocabulary()
params = Params(
{
"token_embedders": {
"bert": {
"type": "pretrained_transformer_mismatched",
"model_name": "bert-base-uncased",
"train_parameters": train_parameters,
}
}
}
)
token_embedder = BasicTextFieldEmbedder.from_params(vocab=vocab, params=params)
instance1 = Instance({"tokens": TextField(tokens1, {"bert": token_indexer})})
instance2 = Instance({"tokens": TextField(tokens2, {"bert": token_indexer})})
batch = Batch([instance1, instance2])
batch.index_instances(vocab)
padding_lengths = batch.get_padding_lengths()
tensor_dict = batch.as_tensor_dict(padding_lengths)
tokens = tensor_dict["tokens"]
assert tokens["bert"]["offsets"].tolist() == [
[[1, 1], [2, 2], [3, 5], [6, 6], [7, 7]],
[[1, 3], [4, 4], [5, 5], [0, 0], [0, 0]],
]
# Attention mask
bert_vectors = token_embedder(tokens)
assert bert_vectors.size() == (2, max(len(sentence1), len(sentence2)), 768)
assert not torch.isnan(bert_vectors).any()
assert bert_vectors.requires_grad == train_parameters
def test_long_sequence_splitting_end_to_end(self):
token_indexer = PretrainedTransformerMismatchedIndexer("bert-base-uncased", max_length=4)
sentence1 = ["A", ",", "AllenNLP", "sentence", "."]
sentence2 = ["AllenNLP", "is", "great"]
tokens1 = [Token(word) for word in sentence1]
tokens2 = [Token(word) for word in sentence2]
vocab = Vocabulary()
params = Params(
{
"token_embedders": {
"bert": {
"type": "pretrained_transformer_mismatched",
"model_name": "bert-base-uncased",
"max_length": 4,
}
}
}
)
token_embedder = BasicTextFieldEmbedder.from_params(vocab=vocab, params=params)
instance1 = Instance({"tokens": TextField(tokens1, {"bert": token_indexer})})
instance2 = Instance({"tokens": TextField(tokens2, {"bert": token_indexer})})
batch = Batch([instance1, instance2])
batch.index_instances(vocab)
padding_lengths = batch.get_padding_lengths()
tensor_dict = batch.as_tensor_dict(padding_lengths)
tokens = tensor_dict["tokens"]
assert tokens["bert"]["mask"].tolist() == [
[True, True, True, True, True],
[True, True, True, False, False],
]
assert tokens["bert"]["offsets"].tolist() == [
[[1, 1], [2, 2], [3, 5], [6, 6], [7, 7]],
[[1, 3], [4, 4], [5, 5], [0, 0], [0, 0]],
]
bert_vectors = token_embedder(tokens)
assert bert_vectors.size() == (2, max(len(sentence1), len(sentence2)), 768)
assert not torch.isnan(bert_vectors).any()
def test_token_without_wordpieces(self):
token_indexer = PretrainedTransformerMismatchedIndexer("bert-base-uncased")
sentence1 = ["A", "", "AllenNLP", "sentence", "."]
sentence2 = ["AllenNLP", "", "great"]
tokens1 = [Token(word) for word in sentence1]
tokens2 = [Token(word) for word in sentence2]
vocab = Vocabulary()
params = Params(
{
"token_embedders": {
"bert": {
"type": "pretrained_transformer_mismatched",
"model_name": "bert-base-uncased",
}
}
}
)
token_embedder = BasicTextFieldEmbedder.from_params(vocab=vocab, params=params)
instance1 = Instance({"tokens": TextField(tokens1, {"bert": token_indexer})})
instance2 = Instance({"tokens": TextField(tokens2, {"bert": token_indexer})})
batch = Batch([instance1, instance2])
batch.index_instances(vocab)
padding_lengths = batch.get_padding_lengths()
tensor_dict = batch.as_tensor_dict(padding_lengths)
tokens = tensor_dict["tokens"]
assert tokens["bert"]["offsets"].tolist() == [
[[1, 1], [-1, -1], [2, 4], [5, 5], [6, 6]],
[[1, 3], [-1, -1], [4, 4], [0, 0], [0, 0]],
]
bert_vectors = token_embedder(tokens)
assert bert_vectors.size() == (2, max(len(sentence1), len(sentence2)), 768)
assert not torch.isnan(bert_vectors).any()
assert all(bert_vectors[0, 1] == 0)
assert all(bert_vectors[1, 1] == 0)
def test_exotic_tokens_no_nan_grads(self):
token_indexer = PretrainedTransformerMismatchedIndexer("bert-base-uncased")
sentence1 = ["A", "", "AllenNLP", "sentence", "."]
sentence2 = ["A", "\uf732\uf730\uf730\uf733", "AllenNLP", "sentence", "."]
tokens1 = [Token(word) for word in sentence1]
tokens2 = [Token(word) for word in sentence2]
vocab = Vocabulary()
token_embedder = BasicTextFieldEmbedder(
{"bert": PretrainedTransformerMismatchedEmbedder("bert-base-uncased")}
)
instance1 = Instance({"tokens": TextField(tokens1, {"bert": token_indexer})})
instance2 = Instance({"tokens": TextField(tokens2, {"bert": token_indexer})})
batch = Batch([instance1, instance2])
batch.index_instances(vocab)
padding_lengths = batch.get_padding_lengths()
tensor_dict = batch.as_tensor_dict(padding_lengths)
tokens = tensor_dict["tokens"]
bert_vectors = token_embedder(tokens)
test_loss = bert_vectors.mean()
test_loss.backward()
for name, param in token_embedder.named_parameters():
grad = param.grad
assert (grad is None) or (not torch.any(torch.isnan(grad)).item())
| allennlp-master | tests/modules/token_embedders/pretrained_transformer_mismatched_embedder_test.py |
allennlp-master | tests/modules/token_embedders/__init__.py |
|
import gzip
import warnings
import numpy
import pytest
import torch
from allennlp.common import Params
from allennlp.common.checks import ConfigurationError
from allennlp.common.testing import AllenNlpTestCase
from allennlp.data import Vocabulary
from allennlp.modules.token_embedders.embedding import (
_read_pretrained_embeddings_file,
Embedding,
EmbeddingsTextFile,
format_embeddings_file_uri,
parse_embeddings_file_uri,
)
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=FutureWarning)
import h5py
class TestEmbedding(AllenNlpTestCase):
def test_get_embedding_layer_uses_correct_embedding_dim(self):
vocab = Vocabulary()
vocab.add_token_to_namespace("word1")
vocab.add_token_to_namespace("word2")
embeddings_filename = str(self.TEST_DIR / "embeddings.gz")
with gzip.open(embeddings_filename, "wb") as embeddings_file:
embeddings_file.write("word1 1.0 2.3 -1.0\n".encode("utf-8"))
embeddings_file.write("word2 0.1 0.4 -4.0\n".encode("utf-8"))
embedding_weights = _read_pretrained_embeddings_file(embeddings_filename, 3, vocab)
assert tuple(embedding_weights.size()) == (4, 3) # 4 because of padding and OOV
with pytest.raises(ConfigurationError):
_read_pretrained_embeddings_file(embeddings_filename, 4, vocab)
def test_forward_works_with_projection_layer(self):
vocab = Vocabulary()
vocab.add_token_to_namespace("the")
vocab.add_token_to_namespace("a")
params = Params(
{
"pretrained_file": str(
self.FIXTURES_ROOT / "embeddings/glove.6B.300d.sample.txt.gz"
),
"embedding_dim": 300,
"projection_dim": 20,
}
)
embedding_layer = Embedding.from_params(params, vocab=vocab)
input_tensor = torch.LongTensor([[3, 2, 1, 0]])
embedded = embedding_layer(input_tensor).data.numpy()
assert embedded.shape == (1, 4, 20)
input_tensor = torch.LongTensor([[[3, 2, 1, 0]]])
embedded = embedding_layer(input_tensor).data.numpy()
assert embedded.shape == (1, 1, 4, 20)
def test_embedding_layer_actually_initializes_word_vectors_correctly(self):
vocab = Vocabulary()
vocab.add_token_to_namespace("word")
vocab.add_token_to_namespace("word2")
unicode_space = "\xa0\xa0\xa0\xa0\xa0\xa0\xa0\xa0"
vocab.add_token_to_namespace(unicode_space)
embeddings_filename = str(self.TEST_DIR / "embeddings.gz")
with gzip.open(embeddings_filename, "wb") as embeddings_file:
embeddings_file.write("word 1.0 2.3 -1.0\n".encode("utf-8"))
embeddings_file.write(f"{unicode_space} 3.4 3.3 5.0\n".encode("utf-8"))
params = Params({"pretrained_file": embeddings_filename, "embedding_dim": 3})
embedding_layer = Embedding.from_params(params, vocab=vocab)
word_vector = embedding_layer.weight.data[vocab.get_token_index("word")]
assert numpy.allclose(word_vector.numpy(), numpy.array([1.0, 2.3, -1.0]))
word_vector = embedding_layer.weight.data[vocab.get_token_index(unicode_space)]
assert numpy.allclose(word_vector.numpy(), numpy.array([3.4, 3.3, 5.0]))
word_vector = embedding_layer.weight.data[vocab.get_token_index("word2")]
assert not numpy.allclose(word_vector.numpy(), numpy.array([1.0, 2.3, -1.0]))
def test_get_embedding_layer_initializes_unseen_words_randomly_not_zero(self):
vocab = Vocabulary()
vocab.add_token_to_namespace("word")
vocab.add_token_to_namespace("word2")
embeddings_filename = str(self.TEST_DIR / "embeddings.gz")
with gzip.open(embeddings_filename, "wb") as embeddings_file:
embeddings_file.write("word 1.0 2.3 -1.0\n".encode("utf-8"))
params = Params({"pretrained_file": embeddings_filename, "embedding_dim": 3})
embedding_layer = Embedding.from_params(params, vocab=vocab)
word_vector = embedding_layer.weight.data[vocab.get_token_index("word2")]
assert not numpy.allclose(word_vector.numpy(), numpy.array([0.0, 0.0, 0.0]))
def test_read_hdf5_format_file(self):
vocab = Vocabulary()
vocab.add_token_to_namespace("word")
vocab.add_token_to_namespace("word2")
embeddings_filename = str(self.TEST_DIR / "embeddings.hdf5")
embeddings = numpy.random.rand(vocab.get_vocab_size(), 5)
with h5py.File(embeddings_filename, "w") as fout:
_ = fout.create_dataset("embedding", embeddings.shape, dtype="float32", data=embeddings)
params = Params({"pretrained_file": embeddings_filename, "embedding_dim": 5})
embedding_layer = Embedding.from_params(params, vocab=vocab)
assert numpy.allclose(embedding_layer.weight.data.numpy(), embeddings)
def test_read_hdf5_raises_on_invalid_shape(self):
vocab = Vocabulary()
vocab.add_token_to_namespace("word")
embeddings_filename = str(self.TEST_DIR / "embeddings.hdf5")
embeddings = numpy.random.rand(vocab.get_vocab_size(), 10)
with h5py.File(embeddings_filename, "w") as fout:
_ = fout.create_dataset("embedding", embeddings.shape, dtype="float32", data=embeddings)
params = Params({"pretrained_file": embeddings_filename, "embedding_dim": 5})
with pytest.raises(ConfigurationError):
_ = Embedding.from_params(params, vocab=vocab)
def test_read_embedding_file_inside_archive(self):
token2vec = {
"think": torch.Tensor([0.143, 0.189, 0.555, 0.361, 0.472]),
"make": torch.Tensor([0.878, 0.651, 0.044, 0.264, 0.872]),
"difference": torch.Tensor([0.053, 0.162, 0.671, 0.110, 0.259]),
"àèìòù": torch.Tensor([1.0, 2.0, 3.0, 4.0, 5.0]),
}
vocab = Vocabulary()
for token in token2vec:
vocab.add_token_to_namespace(token)
params = Params(
{
"pretrained_file": str(self.FIXTURES_ROOT / "embeddings/multi-file-archive.zip"),
"embedding_dim": 5,
}
)
with pytest.raises(
ValueError,
match="The archive .*/embeddings/multi-file-archive.zip contains multiple files, "
"so you must select one of the files inside "
"providing a uri of the type: "
"\\(path_or_url_to_archive\\)#path_inside_archive\\.",
):
Embedding.from_params(params, vocab=vocab)
for ext in [".zip", ".tar.gz"]:
archive_path = str(self.FIXTURES_ROOT / "embeddings/multi-file-archive") + ext
file_uri = format_embeddings_file_uri(archive_path, "folder/fake_embeddings.5d.txt")
params = Params({"pretrained_file": file_uri, "embedding_dim": 5})
embeddings = Embedding.from_params(params, vocab=vocab).weight.data
for tok, vec in token2vec.items():
i = vocab.get_token_index(tok)
assert torch.equal(embeddings[i], vec), "Problem with format " + archive_path
def test_embeddings_text_file(self):
txt_path = str(self.FIXTURES_ROOT / "utf-8_sample/utf-8_sample.txt")
# This is a known-correct way to read a utf-8 encoded text file
with open(txt_path, "rt", encoding="utf-8") as f:
correct_text = f.read()
# Check if we get the correct text on plain and compressed versions of the file
paths = [txt_path] + [txt_path + ext for ext in [".gz", ".zip"]]
for path in paths:
with EmbeddingsTextFile(path) as f:
text = f.read()
assert text == correct_text, "Test failed for file: " + path
# Check for a file contained inside an archive with multiple files
for ext in [".zip", ".tar.gz", ".tar.bz2", ".tar.lzma"]:
archive_path = str(self.FIXTURES_ROOT / "utf-8_sample/archives/utf-8") + ext
file_uri = format_embeddings_file_uri(archive_path, "folder/utf-8_sample.txt")
with EmbeddingsTextFile(file_uri) as f:
text = f.read()
assert text == correct_text, "Test failed for file: " + archive_path
# Passing a second level path when not reading an archive
with pytest.raises(ValueError):
with EmbeddingsTextFile(format_embeddings_file_uri(txt_path, "a/fake/path")):
pass
def test_embeddings_text_file_num_tokens(self):
test_filename = str(self.TEST_DIR / "temp_embeddings.vec")
def check_num_tokens(first_line, expected_num_tokens):
with open(test_filename, "w") as f:
f.write(first_line)
with EmbeddingsTextFile(test_filename) as f:
assert (
f.num_tokens == expected_num_tokens
), f"Wrong num tokens for line: {first_line}"
valid_header_lines = ["1000000 300", "300 1000000", "1000000"]
for line in valid_header_lines:
check_num_tokens(line, expected_num_tokens=1_000_000)
not_header_lines = ["hello 1", "hello 1 2", "111 222 333", "111 222 hello"]
for line in not_header_lines:
check_num_tokens(line, expected_num_tokens=None)
def test_decode_embeddings_file_uri(self):
first_level_paths = [
"path/to/embeddings.gz",
"unicode/path/òàè+ù.vec",
"http://www.embeddings.com/path/to/embeddings.gz",
"http://www.embeddings.com/àèìòù?query=blabla.zip",
]
second_level_paths = ["path/to/glove.27B.300d.vec", "òàè+ù.vec", "crawl-300d-2M.vec"]
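# format_embeddings_file_uri combines a pair as "(first_level_path)#second_level_path"
# (the same format referenced in the archive error message above); parse_embeddings_file_uri inverts it.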
for simple_path in first_level_paths:
assert parse_embeddings_file_uri(simple_path) == (simple_path, None)
for path1, path2 in zip(first_level_paths, second_level_paths):
uri = format_embeddings_file_uri(path1, path2)
decoded = parse_embeddings_file_uri(uri)
assert decoded == (path1, path2)
def test_embedding_vocab_extension_with_specified_namespace(self):
vocab = Vocabulary()
vocab.add_token_to_namespace("word1", "tokens_a")
vocab.add_token_to_namespace("word2", "tokens_a")
embedding_params = Params({"vocab_namespace": "tokens_a", "embedding_dim": 10})
embedder = Embedding.from_params(embedding_params, vocab=vocab)
original_weight = embedder.weight
assert original_weight.shape[0] == 4
extension_counter = {"tokens_a": {"word3": 1}}
vocab._extend(extension_counter)
embedder.extend_vocab(vocab, "tokens_a") # specified namespace
extended_weight = embedder.weight
assert extended_weight.shape[0] == 5
assert torch.all(extended_weight[:4, :] == original_weight[:4, :])
def test_embedding_vocab_extension_with_default_namespace(self):
vocab = Vocabulary()
vocab.add_token_to_namespace("word1")
vocab.add_token_to_namespace("word2")
embedding_params = Params({"vocab_namespace": "tokens", "embedding_dim": 10})
embedder = Embedding.from_params(embedding_params, vocab=vocab)
original_weight = embedder.weight
assert original_weight.shape[0] == 4
extension_counter = {"tokens": {"word3": 1}}
vocab._extend(extension_counter)
embedder.extend_vocab(vocab) # default namespace
extended_weight = embedder.weight
assert extended_weight.shape[0] == 5
assert torch.all(extended_weight[:4, :] == original_weight[:4, :])
def test_embedding_vocab_extension_without_stored_namespace(self):
vocab = Vocabulary()
vocab.add_token_to_namespace("word1", "tokens_a")
vocab.add_token_to_namespace("word2", "tokens_a")
embedding_params = Params({"vocab_namespace": "tokens_a", "embedding_dim": 10})
embedder = Embedding.from_params(embedding_params, vocab=vocab)
# Previous models won't have _vocab_namespace attribute. Force it to be None
embedder._vocab_namespace = None
original_weight = embedder.weight
assert original_weight.shape[0] == 4
extension_counter = {"tokens_a": {"word3": 1}}
vocab._extend(extension_counter)
embedder.extend_vocab(vocab, "tokens_a") # specified namespace
extended_weight = embedder.weight
assert extended_weight.shape[0] == 5
assert torch.all(extended_weight[:4, :] == original_weight[:4, :])
def test_embedding_vocab_extension_works_with_pretrained_embedding_file(self):
vocab = Vocabulary()
vocab.add_token_to_namespace("word1")
vocab.add_token_to_namespace("word2")
embeddings_filename = str(self.TEST_DIR / "embeddings2.gz")
with gzip.open(embeddings_filename, "wb") as embeddings_file:
embeddings_file.write("word3 0.5 0.3 -6.0\n".encode("utf-8"))
embeddings_file.write("word4 1.0 2.3 -1.0\n".encode("utf-8"))
embeddings_file.write("word2 0.1 0.4 -4.0\n".encode("utf-8"))
embeddings_file.write("word1 1.0 2.3 -1.0\n".encode("utf-8"))
embedding_params = Params(
{
"vocab_namespace": "tokens",
"embedding_dim": 3,
"pretrained_file": embeddings_filename,
}
)
embedder = Embedding.from_params(embedding_params, vocab=vocab)
# Change weight to simulate embedding training
embedder.weight.data += 1
assert torch.all(
embedder.weight[2:, :] == torch.Tensor([[2.0, 3.3, 0.0], [1.1, 1.4, -3.0]])
)
original_weight = embedder.weight
assert tuple(original_weight.size()) == (4, 3) # 4 because of padding and OOV
vocab.add_token_to_namespace("word3")
embedder.extend_vocab(
vocab, extension_pretrained_file=embeddings_filename
) # default namespace
extended_weight = embedder.weight
# Make sure extension happened for the extra token in the extended vocab
assert tuple(extended_weight.size()) == (5, 3)
# Make sure extension doesn't change original trained weights.
assert torch.all(original_weight[:4, :] == extended_weight[:4, :])
# Make sure extended weight is taken from the embedding file.
assert torch.all(extended_weight[4, :] == torch.Tensor([0.5, 0.3, -6.0]))
def test_embedding_vocab_extension_is_no_op_when_extension_should_not_happen(self):
# Case 1: When the vocab is already in sync with the embeddings, extension should be a no-op.
vocab = Vocabulary({"tokens": {"word1": 1, "word2": 1}})
embedding_params = Params({"vocab_namespace": "tokens", "embedding_dim": 10})
embedder = Embedding.from_params(embedding_params, vocab=vocab)
original_weight = embedder.weight
embedder.extend_vocab(vocab, "tokens")
assert torch.all(embedder.weight == original_weight)
# Case 2: Shouldn't wrongly assume the "tokens" namespace for extension if no
# information on vocab_namespace is available. Rather, log a warning and be a no-op.
vocab = Vocabulary()
vocab.add_token_to_namespace("word1", "tokens")
vocab.add_token_to_namespace("word2", "tokens")
embedding_params = Params({"vocab_namespace": "tokens", "embedding_dim": 10})
embedder = Embedding.from_params(embedding_params, vocab=vocab)
# Previous models won't have _vocab_namespace attribute. Force it to be None
embedder._vocab_namespace = None
embedder.weight = torch.nn.Parameter(embedder.weight[:1, :])
assert embedder.weight.shape[0] == 1
embedder.extend_vocab(vocab) # Don't specify namespace
assert embedder.weight.shape[0] == 1
def test_embedding_vocab_extension_raises_error_for_incorrect_vocab(self):
# When vocab namespace of extension vocab is smaller than embeddings
# it should raise configuration error.
vocab = Vocabulary({"tokens": {"word1": 1, "word2": 1}})
embedding_params = Params({"vocab_namespace": "tokens", "embedding_dim": 10})
embedder = Embedding.from_params(embedding_params, vocab=vocab)
with pytest.raises(ConfigurationError):
embedder.extend_vocab(Vocabulary(), "tokens")
def test_embedding_constructed_directly_with_pretrained_file(self):
vocab = Vocabulary()
vocab.add_token_to_namespace("word")
vocab.add_token_to_namespace("word2")
unicode_space = "\xa0\xa0\xa0\xa0\xa0\xa0\xa0\xa0"
vocab.add_token_to_namespace(unicode_space)
embeddings_filename = str(self.TEST_DIR / "embeddings.gz")
with gzip.open(embeddings_filename, "wb") as embeddings_file:
embeddings_file.write("word 1.0 2.3 -1.0\n".encode("utf-8"))
embeddings_file.write(f"{unicode_space} 3.4 3.3 5.0\n".encode("utf-8"))
num_embeddings = vocab.get_vocab_size()
embedding_layer = Embedding(
embedding_dim=3,
num_embeddings=num_embeddings,
pretrained_file=embeddings_filename,
vocab=vocab,
)
word_vector = embedding_layer.weight.data[vocab.get_token_index("word")]
assert numpy.allclose(word_vector.numpy(), numpy.array([1.0, 2.3, -1.0]))
word_vector = embedding_layer.weight.data[vocab.get_token_index(unicode_space)]
assert numpy.allclose(word_vector.numpy(), numpy.array([3.4, 3.3, 5.0]))
word_vector = embedding_layer.weight.data[vocab.get_token_index("word2")]
assert not numpy.allclose(word_vector.numpy(), numpy.array([1.0, 2.3, -1.0]))
| allennlp-master | tests/modules/token_embedders/embedding_test.py |
import numpy as np
import pytest
import torch
from numpy.testing import assert_almost_equal
from allennlp.common.checks import ConfigurationError
from allennlp.common.testing import AllenNlpTestCase
from allennlp.data import Vocabulary
from allennlp.modules.token_embedders import BagOfWordCountsTokenEmbedder
class TestBagOfWordCountsTokenEmbedder(AllenNlpTestCase):
def setup_method(self):
super().setup_method()
self.vocab = Vocabulary()
self.vocab.add_token_to_namespace("1")
self.vocab.add_token_to_namespace("2")
self.vocab.add_token_to_namespace("3")
self.vocab.add_token_to_namespace("4")
self.non_padded_vocab = Vocabulary(non_padded_namespaces=["tokens"])
def test_forward_calculates_bow_properly(self):
embedder = BagOfWordCountsTokenEmbedder(self.vocab)
numpy_tensor = np.array([[2, 0], [3, 0], [4, 4]])
inputs = torch.from_numpy(numpy_tensor)
embedder_output = embedder(inputs)
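# Expected output: one count vector per instance over the 6-entry vocab
# (padding, OOV, then "1"-"4"); e.g. token index 4 occurs twice in the last instance.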
numpy_tensor = np.array([[0, 0, 1, 0, 0, 0], [0, 0, 0, 1, 0, 0], [0, 0, 0, 0, 2, 0]])
manual_output = torch.from_numpy(numpy_tensor).float()
assert_almost_equal(embedder_output.data.numpy(), manual_output.data.numpy())
def test_zeros_out_unknown_tokens(self):
embedder = BagOfWordCountsTokenEmbedder(self.vocab, ignore_oov=True)
numpy_tensor = np.array([[1, 5], [2, 0], [4, 4]])
inputs = torch.from_numpy(numpy_tensor)
embedder_output = embedder(inputs)
numpy_tensor = np.array([[0, 0, 0, 0, 0, 1], [0, 0, 1, 0, 0, 0], [0, 0, 0, 0, 2, 0]])
manual_output = torch.from_numpy(numpy_tensor).float()
assert_almost_equal(embedder_output.data.numpy(), manual_output.data.numpy())
def test_ignore_oov_should_fail_on_non_padded_vocab(self):
with pytest.raises(ConfigurationError):
BagOfWordCountsTokenEmbedder(self.non_padded_vocab, ignore_oov=True)
def test_projects_properly(self):
embedder = BagOfWordCountsTokenEmbedder(vocab=self.vocab, projection_dim=50)
numpy_tensor = np.array([[1, 0], [1, 0], [4, 4]])
inputs = torch.from_numpy(numpy_tensor)
embedder_output = embedder(inputs)
assert embedder_output.shape[1] == 50
| allennlp-master | tests/modules/token_embedders/bag_of_word_counts_token_embedder_test.py |
from copy import deepcopy
import numpy
from numpy.testing import assert_almost_equal
import torch
from allennlp.common import Params
from allennlp.data import Vocabulary
from allennlp.modules import Seq2VecEncoder
from allennlp.modules.token_embedders import Embedding, TokenCharactersEncoder
from allennlp.nn import InitializerApplicator, Initializer
from allennlp.common.testing import AllenNlpTestCase
class TestTokenCharactersEncoder(AllenNlpTestCase):
def setup_method(self):
super().setup_method()
self.vocab = Vocabulary()
self.vocab.add_token_to_namespace("1", "token_characters")
self.vocab.add_token_to_namespace("2", "token_characters")
self.vocab.add_token_to_namespace("3", "token_characters")
self.vocab.add_token_to_namespace("4", "token_characters")
params = Params(
{
"embedding": {"embedding_dim": 2, "vocab_namespace": "token_characters"},
"encoder": {
"type": "cnn",
"embedding_dim": 2,
"num_filters": 4,
"ngram_filter_sizes": [1, 2],
"output_dim": 3,
},
}
)
self.encoder = TokenCharactersEncoder.from_params(vocab=self.vocab, params=deepcopy(params))
self.embedding = Embedding.from_params(vocab=self.vocab, params=params["embedding"])
self.inner_encoder = Seq2VecEncoder.from_params(params["encoder"])
constant_init = Initializer.from_params(Params({"type": "constant", "val": 1.0}))
initializer = InitializerApplicator([(".*", constant_init)])
initializer(self.encoder)
initializer(self.embedding)
initializer(self.inner_encoder)
def test_get_output_dim_uses_encoder_output_dim(self):
assert self.encoder.get_output_dim() == 3
def test_forward_applies_embedding_then_encoder(self):
numpy_tensor = numpy.random.randint(6, size=(3, 4, 7))
inputs = torch.from_numpy(numpy_tensor)
encoder_output = self.encoder(inputs)
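# Manually replicate what the encoder should do: flatten (batch, num_tokens, num_chars)
# to (batch * num_tokens, num_chars), embed and encode the characters, then reshape back.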
reshaped_input = inputs.view(12, 7)
embedded = self.embedding(reshaped_input)
mask = (inputs != 0).long().view(12, 7)
reshaped_manual_output = self.inner_encoder(embedded, mask)
manual_output = reshaped_manual_output.view(3, 4, 3)
assert_almost_equal(encoder_output.data.numpy(), manual_output.data.numpy())
| allennlp-master | tests/modules/token_embedders/token_characters_encoder_test.py |
import math
import pytest
import torch
from allennlp.common import Params
from allennlp.common.testing import AllenNlpTestCase
from allennlp.data import Vocabulary
from allennlp.data.batch import Batch
from allennlp.data.fields import TextField
from allennlp.data.instance import Instance
from allennlp.data.token_indexers import PretrainedTransformerIndexer
from allennlp.data.tokenizers import PretrainedTransformerTokenizer
from allennlp.modules.text_field_embedders import BasicTextFieldEmbedder
from allennlp.modules.token_embedders import PretrainedTransformerEmbedder
class TestPretrainedTransformerEmbedder(AllenNlpTestCase):
def test_forward_runs_when_initialized_from_params(self):
# This code just passes things off to `transformers`, so we only have a very simple
# test.
params = Params({"model_name": "bert-base-uncased"})
embedder = PretrainedTransformerEmbedder.from_params(params)
token_ids = torch.randint(0, 100, (1, 4))
mask = torch.randint(0, 2, (1, 4)).bool()
output = embedder(token_ids=token_ids, mask=mask)
assert tuple(output.size()) == (1, 4, 768)
@pytest.mark.parametrize(
"train_parameters, last_layer_only, gradient_checkpointing",
[
(train_parameters, last_layer_only, gradient_checkpointing)
for train_parameters in {True, False}
for last_layer_only in {True, False}
for gradient_checkpointing in {True, False}
if train_parameters
or not gradient_checkpointing # checkpointing only makes sense when we're actually training the layers
],
)
def test_end_to_end(
self,
train_parameters: bool,
last_layer_only: bool,
gradient_checkpointing: bool,
):
tokenizer = PretrainedTransformerTokenizer(model_name="bert-base-uncased")
token_indexer = PretrainedTransformerIndexer(model_name="bert-base-uncased")
sentence1 = "A, AllenNLP sentence."
tokens1 = tokenizer.tokenize(sentence1)
expected_tokens1 = ["[CLS]", "a", ",", "allen", "##nl", "##p", "sentence", ".", "[SEP]"]
assert [t.text for t in tokens1] == expected_tokens1
sentence2 = "AllenNLP is great"
tokens2 = tokenizer.tokenize(sentence2)
expected_tokens2 = ["[CLS]", "allen", "##nl", "##p", "is", "great", "[SEP]"]
assert [t.text for t in tokens2] == expected_tokens2
vocab = Vocabulary()
params = Params(
{
"token_embedders": {
"bert": {
"type": "pretrained_transformer",
"model_name": "bert-base-uncased",
"train_parameters": train_parameters,
"last_layer_only": last_layer_only,
"gradient_checkpointing": gradient_checkpointing,
}
}
}
)
token_embedder = BasicTextFieldEmbedder.from_params(vocab=vocab, params=params)
instance1 = Instance({"tokens": TextField(tokens1, {"bert": token_indexer})})
instance2 = Instance({"tokens": TextField(tokens2, {"bert": token_indexer})})
batch = Batch([instance1, instance2])
batch.index_instances(vocab)
padding_lengths = batch.get_padding_lengths()
tensor_dict = batch.as_tensor_dict(padding_lengths)
tokens = tensor_dict["tokens"]
max_length = max(len(tokens1), len(tokens2))
assert tokens["bert"]["token_ids"].shape == (2, max_length)
assert tokens["bert"]["mask"].tolist() == [
[True, True, True, True, True, True, True, True, True],
[True, True, True, True, True, True, True, False, False],
]
# Attention mask
bert_vectors = token_embedder(tokens)
assert bert_vectors.size() == (2, 9, 768)
assert bert_vectors.requires_grad == (train_parameters or not last_layer_only)
@pytest.mark.parametrize(
"train_parameters, last_layer_only, gradient_checkpointing",
[
(train_parameters, last_layer_only, gradient_checkpointing)
for train_parameters in {True, False}
for last_layer_only in {
True
} # Huggingface T5 is totally different in the way it returns the
# intermediate layers, and we don't support that.
for gradient_checkpointing in {True, False}
if train_parameters
or not gradient_checkpointing # checkpointing only makes sense when we're actually training the layers
],
)
def test_end_to_end_t5(
self,
train_parameters: bool,
last_layer_only: bool,
gradient_checkpointing: bool,
):
tokenizer = PretrainedTransformerTokenizer(model_name="patrickvonplaten/t5-tiny-random")
token_indexer = PretrainedTransformerIndexer(model_name="patrickvonplaten/t5-tiny-random")
sentence1 = "A, AllenNLP sentence."
tokens1 = tokenizer.tokenize(sentence1)
expected_tokens1 = ["▁A", ",", "▁Allen", "N", "LP", "▁sentence", ".", "</s>"]
assert [t.text for t in tokens1] == expected_tokens1
sentence2 = "AllenNLP is great"
tokens2 = tokenizer.tokenize(sentence2)
expected_tokens2 = ["▁Allen", "N", "LP", "▁is", "▁great", "</s>"]
assert [t.text for t in tokens2] == expected_tokens2
vocab = Vocabulary()
params = Params(
{
"token_embedders": {
"bert": {
"type": "pretrained_transformer",
"model_name": "patrickvonplaten/t5-tiny-random",
"train_parameters": train_parameters,
"last_layer_only": last_layer_only,
"gradient_checkpointing": gradient_checkpointing,
"sub_module": "encoder",
}
}
}
)
token_embedder = BasicTextFieldEmbedder.from_params(vocab=vocab, params=params)
instance1 = Instance({"tokens": TextField(tokens1, {"bert": token_indexer})})
instance2 = Instance({"tokens": TextField(tokens2, {"bert": token_indexer})})
batch = Batch([instance1, instance2])
batch.index_instances(vocab)
padding_lengths = batch.get_padding_lengths()
tensor_dict = batch.as_tensor_dict(padding_lengths)
tokens = tensor_dict["tokens"]
max_length = max(len(tokens1), len(tokens2))
assert tokens["bert"]["token_ids"].shape == (2, max_length)
assert tokens["bert"]["mask"].tolist() == [
[True, True, True, True, True, True, True, True],
[True, True, True, True, True, True, False, False],
]
# Attention mask
bert_vectors = token_embedder(tokens)
assert bert_vectors.size() == (2, 8, 64)
assert bert_vectors.requires_grad == (train_parameters or not last_layer_only)
def test_big_token_type_ids(self):
token_embedder = PretrainedTransformerEmbedder("roberta-base")
token_ids = torch.LongTensor([[1, 2, 3], [2, 3, 4]])
mask = torch.ones_like(token_ids).bool()
type_ids = torch.zeros_like(token_ids)
type_ids[1, 1] = 1
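# roberta-base defines only a single token type embedding, so a type id of 1 should be rejected.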
with pytest.raises(ValueError):
token_embedder(token_ids, mask, type_ids)
def test_xlnet_token_type_ids(self):
token_embedder = PretrainedTransformerEmbedder("xlnet-base-cased")
token_ids = torch.LongTensor([[1, 2, 3], [2, 3, 4]])
mask = torch.ones_like(token_ids).bool()
type_ids = torch.zeros_like(token_ids)
type_ids[1, 1] = 1
token_embedder(token_ids, mask, type_ids)
def test_long_sequence_splitting_end_to_end(self):
# Mostly the same as the end_to_end test (except for adding max_length=4),
# because we don't want this splitting behavior to change input/output format.
tokenizer = PretrainedTransformerTokenizer(model_name="bert-base-uncased")
token_indexer = PretrainedTransformerIndexer(model_name="bert-base-uncased", max_length=4)
sentence1 = "A, AllenNLP sentence."
tokens1 = tokenizer.tokenize(sentence1)
sentence2 = "AllenNLP is great"
tokens2 = tokenizer.tokenize(sentence2)
vocab = Vocabulary()
params = Params(
{
"token_embedders": {
"bert": {
"type": "pretrained_transformer",
"model_name": "bert-base-uncased",
"max_length": 4,
}
}
}
)
token_embedder = BasicTextFieldEmbedder.from_params(vocab=vocab, params=params)
instance1 = Instance({"tokens": TextField(tokens1, {"bert": token_indexer})})
instance2 = Instance({"tokens": TextField(tokens2, {"bert": token_indexer})})
batch = Batch([instance1, instance2])
batch.index_instances(vocab)
padding_lengths = batch.get_padding_lengths()
tensor_dict = batch.as_tensor_dict(padding_lengths)
tokens = tensor_dict["tokens"]
max_length = max(len(tokens1), len(tokens2))
# Adds n_segments * 2 special tokens
segment_concat_length = int(math.ceil(max_length / 4)) * 2 + max_length
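# With these sentences max_length is 9, so this works out to ceil(9 / 4) * 2 + 9 = 15.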
assert tokens["bert"]["token_ids"].shape == (2, segment_concat_length)
assert tokens["bert"]["mask"].tolist() == [
[True, True, True, True, True, True, True, True, True],
[True, True, True, True, True, True, True, False, False],
]
assert tokens["bert"]["segment_concat_mask"].tolist() == [
[True] * segment_concat_length,
[True] * (segment_concat_length - 4) + [False] * 4, # 4 is hard-coded length difference
]
# Attention mask
bert_vectors = token_embedder(tokens)
assert bert_vectors.size() == (2, 9, 768)
def test_fold_long_sequences(self):
# Let's just say [PAD] is 0, [CLS] is 1, and [SEP] is 2
token_ids = torch.LongTensor(
[
[1, 101, 102, 103, 104, 2, 1, 105, 106, 107, 108, 2, 1, 109, 2],
[1, 201, 202, 203, 204, 2, 1, 205, 206, 207, 208, 2, 0, 0, 0],
[1, 301, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
]
) # Shape: [3, 15]
segment_concat_mask = (token_ids > 0).long()
folded_token_ids = torch.LongTensor(
[
[1, 101, 102, 103, 104, 2],
[1, 105, 106, 107, 108, 2],
[1, 109, 2, 0, 0, 0],
[1, 201, 202, 203, 204, 2],
[1, 205, 206, 207, 208, 2],
[0, 0, 0, 0, 0, 0],
[1, 301, 2, 0, 0, 0],
[0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0],
]
)
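# i.e. each 15-token row above is split into 3 segments of at most max_length = 6,
# with the final segment padded, giving shape [3 * 3, 6].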
folded_segment_concat_mask = (folded_token_ids > 0).long()
token_embedder = PretrainedTransformerEmbedder("bert-base-uncased", max_length=6)
(
folded_token_ids_out,
folded_segment_concat_mask_out,
_,
) = token_embedder._fold_long_sequences(token_ids, segment_concat_mask)
assert (folded_token_ids_out == folded_token_ids).all()
assert (folded_segment_concat_mask_out == folded_segment_concat_mask).all()
def test_unfold_long_sequences(self):
# Let's just say [PAD] is 0, [CLS] is xxx1, and [SEP] is xxx2
# We assume embeddings are 1-dim and are the same as indices
embeddings = torch.LongTensor(
[
[1001, 101, 102, 103, 104, 1002],
[1011, 105, 106, 107, 108, 1012],
[1021, 109, 1022, 0, 0, 0],
[2001, 201, 202, 203, 204, 2002],
[2011, 205, 206, 207, 208, 2012],
[0, 0, 0, 0, 0, 0],
[3001, 301, 3002, 0, 0, 0],
[0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0],
]
).unsqueeze(-1)
mask = (embeddings > 0).long()
unfolded_embeddings = torch.LongTensor(
[
[1001, 101, 102, 103, 104, 105, 106, 107, 108, 109, 1022],
[2001, 201, 202, 203, 204, 205, 206, 207, 208, 2012, 0],
[3001, 301, 3002, 0, 0, 0, 0, 0, 0, 0, 0],
]
).unsqueeze(-1)
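# Unfolding should stitch the segments back together, keeping the first segment's
# [CLS]-position value and the last non-empty segment's [SEP]-position value while
# dropping the intermediate special positions.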
token_embedder = PretrainedTransformerEmbedder("bert-base-uncased", max_length=6)
unfolded_embeddings_out = token_embedder._unfold_long_sequences(
embeddings, mask, unfolded_embeddings.size(0), 15
)
assert (unfolded_embeddings_out == unfolded_embeddings).all()
def test_encoder_decoder_model(self):
token_embedder = PretrainedTransformerEmbedder("facebook/bart-large", sub_module="encoder")
token_ids = torch.LongTensor([[1, 2, 3], [2, 3, 4]])
mask = torch.ones_like(token_ids).bool()
token_embedder(token_ids, mask)
| allennlp-master | tests/modules/token_embedders/pretrained_transformer_embedder_test.py |
import torch
from allennlp.common import Params
from allennlp.common.testing import ModelTestCase
from allennlp.data.batch import Batch
from allennlp.modules.token_embedders import ElmoTokenEmbedder
class TestElmoTokenEmbedder(ModelTestCase):
def setup_method(self):
super().setup_method()
self.set_up_model(
self.FIXTURES_ROOT / "elmo" / "config" / "characters_token_embedder.json",
self.FIXTURES_ROOT / "data" / "conll2003.txt",
)
def test_tagger_with_elmo_token_embedder_can_train_save_and_load(self):
self.ensure_model_can_train_save_and_load(self.param_file)
def test_tagger_with_elmo_token_embedder_forward_pass_runs_correctly(self):
dataset = Batch(self.instances)
dataset.index_instances(self.vocab)
training_tensors = dataset.as_tensor_dict()
output_dict = self.model(**training_tensors)
probs = output_dict["class_probabilities"]
assert probs.size() == (2, 7, self.model.vocab.get_vocab_size("labels"))
def test_forward_works_with_projection_layer(self):
params = Params(
{
"options_file": self.FIXTURES_ROOT / "elmo" / "options.json",
"weight_file": self.FIXTURES_ROOT / "elmo" / "lm_weights.hdf5",
"projection_dim": 20,
}
)
word1 = [0] * 50
word2 = [0] * 50
word1[0] = 6
word1[1] = 5
word1[2] = 4
word1[3] = 3
word2[0] = 3
word2[1] = 2
word2[2] = 1
word2[3] = 0
embedding_layer = ElmoTokenEmbedder.from_params(vocab=None, params=params)
assert embedding_layer.get_output_dim() == 20
input_tensor = torch.LongTensor([[word1, word2]])
embedded = embedding_layer(input_tensor).data.numpy()
assert embedded.shape == (1, 2, 20)
input_tensor = torch.LongTensor([[[word1]]])
embedded = embedding_layer(input_tensor).data.numpy()
assert embedded.shape == (1, 1, 1, 20)
| allennlp-master | tests/modules/token_embedders/elmo_token_embedder_test.py |
import codecs
import gzip
import pickle
import shutil
import zipfile
from copy import deepcopy
import pytest
from allennlp.common.checks import ConfigurationError
from allennlp.common.params import Params
from allennlp.common.testing import AllenNlpTestCase
from allennlp.data import Instance, Token
from allennlp.data.batch import Batch
from allennlp.data.fields import TextField
from allennlp.data.token_indexers import SingleIdTokenIndexer, TokenCharactersIndexer
from allennlp.data.tokenizers import CharacterTokenizer
from allennlp.data.vocabulary import (
_NamespaceDependentDefaultDict,
_read_pretrained_tokens,
DEFAULT_OOV_TOKEN,
Vocabulary,
)
from allennlp.modules.token_embedders.embedding import format_embeddings_file_uri
class TestVocabulary(AllenNlpTestCase):
def setup_method(self):
token_indexer = SingleIdTokenIndexer("tokens")
text_field = TextField(
[Token(t) for t in ["a", "a", "a", "a", "b", "b", "c", "c", "c"]],
{"tokens": token_indexer},
)
self.instance = Instance({"text": text_field})
self.dataset = Batch([self.instance])
super().setup_method()
def test_pickling(self):
vocab = Vocabulary.from_instances(self.dataset)
pickled = pickle.dumps(vocab)
unpickled = pickle.loads(pickled)
assert dict(unpickled._index_to_token) == dict(vocab._index_to_token)
assert dict(unpickled._token_to_index) == dict(vocab._token_to_index)
assert unpickled._non_padded_namespaces == vocab._non_padded_namespaces
assert unpickled._oov_token == vocab._oov_token
assert unpickled._padding_token == vocab._padding_token
assert unpickled._retained_counter == vocab._retained_counter
def test_from_dataset_respects_max_vocab_size_single_int(self):
max_vocab_size = 1
vocab = Vocabulary.from_instances(self.dataset, max_vocab_size=max_vocab_size)
words = vocab.get_index_to_token_vocabulary().values()
# Additional 2 tokens are '@@PADDING@@' and '@@UNKNOWN@@' by default
assert len(words) == max_vocab_size + 2
vocab = Vocabulary.from_instances(self.dataset, min_count=None)
words = vocab.get_index_to_token_vocabulary().values()
assert len(words) == 5
def test_from_dataset_respects_min_count(self):
vocab = Vocabulary.from_instances(self.dataset, min_count={"tokens": 4})
words = vocab.get_index_to_token_vocabulary().values()
assert "a" in words
assert "b" not in words
assert "c" not in words
vocab = Vocabulary.from_instances(self.dataset, min_count=None)
words = vocab.get_index_to_token_vocabulary().values()
assert "a" in words
assert "b" in words
assert "c" in words
def test_from_dataset_respects_exclusive_embedding_file(self):
embeddings_filename = str(self.TEST_DIR / "embeddings.gz")
with gzip.open(embeddings_filename, "wb") as embeddings_file:
embeddings_file.write("a 1.0 2.3 -1.0\n".encode("utf-8"))
embeddings_file.write("b 0.1 0.4 -4.0\n".encode("utf-8"))
vocab = Vocabulary.from_instances(
self.dataset,
min_count={"tokens": 4},
pretrained_files={"tokens": embeddings_filename},
only_include_pretrained_words=True,
)
words = vocab.get_index_to_token_vocabulary().values()
assert "a" in words
assert "b" not in words
assert "c" not in words
vocab = Vocabulary.from_instances(
self.dataset,
pretrained_files={"tokens": embeddings_filename},
only_include_pretrained_words=True,
)
words = vocab.get_index_to_token_vocabulary().values()
assert "a" in words
assert "b" in words
assert "c" not in words
def test_from_dataset_respects_inclusive_embedding_file(self):
embeddings_filename = str(self.TEST_DIR / "embeddings.gz")
with gzip.open(embeddings_filename, "wb") as embeddings_file:
embeddings_file.write("a 1.0 2.3 -1.0\n".encode("utf-8"))
embeddings_file.write("b 0.1 0.4 -4.0\n".encode("utf-8"))
vocab = Vocabulary.from_instances(
self.dataset,
min_count={"tokens": 4},
pretrained_files={"tokens": embeddings_filename},
only_include_pretrained_words=False,
)
words = vocab.get_index_to_token_vocabulary().values()
assert "a" in words
assert "b" in words
assert "c" not in words
vocab = Vocabulary.from_instances(
self.dataset,
pretrained_files={"tokens": embeddings_filename},
only_include_pretrained_words=False,
)
words = vocab.get_index_to_token_vocabulary().values()
assert "a" in words
assert "b" in words
assert "c" in words
def test_add_word_to_index_gives_consistent_results(self):
vocab = Vocabulary()
initial_vocab_size = vocab.get_vocab_size()
word_index = vocab.add_token_to_namespace("word")
assert "word" in vocab.get_index_to_token_vocabulary().values()
assert vocab.get_token_index("word") == word_index
assert vocab.get_token_from_index(word_index) == "word"
assert vocab.get_vocab_size() == initial_vocab_size + 1
# Now add it again, and make sure nothing changes.
vocab.add_token_to_namespace("word")
assert "word" in vocab.get_index_to_token_vocabulary().values()
assert vocab.get_token_index("word") == word_index
assert vocab.get_token_from_index(word_index) == "word"
assert vocab.get_vocab_size() == initial_vocab_size + 1
def test_namespaces(self):
vocab = Vocabulary()
initial_vocab_size = vocab.get_vocab_size()
word_index = vocab.add_token_to_namespace("word", namespace="1")
assert "word" in vocab.get_index_to_token_vocabulary(namespace="1").values()
assert vocab.get_token_index("word", namespace="1") == word_index
assert vocab.get_token_from_index(word_index, namespace="1") == "word"
assert vocab.get_vocab_size(namespace="1") == initial_vocab_size + 1
# Now add it again, in a different namespace and a different word, and make sure it's like
# new.
word2_index = vocab.add_token_to_namespace("word2", namespace="2")
word_index = vocab.add_token_to_namespace("word", namespace="2")
assert "word" in vocab.get_index_to_token_vocabulary(namespace="2").values()
assert "word2" in vocab.get_index_to_token_vocabulary(namespace="2").values()
assert vocab.get_token_index("word", namespace="2") == word_index
assert vocab.get_token_index("word2", namespace="2") == word2_index
assert vocab.get_token_from_index(word_index, namespace="2") == "word"
assert vocab.get_token_from_index(word2_index, namespace="2") == "word2"
assert vocab.get_vocab_size(namespace="2") == initial_vocab_size + 2
def test_namespace_dependent_default_dict(self):
default_dict = _NamespaceDependentDefaultDict(["bar", "*baz"], lambda: 7, lambda: 3)
# 'foo' is not a padded namespace
assert default_dict["foo"] == 7
# "baz" is a direct match with a padded namespace
assert default_dict["baz"] == 3
# the following match the wildcard "*baz"
assert default_dict["bar"] == 3
assert default_dict["foobaz"] == 3
def test_unknown_token(self):
# We're putting this behavior in a test so that the behavior is documented. There is
# solver code that depends in a small way on how we treat the unknown token, so any
# breaking change to this behavior should break a test, so you know you've done something
# that needs more consideration.
vocab = Vocabulary()
oov_token = vocab._oov_token
oov_index = vocab.get_token_index(oov_token)
assert oov_index == 1
assert vocab.get_token_index("unseen word") == oov_index
def test_get_token_index(self):
# The behavior of get_token_index depends on whether or not the namespace has an OOV token.
vocab = Vocabulary(
counter={"labels": {"foo": 3, "bar": 2}, "tokens": {"foo": 3, "bar": 2}},
non_padded_namespaces=["labels"],
)
# Quick sanity check: this is what the token to index mappings should look like.
expected_token_to_index_dicts = {
"tokens": {vocab._padding_token: 0, vocab._oov_token: 1, "foo": 2, "bar": 3},
"labels": {"foo": 0, "bar": 1},
}
assert vocab._token_to_index["tokens"] == expected_token_to_index_dicts["tokens"]
assert vocab._token_to_index["labels"] == expected_token_to_index_dicts["labels"]
# get_token_index should return the OOV token index for OOV tokens when it can.
assert vocab.get_token_index("baz", "tokens") == 1
# get_token_index should raise helpful error message when token is OOV and there
# is no default OOV token in the namespace.
with pytest.raises(
KeyError,
match=r"'baz' not found .* and namespace does not contain the default OOV token .*",
):
vocab.get_token_index("baz", "labels")
# same should happen for the default OOV token itself, if not in namespace.
with pytest.raises(KeyError, match=rf"'{vocab._oov_token}' not found .*"):
vocab.get_token_index(vocab._oov_token, "labels")
# Now just make sure the token_to_index mappings haven't been modified
# (since we're defaultdicts we need to be a little careful here).
assert vocab._token_to_index["tokens"] == expected_token_to_index_dicts["tokens"]
assert vocab._token_to_index["labels"] == expected_token_to_index_dicts["labels"]
def test_set_from_file_reads_padded_files(self):
vocab_filename = self.TEST_DIR / "vocab_file"
with codecs.open(vocab_filename, "w", "utf-8") as vocab_file:
vocab_file.write("<S>\n")
vocab_file.write("</S>\n")
vocab_file.write("<UNK>\n")
vocab_file.write("a\n")
vocab_file.write("tricky\x0bchar\n")
vocab_file.write("word\n")
vocab_file.write("another\n")
vocab = Vocabulary()
vocab.set_from_file(vocab_filename, is_padded=True, oov_token="<UNK>")
assert vocab._oov_token == DEFAULT_OOV_TOKEN
assert vocab.get_token_index("random string") == 3
assert vocab.get_token_index("<S>") == 1
assert vocab.get_token_index("</S>") == 2
assert vocab.get_token_index(DEFAULT_OOV_TOKEN) == 3
assert vocab.get_token_index("a") == 4
assert vocab.get_token_index("tricky\x0bchar") == 5
assert vocab.get_token_index("word") == 6
assert vocab.get_token_index("another") == 7
assert vocab.get_token_from_index(0) == vocab._padding_token
assert vocab.get_token_from_index(1) == "<S>"
assert vocab.get_token_from_index(2) == "</S>"
assert vocab.get_token_from_index(3) == DEFAULT_OOV_TOKEN
assert vocab.get_token_from_index(4) == "a"
assert vocab.get_token_from_index(5) == "tricky\x0bchar"
assert vocab.get_token_from_index(6) == "word"
assert vocab.get_token_from_index(7) == "another"
def test_set_from_file_reads_non_padded_files(self):
vocab_filename = self.TEST_DIR / "vocab_file"
with codecs.open(vocab_filename, "w", "utf-8") as vocab_file:
vocab_file.write("B-PERS\n")
vocab_file.write("I-PERS\n")
vocab_file.write("O\n")
vocab_file.write("B-ORG\n")
vocab_file.write("I-ORG\n")
vocab = Vocabulary()
vocab.set_from_file(vocab_filename, is_padded=False, namespace="tags")
assert vocab.get_token_index("B-PERS", namespace="tags") == 0
assert vocab.get_token_index("I-PERS", namespace="tags") == 1
assert vocab.get_token_index("O", namespace="tags") == 2
assert vocab.get_token_index("B-ORG", namespace="tags") == 3
assert vocab.get_token_index("I-ORG", namespace="tags") == 4
assert vocab.get_token_from_index(0, namespace="tags") == "B-PERS"
assert vocab.get_token_from_index(1, namespace="tags") == "I-PERS"
assert vocab.get_token_from_index(2, namespace="tags") == "O"
assert vocab.get_token_from_index(3, namespace="tags") == "B-ORG"
assert vocab.get_token_from_index(4, namespace="tags") == "I-ORG"
def test_saving_and_loading(self):
vocab_dir = self.TEST_DIR / "vocab_save"
vocab = Vocabulary(non_padded_namespaces=["a", "c"])
vocab.add_tokens_to_namespace(
["a0", "a1", "a2"], namespace="a"
) # non-padded, should start at 0
vocab.add_tokens_to_namespace(["b2", "b3"], namespace="b") # padded, should start at 2
vocab.save_to_files(vocab_dir)
vocab2 = Vocabulary.from_files(vocab_dir)
assert vocab2._non_padded_namespaces == {"a", "c"}
# Check namespace a.
assert vocab2.get_vocab_size(namespace="a") == 3
assert vocab2.get_token_from_index(0, namespace="a") == "a0"
assert vocab2.get_token_from_index(1, namespace="a") == "a1"
assert vocab2.get_token_from_index(2, namespace="a") == "a2"
assert vocab2.get_token_index("a0", namespace="a") == 0
assert vocab2.get_token_index("a1", namespace="a") == 1
assert vocab2.get_token_index("a2", namespace="a") == 2
# Check namespace b.
assert vocab2.get_vocab_size(namespace="b") == 4 # (unk + padding + two tokens)
assert vocab2.get_token_from_index(0, namespace="b") == vocab._padding_token
assert vocab2.get_token_from_index(1, namespace="b") == vocab._oov_token
assert vocab2.get_token_from_index(2, namespace="b") == "b2"
assert vocab2.get_token_from_index(3, namespace="b") == "b3"
assert vocab2.get_token_index(vocab._padding_token, namespace="b") == 0
assert vocab2.get_token_index(vocab._oov_token, namespace="b") == 1
assert vocab2.get_token_index("b2", namespace="b") == 2
assert vocab2.get_token_index("b3", namespace="b") == 3
# Check the dictionaries containing the reverse mapping are identical.
assert vocab.get_index_to_token_vocabulary("a") == vocab2.get_index_to_token_vocabulary("a")
assert vocab.get_index_to_token_vocabulary("b") == vocab2.get_index_to_token_vocabulary("b")
def test_saving_and_loading_works_with_byte_encoding(self):
# We're going to set a vocabulary from a TextField using byte encoding, index it, save the
# vocab, load the vocab, then index the text field again, and make sure we get the same
# result.
tokenizer = CharacterTokenizer(byte_encoding="utf-8")
token_indexer = TokenCharactersIndexer(character_tokenizer=tokenizer, min_padding_length=2)
tokens = [Token(t) for t in ["Øyvind", "für", "汉字"]]
text_field = TextField(tokens, {"characters": token_indexer})
dataset = Batch([Instance({"sentence": text_field})])
vocab = Vocabulary.from_instances(dataset)
text_field.index(vocab)
indexed_tokens = deepcopy(text_field._indexed_tokens)
vocab_dir = self.TEST_DIR / "vocab_save"
vocab.save_to_files(vocab_dir)
vocab2 = Vocabulary.from_files(vocab_dir)
text_field2 = TextField(tokens, {"characters": token_indexer})
text_field2.index(vocab2)
indexed_tokens2 = deepcopy(text_field2._indexed_tokens)
assert indexed_tokens == indexed_tokens2
def test_from_params(self):
# Save a vocab to check we can load it from_params.
vocab_dir = self.TEST_DIR / "vocab_save"
vocab = Vocabulary(non_padded_namespaces=["a", "c"])
vocab.add_tokens_to_namespace(
["a0", "a1", "a2"], namespace="a"
) # non-padded, should start at 0
vocab.add_tokens_to_namespace(["b2", "b3"], namespace="b") # padded, should start at 2
vocab.save_to_files(vocab_dir)
params = Params({"type": "from_files", "directory": vocab_dir})
vocab2 = Vocabulary.from_params(params)
assert vocab.get_index_to_token_vocabulary("a") == vocab2.get_index_to_token_vocabulary("a")
assert vocab.get_index_to_token_vocabulary("b") == vocab2.get_index_to_token_vocabulary("b")
# Test case where we build a vocab from a dataset.
vocab2 = Vocabulary.from_params(Params({}), instances=self.dataset)
assert vocab2.get_index_to_token_vocabulary("tokens") == {
0: "@@PADDING@@",
1: "@@UNKNOWN@@",
2: "a",
3: "c",
4: "b",
}
# Test that from_params raises when we have neither a dataset nor a vocab directory.
with pytest.raises(ConfigurationError):
_ = Vocabulary.from_params(Params({}))
# Test from_params raises when there are any other dict keys
# present apart from 'directory' and we aren't calling from_dataset.
with pytest.raises(ConfigurationError):
_ = Vocabulary.from_params(
Params({"type": "from_files", "directory": vocab_dir, "min_count": {"tokens": 2}})
)
def test_from_params_adds_tokens_to_vocab(self):
vocab = Vocabulary.from_params(
Params({"tokens_to_add": {"tokens": ["q", "x", "z"]}}), instances=self.dataset
)
assert vocab.get_index_to_token_vocabulary("tokens") == {
0: "@@PADDING@@",
1: "@@UNKNOWN@@",
2: "a",
3: "c",
4: "b",
5: "q",
6: "x",
7: "z",
}
def test_valid_vocab_extension(self):
vocab_dir = self.TEST_DIR / "vocab_save"
# Test: padded/non-padded common namespaces are extended appropriately
non_padded_namespaces_list = [[], ["tokens"]]
for non_padded_namespaces in non_padded_namespaces_list:
original_vocab = Vocabulary(non_padded_namespaces=non_padded_namespaces)
original_vocab.add_tokens_to_namespace(["d", "a", "b"], namespace="tokens")
text_field = TextField(
[Token(t) for t in ["a", "d", "c", "e"]], {"tokens": SingleIdTokenIndexer("tokens")}
)
vocab_dir = self.TEST_DIR / "vocab_save"
shutil.rmtree(vocab_dir, ignore_errors=True)
original_vocab.save_to_files(vocab_dir)
instances = Batch([Instance({"text": text_field})])
params = Params(
{
"type": "extend",
"directory": vocab_dir,
"non_padded_namespaces": non_padded_namespaces,
}
)
extended_vocab = Vocabulary.from_params(params, instances=instances)
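# Padded namespaces reserve two extra indices (padding and OOV) before real tokens.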
extra_count = 2 if extended_vocab.is_padded("tokens") else 0
assert extended_vocab.get_token_index("d", "tokens") == 0 + extra_count
assert extended_vocab.get_token_index("a", "tokens") == 1 + extra_count
assert extended_vocab.get_token_index("b", "tokens") == 2 + extra_count
assert extended_vocab.get_token_index("c", "tokens") # should be present
assert extended_vocab.get_token_index("e", "tokens") # should be present
assert extended_vocab.get_vocab_size("tokens") == 5 + extra_count
# Test: padded/non-padded non-common namespaces are extended appropriately
non_padded_namespaces_list = [[], ["tokens1"], ["tokens1", "tokens2"]]
for non_padded_namespaces in non_padded_namespaces_list:
original_vocab = Vocabulary(non_padded_namespaces=non_padded_namespaces)
original_vocab.add_token_to_namespace("a", namespace="tokens1") # index2
text_field = TextField(
[Token(t) for t in ["b"]], {"tokens2": SingleIdTokenIndexer("tokens2")}
)
instances = Batch([Instance({"text": text_field})])
vocab_dir = self.TEST_DIR / "vocab_save"
shutil.rmtree(vocab_dir, ignore_errors=True)
original_vocab.save_to_files(vocab_dir)
params = Params(
{
"type": "extend",
"directory": vocab_dir,
"non_padded_namespaces": non_padded_namespaces,
}
)
extended_vocab = Vocabulary.from_params(params, instances=instances)
# Should have two namespaces
assert len(extended_vocab._token_to_index) == 2
extra_count = 2 if extended_vocab.is_padded("tokens1") else 0
assert extended_vocab.get_vocab_size("tokens1") == 1 + extra_count
extra_count = 2 if extended_vocab.is_padded("tokens2") else 0
assert extended_vocab.get_vocab_size("tokens2") == 1 + extra_count
def test_invalid_vocab_extension(self):
vocab_dir = self.TEST_DIR / "vocab_save"
original_vocab = Vocabulary(non_padded_namespaces=["tokens1"])
original_vocab.add_tokens_to_namespace(["a", "b"], namespace="tokens1")
original_vocab.add_token_to_namespace("p", namespace="tokens2")
original_vocab.save_to_files(vocab_dir)
text_field1 = TextField(
[Token(t) for t in ["a", "c"]], {"tokens1": SingleIdTokenIndexer("tokens1")}
)
text_field2 = TextField(
[Token(t) for t in ["p", "q", "r"]], {"tokens2": SingleIdTokenIndexer("tokens2")}
)
instances = Batch([Instance({"text1": text_field1, "text2": text_field2})])
# The following should raise an error: tokens1 is non-padded in original_vocab but would be padded in the extension
params = Params(
{
"type": "extend",
"directory": vocab_dir,
"non_padded_namespaces": [],
"tokens_to_add": {"tokens1": ["a"], "tokens2": ["p"]},
}
)
with pytest.raises(ConfigurationError):
_ = Vocabulary.from_params(params, instances=instances)
# The following should not raise an error: overlapping namespaces have the same padding setting
params = Params(
{
"type": "extend",
"directory": vocab_dir,
"non_padded_namespaces": ["tokens1"],
"tokens_to_add": {"tokens1": ["a"], "tokens2": ["p"]},
}
)
Vocabulary.from_params(params, instances=instances)
# The following should raise an error: tokens2 is padded in original_vocab but would be non-padded in the extension
params = Params(
{
"type": "extend",
"directory": vocab_dir,
"non_padded_namespaces": ["tokens1", "tokens2"],
"tokens_to_add": {"tokens1": ["a"], "tokens2": ["p"]},
}
)
with pytest.raises(ConfigurationError):
_ = Vocabulary.from_params(params, instances=instances)
def test_from_params_extend_config(self):
vocab_dir = self.TEST_DIR / "vocab_save"
original_vocab = Vocabulary(non_padded_namespaces=["tokens"])
original_vocab.add_token_to_namespace("a", namespace="tokens")
original_vocab.save_to_files(vocab_dir)
text_field = TextField(
[Token(t) for t in ["a", "b"]], {"tokens": SingleIdTokenIndexer("tokens")}
)
instances = Batch([Instance({"text": text_field})])
# If you ask to extend vocab from `directory`, instances must be passed
# in Vocabulary constructor, or else there is nothing to extend to.
params = Params({"type": "extend", "directory": vocab_dir})
with pytest.raises(ConfigurationError):
_ = Vocabulary.from_params(params)
# If you ask to extend vocab, `directory` key must be present in params,
# or else there is nothing to extend from.
params = Params({"type": "extend"})
with pytest.raises(ConfigurationError):
_ = Vocabulary.from_params(params, instances=instances)
def test_from_params_valid_vocab_extension_thoroughly(self):
"""
Tests valid vocab extension thoroughly: vocab extension is valid when
overlapping namespaces have the same padding behaviour (padded/non-padded).
Summary of namespace paddings in this test:
original_vocab namespaces
tokens0 padded
tokens1 non-padded
tokens2 padded
tokens3 non-padded
instances namespaces
tokens0 padded
tokens1 non-padded
tokens4 padded
tokens5 non-padded
Typical extension example (for the tokens1 namespace):
-> original_vocab index2token
apple #0->apple
bat #1->bat
cat #2->cat
-> Tokens to extend with: cat, an, apple, banana, atom, bat
-> extended_vocab: index2token
apple #0->apple
bat #1->bat
cat #2->cat
an #3->an
atom #4->atom
banana #5->banana
"""
vocab_dir = self.TEST_DIR / "vocab_save"
original_vocab = Vocabulary(non_padded_namespaces=["tokens1", "tokens3"])
original_vocab.add_token_to_namespace("apple", namespace="tokens0") # index:2
original_vocab.add_token_to_namespace("bat", namespace="tokens0") # index:3
original_vocab.add_token_to_namespace("cat", namespace="tokens0") # index:4
original_vocab.add_token_to_namespace("apple", namespace="tokens1") # index:0
original_vocab.add_token_to_namespace("bat", namespace="tokens1") # index:1
original_vocab.add_token_to_namespace("cat", namespace="tokens1") # index:2
original_vocab.add_token_to_namespace("a", namespace="tokens2") # index:0
original_vocab.add_token_to_namespace("b", namespace="tokens2") # index:1
original_vocab.add_token_to_namespace("c", namespace="tokens2") # index:2
original_vocab.add_token_to_namespace("p", namespace="tokens3") # index:0
original_vocab.add_token_to_namespace("q", namespace="tokens3") # index:1
original_vocab.save_to_files(vocab_dir)
text_field0 = TextField(
[Token(t) for t in ["cat", "an", "apple", "banana", "atom", "bat"]],
{"tokens0": SingleIdTokenIndexer("tokens0")},
)
text_field1 = TextField(
[Token(t) for t in ["cat", "an", "apple", "banana", "atom", "bat"]],
{"tokens1": SingleIdTokenIndexer("tokens1")},
)
text_field4 = TextField(
[Token(t) for t in ["l", "m", "n", "o"]], {"tokens4": SingleIdTokenIndexer("tokens4")}
)
text_field5 = TextField(
[Token(t) for t in ["x", "y", "z"]], {"tokens5": SingleIdTokenIndexer("tokens5")}
)
instances = Batch(
[
Instance(
{
"text0": text_field0,
"text1": text_field1,
"text4": text_field4,
"text5": text_field5,
}
)
]
)
params = Params(
{
"type": "extend",
"directory": vocab_dir,
"non_padded_namespaces": ["tokens1", "tokens5"],
}
)
extended_vocab = Vocabulary.from_params(params, instances=instances)
# Namespaces tokens0 and tokens1 are common; tokens2 and tokens3 appear only in
# the original vocab; tokens4 and tokens5 only in the instances.
extended_namespaces = {*extended_vocab._token_to_index}
assert extended_namespaces == {"tokens{}".format(i) for i in range(6)}
# Check that the _non_padded_namespaces set is consistent after extension
assert extended_vocab._non_padded_namespaces == {"tokens1", "tokens3", "tokens5"}
# original_vocab["tokens1"] has 3 tokens; the instances add 3 new ones (an, banana, atom), giving 6
assert extended_vocab.get_vocab_size("tokens1") == 6
assert extended_vocab.get_vocab_size("tokens0") == 8 # 2 extra overlapping because padded
# Namespaces tokens2 and tokens3 appear only in original_vocab,
# so their token counts should be unchanged in extended_vocab
assert extended_vocab.get_vocab_size("tokens2") == original_vocab.get_vocab_size("tokens2")
assert extended_vocab.get_vocab_size("tokens3") == original_vocab.get_vocab_size("tokens3")
# Namespaces tokens4 and tokens5 appear only in the instances,
# so their token counts in extended_vocab come entirely from the instances
assert extended_vocab.get_vocab_size("tokens4") == 6 # l,m,n,o + oov + padding
assert extended_vocab.get_vocab_size("tokens5") == 3 # x,y,z
# Word2index mapping of all words in all namespaces of original_vocab
# should be maintained in extended_vocab
for namespace, token2index in original_vocab._token_to_index.items():
for token, _ in token2index.items():
vocab_index = original_vocab.get_token_index(token, namespace)
extended_vocab_index = extended_vocab.get_token_index(token, namespace)
assert vocab_index == extended_vocab_index
# And same for Index2Word mapping
for namespace, index2token in original_vocab._index_to_token.items():
for index, _ in index2token.items():
vocab_token = original_vocab.get_token_from_index(index, namespace)
extended_vocab_token = extended_vocab.get_token_from_index(index, namespace)
assert vocab_token == extended_vocab_token
# Manual Print Check
# original_vocab._token_to_index :>
# {
# "tokens0": {"@@PADDING@@":0,"@@UNKNOWN@@":1,"apple":2,"bat":3,"cat":4},
# "tokens1": {"apple": 0,"bat":1,"cat":2},
# "tokens2": {"@@PADDING@@":0,"@@UNKNOWN@@":1,"a":2,"b":3,"c": 4},
# "tokens3": {"p":0,"q":1}
# }
# extended_vocab._token_to_index :>
# {
# "tokens0": {"@@PADDING@@": 0,"@@UNKNOWN@@": 1,
# "apple": 2,"bat": 3,"cat": 4,"an": 5,"banana": 6,"atom": 7},
# "tokens1": {"apple": 0,"bat": 1,"cat": 2,"an": 3,"banana": 4,"atom": 5},
# "tokens2": {"@@PADDING@@": 0,"@@UNKNOWN@@": 1,"a": 2,"b": 3,"c": 4},
# "tokens3": {"p": 0,"q": 1},
# "tokens4": {"@@PADDING@@": 0,"@@UNKNOWN@@": 1,"l": 2,"m": 3,"n": 4,"o": 5},
# "tokens5": {"x": 0,"y": 1,"z": 2}
# }
def test_vocab_can_print(self):
vocab = Vocabulary(non_padded_namespaces=["a", "c"])
vocab.add_tokens_to_namespace(["a0", "a1", "a2"], namespace="a")
vocab.add_tokens_to_namespace(["b2", "b3"], namespace="b")
print(vocab)
def test_read_pretrained_words(self):
# The fixture "fake_embeddings.5d.txt" was generated using the words in this random quote
words = set(
"If you think you are too small to make a difference "
"try to sleeping with a mosquito àèìòù".split(" ")
)
# Reading from a single (compressed) file or a single-file archive
base_path = str(self.FIXTURES_ROOT / "embeddings/fake_embeddings.5d.txt")
for ext in ["", ".gz", ".lzma", ".bz2", ".zip", ".tar.gz"]:
file_path = base_path + ext
words_read = set(_read_pretrained_tokens(file_path))
assert words_read == words, (
f"Wrong words for file {file_path}\n"
f" Read: {sorted(words_read)}\n"
f"Correct: {sorted(words)}"
)
# Reading from a multi-file archive
base_path = str(self.FIXTURES_ROOT / "embeddings/multi-file-archive")
file_path = "folder/fake_embeddings.5d.txt"
for ext in [".zip", ".tar.gz"]:
archive_path = base_path + ext
embeddings_file_uri = format_embeddings_file_uri(archive_path, file_path)
words_read = set(_read_pretrained_tokens(embeddings_file_uri))
assert words_read == words, (
f"Wrong words for file {archive_path}\n"
f" Read: {sorted(words_read)}\n"
f"Correct: {sorted(words)}"
)
def test_from_instances_exclusive_embeddings_file_inside_archive(self):
""" Just for ensuring there are no problems when reading pretrained tokens from an archive """
# Read embeddings file from archive
archive_path = str(self.TEST_DIR / "embeddings-archive.zip")
with zipfile.ZipFile(archive_path, "w") as archive:
file_path = "embedding.3d.vec"
with archive.open(file_path, "w") as embeddings_file:
embeddings_file.write("a 1.0 2.3 -1.0\n".encode("utf-8"))
embeddings_file.write("b 0.1 0.4 -4.0\n".encode("utf-8"))
with archive.open("dummy.vec", "w") as dummy_file:
dummy_file.write("c 1.0 2.3 -1.0 3.0\n".encode("utf-8"))
embeddings_file_uri = format_embeddings_file_uri(archive_path, file_path)
vocab = Vocabulary.from_instances(
self.dataset,
min_count={"tokens": 4},
pretrained_files={"tokens": embeddings_file_uri},
only_include_pretrained_words=True,
)
words = set(vocab.get_index_to_token_vocabulary().values())
assert "a" in words
assert "b" not in words
assert "c" not in words
vocab = Vocabulary.from_instances(
self.dataset,
pretrained_files={"tokens": embeddings_file_uri},
only_include_pretrained_words=True,
)
words = set(vocab.get_index_to_token_vocabulary().values())
assert "a" in words
assert "b" in words
assert "c" not in words
def test_registrability(self):
@Vocabulary.register("my-vocabulary", constructor="constructor")
class MyVocabulary(Vocabulary):
@classmethod
def constructor(cls):
return MyVocabulary()
params = Params({"type": "my-vocabulary"})
instance = Instance(fields={})
vocab = Vocabulary.from_params(params=params, instances=[instance])
assert isinstance(vocab, MyVocabulary)
def test_max_vocab_size_dict(self):
params = Params({"max_vocab_size": {"tokens": 1, "characters": 20}})
vocab = Vocabulary.from_params(params=params, instances=self.dataset)
words = vocab.get_index_to_token_vocabulary().values()
# Additional 2 tokens are '@@PADDING@@' and '@@UNKNOWN@@' by default
assert len(words) == 3
def test_max_vocab_size_partial_dict(self):
indexers = {
"tokens": SingleIdTokenIndexer(),
"token_characters": TokenCharactersIndexer(min_padding_length=3),
}
instance = Instance(
{
"text": TextField(
[Token(w) for w in "Abc def ghi jkl mno pqr stu vwx yz".split(" ")], indexers
)
}
)
dataset = Batch([instance])
params = Params({"max_vocab_size": {"tokens": 1}})
vocab = Vocabulary.from_params(params=params, instances=dataset)
assert len(vocab.get_index_to_token_vocabulary("tokens").values()) == 3 # 1 + 2
assert len(vocab.get_index_to_token_vocabulary("token_characters").values()) == 28 # 26 + 2
def test_min_pretrained_embeddings(self):
params = Params(
{
"pretrained_files": {
"tokens": str(self.FIXTURES_ROOT / "embeddings/glove.6B.100d.sample.txt.gz")
},
"min_pretrained_embeddings": {"tokens": 50},
}
)
vocab = Vocabulary.from_params(params=params, instances=self.dataset)
assert vocab.get_vocab_size() >= 50
assert vocab.get_token_index("his") > 1 # not @@UNKNOWN@@
def test_custom_padding_oov_tokens(self):
vocab = Vocabulary(oov_token="[UNK]")
assert vocab._oov_token == "[UNK]"
assert vocab._padding_token == "@@PADDING@@"
vocab = Vocabulary(padding_token="[PAD]")
assert vocab._oov_token == "@@UNKNOWN@@"
assert vocab._padding_token == "[PAD]"
vocab_dir = self.TEST_DIR / "vocab_save"
vocab = Vocabulary(oov_token="<UNK>")
vocab.add_tokens_to_namespace(["a0", "a1", "a2"], namespace="a")
vocab.save_to_files(vocab_dir)
params = Params({"type": "from_files", "directory": vocab_dir, "oov_token": "<UNK>"})
vocab = Vocabulary.from_params(params)
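        # Loading succeeds above only because the same custom OOV token is supplied; the saved files
        # do not contain the default "@@UNKNOWN@@", so omitting oov_token below raises an AssertionError.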
with pytest.raises(AssertionError) as excinfo:
vocab = Vocabulary.from_params(Params({"type": "from_files", "directory": vocab_dir}))
assert "OOV token not found!" in str(excinfo.value)
def test_extend_from_vocab(self):
vocab1 = Vocabulary(non_padded_namespaces={"1", "2"})
vocab2 = Vocabulary(non_padded_namespaces={"3"})
vocab1.add_tokens_to_namespace(["a", "b", "c"], namespace="1")
vocab1.add_tokens_to_namespace(["d", "e", "f"], namespace="2")
vocab2.add_tokens_to_namespace(["c", "d", "e"], namespace="1")
vocab2.add_tokens_to_namespace(["g", "h", "i"], namespace="3")
vocab1.extend_from_vocab(vocab2)
assert vocab1.get_namespaces() == {"1", "2", "3"}
assert vocab1._non_padded_namespaces == {"1", "2", "3"}
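        # Namespace "1" was padded in vocab2, so extending appears to append vocab2's entries
        # (including its @@PADDING@@/@@UNKNOWN@@) after vocab1's existing tokens, leaving vocab1's
        # original indices untouched.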
assert vocab1.get_token_to_index_vocabulary("1") == {
"a": 0,
"b": 1,
"c": 2,
"@@PADDING@@": 3,
"@@UNKNOWN@@": 4,
"d": 5,
"e": 6,
}
assert vocab1.get_token_to_index_vocabulary("2") == {"d": 0, "e": 1, "f": 2}
assert vocab1.get_token_to_index_vocabulary("3") == {"g": 0, "h": 1, "i": 2}
class TestVocabularyFromFilesWithArchive(AllenNlpTestCase):
def setup_method(self):
super().setup_method()
self.tar_archive = self.TEST_DIR / "vocab.tar.gz"
self.zip_archive = self.TEST_DIR / "vocab.zip"
self.model_archive = self.TEST_DIR / "model.tar.gz"
shutil.copyfile(
self.FIXTURES_ROOT / "data" / "vocab.tar.gz",
self.tar_archive,
)
shutil.copyfile(
self.FIXTURES_ROOT / "data" / "vocab.zip",
self.zip_archive,
)
shutil.copyfile(
self.FIXTURES_ROOT / "simple_tagger" / "serialization" / "model.tar.gz",
self.model_archive,
)
def test_from_files_with_zip_archive(self):
vocab = Vocabulary.from_files(str(self.zip_archive))
        assert vocab.get_namespaces() == {"tokens"}
assert vocab.get_token_from_index(3, namespace="tokens") == ","
def test_from_files_with_tar_archive(self):
vocab = Vocabulary.from_files(str(self.tar_archive))
        assert vocab.get_namespaces() == {"tokens"}
assert vocab.get_token_from_index(3, namespace="tokens") == ","
def test_from_files_with_model_archive(self):
vocab = Vocabulary.from_files(str(self.model_archive))
        assert vocab.get_namespaces() == {"tokens", "labels"}
assert vocab.get_token_from_index(3, namespace="tokens") == "u.n."
| allennlp-master | tests/data/vocabulary_test.py |
allennlp-master | tests/data/__init__.py |
|
import pytest
import numpy
from allennlp.common.checks import ConfigurationError
from allennlp.common.testing import AllenNlpTestCase
from allennlp.data import Instance, Token, Vocabulary
from allennlp.data.batch import Batch
from allennlp.data.fields import TextField, LabelField
from allennlp.data.token_indexers import SingleIdTokenIndexer
class TestDataset(AllenNlpTestCase):
def setup_method(self):
self.vocab = Vocabulary()
self.vocab.add_token_to_namespace("this")
self.vocab.add_token_to_namespace("is")
self.vocab.add_token_to_namespace("a")
self.vocab.add_token_to_namespace("sentence")
self.vocab.add_token_to_namespace(".")
self.token_indexer = {"tokens": SingleIdTokenIndexer()}
self.instances = self.get_instances()
super().setup_method()
def test_instances_must_have_homogeneous_fields(self):
instance1 = Instance({"tag": (LabelField(1, skip_indexing=True))})
instance2 = Instance({"words": TextField([Token("hello")], {})})
with pytest.raises(ConfigurationError):
_ = Batch([instance1, instance2])
def test_padding_lengths_uses_max_instance_lengths(self):
dataset = Batch(self.instances)
dataset.index_instances(self.vocab)
padding_lengths = dataset.get_padding_lengths()
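        # The nested keys appear to combine the token-indexer name with its padding key, joined by
        # "___"; the values are the maximum lengths over all instances in the batch.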
assert padding_lengths == {"text1": {"tokens___tokens": 5}, "text2": {"tokens___tokens": 6}}
def test_as_tensor_dict(self):
dataset = Batch(self.instances)
dataset.index_instances(self.vocab)
padding_lengths = dataset.get_padding_lengths()
tensors = dataset.as_tensor_dict(padding_lengths)
text1 = tensors["text1"]["tokens"]["tokens"].detach().cpu().numpy()
text2 = tensors["text2"]["tokens"]["tokens"].detach().cpu().numpy()
numpy.testing.assert_array_almost_equal(
text1, numpy.array([[2, 3, 4, 5, 6], [1, 3, 4, 5, 6]])
)
numpy.testing.assert_array_almost_equal(
text2, numpy.array([[2, 3, 4, 1, 5, 6], [2, 3, 1, 0, 0, 0]])
)
def get_instances(self):
field1 = TextField(
[Token(t) for t in ["this", "is", "a", "sentence", "."]], self.token_indexer
)
field2 = TextField(
[Token(t) for t in ["this", "is", "a", "different", "sentence", "."]],
self.token_indexer,
)
field3 = TextField(
[Token(t) for t in ["here", "is", "a", "sentence", "."]], self.token_indexer
)
field4 = TextField([Token(t) for t in ["this", "is", "short"]], self.token_indexer)
instances = [
Instance({"text1": field1, "text2": field2}),
Instance({"text1": field3, "text2": field4}),
]
return instances
| allennlp-master | tests/data/dataset_test.py |
from allennlp.common.testing import AllenNlpTestCase
from allennlp.data import Instance
from allennlp.data.fields import TextField, LabelField
from allennlp.data.token_indexers import PretrainedTransformerIndexer
from allennlp.data.tokenizers import Token
class TestInstance(AllenNlpTestCase):
def test_instance_implements_mutable_mapping(self):
words_field = TextField([Token("hello")], {})
label_field = LabelField(1, skip_indexing=True)
instance = Instance({"words": words_field, "labels": label_field})
assert instance["words"] == words_field
assert instance["labels"] == label_field
assert len(instance) == 2
keys = {k for k, v in instance.items()}
assert keys == {"words", "labels"}
values = [v for k, v in instance.items()]
assert words_field in values
assert label_field in values
def test_duplicate(self):
# Verify the `duplicate()` method works with a `PretrainedTransformerIndexer` in
# a `TextField`. See https://github.com/allenai/allennlp/issues/4270.
instance = Instance(
{
"words": TextField(
[Token("hello")], {"tokens": PretrainedTransformerIndexer("bert-base-uncased")}
)
}
)
other = instance.duplicate()
assert other == instance
        # Adding new fields to the original instance should not affect the duplicate.
instance.add_field("labels", LabelField("some_label"))
assert "labels" not in other.fields
assert other != instance # sanity check on the '__eq__' method.
| allennlp-master | tests/data/instance_test.py |
from typing import Iterable
import pytest
from allennlp.data.fields import LabelField
from allennlp.data.instance import Instance
from allennlp.data.dataloader import PyTorchDataLoader
from allennlp.data.dataset_readers.dataset_reader import DatasetReader
@pytest.mark.parametrize("lazy", (True, False))
def test_loader_uses_all_instances_when_batches_per_epochs_set(lazy):
NUM_INSTANCES = 20
BATCH_SIZE = 2
BATCHES_PER_EPOCH = 3
EPOCHS = 4
class FakeDatasetReader(DatasetReader):
def _read(self, filename: str) -> Iterable[Instance]:
for i in range(NUM_INSTANCES):
yield Instance({"index": LabelField(i, skip_indexing=True)})
reader = FakeDatasetReader(lazy=lazy)
dataset = reader.read("blah")
loader = PyTorchDataLoader(dataset, batch_size=BATCH_SIZE, batches_per_epoch=BATCHES_PER_EPOCH)
epoch_batches = []
for epoch in range(EPOCHS):
batches = []
for batch in loader:
instances = []
for index in batch["index"]:
instances.append(index)
batches.append(instances)
epoch_batches.append(batches)
assert epoch_batches == [
# Epoch 0.
[[0, 1], [2, 3], [4, 5]],
# Epoch 1.
[[6, 7], [8, 9], [10, 11]],
# Epoch 2.
[[12, 13], [14, 15], [16, 17]],
# Epoch 3.
[[18, 19], [0, 1], [2, 3]],
]
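    # The wrap-around in epoch 3 suggests the loader keeps a single iterator over the dataset across
    # epochs, so after all 20 instances have been served it simply starts over from the beginning.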
| allennlp-master | tests/data/dataloader_test.py |
import spacy
from allennlp.common.testing import AllenNlpTestCase
from allennlp.data.tokenizers import Token, SpacyTokenizer
class TestSpacyTokenizer(AllenNlpTestCase):
def setup_method(self):
super().setup_method()
self.word_tokenizer = SpacyTokenizer()
def test_tokenize_handles_complex_punctuation(self):
sentence = "this (sentence) has 'crazy' \"punctuation\"."
expected_tokens = [
"this",
"(",
"sentence",
")",
"has",
"'",
"crazy",
"'",
'"',
"punctuation",
'"',
".",
]
tokens = self.word_tokenizer.tokenize(sentence)
token_text = [t.text for t in tokens]
assert token_text == expected_tokens
for token in tokens:
start = token.idx
end = start + len(token.text)
assert sentence[start:end] == token.text
def test_tokenize_handles_contraction(self):
# note that "would've" is kept together, while "ain't" is not.
sentence = "it ain't joe's problem; would been yesterday"
expected_tokens = [
"it",
"ai",
"n't",
"joe",
"'s",
"problem",
";",
"would",
"been",
"yesterday",
]
tokens = [t.text for t in self.word_tokenizer.tokenize(sentence)]
assert tokens == expected_tokens
def test_tokenize_handles_multiple_contraction(self):
sentence = "wouldn't've"
expected_tokens = ["would", "n't", "'ve"]
tokens = [t.text for t in self.word_tokenizer.tokenize(sentence)]
assert tokens == expected_tokens
def test_tokenize_handles_final_apostrophe(self):
sentence = "the jones' house"
expected_tokens = ["the", "jones", "'", "house"]
tokens = [t.text for t in self.word_tokenizer.tokenize(sentence)]
assert tokens == expected_tokens
def test_tokenize_removes_whitespace_tokens(self):
sentence = "the\n jones' house \x0b 55"
expected_tokens = ["the", "jones", "'", "house", "55"]
tokens = [t.text for t in self.word_tokenizer.tokenize(sentence)]
assert tokens == expected_tokens
def test_tokenize_handles_special_cases(self):
# note that the etc. doesn't quite work --- we can special case this if we want.
sentence = "Mr. and Mrs. Jones, etc., went to, e.g., the store"
expected_tokens = [
"Mr.",
"and",
"Mrs.",
"Jones",
",",
"etc",
".",
",",
"went",
"to",
",",
"e.g.",
",",
"the",
"store",
]
tokens = [t.text for t in self.word_tokenizer.tokenize(sentence)]
assert tokens == expected_tokens
def test_batch_tokenization(self):
sentences = [
"This is a sentence",
"This isn't a sentence.",
"This is the 3rd sentence." "Here's the 'fourth' sentence.",
]
batch_split = self.word_tokenizer.batch_tokenize(sentences)
separately_split = [self.word_tokenizer.tokenize(sentence) for sentence in sentences]
assert len(batch_split) == len(separately_split)
for batch_sentence, separate_sentence in zip(batch_split, separately_split):
assert len(batch_sentence) == len(separate_sentence)
for batch_word, separate_word in zip(batch_sentence, separate_sentence):
assert batch_word.text == separate_word.text
def test_keep_spacy_tokens(self):
word_tokenizer = SpacyTokenizer()
sentence = "This should be an allennlp Token"
tokens = word_tokenizer.tokenize(sentence)
assert tokens
assert all(isinstance(token, Token) for token in tokens)
word_tokenizer = SpacyTokenizer(keep_spacy_tokens=True)
sentence = "This should be a spacy Token"
tokens = word_tokenizer.tokenize(sentence)
assert tokens
assert all(isinstance(token, spacy.tokens.Token) for token in tokens)
| allennlp-master | tests/data/tokenizers/spacy_tokenizer_test.py |
from typing import Iterable, List
from allennlp.common import Params
from allennlp.common.testing import AllenNlpTestCase
from allennlp.data import Token
from allennlp.data.tokenizers import PretrainedTransformerTokenizer
class TestPretrainedTransformerTokenizer(AllenNlpTestCase):
def test_splits_roberta(self):
tokenizer = PretrainedTransformerTokenizer("roberta-base")
sentence = "A, <mask> AllenNLP sentence."
expected_tokens = [
"<s>",
"A",
",",
"<mask>",
"ĠAllen",
"N",
"LP",
"Ġsentence",
".",
"</s>",
]
tokens = [t.text for t in tokenizer.tokenize(sentence)]
assert tokens == expected_tokens
def test_splits_cased_bert(self):
tokenizer = PretrainedTransformerTokenizer("bert-base-cased")
sentence = "A, [MASK] AllenNLP sentence."
expected_tokens = [
"[CLS]",
"A",
",",
"[MASK]",
"Allen",
"##NL",
"##P",
"sentence",
".",
"[SEP]",
]
tokens = [t.text for t in tokenizer.tokenize(sentence)]
assert tokens == expected_tokens
def test_splits_uncased_bert(self):
sentence = "A, [MASK] AllenNLP sentence."
expected_tokens = [
"[CLS]",
"a",
",",
"[MASK]",
"allen",
"##nl",
"##p",
"sentence",
".",
"[SEP]",
]
tokenizer = PretrainedTransformerTokenizer("bert-base-uncased")
tokens = [t.text for t in tokenizer.tokenize(sentence)]
assert tokens == expected_tokens
def test_splits_reformer_small(self):
sentence = "A, [MASK] AllenNLP sentence."
expected_tokens = [
"▁A",
",",
"▁",
"<unk>",
"M",
"A",
"S",
"K",
"<unk>",
"▁A",
"ll",
"en",
"N",
"L",
"P",
"▁s",
"ent",
"en",
"ce",
".",
]
tokenizer = PretrainedTransformerTokenizer("google/reformer-crime-and-punishment")
tokens = [t.text for t in tokenizer.tokenize(sentence)]
assert tokens == expected_tokens
def test_token_idx_bert_uncased(self):
sentence = "A, naïve [MASK] AllenNLP sentence."
expected_tokens = [
"[CLS]",
"a",
",",
"naive", # BERT normalizes this away
"[MASK]",
"allen",
"##nl",
"##p",
"sentence",
".",
"[SEP]",
]
expected_idxs = [None, 0, 1, 3, 9, 16, 21, 23, 25, 33, None]
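        # idx appears to be the character offset of each wordpiece's first character in the original
        # string, with None for the special tokens; note "naive" still points at offset 3 even though
        # BERT stripped the diacritic.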
tokenizer = PretrainedTransformerTokenizer("bert-base-uncased")
tokenized = tokenizer.tokenize(sentence)
tokens = [t.text for t in tokenized]
assert tokens == expected_tokens
idxs = [t.idx for t in tokenized]
assert idxs == expected_idxs
def test_token_idx_bert_cased(self):
sentence = "A, naïve [MASK] AllenNLP sentence."
expected_tokens = [
"[CLS]",
"A",
",",
"na",
"##ï",
"##ve",
"[MASK]",
"Allen",
"##NL",
"##P",
"sentence",
".",
"[SEP]",
]
expected_idxs = [None, 0, 1, 3, 5, 6, 9, 16, 21, 23, 25, 33, None]
tokenizer = PretrainedTransformerTokenizer("bert-base-cased")
tokenized = tokenizer.tokenize(sentence)
tokens = [t.text for t in tokenized]
assert tokens == expected_tokens
idxs = [t.idx for t in tokenized]
assert idxs == expected_idxs
def test_max_length(self):
tokenizer = PretrainedTransformerTokenizer(
"bert-base-cased", max_length=10, add_special_tokens=False
)
tokens = tokenizer.tokenize(
"hi there, this should be at least 10 tokens, but some will be truncated"
)
assert len(tokens) == 10
def test_no_max_length(self):
tokenizer = PretrainedTransformerTokenizer(
"bert-base-cased", max_length=None, add_special_tokens=False
)
# Even though the bert model has a max input length of 512, when we tokenize
# with `max_length = None`, we should not get any truncation.
tokens = tokenizer.tokenize(" ".join(["a"] * 550))
assert len(tokens) == 550
def test_token_idx_roberta(self):
sentence = "A, naïve <mask> AllenNLP sentence."
expected_tokens = [
"<s>",
"A",
",",
"Ġnaïve", # RoBERTa mangles this. Or maybe it "encodes"?
"<mask>",
"ĠAllen",
"N",
"LP",
"Ġsentence",
".",
"</s>",
]
expected_idxs = [None, 0, 1, 3, 9, 16, 21, 22, 25, 33, None]
tokenizer = PretrainedTransformerTokenizer("roberta-base")
tokenized = tokenizer.tokenize(sentence)
tokens = [t.text for t in tokenized]
assert tokens == expected_tokens
idxs = [t.idx for t in tokenized]
assert idxs == expected_idxs
def test_token_idx_wikipedia(self):
sentence = (
"Tokyo (東京 Tōkyō, English: /ˈtoʊkioʊ/,[7] Japanese: [toːkʲoː]), officially "
"Tokyo Metropolis (東京都 Tōkyō-to), is one of the 47 prefectures of Japan."
)
for tokenizer_name in ["roberta-base", "bert-base-uncased", "bert-base-cased"]:
tokenizer = PretrainedTransformerTokenizer(tokenizer_name)
tokenized = tokenizer.tokenize(sentence)
assert tokenized[-2].text == "."
assert tokenized[-2].idx == len(sentence) - 1
def test_intra_word_tokenize(self):
tokenizer = PretrainedTransformerTokenizer("bert-base-cased")
sentence = "A, [MASK] AllenNLP sentence.".split(" ")
expected_tokens = [
"[CLS]",
"A",
",",
"[MASK]",
"Allen",
"##NL",
"##P",
"sentence",
".",
"[SEP]",
]
expected_offsets = [(1, 2), (3, 3), (4, 6), (7, 8)]
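        # Each offset pair gives the inclusive (start, end) wordpiece indices covering one input word,
        # e.g. "AllenNLP" maps to (4, 6), spanning "Allen", "##NL", "##P".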
tokens, offsets = tokenizer.intra_word_tokenize(sentence)
tokens = [t.text for t in tokens]
assert tokens == expected_tokens
assert offsets == expected_offsets
# sentence pair
sentence_1 = "A, [MASK] AllenNLP sentence.".split(" ")
sentence_2 = "A sentence.".split(" ")
expected_tokens = [
"[CLS]",
"A",
",",
"[MASK]",
"Allen",
"##NL",
"##P",
"sentence",
".",
"[SEP]",
"A", # 10
"sentence",
".",
"[SEP]",
]
expected_offsets_a = [(1, 2), (3, 3), (4, 6), (7, 8)]
expected_offsets_b = [(10, 10), (11, 12)]
tokens, offsets_a, offsets_b = tokenizer.intra_word_tokenize_sentence_pair(
sentence_1, sentence_2
)
tokens = [t.text for t in tokens]
assert tokens == expected_tokens
assert offsets_a == expected_offsets_a
assert offsets_b == expected_offsets_b
def test_intra_word_tokenize_whitespaces(self):
tokenizer = PretrainedTransformerTokenizer("bert-base-cased")
sentence = ["A,", " ", "[MASK]", "AllenNLP", "\u007f", "sentence."]
expected_tokens = [
"[CLS]",
"A",
",",
"[MASK]",
"Allen",
"##NL",
"##P",
"sentence",
".",
"[SEP]",
]
expected_offsets = [(1, 2), None, (3, 3), (4, 6), None, (7, 8)]
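        # Whitespace-only entries (" " and "\u007f") produce no wordpieces, so their offsets come back as None.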
tokens, offsets = tokenizer.intra_word_tokenize(sentence)
tokens = [t.text for t in tokens]
assert tokens == expected_tokens
assert offsets == expected_offsets
def test_special_tokens_added(self):
def get_token_ids(tokens: Iterable[Token]) -> List[int]:
return [t.text_id for t in tokens]
def get_type_ids(tokens: Iterable[Token]) -> List[int]:
return [t.type_id for t in tokens]
tokenizer = PretrainedTransformerTokenizer("bert-base-cased")
assert get_token_ids(tokenizer.sequence_pair_start_tokens) == [101]
assert get_token_ids(tokenizer.sequence_pair_mid_tokens) == [102]
assert get_token_ids(tokenizer.sequence_pair_end_tokens) == [102]
assert get_token_ids(tokenizer.single_sequence_start_tokens) == [101]
assert get_token_ids(tokenizer.single_sequence_end_tokens) == [102]
assert get_type_ids(tokenizer.sequence_pair_start_tokens) == [0]
assert tokenizer.sequence_pair_first_token_type_id == 0
assert get_type_ids(tokenizer.sequence_pair_mid_tokens) == [0]
assert tokenizer.sequence_pair_second_token_type_id == 1
assert get_type_ids(tokenizer.sequence_pair_end_tokens) == [1]
assert get_type_ids(tokenizer.single_sequence_start_tokens) == [0]
assert tokenizer.single_sequence_token_type_id == 0
assert get_type_ids(tokenizer.single_sequence_end_tokens) == [0]
tokenizer = PretrainedTransformerTokenizer("xlnet-base-cased")
assert get_token_ids(tokenizer.sequence_pair_start_tokens) == []
assert get_token_ids(tokenizer.sequence_pair_mid_tokens) == [4]
assert get_token_ids(tokenizer.sequence_pair_end_tokens) == [4, 3]
assert get_token_ids(tokenizer.single_sequence_start_tokens) == []
assert get_token_ids(tokenizer.single_sequence_end_tokens) == [4, 3]
assert get_type_ids(tokenizer.sequence_pair_start_tokens) == []
assert tokenizer.sequence_pair_first_token_type_id == 0
assert get_type_ids(tokenizer.sequence_pair_mid_tokens) == [0]
assert tokenizer.sequence_pair_second_token_type_id == 1
assert get_type_ids(tokenizer.sequence_pair_end_tokens) == [1, 2]
assert get_type_ids(tokenizer.single_sequence_start_tokens) == []
assert tokenizer.single_sequence_token_type_id == 0
assert get_type_ids(tokenizer.single_sequence_end_tokens) == [0, 2]
def test_tokenizer_kwargs_default(self):
text = "Hello there! General Kenobi."
tokenizer = PretrainedTransformerTokenizer("bert-base-cased")
original_tokens = [
"[CLS]",
"Hello",
"there",
"!",
"General",
"Ken",
"##ob",
"##i",
".",
"[SEP]",
]
tokenized = [token.text for token in tokenizer.tokenize(text)]
assert tokenized == original_tokens
def test_from_params_kwargs(self):
PretrainedTransformerTokenizer.from_params(
Params({"model_name": "bert-base-uncased", "tokenizer_kwargs": {"max_len": 10}})
)
| allennlp-master | tests/data/tokenizers/pretrained_transformer_tokenizer_test.py |
from allennlp.common.testing import AllenNlpTestCase
from allennlp.data.tokenizers.sentence_splitter import SpacySentenceSplitter
class TestSentenceSplitter(AllenNlpTestCase):
def setup_method(self):
super().setup_method()
self.dep_parse_splitter = SpacySentenceSplitter(rule_based=False)
self.rule_based_splitter = SpacySentenceSplitter(rule_based=True)
def test_rule_based_splitter_passes_through_correctly(self):
text = "This is the first sentence. This is the second sentence! "
tokens = self.rule_based_splitter.split_sentences(text)
expected_tokens = ["This is the first sentence.", "This is the second sentence!"]
assert tokens == expected_tokens
def test_dep_parse_splitter_passes_through_correctly(self):
text = "This is the first sentence. This is the second sentence! "
tokens = self.dep_parse_splitter.split_sentences(text)
expected_tokens = ["This is the first sentence.", "This is the second sentence!"]
assert tokens == expected_tokens
def test_batch_rule_based_sentence_splitting(self):
text = [
"This is a sentence. This is a second sentence.",
"This isn't a sentence. This is a second sentence! This is a third sentence.",
]
batch_split = self.rule_based_splitter.batch_split_sentences(text)
separately_split = [self.rule_based_splitter.split_sentences(doc) for doc in text]
assert len(batch_split) == len(separately_split)
for batch_doc, separate_doc in zip(batch_split, separately_split):
assert len(batch_doc) == len(separate_doc)
for batch_sentence, separate_sentence in zip(batch_doc, separate_doc):
assert batch_sentence == separate_sentence
def test_batch_dep_parse_sentence_splitting(self):
text = [
"This is a sentence. This is a second sentence.",
"This isn't a sentence. This is a second sentence! This is a third sentence.",
]
batch_split = self.dep_parse_splitter.batch_split_sentences(text)
separately_split = [self.dep_parse_splitter.split_sentences(doc) for doc in text]
assert len(batch_split) == len(separately_split)
for batch_doc, separate_doc in zip(batch_split, separately_split):
assert len(batch_doc) == len(separate_doc)
for batch_sentence, separate_sentence in zip(batch_doc, separate_doc):
assert batch_sentence == separate_sentence
| allennlp-master | tests/data/tokenizers/sentence_splitter_test.py |
allennlp-master | tests/data/tokenizers/__init__.py |
|
from allennlp.common.testing import AllenNlpTestCase
from allennlp.data.tokenizers import CharacterTokenizer
class TestCharacterTokenizer(AllenNlpTestCase):
def test_splits_into_characters(self):
tokenizer = CharacterTokenizer(start_tokens=["<S1>", "<S2>"], end_tokens=["</S2>", "</S1>"])
sentence = "A, small sentence."
tokens = [t.text for t in tokenizer.tokenize(sentence)]
expected_tokens = [
"<S1>",
"<S2>",
"A",
",",
" ",
"s",
"m",
"a",
"l",
"l",
" ",
"s",
"e",
"n",
"t",
"e",
"n",
"c",
"e",
".",
"</S2>",
"</S1>",
]
assert tokens == expected_tokens
def test_batch_tokenization(self):
tokenizer = CharacterTokenizer()
sentences = [
"This is a sentence",
"This isn't a sentence.",
"This is the 3rd sentence." "Here's the 'fourth' sentence.",
]
batch_tokenized = tokenizer.batch_tokenize(sentences)
separately_tokenized = [tokenizer.tokenize(sentence) for sentence in sentences]
assert len(batch_tokenized) == len(separately_tokenized)
for batch_sentence, separate_sentence in zip(batch_tokenized, separately_tokenized):
assert len(batch_sentence) == len(separate_sentence)
for batch_word, separate_word in zip(batch_sentence, separate_sentence):
assert batch_word.text == separate_word.text
def test_handles_byte_encoding(self):
tokenizer = CharacterTokenizer(byte_encoding="utf-8", start_tokens=[259], end_tokens=[260])
word = "åøâáabe"
tokens = [t.text_id for t in tokenizer.tokenize(word)]
# Note that we've added one to the utf-8 encoded bytes, to account for masking.
expected_tokens = [259, 196, 166, 196, 185, 196, 163, 196, 162, 98, 99, 102, 260]
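        # Worked example: "å" is UTF-8 bytes 0xC3 0xA5 (195, 165), which become 196 and 166 after the
        # +1 shift; the start/end markers 259 and 260 sit safely above the shifted byte range.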
assert tokens == expected_tokens
| allennlp-master | tests/data/tokenizers/character_tokenizer_test.py |
from allennlp.common.testing import AllenNlpTestCase
from allennlp.data.tokenizers import Token, LettersDigitsTokenizer
class TestLettersDigitsTokenizer(AllenNlpTestCase):
def setup_method(self):
super().setup_method()
self.word_tokenizer = LettersDigitsTokenizer()
def test_tokenize_handles_complex_punctuation(self):
sentence = "this (sentence) has 'crazy' \"punctuation\"."
expected_tokens = [
"this",
"(",
"sentence",
")",
"has",
"'",
"crazy",
"'",
'"',
"punctuation",
'"',
".",
]
tokens = [t.text for t in self.word_tokenizer.tokenize(sentence)]
assert tokens == expected_tokens
def test_tokenize_handles_unicode_letters(self):
sentence = "HAL9000 and Ångström"
expected_tokens = [
Token("HAL", 0),
Token("9000", 3),
Token("and", 10),
Token("Ångström", 17),
]
tokens = self.word_tokenizer.tokenize(sentence)
assert [t.text for t in tokens] == [t.text for t in expected_tokens]
assert [t.idx for t in tokens] == [t.idx for t in expected_tokens]
def test_tokenize_handles_splits_all_punctuation(self):
sentence = "wouldn't.[have] -3.45(m^2)"
expected_tokens = [
"wouldn",
"'",
"t",
".",
"[",
"have",
"]",
"-",
"3",
".",
"45",
"(",
"m",
"^",
"2",
")",
]
tokens = [t.text for t in self.word_tokenizer.tokenize(sentence)]
assert tokens == expected_tokens
| allennlp-master | tests/data/tokenizers/letters_digits_tokenizer_test.py |
from typing import Iterable
from allennlp.common.testing import AllenNlpTestCase
from allennlp.data.dataset_readers import DatasetReader, InterleavingDatasetReader
from allennlp.data.fields import TextField
from allennlp.data.instance import Instance
from allennlp.data.token_indexers import SingleIdTokenIndexer
from allennlp.data.tokenizers import SpacyTokenizer
class PlainTextReader(DatasetReader):
def __init__(self):
super().__init__()
self._token_indexers = {"tokens": SingleIdTokenIndexer()}
self._tokenizer = SpacyTokenizer()
def _read(self, file_path: str) -> Iterable[Instance]:
with open(file_path) as input_file:
for line in input_file:
yield self.text_to_instance(line)
def text_to_instance(self, line: str) -> Instance: # type: ignore
tokens = self._tokenizer.tokenize(line)
return Instance({"line": TextField(tokens, self._token_indexers)})
class TestInterleavingDatasetReader(AllenNlpTestCase):
def test_round_robin(self):
readers = {"a": PlainTextReader(), "b": PlainTextReader(), "c": PlainTextReader()}
reader = InterleavingDatasetReader(readers)
data_dir = self.FIXTURES_ROOT / "data"
file_path = f"""{{
"a": "{data_dir / 'babi.txt'}",
"b": "{data_dir / 'conll2003.txt'}",
"c": "{data_dir / 'conll2003.txt'}"
}}"""
instances = list(reader.read(file_path))
first_three_keys = {instance.fields["dataset"].metadata for instance in instances[:3]}
assert first_three_keys == {"a", "b", "c"}
next_three_keys = {instance.fields["dataset"].metadata for instance in instances[3:6]}
assert next_three_keys == {"a", "b", "c"}
def test_all_at_once(self):
readers = {"f": PlainTextReader(), "g": PlainTextReader(), "h": PlainTextReader()}
reader = InterleavingDatasetReader(
readers, dataset_field_name="source", scheme="all_at_once"
)
data_dir = self.FIXTURES_ROOT / "data"
file_path = f"""{{
"f": "{data_dir / 'babi.txt'}",
"g": "{data_dir / 'conll2003.txt'}",
"h": "{data_dir / 'conll2003.txt'}"
}}"""
buckets = []
last_source = None
# Fill up a bucket until the source changes, then start a new one
for instance in reader.read(file_path):
source = instance.fields["source"].metadata
if source != last_source:
buckets.append([])
last_source = source
buckets[-1].append(instance)
# should be in 3 buckets
assert len(buckets) == 3
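        # With the "all_at_once" scheme, each underlying reader appears to be read to exhaustion in
        # turn, so every dataset contributes exactly one contiguous bucket.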
| allennlp-master | tests/data/dataset_readers/interleaving_dataset_reader_test.py |