python_code (string, length 0 to 187k) | repo_name (string, length 8 to 46) | file_path (string, length 6 to 135)
---|---|---|
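Each record below is one dataset row: the python_code cell printed in full, followed by its `| repo_name | file_path |` values.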
import pytest
from allennlp.common import Params
from allennlp.common.util import ensure_list
from allennlp.common.testing import AllenNlpTestCase
from allennlp_models.classification import StanfordSentimentTreeBankDatasetReader
from tests import FIXTURES_ROOT
class TestStanfordSentimentTreebankReader:
sst_path = FIXTURES_ROOT / "classification" / "sst.txt"
def test_read_from_file(self):
reader = StanfordSentimentTreeBankDatasetReader()
instances = reader.read(self.sst_path)
instances = ensure_list(instances)
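# SST sentiment labels run from "0" (very negative) to "4" (very positive) under the default 5-class granularity.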
instance1 = {"tokens": ["The", "actors", "are", "fantastic", "."], "label": "4"}
instance2 = {"tokens": ["It", "was", "terrible", "."], "label": "0"}
instance3 = {"tokens": ["Chomp", "chomp", "!"], "label": "2"}
assert len(instances) == 3
fields = instances[0].fields
assert [t.text for t in fields["tokens"].tokens] == instance1["tokens"]
assert fields["label"].label == instance1["label"]
fields = instances[1].fields
assert [t.text for t in fields["tokens"].tokens] == instance2["tokens"]
assert fields["label"].label == instance2["label"]
fields = instances[2].fields
assert [t.text for t in fields["tokens"].tokens] == instance3["tokens"]
assert fields["label"].label == instance3["label"]
def test_use_subtrees(self):
reader = StanfordSentimentTreeBankDatasetReader(use_subtrees=True)
instances = reader.read(self.sst_path)
instances = ensure_list(instances)
instance1 = {"tokens": ["The", "actors", "are", "fantastic", "."], "label": "4"}
instance2 = {"tokens": ["The", "actors"], "label": "2"}
instance3 = {"tokens": ["The"], "label": "2"}
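# With use_subtrees=True every constituent subtree of each parse tree becomes its own instance,
# so the 3 fixture sentences expand to 21 instances.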
assert len(instances) == 21
fields = instances[0].fields
assert [t.text for t in fields["tokens"].tokens] == instance1["tokens"]
assert fields["label"].label == instance1["label"]
fields = instances[1].fields
assert [t.text for t in fields["tokens"].tokens] == instance2["tokens"]
assert fields["label"].label == instance2["label"]
fields = instances[2].fields
assert [t.text for t in fields["tokens"].tokens] == instance3["tokens"]
assert fields["label"].label == instance3["label"]
def test_3_class(self):
reader = StanfordSentimentTreeBankDatasetReader(granularity="3-class")
instances = reader.read(self.sst_path)
instances = ensure_list(instances)
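# Under "3-class" granularity the five SST labels collapse to negative ("0"), neutral ("1"), and positive ("2").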
instance1 = {"tokens": ["The", "actors", "are", "fantastic", "."], "label": "2"}
instance2 = {"tokens": ["It", "was", "terrible", "."], "label": "0"}
instance3 = {"tokens": ["Chomp", "chomp", "!"], "label": "1"}
assert len(instances) == 3
fields = instances[0].fields
assert [t.text for t in fields["tokens"].tokens] == instance1["tokens"]
assert fields["label"].label == instance1["label"]
fields = instances[1].fields
assert [t.text for t in fields["tokens"].tokens] == instance2["tokens"]
assert fields["label"].label == instance2["label"]
fields = instances[2].fields
assert [t.text for t in fields["tokens"].tokens] == instance3["tokens"]
assert fields["label"].label == instance3["label"]
def test_2_class(self):
reader = StanfordSentimentTreeBankDatasetReader(granularity="2-class")
instances = reader.read(self.sst_path)
instances = ensure_list(instances)
instance1 = {"tokens": ["The", "actors", "are", "fantastic", "."], "label": "1"}
instance2 = {"tokens": ["It", "was", "terrible", "."], "label": "0"}
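# Under "2-class" granularity neutral sentences are dropped, which is why only 2 of the 3 fixture sentences survive.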
assert len(instances) == 2
fields = instances[0].fields
assert [t.text for t in fields["tokens"].tokens] == instance1["tokens"]
assert fields["label"].label == instance1["label"]
fields = instances[1].fields
assert [t.text for t in fields["tokens"].tokens] == instance2["tokens"]
assert fields["label"].label == instance2["label"]
def test_from_params(self):
params = Params({"use_subtrees": True, "granularity": "5-class"})
reader = StanfordSentimentTreeBankDatasetReader.from_params(params)
assert reader._use_subtrees is True
assert reader._granularity == "5-class"
| allennlp-models-main | tests/classification/dataset_readers/stanford_sentiment_tree_bank_test.py |
# -*- coding: utf-8 -*-
from allennlp.common.util import ensure_list
from allennlp.data.tokenizers import PretrainedTransformerTokenizer
from allennlp.data.token_indexers import PretrainedTransformerIndexer
from allennlp_models.classification import BoolQDatasetReader
from tests import FIXTURES_ROOT
class TestBoolqReader:
boolq_path = FIXTURES_ROOT / "classification" / "boolq.jsonl"
def test_boolq_dataset_reader_default_setting(self):
reader = BoolQDatasetReader()
instances = reader.read(self.boolq_path)
instances = ensure_list(instances)
assert len(instances) == 5
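# BoolQ answers are booleans; the reader encodes them as integer labels (1 for yes/true, 0 for no/false).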
fields = instances[0].fields
assert [t.text for t in fields["tokens"].tokens][:5] == [
"Persian",
"language",
"--",
"Persian",
"(/ˈpɜːrʒən,",
]
assert fields["label"].label == 1
fields = instances[1].fields
assert [t.text for t in fields["tokens"].tokens][:5] == [
"Epsom",
"railway",
"station",
"--",
"Epsom",
]
assert fields["label"].label == 0
def test_boolq_dataset_reader_roberta_setting(self):
reader = BoolQDatasetReader(
tokenizer=PretrainedTransformerTokenizer("roberta-base", add_special_tokens=False),
token_indexers={"tokens": PretrainedTransformerIndexer("roberta-base")},
)
instances = reader.read(self.boolq_path)
instances = ensure_list(instances)
assert len(instances) == 5
fields = instances[0].fields
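# RoBERTa's byte-level BPE vocabulary marks tokens that follow a space with a leading "Ġ".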
assert [t.text for t in fields["tokens"].tokens][:5] == [
"<s>",
"Pers",
"ian",
"Ġlanguage",
"Ġ--",
]
assert [t.text for t in fields["tokens"].tokens][-5:] == [
"Ġspeak",
"Ġthe",
"Ġsame",
"Ġlanguage",
"</s>",
]
assert fields["label"].label == 1
fields = instances[1].fields
assert [t.text for t in fields["tokens"].tokens][:5] == [
"<s>",
"E",
"ps",
"om",
"Ġrailway",
]
assert [t.text for t in fields["tokens"].tokens][-5:] == [
"Ġe",
"ps",
"om",
"Ġstation",
"</s>",
]
assert fields["label"].label == 0
| allennlp-models-main | tests/classification/dataset_readers/boolq.py |
from copy import deepcopy
import pytest
from allennlp.common.testing import ModelTestCase
from allennlp.common.checks import ConfigurationError
from allennlp.common.params import Params
from allennlp.models import Model
from tests import FIXTURES_ROOT
class BiattentiveClassificationNetworkTest(ModelTestCase):
def setup_method(self):
super().setup_method()
self.set_up_model(
FIXTURES_ROOT
/ "classification"
/ "biattentive_classification_network"
/ "experiment.json",
FIXTURES_ROOT / "classification" / "sst.txt",
)
def test_maxout_bcn_can_train_save_and_load(self):
self.ensure_model_can_train_save_and_load(self.param_file)
def test_feedforward_bcn_can_train_save_and_load(self):
self.ensure_model_can_train_save_and_load(
FIXTURES_ROOT
/ "classification"
/ "biattentive_classification_network"
/ "feedforward_experiment.json"
)
def test_input_and_output_elmo_bcn_can_train_save_and_load(self):
self.ensure_model_can_train_save_and_load(
FIXTURES_ROOT
/ "classification"
/ "biattentive_classification_network"
/ "elmo_experiment.json"
)
def test_output_only_elmo_bcn_can_train_save_and_load(self):
self.ensure_model_can_train_save_and_load(
FIXTURES_ROOT
/ "classification"
/ "biattentive_classification_network"
/ "output_only_elmo_experiment.json"
)
def test_batch_predictions_are_consistent(self):
self.ensure_batch_predictions_are_consistent()
def test_mismatching_dimensions_throws_configuration_error(self):
params = Params.from_file(self.param_file)
# Make the encoder wrong - it should be 2 to match
# the embedding dimension from the text_field_embedder.
params["model"]["encoder"]["input_size"] = 10
with pytest.raises(ConfigurationError):
Model.from_params(vocab=self.vocab, params=params.get("model"))
def test_no_elmo_but_set_flags_throws_configuration_error(self):
params = Params.from_file(self.param_file)
# There is no elmo specified in self.param_file, but set
# use_input_elmo and use_integrator_output_elmo to True.
# use_input_elmo set to True
tmp_params = deepcopy(params)
tmp_params["model"]["use_input_elmo"] = True
with pytest.raises(ConfigurationError):
Model.from_params(vocab=self.vocab, params=tmp_params.get("model"))
# use_integrator_output_elmo set to True
tmp_params = deepcopy(params)
tmp_params["model"]["use_input_elmo"] = False
tmp_params["model"]["use_integrator_output_elmo"] = True
with pytest.raises(ConfigurationError):
Model.from_params(vocab=self.vocab, params=tmp_params.get("model"))
# both use_input_elmo and use_integrator_output_elmo set to True
tmp_params = deepcopy(params)
tmp_params["model"]["use_input_elmo"] = True
tmp_params["model"]["use_integrator_output_elmo"] = True
with pytest.raises(ConfigurationError):
Model.from_params(vocab=self.vocab, params=tmp_params.get("model"))
def test_elmo_but_no_set_flags_throws_configuration_error(self):
params = Params.from_file(
FIXTURES_ROOT
/ "classification"
/ "biattentive_classification_network"
/ "elmo_experiment.json"
)
# Elmo is specified in the model, but set both flags to false.
params["model"]["use_input_elmo"] = False
params["model"]["use_integrator_output_elmo"] = False
with pytest.raises(ConfigurationError):
Model.from_params(vocab=self.vocab, params=params.get("model"))
def test_elmo_num_repr_set_flags_mismatch_throws_configuration_error(self):
params = Params.from_file(
FIXTURES_ROOT
/ "classification"
/ "biattentive_classification_network"
/ "elmo_experiment.json"
)
# Elmo is specified in the model, with num_output_representations=2. Set
# only one flag to true.
tmp_params = deepcopy(params)
tmp_params["model"]["use_input_elmo"] = False
with pytest.raises(ConfigurationError):
Model.from_params(vocab=self.vocab, params=tmp_params.get("model"))
tmp_params = deepcopy(params)
tmp_params["model"]["use_input_elmo"] = True
tmp_params["model"]["use_integrator_output_elmo"] = False
with pytest.raises(ConfigurationError):
Model.from_params(vocab=self.vocab, params=tmp_params.get("model"))
# set num_output_representations to 1, and set both flags to True.
tmp_params = deepcopy(params)
tmp_params["model"]["elmo"]["num_output_representations"] = 1
tmp_params["model"]["use_input_elmo"] = True
tmp_params["model"]["use_integrator_output_elmo"] = True
with pytest.raises(ConfigurationError):
Model.from_params(vocab=self.vocab, params=tmp_params.get("model"))
def test_no_elmo_tokenizer_throws_configuration_error(self):
with pytest.raises(ConfigurationError):
self.ensure_model_can_train_save_and_load(
FIXTURES_ROOT
/ "classification"
/ "biattentive_classification_network"
/ "broken_experiments"
/ "no_elmo_tokenizer_for_elmo.json"
)
def test_elmo_in_text_field_embedder_throws_configuration_error(self):
with pytest.raises(ConfigurationError):
self.ensure_model_can_train_save_and_load(
FIXTURES_ROOT
/ "classification"
/ "biattentive_classification_network"
/ "broken_experiments"
/ "elmo_in_text_field_embedder.json"
)
| allennlp-models-main | tests/classification/models/biattentive_classification_network_test.py |
| allennlp-models-main | tests/classification/models/__init__.py |
import pytest
def test_gradient_visualization():
from allennlp.predictors.predictor import Predictor
predictor = Predictor.from_path(
"https://storage.googleapis.com/allennlp-public-models/sst-roberta-large-2020.06.08.tar.gz"
)
sentence = "a very well-made, funny and entertaining picture."
inputs = {"sentence": sentence}
from allennlp.interpret.saliency_interpreters import SimpleGradient
simple_gradient_interpreter = SimpleGradient(predictor)
simple_gradient_interpretation = simple_gradient_interpreter.saliency_interpret_from_json(
inputs
)
gradients = simple_gradient_interpretation["instance_1"]["grad_input_1"]
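# SimpleGradient produces one gradient-based saliency score per input token; the assertion below
# just checks that no single token's score dwarfs the rest.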
assert max(gradients) - min(gradients) < 0.75
| allennlp-models-main | tests/classification/interpret/sst_test.py |
| allennlp-models-main | tests/classification/interpret/__init__.py |
| allennlp-models-main | tests/structured_prediction/__init__.py |
| allennlp-models-main | tests/structured_prediction/metrics/__init__.py |
import pytest
from numpy.testing import assert_allclose
from allennlp.common.testing import (
AllenNlpTestCase,
global_distributed_metric,
run_distributed_test,
)
from allennlp_models.structured_prediction.metrics.srl_eval_scorer import SrlEvalScorer
class SrlEvalScorerTest(AllenNlpTestCase):
def test_srl_eval_correctly_scores_identical_tags(self):
batch_verb_indices = [3, 8, 2, 0]
batch_sentences = [
[
"Mali",
"government",
"officials",
"say",
"the",
"woman",
"'s",
"confession",
"was",
"forced",
".",
],
[
"Mali",
"government",
"officials",
"say",
"the",
"woman",
"'s",
"confession",
"was",
"forced",
".",
],
[
"The",
"prosecution",
"rested",
"its",
"case",
"last",
"month",
"after",
"four",
"months",
"of",
"hearings",
".",
],
["Come", "in", "and", "buy", "."],
]
batch_bio_predicted_tags = [
[
"B-ARG0",
"I-ARG0",
"I-ARG0",
"B-V",
"B-ARG1",
"I-ARG1",
"I-ARG1",
"I-ARG1",
"I-ARG1",
"I-ARG1",
"O",
],
["O", "O", "O", "O", "B-ARG1", "I-ARG1", "I-ARG1", "I-ARG1", "B-V", "B-ARG2", "O"],
[
"B-ARG0",
"I-ARG0",
"B-V",
"B-ARG1",
"I-ARG1",
"B-ARGM-TMP",
"I-ARGM-TMP",
"B-ARGM-TMP",
"I-ARGM-TMP",
"I-ARGM-TMP",
"I-ARGM-TMP",
"I-ARGM-TMP",
"O",
],
["B-V", "B-AM-DIR", "O", "O", "O"],
]
from allennlp_models.structured_prediction.models.srl import (
convert_bio_tags_to_conll_format,
)
batch_conll_predicted_tags = [
convert_bio_tags_to_conll_format(tags) for tags in batch_bio_predicted_tags
]
batch_bio_gold_tags = [
[
"B-ARG0",
"I-ARG0",
"I-ARG0",
"B-V",
"B-ARG1",
"I-ARG1",
"I-ARG1",
"I-ARG1",
"I-ARG1",
"I-ARG1",
"O",
],
["O", "O", "O", "O", "B-ARG1", "I-ARG1", "I-ARG1", "I-ARG1", "B-V", "B-ARG2", "O"],
[
"B-ARG0",
"I-ARG0",
"B-V",
"B-ARG1",
"I-ARG1",
"B-ARGM-TMP",
"I-ARGM-TMP",
"B-ARGM-TMP",
"I-ARGM-TMP",
"I-ARGM-TMP",
"I-ARGM-TMP",
"I-ARGM-TMP",
"O",
],
["B-V", "B-AM-DIR", "O", "O", "O"],
]
batch_conll_gold_tags = [
convert_bio_tags_to_conll_format(tags) for tags in batch_bio_gold_tags
]
srl_scorer = SrlEvalScorer(ignore_classes=["V"])
srl_scorer(
batch_verb_indices, batch_sentences, batch_conll_predicted_tags, batch_conll_gold_tags
)
metrics = srl_scorer.get_metric()
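# 5 argument labels (ARG0, ARG1, ARG2, ARGM-TMP, AM-DIR) x {precision, recall, f1} plus 3 overall metrics = 18.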
assert len(metrics) == 18
assert_allclose(metrics["precision-ARG0"], 1.0)
assert_allclose(metrics["recall-ARG0"], 1.0)
assert_allclose(metrics["f1-measure-ARG0"], 1.0)
assert_allclose(metrics["precision-ARG1"], 1.0)
assert_allclose(metrics["recall-ARG1"], 1.0)
assert_allclose(metrics["f1-measure-ARG1"], 1.0)
assert_allclose(metrics["precision-ARG2"], 1.0)
assert_allclose(metrics["recall-ARG2"], 1.0)
assert_allclose(metrics["f1-measure-ARG2"], 1.0)
assert_allclose(metrics["precision-ARGM-TMP"], 1.0)
assert_allclose(metrics["recall-ARGM-TMP"], 1.0)
assert_allclose(metrics["f1-measure-ARGM-TMP"], 1.0)
assert_allclose(metrics["precision-AM-DIR"], 1.0)
assert_allclose(metrics["recall-AM-DIR"], 1.0)
assert_allclose(metrics["f1-measure-AM-DIR"], 1.0)
assert_allclose(metrics["precision-overall"], 1.0)
assert_allclose(metrics["recall-overall"], 1.0)
assert_allclose(metrics["f1-measure-overall"], 1.0)
def test_span_metrics_are_computed_correctly(self):
from allennlp_models.structured_prediction.models.srl import (
convert_bio_tags_to_conll_format,
)
batch_verb_indices = [2]
batch_sentences = [["The", "cat", "loves", "hats", "."]]
batch_bio_predicted_tags = [["B-ARG0", "B-ARG1", "B-V", "B-ARG1", "O"]]
batch_conll_predicted_tags = [
convert_bio_tags_to_conll_format(tags) for tags in batch_bio_predicted_tags
]
batch_bio_gold_tags = [["B-ARG0", "I-ARG0", "B-V", "B-ARG1", "O"]]
batch_conll_gold_tags = [
convert_bio_tags_to_conll_format(tags) for tags in batch_bio_gold_tags
]
srl_scorer = SrlEvalScorer(ignore_classes=["V"])
srl_scorer(
batch_verb_indices, batch_sentences, batch_conll_predicted_tags, batch_conll_gold_tags
)
metrics = srl_scorer.get_metric()
assert len(metrics) == 9
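# Predicted non-V spans: ARG0 (0,0), ARG1 (1,1), ARG1 (3,3); gold non-V spans: ARG0 (0,1), ARG1 (3,3).
# Only the ARG1 span (3,3) matches, giving precision 1/3, recall 1/2, and overall F1 = 0.4.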
assert_allclose(metrics["precision-ARG0"], 0.0)
assert_allclose(metrics["recall-ARG0"], 0.0)
assert_allclose(metrics["f1-measure-ARG0"], 0.0)
assert_allclose(metrics["precision-ARG1"], 0.5)
assert_allclose(metrics["recall-ARG1"], 1.0)
assert_allclose(metrics["f1-measure-ARG1"], 2 / 3)
assert_allclose(metrics["precision-overall"], 1 / 3)
assert_allclose(metrics["recall-overall"], 1 / 2)
assert_allclose(
metrics["f1-measure-overall"], (2 * (1 / 3) * (1 / 2)) / ((1 / 3) + (1 / 2))
)
def test_distributed_setting_throws_an_error(self):
from allennlp_models.structured_prediction.models.srl import (
convert_bio_tags_to_conll_format,
)
batch_verb_indices = [2]
batch_sentences = [["The", "cat", "loves", "hats", "."]]
batch_bio_predicted_tags = [["B-ARG0", "B-ARG1", "B-V", "B-ARG1", "O"]]
batch_conll_predicted_tags = [
convert_bio_tags_to_conll_format(tags) for tags in batch_bio_predicted_tags
]
batch_bio_gold_tags = [["B-ARG0", "I-ARG0", "B-V", "B-ARG1", "O"]]
batch_conll_gold_tags = [
convert_bio_tags_to_conll_format(tags) for tags in batch_bio_gold_tags
]
metric_kwargs = {
"batch_verb_indices": [batch_verb_indices, batch_verb_indices],
"batch_sentences": [batch_sentences, batch_sentences],
"batch_conll_formatted_predicted_tags": [
batch_conll_predicted_tags,
batch_conll_predicted_tags,
],
"batch_conll_formatted_gold_tags": [batch_conll_gold_tags, batch_conll_gold_tags],
}
desired_values = {} # it does not matter, we expect the run to fail.
with pytest.raises(Exception) as exc:
run_distributed_test(
[-1, -1],
global_distributed_metric,
SrlEvalScorer(ignore_classes=["V"]),
metric_kwargs,
desired_values,
exact=True,
)
assert (
"RuntimeError: Distributed aggregation for `SrlEvalScorer` is currently not supported."
in str(exc.value)
)
| allennlp-models-main | tests/structured_prediction/metrics/srl_eval_scorer_test.py |
import os
import subprocess
import torch
from allennlp.data import Vocabulary
from torch.testing import assert_allclose
from allennlp.common.testing import AllenNlpTestCase, multi_device
from allennlp.training.metrics import SpanBasedF1Measure
from tests import PROJECT_ROOT
class SpanBasedF1Test(AllenNlpTestCase):
def setup_method(self):
super().setup_method()
vocab = Vocabulary()
vocab.add_token_to_namespace("O", "tags")
vocab.add_token_to_namespace("B-ARG1", "tags")
vocab.add_token_to_namespace("I-ARG1", "tags")
vocab.add_token_to_namespace("B-ARG2", "tags")
vocab.add_token_to_namespace("I-ARG2", "tags")
vocab.add_token_to_namespace("B-V", "tags")
vocab.add_token_to_namespace("I-V", "tags")
vocab.add_token_to_namespace("U-ARG1", "tags")
vocab.add_token_to_namespace("U-ARG2", "tags")
vocab.add_token_to_namespace("B-C-ARG1", "tags")
vocab.add_token_to_namespace("I-C-ARG1", "tags")
vocab.add_token_to_namespace("B-ARGM-ADJ", "tags")
vocab.add_token_to_namespace("I-ARGM-ADJ", "tags")
# BMES.
vocab.add_token_to_namespace("B", "bmes_tags")
vocab.add_token_to_namespace("M", "bmes_tags")
vocab.add_token_to_namespace("E", "bmes_tags")
vocab.add_token_to_namespace("S", "bmes_tags")
self.vocab = vocab
@multi_device
def test_span_f1_matches_perl_script_for_continued_arguments(self, device: str):
bio_tags = ["B-ARG1", "O", "B-C-ARG1", "B-V", "B-ARGM-ADJ", "O"]
sentence = ["Mark", "and", "Matt", "were", "running", "fast", "."]
gold_indices = [self.vocab.get_token_index(x, "tags") for x in bio_tags]
gold_tensor = torch.tensor([gold_indices], device=device)
prediction_tensor = torch.rand([1, 6, self.vocab.get_vocab_size("tags")], device=device)
mask = torch.tensor([[True, True, True, True, True, True, True, True, True]], device=device)
# Make prediction so that it is exactly correct.
for i, tag_index in enumerate(gold_indices):
prediction_tensor[0, i, tag_index] = 1
metric = SpanBasedF1Measure(self.vocab, "tags")
metric(prediction_tensor, gold_tensor, mask)
metric_dict = metric.get_metric()
# We merged the continued ARG1 label into a single span, so there should
# be exactly 1 true positive for ARG1 and nothing present for C-ARG1
assert metric._true_positives["ARG1"] == 1
# The labels containing continuation references get merged into
# the labels that they continue, so they should never appear in
# the precision/recall counts.
assert "C-ARG1" not in metric._true_positives.keys()
assert metric._true_positives["V"] == 1
assert metric._true_positives["ARGM-ADJ"] == 1
assert_allclose(metric_dict["recall-ARG1"], 1.0)
assert_allclose(metric_dict["precision-ARG1"], 1.0)
assert_allclose(metric_dict["f1-measure-ARG1"], 1.0)
assert_allclose(metric_dict["recall-V"], 1.0)
assert_allclose(metric_dict["precision-V"], 1.0)
assert_allclose(metric_dict["f1-measure-V"], 1.0)
assert_allclose(metric_dict["recall-ARGM-ADJ"], 1.0)
assert_allclose(metric_dict["precision-ARGM-ADJ"], 1.0)
assert_allclose(metric_dict["f1-measure-ARGM-ADJ"], 1.0)
assert_allclose(metric_dict["recall-overall"], 1.0)
assert_allclose(metric_dict["precision-overall"], 1.0)
assert_allclose(metric_dict["f1-measure-overall"], 1.0)
# Check that the number of true positive ARG1 labels is the same as the perl script's output:
gold_file_path = os.path.join(self.TEST_DIR, "gold_conll_eval.txt")
prediction_file_path = os.path.join(self.TEST_DIR, "prediction_conll_eval.txt")
with open(gold_file_path, "w") as gold_file, open(
prediction_file_path, "w"
) as prediction_file:
# Use the same bio tags as prediction vs gold to make it obvious by looking
# at the perl script output if something is wrong.
from allennlp_models.structured_prediction.models.srl import (
write_bio_formatted_tags_to_file,
)
write_bio_formatted_tags_to_file(
gold_file, prediction_file, 4, sentence, bio_tags, bio_tags
)
# Run the official perl script and collect stdout.
perl_script_command = [
"perl",
str(
PROJECT_ROOT / "allennlp_models" / "structured_prediction" / "tools" / "srl-eval.pl"
),
prediction_file_path,
gold_file_path,
]
stdout = subprocess.check_output(perl_script_command, universal_newlines=True)
stdout_lines = stdout.split("\n")
# Parse the stdout of the perl script to find the ARG1 row (this happens to be line 8).
num_correct_arg1_instances_from_perl_evaluation = int(
[token for token in stdout_lines[8].split(" ") if token][1]
)
assert num_correct_arg1_instances_from_perl_evaluation == metric._true_positives["ARG1"]
| allennlp-models-main | tests/structured_prediction/metrics/span_based_f1_test.py |
| allennlp-models-main | tests/structured_prediction/dataset_readers/__init__.py |
from nltk.tree import Tree
from allennlp.common.testing import AllenNlpTestCase
from allennlp.data.dataset_readers.dataset_utils.span_utils import enumerate_spans
from allennlp_models.structured_prediction.dataset_readers.penn_tree_bank import (
PennTreeBankConstituencySpanDatasetReader,
)
from tests import FIXTURES_ROOT
class TestPennTreeBankConstituencySpanReader(AllenNlpTestCase):
def setup_method(self):
super().setup_method()
self.span_width = 5
def test_read_from_file(self):
ptb_reader = PennTreeBankConstituencySpanDatasetReader()
instances = list(
ptb_reader.read(str(FIXTURES_ROOT / "structured_prediction" / "example_ptb.trees"))
)
assert len(instances) == 2
fields = instances[0].fields
tokens = [x.text for x in fields["tokens"].tokens]
pos_tags = fields["pos_tags"].labels
spans = [(x.span_start, x.span_end) for x in fields["spans"].field_list]
span_labels = fields["span_labels"].labels
assert tokens == [
"Also",
",",
"because",
"UAL",
"Chairman",
"Stephen",
"Wolf",
"and",
"other",
"UAL",
"executives",
"have",
"joined",
"the",
"pilots",
"'",
"bid",
",",
"the",
"board",
"might",
"be",
"forced",
"to",
"exclude",
"him",
"from",
"its",
"deliberations",
"in",
"order",
"to",
"be",
"fair",
"to",
"other",
"bidders",
".",
]
assert pos_tags == [
"RB",
",",
"IN",
"NNP",
"NNP",
"NNP",
"NNP",
"CC",
"JJ",
"NNP",
"NNS",
"VBP",
"VBN",
"DT",
"NNS",
"POS",
"NN",
",",
"DT",
"NN",
"MD",
"VB",
"VBN",
"TO",
"VB",
"PRP",
"IN",
"PRP$",
"NNS",
"IN",
"NN",
"TO",
"VB",
"JJ",
"TO",
"JJ",
"NNS",
".",
]
assert spans == enumerate_spans(tokens)
gold_tree = Tree.fromstring(
"(S(ADVP(RB Also))(, ,)(SBAR(IN because)"
"(S(NP(NP(NNP UAL)(NNP Chairman)(NNP Stephen)(NNP Wolf))"
"(CC and)(NP(JJ other)(NNP UAL)(NNS executives)))(VP(VBP have)"
"(VP(VBN joined)(NP(NP(DT the)(NNS pilots)(POS '))(NN bid))))))"
"(, ,)(NP(DT the)(NN board))(VP(MD might)(VP(VB be)(VP(VBN "
"forced)(S(VP(TO to)(VP(VB exclude)(NP(PRP him))(PP(IN from)"
"(NP(PRP$ its)(NNS deliberations)))(SBAR(IN in)(NN order)(S("
"VP(TO to)(VP(VB be)(ADJP(JJ fair)(PP(TO to)(NP(JJ other)(NNS "
"bidders))))))))))))))(. .))"
)
assert fields["metadata"].metadata["gold_tree"] == gold_tree
assert fields["metadata"].metadata["tokens"] == tokens
correct_spans_and_labels = {}
ptb_reader._get_gold_spans(gold_tree, 0, correct_spans_and_labels)
for span, label in zip(spans, span_labels):
if label != "NO-LABEL":
assert correct_spans_and_labels[span] == label
fields = instances[1].fields
tokens = [x.text for x in fields["tokens"].tokens]
pos_tags = fields["pos_tags"].labels
spans = [(x.span_start, x.span_end) for x in fields["spans"].field_list]
span_labels = fields["span_labels"].labels
assert tokens == [
"That",
"could",
"cost",
"him",
"the",
"chance",
"to",
"influence",
"the",
"outcome",
"and",
"perhaps",
"join",
"the",
"winning",
"bidder",
".",
]
assert pos_tags == [
"DT",
"MD",
"VB",
"PRP",
"DT",
"NN",
"TO",
"VB",
"DT",
"NN",
"CC",
"RB",
"VB",
"DT",
"VBG",
"NN",
".",
]
assert spans == enumerate_spans(tokens)
gold_tree = Tree.fromstring(
"(S(NP(DT That))(VP(MD could)(VP(VB cost)(NP(PRP him))"
"(NP(DT the)(NN chance)(S(VP(TO to)(VP(VP(VB influence)(NP(DT the)"
"(NN outcome)))(CC and)(VP(ADVP(RB perhaps))(VB join)(NP(DT the)"
"(VBG winning)(NN bidder)))))))))(. .))"
)
assert fields["metadata"].metadata["gold_tree"] == gold_tree
assert fields["metadata"].metadata["tokens"] == tokens
correct_spans_and_labels = {}
ptb_reader._get_gold_spans(gold_tree, 0, correct_spans_and_labels)
for span, label in zip(spans, span_labels):
if label != "NO-LABEL":
assert correct_spans_and_labels[span] == label
def test_strip_functional_tags(self):
ptb_reader = PennTreeBankConstituencySpanDatasetReader()
# Get gold spans should strip off all the functional tags.
tree = Tree.fromstring(
"(S (NP=PRP (D the) (N dog)) (VP-0 (V chased) (NP|FUN-TAGS (D the) (N cat))))"
)
ptb_reader._strip_functional_tags(tree)
assert tree == Tree.fromstring(
"(S (NP (D the) (N dog)) (VP (V chased) (NP (D the) (N cat))))"
)
def test_get_gold_spans_correctly_extracts_spans(self):
ptb_reader = PennTreeBankConstituencySpanDatasetReader()
tree = Tree.fromstring("(S (NP (D the) (N dog)) (VP (V chased) (NP (D the) (N cat))))")
span_dict = {}
ptb_reader._get_gold_spans(tree, 0, span_dict)
spans = list(span_dict.items())
assert spans == [((0, 1), "NP"), ((3, 4), "NP"), ((2, 4), "VP"), ((0, 4), "S")]
def test_get_gold_spans_correctly_extracts_spans_with_nested_labels(self):
ptb_reader = PennTreeBankConstituencySpanDatasetReader()
# Here we have a parse with several nested labels - particularly the (WHNP (WHNP (WP What)))
# fragment. These should be concatenated into a single label by get_gold_spans.
tree = Tree.fromstring(
"""
(S
(` ``)
(S-TPC
(NP-SBJ (PRP We))
(VP
(VBP have)
(S
(VP
(TO to)
(VP
(VP
(VB clear)
(PRT (RP up))
(NP (DT these) (NNS issues)))
(CC and)
(VP
(VB find)
(PRT (RP out))
(SBAR-NOM
(WHNP (WHNP (WP what)))
(S
(VP
(VBZ is)
(ADJP-PRD (JJ present))
(SBAR
(WHNP (WDT that))
(S
(VP
(VBZ is)
(VP
(VBG creating)
(NP (JJ artificial) (NN volatility)))))))))))))))
(, ,)
('' '')
(NP-SBJ (NNP Mr.) (NNP Fisher))
(VP (VBD said))
(. .))
"""
)
span_dict = {}
ptb_reader._strip_functional_tags(tree)
ptb_reader._get_gold_spans(tree, 0, span_dict)
assert span_dict == {
(1, 1): "NP",
(5, 5): "PRT",
(6, 7): "NP",
(4, 7): "VP",
(10, 10): "PRT",
(11, 11): "WHNP-WHNP",
(13, 13): "ADJP",
(14, 14): "WHNP",
(17, 18): "NP",
(16, 18): "VP",
(15, 18): "S-VP",
(14, 18): "SBAR",
(12, 18): "S-VP",
(11, 18): "SBAR",
(9, 18): "VP",
(4, 18): "VP",
(3, 18): "S-VP",
(2, 18): "VP",
(1, 18): "S",
(21, 22): "NP",
(23, 23): "VP",
(0, 24): "S",
}
| allennlp-models-main | tests/structured_prediction/dataset_readers/penn_tree_bank_test.py |
from allennlp.common.testing import AllenNlpTestCase
from allennlp_models.structured_prediction import UniversalDependenciesDatasetReader
from tests import FIXTURES_ROOT
class TestUniversalDependenciesDatasetReader(AllenNlpTestCase):
data_path = FIXTURES_ROOT / "structured_prediction" / "dependencies.conllu"
def test_read_from_file(self):
reader = UniversalDependenciesDatasetReader()
instances = list(reader.read(str(self.data_path)))
instance = instances[0]
fields = instance.fields
assert [t.text for t in fields["words"].tokens] == [
"What",
"if",
"Google",
"Morphed",
"Into",
"GoogleOS",
"?",
]
assert fields["pos_tags"].labels == [
"PRON",
"SCONJ",
"PROPN",
"VERB",
"ADP",
"PROPN",
"PUNCT",
]
assert fields["head_tags"].labels == [
"root",
"mark",
"nsubj",
"advcl",
"case",
"obl",
"punct",
]
assert fields["head_indices"].labels == [0, 4, 4, 1, 6, 4, 4]
instance = instances[1]
fields = instance.fields
assert [t.text for t in fields["words"].tokens] == [
"What",
"if",
"Google",
"expanded",
"on",
"its",
"search",
"-",
"engine",
"(",
"and",
"now",
"e-mail",
")",
"wares",
"into",
"a",
"full",
"-",
"fledged",
"operating",
"system",
"?",
]
assert fields["pos_tags"].labels == [
"PRON",
"SCONJ",
"PROPN",
"VERB",
"ADP",
"PRON",
"NOUN",
"PUNCT",
"NOUN",
"PUNCT",
"CCONJ",
"ADV",
"NOUN",
"PUNCT",
"NOUN",
"ADP",
"DET",
"ADV",
"PUNCT",
"ADJ",
"NOUN",
"NOUN",
"PUNCT",
]
assert fields["head_tags"].labels == [
"root",
"mark",
"nsubj",
"advcl",
"case",
"nmod:poss",
"compound",
"punct",
"compound",
"punct",
"cc",
"advmod",
"conj",
"punct",
"obl",
"case",
"det",
"advmod",
"punct",
"amod",
"compound",
"obl",
"punct",
]
assert fields["head_indices"].labels == [
0,
4,
4,
1,
15,
15,
9,
9,
15,
9,
13,
13,
9,
15,
4,
22,
22,
20,
20,
22,
22,
4,
4,
]
instance = instances[2]
fields = instance.fields
assert [t.text for t in fields["words"].tokens] == [
"[",
"via",
"Microsoft",
"Watch",
"from",
"Mary",
"Jo",
"Foley",
"]",
]
assert fields["pos_tags"].labels == [
"PUNCT",
"ADP",
"PROPN",
"PROPN",
"ADP",
"PROPN",
"PROPN",
"PROPN",
"PUNCT",
]
assert fields["head_tags"].labels == [
"punct",
"case",
"compound",
"root",
"case",
"nmod",
"flat",
"flat",
"punct",
]
assert fields["head_indices"].labels == [4, 4, 4, 0, 6, 4, 6, 6, 4]
# This instance tests specifically for filtering of ellipsis:
# https://universaldependencies.org/u/overview/specific-syntax.html#ellipsis
# The original sentence is:
# "Over 300 Iraqis are reported dead and 500 [reported] wounded in Fallujah alone."
# But the second "reported" is elided, and as such isn't included in the syntax tree.
instance = instances[3]
fields = instance.fields
assert [t.text for t in fields["words"].tokens] == [
"Over",
"300",
"Iraqis",
"are",
"reported",
"dead",
"and",
"500",
"wounded",
"in",
"Fallujah",
"alone",
".",
]
assert fields["pos_tags"].labels == [
"ADV",
"NUM",
"PROPN",
"AUX",
"VERB",
"ADJ",
"CCONJ",
"NUM",
"ADJ",
"ADP",
"PROPN",
"ADV",
"PUNCT",
]
assert fields["head_tags"].labels == [
"advmod",
"nummod",
"nsubj:pass",
"aux:pass",
"root",
"xcomp",
"cc",
"conj",
"orphan",
"case",
"obl",
"advmod",
"punct",
]
assert fields["head_indices"].labels == [2, 3, 5, 5, 0, 5, 8, 5, 8, 11, 5, 11, 5]
def test_read_from_file_with_language_specific_pos(self):
reader = UniversalDependenciesDatasetReader(use_language_specific_pos=True)
instances = list(reader.read(str(self.data_path)))
instance = instances[0]
fields = instance.fields
assert [t.text for t in fields["words"].tokens] == [
"What",
"if",
"Google",
"Morphed",
"Into",
"GoogleOS",
"?",
]
assert fields["pos_tags"].labels == ["WP", "IN", "NNP", "VBD", "IN", "NNP", "."]
assert fields["head_tags"].labels == [
"root",
"mark",
"nsubj",
"advcl",
"case",
"obl",
"punct",
]
assert fields["head_indices"].labels == [0, 4, 4, 1, 6, 4, 4]
| allennlp-models-main | tests/structured_prediction/dataset_readers/universal_dependencies_test.py |
from allennlp.common.util import ensure_list
from allennlp.common.testing import AllenNlpTestCase
from allennlp_models.structured_prediction.dataset_readers.srl import (
SrlReader,
_convert_tags_to_wordpiece_tags,
)
from tests import FIXTURES_ROOT
class TestSrlReader:
def test_read_from_file(self):
conll_reader = SrlReader()
instances = conll_reader.read(
FIXTURES_ROOT / "structured_prediction" / "srl" / "conll_2012" / "subdomain"
)
instances = ensure_list(instances)
fields = instances[0].fields
tokens = [t.text for t in fields["tokens"].tokens]
assert tokens == [
"Mali",
"government",
"officials",
"say",
"the",
"woman",
"'s",
"confession",
"was",
"forced",
".",
]
assert fields["verb_indicator"].labels[3] == 1
assert fields["tags"].labels == [
"B-ARG0",
"I-ARG0",
"I-ARG0",
"B-V",
"B-ARG1",
"I-ARG1",
"I-ARG1",
"I-ARG1",
"I-ARG1",
"I-ARG1",
"O",
]
assert fields["metadata"].metadata["words"] == tokens
assert fields["metadata"].metadata["verb"] == tokens[3]
assert fields["metadata"].metadata["gold_tags"] == fields["tags"].labels
fields = instances[1].fields
tokens = [t.text for t in fields["tokens"].tokens]
assert tokens == [
"Mali",
"government",
"officials",
"say",
"the",
"woman",
"'s",
"confession",
"was",
"forced",
".",
]
assert fields["verb_indicator"].labels[8] == 1
assert fields["tags"].labels == [
"O",
"O",
"O",
"O",
"B-ARG1",
"I-ARG1",
"I-ARG1",
"I-ARG1",
"B-V",
"B-ARG2",
"O",
]
assert fields["metadata"].metadata["words"] == tokens
assert fields["metadata"].metadata["verb"] == tokens[8]
assert fields["metadata"].metadata["gold_tags"] == fields["tags"].labels
fields = instances[2].fields
tokens = [t.text for t in fields["tokens"].tokens]
assert tokens == [
"The",
"prosecution",
"rested",
"its",
"case",
"last",
"month",
"after",
"four",
"months",
"of",
"hearings",
".",
]
assert fields["verb_indicator"].labels[2] == 1
assert fields["tags"].labels == [
"B-ARG0",
"I-ARG0",
"B-V",
"B-ARG1",
"I-ARG1",
"B-ARGM-TMP",
"I-ARGM-TMP",
"B-ARGM-TMP",
"I-ARGM-TMP",
"I-ARGM-TMP",
"I-ARGM-TMP",
"I-ARGM-TMP",
"O",
]
assert fields["metadata"].metadata["words"] == tokens
assert fields["metadata"].metadata["verb"] == tokens[2]
assert fields["metadata"].metadata["gold_tags"] == fields["tags"].labels
fields = instances[3].fields
tokens = [t.text for t in fields["tokens"].tokens]
assert tokens == [
"The",
"prosecution",
"rested",
"its",
"case",
"last",
"month",
"after",
"four",
"months",
"of",
"hearings",
".",
]
assert fields["verb_indicator"].labels[11] == 1
assert fields["tags"].labels == [
"O",
"O",
"O",
"O",
"O",
"O",
"O",
"O",
"O",
"O",
"O",
"B-V",
"O",
]
assert fields["metadata"].metadata["words"] == tokens
assert fields["metadata"].metadata["verb"] == tokens[11]
assert fields["metadata"].metadata["gold_tags"] == fields["tags"].labels
# Tests a sentence with no verbal predicates.
fields = instances[4].fields
tokens = [t.text for t in fields["tokens"].tokens]
assert tokens == ["Denise", "Dillon", "Headline", "News", "."]
assert fields["verb_indicator"].labels == [0, 0, 0, 0, 0]
assert fields["tags"].labels == ["O", "O", "O", "O", "O"]
assert fields["metadata"].metadata["words"] == tokens
assert fields["metadata"].metadata["verb"] is None
assert fields["metadata"].metadata["gold_tags"] == fields["tags"].labels
def test_srl_reader_can_filter_by_domain(self):
conll_reader = SrlReader(domain_identifier="subdomain2")
instances = conll_reader.read(
FIXTURES_ROOT / "structured_prediction" / "srl" / "conll_2012"
)
instances = ensure_list(instances)
# If we'd included the folder, we'd have 9 instances.
assert len(instances) == 2
class TestBertSrlReader(AllenNlpTestCase):
def setup_method(self):
super().setup_method()
self.reader = SrlReader(bert_model_name="bert-base-uncased")
def test_convert_tags_to_wordpiece_tags(self):
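# "offsets" hold the wordpiece index of each original token's last wordpiece,
# shifted by one for the leading [CLS].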
offsets = [2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]
offsets = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]
original = [
"B-ARG0",
"I-ARG0",
"I-ARG0",
"B-V",
"B-ARG1",
"I-ARG1",
"I-ARG1",
"I-ARG1",
"I-ARG1",
"I-ARG1",
"O",
]
wordpiece_tags = [
"O",
"B-ARG0",
"I-ARG0",
"I-ARG0",
"B-V",
"B-ARG1",
"I-ARG1",
"I-ARG1",
"I-ARG1",
"I-ARG1",
"I-ARG1",
"O",
"O",
]
converted = _convert_tags_to_wordpiece_tags(original, offsets)
assert converted == wordpiece_tags
offsets = [2, 3, 4, 5, 6, 7, 8, 9, 10, 12]
offsets = [1, 2, 3, 4, 5, 6, 7, 8, 9, 11]
converted = _convert_tags_to_wordpiece_tags(original, offsets)
assert converted == [
"O",
"B-ARG0",
"I-ARG0",
"I-ARG0",
"B-V",
"B-ARG1",
"I-ARG1",
"I-ARG1",
"I-ARG1",
"I-ARG1",
"I-ARG1",
"I-ARG1",
"O",
]
offsets = [1, 3, 5]
original = ["B-ARG", "B-V", "O"]
converted = _convert_tags_to_wordpiece_tags(original, offsets)
assert converted == ["O", "B-ARG", "B-V", "I-V", "O", "O", "O"]
offsets = [2, 3, 5]
original = ["B-ARG", "I-ARG", "O"]
converted = _convert_tags_to_wordpiece_tags(original, offsets)
assert converted == ["O", "B-ARG", "I-ARG", "I-ARG", "O", "O", "O"]
def test_wordpiece_tokenize_input(self):
wordpieces, offsets, start_offsets = self.reader._wordpiece_tokenize_input(
"This is a sentenceandsomepieces with a reallylongword".split(" ")
)
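# "offsets" index the last wordpiece of each original token, while "start_offsets" index the first.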
assert wordpieces == [
"[CLS]",
"this",
"is",
"a",
"sentence",
"##ands",
"##ome",
"##piece",
"##s",
"with",
"a",
"really",
"##long",
"##word",
"[SEP]",
]
assert [wordpieces[i] for i in offsets] == ["this", "is", "a", "##s", "with", "a", "##word"]
assert [wordpieces[i] for i in start_offsets] == [
"this",
"is",
"a",
"sentence",
"with",
"a",
"really",
]
def test_read_from_file(self):
conll_reader = self.reader
instances = conll_reader.read(
FIXTURES_ROOT / "structured_prediction" / "srl" / "conll_2012" / "subdomain"
)
instances = ensure_list(instances)
fields = instances[0].fields
tokens = fields["metadata"]["words"]
assert tokens == [
"Mali",
"government",
"officials",
"say",
"the",
"woman",
"'s",
"confession",
"was",
"forced",
".",
]
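# With the BERT wordpiece tokenizer the tag sequence covers wordpieces plus [CLS]/[SEP],
# so the verb index and sequence length shift relative to the plain SrlReader instances above.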
assert fields["verb_indicator"].labels[4] == 1
assert fields["tags"].labels == [
"O",
"B-ARG0",
"I-ARG0",
"I-ARG0",
"B-V",
"B-ARG1",
"I-ARG1",
"I-ARG1",
"I-ARG1",
"I-ARG1",
"I-ARG1",
"I-ARG1",
"O",
"O",
]
fields = instances[1].fields
tokens = fields["metadata"]["words"]
assert tokens == [
"Mali",
"government",
"officials",
"say",
"the",
"woman",
"'s",
"confession",
"was",
"forced",
".",
]
assert fields["verb_indicator"].labels[10] == 1
assert fields["tags"].labels == [
"O",
"O",
"O",
"O",
"O",
"B-ARG1",
"I-ARG1",
"I-ARG1",
"I-ARG1",
"I-ARG1",
"B-V",
"B-ARG2",
"O",
"O",
]
fields = instances[2].fields
tokens = fields["metadata"]["words"]
assert tokens == [
"The",
"prosecution",
"rested",
"its",
"case",
"last",
"month",
"after",
"four",
"months",
"of",
"hearings",
".",
]
assert fields["verb_indicator"].labels[3] == 1
assert fields["tags"].labels == [
"O",
"B-ARG0",
"I-ARG0",
"B-V",
"B-ARG1",
"I-ARG1",
"B-ARGM-TMP",
"I-ARGM-TMP",
"B-ARGM-TMP",
"I-ARGM-TMP",
"I-ARGM-TMP",
"I-ARGM-TMP",
"I-ARGM-TMP",
"O",
"O",
]
fields = instances[3].fields
tokens = fields["metadata"]["words"]
assert tokens == [
"The",
"prosecution",
"rested",
"its",
"case",
"last",
"month",
"after",
"four",
"months",
"of",
"hearings",
".",
]
assert fields["verb_indicator"].labels[12] == 1
assert fields["tags"].labels == [
"O",
"O",
"O",
"O",
"O",
"O",
"O",
"O",
"O",
"O",
"O",
"O",
"B-V",
"O",
"O",
]
# Tests a sentence with no verbal predicates.
fields = instances[4].fields
tokens = fields["metadata"]["words"]
assert tokens == ["Denise", "Dillon", "Headline", "News", "."]
assert fields["verb_indicator"].labels == [0, 0, 0, 0, 0, 0, 0]
assert fields["tags"].labels == ["O", "O", "O", "O", "O", "O", "O"]
| allennlp-models-main | tests/structured_prediction/dataset_readers/srl_test.py |
| allennlp-models-main | tests/structured_prediction/predictors/__init__.py |
from allennlp.common.testing import AllenNlpTestCase
from allennlp.models.archival import load_archive
from allennlp.predictors import Predictor
import spacy
from allennlp_models.structured_prediction import BiaffineDependencyParserPredictor
from tests import FIXTURES_ROOT
class TestBiaffineDependencyParser(AllenNlpTestCase):
def test_uses_named_inputs(self):
inputs = {"sentence": "Please could you parse this sentence?"}
archive = load_archive(
FIXTURES_ROOT
/ "structured_prediction"
/ "biaffine_dependency_parser"
/ "serialization"
/ "model.tar.gz"
)
predictor = Predictor.from_archive(archive, "biaffine_dependency_parser")
result = predictor.predict_json(inputs)
words = result.get("words")
predicted_heads = result.get("predicted_heads")
assert len(predicted_heads) == len(words)
predicted_dependencies = result.get("predicted_dependencies")
assert len(predicted_dependencies) == len(words)
assert isinstance(predicted_dependencies, list)
assert all(isinstance(x, str) for x in predicted_dependencies)
assert result.get("loss") is not None
assert result.get("arc_loss") is not None
assert result.get("tag_loss") is not None
hierplane_tree = result.get("hierplane_tree")
hierplane_tree.pop("nodeTypeToStyle")
hierplane_tree.pop("linkToPosition")
assert result.get("hierplane_tree") == {
"text": "Please could you parse this sentence ?",
"root": {
"word": "Please",
"nodeType": "det",
"attributes": ["INTJ"],
"link": "det",
"spans": [{"start": 0, "end": 7}],
"children": [
{
"word": "could",
"nodeType": "nummod",
"attributes": ["VERB" if spacy.__version__ < "3.0" else "AUX"],
"link": "nummod",
"spans": [{"start": 7, "end": 13}],
},
{
"word": "you",
"nodeType": "nummod",
"attributes": ["PRON"],
"link": "nummod",
"spans": [{"start": 13, "end": 17}],
},
{
"word": "parse",
"nodeType": "nummod",
"attributes": ["VERB"],
"link": "nummod",
"spans": [{"start": 17, "end": 23}],
},
{
"word": "this",
"nodeType": "nummod",
"attributes": ["DET"],
"link": "nummod",
"spans": [{"start": 23, "end": 28}],
},
{
"word": "sentence",
"nodeType": "nummod",
"attributes": ["NOUN"],
"link": "nummod",
"spans": [{"start": 28, "end": 37}],
},
{
"word": "?",
"nodeType": "nummod",
"attributes": ["PUNCT"],
"link": "nummod",
"spans": [{"start": 37, "end": 39}],
},
],
},
}
def test_batch_prediction(self):
inputs = [
{"sentence": "What kind of test succeeded on its first attempt?"},
{"sentence": "What kind of test succeeded on its first attempt at batch processing?"},
{"sentence": "James ate some cheese whilst thinking about the play."},
]
archive = load_archive(
FIXTURES_ROOT
/ "structured_prediction"
/ "biaffine_dependency_parser"
/ "serialization"
/ "model.tar.gz"
)
predictor = Predictor.from_archive(archive, "biaffine_dependency_parser")
results = predictor.predict_batch_json(inputs)
assert len(results) == 3
for result in results:
sequence_length = len(result.get("words"))
predicted_heads = result.get("predicted_heads")
assert len(predicted_heads) == sequence_length
predicted_dependencies = result.get("predicted_dependencies")
assert len(predicted_dependencies) == sequence_length
assert isinstance(predicted_dependencies, list)
assert all(isinstance(x, str) for x in predicted_dependencies)
def test_predictor_uses_dataset_reader_to_determine_pos_set(self):
archive = load_archive(
FIXTURES_ROOT
/ "structured_prediction"
/ "biaffine_dependency_parser"
/ "serialization"
/ "model.tar.gz"
)
predictor = Predictor.from_archive(archive, "biaffine_dependency_parser")
inputs = {"sentence": "Dogs eat cats."}
instance_with_ud_pos = predictor._json_to_instance(inputs)
tags = instance_with_ud_pos.fields["pos_tags"].labels
if "3.0" < spacy.__version__ <= "3.1":
assert tags == ["PROPN", "VERB", "NOUN", "PUNCT"]
else:
assert tags == ["NOUN", "VERB", "NOUN", "PUNCT"]
predictor._dataset_reader.use_language_specific_pos = True
instance_with_ptb_pos = predictor._json_to_instance(inputs)
tags = instance_with_ptb_pos.fields["pos_tags"].labels
if "3.0" < spacy.__version__ <= "3.1":
assert tags == ["NNP", "VBP", "NNS", "."]
else:
assert tags == ["NNS", "VBP", "NNS", "."]
| allennlp-models-main | tests/structured_prediction/predictors/biaffine_dependency_parser_test.py |
from nltk import Tree
import pytest
from allennlp.common.testing import AllenNlpTestCase
from allennlp.models.archival import load_archive
from allennlp.predictors import Predictor
from allennlp_models.structured_prediction.predictors.constituency_parser import (
LINK_TO_LABEL,
NODE_TYPE_TO_STYLE,
)
from tests import FIXTURES_ROOT
class TestConstituencyParserPredictor(AllenNlpTestCase):
def test_uses_named_inputs(self):
inputs = {"sentence": "What a great test sentence."}
archive = load_archive(
FIXTURES_ROOT
/ "structured_prediction"
/ "constituency_parser"
/ "serialization"
/ "model.tar.gz"
)
predictor = Predictor.from_archive(archive, "constituency_parser")
result = predictor.predict_json(inputs)
assert len(result["spans"]) == 21 # number of possible substrings of the sentence.
assert len(result["class_probabilities"]) == 21
assert result["tokens"] == ["What", "a", "great", "test", "sentence", "."]
assert isinstance(result["trees"], str)
for class_distribution in result["class_probabilities"]:
assert sum(class_distribution) == pytest.approx(1.0, rel=1e-3)
def test_batch_prediction(self):
inputs = [
{"sentence": "What a great test sentence."},
{"sentence": "Here's another good, interesting one."},
]
archive = load_archive(
FIXTURES_ROOT
/ "structured_prediction"
/ "constituency_parser"
/ "serialization"
/ "model.tar.gz"
)
predictor = Predictor.from_archive(archive, "constituency_parser")
results = predictor.predict_batch_json(inputs)
result = results[0]
assert len(result["spans"]) == 21 # number of possible substrings of the sentence.
assert len(result["class_probabilities"]) == 21
assert result["tokens"] == ["What", "a", "great", "test", "sentence", "."]
assert isinstance(result["trees"], str)
for class_distribution in result["class_probabilities"]:
assert sum(class_distribution) == pytest.approx(1.0, rel=1e-3)
result = results[1]
assert len(result["spans"]) == 36 # number of possible substrings of the sentence.
assert len(result["class_probabilities"]) == 36
assert result["tokens"] == ["Here", "'s", "another", "good", ",", "interesting", "one", "."]
assert isinstance(result["trees"], str)
for class_distribution in result["class_probabilities"]:
assert sum(class_distribution) == pytest.approx(1.0, rel=1e-3)
def test_build_hierplane_tree(self):
tree = Tree.fromstring("(S (NP (D the) (N dog)) (VP (V chased) (NP (D the) (N cat))))")
archive = load_archive(
FIXTURES_ROOT
/ "structured_prediction"
/ "constituency_parser"
/ "serialization"
/ "model.tar.gz"
)
predictor = Predictor.from_archive(archive, "constituency_parser")
hierplane_tree = predictor._build_hierplane_tree(tree, 0, is_root=True)
correct_tree = {
"text": "the dog chased the cat",
"linkNameToLabel": LINK_TO_LABEL,
"nodeTypeToStyle": NODE_TYPE_TO_STYLE,
"root": {
"word": "the dog chased the cat",
"nodeType": "S",
"attributes": ["S"],
"link": "S",
"children": [
{
"word": "the dog",
"nodeType": "NP",
"attributes": ["NP"],
"link": "NP",
"children": [
{"word": "the", "nodeType": "D", "attributes": ["D"], "link": "D"},
{"word": "dog", "nodeType": "N", "attributes": ["N"], "link": "N"},
],
},
{
"word": "chased the cat",
"nodeType": "VP",
"attributes": ["VP"],
"link": "VP",
"children": [
{"word": "chased", "nodeType": "V", "attributes": ["V"], "link": "V"},
{
"word": "the cat",
"nodeType": "NP",
"attributes": ["NP"],
"link": "NP",
"children": [
{
"word": "the",
"nodeType": "D",
"attributes": ["D"],
"link": "D",
},
{
"word": "cat",
"nodeType": "N",
"attributes": ["N"],
"link": "N",
},
],
},
],
},
],
},
}
assert correct_tree == hierplane_tree
| allennlp-models-main | tests/structured_prediction/predictors/constituency_parser_test.py |
from allennlp.common.testing import AllenNlpTestCase
from allennlp.models.archival import load_archive
from allennlp.predictors import Predictor
from allennlp.data.tokenizers.spacy_tokenizer import SpacyTokenizer
from allennlp_models.structured_prediction.predictors.openie import (
sanitize_label,
consolidate_predictions,
get_predicate_text,
)
from tests import FIXTURES_ROOT
class TestOpenIePredictor(AllenNlpTestCase):
def test_uses_named_inputs(self):
"""
Tests whether the model outputs conform to the expected format.
"""
inputs = {
"sentence": "Angela Merkel met and spoke to her EU counterparts during the climate summit."
}
archive = load_archive(
FIXTURES_ROOT / "structured_prediction" / "srl" / "serialization" / "model.tar.gz"
)
predictor = Predictor.from_archive(archive, "open_information_extraction")
result = predictor.predict_json(inputs)
words = result.get("words")
assert words == [
"Angela",
"Merkel",
"met",
"and",
"spoke",
"to",
"her",
"EU",
"counterparts",
"during",
"the",
"climate",
"summit",
".",
]
num_words = len(words)
verbs = result.get("verbs")
assert verbs is not None
assert isinstance(verbs, list)
for verb in verbs:
tags = verb.get("tags")
assert tags is not None
assert isinstance(tags, list)
assert all(isinstance(tag, str) for tag in tags)
assert len(tags) == num_words
def test_sanitize_label(self):
assert sanitize_label("B-ARGV-MOD") == "B-ARGV-MOD"
def test_prediction_with_no_verbs(self):
"""
Tests whether the model copes with sentences without verbs.
"""
input1 = {"sentence": "Blah no verb sentence."}
archive = load_archive(
FIXTURES_ROOT / "structured_prediction" / "srl" / "serialization" / "model.tar.gz"
)
predictor = Predictor.from_archive(archive, "open_information_extraction")
result = predictor.predict_json(input1)
assert result == {"words": ["Blah", "no", "verb", "sentence", "."], "verbs": []}
def test_predicate_consolidation(self):
"""
Test whether the predictor can correctly consolidate multiword
predicates.
"""
tokenizer = SpacyTokenizer(pos_tags=True)
sent_tokens = tokenizer.tokenize("In December, John decided to join the party.")
# Emulate predications - for both "decided" and "join"
predictions = [
["B-ARG2", "I-ARG2", "O", "B-ARG0", "B-V", "B-ARG1", "I-ARG1", "I-ARG1", "I-ARG1", "O"],
["O", "O", "O", "B-ARG0", "B-BV", "I-BV", "B-V", "B-ARG1", "I-ARG1", "O"],
]
# Consolidate
pred_dict = consolidate_predictions(predictions, sent_tokens)
# Check that only "decided to join" is left
assert len(pred_dict) == 1
tags = list(pred_dict.values())[0]
assert get_predicate_text(sent_tokens, tags) == "decided to join"
def test_more_than_two_overlapping_predicates(self):
"""
Test whether the predictor correctly consolidates more than two
overlapping multiword predicates.
"""
tokenizer = SpacyTokenizer(pos_tags=True)
sent_tokens = tokenizer.tokenize("John refused to consider joining the club.")
# Emulate predications - for "refused" and "consider" and "joining"
predictions = [
["B-ARG0", "B-V", "B-ARG1", "I-ARG1", "I-ARG1", "I-ARG1", "I-ARG1", "O"],
["B-ARG0", "B-BV", "I-BV", "B-V", "B-ARG1", "I-ARG1", "I-ARG1", "O"],
["B-ARG0", "B-BV", "I-BV", "I-BV", "B-V", "B-ARG1", "I-ARG1", "O"],
]
# Consolidate
pred_dict = consolidate_predictions(predictions, sent_tokens)
# Check that only "refused to consider to join" is left
assert len(pred_dict) == 1
tags = list(pred_dict.values())[0]
assert get_predicate_text(sent_tokens, tags) == "refused to consider joining"
def test_aux_verb(self):
inputs = {"sentence": "Yellowstone National Park is in the United States of America."}
archive = load_archive(
FIXTURES_ROOT / "structured_prediction" / "srl" / "serialization" / "model.tar.gz"
)
predictor = Predictor.from_archive(archive, "open_information_extraction")
result = predictor.predict_json(inputs)
verbs = result.get("verbs")
assert verbs is not None
assert isinstance(verbs, list)
for verb in verbs:
tags = verb.get("tags")
assert tags is not None
assert isinstance(tags, list)
assert all(isinstance(tag, str) for tag in tags)
| allennlp-models-main | tests/structured_prediction/predictors/openie_test.py |
from allennlp.common.testing import AllenNlpTestCase
from allennlp.models.archival import load_archive
from allennlp.predictors import Predictor
from tests import FIXTURES_ROOT
class TestSrlPredictor(AllenNlpTestCase):
def test_uses_named_inputs(self):
inputs = {
"sentence": "The squirrel wrote a unit test to make sure its nuts worked as designed."
}
archive = load_archive(
FIXTURES_ROOT / "structured_prediction" / "srl" / "serialization" / "model.tar.gz"
)
predictor = Predictor.from_archive(archive, "semantic_role_labeling")
result_json = predictor.predict_json(inputs)
self.assert_predict_result(result_json)
words = [
"The",
"squirrel",
"wrote",
"a",
"unit",
"test",
"to",
"make",
"sure",
"its",
"nuts",
"worked",
"as",
"designed",
".",
]
result_words = predictor.predict_tokenized(words)
self.assert_predict_result(result_words)
@staticmethod
def assert_predict_result(result):
words = result.get("words")
assert words == [
"The",
"squirrel",
"wrote",
"a",
"unit",
"test",
"to",
"make",
"sure",
"its",
"nuts",
"worked",
"as",
"designed",
".",
]
num_words = len(words)
verbs = result.get("verbs")
assert verbs is not None
assert isinstance(verbs, list)
assert any(v["verb"] == "wrote" for v in verbs)
assert any(v["verb"] == "make" for v in verbs)
assert any(v["verb"] == "worked" for v in verbs)
for verb in verbs:
tags = verb.get("tags")
assert tags is not None
assert isinstance(tags, list)
assert all(isinstance(tag, str) for tag in tags)
assert len(tags) == num_words
def test_batch_prediction(self):
inputs = {
"sentence": "The squirrel wrote a unit test to make sure its nuts worked as designed."
}
archive = load_archive(
FIXTURES_ROOT / "structured_prediction" / "srl" / "serialization" / "model.tar.gz"
)
predictor = Predictor.from_archive(archive, "semantic_role_labeling")
result = predictor.predict_batch_json([inputs, inputs])
assert result[0] == result[1]
def test_prediction_with_no_verbs(self):
input1 = {"sentence": "Blah no verb sentence."}
archive = load_archive(
FIXTURES_ROOT / "structured_prediction" / "srl" / "serialization" / "model.tar.gz"
)
predictor = Predictor.from_archive(archive, "semantic_role_labeling")
result = predictor.predict_json(input1)
assert result == {"words": ["Blah", "no", "verb", "sentence", "."], "verbs": []}
input2 = {"sentence": "This sentence seems to have a verb."}
results = predictor.predict_batch_json([input1, input2])
assert results[0] == {"words": ["Blah", "no", "verb", "sentence", "."], "verbs": []}
assert results[1] == {
"verbs": [
{
"verb": "seems",
"description": "This sentence seems to have a verb .",
"tags": ["O", "O", "O", "O", "O", "O", "O", "O"],
},
{
"verb": "have",
"description": "This sentence seems to have a verb .",
"tags": ["O", "O", "O", "O", "O", "O", "O", "O"],
},
],
"words": ["This", "sentence", "seems", "to", "have", "a", "verb", "."],
}
| allennlp-models-main | tests/structured_prediction/predictors/srl_test.py |
import json
import numpy
import pytest
from transformers.models.bert.modeling_bert import BertConfig, BertModel
from transformers.models.bert.tokenization_bert import BertTokenizer
from allennlp.common.testing import ModelTestCase
from allennlp.nn.util import get_lengths_from_binary_sequence_mask
from allennlp.data.dataset_readers.dataset_utils.span_utils import to_bioul
from tests import FIXTURES_ROOT
from allennlp_models.structured_prediction import SrlBert
class BertSrlTest(ModelTestCase):
def setup_method(self):
super().setup_method()
self.set_up_model(
FIXTURES_ROOT / "structured_prediction" / "srl" / "bert_srl.jsonnet",
FIXTURES_ROOT / "structured_prediction" / "srl" / "conll_2012",
)
def test_bert_srl_model_can_train_save_and_load(self):
ignore_grads = {"bert_model.pooler.dense.weight", "bert_model.pooler.dense.bias"}
self.ensure_model_can_train_save_and_load(self.param_file, gradients_to_ignore=ignore_grads)
def test_batch_predictions_are_consistent(self):
self.ensure_batch_predictions_are_consistent()
def test_forward_pass_runs_correctly(self):
training_tensors = self.dataset.as_tensor_dict()
output_dict = self.model(**training_tensors)
class_probs = output_dict["class_probabilities"][0].data.numpy()
numpy.testing.assert_almost_equal(
numpy.sum(class_probs, -1), numpy.ones(class_probs.shape[0]), decimal=6
)
@pytest.mark.skip("test-install fails on this test in some environments")
def test_decode_runs_correctly(self):
training_tensors = self.dataset.as_tensor_dict()
output_dict = self.model(**training_tensors)
decode_output_dict = self.model.make_output_human_readable(output_dict)
lengths = get_lengths_from_binary_sequence_mask(decode_output_dict["mask"]).data.tolist()
# Hard to check anything concrete which we haven't checked in the above
# test, so we'll just check that each predicted tag sequence has the length
# of its own instance, rather than the padded max length.
for prediction, length in zip(decode_output_dict["wordpiece_tags"], lengths):
assert len(prediction) == length
for prediction, length in zip(decode_output_dict["tags"], lengths):
# to_bioul throws an exception if the tag sequence is not well formed,
# so here we can easily check that the sequence we produce is good.
to_bioul(prediction, encoding="BIO")
class BertSrlFromLocalFilesTest(ModelTestCase):
def test_init_from_local_files(self):
with pytest.warns(
UserWarning, match=r"Initializing BertModel without pretrained weights.*"
):
self.set_up_model(
FIXTURES_ROOT / "structured_prediction" / "srl" / "bert_srl_local_files.jsonnet",
FIXTURES_ROOT / "structured_prediction" / "srl" / "conll_2012",
)
| allennlp-models-main | tests/structured_prediction/models/bert_srl_test.py |
from allennlp.common.testing.model_test_case import ModelTestCase
from tests import FIXTURES_ROOT
import allennlp_models.structured_prediction
class GraphParserTest(ModelTestCase):
def setup_method(self):
super().setup_method()
self.set_up_model(
FIXTURES_ROOT / "structured_prediction" / "semantic_dependencies" / "experiment.json",
FIXTURES_ROOT / "structured_prediction" / "semantic_dependencies" / "dm.sdp",
)
def test_graph_parser_can_save_and_load(self):
self.ensure_model_can_train_save_and_load(self.param_file)
def test_batch_predictions_are_consistent(self):
self.ensure_batch_predictions_are_consistent()
def test_model_can_decode(self):
self.model.eval()
training_tensors = self.dataset.as_tensor_dict()
output_dict = self.model(**training_tensors)
decode_output_dict = self.model.make_output_human_readable(output_dict)
assert set(decode_output_dict.keys()) == {
"arc_loss",
"tag_loss",
"loss",
"arcs",
"arc_tags",
"arc_tag_probs",
"arc_probs",
"tokens",
"mask",
}
| allennlp-models-main | tests/structured_prediction/models/graph_parser_test.py |
import subprocess
import os
from flaky import flaky
import pytest
import numpy
from allennlp.common.testing import ModelTestCase
from allennlp.common.params import Params
from allennlp.common.checks import ConfigurationError
from allennlp.models import Model
from allennlp.nn.util import get_lengths_from_binary_sequence_mask
from allennlp.data.dataset_readers.dataset_utils.span_utils import to_bioul
from tests import FIXTURES_ROOT, PROJECT_ROOT
class SemanticRoleLabelerTest(ModelTestCase):
def setup_method(self):
super().setup_method()
self.set_up_model(
FIXTURES_ROOT / "structured_prediction" / "srl" / "experiment.json",
FIXTURES_ROOT / "structured_prediction" / "srl" / "conll_2012",
)
def test_srl_model_can_train_save_and_load(self):
self.ensure_model_can_train_save_and_load(self.param_file)
@flaky
def test_batch_predictions_are_consistent(self):
self.ensure_batch_predictions_are_consistent()
def test_forward_pass_runs_correctly(self):
training_tensors = self.dataset.as_tensor_dict()
output_dict = self.model(**training_tensors)
class_probs = output_dict["class_probabilities"][0].data.numpy()
numpy.testing.assert_almost_equal(
numpy.sum(class_probs, -1), numpy.ones(class_probs.shape[0]), decimal=6
)
def test_decode_runs_correctly(self):
training_tensors = self.dataset.as_tensor_dict()
output_dict = self.model(**training_tensors)
decode_output_dict = self.model.make_output_human_readable(output_dict)
lengths = get_lengths_from_binary_sequence_mask(decode_output_dict["mask"]).data.tolist()
        # Hard to check anything concrete that we haven't already checked in the above
        # test, so we'll just check that the number of predicted tags equals the length
        # of each individual instance, rather than the padded maximum length.
for prediction, length in zip(decode_output_dict["tags"], lengths):
assert len(prediction) == length
# Checks that the output is a well formed BIO sequence,
# as otherwise an exception is thrown.
to_bioul(prediction, encoding="BIO")
def test_bio_tags_correctly_convert_to_conll_format(self):
bio_tags = ["B-ARG-1", "I-ARG-1", "O", "B-V", "B-ARGM-ADJ", "O"]
from allennlp_models.structured_prediction.models.srl import (
convert_bio_tags_to_conll_format,
)
conll_tags = convert_bio_tags_to_conll_format(bio_tags)
assert conll_tags == ["(ARG-1*", "*)", "*", "(V*)", "(ARGM-ADJ*)", "*"]
def test_perl_eval_script_can_run_on_printed_conll_files(self):
bio_tags = ["B-ARG-1", "I-ARG-1", "O", "B-V", "B-ARGM-ADJ", "O"]
sentence = ["Mark", "and", "Matt", "were", "running", "fast", "."]
gold_file_path = os.path.join(self.TEST_DIR, "gold_conll_eval.txt")
prediction_file_path = os.path.join(self.TEST_DIR, "prediction_conll_eval.txt")
with open(gold_file_path, "a+") as gold_file, open(
prediction_file_path, "a+"
) as prediction_file:
# Use the same bio tags as prediction vs gold to make it obvious by looking
# at the perl script output if something is wrong. Write them twice to
# ensure that the perl script deals with multiple sentences.
from allennlp_models.structured_prediction.models.srl import (
write_bio_formatted_tags_to_file,
)
write_bio_formatted_tags_to_file(
gold_file, prediction_file, 4, sentence, bio_tags, bio_tags
)
write_bio_formatted_tags_to_file(
gold_file, prediction_file, 4, sentence, bio_tags, bio_tags
)
perl_script_command = [
"perl",
str(
PROJECT_ROOT / "allennlp_models" / "structured_prediction" / "tools" / "srl-eval.pl"
),
prediction_file_path,
gold_file_path,
]
exit_code = subprocess.check_call(perl_script_command)
assert exit_code == 0
def test_mismatching_dimensions_throws_configuration_error(self):
params = Params.from_file(self.param_file)
# Make the phrase layer wrong - it should be 150 to match
# the embedding + binary feature dimensions.
params["model"]["encoder"]["input_size"] = 10
with pytest.raises(ConfigurationError):
Model.from_params(vocab=self.vocab, params=params.pop("model"))
| allennlp-models-main | tests/structured_prediction/models/semantic_role_labeling_test.py |
allennlp-models-main | tests/structured_prediction/models/__init__.py |
|
import torch
from allennlp.common.testing import ModelTestCase
from allennlp.nn.chu_liu_edmonds import decode_mst
from tests import FIXTURES_ROOT
from allennlp_models.structured_prediction import BiaffineDependencyParser
class BiaffineDependencyParserTest(ModelTestCase):
def setup_method(self):
super().setup_method()
self.set_up_model(
FIXTURES_ROOT
/ "structured_prediction"
/ "biaffine_dependency_parser"
/ "experiment.json",
FIXTURES_ROOT / "structured_prediction" / "dependencies.conllu",
)
def test_dependency_parser_can_save_and_load(self):
self.ensure_model_can_train_save_and_load(self.param_file)
def test_mst_decoding_can_run_forward(self):
self.model.use_mst_decoding_for_validation = True
self.ensure_model_can_train_save_and_load(self.param_file)
def test_batch_predictions_are_consistent(self):
self.ensure_batch_predictions_are_consistent()
def test_decode_runs(self):
self.model.eval()
training_tensors = self.dataset.as_tensor_dict()
output_dict = self.model(**training_tensors)
decode_output_dict = self.model.make_output_human_readable(output_dict)
assert set(decode_output_dict.keys()) == {
"arc_loss",
"tag_loss",
"loss",
"predicted_dependencies",
"predicted_heads",
"words",
"pos",
}
def test_mst_respects_no_outgoing_root_edges_constraint(self):
# This energy tensor expresses the following relation:
# energy[i,j] = "Score that i is the head of j". In this
# case, we have heads pointing to their children.
# We want to construct a case that has 2 children for the ROOT node,
# because in a typical dependency parse there should only be one
        # word which has the ROOT as its head.
energy = torch.Tensor([[0, 9, 5], [2, 0, 4], [3, 1, 0]])
length = torch.LongTensor([3])
heads, _ = decode_mst(energy.numpy(), length.item(), has_labels=False)
# This is the correct MST, but not desirable for dependency parsing.
assert list(heads) == [-1, 0, 0]
# If we run the decoding with the model, it should enforce
# the constraint.
heads_model, _ = self.model._run_mst_decoding(energy.view(1, 1, 3, 3), length)
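        # Index 0 is the artificial ROOT node; with the constraint enforced, only
        # token 1 keeps ROOT as its head, and token 2 attaches to token 1 instead.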
assert heads_model.tolist()[0] == [0, 0, 1]
def test_mst_decodes_arc_labels_with_respect_to_unconstrained_scores(self):
energy = (
torch.Tensor([[0, 2, 1], [10, 0, 0.5], [9, 0.2, 0]])
.view(1, 1, 3, 3)
.expand(1, 2, 3, 3)
.contiguous()
)
# Make the score for the root label for arcs to the root token be higher - it
# will be masked for the MST, but we want to make sure that the tags are with
# respect to the unmasked tensor. If the masking was incorrect, we would decode all
# zeros as the labels, because torch takes the first index in the case that all the
# values are equal, which would be the case if the labels were calculated from
# the masked score.
energy[:, 1, 0, :] = 3
length = torch.LongTensor([3])
heads, tags = self.model._run_mst_decoding(energy, length)
assert heads.tolist()[0] == [0, 0, 1]
# This test produces different results under PyTorch 0.4.1 and 1.0.
# Almost certainly this is because it's underspecified.
# TODO(markn): modify this test to have a single correct result
assert tags.tolist()[0] in ([0, 1, 0], [0, 1, 1])
| allennlp-models-main | tests/structured_prediction/models/biaffine_dependency_parser_test.py |
from nltk import Tree
import torch
from allennlp.common.testing.model_test_case import ModelTestCase
from allennlp.training.metrics import EvalbBracketingScorer
from allennlp_models.structured_prediction.models.constituency_parser import SpanInformation
from tests import FIXTURES_ROOT
class SpanConstituencyParserTest(ModelTestCase):
def setup_method(self):
EvalbBracketingScorer.compile_evalb()
super().setup_method()
self.set_up_model(
FIXTURES_ROOT
/ "structured_prediction"
/ "constituency_parser"
/ "constituency_parser.json",
FIXTURES_ROOT / "structured_prediction" / "example_ptb.trees",
)
    def teardown_method(self):
        EvalbBracketingScorer.clean_evalb()
        super().teardown_method()
def test_span_parser_can_save_and_load(self):
self.ensure_model_can_train_save_and_load(self.param_file)
def test_batch_predictions_are_consistent(self):
self.ensure_batch_predictions_are_consistent()
def test_forward_can_handle_a_single_word_as_input(self):
# A very annoying edge case: the PTB has several single word sentences.
        # When running with a batch size of 1, we have to be very careful
# about how we .squeeze/.unsqueeze things to make sure it still runs.
text = {"tokens": {"tokens": torch.LongTensor([[1]])}}
pos_tags = torch.LongTensor([[1]])
spans = torch.LongTensor([[[0, 0]]])
label = torch.LongTensor([[1]])
self.model(text, spans, [{"tokens": ["hello"]}], pos_tags, label)
def test_decode_runs(self):
self.model.eval()
training_tensors = self.dataset.as_tensor_dict()
output_dict = self.model(**training_tensors)
decode_output_dict = self.model.make_output_human_readable(output_dict)
assert set(decode_output_dict.keys()) == {
"spans",
"class_probabilities",
"trees",
"tokens",
"pos_tags",
"num_spans",
"loss",
}
metrics = self.model.get_metrics(reset=True)
metric_keys = set(metrics.keys())
assert "evalb_precision" in metric_keys
assert "evalb_recall" in metric_keys
assert "evalb_f1_measure" in metric_keys
def test_resolve_overlap_conflicts_greedily(self):
spans = [
SpanInformation(start=1, end=5, no_label_prob=0.7, label_prob=0.2, label_index=2),
SpanInformation(start=2, end=7, no_label_prob=0.5, label_prob=0.3, label_index=4),
]
resolved_spans = self.model.resolve_overlap_conflicts_greedily(spans)
assert resolved_spans == [
SpanInformation(start=2, end=7, no_label_prob=0.5, label_prob=0.3, label_index=4)
]
def test_construct_tree_from_spans(self):
# (S (NP (D the) (N dog)) (VP (V chased) (NP (D the) (N cat))))
tree_spans = [
((0, 1), "D"),
((1, 2), "N"),
((0, 2), "NP"),
((2, 3), "V"),
((3, 4), "D"),
((4, 5), "N"),
((3, 5), "NP"),
((2, 5), "VP"),
((0, 5), "S"),
]
sentence = ["the", "dog", "chased", "the", "cat"]
tree = self.model.construct_tree_from_spans({x: y for x, y in tree_spans}, sentence)
correct_tree = Tree.fromstring(
"(S (NP (D the) (N dog)) (VP (V chased) (NP (D the) (N cat))))"
)
assert tree == correct_tree
def test_construct_tree_from_spans_handles_nested_labels(self):
# The tree construction should split the "S-NP" into (S (NP ...)).
tree_spans = [((0, 1), "D"), ((1, 2), "N"), ((0, 2), "S-NP")]
sentence = ["the", "dog"]
tree = self.model.construct_tree_from_spans({x: y for x, y in tree_spans}, sentence)
correct_tree = Tree.fromstring("(S (NP (D the) (N dog)))")
assert tree == correct_tree
def test_tree_construction_with_too_few_spans_creates_trees_with_depth_one_word_nodes(self):
# We only have a partial tree here: (S (NP (D the) (N dog)). Decoding should
# recover this from the spans, whilst attaching all other words to the root node with
# XX POS tag labels, as the right hand side splits will not occur in tree_spans.
tree_spans = [((0, 1), "D"), ((1, 2), "N"), ((0, 2), "NP"), ((0, 5), "S")]
sentence = ["the", "dog", "chased", "the", "cat"]
tree = self.model.construct_tree_from_spans({x: y for x, y in tree_spans}, sentence)
correct_tree = Tree.fromstring("(S (NP (D the) (N dog)) (XX chased) (XX the) (XX cat))")
assert tree == correct_tree
| allennlp-models-main | tests/structured_prediction/models/constituency_parser_test.py |
allennlp-models-main | tests/lm/__init__.py |
|
from allennlp.common.testing import AllenNlpTestCase
from allennlp.data import Vocabulary
from allennlp.data.tokenizers import PretrainedTransformerTokenizer
from allennlp.data.token_indexers import PretrainedTransformerIndexer
from allennlp_models.lm import MaskedLanguageModelingReader
class TestMaskedLanguageModelingDatasetReader(AllenNlpTestCase):
def test_text_to_instance_with_basic_tokenizer_and_indexer(self):
reader = MaskedLanguageModelingReader()
vocab = Vocabulary()
vocab.add_tokens_to_namespace(["This", "is", "a", "[MASK]", "token", "."], "tokens")
instance = reader.text_to_instance(sentence="This is a [MASK] token .", targets=["This"])
assert [t.text for t in instance["tokens"]] == ["This", "is", "a", "[MASK]", "token", "."]
assert [i.sequence_index for i in instance["mask_positions"]] == [3]
assert [t.text for t in instance["target_ids"]] == ["This"]
instance.index_fields(vocab)
tensor_dict = instance.as_tensor_dict(instance.get_padding_lengths())
assert tensor_dict.keys() == {"tokens", "mask_positions", "target_ids"}
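        # Indices 0 and 1 are reserved for padding and OOV in the default vocabulary,
        # so the six tokens added above map to ids 2 through 7.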
assert tensor_dict["tokens"]["tokens"]["tokens"].numpy().tolist() == [2, 3, 4, 5, 6, 7]
assert tensor_dict["target_ids"]["tokens"]["tokens"].numpy().tolist() == [2]
assert tensor_dict["mask_positions"].numpy().tolist() == [[3]]
def test_text_to_instance_with_bert_tokenizer_and_indexer(self):
tokenizer = PretrainedTransformerTokenizer("bert-base-cased")
token_indexer = PretrainedTransformerIndexer("bert-base-cased")
reader = MaskedLanguageModelingReader(tokenizer, {"bert": token_indexer})
instance = reader.text_to_instance(
sentence="This is AllenNLP [MASK] token .", targets=["This"]
)
assert [t.text for t in instance["tokens"]] == [
"[CLS]",
"This",
"is",
"Allen",
"##NL",
"##P",
"[MASK]",
"token",
".",
"[SEP]",
]
assert [i.sequence_index for i in instance["mask_positions"]] == [6]
assert [t.text for t in instance["target_ids"]] == ["This"]
vocab = Vocabulary()
instance.index_fields(vocab)
tensor_dict = instance.as_tensor_dict(instance.get_padding_lengths())
assert tensor_dict.keys() == {"tokens", "mask_positions", "target_ids"}
bert_token_ids = tensor_dict["tokens"]["bert"]["token_ids"].numpy().tolist()
target_ids = tensor_dict["target_ids"]["bert"]["token_ids"].numpy().tolist()
# I don't know what wordpiece id BERT is going to assign to 'This', but it at least should
# be the same between the input and the target.
assert target_ids[0] == bert_token_ids[1]
| allennlp-models-main | tests/lm/dataset_readers/masked_language_modeling_test.py |
allennlp-models-main | tests/lm/dataset_readers/__init__.py |
|
from allennlp.common.testing import AllenNlpTestCase
from allennlp.data import Vocabulary
from allennlp.data.tokenizers import PretrainedTransformerTokenizer
from allennlp.data.token_indexers import PretrainedTransformerIndexer
from allennlp_models.lm import NextTokenLMReader
class TestNextTokenLMReader(AllenNlpTestCase):
def test_text_to_instance_with_basic_tokenizer_and_indexer(self):
reader = NextTokenLMReader()
vocab = Vocabulary()
vocab.add_tokens_to_namespace(["This", "is", "a"], "tokens")
instance = reader.text_to_instance(sentence="This is a", target="This")
assert [t.text for t in instance["tokens"]] == ["This", "is", "a"]
assert [t.text for t in instance["target_ids"]] == ["This"]
instance.index_fields(vocab)
tensor_dict = instance.as_tensor_dict(instance.get_padding_lengths())
assert tensor_dict.keys() == {"tokens", "target_ids"}
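        # Ids 0 and 1 are reserved for padding and OOV in the default vocabulary,
        # so "This", "is", "a" map to ids 2, 3, 4.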
assert tensor_dict["tokens"]["tokens"]["tokens"].numpy().tolist() == [2, 3, 4]
assert tensor_dict["target_ids"]["tokens"]["tokens"].numpy().tolist() == [2]
def test_text_to_instance_with_bert_tokenizer_and_indexer(self):
tokenizer = PretrainedTransformerTokenizer("bert-base-cased")
token_indexer = PretrainedTransformerIndexer("bert-base-cased")
reader = NextTokenLMReader(tokenizer, {"bert": token_indexer})
instance = reader.text_to_instance(sentence="AllenNLP is very", target="very")
assert [t.text for t in instance["tokens"]] == [
"[CLS]",
"Allen",
"##NL",
"##P",
"is",
"very",
"[SEP]",
]
assert [t.text for t in instance["target_ids"]] == ["very"]
vocab = Vocabulary()
instance.index_fields(vocab)
tensor_dict = instance.as_tensor_dict(instance.get_padding_lengths())
assert tensor_dict.keys() == {"tokens", "target_ids"}
bert_token_ids = tensor_dict["tokens"]["bert"]["token_ids"].numpy().tolist()
target_ids = tensor_dict["target_ids"]["bert"]["token_ids"].numpy().tolist()
        # I don't know what wordpiece id BERT is going to assign to 'very', but it at least should
# be the same between the input and the target.
assert target_ids[0] == bert_token_ids[5]
| allennlp-models-main | tests/lm/dataset_readers/next_token_lm_test.py |
import os
from typing import cast
from allennlp.common.testing import AllenNlpTestCase
from allennlp.data.fields import TextField
from allennlp_models.lm import SimpleLanguageModelingDatasetReader
from tests import FIXTURES_ROOT
class TestSimpleLanguageModelingDatasetReader(AllenNlpTestCase):
FIXTURES = FIXTURES_ROOT / "lm" / "language_modeling"
def test_text_to_instance(self):
dataset = SimpleLanguageModelingDatasetReader(start_tokens=["<S>"], end_tokens=["</S>"])
instance = dataset.text_to_instance("The only sentence.")
text = [t.text for t in cast(TextField, instance.fields["source"]).tokens]
assert text == ["<S>", "The", "only", "sentence", ".", "</S>"]
def test_read_single_sentence(self):
prefix = os.path.join(self.FIXTURES, "single_sentence.txt")
dataset = SimpleLanguageModelingDatasetReader()
with open(prefix, "r") as fin:
sentence = fin.read().strip()
expected_batch = dataset.text_to_instance(sentence)
batch = None
for batch in dataset.read(prefix):
break
assert sorted(list(expected_batch.fields.keys())) == sorted(list(batch.fields.keys()))
for k in expected_batch.fields.keys():
assert str(expected_batch.fields[k]) == str(batch.fields[k])
def test_read_multiple_sentences(self):
prefix = os.path.join(self.FIXTURES, "shards/shard0")
dataset = SimpleLanguageModelingDatasetReader()
k = -1
for k, _ in enumerate(dataset.read(prefix)):
pass
assert k == 99
def test_max_sequence_length(self):
prefix = os.path.join(self.FIXTURES, "shards/shard0")
dataset = SimpleLanguageModelingDatasetReader(
max_sequence_length=10, start_tokens=["<S>"], end_tokens=["</S>"]
)
k = -1
for k, _ in enumerate(dataset.read(prefix)):
pass
assert k == 7
| allennlp-models-main | tests/lm/dataset_readers/simple_language_modeling_test.py |
allennlp-models-main | tests/lm/predictors/__init__.py |
|
from allennlp.common.testing import AllenNlpTestCase
from allennlp.models.archival import load_archive
from allennlp.predictors import Predictor
from tests import FIXTURES_ROOT
class TestMaskedLanguageModelPredictor(AllenNlpTestCase):
def test_predictions_to_labeled_instances(self):
inputs = {"sentence": "Eric [MASK] was an intern at [MASK]"}
archive = load_archive(
FIXTURES_ROOT / "lm" / "masked_language_model" / "serialization" / "model.tar.gz"
)
predictor = Predictor.from_archive(archive, "masked_language_model")
instance = predictor._json_to_instance(inputs)
outputs = predictor._model.forward_on_instance(instance)
new_instances = predictor.predictions_to_labeled_instances(instance, outputs)
assert len(new_instances) == 1
assert "target_ids" in new_instances[0]
assert len(new_instances[0]["target_ids"].tokens) == 2 # should have added two words
| allennlp-models-main | tests/lm/predictors/masked_language_model_test.py |
from allennlp.common.testing import AllenNlpTestCase
from allennlp.models.archival import load_archive
from allennlp.predictors import Predictor
from tests import FIXTURES_ROOT
from allennlp_models import lm # noqa: F401
class TestNextTokenLMPredictor(AllenNlpTestCase):
def test_predictions_to_labeled_instances(self):
inputs = {"sentence": "Eric Wallace was an intern at"}
archive = load_archive(
FIXTURES_ROOT / "lm" / "next_token_lm" / "serialization" / "model.tar.gz"
)
predictor = Predictor.from_archive(archive, "next_token_lm")
instance = predictor._json_to_instance(inputs)
outputs = predictor._model.forward_on_instance(instance)
new_instances = predictor.predictions_to_labeled_instances(instance, outputs)
assert len(new_instances) == 1
assert "target_ids" in new_instances[0]
assert len(new_instances[0]["target_ids"].tokens) == 1 # should have added one word
| allennlp-models-main | tests/lm/predictors/next_token_lm_test.py |
allennlp-models-main | tests/lm/models/__init__.py |
|
from allennlp.common.testing import ModelTestCase
from tests import FIXTURES_ROOT
class TestMaskedLanguageModel(ModelTestCase):
def setup_method(self):
super().setup_method()
self.set_up_model(
FIXTURES_ROOT / "lm" / "masked_language_model" / "experiment.json",
FIXTURES_ROOT / "lm" / "language_model" / "sentences.txt",
)
def test_model_can_train_save_and_load(self):
self.ensure_model_can_train_save_and_load(self.param_file)
| allennlp-models-main | tests/lm/models/masked_language_model_test.py |
from allennlp.common.testing import ModelTestCase
from tests import FIXTURES_ROOT
from allennlp_models import lm # noqa: F401
class TestNextTokenLanguageModel(ModelTestCase):
def setup_method(self):
super().setup_method()
self.set_up_model(
FIXTURES_ROOT / "lm" / "next_token_lm" / "experiment.json",
FIXTURES_ROOT / "lm" / "language_model" / "sentences.txt",
)
def test_model_can_train_save_and_load(self):
self.ensure_model_can_train_save_and_load(self.param_file)
class TestNextTokenTransformerLm(ModelTestCase):
def setup_method(self):
super().setup_method()
self.set_up_model(
FIXTURES_ROOT / "lm" / "next_token_lm" / "experiment_transformer.json",
FIXTURES_ROOT / "lm" / "language_model" / "sentences.txt",
)
def test_model_can_train_save_and_load(self):
self.ensure_model_can_train_save_and_load(
self.param_file,
tolerance=1e-3,
gradients_to_ignore={
"_text_field_embedder.token_embedder_tokens.transformer_model.pooler.dense.weight",
"_text_field_embedder.token_embedder_tokens.transformer_model.pooler.dense.bias",
},
)
| allennlp-models-main | tests/lm/models/next_token_lm_test.py |
import numpy as np
import pytest
from allennlp.common.testing import ModelTestCase
from allennlp.common.checks import ConfigurationError
from allennlp.common.params import Params
from allennlp.models import Model
from tests import FIXTURES_ROOT
class LmBaseTestCase(ModelTestCase):
def setup_method(self):
super().setup_method()
self.expected_embedding_shape = (2, 8, 7)
self.bidirectional = False
self.result_keys = {
"loss",
"forward_loss",
"lm_embeddings",
"noncontextual_token_embeddings",
"mask",
"batch_weight",
}
def test_unidirectional_language_model_can_train_save_and_load(self):
self.ensure_model_can_train_save_and_load(self.param_file)
def test_batch_predictions_are_consistent(self):
self.ensure_batch_predictions_are_consistent(keys_to_ignore=["batch_weight"])
def test_forward_pass_runs_correctly(self):
training_tensors = self.dataset.as_tensor_dict()
print(training_tensors)
result = self.model(**training_tensors)
# Unidirectional models should not have backward_loss; bidirectional models should have it.
assert set(result) == self.result_keys
# The model should preserve the BOS / EOS tokens.
embeddings = result["lm_embeddings"]
assert tuple(embeddings.shape) == self.expected_embedding_shape
loss = result["loss"].item()
forward_loss = result["forward_loss"].item()
if self.bidirectional:
backward_loss = result["backward_loss"].item()
np.testing.assert_almost_equal(loss, (forward_loss + backward_loss) / 2, decimal=3)
else:
np.testing.assert_almost_equal(loss, forward_loss, decimal=3)
def test_mismatching_contextualizer_unidirectionality_throws_configuration_error(self):
params = Params.from_file(self.param_file)
# Make the contextualizer unidirectionality wrong - it should be
# False to match the language model.
params["model"]["contextualizer"]["bidirectional"] = not self.bidirectional
with pytest.raises(ConfigurationError):
Model.from_params(vocab=self.vocab, params=params.get("model"))
def test_language_model_forward_on_instances(self):
instances = self.dataset.instances
predictions = self.model.forward_on_instances(instances)
assert predictions is not None
class TestUnidirectionalLanguageModel(LmBaseTestCase):
def setup_method(self):
super().setup_method()
self.expected_embedding_shape = (2, 8, 7)
self.bidirectional = False
self.result_keys = {
"loss",
"forward_loss",
"lm_embeddings",
"noncontextual_token_embeddings",
"mask",
"batch_weight",
}
self.set_up_model(
FIXTURES_ROOT / "lm" / "language_model" / "experiment_unidirectional.jsonnet",
FIXTURES_ROOT / "lm" / "language_model" / "sentences.txt",
)
class TestUnidirectionalLanguageModelUnsampled(LmBaseTestCase):
def setup_method(self):
super().setup_method()
self.set_up_model(
FIXTURES_ROOT / "lm" / "language_model" / "experiment_unidirectional_unsampled.jsonnet",
FIXTURES_ROOT / "lm" / "language_model" / "sentences.txt",
)
class TestUnidirectionalLanguageModelTransformer(LmBaseTestCase):
def setup_method(self):
super().setup_method()
self.expected_embedding_shape = (2, 8, 16)
self.set_up_model(
FIXTURES_ROOT
/ "lm"
/ "language_model"
/ "experiment_unidirectional_transformer.jsonnet",
FIXTURES_ROOT / "lm" / "language_model" / "sentences.txt",
)
def test_unidirectional_language_model_can_train_save_and_load(self):
# Ignore layer 0 feedforward layer norm parameters, since
# they are not used.
self.ensure_model_can_train_save_and_load(
self.param_file,
gradients_to_ignore={
"_contextualizer.feedforward_layer_norm_0.gamma",
"_contextualizer.feedforward_layer_norm_0.beta",
},
)
class TestBidirectionalLanguageModel(LmBaseTestCase):
def setup_method(self):
super().setup_method()
self.expected_embedding_shape = (2, 8, 14)
self.bidirectional = True
self.result_keys.add("backward_loss")
self.set_up_model(
FIXTURES_ROOT / "lm" / "language_model" / "experiment.jsonnet",
FIXTURES_ROOT / "lm" / "language_model" / "sentences.txt",
)
class TestBidirectionalLanguageModelUnsampled(LmBaseTestCase):
def setup_method(self):
super().setup_method()
self.expected_embedding_shape = (2, 8, 14)
self.bidirectional = True
self.result_keys.add("backward_loss")
self.set_up_model(
FIXTURES_ROOT / "lm" / "language_model" / "experiment_unsampled.jsonnet",
FIXTURES_ROOT / "lm" / "language_model" / "sentences.txt",
)
class TestBidirectionalLanguageModelTransformer(LmBaseTestCase):
def setup_method(self):
super().setup_method()
self.expected_embedding_shape = (2, 8, 32)
self.bidirectional = True
self.result_keys.add("backward_loss")
self.set_up_model(
FIXTURES_ROOT / "lm" / "language_model" / "experiment_transformer.jsonnet",
FIXTURES_ROOT / "lm" / "language_model" / "sentences.txt",
)
| allennlp-models-main | tests/lm/models/language_model_test.py |
import numpy as np
from allennlp.common.testing import ModelTestCase
from tests import FIXTURES_ROOT
import allennlp_models.lm
class TestBidirectionalLanguageModel(ModelTestCase):
def setup_method(self):
super().setup_method()
self.expected_embedding_shape = (2, 8, 14)
self.set_up_model(
FIXTURES_ROOT / "lm" / "language_model" / "experiment_bidirectional.jsonnet",
FIXTURES_ROOT / "lm" / "language_model" / "sentences.txt",
)
def test_bidirectional_lm_can_train_save_load(self):
self.ensure_model_can_train_save_and_load(self.param_file)
def test_batch_predictions_are_consistent(self):
self.ensure_batch_predictions_are_consistent(keys_to_ignore=["batch_weight"])
def test_forward_pass_runs_correctly(self):
training_tensors = self.dataset.as_tensor_dict()
result = self.model(**training_tensors)
assert set(result) == {
"loss",
"forward_loss",
"backward_loss",
"lm_embeddings",
"noncontextual_token_embeddings",
"mask",
"batch_weight",
}
# The model should preserve the BOS / EOS tokens.
embeddings = result["lm_embeddings"]
assert tuple(embeddings.shape) == self.expected_embedding_shape
loss = result["loss"].item()
forward_loss = result["forward_loss"].item()
backward_loss = result["backward_loss"].item()
np.testing.assert_almost_equal(loss, (forward_loss + backward_loss) / 2, decimal=3)
class TestBidirectionalLanguageModelUnsampled(TestBidirectionalLanguageModel):
def setup_method(self):
super().setup_method()
self.set_up_model(
FIXTURES_ROOT / "lm" / "language_model" / "experiment_bidirectional_unsampled.jsonnet",
FIXTURES_ROOT / "lm" / "language_model" / "sentences.txt",
)
| allennlp-models-main | tests/lm/models/bidirectional_lm_test.py |
from allennlp.common.testing import AllenNlpTestCase
from allennlp.interpret.attackers import Hotflip
from allennlp.models.archival import load_archive
from allennlp.predictors import Predictor
from allennlp_models.lm import LinearLanguageModelHead
from tests import FIXTURES_ROOT
class TestHotflip(AllenNlpTestCase):
def test_targeted_attack_from_json(self):
inputs = {"sentence": "The doctor ran to the emergency room to see [MASK] patient."}
archive = load_archive(
FIXTURES_ROOT / "lm" / "masked_language_model" / "serialization" / "model.tar.gz"
)
predictor = Predictor.from_archive(archive, "masked_language_model")
hotflipper = Hotflip(predictor, vocab_namespace="tokens")
hotflipper.initialize()
attack = hotflipper.attack_from_json(inputs, target={"words": ["hi"]})
assert attack is not None
assert "final" in attack
assert "original" in attack
assert "outputs" in attack
assert len(attack["final"][0]) == len(
attack["original"]
) # hotflip replaces words without removing
assert attack["final"][0] != attack["original"]
| allennlp-models-main | tests/lm/interpret/lm_hotflip_test.py |
from allennlp.common.testing import AllenNlpTestCase
from allennlp.interpret.saliency_interpreters import SimpleGradient
from allennlp.models.archival import load_archive
from allennlp.predictors import Predictor
from tests import FIXTURES_ROOT
class TestSimpleGradient(AllenNlpTestCase):
def test_simple_gradient_masked_lm(self):
inputs = {
"sentence": "This is a single string [MASK] about a test . Sometimes it "
"contains coreferent parts ."
}
archive = load_archive(
FIXTURES_ROOT / "lm" / "masked_language_model" / "serialization" / "model.tar.gz"
)
predictor = Predictor.from_archive(archive, "masked_language_model")
interpreter = SimpleGradient(predictor)
interpretation = interpreter.saliency_interpret_from_json(inputs)
assert interpretation is not None
assert "instance_1" in interpretation
assert "grad_input_1" in interpretation["instance_1"]
grad_input_1 = interpretation["instance_1"]["grad_input_1"]
assert len(grad_input_1) == 16 # 16 words in input
| allennlp-models-main | tests/lm/interpret/simple_gradient_test.py |
allennlp-models-main | tests/lm/interpret/__init__.py |
|
allennlp-models-main | tests/lm/modules/__init__.py |
|
import torch
from allennlp.common.testing import AllenNlpTestCase
from allennlp_models.lm import BidirectionalLanguageModelTransformer
class TestBidirectionalLanguageModelTransformer(AllenNlpTestCase):
def test_bidirectional_transformer_encoder(self):
transformer_encoder = BidirectionalLanguageModelTransformer(
input_dim=32, hidden_dim=64, num_layers=2
)
token_embeddings = torch.rand(5, 10, 32)
mask = torch.ones(5, 10).bool()
mask[0, 7:] = False
mask[1, 5:] = False
output = transformer_encoder(token_embeddings, mask)
assert list(output.size()) == [5, 10, 64]
    def test_bidirectional_transformer_all_layers(self):
transformer_encoder = BidirectionalLanguageModelTransformer(
input_dim=32, hidden_dim=64, num_layers=2, return_all_layers=True
)
token_embeddings = torch.rand(5, 10, 32)
mask = torch.ones(5, 10).bool()
mask[0, 7:] = False
mask[1, 5:] = False
output = transformer_encoder(token_embeddings, mask)
assert len(output) == 2
concat_layers = torch.cat([layer.unsqueeze(1) for layer in output], dim=1)
# (batch_size, num_layers, timesteps, 2*input_dim)
assert list(concat_layers.size()) == [5, 2, 10, 64]
def test_attention_masks(self):
transformer_encoder = BidirectionalLanguageModelTransformer(
input_dim=32, hidden_dim=64, num_layers=2
)
mask = torch.ones(3, 6).bool()
mask[0, 3:] = False
mask[1, 5:] = False
forward_mask, backward_mask = transformer_encoder.get_attention_masks(mask)
# rows = position in sequence
# columns = positions used for attention
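        # The forward mask is lower triangular (each position attends to itself and
        # earlier positions) and the backward mask is upper triangular; positions
        # beyond each sequence's true length are masked out entirely.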
assert (
forward_mask[0].data
== torch.IntTensor(
[
[1, 0, 0, 0, 0, 0],
[1, 1, 0, 0, 0, 0],
[1, 1, 1, 0, 0, 0],
[0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0],
]
)
).all()
assert (
forward_mask[1].data
== torch.IntTensor(
[
[1, 0, 0, 0, 0, 0],
[1, 1, 0, 0, 0, 0],
[1, 1, 1, 0, 0, 0],
[1, 1, 1, 1, 0, 0],
[1, 1, 1, 1, 1, 0],
[0, 0, 0, 0, 0, 0],
]
)
).all()
assert (
forward_mask[2].data
== torch.IntTensor(
[
[1, 0, 0, 0, 0, 0],
[1, 1, 0, 0, 0, 0],
[1, 1, 1, 0, 0, 0],
[1, 1, 1, 1, 0, 0],
[1, 1, 1, 1, 1, 0],
[1, 1, 1, 1, 1, 1],
]
)
).all()
assert (
backward_mask[0].data
== torch.IntTensor(
[
[1, 1, 1, 0, 0, 0],
[0, 1, 1, 0, 0, 0],
[0, 0, 1, 0, 0, 0],
[0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0],
]
)
).all()
assert (
backward_mask[1].data
== torch.IntTensor(
[
[1, 1, 1, 1, 1, 0],
[0, 1, 1, 1, 1, 0],
[0, 0, 1, 1, 1, 0],
[0, 0, 0, 1, 1, 0],
[0, 0, 0, 0, 1, 0],
[0, 0, 0, 0, 0, 0],
]
)
).all()
assert (
backward_mask[2].data
== torch.IntTensor(
[
[1, 1, 1, 1, 1, 1],
[0, 1, 1, 1, 1, 1],
[0, 0, 1, 1, 1, 1],
[0, 0, 0, 1, 1, 1],
[0, 0, 0, 0, 1, 1],
[0, 0, 0, 0, 0, 1],
]
)
).all()
| allennlp-models-main | tests/lm/modules/seq2seq_encoders/bidirectional_lm_transformer_test.py |
allennlp-models-main | tests/lm/modules/seq2seq_encoders/__init__.py |
|
allennlp-models-main | tests/lm/modules/language_model_heads/__init__.py |
|
import torch
from allennlp.common import Params
from allennlp.common.testing.test_case import AllenNlpTestCase
from allennlp_models.lm.modules.language_model_heads import LanguageModelHead, BertLanguageModelHead
class TestBertLanguageModelHead(AllenNlpTestCase):
def test_can_init_and_run(self):
# The LM head code reads a module from somewhere else; we're basically just testing here
# that we can initialize the expected model `from_params`.
head = LanguageModelHead.from_params(
Params({"type": "bert", "model_name": "bert-base-uncased"})
)
assert isinstance(head, BertLanguageModelHead)
assert head.get_input_dim() == 768
assert head.get_output_dim() == 30522
tensor = torch.rand(1, 768)
logits = head(tensor)
assert tuple(logits.size()) == (1, 30522)
| allennlp-models-main | tests/lm/modules/language_model_heads/bert_test.py |
import torch
from allennlp.common import Params
from allennlp.common.testing.test_case import AllenNlpTestCase
from allennlp_models.lm.modules.language_model_heads import LanguageModelHead, Gpt2LanguageModelHead
class TestGpt2LanguageModelHead(AllenNlpTestCase):
def test_can_init_and_run(self):
# The LM head code reads a module from somewhere else; we're basically just testing here
# that we can initialize the expected model `from_params`.
head = LanguageModelHead.from_params(Params({"type": "gpt2", "model_name": "gpt2"}))
assert isinstance(head, Gpt2LanguageModelHead)
assert head.get_input_dim() == 768
assert head.get_output_dim() == 50257
tensor = torch.rand(1, 768)
logits = head(tensor)
assert tuple(logits.size()) == (1, 50257)
| allennlp-models-main | tests/lm/modules/language_model_heads/gpt2_test.py |
allennlp-models-main | tests/lm/modules/token_embedders/__init__.py |
|
from allennlp.common.testing import ModelTestCase
from allennlp.data.batch import Batch
from tests import FIXTURES_ROOT
class TestLanguageModelTokenEmbedder(ModelTestCase):
def setup_method(self):
super().setup_method()
self.set_up_model(
FIXTURES_ROOT / "lm" / "language_model" / "characters_token_embedder.json",
FIXTURES_ROOT / "lm" / "conll2003.txt",
)
def test_tagger_with_language_model_token_embedder_can_train_save_and_load(self):
self.ensure_model_can_train_save_and_load(self.param_file)
def test_tagger_with_language_model_token_embedder_forward_pass_runs_correctly(self):
dataset = Batch(self.instances)
dataset.index_instances(self.vocab)
training_tensors = dataset.as_tensor_dict()
output_dict = self.model(**training_tensors)
tags = output_dict["tags"]
assert len(tags) == 2
assert len(tags[0]) == 7
assert len(tags[1]) == 7
for example_tags in tags:
for tag_id in example_tags:
tag = self.model.vocab.get_token_from_index(tag_id, namespace="labels")
assert tag in {"O", "I-ORG", "I-PER", "I-LOC"}
class TestLanguageModelTokenEmbedderWithoutBosEos(TestLanguageModelTokenEmbedder):
def setup_method(self):
super().setup_method()
self.set_up_model(
FIXTURES_ROOT
/ "lm"
/ "language_model"
/ "characters_token_embedder_without_bos_eos.jsonnet",
FIXTURES_ROOT / "lm" / "conll2003.txt",
)
| allennlp-models-main | tests/lm/modules/token_embedders/language_model_test.py |
from tests import FIXTURES_ROOT
from tests.lm.modules.token_embedders.language_model_test import TestLanguageModelTokenEmbedder
class TestBidirectionalLanguageModelTokenEmbedder(TestLanguageModelTokenEmbedder):
def setup_method(self):
super().setup_method()
self.set_up_model(
FIXTURES_ROOT
/ "lm"
/ "language_model"
/ "bidirectional_lm_characters_token_embedder.jsonnet",
FIXTURES_ROOT / "lm" / "conll2003.txt",
)
class TestBidirectionalLanguageModelTokenEmbedderWithoutBosEos(TestLanguageModelTokenEmbedder):
def setup_method(self):
super().setup_method()
self.set_up_model(
FIXTURES_ROOT
/ "lm"
/ "language_model"
/ "bidirectional_lm_characters_token_embedder_without_bos_eos.jsonnet",
FIXTURES_ROOT / "lm" / "conll2003.txt",
)
| allennlp-models-main | tests/lm/modules/token_embedders/bidirectional_lm_test.py |
allennlp-models-main | tests/vision/__init__.py |
|
from typing import Any, Dict, List, Tuple, Union
import pytest
import torch
from allennlp.common.testing import (
AllenNlpTestCase,
multi_device,
global_distributed_metric,
run_distributed_test,
)
from allennlp_models.vision import VqaMeasure
class VqaMeasureTest(AllenNlpTestCase):
@multi_device
def test_vqa(self, device: str):
vqa = VqaMeasure()
logits = torch.tensor(
[[0.35, 0.25, 0.1, 0.1, 0.2], [0.1, 0.6, 0.1, 0.2, 0.0]], device=device
)
labels = torch.tensor([[0], [3]], device=device)
label_weights = torch.tensor([[1 / 3], [2 / 3]], device=device)
vqa(logits, labels, label_weights)
vqa_score = vqa.get_metric()["score"]
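        # Only the first example is predicted correctly (argmax 0 matches label 0), and its
        # label weight is 1/3, so the score averaged over the two examples is (1/3) / 2.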
assert vqa_score == pytest.approx((1 / 3) / 2)
@multi_device
def test_vqa_accumulates_and_resets_correctly(self, device: str):
vqa = VqaMeasure()
logits = torch.tensor(
[[0.35, 0.25, 0.1, 0.1, 0.2], [0.1, 0.6, 0.1, 0.2, 0.0]], device=device
)
labels = torch.tensor([[0], [3]], device=device)
labels2 = torch.tensor([[4], [4]], device=device)
label_weights = torch.tensor([[1 / 3], [2 / 3]], device=device)
vqa(logits, labels, label_weights)
vqa(logits, labels, label_weights)
vqa(logits, labels2, label_weights)
vqa(logits, labels2, label_weights)
vqa_score = vqa.get_metric(reset=True)["score"]
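        # Across the four calls there are 8 examples; only the first example of each of the
        # first two calls is correct (weight 1/3 each), giving (1/3 + 1/3 + 0 + 0) / 8.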
assert vqa_score == pytest.approx((1 / 3 + 1 / 3 + 0 + 0) / 8)
vqa(logits, labels, label_weights)
vqa_score = vqa.get_metric(reset=True)["score"]
assert vqa_score == pytest.approx((1 / 3) / 2)
@multi_device
def test_does_not_divide_by_zero_with_no_count(self, device: str):
vqa = VqaMeasure()
assert vqa.get_metric()["score"] == pytest.approx(0.0)
def test_distributed_accuracy(self):
logits = [
torch.tensor([[0.35, 0.25, 0.1, 0.1, 0.2]]),
torch.tensor([[0.1, 0.6, 0.1, 0.2, 0.0]]),
]
labels = [torch.tensor([[0]]), torch.tensor([[3]])]
label_weights = [torch.tensor([[1 / 3]]), torch.tensor([[2 / 3]])]
metric_kwargs = {"logits": logits, "labels": labels, "label_weights": label_weights}
desired_accuracy = {"score": (1 / 3) / 2}
run_distributed_test(
[-1, -1],
global_distributed_metric,
VqaMeasure(),
metric_kwargs,
desired_accuracy,
exact=False,
)
def test_distributed_accuracy_unequal_batches(self):
logits = [
torch.tensor([[0.35, 0.25, 0.1, 0.1, 0.2], [0.35, 0.25, 0.1, 0.1, 0.2]]),
torch.tensor([[0.1, 0.6, 0.1, 0.2, 0.0]]),
]
labels = [torch.tensor([[0], [0]]), torch.tensor([[3]])]
label_weights = [torch.tensor([[1], [1]]), torch.tensor([[1 / 3]])]
metric_kwargs = {"logits": logits, "labels": labels, "label_weights": label_weights}
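        # The first worker's two examples are both correct with weight 1, the second
        # worker's single example is wrong, so the aggregated score is (1 + 1 + 0) / 3.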
desired_accuracy = {"score": (1 + 1 + 0) / 3}
run_distributed_test(
[-1, -1],
global_distributed_metric,
VqaMeasure(),
metric_kwargs,
desired_accuracy,
exact=False,
)
def test_multiple_distributed_runs(self):
logits = [
torch.tensor([[0.35, 0.25, 0.1, 0.1, 0.2]]),
torch.tensor([[0.1, 0.6, 0.1, 0.2, 0.0]]),
]
labels = [torch.tensor([[0]]), torch.tensor([[3]])]
label_weights = [torch.tensor([[1 / 3]]), torch.tensor([[2 / 3]])]
metric_kwargs = {"logits": logits, "labels": labels, "label_weights": label_weights}
desired_accuracy = {"score": (1 / 3) / 2}
run_distributed_test(
[-1, -1],
global_distributed_metric,
VqaMeasure(),
metric_kwargs,
desired_accuracy,
exact=True,
number_of_runs=200,
)
| allennlp-models-main | tests/vision/metrics/vqa_test.py |
from allennlp.common.lazy import Lazy
from allennlp.common.testing import AllenNlpTestCase
from allennlp.data.image_loader import TorchImageLoader
from allennlp.modules.vision.grid_embedder import NullGridEmbedder
from allennlp.modules.vision.region_detector import RandomRegionDetector
from allennlp_models.vision.dataset_readers.vision_reader import VisionReader
from tests import FIXTURES_ROOT
class TestVisionReader(AllenNlpTestCase):
def test_load_images(self):
reader = VisionReader(
image_dir=FIXTURES_ROOT / "vision" / "images" / "vision_reader",
image_loader=TorchImageLoader(),
image_featurizer=Lazy(NullGridEmbedder),
region_detector=Lazy(RandomRegionDetector),
)
assert len(reader.images) == 3
assert set(reader.images.keys()) == {
"png_example.png",
"jpg_example.jpg",
"jpeg_example.jpeg",
}
| allennlp-models-main | tests/vision/dataset_readers/vision_reader_test.py |
import torch
from allennlp.common.testing import AllenNlpTestCase
from allennlp.common.lazy import Lazy
from allennlp.data import Batch, Vocabulary
from allennlp.data.image_loader import TorchImageLoader
from allennlp.data.tokenizers import WhitespaceTokenizer
from allennlp.data.token_indexers import SingleIdTokenIndexer
from allennlp.modules.vision.grid_embedder import NullGridEmbedder
from allennlp.modules.vision.region_detector import RandomRegionDetector
from tests import FIXTURES_ROOT
class TestVGQAReader(AllenNlpTestCase):
def test_read(self):
from allennlp_models.vision.dataset_readers.vgqa import VGQAReader
reader = VGQAReader(
image_dir=FIXTURES_ROOT / "vision" / "images" / "vgqa",
image_loader=TorchImageLoader(),
image_featurizer=Lazy(NullGridEmbedder),
region_detector=Lazy(RandomRegionDetector),
tokenizer=WhitespaceTokenizer(),
token_indexers={"tokens": SingleIdTokenIndexer()},
)
instances = list(reader.read("test_fixtures/vision/vgqa/question_answers.json"))
assert len(instances) == 8
instance = instances[0]
assert len(instance.fields) == 6
assert len(instance["question"]) == 5
question_tokens = [t.text for t in instance["question"]]
assert question_tokens == ["What", "is", "on", "the", "curtains?"]
assert len(instance["labels"]) == 1
labels = [field.label for field in instance["labels"].field_list]
assert labels == ["sailboats"]
batch = Batch(instances)
batch.index_instances(Vocabulary())
tensors = batch.as_tensor_dict()
# (batch size, num boxes (fake), num features (fake))
assert tensors["box_features"].size() == (8, 2, 10)
# (batch size, num boxes (fake), 4 coords)
assert tensors["box_coordinates"].size() == (8, 2, 4)
# (batch size, num boxes (fake))
assert tensors["box_mask"].size() == (8, 2)
# Nothing should be masked out since the number of fake boxes is the same
# for each item in the batch.
assert tensors["box_mask"].all()
| allennlp-models-main | tests/vision/dataset_readers/vgqa_test.py |
from allennlp.common.lazy import Lazy
from allennlp.common.testing import AllenNlpTestCase
from allennlp.data import Batch, Vocabulary
from allennlp.data.image_loader import TorchImageLoader
from allennlp.data.tokenizers import WhitespaceTokenizer
from allennlp.data.token_indexers import SingleIdTokenIndexer
from allennlp.modules.vision.grid_embedder import NullGridEmbedder
from allennlp.modules.vision.region_detector import RandomRegionDetector
from tests import FIXTURES_ROOT
class TestVisualEntailmentReader(AllenNlpTestCase):
def test_read(self):
from allennlp_models.vision.dataset_readers.visual_entailment import VisualEntailmentReader
reader = VisualEntailmentReader(
image_dir=FIXTURES_ROOT / "vision" / "images" / "visual_entailment",
image_loader=TorchImageLoader(),
image_featurizer=Lazy(NullGridEmbedder),
region_detector=Lazy(RandomRegionDetector),
tokenizer=WhitespaceTokenizer(),
token_indexers={"tokens": SingleIdTokenIndexer()},
)
instances = list(reader.read("test_fixtures/vision/visual_entailment/sample_pairs.jsonl"))
assert len(instances) == 16
instance = instances[0]
assert len(instance.fields) == 5
assert len(instance["hypothesis"]) == 4
sentence_tokens = [t.text for t in instance["hypothesis"]]
assert sentence_tokens == ["A", "toddler", "sleeps", "outside."]
assert instance["labels"].label == "contradiction"
batch = Batch(instances)
vocab = Vocabulary()
vocab.add_tokens_to_namespace(["entailment", "contradiction", "neutral"], "labels")
batch.index_instances(vocab)
tensors = batch.as_tensor_dict()
# (batch size, num boxes (fake), num features (fake))
assert tensors["box_features"].size() == (16, 2, 10)
# (batch size, num boxes (fake), 4 coords)
assert tensors["box_coordinates"].size() == (16, 2, 4)
# (batch_size, num boxes (fake),)
assert tensors["box_mask"].size() == (16, 2)
| allennlp-models-main | tests/vision/dataset_readers/visual_entailment_test.py |
import torch
from allennlp.common.testing import AllenNlpTestCase
from allennlp.common.lazy import Lazy
from allennlp.data import Batch, Vocabulary
from allennlp.data.image_loader import TorchImageLoader
from allennlp.data.tokenizers import WhitespaceTokenizer
from allennlp.data.token_indexers import SingleIdTokenIndexer
from allennlp.modules.vision.grid_embedder import NullGridEmbedder
from allennlp.modules.vision.region_detector import RandomRegionDetector
from tests import FIXTURES_ROOT
class TestVQAv2Reader(AllenNlpTestCase):
def test_read(self):
from allennlp_models.vision.dataset_readers.vqav2 import VQAv2Reader
reader = VQAv2Reader(
image_dir=FIXTURES_ROOT / "vision" / "images" / "vqav2",
image_loader=TorchImageLoader(),
image_featurizer=Lazy(NullGridEmbedder),
region_detector=Lazy(RandomRegionDetector),
tokenizer=WhitespaceTokenizer(),
token_indexers={"tokens": SingleIdTokenIndexer()},
)
instances = list(reader.read("unittest"))
assert len(instances) == 3
instance = instances[0]
assert len(instance.fields) == 6
assert len(instance["question"]) == 7
question_tokens = [t.text for t in instance["question"]]
assert question_tokens == ["What", "is", "this", "photo", "taken", "looking", "through?"]
assert len(instance["labels"]) == 5
labels = [field.label for field in instance["labels"].field_list]
assert labels == ["net", "netting", "mesh", "pitcher", "orange"]
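        # Label weights are the VQA-style soft scores from the annotations: the consensus
        # answer gets full weight 1.0 and the less frequent answers get 1/3 here.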
assert torch.allclose(
instance["label_weights"].tensor,
torch.tensor([1.0, 1.0 / 3, 1.0 / 3, 1.0 / 3, 1.0 / 3]),
)
batch = Batch(instances)
batch.index_instances(Vocabulary())
tensors = batch.as_tensor_dict()
# (batch size, num boxes (fake), num features (fake))
assert tensors["box_features"].size() == (3, 2, 10)
# (batch size, num boxes (fake), 4 coords)
assert tensors["box_coordinates"].size() == (3, 2, 4)
# (batch size, num boxes (fake),)
assert tensors["box_mask"].size() == (3, 2)
# Nothing should be masked out since the number of fake boxes is the same
# for each item in the batch.
assert tensors["box_mask"].all()
def test_read_without_images(self):
from allennlp_models.vision.dataset_readers.vqav2 import VQAv2Reader
reader = VQAv2Reader(
tokenizer=WhitespaceTokenizer(),
token_indexers={"tokens": SingleIdTokenIndexer()},
)
instances = list(reader.read("unittest"))
assert len(instances) == 3
assert "box_coordinates" not in instances[0]
assert "box_features" not in instances[0]
assert "box_mask" not in instances[0]
| allennlp-models-main | tests/vision/dataset_readers/vqav2_test.py |
from allennlp.common.lazy import Lazy
from allennlp.common.testing import AllenNlpTestCase
from allennlp.data import Batch, Vocabulary
from allennlp.data.image_loader import TorchImageLoader
from allennlp.data.tokenizers import WhitespaceTokenizer
from allennlp.data.token_indexers import SingleIdTokenIndexer
from allennlp.modules.vision.grid_embedder import NullGridEmbedder
from allennlp.modules.vision.region_detector import RandomRegionDetector
from tests import FIXTURES_ROOT
class TestGQAReader(AllenNlpTestCase):
def setup_method(self):
from allennlp_models.vision.dataset_readers.gqa import GQAReader
super().setup_method()
self.reader = GQAReader(
image_dir=FIXTURES_ROOT / "vision" / "images" / "gqa",
image_loader=TorchImageLoader(),
image_featurizer=Lazy(NullGridEmbedder),
region_detector=Lazy(RandomRegionDetector),
tokenizer=WhitespaceTokenizer(),
token_indexers={"tokens": SingleIdTokenIndexer()},
)
def test_read(self):
instances = list(self.reader.read("test_fixtures/vision/gqa/questions.json"))
assert len(instances) == 1
instance = instances[0]
assert len(instance.fields) == 6
assert len(instance["question"]) == 6
question_tokens = [t.text for t in instance["question"]]
assert question_tokens == ["What", "is", "hanging", "above", "the", "chalkboard?"]
assert instance["labels"][0].label == "picture"
batch = Batch(instances)
batch.index_instances(Vocabulary())
tensors = batch.as_tensor_dict()
# (batch size, num boxes (fake), num features (fake))
assert tensors["box_features"].size() == (1, 2, 10)
# (batch size, num boxes (fake), 4 coords)
assert tensors["box_coordinates"].size() == (1, 2, 4)
# (batch size, num boxes (fake),)
assert tensors["box_mask"].size() == (1, 2)
def test_read_from_dir(self):
# Test reading from multiple files in a directory
instances = list(self.reader.read("test_fixtures/vision/gqa/question_dir/"))
assert len(instances) == 2
instance = instances[1]
assert len(instance.fields) == 6
assert len(instance["question"]) == 10
question_tokens = [t.text for t in instance["question"]]
assert question_tokens == [
"Does",
"the",
"table",
"below",
"the",
"water",
"look",
"wooden",
"and",
"round?",
]
assert instance["labels"][0].label == "yes"
batch = Batch(instances)
batch.index_instances(Vocabulary())
tensors = batch.as_tensor_dict()
# (batch size, num boxes (fake), num features (fake))
assert tensors["box_features"].size() == (2, 2, 10)
# (batch size, num boxes (fake), 4 coords)
assert tensors["box_coordinates"].size() == (2, 2, 4)
# (batch size, num boxes (fake),)
assert tensors["box_mask"].size() == (2, 2)
| allennlp-models-main | tests/vision/dataset_readers/gqa_test.py |
from allennlp.common.testing import AllenNlpTestCase
from allennlp.common.lazy import Lazy
from allennlp.data import Batch, Vocabulary
from allennlp.data.image_loader import TorchImageLoader
from allennlp.data.tokenizers import WhitespaceTokenizer
from allennlp.data.token_indexers import SingleIdTokenIndexer
from allennlp.modules.vision.grid_embedder import NullGridEmbedder
from allennlp.modules.vision.region_detector import RandomRegionDetector
from tests import FIXTURES_ROOT
class TestNlvr2Reader(AllenNlpTestCase):
def test_read(self):
from allennlp_models.vision.dataset_readers.nlvr2 import Nlvr2Reader
reader = Nlvr2Reader(
image_dir=FIXTURES_ROOT / "vision" / "images" / "nlvr2",
image_loader=TorchImageLoader(),
image_featurizer=Lazy(NullGridEmbedder),
region_detector=Lazy(RandomRegionDetector),
tokenizer=WhitespaceTokenizer(),
token_indexers={"tokens": SingleIdTokenIndexer()},
)
instances = list(reader.read("test_fixtures/vision/nlvr2/tiny-dev.json"))
assert len(instances) == 8
instance = instances[0]
assert len(instance.fields) == 6
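        # NLVR2 pairs each sentence with two images, so the reader stores the
        # hypothesis twice (one copy per image).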
assert instance["hypothesis"][0] == instance["hypothesis"][1]
assert len(instance["hypothesis"][0]) == 18
hypothesis_tokens = [t.text for t in instance["hypothesis"][0]]
assert hypothesis_tokens[:6] == ["The", "right", "image", "shows", "a", "curving"]
assert instance["label"].label == 0
assert instances[1]["label"].label == 1
assert instance["identifier"].metadata == "dev-850-0-0"
batch = Batch(instances)
batch.index_instances(Vocabulary())
tensors = batch.as_tensor_dict()
# (batch size, 2 images per instance, num boxes (fake), num features (fake))
assert tensors["box_features"].size() == (8, 2, 2, 10)
# (batch size, 2 images per instance, num boxes (fake), 4 coords)
assert tensors["box_coordinates"].size() == (8, 2, 2, 4)
# (batch size, 2 images per instance, num boxes (fake))
assert tensors["box_mask"].size() == (8, 2, 2)
| allennlp-models-main | tests/vision/dataset_readers/nlvr2_test.py |
from allennlp.common.lazy import Lazy
from allennlp.common.testing import AllenNlpTestCase
from allennlp.data import Batch, Vocabulary
from allennlp.data.image_loader import TorchImageLoader
from allennlp.data.tokenizers import WhitespaceTokenizer
from allennlp.data.token_indexers import SingleIdTokenIndexer
from allennlp_models.vision.dataset_readers.flickr30k import Flickr30kReader
from allennlp.modules.vision.grid_embedder import NullGridEmbedder
from allennlp.modules.vision.region_detector import RandomRegionDetector
import random
from tests import FIXTURES_ROOT
class TestFlickr30kReader(AllenNlpTestCase):
def setup_method(self):
super().setup_method()
def test_train_read(self):
self.reader = Flickr30kReader(
image_dir=FIXTURES_ROOT / "vision" / "images" / "flickr30k",
image_loader=TorchImageLoader(),
image_featurizer=Lazy(NullGridEmbedder),
data_dir=FIXTURES_ROOT / "vision" / "flickr30k" / "sentences",
region_detector=Lazy(RandomRegionDetector),
tokenizer=WhitespaceTokenizer(),
token_indexers={"tokens": SingleIdTokenIndexer()},
featurize_captions=False,
num_potential_hard_negatives=4,
)
instances = list(self.reader.read("test_fixtures/vision/flickr30k/test.txt"))
assert len(instances) == 25
instance = instances[5]
assert len(instance.fields) == 5
assert len(instance["caption"]) == 4
assert len(instance["caption"][0]) == 12 # 16
assert instance["caption"][0] != instance["caption"][1]
assert instance["caption"][0] == instance["caption"][2]
assert instance["caption"][0] == instance["caption"][3]
question_tokens = [t.text for t in instance["caption"][0]]
assert question_tokens == [
"girl",
"with",
"brown",
"hair",
"sits",
"on",
"edge",
"of",
"concrete",
"area",
"overlooking",
"water",
]
batch = Batch(instances)
batch.index_instances(Vocabulary())
tensors = batch.as_tensor_dict()
# (batch size, num images (3 hard negatives + gold image), num boxes (fake), num features (fake))
assert tensors["box_features"].size() == (25, 4, 2, 10)
# (batch size, num images (3 hard negatives + gold image), num boxes (fake), 4 coords)
assert tensors["box_coordinates"].size() == (25, 4, 2, 4)
# (batch size, num images (3 hard negatives + gold image), num boxes (fake),)
assert tensors["box_mask"].size() == (25, 4, 2)
# (batch size)
assert tensors["label"].size() == (25,)
def test_evaluation_read(self):
self.reader = Flickr30kReader(
image_dir=FIXTURES_ROOT / "vision" / "images" / "flickr30k",
image_loader=TorchImageLoader(),
image_featurizer=Lazy(NullGridEmbedder),
data_dir=FIXTURES_ROOT / "vision" / "flickr30k" / "sentences",
region_detector=Lazy(RandomRegionDetector),
tokenizer=WhitespaceTokenizer(),
token_indexers={"tokens": SingleIdTokenIndexer()},
featurize_captions=False,
is_evaluation=True,
num_potential_hard_negatives=4,
)
instances = list(self.reader.read("test_fixtures/vision/flickr30k/test.txt"))
assert len(instances) == 25
instance = instances[5]
assert len(instance.fields) == 5
assert len(instance["caption"]) == 5
assert len(instance["caption"][0]) == 12
question_tokens = [t.text for t in instance["caption"][0]]
assert question_tokens == [
"girl",
"with",
"brown",
"hair",
"sits",
"on",
"edge",
"of",
"concrete",
"area",
"overlooking",
"water",
]
batch = Batch(instances)
batch.index_instances(Vocabulary())
tensors = batch.as_tensor_dict()
# (batch size, num images (total), num boxes (fake), num features (fake))
assert tensors["box_features"].size() == (25, 5, 2, 10)
# (batch size, num images (total), num boxes (fake), 4 coords)
assert tensors["box_coordinates"].size() == (25, 5, 2, 4)
# (batch size, num images (total), num boxes (fake),)
assert tensors["box_mask"].size() == (25, 5, 2)
# (batch size)
assert tensors["label"].size() == (25,)
| allennlp-models-main | tests/vision/dataset_readers/flickr30k_test.py |
from torch.testing import assert_allclose
from transformers import AutoModel
from allennlp.common.testing import ModelTestCase
from allennlp.data import Vocabulary
from allennlp.common.testing import assert_equal_parameters
from allennlp_models import vision # noqa: F401
from tests import FIXTURES_ROOT
class TestIRVilbert(ModelTestCase):
def test_model_can_train_save_and_load_small_model(self):
param_file = FIXTURES_ROOT / "vision" / "flickr30k" / "experiment.jsonnet"
self.ensure_model_can_train_save_and_load(
param_file, gradients_to_ignore={"classifier.weight", "classifier.bias"}, seed=12345
)
def test_model_can_train_save_and_load_with_cache(self):
import tempfile
with tempfile.TemporaryDirectory(prefix=self.__class__.__name__) as d:
overrides = {"dataset_reader.feature_cache_dir": str(d)}
import json
overrides = json.dumps(overrides)
param_file = FIXTURES_ROOT / "vision" / "flickr30k" / "experiment.jsonnet"
self.ensure_model_can_train_save_and_load(
param_file,
overrides=overrides,
gradients_to_ignore={"classifier.weight", "classifier.bias"},
seed=12345,
)
def test_model_can_train_save_and_load_from_huggingface(self):
param_file = FIXTURES_ROOT / "vision" / "flickr30k" / "experiment_from_huggingface.jsonnet"
self.ensure_model_can_train_save_and_load(
param_file, gradients_to_ignore={"classifier.weight", "classifier.bias"}, seed=12345
)
def test_model_loads_weights_correctly(self):
from allennlp_models.vision.models.vilbert_image_retrieval import ImageRetrievalVilbert
vocab = Vocabulary()
model_name = "epwalsh/bert-xsmall-dummy"
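        # As the name suggests, "epwalsh/bert-xsmall-dummy" is a tiny dummy BERT, which keeps
        # from_huggingface_model_name fast in this test.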
model = ImageRetrievalVilbert.from_huggingface_model_name(
vocab=vocab,
model_name=model_name,
image_feature_dim=2048,
image_num_hidden_layers=1,
image_hidden_size=3,
image_num_attention_heads=1,
combined_num_attention_heads=1,
combined_hidden_size=5,
pooled_output_dim=7,
image_intermediate_size=11,
image_attention_dropout=0.0,
image_hidden_dropout=0.0,
image_biattention_id=[0, 1],
text_biattention_id=[0, 1],
text_fixed_layer=0,
image_fixed_layer=0,
)
transformer = AutoModel.from_pretrained(model_name)
# compare embedding parameters
assert_allclose(
transformer.embeddings.word_embeddings.weight.data,
model.backbone.text_embeddings.embeddings.word_embeddings.weight.data,
)
# compare encoder parameters
assert_allclose(
transformer.encoder.layer[0].intermediate.dense.weight.data,
model.backbone.encoder.layers1[0].intermediate.dense.weight.data,
)
| allennlp-models-main | tests/vision/models/vilbert_ir_test.py |
from torch.testing import assert_allclose
from transformers import AutoModel
from allennlp.common.testing import ModelTestCase
from allennlp.data import Vocabulary
from allennlp_models.vision.models.vilbert_vqa import VqaVilbert
from tests import FIXTURES_ROOT
class TestVqaVilbert(ModelTestCase):
def test_model_can_train_save_and_load_small_model(self):
param_file = FIXTURES_ROOT / "vision" / "vilbert_vqa" / "experiment.jsonnet"
self.ensure_model_can_train_save_and_load(param_file)
def test_model_can_train_save_and_load_with_cache(self):
import tempfile
with tempfile.TemporaryDirectory(prefix=self.__class__.__name__) as d:
overrides = {"dataset_reader.feature_cache_dir": str(d)}
import json
overrides = json.dumps(overrides)
param_file = FIXTURES_ROOT / "vision" / "vilbert_vqa" / "experiment.jsonnet"
self.ensure_model_can_train_save_and_load(param_file, overrides=overrides)
def test_model_can_train_save_and_load_from_huggingface(self):
param_file = (
FIXTURES_ROOT / "vision" / "vilbert_vqa" / "experiment_from_huggingface.jsonnet"
)
self.ensure_model_can_train_save_and_load(param_file)
def test_model_loads_weights_correctly(self):
vocab = Vocabulary()
vocab.add_tokens_to_namespace(["orange", "net", "netting", "pitcher", "catcher"], "answers")
model_name = "epwalsh/bert-xsmall-dummy"
model = VqaVilbert.from_huggingface_model_name(
vocab=vocab,
model_name=model_name,
image_feature_dim=2048,
image_num_hidden_layers=1,
image_hidden_size=6,
combined_hidden_size=10,
pooled_output_dim=7,
image_intermediate_size=11,
image_attention_dropout=0.0,
image_hidden_dropout=0.0,
image_biattention_id=[0, 1],
text_biattention_id=[0, 1],
text_fixed_layer=0,
image_fixed_layer=0,
image_num_attention_heads=3,
combined_num_attention_heads=2,
)
transformer = AutoModel.from_pretrained(model_name)
# compare embedding parameters
assert_allclose(
transformer.embeddings.word_embeddings.weight.data,
model.backbone.text_embeddings.embeddings.word_embeddings.weight.data,
)
# compare encoder parameters
assert_allclose(
transformer.encoder.layer[0].intermediate.dense.weight.data,
model.backbone.encoder.layers1[0].intermediate.dense.weight.data,
)
| allennlp-models-main | tests/vision/models/vilbert_vqa_test.py |
from torch.testing import assert_allclose
from transformers import AutoModel
from allennlp.common.testing import ModelTestCase
from allennlp.data import Vocabulary
from allennlp_models import vision # noqa: F401
from tests import FIXTURES_ROOT
class TestNlvr2Vilbert(ModelTestCase):
def test_model_can_train_save_and_load_small_model(self):
param_file = FIXTURES_ROOT / "vision" / "nlvr2" / "experiment.jsonnet"
self.ensure_model_can_train_save_and_load(
param_file, gradients_to_ignore={"classifier.weight", "classifier.bias"}
)
def test_model_can_train_save_and_load_with_cache(self):
import tempfile
with tempfile.TemporaryDirectory(prefix=self.__class__.__name__) as d:
overrides = {"dataset_reader.feature_cache_dir": str(d)}
import json
overrides = json.dumps(overrides)
param_file = FIXTURES_ROOT / "vision" / "nlvr2" / "experiment.jsonnet"
self.ensure_model_can_train_save_and_load(
param_file,
overrides=overrides,
gradients_to_ignore={"classifier.weight", "classifier.bias"},
)
def test_model_can_train_save_and_load_from_huggingface(self):
param_file = FIXTURES_ROOT / "vision" / "nlvr2" / "experiment_from_huggingface.jsonnet"
self.ensure_model_can_train_save_and_load(
param_file, gradients_to_ignore={"classifier.weight", "classifier.bias"}
)
def test_model_loads_weights_correctly(self):
from allennlp_models.vision.models.nlvr2 import Nlvr2Model
vocab = Vocabulary()
model_name = "epwalsh/bert-xsmall-dummy"
model = Nlvr2Model.from_huggingface_model_name(
vocab=vocab,
model_name=model_name,
image_feature_dim=2048,
image_num_hidden_layers=1,
image_hidden_size=3,
image_num_attention_heads=1,
combined_num_attention_heads=1,
combined_hidden_size=5,
pooled_output_dim=7,
image_intermediate_size=11,
image_attention_dropout=0.0,
image_hidden_dropout=0.0,
image_biattention_id=[0, 1],
text_biattention_id=[0, 1],
text_fixed_layer=0,
image_fixed_layer=0,
)
transformer = AutoModel.from_pretrained(model_name)
# compare embedding parameters
assert_allclose(
transformer.embeddings.word_embeddings.weight.data,
model.backbone.text_embeddings.embeddings.word_embeddings.weight.data,
)
# compare encoder parameters
assert_allclose(
transformer.encoder.layer[0].intermediate.dense.weight.data,
model.backbone.encoder.layers1[0].intermediate.dense.weight.data,
)
| allennlp-models-main | tests/vision/models/vilbert_nlvr2_test.py |
from allennlp.common.testing import ModelTestCase
from tests import FIXTURES_ROOT
class TestVilbertMultitask(ModelTestCase):
def test_predict(self):
from allennlp.models import load_archive
from allennlp.predictors import Predictor
import allennlp_models.vision
archive = load_archive(FIXTURES_ROOT / "vision" / "vilbert_multitask" / "model.tar.gz")
predictor = Predictor.from_archive(archive)
with open(
FIXTURES_ROOT / "vision" / "vilbert_multitask" / "dataset.json", "r"
) as file_input:
json_input = [predictor.load_line(line) for line in file_input if not line.isspace()]
predictions = predictor.predict_batch_json(json_input)
assert all(
"gqa_best_answer" in p or "vqa_best_answer" in p or "ve_entailment_answer" in p
for p in predictions
)
def test_model_can_train_save_and_load_small_model(self):
param_file = FIXTURES_ROOT / "vision" / "vilbert_multitask" / "experiment.jsonnet"
        # The VQA head's gradients are going to be zero because the last batch contains only
        # Visual Entailment examples, so no gradient reaches the VQA classifier.
self.ensure_model_can_train_save_and_load(
param_file,
gradients_to_ignore={"_heads.vqa.classifier.bias", "_heads.vqa.classifier.weight"},
)
| allennlp-models-main | tests/vision/models/vilbert_multitask_test.py |
allennlp-models-main | tests/vision/models/__init__.py |
|
from torch.testing import assert_allclose
from transformers import AutoModel
from allennlp.common.testing import ModelTestCase
from allennlp.data import Vocabulary
from allennlp_models import vision # noqa: F401
from tests import FIXTURES_ROOT
class TestVEVilbert(ModelTestCase):
def test_model_can_train_save_and_load_small_model(self):
param_file = FIXTURES_ROOT / "vision" / "vilbert_ve" / "experiment.jsonnet"
self.ensure_model_can_train_save_and_load(param_file)
def test_model_can_train_save_and_load_with_cache(self):
import tempfile
with tempfile.TemporaryDirectory(prefix=self.__class__.__name__) as d:
overrides = {"dataset_reader.feature_cache_dir": str(d)}
import json
overrides = json.dumps(overrides)
param_file = FIXTURES_ROOT / "vision" / "vilbert_ve" / "experiment.jsonnet"
self.ensure_model_can_train_save_and_load(param_file, overrides=overrides)
def test_model_can_train_save_and_load_from_huggingface(self):
param_file = FIXTURES_ROOT / "vision" / "vilbert_ve" / "experiment_from_huggingface.jsonnet"
self.ensure_model_can_train_save_and_load(param_file)
def test_model_loads_weights_correctly(self):
from allennlp_models.vision.models.visual_entailment import VisualEntailmentModel
vocab = Vocabulary()
model_name = "epwalsh/bert-xsmall-dummy"
model = VisualEntailmentModel.from_huggingface_model_name(
vocab=vocab,
model_name=model_name,
image_feature_dim=2048,
image_num_hidden_layers=1,
image_hidden_size=3,
image_num_attention_heads=1,
combined_num_attention_heads=1,
combined_hidden_size=5,
pooled_output_dim=7,
image_intermediate_size=11,
image_attention_dropout=0.0,
image_hidden_dropout=0.0,
image_biattention_id=[0, 1],
text_biattention_id=[0, 1],
text_fixed_layer=0,
image_fixed_layer=0,
)
transformer = AutoModel.from_pretrained(model_name)
# compare embedding parameters
assert_allclose(
transformer.embeddings.word_embeddings.weight.data,
model.backbone.text_embeddings.embeddings.word_embeddings.weight.data,
)
# compare encoder parameters
assert_allclose(
transformer.encoder.layer[0].intermediate.dense.weight.data,
model.backbone.encoder.layers1[0].intermediate.dense.weight.data,
)
| allennlp-models-main | tests/vision/models/visual_entailment_test.py |
allennlp-models-main | tests/pair_classification/__init__.py |
|
import random
import pytest
from allennlp.confidence_checks.task_checklists.textual_entailment_suite import (
TextualEntailmentSuite,
)
from allennlp.common.testing import AllenNlpTestCase
from allennlp.models.archival import load_archive
from allennlp.predictors import Predictor
import torch
import numpy
from allennlp_models.pair_classification.predictors import * # noqa: F403
from tests import FIXTURES_ROOT
class TestTextualEntailmentSuite(AllenNlpTestCase):
@pytest.mark.parametrize(
"model",
[
"decomposable_attention",
"esim",
],
)
def test_run(self, model: str):
torch.manual_seed(1)
numpy.random.seed(1)
random.seed(1)
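        # Seed every RNG so the checklist suite's random perturbations are reproducible.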
archive = load_archive(
FIXTURES_ROOT / "pair_classification" / model / "serialization" / "model.tar.gz"
)
predictor = Predictor.from_archive(archive)
data = [
("Alice and Bob are friends.", "Alice is Bob's friend."),
("The park had children playing", "The park was empty."),
]
suite = TextualEntailmentSuite(probs_key="label_probs", add_default_tests=True, data=data)
suite.run(predictor, max_examples=10)
| allennlp-models-main | tests/pair_classification/task_checklists/textual_entailment_suite_test.py |
allennlp-models-main | tests/pair_classification/task_checklists/__init__.py |
|
from allennlp.common.params import Params
from allennlp.common.util import ensure_list
from allennlp.data import DatasetReader
import pytest
from allennlp_models.pair_classification import TransformerSuperGlueRteReader
from tests import FIXTURES_ROOT
class TestTransformerSuperGlueRteReader:
def test_read_from_file_superglue_rte(self):
reader = TransformerSuperGlueRteReader()
instances = ensure_list(reader.read(FIXTURES_ROOT / "rc" / "superglue_rte.jsonl"))
assert len(instances) == 4
token_text = [t.text for t in instances[0].fields["tokens"].tokens]
assert token_text[:3] == ["<s>", "No", "ĠWeapons"]
assert token_text[10:14] == [".", "</s>", "</s>", "Weapons"]
assert token_text[-3:] == ["ĠIraq", ".", "</s>"]
assert instances[0].fields["label"].human_readable_repr() == "not_entailment"
assert instances[0].fields["metadata"]["label"] == "not_entailment"
assert instances[0].fields["metadata"]["index"] == 0
def test_read_from_file_superglue_rte_no_label(self):
reader = TransformerSuperGlueRteReader()
instances = ensure_list(reader.read(FIXTURES_ROOT / "rc" / "superglue_rte_no_labels.jsonl"))
assert len(instances) == 4
token_text = [t.text for t in instances[0].fields["tokens"].tokens]
assert token_text[:3] == ["<s>", "No", "ĠWeapons"]
assert token_text[10:14] == [".", "</s>", "</s>", "Weapons"]
assert token_text[-3:] == ["ĠIraq", ".", "</s>"]
assert "label" not in instances[0].fields
assert "label" not in instances[0].fields["metadata"]
assert instances[0].fields["metadata"]["index"] == 0
| allennlp-models-main | tests/pair_classification/dataset_readers/transformer_superglue_rte_test.py |
import pytest
from allennlp.data.tokenizers import PretrainedTransformerTokenizer
from allennlp.common.util import ensure_list
from allennlp_models.pair_classification import SnliReader
from tests import FIXTURES_ROOT
class TestSnliReader:
def test_read_from_file(self):
reader = SnliReader()
instances = reader.read(FIXTURES_ROOT / "pair_classification" / "snli.jsonl")
instances = ensure_list(instances)
instance1 = {
"premise": [
"A",
"person",
"on",
"a",
"horse",
"jumps",
"over",
"a",
"broken",
"down",
"airplane",
".",
],
"hypothesis": [
"A",
"person",
"is",
"training",
"his",
"horse",
"for",
"a",
"competition",
".",
],
"label": "neutral",
}
instance2 = {
"premise": [
"A",
"person",
"on",
"a",
"horse",
"jumps",
"over",
"a",
"broken",
"down",
"airplane",
".",
],
"hypothesis": [
"A",
"person",
"is",
"at",
"a",
"diner",
",",
"ordering",
"an",
"omelette",
".",
],
"label": "contradiction",
}
instance3 = {
"premise": [
"A",
"person",
"on",
"a",
"horse",
"jumps",
"over",
"a",
"broken",
"down",
"airplane",
".",
],
"hypothesis": ["A", "person", "is", "outdoors", ",", "on", "a", "horse", "."],
"label": "entailment",
}
assert len(instances) == 3
fields = instances[0].fields
assert [t.text for t in fields["premise"].tokens] == instance1["premise"]
assert [t.text for t in fields["hypothesis"].tokens] == instance1["hypothesis"]
assert fields["label"].label == instance1["label"]
fields = instances[1].fields
assert [t.text for t in fields["premise"].tokens] == instance2["premise"]
assert [t.text for t in fields["hypothesis"].tokens] == instance2["hypothesis"]
assert fields["label"].label == instance2["label"]
fields = instances[2].fields
assert [t.text for t in fields["premise"].tokens] == instance3["premise"]
assert [t.text for t in fields["hypothesis"].tokens] == instance3["hypothesis"]
assert fields["label"].label == instance3["label"]
def test_combine_input_fields(self):
reader = SnliReader(
tokenizer=PretrainedTransformerTokenizer("bert-base-uncased", add_special_tokens=False),
combine_input_fields=True,
)
instances = reader.read(FIXTURES_ROOT / "pair_classification" / "snli.jsonl")
instances = ensure_list(instances)
instance1 = {
"tokens": [
"[CLS]",
"a",
"person",
"on",
"a",
"horse",
"jumps",
"over",
"a",
"broken",
"down",
"airplane",
".",
"[SEP]",
"a",
"person",
"is",
"training",
"his",
"horse",
"for",
"a",
"competition",
".",
"[SEP]",
],
"label": "neutral",
}
assert len(instances) == 3
fields = instances[0].fields
assert [t.text for t in fields["tokens"].tokens] == instance1["tokens"]
assert fields["label"].label == instance1["label"]
def test_collapse_output_field(self):
reader = SnliReader(collapse_labels=True)
instances = reader.read(FIXTURES_ROOT / "pair_classification" / "snli.jsonl")
instances = ensure_list(instances)
assert len(instances) == 3
fields = instances[0].fields
assert fields["label"].label == "non-entailment"
fields = instances[1].fields
assert fields["label"].label == "non-entailment"
fields = instances[2].fields
assert fields["label"].label == "entailment"
| allennlp-models-main | tests/pair_classification/dataset_readers/snli_test.py |
from allennlp.common.util import ensure_list
from allennlp_models.pair_classification.dataset_readers.quora_paraphrase import (
QuoraParaphraseDatasetReader,
)
from tests import FIXTURES_ROOT
class TestQuoraParaphraseReader:
def test_read_from_file(self):
reader = QuoraParaphraseDatasetReader()
instances = reader.read(FIXTURES_ROOT / "pair_classification" / "quora_paraphrase.tsv")
instances = ensure_list(instances)
instance1 = {
"premise": "What should I do to avoid sleeping in class ?".split(),
"hypothesis": "How do I not sleep in a boring class ?".split(),
"label": "1",
}
instance2 = {
"premise": "Do women support each other more than men do ?".split(),
"hypothesis": "Do women need more compliments than men ?".split(),
"label": "0",
}
instance3 = {
"premise": "How can one root android devices ?".split(),
"hypothesis": "How do I root an Android device ?".split(),
"label": "1",
}
assert len(instances) == 3
for instance, expected_instance in zip(instances, [instance1, instance2, instance3]):
fields = instance.fields
assert [t.text for t in fields["premise"].tokens] == expected_instance["premise"]
assert [t.text for t in fields["hypothesis"].tokens] == expected_instance["hypothesis"]
assert fields["label"].label == expected_instance["label"]
| allennlp-models-main | tests/pair_classification/dataset_readers/quora_paraphrase_test.py |
allennlp-models-main | tests/pair_classification/dataset_readers/__init__.py |
|
from allennlp.common.testing import ModelTestCase
from tests import FIXTURES_ROOT
class TestBiMPM(ModelTestCase):
def setup_method(self):
super().setup_method()
self.set_up_model(
FIXTURES_ROOT / "pair_classification" / "bimpm" / "experiment.json",
FIXTURES_ROOT / "pair_classification" / "quora_paraphrase.tsv",
)
def test_forward_pass_runs_correctly(self):
training_tensors = self.dataset.as_tensor_dict()
output_dict = self.model(**training_tensors)
assert "logits" in output_dict and "loss" in output_dict
def test_model_can_train_save_and_load(self):
self.ensure_model_can_train_save_and_load(self.param_file)
def test_batch_predictions_are_consistent(self):
self.ensure_batch_predictions_are_consistent()
def test_decode_runs_correctly(self):
training_tensors = self.dataset.as_tensor_dict()
output_dict = self.model(**training_tensors)
decode_output_dict = self.model.make_output_human_readable(output_dict)
assert "label" in decode_output_dict
| allennlp-models-main | tests/pair_classification/models/bimpm_test.py |
from flaky import flaky
import numpy
from tests import FIXTURES_ROOT
from allennlp.common.testing import ModelTestCase
from allennlp_models.pair_classification import SnliReader
from allennlp.fairness import (
AdversarialBiasMitigator,
FeedForwardRegressionAdversary,
AdversarialBiasMitigatorBackwardCallback,
)
class AdversarialBiasMitigatorTest(ModelTestCase):
def setup_method(self):
super().setup_method()
self.set_up_model(
FIXTURES_ROOT
/ "pair_classification"
/ "bias_mitigation"
/ "adversarial_experiment.json",
FIXTURES_ROOT / "pair_classification" / "bias_mitigation" / "snli_train.jsonl",
)
def test_adversarial_bias_mitigator_can_train_save_and_load(self):
        # The BertModel pooler output is discarded, so gradients are never computed for it.
self.ensure_model_can_train_save_and_load(
self.param_file,
gradients_to_ignore=set(
[
"predictor._text_field_embedder.token_embedder_tokens.transformer_model.pooler.dense.weight",
"predictor._text_field_embedder.token_embedder_tokens.transformer_model.pooler.dense.bias",
]
),
which_loss="adversary_loss",
)
@flaky
def test_batch_predictions_are_consistent(self):
self.ensure_batch_predictions_are_consistent()
def test_forward_pass_runs_correctly(self):
training_tensors = self.dataset.as_tensor_dict()
output_dict = self.model(**training_tensors)
output_dict = self.model.make_output_human_readable(output_dict)
assert "label" in output_dict.keys()
probs = output_dict["probs"][0].data.numpy()
numpy.testing.assert_almost_equal(numpy.sum(probs, -1), numpy.array([1]))
def test_forward_on_instances_ignores_loss_key_when_batched(self):
batch_outputs = self.model.forward_on_instances(self.dataset.instances)
for output in batch_outputs:
assert "loss" not in output.keys()
# It should be in the single batch case, because we special case it.
single_output = self.model.forward_on_instance(self.dataset.instances[0])
assert "loss" in single_output.keys()
| allennlp-models-main | tests/pair_classification/models/adversarial_bias_mitigator_test.py |
from flaky import flaky
import numpy
from tests import FIXTURES_ROOT
from allennlp.common.testing import ModelTestCase
from allennlp_models.pair_classification import SnliReader
from allennlp.fairness.bias_mitigator_applicator import BiasMitigatorApplicator
class BiasMitigatorApplicatorTest(ModelTestCase):
def setup_method(self):
super().setup_method()
self.set_up_model(
FIXTURES_ROOT / "pair_classification" / "bias_mitigation" / "experiment.json",
FIXTURES_ROOT / "pair_classification" / "bias_mitigation" / "snli_train.jsonl",
)
def test_bias_mitigator_applicator_can_train_save_and_load(self):
        # The BertModel pooler output is discarded, so gradients are never computed for it.
self.ensure_model_can_train_save_and_load(
self.param_file,
gradients_to_ignore=set(
[
"base_model._text_field_embedder.token_embedder_tokens.transformer_model.pooler.dense.weight",
"base_model._text_field_embedder.token_embedder_tokens.transformer_model.pooler.dense.bias",
]
),
)
@flaky
def test_batch_predictions_are_consistent(self):
self.ensure_batch_predictions_are_consistent()
def test_forward_pass_runs_correctly(self):
training_tensors = self.dataset.as_tensor_dict()
output_dict = self.model(**training_tensors)
output_dict = self.model.make_output_human_readable(output_dict)
assert "label" in output_dict.keys()
probs = output_dict["probs"][0].data.numpy()
numpy.testing.assert_almost_equal(numpy.sum(probs, -1), numpy.array([1]))
def test_forward_on_instances_ignores_loss_key_when_batched(self):
batch_outputs = self.model.forward_on_instances(self.dataset.instances)
for output in batch_outputs:
assert "loss" not in output.keys()
# It should be in the single batch case, because we special case it.
single_output = self.model.forward_on_instance(self.dataset.instances[0])
assert "loss" in single_output.keys()
| allennlp-models-main | tests/pair_classification/models/bias_mitigator_applicator_test.py |
allennlp-models-main | tests/pair_classification/models/__init__.py |
|
from allennlp.models import Model
from flaky import flaky
import pytest
import numpy
from numpy.testing import assert_almost_equal
from allennlp.common import Params
from allennlp.common.checks import ConfigurationError
from allennlp.common.testing import ModelTestCase
from allennlp_models.pair_classification import DecomposableAttention
from tests import FIXTURES_ROOT
class TestDecomposableAttention(ModelTestCase):
def setup_method(self):
super().setup_method()
self.set_up_model(
FIXTURES_ROOT / "pair_classification" / "decomposable_attention" / "experiment.json",
FIXTURES_ROOT / "pair_classification" / "snli.jsonl",
)
def test_forward_pass_runs_correctly(self):
training_tensors = self.dataset.as_tensor_dict()
output_dict = self.model(**training_tensors)
assert_almost_equal(numpy.sum(output_dict["label_probs"][0].data.numpy(), -1), 1, decimal=6)
@flaky
def test_model_can_train_save_and_load(self):
self.ensure_model_can_train_save_and_load(self.param_file)
@flaky
def test_batch_predictions_are_consistent(self):
self.ensure_batch_predictions_are_consistent()
def test_model_load(self):
params = Params.from_file(
FIXTURES_ROOT / "pair_classification" / "decomposable_attention" / "experiment.json"
)
model = Model.load(
params,
serialization_dir=FIXTURES_ROOT
/ "pair_classification"
/ "decomposable_attention"
/ "serialization",
)
assert isinstance(model, DecomposableAttention)
def test_mismatched_dimensions_raise_configuration_errors(self):
params = Params.from_file(self.param_file)
# Make the input_dim to the first feedforward_layer wrong - it should be 2.
params["model"]["attend_feedforward"]["input_dim"] = 10
with pytest.raises(ConfigurationError):
Model.from_params(vocab=self.vocab, params=params.pop("model"))
params = Params.from_file(self.param_file)
# Make the projection output_dim of the last layer wrong - it should be
# 3, equal to the number of classes.
params["model"]["aggregate_feedforward"]["output_dim"] = 10
with pytest.raises(ConfigurationError):
Model.from_params(vocab=self.vocab, params=params.pop("model"))
def test_decode_runs_correctly(self):
training_tensors = self.dataset.as_tensor_dict()
output_dict = self.model(**training_tensors)
decode_output_dict = self.model.make_output_human_readable(output_dict)
assert "label" in decode_output_dict
| allennlp-models-main | tests/pair_classification/models/decomposable_attention_test.py |
import numpy
from numpy.testing import assert_almost_equal
from allennlp.common.testing import ModelTestCase
from tests import FIXTURES_ROOT
class TestESIM(ModelTestCase):
def setup_method(self):
super().setup_method()
self.set_up_model(
FIXTURES_ROOT / "pair_classification" / "esim" / "experiment.json",
FIXTURES_ROOT / "pair_classification" / "snli.jsonl",
)
def test_forward_pass_runs_correctly(self):
training_tensors = self.dataset.as_tensor_dict()
output_dict = self.model(**training_tensors)
assert_almost_equal(numpy.sum(output_dict["label_probs"][0].data.numpy(), -1), 1, decimal=6)
def test_model_can_train_save_and_load(self):
self.ensure_model_can_train_save_and_load(self.param_file)
def test_batch_predictions_are_consistent(self):
self.ensure_batch_predictions_are_consistent()
def test_decode_runs_correctly(self):
training_tensors = self.dataset.as_tensor_dict()
output_dict = self.model(**training_tensors)
decode_output_dict = self.model.make_output_human_readable(output_dict)
assert "label" in decode_output_dict
| allennlp-models-main | tests/pair_classification/models/esim_test.py |
allennlp-models-main | tests/tagging/__init__.py |
|
allennlp-models-main | tests/tagging/dataset_readers/__init__.py |
|
from allennlp.common.util import ensure_list
from allennlp_models.tagging.dataset_readers.ontonotes_ner import OntonotesNamedEntityRecognition
from tests import FIXTURES_ROOT
class TestOntonotesNamedEntityRecognitionReader:
def test_read_from_file(self):
conll_reader = OntonotesNamedEntityRecognition()
instances = conll_reader.read(
FIXTURES_ROOT / "structured_prediction" / "srl" / "conll_2012" / "subdomain"
)
instances = ensure_list(instances)
fields = instances[0].fields
tokens = [t.text for t in fields["tokens"].tokens]
assert tokens == [
"Mali",
"government",
"officials",
"say",
"the",
"woman",
"'s",
"confession",
"was",
"forced",
".",
]
assert fields["tags"].labels == ["B-GPE", "O", "O", "O", "O", "O", "O", "O", "O", "O", "O"]
fields = instances[1].fields
tokens = [t.text for t in fields["tokens"].tokens]
assert tokens == [
"The",
"prosecution",
"rested",
"its",
"case",
"last",
"month",
"after",
"four",
"months",
"of",
"hearings",
".",
]
assert fields["tags"].labels == [
"O",
"O",
"O",
"O",
"O",
"B-DATE",
"I-DATE",
"O",
"B-DATE",
"I-DATE",
"O",
"O",
"O",
]
fields = instances[2].fields
tokens = [t.text for t in fields["tokens"].tokens]
assert tokens == ["Denise", "Dillon", "Headline", "News", "."]
assert fields["tags"].labels == [
"B-PERSON",
"I-PERSON",
"B-WORK_OF_ART",
"I-WORK_OF_ART",
"O",
]
def test_ner_reader_can_filter_by_domain(self):
conll_reader = OntonotesNamedEntityRecognition(domain_identifier="subdomain2")
instances = conll_reader.read(
FIXTURES_ROOT / "structured_prediction" / "srl" / "conll_2012"
)
instances = ensure_list(instances)
assert len(instances) == 1
| allennlp-models-main | tests/tagging/dataset_readers/ontonotes_ner_test.py |
from allennlp.common.util import ensure_list
from allennlp.common.testing import AllenNlpTestCase
from allennlp.data.vocabulary import Vocabulary
from allennlp_models.tagging import CcgBankDatasetReader
from tests import FIXTURES_ROOT
class TestCcgBankReader(AllenNlpTestCase):
def test_read_from_file(self):
reader = CcgBankDatasetReader(
feature_labels=["modified_pos", "original_pos", "predicate_arg"]
)
instances = ensure_list(reader.read(FIXTURES_ROOT / "tagging" / "ccgbank.txt"))
assert len(instances) == 2
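        # Besides the CCG supertags in "tags", the requested feature labels appear as
        # separate sequence fields: original and modified POS tags and predicate-argument
        # categories.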
instance = instances[0]
fields = instance.fields
tokens = [token.text for token in fields["tokens"].tokens]
assert tokens == [
"Pierre",
"Vinken",
",",
"61",
"years",
"old",
",",
"will",
"join",
"the",
"board",
"as",
"a",
"nonexecutive",
"director",
"Nov.",
"29",
".",
]
ccg_categories = fields["tags"].labels
assert ccg_categories == [
"N/N",
"N",
",",
"N/N",
"N",
"(S[adj]\\NP)\\NP",
",",
"(S[dcl]\\NP)/(S[b]\\NP)",
"(S[b]\\NP)/NP",
"NP[nb]/N",
"N",
"((S\\NP)\\(S\\NP))/NP",
"NP[nb]/N",
"N/N",
"N",
"((S\\NP)\\(S\\NP))/N[num]",
"N[num]",
".",
]
original_pos_tags = fields["original_pos_tags"].labels
assert original_pos_tags == [
"NNP",
"NNP",
",",
"CD",
"NNS",
"JJ",
",",
"MD",
"VB",
"DT",
"NN",
"IN",
"DT",
"JJ",
"NN",
"NNP",
"CD",
".",
]
modified_pos_tags = fields["modified_pos_tags"].labels
assert modified_pos_tags == [
"NNP",
"NNP",
",",
"CD",
"NNS",
"JJ",
",",
"MD",
"VB",
"DT",
"NN",
"IN",
"DT",
"JJ",
"NN",
"NNP",
"CD",
".",
]
predicate_arg_categories = fields["predicate_arg_tags"].labels
assert predicate_arg_categories == [
"N_73/N_73",
"N",
",",
"N_93/N_93",
"N",
"(S[adj]\\NP_83)\\NP_84",
",",
"(S[dcl]\\NP_10)/(S[b]_11\\NP_10:B)_11",
"(S[b]\\NP)/NP",
"NP[nb]_29/N_29",
"N",
"((S_1\\NP_2)_1\\(S_1\\NP_2)_1)/NP",
"NP[nb]_48/N_48",
"N_43/N_43",
"N",
"((S_61\\NP_56)_61\\(S_61\\NP_56)_61)/N[num]_62",
"N[num]",
".",
]
def test_vocab_from_instances_namespaces(self):
reader = CcgBankDatasetReader(
feature_labels=["modified_pos", "original_pos", "predicate_arg"]
)
instances = ensure_list(reader.read(FIXTURES_ROOT / "tagging" / "ccgbank.txt"))
# check that we didn't clobber the labels namespace
vocab = Vocabulary.from_instances(instances)
assert set(vocab._token_to_index.keys()) == {
"tokens",
"labels",
"modified_pos_tags",
"original_pos_tags",
"predicate_arg_tags",
}
| allennlp-models-main | tests/tagging/dataset_readers/ccgbank_test.py |
import pytest
from allennlp_models.tagging import Conll2000DatasetReader
from allennlp.common.util import ensure_list
from tests import FIXTURES_ROOT
class TestConll2000Reader:
@pytest.mark.filterwarnings("ignore::DeprecationWarning")
@pytest.mark.parametrize("coding_scheme", ("BIO", "BIOUL"))
def test_read_from_file_with_deprecated_parameter(self, coding_scheme):
conll_reader = Conll2000DatasetReader(coding_scheme=coding_scheme)
instances = conll_reader.read(str(FIXTURES_ROOT / "tagging" / "conll2000.txt"))
instances = ensure_list(instances)
assert len(instances) == 2
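        # BIOUL differs from BIO by marking single-token chunks with U- and chunk-final
        # tokens with L-, so the expected labels depend on the coding scheme.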
if coding_scheme == "BIO":
expected_labels = [
"B-NP",
"B-PP",
"B-NP",
"I-NP",
"B-VP",
"I-VP",
"I-VP",
"I-VP",
"I-VP",
"B-NP",
"I-NP",
"I-NP",
"B-SBAR",
"B-NP",
"I-NP",
"B-PP",
"B-NP",
"O",
"B-ADJP",
"B-PP",
"B-NP",
"B-NP",
"O",
"B-VP",
"I-VP",
"I-VP",
"B-NP",
"I-NP",
"I-NP",
"B-PP",
"B-NP",
"I-NP",
"I-NP",
"B-NP",
"I-NP",
"I-NP",
"O",
]
else:
expected_labels = [
"U-NP",
"U-PP",
"B-NP",
"L-NP",
"B-VP",
"I-VP",
"I-VP",
"I-VP",
"L-VP",
"B-NP",
"I-NP",
"L-NP",
"U-SBAR",
"B-NP",
"L-NP",
"U-PP",
"U-NP",
"O",
"U-ADJP",
"U-PP",
"U-NP",
"U-NP",
"O",
"B-VP",
"I-VP",
"L-VP",
"B-NP",
"I-NP",
"L-NP",
"U-PP",
"B-NP",
"I-NP",
"L-NP",
"B-NP",
"I-NP",
"L-NP",
"O",
]
fields = instances[0].fields
tokens = [t.text for t in fields["tokens"].tokens]
assert tokens == [
"Confidence",
"in",
"the",
"pound",
"is",
"widely",
"expected",
"to",
"take",
"another",
"sharp",
"dive",
"if",
"trade",
"figures",
"for",
"September",
",",
"due",
"for",
"release",
"tomorrow",
",",
"fail",
"to",
"show",
"a",
"substantial",
"improvement",
"from",
"July",
"and",
"August",
"'s",
"near-record",
"deficits",
".",
]
assert fields["tags"].labels == expected_labels
if coding_scheme == "BIO":
expected_labels = [
"O",
"B-PP",
"B-NP",
"I-NP",
"B-NP",
"I-NP",
"B-NP",
"I-NP",
"I-NP",
"B-PP",
"B-NP",
"I-NP",
"I-NP",
"I-NP",
"B-VP",
"I-VP",
"I-VP",
"I-VP",
"B-NP",
"I-NP",
"B-PP",
"B-NP",
"B-PP",
"B-NP",
"I-NP",
"I-NP",
"O",
]
else:
expected_labels = [
"O",
"U-PP",
"B-NP",
"L-NP",
"B-NP",
"L-NP",
"B-NP",
"I-NP",
"L-NP",
"U-PP",
"B-NP",
"I-NP",
"I-NP",
"L-NP",
"B-VP",
"I-VP",
"I-VP",
"L-VP",
"B-NP",
"L-NP",
"U-PP",
"U-NP",
"U-PP",
"B-NP",
"I-NP",
"L-NP",
"O",
]
fields = instances[1].fields
tokens = [t.text for t in fields["tokens"].tokens]
assert tokens == [
"Chancellor",
"of",
"the",
"Exchequer",
"Nigel",
"Lawson",
"'s",
"restated",
"commitment",
"to",
"a",
"firm",
"monetary",
"policy",
"has",
"helped",
"to",
"prevent",
"a",
"freefall",
"in",
"sterling",
"over",
"the",
"past",
"week",
".",
]
assert fields["tags"].labels == expected_labels
@pytest.mark.parametrize("convert_to_coding_scheme", (None, "BIOUL"))
def test_read_from_file(self, convert_to_coding_scheme):
conll_reader = Conll2000DatasetReader(convert_to_coding_scheme=convert_to_coding_scheme)
instances = conll_reader.read(str(FIXTURES_ROOT / "tagging" / "conll2000.txt"))
instances = ensure_list(instances)
assert len(instances) == 2
if convert_to_coding_scheme is None:
expected_labels = [
"B-NP",
"B-PP",
"B-NP",
"I-NP",
"B-VP",
"I-VP",
"I-VP",
"I-VP",
"I-VP",
"B-NP",
"I-NP",
"I-NP",
"B-SBAR",
"B-NP",
"I-NP",
"B-PP",
"B-NP",
"O",
"B-ADJP",
"B-PP",
"B-NP",
"B-NP",
"O",
"B-VP",
"I-VP",
"I-VP",
"B-NP",
"I-NP",
"I-NP",
"B-PP",
"B-NP",
"I-NP",
"I-NP",
"B-NP",
"I-NP",
"I-NP",
"O",
]
else:
expected_labels = [
"U-NP",
"U-PP",
"B-NP",
"L-NP",
"B-VP",
"I-VP",
"I-VP",
"I-VP",
"L-VP",
"B-NP",
"I-NP",
"L-NP",
"U-SBAR",
"B-NP",
"L-NP",
"U-PP",
"U-NP",
"O",
"U-ADJP",
"U-PP",
"U-NP",
"U-NP",
"O",
"B-VP",
"I-VP",
"L-VP",
"B-NP",
"I-NP",
"L-NP",
"U-PP",
"B-NP",
"I-NP",
"L-NP",
"B-NP",
"I-NP",
"L-NP",
"O",
]
fields = instances[0].fields
tokens = [t.text for t in fields["tokens"].tokens]
assert tokens == [
"Confidence",
"in",
"the",
"pound",
"is",
"widely",
"expected",
"to",
"take",
"another",
"sharp",
"dive",
"if",
"trade",
"figures",
"for",
"September",
",",
"due",
"for",
"release",
"tomorrow",
",",
"fail",
"to",
"show",
"a",
"substantial",
"improvement",
"from",
"July",
"and",
"August",
"'s",
"near-record",
"deficits",
".",
]
assert fields["tags"].labels == expected_labels
if convert_to_coding_scheme is None:
expected_labels = [
"O",
"B-PP",
"B-NP",
"I-NP",
"B-NP",
"I-NP",
"B-NP",
"I-NP",
"I-NP",
"B-PP",
"B-NP",
"I-NP",
"I-NP",
"I-NP",
"B-VP",
"I-VP",
"I-VP",
"I-VP",
"B-NP",
"I-NP",
"B-PP",
"B-NP",
"B-PP",
"B-NP",
"I-NP",
"I-NP",
"O",
]
else:
expected_labels = [
"O",
"U-PP",
"B-NP",
"L-NP",
"B-NP",
"L-NP",
"B-NP",
"I-NP",
"L-NP",
"U-PP",
"B-NP",
"I-NP",
"I-NP",
"L-NP",
"B-VP",
"I-VP",
"I-VP",
"L-VP",
"B-NP",
"L-NP",
"U-PP",
"U-NP",
"U-PP",
"B-NP",
"I-NP",
"L-NP",
"O",
]
fields = instances[1].fields
tokens = [t.text for t in fields["tokens"].tokens]
assert tokens == [
"Chancellor",
"of",
"the",
"Exchequer",
"Nigel",
"Lawson",
"'s",
"restated",
"commitment",
"to",
"a",
"firm",
"monetary",
"policy",
"has",
"helped",
"to",
"prevent",
"a",
"freefall",
"in",
"sterling",
"over",
"the",
"past",
"week",
".",
]
assert fields["tags"].labels == expected_labels
| allennlp-models-main | tests/tagging/dataset_readers/conll2000_test.py |
from flaky import flaky
import pytest
from allennlp.commands.train import train_model_from_file
from allennlp.common.testing import ModelTestCase
from allennlp.common.checks import ConfigurationError
from allennlp.common.params import Params
from allennlp.models import Model
from tests import FIXTURES_ROOT
class CrfTaggerTest(ModelTestCase):
def setup_method(self):
super().setup_method()
self.set_up_model(
FIXTURES_ROOT / "tagging" / "crf_tagger" / "experiment.json",
FIXTURES_ROOT / "tagging" / "conll2003.txt",
)
def test_simple_tagger_can_train_save_and_load(self):
self.ensure_model_can_train_save_and_load(self.param_file)
def test_simple_tagger_can_train_save_and_load_ccgbank(self):
self.ensure_model_can_train_save_and_load(
FIXTURES_ROOT / "tagging" / "crf_tagger" / "experiment_ccgbank.json"
)
def test_simple_tagger_can_train_save_and_conll2000(self):
self.ensure_model_can_train_save_and_load(
FIXTURES_ROOT / "tagging" / "crf_tagger" / "experiment_conll2000.json"
)
@flaky
def test_batch_predictions_are_consistent(self):
self.ensure_batch_predictions_are_consistent()
def test_forward_pass_runs_correctly(self):
training_tensors = self.dataset.as_tensor_dict()
output_dict = self.model(**training_tensors)
tags = output_dict["tags"]
assert len(tags) == 2
assert len(tags[0]) == 7
assert len(tags[1]) == 7
for example_tags in tags:
for tag_id in example_tags:
tag = self.model.vocab.get_token_from_index(tag_id, namespace="labels")
assert tag in {"O", "I-ORG", "I-PER", "I-LOC"}
def test_low_loss_for_pretrained_transformers(self):
self.set_up_model(
FIXTURES_ROOT / "tagging" / "crf_tagger" / "experiment_albert.json",
FIXTURES_ROOT / "tagging" / "conll2003.txt",
)
training_tensors = self.dataset.as_tensor_dict()
output_dict = self.model(**training_tensors)
assert output_dict["loss"] < 50
def test_forward_pass_top_k(self):
training_tensors = self.dataset.as_tensor_dict()
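        # Ask the CRF for its 5 best tag sequences per instance; the top choice should
        # match the regular "tags" output.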
self.model.top_k = 5
output_dict = self.model.make_output_human_readable(self.model(**training_tensors))
top_k_tags = [[x["tags"] for x in item_topk] for item_topk in output_dict["top_k_tags"]]
first_choices = [x[0] for x in top_k_tags]
assert first_choices == output_dict["tags"]
lengths = [len(x) for x in top_k_tags]
assert set(lengths) == {5}
tags_used = set(
tag for item_top_k in top_k_tags for tag_seq in item_top_k for tag in tag_seq
)
assert all(tag in {"O", "I-ORG", "I-PER", "I-LOC"} for tag in tags_used)
def test_mismatching_dimensions_throws_configuration_error(self):
params = Params.from_file(self.param_file)
# Make the encoder wrong - it should be 2 to match
# the embedding dimension from the text_field_embedder.
params["model"]["encoder"]["input_size"] = 10
with pytest.raises(ConfigurationError):
Model.from_params(vocab=self.vocab, params=params.pop("model"))
def test_token_based_verbose_metrics(self):
training_tensors = self.dataset.as_tensor_dict()
save_dir = self.TEST_DIR / "save_and_load_test"
model = train_model_from_file(
self.param_file,
save_dir,
overrides={
"model.calculate_span_f1": False,
"model.verbose_metrics": True,
},
force=True,
return_model=True,
)
model(**training_tensors)
metrics = model.get_metrics()
# assert that metrics contain all verbose keys
for tag in ["O", "I-PER", "I-ORG", "I-LOC", "micro", "macro", "weighted"]:
for m in ["precision", "recall", "fscore"]:
assert f"{tag}-{m}" in metrics
| allennlp-models-main | tests/tagging/models/crf_tagger_test.py |
allennlp-models-main | tests/tagging/models/__init__.py |
|
from flaky import flaky
import pytest
from allennlp.commands.train import train_model_from_file
from allennlp.common.testing import ModelTestCase
from allennlp.common.checks import ConfigurationError
from tests import FIXTURES_ROOT
class CrfTaggerLabelWeightsTest(ModelTestCase):
def setup_method(self):
super().setup_method()
self.set_up_model(
FIXTURES_ROOT / "tagging" / "crf_tagger" / "experiment.json",
FIXTURES_ROOT / "tagging" / "conll2003.txt",
)
def test_label_weights_effectiveness(self):
training_tensors = self.dataset.as_tensor_dict()
save_dir = self.TEST_DIR / "save_and_load_test"
# original CRF
output_dict_original = self.model(**training_tensors)
# weighted CRF
model_weighted = train_model_from_file(
self.param_file,
save_dir,
overrides={"model.label_weights": {"I-ORG": 10.0}},
force=True,
return_model=True,
)
output_dict_weighted = model_weighted(**training_tensors)
# assert that logits are substantially different
assert (
output_dict_weighted["logits"].isclose(output_dict_original["logits"]).sum()
< output_dict_original["logits"].numel() / 2
)
def test_label_weights_effectiveness_emission_transition(self):
training_tensors = self.dataset.as_tensor_dict()
save_dir = self.TEST_DIR / "save_and_load_test"
# original CRF
output_dict_original = self.model(**training_tensors)
# weighted CRF
model_weighted = train_model_from_file(
self.param_file,
save_dir,
overrides={
"model.label_weights": {"I-ORG": 10.0},
"model.weight_strategy": "emission_transition",
},
force=True,
return_model=True,
)
output_dict_weighted = model_weighted(**training_tensors)
# assert that logits are substantially different
assert (
output_dict_weighted["logits"].isclose(output_dict_original["logits"]).sum()
< output_dict_original["logits"].numel() / 2
)
def test_label_weights_effectiveness_lannoy(self):
training_tensors = self.dataset.as_tensor_dict()
save_dir = self.TEST_DIR / "save_and_load_test"
# original CRF
output_dict_original = self.model(**training_tensors)
# weighted CRF
model_weighted = train_model_from_file(
self.param_file,
save_dir,
overrides={
"model.label_weights": {"I-ORG": 10.0},
"model.weight_strategy": "lannoy",
},
force=True,
return_model=True,
)
output_dict_weighted = model_weighted(**training_tensors)
# assert that logits are substantially different
assert (
output_dict_weighted["logits"].isclose(output_dict_original["logits"]).sum()
< output_dict_original["logits"].numel() / 2
)
def test_config_error_invalid_label(self):
save_dir = self.TEST_DIR / "save_and_load_test"
with pytest.raises(ConfigurationError):
train_model_from_file(
self.param_file,
save_dir,
overrides={"model.label_weights": {"BLA": 10.0}},
force=True,
return_model=True,
)
def test_config_error_strategy_without_weights(self):
save_dir = self.TEST_DIR / "save_and_load_test"
with pytest.raises(ConfigurationError):
train_model_from_file(
self.param_file,
save_dir,
overrides={"model.weight_strategy": "emission"},
force=True,
return_model=True,
)
def test_config_error_invalid_strategy(self):
save_dir = self.TEST_DIR / "save_and_load_test"
with pytest.raises(ConfigurationError):
train_model_from_file(
self.param_file,
save_dir,
overrides={
"model.label_weights": {"I-ORG": 10.0},
"model.weight_strategy": "invalid",
},
force=True,
return_model=True,
)
| allennlp-models-main | tests/tagging/models/crf_tagger_label_weights_test.py |
from nltk import Tree
from allennlp.common.testing import AllenNlpTestCase
from allennlp_models.common.ontonotes import Ontonotes
from tests import FIXTURES_ROOT
CONLL_PATH = FIXTURES_ROOT / "structured_prediction" / "srl" / "conll_2012"
class TestOntonotes(AllenNlpTestCase):
def test_dataset_iterator(self):
reader = Ontonotes()
annotated_sentences = list(reader.dataset_iterator(CONLL_PATH / "subdomain"))
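        # Each annotated sentence bundles the CoNLL-2012 columns checked below: words,
        # POS tags, word senses, SRL frames, named entities, predicate lemmas, speakers,
        # the parse tree, and coreference spans.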
annotation = annotated_sentences[0]
assert annotation.document_id == "test/test/01/test_001"
assert annotation.sentence_id == 0
assert annotation.words == [
"Mali",
"government",
"officials",
"say",
"the",
"woman",
"'s",
"confession",
"was",
"forced",
".",
]
assert annotation.pos_tags == [
"NNP",
"NN",
"NNS",
"VBP",
"DT",
"NN",
"POS",
"NN",
"VBD",
"JJ",
".",
]
assert annotation.word_senses == [None, None, 1, 1, None, 2, None, None, 1, None, None]
assert annotation.predicate_framenet_ids == [
None,
None,
None,
"01",
None,
None,
None,
None,
"01",
None,
None,
]
assert annotation.srl_frames == [
(
"say",
[
"B-ARG0",
"I-ARG0",
"I-ARG0",
"B-V",
"B-ARG1",
"I-ARG1",
"I-ARG1",
"I-ARG1",
"I-ARG1",
"I-ARG1",
"O",
],
),
(
"was",
["O", "O", "O", "O", "B-ARG1", "I-ARG1", "I-ARG1", "I-ARG1", "B-V", "B-ARG2", "O"],
),
]
assert annotation.named_entities == [
"B-GPE",
"O",
"O",
"O",
"O",
"O",
"O",
"O",
"O",
"O",
"O",
]
assert annotation.predicate_lemmas == [
None,
None,
"official",
"say",
None,
"man",
None,
None,
"be",
None,
None,
]
assert annotation.speakers == [
None,
None,
None,
None,
None,
None,
None,
None,
None,
None,
None,
]
assert annotation.parse_tree == Tree.fromstring(
"(TOP(S(NP(NML (NNP Mali) (NN government) )"
" (NNS officials) )(VP (VBP say) (SBAR(S(NP(NP"
" (DT the) (NN woman) (POS 's) ) (NN "
"confession) )(VP (VBD was) (ADJP (JJ "
"forced) ))))) (. .) ))"
)
assert annotation.coref_spans == {(1, (4, 6)), (3, (4, 7))}
annotation = annotated_sentences[1]
assert annotation.document_id == "test/test/02/test_002"
assert annotation.sentence_id == 0
assert annotation.words == [
"The",
"prosecution",
"rested",
"its",
"case",
"last",
"month",
"after",
"four",
"months",
"of",
"hearings",
".",
]
assert annotation.pos_tags == [
"DT",
"NN",
"VBD",
"PRP$",
"NN",
"JJ",
"NN",
"IN",
"CD",
"NNS",
"IN",
"NNS",
".",
]
assert annotation.word_senses == [
None,
2,
5,
None,
2,
None,
None,
None,
None,
1,
None,
1,
None,
]
assert annotation.predicate_framenet_ids == [
None,
None,
"01",
None,
None,
None,
None,
None,
None,
None,
None,
"01",
None,
]
assert annotation.srl_frames == [
(
"rested",
[
"B-ARG0",
"I-ARG0",
"B-V",
"B-ARG1",
"I-ARG1",
"B-ARGM-TMP",
"I-ARGM-TMP",
"B-ARGM-TMP",
"I-ARGM-TMP",
"I-ARGM-TMP",
"I-ARGM-TMP",
"I-ARGM-TMP",
"O",
],
),
("hearings", ["O", "O", "O", "O", "O", "O", "O", "O", "O", "O", "O", "B-V", "O"]),
]
assert annotation.named_entities == [
"O",
"O",
"O",
"O",
"O",
"B-DATE",
"I-DATE",
"O",
"B-DATE",
"I-DATE",
"O",
"O",
"O",
]
assert annotation.predicate_lemmas == [
None,
"prosecution",
"rest",
None,
"case",
None,
None,
None,
None,
"month",
None,
"hearing",
None,
]
assert annotation.speakers == [
None,
None,
None,
None,
None,
None,
None,
None,
None,
None,
None,
None,
None,
]
assert annotation.parse_tree == Tree.fromstring(
"(TOP(S(NP (DT The) (NN prosecution) )(VP "
"(VBD rested) (NP (PRP$ its) (NN case) )"
"(NP (JJ last) (NN month) )(PP (IN after) "
"(NP(NP (CD four) (NNS months) )(PP (IN"
" of) (NP (NNS hearings) ))))) (. .) ))"
)
assert annotation.coref_spans == {(2, (0, 1)), (2, (3, 3))}
# Check we can handle sentences without verbs.
annotation = annotated_sentences[2]
assert annotation.document_id == "test/test/03/test_003"
assert annotation.sentence_id == 0
assert annotation.words == ["Denise", "Dillon", "Headline", "News", "."]
assert annotation.pos_tags == ["NNP", "NNP", "NNP", "NNP", "."]
assert annotation.word_senses == [None, None, None, None, None]
assert annotation.predicate_framenet_ids == [None, None, None, None, None]
assert annotation.srl_frames == []
assert annotation.named_entities == [
"B-PERSON",
"I-PERSON",
"B-WORK_OF_ART",
"I-WORK_OF_ART",
"O",
]
assert annotation.predicate_lemmas == [None, None, None, None, None]
assert annotation.speakers == [None, None, None, None, None]
assert annotation.parse_tree == Tree.fromstring(
"(TOP(FRAG(NP (NNP Denise) "
" (NNP Dillon) )(NP (NNP Headline) "
"(NNP News) ) (. .) ))"
)
assert annotation.coref_spans == {(2, (0, 1))}
# Check we can handle sentences with 2 identical verbs.
annotation = annotated_sentences[3]
assert annotation.document_id == "test/test/04/test_004"
assert annotation.sentence_id == 0
assert annotation.words == [
"and",
"that",
"wildness",
"is",
"still",
"in",
"him",
",",
"as",
"it",
"is",
"with",
"all",
"children",
".",
]
assert annotation.pos_tags == [
"CC",
"DT",
"NN",
"VBZ",
"RB",
"IN",
"PRP",
",",
"IN",
"PRP",
"VBZ",
"IN",
"DT",
"NNS",
".",
]
assert annotation.word_senses == [
None,
None,
None,
4.0,
None,
None,
None,
None,
None,
None,
5.0,
None,
None,
None,
None,
]
assert annotation.predicate_framenet_ids == [
None,
None,
None,
"01",
None,
None,
None,
None,
None,
None,
"01",
None,
None,
None,
None,
]
assert annotation.srl_frames == [
(
"is",
[
"B-ARGM-DIS",
"B-ARG1",
"I-ARG1",
"B-V",
"B-ARGM-TMP",
"B-ARG2",
"I-ARG2",
"O",
"B-ARGM-ADV",
"I-ARGM-ADV",
"I-ARGM-ADV",
"I-ARGM-ADV",
"I-ARGM-ADV",
"I-ARGM-ADV",
"O",
],
),
(
"is",
[
"O",
"O",
"O",
"O",
"O",
"O",
"O",
"O",
"O",
"B-ARG1",
"B-V",
"B-ARG2",
"I-ARG2",
"I-ARG2",
"O",
],
),
]
assert annotation.named_entities == [
"O",
"O",
"O",
"O",
"O",
"O",
"O",
"O",
"O",
"O",
"O",
"O",
"O",
"O",
"O",
]
assert annotation.predicate_lemmas == [
None,
None,
None,
"be",
None,
None,
None,
None,
None,
None,
"be",
None,
None,
None,
None,
]
assert annotation.speakers == [
"_Avalon_",
"_Avalon_",
"_Avalon_",
"_Avalon_",
"_Avalon_",
"_Avalon_",
"_Avalon_",
"_Avalon_",
"_Avalon_",
"_Avalon_",
"_Avalon_",
"_Avalon_",
"_Avalon_",
"_Avalon_",
"_Avalon_",
]
assert annotation.parse_tree == Tree.fromstring(
"(TOP (S (CC and) (NP (DT that) (NN wildness)) "
"(VP (VBZ is) (ADVP (RB still)) (PP (IN in) (NP "
"(PRP him))) (, ,) (SBAR (IN as) (S (NP (PRP it)) "
"(VP (VBZ is) (PP (IN with) (NP (DT all) (NNS "
"children))))))) (. .)))"
)
assert annotation.coref_spans == {(14, (6, 6))}
def test_dataset_path_iterator(self):
reader = Ontonotes()
files = list(reader.dataset_path_iterator(CONLL_PATH))
expected_paths = [
str(CONLL_PATH / "subdomain" / "example.gold_conll"),
str(CONLL_PATH / "subdomain2" / "example.gold_conll"),
]
assert len(files) == len(expected_paths)
assert set(files) == set(expected_paths)
def test_ontonotes_can_read_conll_file_with_multiple_documents(self):
reader = Ontonotes()
file_path = FIXTURES_ROOT / "coref" / "coref.gold_conll"
documents = list(reader.dataset_document_iterator(file_path))
assert len(documents) == 4
| allennlp-models-main | tests/common/ontonotes_test.py |
import pytest
from allennlp.common.util import ensure_list
from allennlp_models.mc.dataset_readers.commonsenseqa import CommonsenseQaReader
from tests import FIXTURES_ROOT
class TestCommonsenseQaReader:
def test_read_from_file(self):
reader = CommonsenseQaReader(transformer_model_name="bert-base-uncased")
instances = ensure_list(reader.read(FIXTURES_ROOT / "mc" / "commonsenseqa.jsonl"))
assert len(instances) == 10
instance = instances[0]
assert len(instance.fields["alternatives"]) == 5
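        # Each alternative encodes the question together with one answer choice as a
        # single transformer input; type id 0 marks the question and 1 marks the answer.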
alternative = instance.fields["alternatives"][0]
token_text = [t.text for t in alternative.tokens]
token_type_ids = [t.type_id for t in alternative.tokens]
assert token_text[:3] == ["[CLS]", "a", "revolving"]
assert token_type_ids[:3] == [0, 0, 0]
assert token_text[-3:] == ["[SEP]", "bank", "[SEP]"]
assert token_type_ids[-3:] == [0, 1, 1]
assert instance.fields["correct_alternative"] == 0
def test_length_limit_works(self):
length_limit = 20
reader = CommonsenseQaReader(
transformer_model_name="bert-base-uncased", length_limit=length_limit
)
instances = ensure_list(reader.read(FIXTURES_ROOT / "mc" / "commonsenseqa.jsonl"))
assert len(instances) == 10
for instance in instances:
for alternative in instance.fields["alternatives"]:
assert len(alternative) <= length_limit
| allennlp-models-main | tests/mc/dataset_readers/commonsenseqa_test.py |
import pytest
from allennlp.common.util import ensure_list
from allennlp_models.mc.dataset_readers.piqa import PiqaReader
from tests import FIXTURES_ROOT
class TestPiqaReader:
def test_read_from_file(self):
reader = PiqaReader(transformer_model_name="bert-base-uncased")
instances = ensure_list(reader.read(str(FIXTURES_ROOT / "mc" / "piqa.jsonl")))
assert len(instances) == 10
instance = instances[0]
assert len(instance.fields["alternatives"]) == 2
alternative = instance.fields["alternatives"][0]
token_text = [t.text for t in alternative.tokens]
token_type_ids = [t.type_id for t in alternative.tokens]
assert token_text[:3] == ["[CLS]", "how", "do"]
assert token_type_ids[:3] == [0, 0, 0]
assert token_text[-3:] == ["dish", ".", "[SEP]"]
assert token_type_ids[-3:] == [1, 1, 1]
assert instance.fields["correct_alternative"] == 0
def test_length_limit_works(self):
length_limit = 20
reader = PiqaReader(transformer_model_name="bert-base-uncased", length_limit=length_limit)
instances = ensure_list(reader.read(str(FIXTURES_ROOT / "mc" / "piqa.jsonl")))
assert len(instances) == 10
for instance in instances:
for alternative in instance.fields["alternatives"]:
assert len(alternative) <= length_limit
| allennlp-models-main | tests/mc/dataset_readers/piqa_test.py |
import pytest
from allennlp.common.util import ensure_list
from allennlp_models.mc.dataset_readers.swag import SwagReader
from tests import FIXTURES_ROOT
class TestSwagReader:
def test_read_from_file(self):
reader = SwagReader(transformer_model_name="bert-base-uncased")
instances = ensure_list(reader.read(FIXTURES_ROOT / "mc" / "swag.csv"))
assert len(instances) == 11
instance = instances[0]
assert len(instance.fields["alternatives"]) == 4
alternative = instance.fields["alternatives"][0]
token_text = [t.text for t in alternative.tokens]
token_type_ids = [t.type_id for t in alternative.tokens]
assert token_text[:3] == ["[CLS]", "students", "lower"]
assert token_type_ids[:3] == [0, 0, 0]
assert token_text[-3:] == ["someone", ".", "[SEP]"]
assert token_type_ids[-3:] == [1, 1, 1]
assert instance.fields["correct_alternative"] == 2
def test_length_limit_works(self):
length_limit = 20
reader = SwagReader(transformer_model_name="bert-base-uncased", length_limit=length_limit)
instances = ensure_list(reader.read(FIXTURES_ROOT / "mc" / "swag.csv"))
assert len(instances) == 11
for instance in instances:
for alternative in instance.fields["alternatives"]:
assert len(alternative) <= length_limit
| allennlp-models-main | tests/mc/dataset_readers/swag_test.py |
from flaky import flaky
from allennlp.commands.train import train_model_from_file
from allennlp.common.testing import ModelTestCase, AllenNlpTestCase, requires_gpu
from allennlp.data import Batch
from tests import FIXTURES_ROOT
import pytest
import allennlp_models.mc.models
class TransformerMcTest(ModelTestCase):
def setup_method(self):
super().setup_method()
self.set_up_model(
FIXTURES_ROOT / "mc" / "transformer_mc" / "experiment.jsonnet",
FIXTURES_ROOT / "mc" / "piqa.jsonl",
)
def test_model_can_train_save_and_load(self):
# While the model uses a pooler, it does not use the pooler that comes with the token embedder.
self.ensure_model_can_train_save_and_load(
self.param_file,
gradients_to_ignore={
"_text_field_embedder.token_embedder_tokens.transformer_model.pooler.dense.weight",
"_text_field_embedder.token_embedder_tokens.transformer_model.pooler.dense.bias",
# Due to numerical instability, this scalar tensor might sometimes
# have zero gradient.
"_linear_layer.bias",
},
)
@flaky(max_runs=3)
def test_forward_pass_runs_correctly(self):
batch = Batch(self.instances)
batch.index_instances(self.vocab)
training_tensors = batch.as_tensor_dict()
output_dict = self.model(**training_tensors)
# The following asserts assume that we get a fair mix of answers, some 0, some 1, some correct, and some
# incorrect. If the model was completely un-initialized, the chance of these checks failing randomly is
# 1/1024, and there are three of them. But the model is not completely uninitialized (in fact, it contains
# no random weights), so we know these asserts pass. We still mark the test as flaky because random
# drop-out could mess things up.
assert output_dict["best_alternative"].min() == 0
assert output_dict["best_alternative"].max() == 1
metrics = self.model.get_metrics(reset=True)
assert metrics["acc"] > 0
@requires_gpu
class TransformerMcMixedPrecisionTest(AllenNlpTestCase):
def test_model_can_train_save_and_load_with_mixed_precision(self):
train_model_from_file(
FIXTURES_ROOT / "mc" / "transformer_mc" / "experiment.jsonnet",
self.TEST_DIR,
overrides="{'trainer.use_amp':true,'trainer.cuda_device':0}",
)
| allennlp-models-main | tests/mc/models/transformer_mc_test.py |
# These test should really be in the core repo, but they are here because the multitask model is
# here.
import json
import os
import pathlib
import shutil
import sys
import tempfile
import pytest
from allennlp.commands import main
from allennlp.common.checks import ConfigurationError
from allennlp.common.testing import AllenNlpTestCase
from tests import FIXTURES_ROOT
class TestMultitaskPredict(AllenNlpTestCase):
def setup_method(self):
super().setup_method()
self.classifier_model_path = FIXTURES_ROOT / "vision" / "vilbert_multitask" / "model.tar.gz"
self.classifier_data_path = FIXTURES_ROOT / "vision" / "vilbert_multitask" / "dataset.json"
self.tempdir = pathlib.Path(tempfile.mkdtemp())
self.infile = self.tempdir / "inputs.txt"
self.outfile = self.tempdir / "outputs.txt"
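    # Each test drives the allennlp "predict" CLI by patching sys.argv and calling
    # main() directly, then inspects the JSON-lines predictions it writes.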
def test_works_with_multitask_model(self):
sys.argv = [
"__main__.py", # executable
"predict", # command
str(self.classifier_model_path),
str(self.classifier_data_path),
"--output-file",
str(self.outfile),
"--silent",
]
main()
assert os.path.exists(self.outfile)
with open(self.outfile, "r") as f:
results = [json.loads(line) for line in f]
assert len(results) == 3
for result in results:
assert "vqa_best_answer" in result.keys() or "ve_entailment_answer" in result.keys()
shutil.rmtree(self.tempdir)
def test_using_dataset_reader_works_with_specified_multitask_head(self):
sys.argv = [
"__main__.py", # executable
"predict", # command
str(self.classifier_model_path),
"unittest", # "path" of the input data, but it's not really a path for VQA
"--output-file",
str(self.outfile),
"--silent",
"--use-dataset-reader",
"--multitask-head",
"vqa",
]
main()
assert os.path.exists(self.outfile)
with open(self.outfile, "r") as f:
results = [json.loads(line) for line in f]
assert len(results) == 3
for result in results:
assert "vqa_best_answer" in result.keys()
shutil.rmtree(self.tempdir)
def test_using_dataset_reader_fails_with_missing_parameter(self):
sys.argv = [
"__main__.py", # executable
"predict", # command
str(self.classifier_model_path),
"unittest", # "path" of the input data, but it's not really a path for VQA
"--output-file",
str(self.outfile),
"--silent",
"--use-dataset-reader",
]
with pytest.raises(ConfigurationError):
main()
| allennlp-models-main | tests/commands/multitask_predict_test.py |
| allennlp-models-main | tests/rc/__init__.py |
from allennlp.common.testing import (
AllenNlpTestCase,
global_distributed_metric,
run_distributed_test,
)
from allennlp_models.rc.metrics import DropEmAndF1
class DropEmAndF1Test(AllenNlpTestCase):
def test_drop_em_and_f1(self):
metric = DropEmAndF1()
metric(
"this is the best span", [{"spans": ["this is a good span", "something irrelevant"]}]
)
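# The gold answer here is a bag of two spans; the prediction partially overlaps one of them and misses
# the other, so DROP's aligned F1 averages out to roughly 0.38 with no exact match.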
exact_match, f1_score = metric.get_metric()
assert exact_match == 0.0
assert f1_score == 0.38
def test_distributed_drop_em_and_f1(self):
prediction = ["this is the best span", "this is another span"]
ground_truths = [
[{"spans": ["this is a good span", "something irrelevant"]}],
[{"spans": ["this is another span"]}],
]
metric_kwargs = {"prediction": prediction, "ground_truths": ground_truths}
desired_values = (1 / 2, 1.38 / 2)
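# Each of the two workers scores one prediction: (0.0, 0.38) for the partial match above and (1.0, 1.0)
# for the exact match, so the averaged metrics are (0.5, 0.69).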
run_distributed_test(
[-1, -1],
global_distributed_metric,
DropEmAndF1(),
metric_kwargs,
desired_values,
exact=True,
)
| allennlp-models-main | tests/rc/metrics/drop_em_and_f1_test.py |
from allennlp.common.testing import (
AllenNlpTestCase,
global_distributed_metric,
run_distributed_test,
)
from allennlp_models.rc.metrics import SquadEmAndF1
class SquadEmAndF1Test(AllenNlpTestCase):
def test_squad_em_and_f1(self):
metric = SquadEmAndF1()
metric("this is the best span", ["this is a good span", "something irrelevant"])
exact_match, f1_score = metric.get_metric()
assert exact_match == 0.0
assert f1_score == 0.75
def test_distributed_squad_em_and_f1(self):
best_span_strings = ["this is the best span", "this is another span"]
answer_strings = [
["this is a good span", "something irrelevant"],
["this is another span", "this one is less perfect"],
]
metric_kwargs = {"best_span_strings": best_span_strings, "answer_strings": answer_strings}
desired_values = (1 / 2, 1.75 / 2)
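# The per-prediction scores are (0.0, 0.75) and (1.0, 1.0), so averaging over the two workers gives
# an exact match of 0.5 and an F1 of 0.875.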
run_distributed_test(
[-1, -1],
global_distributed_metric,
SquadEmAndF1(),
metric_kwargs,
desired_values,
exact=True,
)
| allennlp-models-main | tests/rc/metrics/squad_em_and_f1_test.py |
import pytest
from allennlp.confidence_checks.task_checklists.question_answering_suite import (
QuestionAnsweringSuite,
)
from allennlp.common.testing import AllenNlpTestCase
from allennlp.models.archival import load_archive
from allennlp.predictors import Predictor
from allennlp_models.rc.predictors import * # noqa: F403
from tests import FIXTURES_ROOT
class TestQuestionAnsweringSuite(AllenNlpTestCase):
@pytest.mark.parametrize(
"model",
[
"bidaf",
],
)
def test_run(self, model: str):
archive = load_archive(FIXTURES_ROOT / "rc" / model / "serialization" / "model.tar.gz")
predictor = Predictor.from_archive(archive)
data = [
("Alice is taller than Bob.", "Who is taller?"),
("Children were playing in the park.", "Was the park empty?"),
]
suite = QuestionAnsweringSuite(context_key="passage", add_default_tests=True, data=data)
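# BiDAF's predictor expects the context under the "passage" key, so the suite is pointed at that key
# before running its default checklist tests.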
suite.run(predictor, max_examples=10)
| allennlp-models-main | tests/rc/task_checklists/question_answering_suite_test.py |
| allennlp-models-main | tests/rc/task_checklists/__init__.py |
import os
from allennlp.common.testing import AllenNlpTestCase
from allennlp_models.rc.tools import quoref
from tests import FIXTURES_ROOT
class TestQuorefEval(AllenNlpTestCase):
"""
The actual evaluation logic in Quoref's evaluation script is from DROP's script, and the
only additional thing that Quoref's script does is handling the data properly. So this class only tests the
data handling aspects. The tests we have for DROP are fairly comprehensive.
"""
def test_quoref_eval_with_original_data_format(self):
predictions_file = FIXTURES_ROOT / "rc" / "quoref_sample_predictions.json"
gold_file = FIXTURES_ROOT / "rc" / "quoref_sample.json"
metrics = quoref.evaluate_prediction_file(predictions_file, gold_file)
assert metrics == (0.5, 0.625)
def test_quoref_eval_with_simple_format(self):
predictions_file = FIXTURES_ROOT / "rc" / "quoref_sample_predictions.json"
gold_file = FIXTURES_ROOT / "rc" / "quoref_sample_predictions.json"
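# Using the predictions file itself as the gold file should yield perfect exact match and F1.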
metrics = quoref.evaluate_prediction_file(predictions_file, gold_file)
assert metrics == (1.0, 1.0)
def test_quoref_eval_script(self):
predictions_file = FIXTURES_ROOT / "rc" / "quoref_sample_predictions.json"
gold_file = FIXTURES_ROOT / "rc" / "quoref_sample.json"
result = os.system(
f"python -m allennlp_models.rc.tools.quoref --gold_path {gold_file} "
f"--prediction_path {predictions_file} --output_path /tmp/output.json"
)
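# os.system returns the command's exit status; 0 means the evaluation script ran without error.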
assert result == 0
| allennlp-models-main | tests/rc/evaluations/quoref_test.py |
| allennlp-models-main | tests/rc/evaluations/__init__.py |
import io
from contextlib import redirect_stdout
from allennlp_models.rc.tools.drop import _normalize_answer, get_metrics, evaluate_json
class TestDropEvalNormalize:
def test_number_parse(self):
assert _normalize_answer("12.0") == _normalize_answer("12.0 ")
assert _normalize_answer("12.0") == _normalize_answer("12.000")
assert _normalize_answer("12.0") == _normalize_answer("12")
assert _normalize_answer("12.0") == _normalize_answer(" 1.2e1 ")
def test_punctations(self):
assert _normalize_answer("12.0 persons") == "12.0 persons"
assert _normalize_answer("S.K. Singh") == "sk singh"
class TestDropEvalGetMetrics:
def test_float_numbers(self):
assert get_metrics(["78"], ["78.0"]) == (1.0, 1.0)
def test_metric_is_length_aware(self):
# Overall F1 should be mean([1.0, 0.0])
assert get_metrics(predicted=["td"], gold=["td", "td"]) == (0.0, 0.5)
assert get_metrics("td", ["td", "td"]) == (0.0, 0.5)
# Overall F1 should be mean([1.0, 0.0]) = 0.5
assert get_metrics(predicted=["td", "td"], gold=["td"]) == (0.0, 0.5)
assert get_metrics(predicted=["td", "td"], gold="td") == (0.0, 0.5)
# F1 score is mean([0.0, 0.0, 1.0, 0.0, 0.0, 0.0])
assert get_metrics(
predicted=["the", "fat", "cat", "the fat", "fat cat", "the fat cat"], gold=["cat"]
) == (0.0, 0.17)
assert get_metrics(
predicted=["cat"], gold=["the", "fat", "cat", "the fat", "fat cat", "the fat cat"]
) == (0.0, 0.17)
# F1 score is mean([1.0, 0.5, 0.0, 0.0, 0.0, 0.0])
assert get_metrics(
predicted=["the", "fat", "cat", "the fat", "fat cat", "the fat cat"],
gold=["cat", "cat dog"],
) == (0.0, 0.25)
def test_articles_are_ignored(self):
assert get_metrics(["td"], ["the td"]) == (1.0, 1.0)
assert get_metrics(["the a NOT an ARTICLE the an a"], ["NOT ARTICLE"]) == (1.0, 1.0)
def test_f1_ignores_word_order(self):
assert get_metrics(["John Elton"], ["Elton John"]) == (0.0, 1.0)
assert get_metrics(["50 yard"], ["yard 50"]) == (0.0, 1.0)
assert get_metrics(["order word right"], ["right word order"]) == (0.0, 1.0)
def test_periods_commas_and_spaces_are_ignored(self):
assert get_metrics(["Per.i.o.d...."], [".P....e.r,,i;;;o...d,,"]) == (1.0, 1.0)
assert get_metrics(["Spa c e s "], [" Spa c e s"]) == (1.0, 1.0)
def test_splitting_on_hyphens(self):
assert get_metrics(["78-yard"], ["78 yard"]) == (1.0, 1.0)
assert get_metrics(["78 yard"], ["78-yard"]) == (1.0, 1.0)
assert get_metrics(["78"], ["78-yard"]) == (0.0, 0.67)
assert get_metrics(["78-yard"], ["78"]) == (0.0, 0.67)
def test_casing_is_ignored(self):
assert get_metrics(["This was a triumph"], ["tHIS Was A TRIUMPH"]) == (1.0, 1.0)
def test_overlap_in_correct_cases(self):
assert get_metrics(["Green bay packers"], ["Green bay packers"]) == (1.0, 1.0)
assert get_metrics(["Green bay", "packers"], ["Green bay", "packers"]) == (1.0, 1.0)
assert get_metrics(["Green", "bay", "packers"], ["Green", "bay", "packers"]) == (1.0, 1.0)
def test_simple_overlap_in_incorrect_cases(self):
assert get_metrics([""], ["army"]) == (0.0, 0.0)
assert get_metrics(["packers"], ["Green bay packers"]) == (0.0, 0.5)
assert get_metrics(["packers"], ["Green bay"]) == (0.0, 0.0)
# if the numbers in the span don't match, F1 is 0
assert get_metrics(["yard"], ["36 yard td"]) == (0.0, 0.0)
assert get_metrics(["23 yards"], ["43 yards"]) == (0.0, 0.0)
# however, if the number matches, it's not given extra weight over the non-functional words
assert get_metrics(["56 yards"], ["56 yd"]) == (0.0, 0.5)
assert get_metrics(["26"], ["26 yard td"]) == (0.0, 0.5)
def test_multi_span_overlap_in_incorrect_cases(self):
# only consider bags with matching numbers if they are present
# F1 scores of: 1.0 2/3 0.0 0.0 0.0 0.0
# Average them to get F1 of 0.28
assert get_metrics(
["78-yard", "56", "28", "40", "44", "touchdown"],
["78-yard", "56 yard", "1 yard touchdown"],
) == (0.0, 0.28)
# two copies of the same value will account for only one match (using optimal 1-1 bag alignment)
assert get_metrics(["23", "23 yard"], ["23-yard", "56 yards"]) == (0.0, 0.5)
# matching is done at the individual span level and not pooled into one global bag
assert get_metrics(["John Karman", "Joe Hardy"], ["Joe Karman", "John Hardy"]) == (0.0, 0.5)
# macro-averaging F1 over spans
assert get_metrics(
["ottoman", "Kantakouzenous"], ["ottoman", "army of Kantakouzenous"]
) == (0.0, 0.75)
def test_order_invariance(self):
assert get_metrics(["a"], ["a", "b"]) == (0, 0.5)
assert get_metrics(["b"], ["a", "b"]) == (0, 0.5)
assert get_metrics(["b"], ["b", "a"]) == (0, 0.5)
class TestDropEvalFunctional:
def test_json_loader(self):
annotation = {
"pid1": {
"qa_pairs": [
{
"answer": {"number": "1"},
"validated_answers": [{"number": "0"}],
"query_id": "qid1",
}
]
}
}
prediction = {"qid1": "1"}
assert evaluate_json(annotation, prediction) == (1.0, 1.0)
annotation = {
"pid1": {
"qa_pairs": [
{
"answer": {"spans": ["2"]},
"validated_answers": [{"number": "2"}],
"query_id": "qid1",
}
]
}
}
prediction = {"qid1": "2"}
assert evaluate_json(annotation, prediction) == (1.0, 1.0)
annotation = {
"pid1": {
"qa_pairs": [
{
"answer": {"spans": ["0"]},
"validated_answers": [{"number": "1"}, {"number": "2"}],
"query_id": "qid1",
}
]
}
}
prediction = {"qid1": "1"}
assert evaluate_json(annotation, prediction) == (1.0, 1.0)
annotation = {
"pid1": {
"qa_pairs": [
{
"answer": {"date": {"day": "17", "month": "August", "year": ""}},
"validated_answers": [{"spans": ["August"]}, {"number": "17"}],
"query_id": "qid1",
}
]
}
}
prediction = {"qid1": "17 August"}
assert evaluate_json(annotation, prediction) == (1.0, 1.0)
annotation = {
"pid1": {
"qa_pairs": [
{
"answer": {"spans": ["span1", "span2"]},
"validated_answers": [{"spans": ["span2"]}],
"query_id": "qid1",
}
]
}
}
prediction = {"qid1": "span1"}
assert evaluate_json(annotation, prediction) == (0.0, 0.5)
annotation = {
"pid1": {
"qa_pairs": [
{
"answer": {"spans": ["1"]},
"validated_answers": [{"number": "0"}],
"query_id": "qid1",
}
]
}
}
prediction = {"qid0": "2"}
assert evaluate_json(annotation, prediction) == (0.0, 0.0)
annotation = {
"pid1": {
"qa_pairs": [
{
"answer": {"spans": ["answer1"]},
"validated_answers": [{"spans": ["answer2"]}],
"query_id": "qid1",
}
]
}
}
prediction = {"qid1": "answer"}
assert evaluate_json(annotation, prediction) == (0.0, 0.0)
annotation = {
"pid1": {
"qa_pairs": [
{"answer": {"spans": ["answer1"]}, "query_id": "qid1"},
{"answer": {"spans": ["answer2"]}, "query_id": "qid2"},
]
}
}
prediction = {"qid1": "answer", "qid2": "answer2"}
assert evaluate_json(annotation, prediction) == (0.5, 0.5)
def test_type_partition_output(self):
annotation = {
"pid1": {
"qa_pairs": [
{
"answer": {"number": "5"},
"validated_answers": [{"spans": ["7-meters"]}],
"query_id": "qid1",
}
]
}
}
prediction = {"qid1": "5-yard"}
with io.StringIO() as buf, redirect_stdout(buf):
evaluate_json(annotation, prediction)
output = buf.getvalue()
lines = output.strip().split("\n")
assert lines[4] == "number: 1 (100.00%)"
annotation = {
"pid1": {
"qa_pairs": [
{
"answer": {"spans": ["7-meters"]},
"validated_answers": [{"number": "5"}],
"query_id": "qid1",
}
]
}
}
prediction = {"qid1": "5-yard"}
with io.StringIO() as buf, redirect_stdout(buf):
evaluate_json(annotation, prediction)
output = buf.getvalue()
lines = output.strip().split("\n")
assert lines[4] == "number: 1 (100.00%)"
| allennlp-models-main | tests/rc/evaluations/drop_test.py |