python_code | repo_name | file_path
---|---|---|
# pylint: disable=no-self-use,invalid-name
from allennlp_models.rc.tools.squad import normalize_answer as _normalize_answer_squad
from allennlp_models.rc.tools.orb_utils import get_metric_squad, get_metric_drop
from allennlp_models.rc.tools.narrativeqa import get_metric_score as get_metric_narrativeqa
from tests import FIXTURES_ROOT
import os
class TestSQUAD1:
def test_spaces_are_ignored(self):
assert _normalize_answer_squad("abcd") == _normalize_answer_squad("abcd ")
assert _normalize_answer_squad("abcd") == _normalize_answer_squad(" abcd ")
assert _normalize_answer_squad(" ABCD") == _normalize_answer_squad("ABCD")
def test_punctations_are_ignored(self):
assert _normalize_answer_squad("T.J Howard") == _normalize_answer_squad("tj howard")
assert _normalize_answer_squad("7802") == _normalize_answer_squad("78.02")
def test_articles_are_ignored(self):
assert get_metric_squad("td", ["the td"]) == (1.0, 1.0)
assert get_metric_squad("the a NOT an ARTICLE the an a", ["NOT ARTICLE"]) == (1.0, 1.0)
def test_casing_is_ignored(self):
assert get_metric_squad("This was a triumph", ["tHIS Was A TRIUMPH"]) == (1.0, 1.0)
class TestDROP:
def test_articles_are_ignored(self):
assert get_metric_drop("td", ["the td"]) == (1.0, 1.0)
assert get_metric_drop("the a NOT an ARTICLE the an a", ["NOT ARTICLE"]) == (1.0, 1.0)
def test_casing_is_ignored(self):
assert get_metric_drop("This was a triumph", ["tHIS Was A TRIUMPH"]) == (1.0, 1.0)
def test_long_answers(self):
assert (
get_metric_drop(
"David Thomas",
[
"Thomas David Arquette Thomas David Arquette Thomas \
David Arquette Thomas David Arquette"
],
)
== (0.0, 0.8)
)
def test_span_order_is_ignored(self):
assert get_metric_drop(["athlete", "unprofessional"], [["unprofessional", "athlete"]]) == (
1.0,
1.0,
)
assert get_metric_drop(
["algebra", "arithmetic"], [["arithmetic", "algebra", "geometry"]]
) == (0.0, 0.67)
def test_word_order_is_not_ignored(self):
assert get_metric_drop(["athlete unprofessional"], [["unprofessional athlete"]]) == (
0.0,
1.0,
)
def test_bag_alignment_is_optimal(self):
assert get_metric_drop(
["Thomas Jefferson", "Thomas Davidson Arquette"], [["David Thomas", "Thomas Jefferson"]]
) == (0.0, 0.7)
assert get_metric_drop(
["Thomas David Arquette"], [["David Thomas", "Thomas Jefferson"]]
) == (0.0, 0.4)
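# For context, a minimal sketch of the "optimal bag alignment" idea exercised
# above: score every predicted bag against every gold bag, then take the
# one-to-one assignment that maximizes total F1. The helper name, the f1_fn
# argument, and the normalization used here are assumptions for illustration;
# the real DROP metric also normalizes and tokenizes answers first.
import numpy as np
from scipy.optimize import linear_sum_assignment

def _aligned_bag_f1_sketch(predicted_bags, gold_bags, f1_fn):
    scores = np.zeros((len(predicted_bags), len(gold_bags)))
    for i, pred in enumerate(predicted_bags):
        for j, gold in enumerate(gold_bags):
            scores[i, j] = f1_fn(pred, gold)
    # linear_sum_assignment minimizes cost, so negate the scores to maximize F1.
    rows, cols = linear_sum_assignment(-scores)
    return scores[rows, cols].sum() / max(len(predicted_bags), len(gold_bags))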
def test_multiple_gold_spans(self):
assert get_metric_drop(
["Thomas David Arquette"],
[["David Thomas"], ["Thomas Jefferson"], ["David Thomas"], ["Thomas David"]],
) == (0.0, 0.8)
def test_long_gold_spans(self):
assert get_metric_drop(
["Thomas David Arquette"], [["David Thomas was eating an apple and fell to the ground"]]
) == (0.0, 0.33)
class TestNarrativeQA:
def test_ngrams(self):
assert get_metric_narrativeqa(
"David Thomas was eating an apple",
["David Thomas was eating an apple and fell to the ground"],
) == (0.43, 0.43, 0.57, 0.75, 1.0, 0.6)
assert get_metric_narrativeqa(
"David Thomas was eating an apple and fell to the ground",
["David Thomas was eating an apple", "he fell to the ground"],
) == (0.55, 0.38, 0.92, 0.75, 0.6, 1.0)
assert get_metric_narrativeqa(
"David Thomas was eating an apple and fell to the ground",
["David Thomas was eating an apple and fell to the ground"],
) == (1.0, 1.0, 1.0, 1.0, 1.0, 1.0)
class TestQuoREF:
def test_articles_are_ignored(self):
assert get_metric_drop("td", ["the td"]) == (1.0, 1.0)
assert get_metric_drop("the a NOT an ARTICLE the an a", ["NOT ARTICLE"]) == (1.0, 1.0)
def test_casing_is_ignored(self):
assert get_metric_drop("This was a triumph", ["tHIS Was A TRIUMPH"]) == (1.0, 1.0)
def test_long_answers(self):
assert (
get_metric_drop(
"David Thomas",
[
"Thomas David Arquette Thomas David Arquette Thomas \
David Arquette Thomas David Arquette"
],
)
== (0.0, 0.8)
)
def test_span_order_is_ignored(self):
assert get_metric_drop(["athlete", "unprofessional"], [["unprofessional", "athlete"]]) == (
1.0,
1.0,
)
assert get_metric_drop(
["algebra", "arithmetic"], [["arithmetic", "algebra", "geometry"]]
) == (0.0, 0.67)
def test_word_order_is_not_ignored(self):
assert get_metric_drop(["athlete unprofessional"], [["unprofessional athlete"]]) == (
0.0,
1.0,
)
def test_bag_alignment_is_optimal(self):
assert get_metric_drop(
["Thomas Jefferson", "Thomas Davidson Arquette"], [["David Thomas", "Thomas Jefferson"]]
) == (0.0, 0.7)
assert get_metric_drop(
["Thomas David Arquette"], [["David Thomas", "Thomas Jefferson"]]
) == (0.0, 0.4)
def test_multiple_gold_spans(self):
assert get_metric_drop(
["Thomas David Arquette"],
[["David Thomas"], ["Thomas Jefferson"], ["David Thomas"], ["Thomas David"]],
) == (0.0, 0.8)
def test_long_gold_spans(self):
assert get_metric_drop(
["Thomas David Arquette"], [["David Thomas was eating an apple and fell to the ground"]]
) == (0.0, 0.33)
class TestSQUAD2:
def test_impossible_answer(self):
assert get_metric_squad("", ["news"]) == (0.0, 0.0)
assert get_metric_squad("news", [""]) == (0.0, 0.0)
assert get_metric_squad("", [""]) == (1.0, 1.0)
def test_functional_case(self):
assert get_metric_squad("This was a triumph", ["a triumph"]) == (0.0, 0.5)
class TestIntegration:
def test_sample_results(self):
gold_file = FIXTURES_ROOT / "rc" / "orb_sample_input.jsonl"
predictions_file = FIXTURES_ROOT / "rc" / "orb_sample_predictions.json"
result = os.system(
f"python -m allennlp_models.rc.tools.orb --dataset_file {gold_file} "
f"--prediction_file {predictions_file} --metrics_output_file /tmp/output.json"
)
assert result == 0
| allennlp-models-main | tests/rc/evaluations/orb_test.py |
import pytest
from allennlp_models.rc.dataset_readers.utils import char_span_to_token_span
@pytest.mark.parametrize(
"token_offsets, character_span, expected_result",
[
([(0, 3), (4, 4), (5, 8)], (5, 8), ((2, 2), False)),
([(0, 3), (4, 4), (5, 8)], (4, 8), ((1, 2), False)),
([(0, 3), (4, 4), (5, 8)], (0, 8), ((0, 2), False)),
([(0, 3), (4, 4), (5, 8)], (1, 8), ((0, 2), True)),
([(0, 3), (4, 4), (5, 8)], (7, 8), ((2, 2), True)),
([(0, 3), (4, 4), (5, 8)], (7, 9), ((2, 2), True)),
],
)
def test_char_span_to_token_span(token_offsets, character_span, expected_result):
assert char_span_to_token_span(token_offsets, character_span) == expected_result
def test_char_span_to_token_span_throws():
with pytest.raises(ValueError):
char_span_to_token_span([(0, 3), (4, 4), (5, 8)], (7, 19))
| allennlp-models-main | tests/rc/dataset_readers/utils_test.py |
from allennlp.common.params import Params
from allennlp.common.util import ensure_list
from allennlp.data import DatasetReader
import pytest
from allennlp_models.rc import TransformerSquadReader
from tests import FIXTURES_ROOT
class TestTransformerSquadReader:
def test_from_params(self):
with pytest.warns(DeprecationWarning):
squad_reader = DatasetReader.from_params(
Params(
{
"type": "transformer_squad",
"skip_invalid_examples": True,
"transformer_model_name": "test_fixtures/bert-xsmall-dummy",
}
)
)
assert squad_reader.skip_impossible_questions is True
def test_read_from_file_squad1(self):
reader = TransformerSquadReader()
instances = ensure_list(reader.read(FIXTURES_ROOT / "rc" / "squad.json"))
assert len(instances) == 5
token_text = [t.text for t in instances[0].fields["question_with_context"].tokens]
token_type_ids = [t.type_id for t in instances[0].fields["question_with_context"].tokens]
assert token_text[:3] == ["[CLS]", "To", "whom"]
assert token_type_ids[:3] == [0, 0, 0]
assert token_text[-3:] == ["Mary", ".", "[SEP]"]
assert token_type_ids[-3:] == [1, 1, 1]
assert token_text[instances[0].fields["context_span"].span_start] == "Architectural"
assert token_type_ids[instances[0].fields["context_span"].span_start] == 1
assert token_text[instances[0].fields["context_span"].span_end + 1] == "[SEP]"
assert token_type_ids[instances[0].fields["context_span"].span_end + 1] == 1
assert token_text[instances[0].fields["context_span"].span_end] == "."
assert token_type_ids[instances[0].fields["context_span"].span_end] == 1
assert token_text[
instances[0]
.fields["answer_span"]
.span_start : instances[0]
.fields["answer_span"]
.span_end
+ 1
] == ["Saint", "Bern", "##ade", "##tte", "So", "##ubi", "##rous"]
for instance in instances:
token_type_ids = [t.type_id for t in instance.fields["question_with_context"].tokens]
context_start = instance.fields["context_span"].span_start
context_end = instance.fields["context_span"].span_end + 1
assert all(id == 0 for id in token_type_ids[:context_start])
assert all(id == 1 for id in token_type_ids[context_start:context_end])
@pytest.mark.parametrize("include_cls_index", [True, False])
def test_read_from_file_squad2(self, include_cls_index: bool):
reader = TransformerSquadReader()
# This should be `False` to begin with since the `[CLS]` token is the first
# token with BERT.
assert reader._include_cls_index is False
reader._include_cls_index = include_cls_index
instances = ensure_list(reader.read(FIXTURES_ROOT / "rc" / "squad2.json"))
assert len(instances) == 6
token_text = [t.text for t in instances[0].fields["question_with_context"].tokens]
token_type_ids = [t.type_id for t in instances[0].fields["question_with_context"].tokens]
assert token_text[:3] == ["[CLS]", "This", "is"]
assert token_type_ids[:3] == [0, 0, 0]
assert token_text[-3:] == ["Mary", ".", "[SEP]"]
assert token_type_ids[-3:] == [1, 1, 1]
for instance in instances:
tokens = instance.fields["question_with_context"].tokens
token_type_ids = [t.type_id for t in tokens]
context_start = instance.fields["context_span"].span_start
context_end = instance.fields["context_span"].span_end + 1
assert all(id == 0 for id in token_type_ids[:context_start])
assert all(id == 1 for id in token_type_ids[context_start:context_end])
if include_cls_index:
assert tokens[instance.fields["cls_index"].sequence_index].text == "[CLS]"
def test_length_limit_works(self):
max_query_length = 10
stride = 20
reader = TransformerSquadReader(
length_limit=100,
max_query_length=max_query_length,
stride=stride,
skip_impossible_questions=False,
)
instances = ensure_list(reader.read(FIXTURES_ROOT / "rc" / "squad.json"))
assert len(instances) == 12
# The sequence is "[CLS] question [SEP] context ... [SEP]" with the default BERT-style tokenizer.
assert instances[0].fields["context_span"].span_start == len(
reader._tokenizer.sequence_pair_start_tokens
) + max_query_length + len(reader._tokenizer.sequence_pair_mid_tokens)
instance_0_text = [t.text for t in instances[0].fields["question_with_context"].tokens]
instance_1_text = [t.text for t in instances[1].fields["question_with_context"].tokens]
assert instance_0_text[: max_query_length + 2] == instance_1_text[: max_query_length + 2]
assert instance_0_text[max_query_length + 3] != instance_1_text[max_query_length + 3]
assert instance_0_text[-1] == "[SEP]"
assert instance_0_text[-2] == "##rot"
assert (
instance_1_text[instances[1].fields["context_span"].span_start + stride - 1] == "##rot"
)
def test_roberta_bug(self):
"""This reader tokenizes first by spaces, and then re-tokenizes using the wordpiece tokenizer that comes
with the transformer model. For RoBERTa, this produces a bug, since RoBERTa tokens are different depending
on whether they are preceded by a space, and the first round of tokenization cuts off the spaces. The
reader has a workaround for this case. This tests that workaround."""
reader = TransformerSquadReader(transformer_model_name="roberta-base")
instances = ensure_list(reader.read(FIXTURES_ROOT / "rc" / "squad.json"))
assert instances
assert len(instances) == 5
token_text = [t.text for t in instances[1].fields["question_with_context"].tokens]
token_ids = [t.text_id for t in instances[1].fields["question_with_context"].tokens]
assert token_text[:3] == ["<s>", "What", "Ġsits"]
assert token_ids[:3] == [
0,
2264,
6476,
]
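# For context, a minimal sketch of the space-sensitivity the docstring above
# describes, using the HuggingFace tokenizer directly. The helper is illustrative
# only and is not called by the tests; exact word pieces depend on the tokenizer
# version.
def _roberta_space_sensitivity_demo():
    from transformers import AutoTokenizer
    tok = AutoTokenizer.from_pretrained("roberta-base")
    # Without a leading space the word is tokenized without the "Ġ" space marker;
    # with a leading space it becomes "Ġsits", matching the assertion above.
    return tok.tokenize("sits"), tok.tokenize(" sits")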
| allennlp-models-main | tests/rc/dataset_readers/transformer_squad_test.py |
| allennlp-models-main | tests/rc/dataset_readers/__init__.py |
import pytest
from allennlp.common import Params
from allennlp.common.util import ensure_list
from allennlp_models.rc import DropReader
from tests import FIXTURES_ROOT
class TestDropReader:
def test_read_from_file(self):
reader = DropReader()
instances = ensure_list(reader.read(FIXTURES_ROOT / "rc" / "drop.json"))
assert len(instances) == 19
instance = instances[0]
assert set(instance.fields.keys()) == {
"question",
"passage",
"number_indices",
"answer_as_passage_spans",
"answer_as_question_spans",
"answer_as_add_sub_expressions",
"answer_as_counts",
"metadata",
}
assert [t.text for t in instance["question"][:3]] == ["What", "happened", "second"]
assert [t.text for t in instance["passage"][:3]] == ["The", "Port", "of"]
assert [t.text for t in instance["passage"][-3:]] == ["cruise", "ships", "."]
# Note that the last number in here is added as padding in case we don't find any numbers
# in a particular passage.
# Just FYI, these are the actual numbers that the indices correspond to:
# [ "1", "25", "2014", "5", "2018", "1", "2", "1", "54", "52", "6", "60", "58", "2010",
# "67", "2010", "1996", "3", "1", "6", "1", "0"]
assert [f.sequence_index for f in instance["number_indices"]] == [
16,
30,
36,
41,
52,
64,
80,
89,
147,
153,
166,
174,
177,
206,
245,
252,
267,
279,
283,
288,
296,
-1,
]
assert len(instance["answer_as_passage_spans"]) == 1
assert instance["answer_as_passage_spans"][0] == (46, 47)
assert len(instance["answer_as_question_spans"]) == 1
assert instance["answer_as_question_spans"][0] == (5, 6)
assert len(instance["answer_as_add_sub_expressions"]) == 1
assert instance["answer_as_add_sub_expressions"][0].labels == [0] * 22
assert len(instance["answer_as_counts"]) == 1
assert instance["answer_as_counts"][0].label == -1
assert set(instance["metadata"].metadata.keys()) == {
"answer_annotations",
"answer_info",
"answer_texts",
"number_indices",
"number_tokens",
"original_numbers",
"original_passage",
"original_question",
"passage_id",
"passage_token_offsets",
"passage_tokens",
"question_id",
"question_token_offsets",
"question_tokens",
}
def test_read_in_bert_format(self):
reader = DropReader(instance_format="bert")
instances = ensure_list(reader.read(FIXTURES_ROOT / "rc" / "drop.json"))
assert len(instances) == 19
print(instances[0])
instance = instances[0]
assert set(instance.fields.keys()) == {
"answer_as_passage_spans",
"metadata",
"passage",
"question",
"question_and_passage",
}
assert [t.text for t in instance["question"][:3]] == ["What", "happened", "second"]
assert [t.text for t in instance["passage"][:3]] == ["The", "Port", "of"]
assert [t.text for t in instance["passage"][-3:]] == ["cruise", "ships", "."]
question_length = len(instance["question"])
passage_length = len(instance["passage"])
assert len(instance["question_and_passage"]) == question_length + passage_length + 1
assert len(instance["answer_as_passage_spans"]) == 1
assert instance["answer_as_passage_spans"][0] == (
question_length + 1 + 46,
question_length + 1 + 47,
)
assert set(instance["metadata"].metadata.keys()) == {
"answer_annotations",
"answer_texts",
"original_passage",
"original_question",
"passage_id",
"passage_token_offsets",
"passage_tokens",
"question_id",
"question_tokens",
}
def test_read_in_squad_format(self):
reader = DropReader(instance_format="squad")
instances = ensure_list(reader.read(FIXTURES_ROOT / "rc" / "drop.json"))
assert len(instances) == 19
print(instances[0])
instance = instances[0]
assert set(instance.fields.keys()) == {
"question",
"passage",
"span_start",
"span_end",
"metadata",
}
assert [t.text for t in instance["question"][:3]] == ["What", "happened", "second"]
assert [t.text for t in instance["passage"][:3]] == ["The", "Port", "of"]
assert [t.text for t in instance["passage"][-3:]] == ["cruise", "ships", "."]
assert instance["span_start"] == 46
assert instance["span_end"] == 47
assert set(instance["metadata"].metadata.keys()) == {
"answer_annotations",
"answer_texts",
"original_passage",
"original_question",
"passage_id",
"token_offsets",
"passage_tokens",
"question_id",
"question_tokens",
"valid_passage_spans",
}
def test_can_build_from_params(self):
reader = DropReader.from_params(Params({}))
assert reader._tokenizer.__class__.__name__ == "SpacyTokenizer"
assert reader._token_indexers["tokens"].__class__.__name__ == "SingleIdTokenIndexer"
| allennlp-models-main | tests/rc/dataset_readers/drop_test.py |
import pytest
from allennlp.data.tokenizers import WhitespaceTokenizer
from allennlp.data.token_indexers import SingleIdTokenIndexer
from tests import FIXTURES_ROOT
import re
from typing import List
from allennlp_models.rc.dataset_readers.record_reader import RecordTaskReader
"""
Tests for the ReCoRD reader from SuperGLUE
"""
# TODO: Add full integration tests
class TestRecordReader:
@pytest.fixture
def reader(self):
yield RecordTaskReader(length_limit=256)
@pytest.fixture
def small_reader(self, reader):
# Some tests need the transformer tokenizer, but not the long lengths;
# this is a nice middle ground.
reader._length_limit = 24
reader._query_len_limit = 8
reader._stride = 4
return reader
@pytest.fixture
def whitespace_reader(self, small_reader):
# Set the tokenizer to whitespace tokenization for ease of use and
# testing. Easier to test than using a transformer tokenizer.
small_reader._tokenizer = WhitespaceTokenizer()
small_reader._token_indexers = SingleIdTokenIndexer()
yield small_reader
@pytest.fixture
def passage(self):
return (
"Reading Comprehension with Commonsense Reasoning Dataset ( ReCoRD ) "
"is a large-scale reading comprehension dataset which requires "
"commonsense reasoning"
)
@pytest.fixture
def record_name_passage(self, passage):
"""
From the passage above, this is the snippet that contains the phrase
"Reading Comprehension with Commonsense Reasoning Dataset". The returned
object is a tuple with (start: int, end: int, text: str).
"""
start = 0
end = 56
yield start, end, passage[start:end]
@pytest.fixture
def tokenized_passage(self, passage):
tokenizer = WhitespaceTokenizer()
return tokenizer.tokenize(passage)
@pytest.fixture
def answers(self):
return [
{"start": 58, "end": 64, "text": "ReCoRD"},
{"start": 128, "end": 149, "text": "commonsense reasoning"},
{"start": 256, "end": 512, "text": "Should not exist"},
]
@pytest.fixture
def example_basic(self):
return {
"id": "dummy1",
"source": "ReCoRD docs",
"passage": {
"text": "ReCoRD contains 120,000+ queries from 70,000+ news articles. Each "
"query has been validated by crowdworkers. Unlike existing reading "
"comprehension datasets, ReCoRD contains a large portion of queries "
"requiring commonsense reasoning, thus presenting a good challenge "
"for future research to bridge the gap between human and machine "
"commonsense reading comprehension .",
"entities": [
{"start": 0, "end": 6},
{"start": 156, "end": 162},
{"start": 250, "end": 264},
],
},
"qas": [
{
"id": "dummyA1",
"query": "@placeholder is a dataset",
"answers": [
{"start": 0, "end": 6, "text": "ReCoRD"},
{"start": 156, "end": 162, "text": "ReCoRD"},
],
},
{
"id": "dummayA2",
"query": "ReCoRD presents a @placeholder with the commonsense reading "
"comprehension task",
"answers": [
{"start": 250, "end": 264, "text": "good challenge"},
],
},
],
}
@pytest.fixture
def curiosity_example(self):
"""
Regression test for a bug where most examples did not return any
instances; this real example previously returned nothing.
"""
return {
"id": "d978b083f3f97a2ab09771c72398cfbac094f818",
"source": "Daily mail",
"passage": {
"text": "By Sarah Griffiths PUBLISHED: 12:30 EST, 10 July 2013 | UPDATED: "
"12:37 EST, 10 July 2013 Nasa's next Mars rover has been given a "
"mission to find signs of past life and to collect and store rock "
"from the the red planet that will one day be sent back to Earth. It "
"will demonstrate technology for a human exploration of the planet "
"and look for signs of life. The space agency has revealed what the "
"rover, known as Mars 2020, will look like. Scroll down for video... "
"Nasa's next Mars rover (plans pictured) has been given a mission to "
"find signs of past life and to collect and store rock from the the "
"red planet that will one day be sent back to Earth. Mars 2020 will "
"also demonstrate technology for a human exploration of the "
"planet\n@highlight\nMars 2020 will collect up to 31 rock and soil "
"samples from the red planet and will look for signs of "
"extraterrestrial life\n@highlight\nThe new rover will use the same "
"landing system as Curiosity and share its frame, which has saved "
"Nasa $1 billion\n@highlight\nThe mission will bring the sapec agency "
"a step closer to meeting President Obama's challenge to send humans "
"to Mars in the next decade",
"entities": [
{"start": 3, "end": 17},
{"start": 89, "end": 92},
{"start": 101, "end": 104},
{"start": 252, "end": 256},
{"start": 411, "end": 414},
{"start": 463, "end": 466},
{"start": 475, "end": 478},
{"start": 643, "end": 647},
{"start": 650, "end": 653},
{"start": 742, "end": 745},
{"start": 926, "end": 934},
{"start": 973, "end": 976},
{"start": 1075, "end": 1079},
{"start": 1111, "end": 1114},
],
},
"qas": [
{
"id": "d978b083f3f97a2ab09771c72398cfbac094f818"
"-04b6e904611f0d706521db167a05a11bf693e40e-61",
"query": "The 2020 mission plans on building on the accomplishments of "
"@placeholder and other Mars missions.",
"answers": [{"start": 926, "end": 934, "text": "Curiosity"}],
}
],
}
@pytest.fixture
def skyfall_example(self):
"""
Another example that was not returning instances
"""
return {
"id": "6f1ca8baf24bf9e5fc8e33b4b3b04bd54370b25f",
"source": "Daily mail",
"passage": {
"text": "They're both famous singers who have lent their powerful voices to "
"James "
"Bond films. And it seems the Oscars' stage wasn't big enough to "
"accommodate larger-and-life divas Adele and Dame Shirley Bassey, "
"at least at the same time. Instead of the two songstresses dueting or "
"sharing the stage, each performed her theme song separately during "
"Sunday night's ceremony. Scroll down for video Battle of the divas: "
"Adele and Dame Shirley Bassey separately sang James Bond theme songs "
"during Sunday night's Oscar ceremony Shirley performed first, "
"singing Goldfinger nearly 50 years since she first recorded the song "
"for "
"the 1964 Bond film of the same name.\n@highlight\nAdele awarded Oscar "
"for Best Original Score for Skyfall",
"entities": [
{"start": 67, "end": 76},
{"start": 102, "end": 107},
{"start": 171, "end": 175},
{"start": 181, "end": 199},
{"start": 407, "end": 411},
{"start": 417, "end": 435},
{"start": 453, "end": 462},
{"start": 498, "end": 502},
{"start": 513, "end": 519},
{"start": 546, "end": 555},
{"start": 620, "end": 623},
{"start": 659, "end": 663},
{"start": 673, "end": 701},
{"start": 707, "end": 713},
],
},
"qas": [
{
"id": "6f1ca8baf24bf9e5fc8e33b4b3b04bd54370b25f"
"-98823006424cc595642b5ae5fa1b533bbd215a56-105",
"query": "The full works: Adele was accompanied by an orchestra, choir and "
"light display during her performance of @placeholder",
"answers": [{"start": 707, "end": 713, "text": "Skyfall"}],
}
],
}
@staticmethod
def _token_list_to_str(tokens) -> List[str]:
return list(map(str, tokens))
#####################################################################
# Unittests #
#####################################################################
def test_tokenize_slice_bos(self, whitespace_reader, passage, record_name_passage):
"""
Test `tokenize_slice` with a string that is at the beginning of the
text. This means that `start`=0.
"""
result = list(
whitespace_reader.tokenize_slice(
passage, record_name_passage[0], record_name_passage[1]
)
)
assert len(result) == 6
expected = ["Reading", "Comprehension", "with", "Commonsense", "Reasoning", "Dataset"]
for i in range(len(result)):
assert str(result[i]) == expected[i]
def test_tokenize_slice_prefix(self, whitespace_reader, passage, record_name_passage):
result = list(
whitespace_reader.tokenize_slice(
passage, record_name_passage[0] + 8, record_name_passage[1]
)
)
expected = ["Comprehension", "with", "Commonsense", "Reasoning", "Dataset"]
assert len(result) == len(expected)
for i in range(len(result)):
assert str(result[i]) == expected[i]
def test_tokenize_str(self, whitespace_reader, record_name_passage):
result = list(whitespace_reader.tokenize_str(record_name_passage[-1]))
expected = ["Reading", "Comprehension", "with", "Commonsense", "Reasoning", "Dataset"]
assert len(result) == len(expected)
for i in range(len(result)):
assert str(result[i]) == expected[i]
def test_get_instances_from_example(self, small_reader, tokenized_passage, example_basic):
# TODO: Make better
result = list(small_reader.get_instances_from_example(example_basic))
result_text = " ".join([t.text for t in result[0]["question_with_context"].tokens])
assert len(result) == 2
assert len(result[0]["question_with_context"].tokens) == small_reader._length_limit
assert "@" in result_text
assert "place" in result_text
assert "holder" in result_text
result_text = " ".join([t.text for t in result[1]["question_with_context"].tokens])
assert len(result[1]["question_with_context"]) == small_reader._length_limit
assert "@" in result_text
assert "place" in result_text
assert "holder" not in result_text
def test_get_instances_from_example_fields(
self, small_reader, tokenized_passage, example_basic
):
results = list(small_reader.get_instances_from_example(example_basic))
expected_keys = [
"question_with_context",
"context_span",
# "cls_index",
"answer_span",
"metadata",
]
for i in range(len(results)):
assert len(results[i].fields) == len(
expected_keys
), f"results[{i}] has incorrect number of fields"
for k in expected_keys:
assert k in results[i].fields, f"results[{i}] is missing {k}"
#####################################################################
# Regression Test #
#####################################################################
def test_get_instances_from_example_curiosity(self, reader, curiosity_example):
tokenized_answer = " ".join(map(str, reader.tokenize_str("Curiosity")))
results = list(reader.get_instances_from_example(curiosity_example))
assert len(results) == 2
assert tokenized_answer in " ".join(map(str, results[0]["question_with_context"].tokens))
assert tokenized_answer in " ".join(map(str, results[1]["question_with_context"].tokens))
# TODO: Make this its own test.
# Kind of forced this extra test in here because I added it while
# solving this bug, so just left it instead of creating another
# unittest.
reader._one_instance_per_query = True
results = list(reader.get_instances_from_example(curiosity_example))
assert len(results) == 1
assert tokenized_answer in " ".join(map(str, results[0]["question_with_context"].tokens))
def test_get_instances_from_example_skyfall(self, reader, skyfall_example):
"""
This will fail for the time being.
"""
tokenized_answer = self._token_list_to_str(reader.tokenize_str("Skyfall"))
results = list(reader.get_instances_from_example(skyfall_example))
assert len(results) == 1
assert (
self._token_list_to_str(results[0]["question_with_context"][-3:-1]) == tokenized_answer
)
def test_tokenize_str_roberta(self):
reader = RecordTaskReader(transformer_model_name="roberta-base", length_limit=256)
result = reader.tokenize_str("The new rover.")
result = list(map(lambda t: t.text[1:], result))
assert len(result) == 4
assert result == ["he", "new", "rover", ""]
def test_read(self, small_reader):
instances = list(small_reader.read(FIXTURES_ROOT.joinpath("rc/record.json")))
assert len(instances) == 2
tokens = self._token_list_to_str(instances[0].fields["question_with_context"])
assert tokens == [
"[CLS]",
"On",
"October",
"10",
",",
"acclaimed",
"comedian",
"and",
"star",
"[SEP]",
"Tracy",
"Morgan",
"hasn",
"'",
"t",
"appeared",
"on",
"stage",
"since",
"the",
"devastating",
"New",
"Jersey",
"[SEP]",
]
answer_span = instances[0].fields["answer_span"]
assert tokens[answer_span.span_start : answer_span.span_end + 1] == ["Tracy", "Morgan"]
tokens = self._token_list_to_str(instances[1].fields["question_with_context"])
assert tokens == [
"[CLS]",
"Under",
"the",
"terms",
"of",
"the",
"agreement",
"any",
"cu",
"[SEP]",
"arrived",
"in",
"2011",
"from",
"China",
"to",
"great",
"fan",
"##fare",
"@",
"highlight",
"On",
"loan",
"[SEP]",
]
answer_span = instances[1].fields["answer_span"]
assert tokens[answer_span.span_start : answer_span.span_end + 1] == ["China"]
def test_to_params(self, small_reader):
assert small_reader.to_params() == {
"type": "superglue_record",
"transformer_model_name": "bert-base-cased",
"length_limit": 24,
"question_length_limit": 8,
"stride": 4,
"raise_errors": False,
"tokenizer_kwargs": {},
"one_instance_per_query": False,
"max_instances": None,
}
| allennlp-models-main | tests/rc/dataset_readers/record_reader_test.py |
import pytest
from allennlp.common import Params
from allennlp.common.util import ensure_list
from allennlp_models.rc import QangarooReader
from tests import FIXTURES_ROOT
class TestQangarooReader:
def test_read_from_file(self):
reader = QangarooReader()
instances = ensure_list(reader.read(FIXTURES_ROOT / "rc" / "qangaroo.json"))
assert len(instances) == 2
assert [t.text for t in instances[0].fields["candidates"][3]] == ["german", "confederation"]
assert [t.text for t in instances[0].fields["query"]] == ["country", "sms", "braunschweig"]
assert [t.text for t in instances[0].fields["supports"][0][:3]] == [
"The",
"North",
"German",
]
assert [t.text for t in instances[0].fields["answer"]] == ["german", "empire"]
assert instances[0].fields["answer_index"].sequence_index == 4
def test_can_build_from_params(self):
reader = QangarooReader.from_params(Params({}))
assert reader._token_indexers["tokens"].__class__.__name__ == "SingleIdTokenIndexer"
| allennlp-models-main | tests/rc/dataset_readers/qangaroo_test.py |
from allennlp.common import Params
from allennlp.common.util import ensure_list
from allennlp.data import DatasetReader
import pytest
from allennlp_models.rc import SquadReader
from allennlp_models.rc.dataset_readers.squad import SQUAD2_NO_ANSWER_TOKEN
from tests import FIXTURES_ROOT
class TestSquadReader:
def test_from_params(self):
squad1_reader = DatasetReader.from_params(Params({"type": "squad1"}))
assert squad1_reader.no_answer_token is None
squad2_reader = DatasetReader.from_params(Params({"type": "squad2"}))
assert squad2_reader.no_answer_token is not None
with pytest.warns(DeprecationWarning):
squad_reader = DatasetReader.from_params(
Params({"type": "squad1", "skip_invalid_examples": True})
)
assert squad_reader.skip_impossible_questions is True
def test_read_from_file(self):
reader = SquadReader()
instances = ensure_list(reader.read(FIXTURES_ROOT / "rc" / "squad.json"))
assert len(instances) == 5
assert [t.text for t in instances[0].fields["question"].tokens[:3]] == ["To", "whom", "did"]
assert [t.text for t in instances[0].fields["passage"].tokens[:3]] == [
"Architecturally",
",",
"the",
]
assert [t.text for t in instances[0].fields["passage"].tokens[-3:]] == ["of", "Mary", "."]
assert instances[0].fields["span_start"].sequence_index == 102
assert instances[0].fields["span_end"].sequence_index == 104
assert [t.text for t in instances[1].fields["question"].tokens[:3]] == [
"What",
"sits",
"on",
]
assert [t.text for t in instances[1].fields["passage"].tokens[:3]] == [
"Architecturally",
",",
"the",
]
assert [t.text for t in instances[1].fields["passage"].tokens[-3:]] == ["of", "Mary", "."]
assert instances[1].fields["span_start"].sequence_index == 17
assert instances[1].fields["span_end"].sequence_index == 23
# We're checking this case because I changed the answer text to only have a partial
# annotation for the last token, which happens occasionally in the training data. We're
# making sure we get a reasonable output in that case here.
assert [t.text for t in instances[3].fields["question"].tokens[:3]] == [
"Which",
"individual",
"worked",
]
assert [t.text for t in instances[3].fields["passage"].tokens[:3]] == ["In", "1882", ","]
assert [t.text for t in instances[3].fields["passage"].tokens[-3:]] == [
"Nuclear",
"Astrophysics",
".",
]
span_start = instances[3].fields["span_start"].sequence_index
span_end = instances[3].fields["span_end"].sequence_index
answer_tokens = instances[3].fields["passage"].tokens[span_start : (span_end + 1)]
expected_answer_tokens = ["Father", "Julius", "Nieuwland"]
assert [t.text for t in answer_tokens] == expected_answer_tokens
def test_can_build_from_params(self):
reader = SquadReader.from_params(Params({}))
assert reader._tokenizer.__class__.__name__ == "SpacyTokenizer"
assert reader._token_indexers["tokens"].__class__.__name__ == "SingleIdTokenIndexer"
def test_length_limit_works(self):
# We're making sure the length of the text is correct if a length limit is provided.
reader = SquadReader(
passage_length_limit=30, question_length_limit=10, skip_impossible_questions=True
)
instances = ensure_list(reader.read(FIXTURES_ROOT / "rc" / "squad.json"))
assert len(instances[0].fields["question"].tokens) == 10
assert len(instances[0].fields["passage"].tokens) == 30
# invalid examples where all the answers exceed the passage length should be skipped.
assert len(instances) == 3
# Length limit still works if we do not skip the invalid examples
reader = SquadReader(
passage_length_limit=30, question_length_limit=10, skip_impossible_questions=False
)
instances = ensure_list(reader.read(FIXTURES_ROOT / "rc" / "squad.json"))
assert len(instances[0].fields["question"].tokens) == 10
assert len(instances[0].fields["passage"].tokens) == 30
# invalid examples should not be skipped.
assert len(instances) == 5
# Make sure the answer texts do not change, so that the evaluation will not be affected
reader_unlimited = SquadReader(
passage_length_limit=30, question_length_limit=10, skip_impossible_questions=False
)
instances_unlimited = ensure_list(
reader_unlimited.read(FIXTURES_ROOT / "rc" / "squad.json")
)
for instance_x, instance_y in zip(instances, instances_unlimited):
print(instance_x.fields["metadata"]["answer_texts"])
assert set(instance_x.fields["metadata"]["answer_texts"]) == set(
instance_y.fields["metadata"]["answer_texts"]
)
class TestSquad2Reader:
def test_read_from_file(self):
reader = SquadReader.squad2()
instances = ensure_list(reader.read(FIXTURES_ROOT / "rc" / "squad2.json"))
assert len(instances) == 6
assert [t.text for t in instances[0].fields["question"].tokens[:3]] == ["This", "is", "an"]
assert [t.text for t in instances[0].fields["passage"].tokens[:3]] == [
"Architecturally",
",",
"the",
]
assert [t.text for t in instances[0].fields["passage"].tokens[-4:]] == [
"of",
"Mary",
".",
SQUAD2_NO_ANSWER_TOKEN,
]
assert instances[0].fields["span_start"].sequence_index == 142
assert instances[0].fields["span_end"].sequence_index == 142
assert [t.text for t in instances[1].fields["question"].tokens[:3]] == ["To", "whom", "did"]
assert [t.text for t in instances[1].fields["passage"].tokens[:3]] == [
"Architecturally",
",",
"the",
]
assert [t.text for t in instances[1].fields["passage"].tokens[-4:]] == [
"of",
"Mary",
".",
SQUAD2_NO_ANSWER_TOKEN,
]
assert instances[1].fields["span_start"].sequence_index == 102
assert instances[1].fields["span_end"].sequence_index == 104
assert [t.text for t in instances[2].fields["question"].tokens[:3]] == [
"What",
"sits",
"on",
]
assert [t.text for t in instances[2].fields["passage"].tokens[:3]] == [
"Architecturally",
",",
"the",
]
assert [t.text for t in instances[2].fields["passage"].tokens[-4:]] == [
"of",
"Mary",
".",
SQUAD2_NO_ANSWER_TOKEN,
]
assert instances[2].fields["span_start"].sequence_index == 17
assert instances[2].fields["span_end"].sequence_index == 23
# We're checking this case because I changed the answer text to only have a partial
# annotation for the last token, which happens occasionally in the training data. We're
# making sure we get a reasonable output in that case here.
assert [t.text for t in instances[4].fields["question"].tokens[:3]] == [
"Which",
"individual",
"worked",
]
assert [t.text for t in instances[4].fields["passage"].tokens[:3]] == ["In", "1882", ","]
assert [t.text for t in instances[4].fields["passage"].tokens[-4:]] == [
"Nuclear",
"Astrophysics",
".",
SQUAD2_NO_ANSWER_TOKEN,
]
span_start = instances[4].fields["span_start"].sequence_index
span_end = instances[4].fields["span_end"].sequence_index
answer_tokens = instances[4].fields["passage"].tokens[span_start : (span_end + 1)]
expected_answer_tokens = ["Father", "Julius", "Nieuwland"]
assert [t.text for t in answer_tokens] == expected_answer_tokens
def test_can_build_from_params(self):
reader = DatasetReader.from_params(Params({"type": "squad2"}))
assert reader._tokenizer.__class__.__name__ == "SpacyTokenizer" # type: ignore
assert reader._token_indexers["tokens"].__class__.__name__ == "SingleIdTokenIndexer" # type: ignore
def test_length_limit_works(self):
# We're making sure the length of the text is correct if a length limit is provided.
reader = SquadReader.squad2(
passage_length_limit=30, question_length_limit=10, skip_impossible_questions=True
)
instances = ensure_list(reader.read(FIXTURES_ROOT / "rc" / "squad2.json"))
assert len(instances[0].fields["question"].tokens) == 6
assert len(instances[0].fields["passage"].tokens) == 30
# invalid examples where all the answers exceed the passage length should be skipped.
assert len(instances) == 4
# Length limit still works if we do not skip the invalid examples
reader = SquadReader.squad2(
passage_length_limit=30, question_length_limit=10, skip_impossible_questions=False
)
instances = ensure_list(reader.read(FIXTURES_ROOT / "rc" / "squad2.json"))
assert len(instances[0].fields["question"].tokens) == 6
assert len(instances[0].fields["passage"].tokens) == 30
# invalid examples should not be skipped.
assert len(instances) == 6
# Make sure the answer texts do not change, so that the evaluation will not be affected
reader_unlimited = SquadReader.squad2(
passage_length_limit=30, question_length_limit=10, skip_impossible_questions=False
)
instances_unlimited = ensure_list(
reader_unlimited.read(FIXTURES_ROOT / "rc" / "squad2.json")
)
for instance_x, instance_y in zip(instances, instances_unlimited):
print(instance_x.fields["metadata"]["answer_texts"])
assert set(instance_x.fields["metadata"]["answer_texts"]) == set(
instance_y.fields["metadata"]["answer_texts"]
)
| allennlp-models-main | tests/rc/dataset_readers/squad_test.py |
from pytest import approx
from allennlp.common.testing import AllenNlpTestCase
from allennlp.models.archival import load_archive
from allennlp.predictors import Predictor
from allennlp_models.rc import ReadingComprehensionPredictor
from tests import FIXTURES_ROOT
class TestBidafPredictor(AllenNlpTestCase):
def test_uses_named_inputs(self):
inputs = {
"question": "What kind of test succeeded on its first attempt?",
"passage": "One time I was writing a unit test, and it succeeded on the first attempt.",
}
archive = load_archive(FIXTURES_ROOT / "rc" / "bidaf" / "serialization" / "model.tar.gz")
predictor = Predictor.from_archive(archive, "reading_comprehension")
result = predictor.predict_json(inputs)
best_span = result.get("best_span")
assert best_span is not None
assert isinstance(best_span, list)
assert len(best_span) == 2
assert all(isinstance(x, int) for x in best_span)
assert best_span[0] <= best_span[1]
best_span_str = result.get("best_span_str")
assert isinstance(best_span_str, str)
assert best_span_str != ""
for probs_key in ("span_start_probs", "span_end_probs"):
probs = result.get(probs_key)
assert probs is not None
assert all(isinstance(x, float) for x in probs)
assert sum(probs) == approx(1.0)
def test_batch_prediction(self):
inputs = [
{
"question": "What kind of test succeeded on its first attempt?",
"passage": "One time I was writing a unit test, and it succeeded on the first attempt.",
},
{
"question": "What kind of test succeeded on its first attempt at batch processing?",
"passage": "One time I was writing a unit test, and it always failed!",
},
]
archive = load_archive(FIXTURES_ROOT / "rc" / "bidaf" / "serialization" / "model.tar.gz")
predictor = Predictor.from_archive(archive, "reading_comprehension")
results = predictor.predict_batch_json(inputs)
assert len(results) == 2
for result in results:
best_span = result.get("best_span")
best_span_str = result.get("best_span_str")
start_probs = result.get("span_start_probs")
end_probs = result.get("span_end_probs")
assert best_span is not None
assert isinstance(best_span, list)
assert len(best_span) == 2
assert all(isinstance(x, int) for x in best_span)
assert best_span[0] <= best_span[1]
assert isinstance(best_span_str, str)
assert best_span_str != ""
for probs in (start_probs, end_probs):
assert probs is not None
assert all(isinstance(x, float) for x in probs)
assert sum(probs) == approx(1.0)
def test_predictions_to_labeled_instances(self):
inputs = {
"question": "What kind of test succeeded on its first attempt?",
"passage": "One time I was writing a unit test, and it succeeded on the first attempt.",
}
archive = load_archive(FIXTURES_ROOT / "rc" / "bidaf" / "serialization" / "model.tar.gz")
predictor = Predictor.from_archive(archive, "reading_comprehension")
instance = predictor._json_to_instance(inputs)
outputs = predictor._model.forward_on_instance(instance)
new_instances = predictor.predictions_to_labeled_instances(instance, outputs)
assert "span_start" in new_instances[0].fields
assert "span_end" in new_instances[0].fields
assert new_instances[0].fields["span_start"] is not None
assert new_instances[0].fields["span_end"] is not None
assert len(new_instances) == 1
def test_predictions_to_labeled_instances_with_naqanet(self):
inputs = {
"question": "What kind of test succeeded on its first attempt?",
"passage": "One time I was writing 2 unit tests, and 1 succeeded on the first attempt.",
}
archive = load_archive(FIXTURES_ROOT / "rc" / "naqanet" / "serialization" / "model.tar.gz")
predictor = Predictor.from_archive(archive, "reading_comprehension")
predictor._dataset_reader.skip_when_all_empty = False
instance = predictor._json_to_instance(inputs)
outputs = predictor._model.forward_on_instance(instance)
new_instances = predictor.predictions_to_labeled_instances(instance, outputs)
assert "number_indices" in new_instances[0].fields
assert "answer_as_passage_spans" in new_instances[0].fields
assert "answer_as_question_spans" in new_instances[0].fields
assert "answer_as_add_sub_expressions" in new_instances[0].fields
assert "answer_as_counts" in new_instances[0].fields
assert "metadata" in new_instances[0].fields
assert len(new_instances) == 1
outputs["answer"]["answer_type"] = "count"
outputs["answer"]["count"] = 2
new_instances = predictor.predictions_to_labeled_instances(instance, outputs)
assert new_instances[0]["answer_as_counts"][0].label == 2
outputs["answer"]["answer_type"] = "passage_span"
outputs["answer"]["spans"] = [[0, 8]] # character offsets
new_instances = predictor.predictions_to_labeled_instances(instance, outputs)
assert new_instances[0]["answer_as_passage_spans"][0] == (0, 1) # token indices
outputs["answer"]["answer_type"] = "arithmetic"
outputs["answer"]["numbers"] = [{"sign": 2}, {"sign": 0}]
new_instances = predictor.predictions_to_labeled_instances(instance, outputs)
assert new_instances[0]["answer_as_add_sub_expressions"][0].labels == [2, 0, 0]
outputs["answer"]["answer_type"] = "question_span"
outputs["answer"]["spans"] = [[0, 9]] # character offsets
new_instances = predictor.predictions_to_labeled_instances(instance, outputs)
assert new_instances[0]["answer_as_question_spans"][0] == (0, 1) # token indices
| allennlp-models-main | tests/rc/predictors/bidaf_test.py |
| allennlp-models-main | tests/rc/predictors/__init__.py |
from allennlp.common.testing import AllenNlpTestCase
from allennlp.data import Vocabulary
from allennlp_models.rc import TransformerSquadReader
from allennlp_models.rc import TransformerQA
from allennlp_models.rc import TransformerQAPredictor
class TestTransformerQAPredictor(AllenNlpTestCase):
def setup_method(self):
super().setup_method()
self.reader = TransformerSquadReader(length_limit=50, stride=10)
self.vocab = Vocabulary()
self.model = TransformerQA(self.vocab)
self.predictor = TransformerQAPredictor(self.model, self.reader)
# We're running an untrained model, so the answers will be random.
def test_predict_single_instance(self):
prediction = self.predictor.predict(
"What is love?", "Baby don't hurt me, don't hurt me, no more."
)
span_start, span_end = prediction["best_span"]
assert -1 <= span_start <= span_end
assert "best_span_str" in prediction and isinstance(prediction["best_span_str"], str)
if span_start > -1:
assert len(prediction["best_span_str"]) > 0
def test_predict_long_instance(self):
# We use a short context and a long context, so that the long context has to be broken into multiple
# instances and re-assembled into a single answer.
questions = [
{
"question": "Do fish drink water?",
"context": """
A freshwater fish's insides has a higher salt content than the exterior water, so their bodies
are constantly absorbing water through osmosis via their permeable gills.
""",
},
{
"question": "Why don't animals have wheels?",
"context": """
The worlds of fiction and myth are full of wheeled creatures, so why not the real world? After
all, the wheel is an efficient design, and it seems like there would be obvious advantages to
quickly moving around while consuming little energy.
The key is to remember that evolution is a process, not something that happens overnight. A
giraffe with just a little bit longer neck than the others will be able to reach slightly
higher trees, which will ultimately lead to the species' neck length getting longer and longer
over generations. In the meantime, those other giraffes can still eat, just not quite as well.
But a wheel either works or it doesn't. A somewhat circular semi-wheelish thing would only be a
hindrance, and evolution can't produce a trait that's perfect from the get-go.
""",
},
]
predictions = self.predictor.predict_batch_json(questions)
assert len(predictions) == 2
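# For context, a minimal sketch of the sliding-window chunking that lets the
# long context above be split into overlapping instances and re-assembled.
# Treating `stride` as the overlap between consecutive windows is an assumption
# for illustration; the reader's own windowing logic differs in detail.
def _sliding_windows_sketch(tokens, window=50, stride=10):
    chunks, start = [], 0
    while True:
        chunks.append(tokens[start : start + window])
        if start + window >= len(tokens):
            break
        # Advance by the window size minus the overlap.
        start += window - stride
    return chunks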
| allennlp-models-main | tests/rc/predictors/transformer_qa_test.py |
from allennlp.models.archival import load_archive
from allennlp.predictors import Predictor
from tests import FIXTURES_ROOT
class TestDialogQAPredictor:
def test_uses_named_inputs(self):
inputs = {
"paragraphs": [
{
"qas": [
{
"followup": "y",
"yesno": "x",
"question": "When was the first one?",
"answers": [{"answer_start": 0, "text": "One time"}],
"id": "C_q#0",
},
{
"followup": "n",
"yesno": "x",
"question": "What were you doing?",
"answers": [{"answer_start": 15, "text": "writing a"}],
"id": "C_q#1",
},
{
"followup": "m",
"yesno": "y",
"question": "How often?",
"answers": [{"answer_start": 4, "text": "time I"}],
"id": "C_q#2",
},
],
"context": "One time I was writing a unit test,\
and it succeeded on the first attempt.",
}
]
}
archive = load_archive(
FIXTURES_ROOT / "rc" / "dialog_qa" / "serialization" / "model.tar.gz"
)
predictor = Predictor.from_archive(archive, "dialog_qa")
result = predictor.predict_json(inputs)
best_span_str_list = result.get("best_span_str")
for best_span_str in best_span_str_list:
assert isinstance(best_span_str, str)
assert best_span_str != ""
def test_batch_prediction(self):
inputs = [
{
"paragraphs": [
{
"qas": [
{
"followup": "y",
"yesno": "x",
"question": "When was the first one?",
"answers": [{"answer_start": 0, "text": "One time"}],
"id": "C_q#0",
},
{
"followup": "n",
"yesno": "x",
"question": "What were you doing?",
"answers": [{"answer_start": 15, "text": "writing a"}],
"id": "C_q#1",
},
{
"followup": "m",
"yesno": "y",
"question": "How often?",
"answers": [{"answer_start": 4, "text": "time I"}],
"id": "C_q#2",
},
],
"context": "One time I was writing a unit test,\
and it succeeded on the first attempt.",
}
]
},
{
"paragraphs": [
{
"qas": [
{
"followup": "y",
"yesno": "x",
"question": "When was the first one?",
"answers": [{"answer_start": 0, "text": "One time"}],
"id": "C_q#0",
},
{
"followup": "n",
"yesno": "x",
"question": "What were you doing?",
"answers": [{"answer_start": 15, "text": "writing a"}],
"id": "C_q#1",
},
{
"followup": "m",
"yesno": "y",
"question": "How often?",
"answers": [{"answer_start": 4, "text": "time I"}],
"id": "C_q#2",
},
],
"context": "One time I was writing a unit test,\
and it succeeded on the first attempt.",
}
]
},
]
archive = load_archive(
FIXTURES_ROOT / "rc" / "dialog_qa" / "serialization" / "model.tar.gz"
)
predictor = Predictor.from_archive(archive, "dialog_qa")
results = predictor.predict_batch_json(inputs)
assert len(results) == 2
| allennlp-models-main | tests/rc/predictors/dialog_qa_test.py |
from flaky import flaky
import pytest
import numpy
from numpy.testing import assert_almost_equal
import torch
from allennlp.commands.train import train_model_from_file
from allennlp.common import Params
from allennlp.common.checks import ConfigurationError
from allennlp.common.testing import AllenNlpTestCase, ModelTestCase, requires_gpu
from allennlp.data import DatasetReader, Vocabulary
from allennlp.data import Batch
from allennlp.models import Model
from allennlp_models.rc import BidirectionalAttentionFlow
from tests import FIXTURES_ROOT
class BidirectionalAttentionFlowTest(ModelTestCase):
def setup_method(self):
super().setup_method()
self.set_up_model(
FIXTURES_ROOT / "rc" / "bidaf" / "experiment.json",
FIXTURES_ROOT / "rc" / "squad.json",
seed=27,
)
torch.use_deterministic_algorithms(True)
def teardown_method(self):
super().teardown_method()
torch.use_deterministic_algorithms(False)
@flaky
def test_forward_pass_runs_correctly(self):
batch = Batch(self.instances)
batch.index_instances(self.vocab)
training_tensors = batch.as_tensor_dict()
output_dict = self.model(**training_tensors)
metrics = self.model.get_metrics(reset=True)
# We've set up the data such that there's a fake answer that consists of the whole
# paragraph. _Any_ valid prediction for that question should produce an F1 of greater than
# zero, while if we somehow haven't been able to load the evaluation data, or there was an
# error with using the evaluation script, this will fail. This makes sure that we've
# loaded the evaluation data correctly and have hooked things up to the official evaluation
# script.
assert metrics["f1"] > 0
span_start_probs = output_dict["span_start_probs"][0].data.numpy()
span_end_probs = output_dict["span_end_probs"][0].data.numpy()
assert_almost_equal(numpy.sum(span_start_probs, -1), 1, decimal=6)
assert_almost_equal(numpy.sum(span_end_probs, -1), 1, decimal=6)
span_start, span_end = tuple(output_dict["best_span"][0].data.numpy())
assert span_start >= 0
assert span_start <= span_end
assert span_end < self.instances[0].fields["passage"].sequence_length()
assert isinstance(output_dict["best_span_str"][0], str)
# Some recent efficiency changes (using bmm for `weighted_sum`, the more efficient
# `masked_softmax`...) have made this _very_ flaky...
@flaky(max_runs=5)
def test_model_can_train_save_and_load(self):
self.ensure_model_can_train_save_and_load(
self.param_file,
tolerance=1e-4,
gradients_to_ignore={"_span_start_predictor._module.bias"},
)
@flaky
def test_batch_predictions_are_consistent(self):
# The CNN encoder has problems with this kind of test - it's not properly masked yet, so
# changing the amount of padding in the batch will result in small differences in the
# output of the encoder. Because BiDAF is so deep, these differences get magnified through
# the network and make this test impossible. So, we'll remove the CNN encoder entirely
# from the model for this test. If/when we fix the CNN encoder to work correctly with
# masking, we can change this back to how the other models run this test, with just a
# single line.
# Save some state.
saved_model = self.model
saved_instances = self.instances
# Modify the state, run the test with modified state.
params = Params.from_file(self.param_file)
reader = DatasetReader.from_params(params["dataset_reader"])
reader._token_indexers = {"tokens": reader._token_indexers["tokens"]}
self.instances = list(reader.read(FIXTURES_ROOT / "rc" / "squad.json"))
vocab = Vocabulary.from_instances(self.instances)
for instance in self.instances:
instance.index_fields(vocab)
del params["model"]["text_field_embedder"]["token_embedders"]["token_characters"]
params["model"]["phrase_layer"]["input_size"] = 2
self.model = Model.from_params(vocab=vocab, params=params["model"])
self.ensure_batch_predictions_are_consistent()
# Restore the state.
self.model = saved_model
self.instances = saved_instances
def test_get_best_span(self):
span_begin_probs = torch.FloatTensor([[0.1, 0.3, 0.05, 0.3, 0.25]]).log()
span_end_probs = torch.FloatTensor([[0.65, 0.05, 0.2, 0.05, 0.05]]).log()
begin_end_idxs = BidirectionalAttentionFlow.get_best_span(span_begin_probs, span_end_probs)
assert_almost_equal(begin_end_idxs.data.numpy(), [[0, 0]])
# When we were using exclusive span ends, this was an edge case of the dynamic program.
# We're keeping the test to make sure we get it right now, after the switch in inclusive
# span end. The best answer is (1, 1).
span_begin_probs = torch.FloatTensor([[0.4, 0.5, 0.1]]).log()
span_end_probs = torch.FloatTensor([[0.3, 0.6, 0.1]]).log()
begin_end_idxs = BidirectionalAttentionFlow.get_best_span(span_begin_probs, span_end_probs)
assert_almost_equal(begin_end_idxs.data.numpy(), [[1, 1]])
# Another instance that used to be an edge case.
span_begin_probs = torch.FloatTensor([[0.8, 0.1, 0.1]]).log()
span_end_probs = torch.FloatTensor([[0.8, 0.1, 0.1]]).log()
begin_end_idxs = BidirectionalAttentionFlow.get_best_span(span_begin_probs, span_end_probs)
assert_almost_equal(begin_end_idxs.data.numpy(), [[0, 0]])
span_begin_probs = torch.FloatTensor([[0.1, 0.2, 0.05, 0.3, 0.25]]).log()
span_end_probs = torch.FloatTensor([[0.1, 0.2, 0.5, 0.05, 0.15]]).log()
begin_end_idxs = BidirectionalAttentionFlow.get_best_span(span_begin_probs, span_end_probs)
assert_almost_equal(begin_end_idxs.data.numpy(), [[1, 2]])
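# For reference, a minimal quadratic sketch of the span selection exercised
# above: pick (start, end) with start <= end that maximizes
# log P(start) + log P(end). Illustrative only, not the model's batched
# implementation.
def _naive_best_span(span_start_log_probs, span_end_log_probs):
    best, best_score = (0, 0), float("-inf")
    for i, s in enumerate(span_start_log_probs):
        # Only consider end positions at or after the start position.
        for j, e in enumerate(span_end_log_probs[i:], start=i):
            if s + e > best_score:
                best, best_score = (i, j), s + e
    return best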
def test_mismatching_dimensions_throws_configuration_error(self):
params = Params.from_file(self.param_file)
# Make the phrase layer wrong - it should be 10 to match
# the embedding + char cnn dimensions.
params["model"]["phrase_layer"]["input_size"] = 12
with pytest.raises(ConfigurationError):
Model.from_params(vocab=self.vocab, params=params.pop("model"))
params = Params.from_file(self.param_file)
# Make the modeling layer input_dimension wrong - it should be 40 to match
# 4 * output_dim of the phrase_layer.
params["model"]["phrase_layer"]["input_size"] = 30
with pytest.raises(ConfigurationError):
Model.from_params(vocab=self.vocab, params=params.pop("model"))
params = Params.from_file(self.param_file)
# Make the modeling layer input_dimension wrong - it should be 70 to match
# 4 * phrase_layer.output_dim + 3 * modeling_layer.output_dim.
params["model"]["span_end_encoder"]["input_size"] = 50
with pytest.raises(ConfigurationError):
Model.from_params(vocab=self.vocab, params=params.pop("model"))
@requires_gpu
class BidirectionalAttentionFlowMixedPrecisionTest(AllenNlpTestCase):
@flaky(max_runs=5)
def test_model_can_train_save_and_load_with_mixed_precision(self):
seed = 0 # This test is very sensitive to the seed.
train_model_from_file(
FIXTURES_ROOT / "rc" / "bidaf" / "experiment.json",
self.TEST_DIR,
overrides=f"{{'trainer.use_amp':true,'trainer.cuda_device':0,'random_seed':{seed},'numpy_seed':{seed},'pytorch_seed':{seed}}}",
)
| allennlp-models-main | tests/rc/models/bidaf_test.py |
import numpy
import torch
from flaky import flaky
from allennlp.common.testing import ModelTestCase
from allennlp.data import Batch
from allennlp_models.rc import BidafEnsemble
from tests import FIXTURES_ROOT
class BidafEnsembleTest(ModelTestCase):
def setup_method(self):
super().setup_method()
self.set_up_model(
FIXTURES_ROOT / "rc" / "bidaf" / "experiment.json", FIXTURES_ROOT / "rc" / "squad.json"
)
self.model.eval()
def test_ensemble_chooses_highest_average_confidence_2(self):
subresults = [
{
"span_start_probs": torch.FloatTensor([[0.9, 0.0, 0.0, 0.0]]),
"span_end_probs": torch.FloatTensor([[0.9, 0.0, 0.0, 0.0]]),
"best_span": torch.LongTensor([[0, 0]]),
"best_span_str": "What",
"question_tokens": ["What", "did", "Michael", "eat", "?"],
"passage_tokens": ["Michael", "ate", "cheese", "."],
},
{
"span_start_probs": torch.FloatTensor([[0.0, 0.0, 1.0, 0.0]]),
"span_end_probs": torch.FloatTensor([[0.0, 0.0, 1.0, 0.0]]),
"best_span": torch.LongTensor([[2, 2]]),
"best_span_str": "cheese",
"question_tokens": ["What", "did", "Michael", "eat", "?"],
"passage_tokens": ["Michael", "ate", "cheese", "."],
},
]
from allennlp_models.rc.models.bidaf_ensemble import ensemble
numpy.testing.assert_almost_equal(
ensemble(subresults).data[0].cpu().numpy(), torch.LongTensor([2, 2]).cpu().numpy()
)
def test_ensemble_chooses_highest_average_confidence_3(self):
subresults = [
{
"span_start_probs": torch.FloatTensor([[0.0, 0.0, 0.9, 0.1]]),
"span_end_probs": torch.FloatTensor([[0.0, 0.0, 0.9, 0.1]]),
"best_span": torch.LongTensor([[2, 2]]),
"best_span_str": "cheese",
"question_tokens": ["What", "did", "Michael", "eat", "?"],
"passage_tokens": ["Michael", "ate", "cheese", "."],
},
{
"span_start_probs": torch.FloatTensor([[0.0, 0.0, 0.9, 0.1]]),
"span_end_probs": torch.FloatTensor([[0.0, 0.0, 0.9, 0.1]]),
"best_span": torch.LongTensor([[2, 2]]),
"best_span_str": "cheese",
"question_tokens": ["What", "did", "Michael", "eat", "?"],
"passage_tokens": ["Michael", "ate", "cheese", "."],
},
{
"span_start_probs": torch.FloatTensor([[0.9, 0.0, 0.0, 0.0]]),
"span_end_probs": torch.FloatTensor([[0.9, 0.0, 0.0, 0.0]]),
"best_span": torch.LongTensor([[0, 0]]),
"best_span_str": "What",
"question_tokens": ["What", "did", "Michael", "eat", "?"],
"passage_tokens": ["Michael", "ate", "cheese", "."],
},
]
from allennlp_models.rc.models.bidaf_ensemble import ensemble
numpy.testing.assert_almost_equal(
ensemble(subresults).data[0].cpu().numpy(), torch.LongTensor([2, 2]).numpy()
)
@flaky
def test_forward_pass_runs_correctly(self):
"""
Check to make sure a forward pass on an ensemble of two identical copies of a model yields the same
results as the model itself.
"""
bidaf_ensemble = BidafEnsemble([self.model, self.model])
batch = Batch(self.instances)
batch.index_instances(self.vocab)
training_tensors = batch.as_tensor_dict()
bidaf_output_dict = self.model(**training_tensors)
ensemble_output_dict = bidaf_ensemble(**training_tensors)
metrics = self.model.get_metrics(reset=True)
# We've set up the data such that there's a fake answer that consists of the whole
# paragraph. _Any_ valid prediction for that question should produce an F1 of greater than
# zero, while if we somehow haven't been able to load the evaluation data, or there was an
# error with using the evaluation script, this will fail. This makes sure that we've
# loaded the evaluation data correctly and have hooked things up to the official evaluation
# script.
assert metrics["f1"] > 0
assert torch.equal(ensemble_output_dict["best_span"], bidaf_output_dict["best_span"])
assert ensemble_output_dict["best_span_str"] == bidaf_output_dict["best_span_str"]
| allennlp-models-main | tests/rc/models/bidaf_ensemble_test.py |
from numpy.testing import assert_almost_equal
import torch
from allennlp.common.testing import AllenNlpTestCase
from allennlp_models.rc.models.utils import get_best_span
class TestRcUtil(AllenNlpTestCase):
def test_get_best_span(self):
span_begin_probs = torch.FloatTensor([[0.1, 0.3, 0.05, 0.3, 0.25]]).log()
span_end_probs = torch.FloatTensor([[0.65, 0.05, 0.2, 0.05, 0.05]]).log()
begin_end_idxs = get_best_span(span_begin_probs, span_end_probs)
assert_almost_equal(begin_end_idxs.data.numpy(), [[0, 0]])
# When we were using exclusive span ends, this was an edge case of the dynamic program.
        # We're keeping the test to make sure we get it right now, after the switch to
        # inclusive span ends. The best answer is (1, 1).
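        # Informally, get_best_span picks the (begin, end) pair with begin <= end that
        # maximizes begin_prob * end_prob. For the probabilities below: (0, 0) -> 0.4 * 0.3 = 0.12,
        # (0, 1) -> 0.4 * 0.6 = 0.24, (1, 1) -> 0.5 * 0.6 = 0.30, so (1, 1) wins.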
span_begin_probs = torch.FloatTensor([[0.4, 0.5, 0.1]]).log()
span_end_probs = torch.FloatTensor([[0.3, 0.6, 0.1]]).log()
begin_end_idxs = get_best_span(span_begin_probs, span_end_probs)
assert_almost_equal(begin_end_idxs.data.numpy(), [[1, 1]])
# Another instance that used to be an edge case.
span_begin_probs = torch.FloatTensor([[0.8, 0.1, 0.1]]).log()
span_end_probs = torch.FloatTensor([[0.8, 0.1, 0.1]]).log()
begin_end_idxs = get_best_span(span_begin_probs, span_end_probs)
assert_almost_equal(begin_end_idxs.data.numpy(), [[0, 0]])
span_begin_probs = torch.FloatTensor([[0.1, 0.2, 0.05, 0.3, 0.25]]).log()
span_end_probs = torch.FloatTensor([[0.1, 0.2, 0.5, 0.05, 0.15]]).log()
begin_end_idxs = get_best_span(span_begin_probs, span_end_probs)
assert_almost_equal(begin_end_idxs.data.numpy(), [[1, 2]])
| allennlp-models-main | tests/rc/models/utils_test.py |
from allennlp.common.testing import ModelTestCase
from tests import FIXTURES_ROOT
from allennlp_models.rc import NumericallyAugmentedQaNet
class NumericallyAugmentedQaNetTest(ModelTestCase):
def setup_method(self):
super().setup_method()
self.set_up_model(
FIXTURES_ROOT / "rc" / "naqanet" / "experiment.json",
FIXTURES_ROOT / "rc" / "drop.json",
)
def test_model_can_train_save_and_load(self):
import torch
torch.autograd.set_detect_anomaly(True)
self.ensure_model_can_train_save_and_load(
self.param_file,
# Due to numerical instability, these scalar tensors might sometimes
# have zero gradients.
gradients_to_ignore={
"_passage_span_end_predictor._linear_layers.1.bias",
"_question_span_end_predictor._linear_layers.1.bias",
},
)
| allennlp-models-main | tests/rc/models/naqanet_test.py |
from flaky import flaky
import numpy
from numpy.testing import assert_almost_equal
from allennlp.common import Params
from allennlp.common.testing import ModelTestCase
from allennlp.data import DatasetReader, Vocabulary
from allennlp.data import Batch
from allennlp.models import Model
from allennlp_models import rc # noqa: F401
from tests import FIXTURES_ROOT
class QaNetTest(ModelTestCase):
def setup_method(self):
super().setup_method()
self.set_up_model(
FIXTURES_ROOT / "rc" / "qanet" / "experiment.json", FIXTURES_ROOT / "rc" / "squad.json"
)
@flaky
def test_forward_pass_runs_correctly(self):
batch = Batch(self.instances)
batch.index_instances(self.vocab)
training_tensors = batch.as_tensor_dict()
output_dict = self.model(**training_tensors)
metrics = self.model.get_metrics(reset=True)
# We've set up the data such that there's a fake answer that consists of the whole
# paragraph. _Any_ valid prediction for that question should produce an F1 of greater than
# zero, while if we somehow haven't been able to load the evaluation data, or there was an
# error with using the evaluation script, this will fail. This makes sure that we've
# loaded the evaluation data correctly and have hooked things up to the official evaluation
# script.
assert metrics["f1"] > 0
span_start_probs = output_dict["span_start_probs"][0].data.numpy()
span_end_probs = output_dict["span_start_probs"][0].data.numpy()
assert_almost_equal(numpy.sum(span_start_probs, -1), 1, decimal=6)
assert_almost_equal(numpy.sum(span_end_probs, -1), 1, decimal=6)
span_start, span_end = tuple(output_dict["best_span"][0].data.numpy())
assert span_start >= 0
assert span_start <= span_end
assert span_end < self.instances[0].fields["passage"].sequence_length()
assert isinstance(output_dict["best_span_str"][0], str)
@flaky
def test_model_can_train_save_and_load(self):
self.ensure_model_can_train_save_and_load(self.param_file, tolerance=1e-4)
def test_batch_predictions_are_consistent(self):
# The same issue as the bidaf test case.
# The CNN encoder has problems with this kind of test - it's not properly masked yet, so
# changing the amount of padding in the batch will result in small differences in the
# output of the encoder. So, we'll remove the CNN encoder entirely from the model for this test.
# Save some state.
saved_model = self.model
saved_instances = self.instances
# Modify the state, run the test with modified state.
params = Params.from_file(self.param_file)
reader = DatasetReader.from_params(params["dataset_reader"])
reader._token_indexers = {"tokens": reader._token_indexers["tokens"]}
self.instances = list(reader.read(FIXTURES_ROOT / "rc" / "squad.json"))
vocab = Vocabulary.from_instances(self.instances)
for instance in self.instances:
instance.index_fields(vocab)
del params["model"]["text_field_embedder"]["token_embedders"]["token_characters"]
params["model"]["phrase_layer"]["num_convs_per_block"] = 0
params["model"]["modeling_layer"]["num_convs_per_block"] = 0
self.model = Model.from_params(vocab=vocab, params=params["model"])
self.ensure_batch_predictions_are_consistent()
# Restore the state.
self.model = saved_model
self.instances = saved_instances
| allennlp-models-main | tests/rc/models/qanet_test.py |
allennlp-models-main | tests/rc/models/__init__.py |
|
import numpy
from numpy.testing import assert_almost_equal
from allennlp.commands.train import train_model_from_file
from allennlp.common.testing import ModelTestCase, AllenNlpTestCase, requires_gpu
from allennlp.data import Batch
from tests import FIXTURES_ROOT
import pytest
import torch
import allennlp_models.rc
class TransformerQaTest(ModelTestCase):
def setup_method(self):
super().setup_method()
self.set_up_model(
FIXTURES_ROOT / "rc" / "transformer_qa" / "experiment.jsonnet",
FIXTURES_ROOT / "rc" / "squad.json",
)
def test_model_can_train_save_and_load(self):
# Huggingface transformer models come with pooler weights, but this model doesn't use the pooler.
self.ensure_model_can_train_save_and_load(
self.param_file,
gradients_to_ignore={
"_text_field_embedder.token_embedder_tokens.transformer_model.pooler.dense.weight",
"_text_field_embedder.token_embedder_tokens.transformer_model.pooler.dense.bias",
},
)
def test_forward_pass_runs_correctly(self):
self.model.training = False
batch = Batch(self.instances)
batch.index_instances(self.vocab)
training_tensors = batch.as_tensor_dict()
output_dict = self.model(**training_tensors)
metrics = self.model.get_metrics(reset=True)
# We've set up the data such that there's a fake answer that consists of the whole
# paragraph. _Any_ valid prediction for that question should produce an F1 of greater than
# zero, while if we somehow haven't been able to load the evaluation data, or there was an
# error with using the evaluation script, this will fail. This makes sure that we've
# loaded the evaluation data correctly and have hooked things up to the official evaluation
# script.
assert metrics["per_instance_f1"] > 0
output_span_start_probs = output_dict["span_start_probs"][0].data.numpy()
output_span_end_probs = output_dict["span_end_probs"][0].data.numpy()
output_best_span_probs = output_dict["best_span_probs"][0].data.numpy()
assert_almost_equal(numpy.sum(output_span_start_probs, -1), 1, decimal=6)
assert_almost_equal(numpy.sum(output_span_end_probs, -1), 1, decimal=6)
assert output_best_span_probs > 0 and numpy.sum(output_best_span_probs, -1) <= 1
span_start_probs = torch.nn.functional.softmax(output_dict["span_start_logits"], dim=-1)[
0
].data.numpy()
span_end_probs = torch.nn.functional.softmax(output_dict["span_end_logits"], dim=-1)[
0
].data.numpy()
best_span_probs = (
torch.nn.functional.softmax(output_dict["best_span_scores"], dim=-1)[0].data.numpy(),
0,
)
assert_almost_equal(numpy.sum(span_start_probs, -1), 1, decimal=6)
assert_almost_equal(numpy.sum(span_end_probs, -1), 1, decimal=6)
assert numpy.sum(best_span_probs, -1) > 0 and numpy.sum(best_span_probs, -1) <= 1
span_start, span_end = tuple(output_dict["best_span"][0].data.numpy())
assert span_start >= -1
assert span_start <= span_end
assert span_end < self.instances[0].fields["question_with_context"].sequence_length()
assert isinstance(output_dict["best_span_str"][0], str)
class TransformerQaV2Test(ModelTestCase):
def setup_method(self):
super().setup_method()
self.set_up_model(
FIXTURES_ROOT / "rc" / "transformer_qa" / "experiment_v2.jsonnet",
FIXTURES_ROOT / "rc" / "squad2.json",
)
def test_model_can_train_save_and_load(self):
# Huggingface transformer models come with pooler weights, but this model doesn't use the pooler.
self.ensure_model_can_train_save_and_load(
self.param_file,
gradients_to_ignore={
"_text_field_embedder.token_embedder_tokens.transformer_model.pooler.dense.weight",
"_text_field_embedder.token_embedder_tokens.transformer_model.pooler.dense.bias",
},
)
@requires_gpu
class TransformerQaMixedPrecisionTest(AllenNlpTestCase):
def test_model_can_train_save_and_load_with_mixed_precision(self):
train_model_from_file(
FIXTURES_ROOT / "rc" / "transformer_qa" / "experiment.jsonnet",
self.TEST_DIR,
overrides="{'trainer.use_amp':true,'trainer.cuda_device':0}",
)
| allennlp-models-main | tests/rc/models/transformer_qa_test.py |
from allennlp.common.testing import ModelTestCase
from allennlp.data import Batch
import torch
import allennlp_models.rc
from tests import FIXTURES_ROOT
class DialogQATest(ModelTestCase):
def setup_method(self):
super().setup_method()
self.set_up_model(
FIXTURES_ROOT / "rc" / "dialog_qa" / "experiment.json",
FIXTURES_ROOT / "rc" / "dialog_qa" / "quac_sample.json",
seed=42,
)
self.batch = Batch(self.instances)
self.batch.index_instances(self.vocab)
torch.use_deterministic_algorithms(True)
def teardown_method(self):
super().teardown_method()
torch.use_deterministic_algorithms(False)
def test_forward_pass_runs_correctly(self):
training_tensors = self.batch.as_tensor_dict()
output_dict = self.model(**training_tensors)
assert "best_span_str" in output_dict and "loss" in output_dict
assert "followup" in output_dict and "yesno" in output_dict
def test_model_can_train_save_and_load(self):
self.ensure_model_can_train_save_and_load(
self.param_file, tolerance=1e-4, gradients_to_ignore={"_matrix_attention._bias"}
)
def test_batch_predictions_are_consistent(self):
self.ensure_batch_predictions_are_consistent()
| allennlp-models-main | tests/rc/models/dialog_qa_test.py |
from allennlp.common import Params
from allennlp.common.util import ensure_list
from allennlp_models.rc import QuACReader
from tests import FIXTURES_ROOT
class TestQuACReader:
def test_read(self):
params = Params({"num_context_answers": 2})
reader = QuACReader.from_params(params)
instances = reader.read(str(FIXTURES_ROOT / "rc" / "dialog_qa" / "quac_sample.json"))
instances = ensure_list(instances)
assert instances[0].fields["question"].sequence_length() == 6
assert instances[0].fields["yesno_list"].sequence_length() == 6
assert [t.text for t in instances[0].fields["question"].field_list[0].tokens[:3]] == [
"What",
"was",
"the",
]
assert len(instances) == 2
passage_length = len(instances[0].fields["passage"].tokens)
assert [t.text for t in instances[0].fields["passage"].tokens[:3]] == ["DJ", "Kool", "Herc"]
assert [x.label for x in instances[0].fields["yesno_list"].field_list] == [
"x",
"x",
"y",
"x",
"x",
"x",
]
assert [x.label for x in instances[0].fields["followup_list"].field_list] == [
"y",
"m",
"m",
"n",
"m",
"y",
]
assert (
instances[0].fields["p1_answer_marker"].field_list[0].labels == ["O"] * passage_length
)
# Check the previous answer marking here
prev_1_list = ["O"] * passage_length
prev_2_list = ["O"] * passage_length
q0_span_start = instances[0].fields["span_start"].field_list[0].sequence_index
q0_span_end = instances[0].fields["span_end"].field_list[0].sequence_index
prev_1_list[q0_span_start] = "<{0:d}_{1:s}>".format(1, "start")
prev_1_list[q0_span_end] = "<{0:d}_{1:s}>".format(1, "end")
prev_2_list[q0_span_start] = "<{0:d}_{1:s}>".format(2, "start")
prev_2_list[q0_span_end] = "<{0:d}_{1:s}>".format(2, "end")
for passage_index in range(q0_span_start + 1, q0_span_end):
prev_1_list[passage_index] = "<{0:d}_{1:s}>".format(1, "in")
prev_2_list[passage_index] = "<{0:d}_{1:s}>".format(2, "in")
assert instances[0].fields["p1_answer_marker"].field_list[1].labels == prev_1_list
assert instances[0].fields["p2_answer_marker"].field_list[2].labels == prev_2_list
| allennlp-models-main | tests/rc/models/quac_test.py |
from allennlp.interpret.attackers import Hotflip
from allennlp.interpret.attackers.hotflip import DEFAULT_IGNORE_TOKENS
from allennlp.models import load_archive
from allennlp.predictors import Predictor
import allennlp_models.rc
from tests import FIXTURES_ROOT
class TestHotflip:
def test_using_squad_model(self):
inputs = {
"question": "OMG, I heard you coded a test that succeeded on its first attempt, is that true?",
"passage": "Bro, never doubt a coding wizard! I am the king of software, MWAHAHAHA",
}
archive = load_archive(FIXTURES_ROOT / "rc" / "bidaf" / "serialization" / "model.tar.gz")
predictor = Predictor.from_archive(archive, "reading_comprehension")
hotflipper = Hotflip(predictor)
hotflipper.initialize()
attack = hotflipper.attack_from_json(inputs, "question", "grad_input_2")
print(attack)
assert attack is not None
assert "final" in attack
assert "original" in attack
assert "outputs" in attack
assert len(attack["final"][0]) == len(
attack["original"]
) # hotflip replaces words without removing
instance = predictor._json_to_instance(inputs)
assert instance["question"] != attack["final"][0] # check that the input has changed.
outputs = predictor._model.forward_on_instance(instance)
original_labeled_instance = predictor.predictions_to_labeled_instances(instance, outputs)[0]
original_span_start = original_labeled_instance["span_start"].sequence_index
original_span_end = original_labeled_instance["span_end"].sequence_index
flipped_span_start = attack["outputs"][0]["best_span"][0]
flipped_span_end = attack["outputs"][0]["best_span"][1]
for i, token in enumerate(instance["question"]):
token = str(token)
if token in DEFAULT_IGNORE_TOKENS:
assert token in attack["final"][0] # ignore tokens should not be changed
# HotFlip keeps changing tokens until either the prediction changes or all tokens have
# been changed. If there are tokens in the HotFlip final result that were in the
# original (i.e., not all tokens were flipped), then the prediction should be
# different.
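            # Put differently (informal): if any non-ignored token survived unflipped, the
            # attack must have stopped early, which only happens once the predicted span moved.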
else:
if token == attack["final"][0][i]:
assert (
original_span_start != flipped_span_start
or original_span_end != flipped_span_end
)
| allennlp-models-main | tests/rc/interpret/bidaf_hotflip_test.py |
allennlp-models-main | tests/rc/interpret/__init__.py |
|
allennlp-models-main | tests/rc/modules/__init__.py |
|
import torch
from torch.nn.parallel.data_parallel import DataParallel
from allennlp.common.testing import AllenNlpTestCase, requires_multi_gpu
from allennlp_models.rc.modules.seq2seq_encoders.stacked_self_attention import (
StackedSelfAttentionEncoder,
)
class TestStackedSelfAttention(AllenNlpTestCase):
def test_get_dimension_is_correct(self):
encoder = StackedSelfAttentionEncoder(
input_dim=9,
hidden_dim=12,
projection_dim=6,
feedforward_hidden_dim=5,
num_layers=3,
num_attention_heads=3,
)
assert encoder.get_input_dim() == 9
        # The encoder's output dim is its hidden_dim.
assert encoder.get_output_dim() == 12
    def test_stacked_self_attention_can_run_forward(self):
# Correctness checks are elsewhere - this is just stacking
# blocks which are already well tested, so we just check shapes.
encoder = StackedSelfAttentionEncoder(
input_dim=9,
hidden_dim=12,
projection_dim=9,
feedforward_hidden_dim=5,
num_layers=3,
num_attention_heads=3,
)
inputs = torch.randn([3, 5, 9])
encoder_output = encoder(inputs, None)
assert list(encoder_output.size()) == [3, 5, 12]
@requires_multi_gpu
    def test_stacked_self_attention_can_run_forward_on_multiple_gpus(self):
encoder = StackedSelfAttentionEncoder(
input_dim=9,
hidden_dim=12,
projection_dim=9,
feedforward_hidden_dim=5,
num_layers=3,
num_attention_heads=3,
).to(0)
parallel_encoder = DataParallel(encoder, device_ids=[0, 1])
inputs = torch.randn([3, 5, 9]).to(0)
encoder_output = parallel_encoder(inputs, None)
assert list(encoder_output.size()) == [3, 5, 12]
| allennlp-models-main | tests/rc/modules/seq2seq_encoders/stacked_self_attention_test.py |
import torch
from allennlp.common.testing import AllenNlpTestCase
from allennlp.common.params import Params
from allennlp_models.rc import QaNetEncoder
class QaNetEncoderTest(AllenNlpTestCase):
def test_qanet_encoder_can_build_from_params(self):
params = Params(
{
"input_dim": 16,
"hidden_dim": 16,
"attention_projection_dim": 16,
"feedforward_hidden_dim": 16,
"num_blocks": 2,
"num_convs_per_block": 2,
"conv_kernel_size": 3,
"num_attention_heads": 4,
"dropout_prob": 0.1,
"layer_dropout_undecayed_prob": 0.1,
"attention_dropout_prob": 0,
}
)
encoder = QaNetEncoder.from_params(params)
assert isinstance(encoder, QaNetEncoder)
assert encoder.get_input_dim() == 16
assert encoder.get_output_dim() == 16
def test_qanet_encoder_runs_forward(self):
encoder = QaNetEncoder(
input_dim=16,
hidden_dim=16,
attention_projection_dim=16,
feedforward_hidden_dim=16,
num_blocks=2,
num_convs_per_block=2,
conv_kernel_size=3,
num_attention_heads=4,
dropout_prob=0.1,
layer_dropout_undecayed_prob=0.1,
attention_dropout_prob=0.1,
)
inputs = torch.randn(2, 12, 16)
assert list(encoder(inputs).size()) == [2, 12, 16]
| allennlp-models-main | tests/rc/modules/seq2seq_encoders/qanet_encoder_test.py |
allennlp-models-main | tests/rc/modules/seq2seq_encoders/__init__.py |
|
import numpy
import torch
from allennlp.common.testing import AllenNlpTestCase
from allennlp.common.params import Params
from allennlp_models.rc.modules.seq2seq_encoders.multi_head_self_attention import (
MultiHeadSelfAttention,
)
class MultiHeadSelfAttentionTest(AllenNlpTestCase):
def test_multi_head_self_attention_can_build_from_params(self):
params = Params({"num_heads": 3, "input_dim": 2, "attention_dim": 3, "values_dim": 6})
encoder = MultiHeadSelfAttention.from_params(params)
assert isinstance(encoder, MultiHeadSelfAttention)
assert encoder.get_input_dim() == 2
assert encoder.get_output_dim() == 2
def test_multi_head_self_attention_runs_forward(self):
attention = MultiHeadSelfAttention(num_heads=3, input_dim=5, attention_dim=6, values_dim=9)
inputs = torch.randn(2, 12, 5)
assert list(attention(inputs).size()) == [2, 12, 5]
def test_multi_head_self_attention_respects_masking(self):
attention = MultiHeadSelfAttention(
num_heads=3, input_dim=5, attention_dim=6, values_dim=9, attention_dropout_prob=0.0
)
tensor = torch.randn(2, 12, 5)
mask = torch.ones([2, 12]).bool()
mask[0, 6:] = False
result = attention(tensor, mask)
# Compute the same function without a mask, but with
# only the unmasked elements - should be the same.
result_without_mask = attention(tensor[:, :6, :])
numpy.testing.assert_almost_equal(
result[0, :6, :].detach().cpu().numpy(),
result_without_mask[0, :, :].detach().cpu().numpy(),
)
| allennlp-models-main | tests/rc/modules/seq2seq_encoders/multi_head_self_attention_test.py |
allennlp-models-main | tests/generation/__init__.py |
|
import tempfile
import pytest
from allennlp.common.checks import ConfigurationError
from allennlp.common.util import ensure_list
from allennlp_models.generation import Seq2SeqDatasetReader
from tests import FIXTURES_ROOT
class TestSeq2SeqDatasetReader:
def test_default_format(self):
reader = Seq2SeqDatasetReader()
instances = reader.read(str(FIXTURES_ROOT / "generation" / "seq2seq_copy.tsv"))
instances = ensure_list(instances)
assert len(instances) == 3
fields = instances[0].fields
assert [t.text for t in fields["source_tokens"].tokens] == [
"@start@",
"this",
"is",
"a",
"sentence",
"@end@",
]
assert [t.text for t in fields["target_tokens"].tokens] == [
"@start@",
"this",
"is",
"a",
"sentence",
"@end@",
]
fields = instances[1].fields
assert [t.text for t in fields["source_tokens"].tokens] == [
"@start@",
"this",
"is",
"another",
"@end@",
]
assert [t.text for t in fields["target_tokens"].tokens] == [
"@start@",
"this",
"is",
"another",
"@end@",
]
fields = instances[2].fields
assert [t.text for t in fields["source_tokens"].tokens] == [
"@start@",
"all",
"these",
"sentences",
"should",
"get",
"copied",
"@end@",
]
assert [t.text for t in fields["target_tokens"].tokens] == [
"@start@",
"all",
"these",
"sentences",
"should",
"get",
"copied",
"@end@",
]
def test_source_add_start_token(self):
reader = Seq2SeqDatasetReader(source_add_start_token=False)
instances = reader.read(str(FIXTURES_ROOT / "generation" / "seq2seq_copy.tsv"))
instances = ensure_list(instances)
assert len(instances) == 3
fields = instances[0].fields
assert [t.text for t in fields["source_tokens"].tokens] == [
"this",
"is",
"a",
"sentence",
"@end@",
]
assert [t.text for t in fields["target_tokens"].tokens] == [
"@start@",
"this",
"is",
"a",
"sentence",
"@end@",
]
def test_max_length_truncation(self):
reader = Seq2SeqDatasetReader(source_max_tokens=3, target_max_tokens=5)
instances = reader.read(str(FIXTURES_ROOT / "generation" / "seq2seq_copy.tsv"))
instances = ensure_list(instances)
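        # Informal note: the max-token limits apply to the content tokens only; @start@ and
        # @end@ are added after truncation. That is why two of the three sources exceed
        # source_max_tokens=3 and only the last target exceeds target_max_tokens=5.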
assert reader._source_max_exceeded == 2
assert reader._target_max_exceeded == 1
assert len(instances) == 3
fields = instances[0].fields
assert [t.text for t in fields["source_tokens"].tokens] == [
"@start@",
"this",
"is",
"a",
"@end@",
]
assert [t.text for t in fields["target_tokens"].tokens] == [
"@start@",
"this",
"is",
"a",
"sentence",
"@end@",
]
def test_delimiter_parameter(self):
reader = Seq2SeqDatasetReader(delimiter=",")
instances = reader.read(str(FIXTURES_ROOT / "generation" / "seq2seq_copy.csv"))
instances = ensure_list(instances)
assert len(instances) == 3
fields = instances[0].fields
assert [t.text for t in fields["source_tokens"].tokens] == [
"@start@",
"this",
"is",
"a",
"sentence",
"@end@",
]
assert [t.text for t in fields["target_tokens"].tokens] == [
"@start@",
"this",
"is",
"a",
"sentence",
"@end@",
]
fields = instances[2].fields
assert [t.text for t in fields["source_tokens"].tokens] == [
"@start@",
"all",
"these",
"sentences",
"should",
"get",
"copied",
"@end@",
]
assert [t.text for t in fields["target_tokens"].tokens] == [
"@start@",
"all",
"these",
"sentences",
"should",
"get",
"copied",
"@end@",
]
@pytest.mark.parametrize("line", (("a\n"), ("a\tb\tc\n")))
def test_invalid_line_format(self, line):
with tempfile.NamedTemporaryFile("w") as fp_tmp:
fp_tmp.write(line)
fp_tmp.flush()
reader = Seq2SeqDatasetReader()
with pytest.raises(ConfigurationError):
list(reader.read(fp_tmp.name))
@pytest.mark.parametrize("line", (("a b\tc d\n"), ('"a b"\t"c d"\n')))
def test_correct_quote_handling(self, line):
with tempfile.NamedTemporaryFile("w") as fp_tmp:
fp_tmp.write(line)
fp_tmp.flush()
reader = Seq2SeqDatasetReader()
instances = reader.read(fp_tmp.name)
instances = ensure_list(instances)
assert len(instances) == 1
fields = instances[0].fields
assert [t.text for t in fields["source_tokens"].tokens] == [
"@start@",
"a",
"b",
"@end@",
]
assert [t.text for t in fields["target_tokens"].tokens] == [
"@start@",
"c",
"d",
"@end@",
]
def test_bad_start_or_end_symbol(self):
with pytest.raises(ValueError, match=r"Bad start or end symbol \('BAD SYMBOL"):
Seq2SeqDatasetReader(start_symbol="BAD SYMBOL")
| allennlp-models-main | tests/generation/dataset_readers/seq2seq_test.py |
allennlp-models-main | tests/generation/dataset_readers/__init__.py |
|
import numpy as np
import torch
from allennlp.common import Params
from allennlp.common.testing import AllenNlpTestCase
from allennlp.common.util import ensure_list
from allennlp.data import DatasetReader
from allennlp.data.fields import TensorField
from allennlp.data.vocabulary import Vocabulary, DEFAULT_OOV_TOKEN
from allennlp_models.generation.dataset_readers import CopyNetDatasetReader
from tests import FIXTURES_ROOT
class TestCopyNetReader(AllenNlpTestCase):
def setup_method(self):
super().setup_method()
params = Params.from_file(FIXTURES_ROOT / "generation" / "copynet" / "experiment.json")
self.reader: CopyNetDatasetReader = DatasetReader.from_params(params["dataset_reader"])
instances = self.reader.read(
FIXTURES_ROOT / "generation" / "copynet" / "data" / "copyover.tsv"
)
self.instances = ensure_list(instances)
self.vocab = Vocabulary.from_params(params=params["vocabulary"], instances=self.instances)
def test_vocab_namespaces(self):
assert self.vocab.get_vocab_size("target_tokens") > 5
def test_instances(self):
assert len(self.instances) == 2
assert set(self.instances[0].fields.keys()) == set(
(
"source_tokens",
"source_token_ids",
"target_tokens",
"target_token_ids",
"source_to_target",
"metadata",
)
)
def test_tokens(self):
fields = self.instances[0].fields
assert [t.text for t in fields["source_tokens"].tokens] == [
"these",
"tokens",
"should",
"be",
"copied",
"over",
":",
"hello",
"world",
]
assert fields["metadata"]["source_tokens"] == [
"these",
"tokens",
"should",
"be",
"copied",
"over",
":",
"hello",
"world",
]
assert [t.text for t in fields["target_tokens"].tokens] == [
"@start@",
"the",
"tokens",
'"',
"hello",
"world",
'"',
"were",
"copied",
"@end@",
]
assert fields["metadata"]["target_tokens"] == [
"the",
"tokens",
'"',
"hello",
"world",
'"',
"were",
"copied",
]
def test_source_and_target_token_ids(self):
source_token_ids = self.instances[0].fields["source_token_ids"].array
target_token_ids = self.instances[0].fields["target_token_ids"].array
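        # Informal note: ids are assigned over the concatenated source and target, so tokens
        # shared by both sequences ("tokens" -> 1, "copied" -> 4, "hello" -> 7, "world" -> 8)
        # reuse the same id; this alignment is what supervises copying.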
assert list(source_token_ids) == [
0, # these
1, # tokens
2, # should
3, # be
4, # copied
5, # over
6, # :
7, # hello
8, # world
]
assert list(target_token_ids) == [
9, # @start@
10, # the
1, # tokens
11, # "
7, # hello
8, # world
11, # "
12, # were
4, # copied
13, # @end@
]
def test_source_to_target(self):
source_to_target_field = self.instances[0].fields["source_to_target"]
source_to_target_field.index(self.vocab)
tensor = source_to_target_field.as_tensor(source_to_target_field.get_padding_lengths())
check = np.array(
[
self.vocab.get_token_index("these", "target_tokens"),
self.vocab.get_token_index("tokens", "target_tokens"),
self.vocab.get_token_index("should", "target_tokens"),
self.vocab.get_token_index("be", "target_tokens"),
self.vocab.get_token_index("copied", "target_tokens"),
self.vocab.get_token_index("over", "target_tokens"),
self.vocab.get_token_index(":", "target_tokens"),
self.vocab.get_token_index("hello", "target_tokens"),
self.vocab.get_token_index("world", "target_tokens"),
]
)
np.testing.assert_equal(tensor.numpy(), check)
assert tensor[1].item() != self.vocab.get_token_index(DEFAULT_OOV_TOKEN, "target_tokens")
def test_text_to_instance_with_weight(self):
instance = self.reader.text_to_instance("Hello,", "World!", weight=0.5)
assert "weight" in instance.fields
assert isinstance(instance.fields["weight"], TensorField)
assert instance.fields["weight"].tensor.dtype == torch.float
| allennlp-models-main | tests/generation/dataset_readers/copynet_test.py |
from allennlp.common.testing import AllenNlpTestCase
from allennlp.models.archival import load_archive
from allennlp.predictors import Predictor
from allennlp_models.generation.predictors import Seq2SeqPredictor
from tests import FIXTURES_ROOT
class TestSeq2SeqPredictor(AllenNlpTestCase):
def test_uses_named_inputs_with_simple_seq2seq(self):
inputs = {"source": "What kind of test succeeded on its first attempt?"}
archive = load_archive(
FIXTURES_ROOT / "generation" / "simple" / "serialization" / "model.tar.gz"
)
predictor: Seq2SeqPredictor = Predictor.from_archive(archive, "seq2seq")
result = predictor.predict_json(inputs)
predicted_tokens = result.get("predicted_tokens")
assert predicted_tokens is not None
assert isinstance(predicted_tokens, list)
for predicted_token in predicted_tokens:
assert all(isinstance(x, str) for x in predicted_token)
def test_uses_named_inputs_with_composed_seq2seq(self):
inputs = {"source": "What kind of test succeeded on its first attempt?"}
archive = load_archive(
FIXTURES_ROOT / "generation" / "composed" / "serialization" / "model.tar.gz"
)
predictor = Predictor.from_archive(archive, "seq2seq")
result = predictor.predict_json(inputs)
predicted_tokens = result.get("predicted_tokens")
assert predicted_tokens is not None
assert isinstance(predicted_tokens, list)
assert all(isinstance(x, str) for x in predicted_tokens)
def test_copynet_predictions(self):
archive = load_archive(
FIXTURES_ROOT / "generation" / "copynet" / "serialization" / "model.tar.gz"
)
predictor = Predictor.from_archive(archive, "seq2seq")
model = predictor._model
end_token = model.vocab.get_token_from_index(model._end_index, model._target_namespace)
output_dict = predictor.predict("these tokens should be copied over : hello world")
assert len(output_dict["predictions"]) == model._beam_search.beam_size
assert len(output_dict["predicted_tokens"]) == model._beam_search.beam_size
for predicted_tokens in output_dict["predicted_tokens"]:
assert all(isinstance(x, str) for x in predicted_tokens)
assert end_token not in predicted_tokens
| allennlp-models-main | tests/generation/predictors/seq2seq_test.py |
allennlp-models-main | tests/generation/predictors/__init__.py |
|
from allennlp.common.testing import ModelTestCase
from tests import FIXTURES_ROOT
from allennlp_models import generation # noqa: F401
class T5Test(ModelTestCase):
def setup_method(self):
super().setup_method()
self.set_up_model(
FIXTURES_ROOT / "generation" / "t5" / "experiment.jsonnet",
FIXTURES_ROOT / "generation" / "bart" / "data" / "url_lists" / "all_train.txt",
)
def test_model_can_train_save_load_predict(self):
self.ensure_model_can_train_save_and_load(self.param_file, tolerance=1e-2)
| allennlp-models-main | tests/generation/models/t5_test.py |
allennlp-models-main | tests/generation/models/__init__.py |
|
import numpy as np
import pytest
import torch
from _pytest.mark import param
from allennlp.commands.train import train_model_from_file
from allennlp.common import Params
from allennlp.common.testing import ModelTestCase, requires_gpu
from allennlp.data import Batch, DatasetReader
from allennlp.models import Model
from allennlp_models.generation import CopyNetDatasetReader, CopyNetSeq2Seq # noqa: F401
from scipy.special import logsumexp
from tests import FIXTURES_ROOT
class CopyNetTest(ModelTestCase):
def setup_method(self):
super().setup_method()
self.set_up_model(
FIXTURES_ROOT / "generation" / "copynet" / "experiment.json",
FIXTURES_ROOT / "generation" / "copynet" / "data" / "copyover.tsv",
)
def test_backwards_compatibility_with_beam_search_args(self):
# These values are arbitrary but should be different than the config.
beam_size, max_decoding_steps = 100, 1000
params = Params.from_file(self.param_file)
params["model"]["beam_size"] = beam_size
params["model"]["max_decoding_steps"] = max_decoding_steps
# The test harness is set up to treat DeprecationWarning's like errors, so this needs to
# be called within the pytest context manager.
with pytest.raises(DeprecationWarning):
model = Model.from_params(vocab=self.vocab, params=params.get("model"))
assert model._beam_search.beam_size == beam_size
assert model._beam_search.max_steps == max_decoding_steps
def test_model_can_train_save_load(self):
self.ensure_model_can_train_save_and_load(self.param_file, tolerance=1e-2)
@requires_gpu
def test_model_can_train_with_amp(self):
train_model_from_file(
self.param_file,
self.TEST_DIR,
overrides="{'trainer.use_amp':true,'trainer.cuda_device':0}",
)
def test_model_can_train_with_scheduled_sampling_ratio(self):
train_model_from_file(
self.param_file,
self.TEST_DIR,
overrides="{'model.scheduled_sampling_ratio':0.5}",
)
def test_vocab(self):
vocab = self.model.vocab
assert vocab.get_vocab_size(self.model._target_namespace) == 8
assert "hello" not in vocab._token_to_index[self.model._target_namespace]
assert "world" not in vocab._token_to_index[self.model._target_namespace]
assert "@COPY@" in vocab._token_to_index["target_tokens"]
def test_train_instances(self):
inputs = self.instances[0].as_tensor_dict()
source_tokens = inputs["source_tokens"]["tokens"]
target_tokens = inputs["target_tokens"]["tokens"]
assert list(source_tokens["tokens"].size()) == [9]
assert list(target_tokens["tokens"].size()) == [10]
assert target_tokens["tokens"][0] == self.model._start_index
assert target_tokens["tokens"][4] == self.model._oov_index
assert target_tokens["tokens"][5] == self.model._oov_index
assert target_tokens["tokens"][-1] == self.model._end_index
def test_get_ll_contrib(self):
# batch_size = 3, trimmed_input_len = 3
#
# In the first instance, the contribution to the likelihood should
# come from both the generation scores and the copy scores, since the
# token is in the source sentence and the target vocabulary.
# In the second instance, the contribution should come only from the
# generation scores, since the token is not in the source sentence.
# In the third instance, the contribution should come only from the copy scores,
# since the token is in the source sequence but is not in the target vocabulary.
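        # Rough sketch of the quantity checked below (mirrors `ll_check`): for each instance,
        #   ll = logsumexp(the gold token's generation score, if it is in the target vocab,
        #                  together with the copy scores at source positions matching the gold token)
        #        - logsumexp(all generation scores and all unmasked copy scores)
        # i.e. the probability mass assigned to producing the gold token by either route,
        # normalized over everything the model could generate or copy at this step.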
vocab = self.model.vocab
generation_scores = torch.tensor(
[
[0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8], # these numbers are arbitrary.
[0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8],
[0.1, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2],
]
)
# shape: (batch_size, target_vocab_size)
copy_scores = torch.tensor(
[[1.0, 2.0, 1.0], [1.0, 2.0, 3.0], [2.0, 2.0, 3.0]] # these numbers are arbitrary.
)
# shape: (batch_size, trimmed_input_len)
target_tokens = torch.tensor(
[
vocab.get_token_index("tokens", self.model._target_namespace),
vocab.get_token_index("the", self.model._target_namespace),
self.model._oov_index,
]
)
# shape: (batch_size,)
target_to_source = torch.tensor([[0, 1, 0], [0, 0, 0], [1, 0, 1]])
# shape: (batch_size, trimmed_input_len)
copy_mask = torch.tensor([[True, True, False], [True, False, False], [True, True, True]])
# shape: (batch_size, trimmed_input_len)
# This is what the log likelihood result should look like.
ll_check = np.array(
[
# First instance.
logsumexp(np.array([generation_scores[0, target_tokens[0].item()].item(), 2.0]))
- logsumexp(np.array([0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 1.0, 2.0])),
# Second instance.
generation_scores[1, target_tokens[1].item()].item()
- logsumexp(np.array([0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 1.0])),
# Third instance.
logsumexp(np.array([2.0, 3.0]))
- logsumexp(np.array([0.1, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 2.0, 2.0, 3.0])),
]
)
# This is what the selective_weights result should look like.
selective_weights_check = np.stack(
[
np.array([0.0, 1.0, 0.0]),
np.array([0.0, 0.0, 0.0]),
np.exp([2.0, float("-inf"), 3.0]) / (np.exp(2.0) + np.exp(3.0)),
]
)
generation_scores_mask = generation_scores.new_full(
generation_scores.size(), True, dtype=torch.bool
)
ll_actual, selective_weights_actual, _ = self.model._get_ll_contrib(
generation_scores,
generation_scores_mask,
copy_scores,
target_tokens,
target_to_source,
copy_mask,
)
np.testing.assert_almost_equal(ll_actual.data.numpy(), ll_check, decimal=6)
np.testing.assert_almost_equal(
selective_weights_actual.data.numpy(), selective_weights_check, decimal=6
)
def test_get_input_and_selective_weights(self):
target_vocab_size = self.model._target_vocab_size
oov_index = self.model._oov_index
copy_index = self.model._copy_index
# shape: (group_size,)
        last_predictions = torch.tensor(
            [5, 6, target_vocab_size + 1]
        )  # 5: only generated. 6: copied AND generated. target_vocab_size + 1: only copied.
# shape: (group_size, source_sequence_length)
source_to_target = torch.tensor(
[[6, oov_index, oov_index], [6, oov_index, 6], [5, oov_index, oov_index]]
)
# shape: (group_size, source_sequence_length)
        source_token_ids = torch.tensor(
            [
                [0, 1, 2],  # no duplicates.
                [0, 1, 0],  # first and last source tokens match.
                [0, 1, 1],  # middle and last source tokens match.
            ]
        )
# shape: (group_size, source_sequence_length)
copy_probs = torch.tensor([[0.1, 0.1, 0.1], [0.1, 0.1, 0.1], [0.1, 0.1, 0.1]])
state = {
"source_to_target": source_to_target,
"source_token_ids": source_token_ids,
"copy_log_probs": (copy_probs + 1e-45).log(),
}
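        # Informal reading of the expected values below: an instance's selective weights are
        # its copy probabilities restricted to the source positions that match the last
        # prediction, renormalized to sum to one (all zeros when the last prediction was
        # purely generated).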
input_choices, selective_weights = self.model._get_input_and_selective_weights(
last_predictions, state
)
assert list(input_choices.size()) == [3]
assert list(selective_weights.size()) == [3, 3]
# shape: (group_size,)
input_choices_check = np.array([5, 6, copy_index])
np.testing.assert_equal(input_choices.numpy(), input_choices_check)
# shape: (group_size, source_sequence_length)
selective_weights_check = np.array([[0.0, 0.0, 0.0], [0.5, 0.0, 0.5], [0.0, 0.5, 0.5]])
np.testing.assert_equal(selective_weights.numpy(), selective_weights_check)
def test_gather_final_log_probs(self):
target_vocab_size = self.model._target_vocab_size
assert target_vocab_size == 8
oov_index = self.model._oov_index
assert oov_index not in [5, 6]
# shape: (group_size, source_sequence_length)
source_to_target = torch.tensor([[6, oov_index, oov_index], [oov_index, 5, 5]])
# shape: (group_size, source_sequence_length)
source_token_ids = torch.tensor([[0, 1, 1], [0, 1, 1]])
# shape: (group_size, target_vocab_size)
generation_probs = torch.tensor([[0.1] * target_vocab_size, [0.1] * target_vocab_size])
# shape: (group_size, source_sequence_length)
copy_probs = torch.tensor([[0.1, 0.1, 0.1], [0.1, 0.1, 0.1]])
state = {"source_to_target": source_to_target, "source_token_ids": source_token_ids}
final_log_probs = self.model._gather_final_log_probs(
generation_probs.log(), copy_probs.log(), state
)
final_probs = final_log_probs.exp()
assert list(final_probs.size()) == [2, target_vocab_size + 3]
final_probs_check = np.array(
[
# First copy token matches a source token. So first copy score is added to
# corresponding generation score.
# Second and third copy tokens match, so third copy score added to second
# copy score.
[
0.1,
0.1,
0.1,
0.1,
0.1,
0.1,
0.2,
0.1, # modified generation scores
0.0,
0.2,
0.0,
], # modified copy scores
# Second and third copy tokens match the same token in target vocab.
[
0.1,
0.1,
0.1,
0.1,
0.1,
0.3,
0.1,
0.1, # modified generation scores
0.1,
0.0,
0.0,
], # modified copy scores
]
)
np.testing.assert_array_almost_equal(final_probs.numpy(), final_probs_check)
def test_gather_extended_gold_tokens(self):
vocab_size = self.model._target_vocab_size
end_index = self.model._end_index
pad_index = self.model._pad_index
oov_index = self.model._oov_index
tok_index = 6 # some other arbitrary token
assert tok_index not in [end_index, pad_index, oov_index]
# first sentence tokens:
# 1: oov but not copied
# 2: not oov and not copied
# 3: not copied
# 4: not copied
# second sentence tokens:
# 1: not oov and copied
# 2: oov and copied
# 3: not copied
# 4: not copied
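        # Informal summary of `check` below: an OOV target token that also occurs in the source
        # is re-indexed into the extended vocabulary as vocab_size + (position of its first
        # match in the source); an OOV token with no source match keeps oov_index, and
        # in-vocab tokens are left untouched.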
# shape: (batch_size, target_sequence_length)
target_tokens = torch.tensor(
[
[oov_index, tok_index, end_index, pad_index],
[tok_index, oov_index, tok_index, end_index],
]
)
# shape: (batch_size, source_sequence_length)
source_token_ids = torch.tensor([[0, 1, 2, 3], [0, 1, 0, 2]])
# shape: (batch_size, target_sequence_length)
target_token_ids = torch.tensor([[4, 5, 6, 7], [1, 0, 3, 4]])
# shape: (batch_size, target_sequence_length)
result = self.model._gather_extended_gold_tokens(
target_tokens, source_token_ids, target_token_ids
)
# shape: (batch_size, target_sequence_length)
check = np.array(
[
[oov_index, tok_index, end_index, pad_index],
[tok_index, vocab_size, tok_index, end_index],
]
)
np.testing.assert_array_equal(result.numpy(), check)
def test_get_predicted_tokens(self):
tok_index = self.vocab.get_token_index("tokens", self.model._target_namespace)
end_index = self.model._end_index
vocab_size = self.model._target_vocab_size
# shape: (batch_size, beam_size, max_predicted_length)
predicted_indices = np.array(
[
[
[tok_index, vocab_size, vocab_size + 1, end_index],
[tok_index, tok_index, tok_index, tok_index],
],
[
[tok_index, tok_index, tok_index, end_index],
[tok_index, vocab_size + 1, end_index, end_index],
],
]
)
batch_metadata = [
{"source_tokens": ["hello", "world"]},
{"source_tokens": ["copynet", "is", "cool"]},
]
predicted_tokens = self.model._get_predicted_tokens(predicted_indices, batch_metadata)
assert len(predicted_tokens) == 2
assert len(predicted_tokens[0]) == 2
assert len(predicted_tokens[1]) == 2
assert predicted_tokens[0][0] == ["tokens", "hello", "world"]
assert predicted_tokens[0][1] == ["tokens", "tokens", "tokens", "tokens"]
predicted_tokens = self.model._get_predicted_tokens(
predicted_indices, batch_metadata, n_best=1
)
assert len(predicted_tokens) == 2
assert predicted_tokens[0] == ["tokens", "hello", "world"]
assert predicted_tokens[1] == ["tokens", "tokens", "tokens"]
def test_forward_with_weights(self):
params = Params.from_file(self.param_file)
reader: CopyNetDatasetReader = DatasetReader.from_params(
params["dataset_reader"], serialization_dir=self.TEST_DIR
)
instances = [
reader.text_to_instance("hello hello world", "hello world", weight=0.9),
reader.text_to_instance("hello world", "hello world world", weight=0.5),
]
for instance in instances:
reader.apply_token_indexers(instance)
batch = Batch(instances)
batch.index_instances(self.model.vocab)
inputs = batch.as_tensor_dict()
assert "weight" in inputs
_ = self.model(**inputs)
class CopyNetTransformerTest(ModelTestCase):
def setup_method(self):
super().setup_method()
self.set_up_model(
FIXTURES_ROOT / "generation" / "copynet" / "experiment_transformer.jsonnet",
FIXTURES_ROOT / "generation" / "copynet" / "data" / "copyover.tsv",
)
def test_model_can_train_save_load_predict(self):
self.ensure_model_can_train_save_and_load(self.param_file, tolerance=1e-2)
| allennlp-models-main | tests/generation/models/copynet_test.py |
import json
import numpy
import pytest
import torch
from allennlp.models import Model
from allennlp.common import Params
from allennlp.commands.train import train_model_from_file
from allennlp.common.testing import ModelTestCase, requires_gpu
from allennlp.nn.beam_search import BeamSearch
from allennlp.nn.util import sequence_cross_entropy_with_logits
from tests import FIXTURES_ROOT
class SimpleSeq2SeqTest(ModelTestCase):
def setup_method(self):
super().setup_method()
self.set_up_model(
FIXTURES_ROOT / "generation" / "simple" / "experiment.json",
FIXTURES_ROOT / "generation" / "seq2seq_copy.tsv",
)
def test_backwards_compatibility_with_beam_search_args(self):
# These values are arbitrary but should be different than the config.
beam_size, max_decoding_steps = 100, 1000
params = Params.from_file(self.param_file)
params["model"]["beam_size"] = beam_size
params["model"]["max_decoding_steps"] = max_decoding_steps
# The test harness is set up to treat DeprecationWarning's like errors, so this needs to
# be called within the pytest context manager.
with pytest.raises(DeprecationWarning):
model = Model.from_params(vocab=self.vocab, params=params.get("model"))
assert model._beam_search.beam_size == beam_size
assert model._beam_search.max_steps == max_decoding_steps
def test_model_can_train_save_and_load(self):
self.ensure_model_can_train_save_and_load(self.param_file, tolerance=1e-2)
@requires_gpu
def test_model_can_train_with_amp(self):
train_model_from_file(
self.param_file,
self.TEST_DIR,
overrides="{'trainer.use_amp':true,'trainer.cuda_device':0}",
)
def test_model_can_train_with_scheduled_sampling_ratio(self):
train_model_from_file(
self.param_file,
self.TEST_DIR,
overrides="{'model.scheduled_sampling_ratio':0.5}",
)
def test_bidirectional_model_can_train_save_and_load(self):
param_overrides = json.dumps({"model.encoder.bidirectional": True})
self.ensure_model_can_train_save_and_load(
self.param_file, tolerance=1e-2, overrides=param_overrides
)
def test_multi_layer_decoder_model_can_train_save_and_load(self):
param_overrides = json.dumps({"model.target_decoder_layers": 2})
self.ensure_model_can_train_save_and_load(
self.param_file, tolerance=1e-2, overrides=param_overrides
)
def test_no_attention_model_can_train_save_and_load(self):
param_overrides = json.dumps({"model.attention": None})
self.ensure_model_can_train_save_and_load(
self.param_file, tolerance=1e-2, overrides=param_overrides
)
def test_greedy_model_can_train_save_and_load(self):
param_overrides = json.dumps({"model.beam_search.beam_size": 1})
self.ensure_model_can_train_save_and_load(
self.param_file, tolerance=1e-2, overrides=param_overrides
)
def test_loss_is_computed_correctly(self):
batch_size = 5
num_decoding_steps = 5
num_classes = 10
sample_logits = torch.randn(batch_size, num_decoding_steps - 1, num_classes)
sample_targets = torch.from_numpy(
numpy.random.randint(0, num_classes, (batch_size, num_decoding_steps))
)
# Mask should be either 0 or 1
sample_mask = torch.from_numpy(
numpy.random.randint(0, 2, (batch_size, num_decoding_steps))
).bool()
expected_loss = sequence_cross_entropy_with_logits(
sample_logits, sample_targets[:, 1:].contiguous(), sample_mask[:, 1:].contiguous()
)
actual_loss = self.model._get_loss(sample_logits, sample_targets, sample_mask)
assert numpy.equal(expected_loss.data.numpy(), actual_loss.data.numpy())
def test_decode_runs_correctly(self):
self.model.eval()
training_tensors = self.dataset.as_tensor_dict()
output_dict = self.model(**training_tensors)
decode_output_dict = self.model.make_output_human_readable(output_dict)
# `make_output_human_readable` should have added a `predicted_tokens` field to
# `output_dict`. Checking if it's there.
assert "predicted_tokens" in decode_output_dict
# The output of model.make_output_human_readable should still have 'predicted_tokens' after
# using the beam search. To force the beam search, we just remove `target_tokens` from the
# input tensors.
del training_tensors["target_tokens"]
output_dict = self.model(**training_tensors)
decode_output_dict = self.model.make_output_human_readable(output_dict)
assert "predicted_tokens" in decode_output_dict
def test_greedy_decode_matches_beam_search(self):
beam_search = BeamSearch(
self.model._end_index, max_steps=self.model._beam_search.max_steps, beam_size=1
)
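        # With beam_size=1 the search keeps only the single highest-scoring continuation at
        # each step, which is exactly greedy decoding, so the two decoding paths below should
        # produce identical tokens.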
training_tensors = self.dataset.as_tensor_dict()
# Get greedy predictions from _forward_loop method of model.
state = self.model._encode(training_tensors["source_tokens"])
state = self.model._init_decoder_state(state)
output_dict_greedy = self.model._forward_loop(state)
output_dict_greedy = self.model.make_output_human_readable(output_dict_greedy)
# Get greedy predictions from beam search (beam size = 1).
state = self.model._encode(training_tensors["source_tokens"])
state = self.model._init_decoder_state(state)
batch_size = state["source_mask"].size()[0]
start_predictions = state["source_mask"].new_full(
(batch_size,), fill_value=self.model._start_index, dtype=torch.long
)
all_top_k_predictions, _ = beam_search.search(
start_predictions, state, self.model.take_step
)
output_dict_beam_search = {"predictions": all_top_k_predictions}
output_dict_beam_search = self.model.make_output_human_readable(output_dict_beam_search)
# Predictions from model._forward_loop and beam_search should match.
assert output_dict_greedy["predicted_tokens"] == output_dict_beam_search["predicted_tokens"]
| allennlp-models-main | tests/generation/models/simple_seq2seq_test.py |
import pytest
from allennlp.common import Params
from allennlp.common.testing import ModelTestCase
from allennlp.models import Model
from allennlp_models import generation # noqa: F401
from tests import FIXTURES_ROOT
class BartTest(ModelTestCase):
def setup_method(self):
super().setup_method()
self.set_up_model(
FIXTURES_ROOT / "generation" / "bart" / "experiment.jsonnet",
FIXTURES_ROOT / "generation" / "bart" / "data" / "url_lists" / "all_train.txt",
)
def test_backwards_compatibility_with_beam_search_args(self):
# These values are arbitrary but should be different than the config.
beam_size, max_decoding_steps = 100, 1000
params = Params.from_file(self.param_file)
params["model"]["beam_size"] = beam_size
params["model"]["max_decoding_steps"] = max_decoding_steps
# The test harness is set up to treat DeprecationWarning's like errors, so this needs to
# be called within the pytest context manager.
with pytest.raises(DeprecationWarning):
model = Model.from_params(vocab=self.vocab, params=params.get("model"))
assert model._beam_search.beam_size == beam_size
assert model._beam_search.max_steps == max_decoding_steps
def test_model_can_train_save_load_predict(self):
self.ensure_model_can_train_save_and_load(self.param_file, tolerance=1e-2)
| allennlp-models-main | tests/generation/models/bart_test.py |
import json
from allennlp.common.testing import ModelTestCase
from tests import FIXTURES_ROOT
class ComposedSeq2SeqTest(ModelTestCase):
def setup_method(self):
super().setup_method()
self.set_up_model(
FIXTURES_ROOT / "generation" / "composed" / "experiment.json",
FIXTURES_ROOT / "generation" / "seq2seq_copy.tsv",
)
def test_model_can_train_save_and_load(self):
self.ensure_model_can_train_save_and_load(self.param_file, tolerance=1e-2)
def test_bidirectional_model_can_train_save_and_load(self):
param_overrides = json.dumps(
{
"model.encoder.bidirectional": True,
"model.decoder.decoder_net.decoding_dim": 20,
"model.decoder.decoder_net.bidirectional_input": True,
}
)
self.ensure_model_can_train_save_and_load(
self.param_file, tolerance=1e-2, overrides=param_overrides
)
def test_no_attention_model_can_train_save_and_load(self):
param_overrides = json.dumps({"model.decoder.decoder_net.attention": None})
self.ensure_model_can_train_save_and_load(
self.param_file, tolerance=1e-2, overrides=param_overrides
)
def test_greedy_model_can_train_save_and_load(self):
param_overrides = json.dumps({"model.decoder.beam_search.beam_size": 1})
self.ensure_model_can_train_save_and_load(
self.param_file, tolerance=1e-2, overrides=param_overrides
)
def test_decode_runs_correctly(self):
self.model.eval()
training_tensors = self.dataset.as_tensor_dict()
output_dict = self.model(**training_tensors)
decode_output_dict = self.model.make_output_human_readable(output_dict)
# `make_output_human_readable` should have added a `predicted_tokens` field to
# `output_dict`. Checking if it's there.
assert "predicted_tokens" in decode_output_dict
# The output of model.make_output_human_readable should still have 'predicted_tokens' after
# using the beam search. To force the beam search, we just remove `target_tokens` from the
# input tensors.
del training_tensors["target_tokens"]
output_dict = self.model(**training_tensors)
decode_output_dict = self.model.make_output_human_readable(output_dict)
assert "predicted_tokens" in decode_output_dict
| allennlp-models-main | tests/generation/models/composed_seq2seq_test.py |
allennlp-models-main | tests/generation/modules/__init__.py |
|
allennlp-models-main | tests/generation/modules/seq_decoders/__init__.py |
|
from typing import Any, Iterable, Dict
import pytest
import torch
from allennlp.common import Lazy, Params
from allennlp.common.checks import ConfigurationError
from allennlp.common.testing import AllenNlpTestCase
from allennlp.common.util import END_SYMBOL, prepare_environment, START_SYMBOL
from allennlp.data.vocabulary import Vocabulary
from allennlp.modules import Embedding
from allennlp.nn.beam_search import BeamSearch
from allennlp.training.metrics import BLEU, Metric
from allennlp_models.generation import AutoRegressiveSeqDecoder
from allennlp_models.generation import StackedSelfAttentionDecoderNet
def create_vocab_and_decoder_net(decoder_inout_dim):
vocab = Vocabulary()
vocab.add_tokens_to_namespace(["A", "B", START_SYMBOL, END_SYMBOL])
decoder_net = StackedSelfAttentionDecoderNet(
decoding_dim=decoder_inout_dim,
target_embedding_dim=decoder_inout_dim,
feedforward_hidden_dim=20,
num_layers=2,
num_attention_heads=4,
)
return vocab, decoder_net
class DummyMetric(Metric):
def __init__(self) -> None:
self.reset()
@staticmethod
def f1(predicted: Iterable[Any], expected: Iterable[Any]) -> float:
expected = frozenset(expected)
predicted = frozenset(predicted)
if len(predicted) <= 0 and len(expected) <= 0:
return 1.0
if len(predicted) <= 0 or len(expected) <= 0:
return 0.0
true_positive_count = len(predicted & expected)
p = true_positive_count / len(predicted)
r = true_positive_count / len(expected)
return (2 * p * r) / (p + r)
def __call__(self, best_span_strings, answer_strings):
for best_span_string, answer_string in zip(best_span_strings, answer_strings):
self._total_em += best_span_string == answer_string
self._total_f1 += self.f1(best_span_string, answer_string)
self._count += 1
def get_metric(self, reset: bool = False) -> Dict[str, float]:
exact_match = self._total_em / self._count if self._count > 0 else 0
f1_score = self._total_f1 / self._count if self._count > 0 else 0
if reset:
self.reset()
return {"em": exact_match, "f1": f1_score}
def reset(self):
self._total_em = 0.0
self._total_f1 = 0.0
self._count = 0
def __str__(self):
return f"DummyMetric(em={self._total_em}, f1={self._total_f1})"
class TestAutoRegressiveSeqDecoder(AllenNlpTestCase):
def test_auto_regressive_seq_decoder_init(self):
decoder_inout_dim = 4
vocab, decoder_net = create_vocab_and_decoder_net(decoder_inout_dim)
AutoRegressiveSeqDecoder(
vocab,
decoder_net,
Embedding(num_embeddings=vocab.get_vocab_size(), embedding_dim=decoder_inout_dim),
beam_search=Lazy(BeamSearch, constructor_extras={"max_steps": 10}),
)
with pytest.raises(ConfigurationError):
AutoRegressiveSeqDecoder(
vocab,
decoder_net,
Embedding(
num_embeddings=vocab.get_vocab_size(), embedding_dim=decoder_inout_dim + 1
),
beam_search=Lazy(BeamSearch, constructor_extras={"max_steps": 10}),
)
def test_auto_regressive_seq_decoder_forward(self):
batch_size, time_steps, decoder_inout_dim = 2, 3, 4
vocab, decoder_net = create_vocab_and_decoder_net(decoder_inout_dim)
auto_regressive_seq_decoder = AutoRegressiveSeqDecoder(
vocab,
decoder_net,
Embedding(num_embeddings=vocab.get_vocab_size(), embedding_dim=decoder_inout_dim),
beam_search=Lazy(BeamSearch, constructor_extras={"max_steps": 10, "beam_size": 4}),
)
encoded_state = torch.rand(batch_size, time_steps, decoder_inout_dim)
source_mask = torch.ones(batch_size, time_steps).bool()
target_tokens = {"tokens": {"tokens": torch.ones(batch_size, time_steps).long()}}
source_mask[0, 1:] = False
encoder_out = {"source_mask": source_mask, "encoder_outputs": encoded_state}
assert auto_regressive_seq_decoder.forward(encoder_out) == {}
loss = auto_regressive_seq_decoder.forward(encoder_out, target_tokens)["loss"]
assert loss.shape == torch.Size([]) and loss.requires_grad
auto_regressive_seq_decoder.eval()
assert "predictions" in auto_regressive_seq_decoder.forward(encoder_out)
def test_auto_regressive_seq_decoder_indices_to_tokens(self):
decoder_inout_dim = 4
vocab, decoder_net = create_vocab_and_decoder_net(decoder_inout_dim)
auto_regressive_seq_decoder = AutoRegressiveSeqDecoder(
vocab,
decoder_net,
Embedding(num_embeddings=vocab.get_vocab_size(), embedding_dim=decoder_inout_dim),
beam_search=Lazy(BeamSearch, constructor_extras={"max_steps": 10}),
)
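        # Here indices 2/3 map to "A"/"B" and 5 is the end symbol, so decoding stops
        # there: [3, 2, 5, 0, 0] -> ["B", "A"] and [2, 2, 3, 5, 0] -> ["A", "A", "B"].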
predictions = torch.tensor([[3, 2, 5, 0, 0], [2, 2, 3, 5, 0]])
tokens_ground_truth = [["B", "A"], ["A", "A", "B"]]
predicted_tokens = auto_regressive_seq_decoder.indices_to_tokens(predictions.numpy())
assert predicted_tokens == tokens_ground_truth
def test_auto_regressive_seq_decoder_post_process(self):
decoder_inout_dim = 4
vocab, decoder_net = create_vocab_and_decoder_net(decoder_inout_dim)
auto_regressive_seq_decoder = AutoRegressiveSeqDecoder(
vocab,
decoder_net,
Embedding(num_embeddings=vocab.get_vocab_size(), embedding_dim=decoder_inout_dim),
beam_search=Lazy(BeamSearch, constructor_extras={"max_steps": 10}),
)
predictions = torch.tensor([[3, 2, 5, 0, 0], [2, 2, 3, 5, 0]])
tokens_ground_truth = [["B", "A"], ["A", "A", "B"]]
output_dict = {"predictions": predictions}
predicted_tokens = auto_regressive_seq_decoder.post_process(output_dict)["predicted_tokens"]
assert predicted_tokens == tokens_ground_truth
def test_auto_regressive_seq_decoder_tensor_and_token_based_metric(self):
        # Set all seeds to a fixed value (torch, numpy, etc.).
        # This enables deterministic behavior of the `auto_regressive_seq_decoder`
        # below (i.e., parameter initialization and `encoded_state = torch.randn(..)`).
prepare_environment(Params({}))
batch_size, time_steps, decoder_inout_dim = 2, 3, 4
vocab, decoder_net = create_vocab_and_decoder_net(decoder_inout_dim)
auto_regressive_seq_decoder = AutoRegressiveSeqDecoder(
vocab,
decoder_net,
Embedding(num_embeddings=vocab.get_vocab_size(), embedding_dim=decoder_inout_dim),
beam_search=Lazy(BeamSearch, constructor_extras={"max_steps": 10, "beam_size": 4}),
tensor_based_metric=BLEU(),
token_based_metric=DummyMetric(),
).eval()
encoded_state = torch.randn(batch_size, time_steps, decoder_inout_dim)
source_mask = torch.ones(batch_size, time_steps).bool()
target_tokens = {"tokens": {"tokens": torch.ones(batch_size, time_steps).long()}}
source_mask[0, 1:] = False
encoder_out = {"source_mask": source_mask, "encoder_outputs": encoded_state}
auto_regressive_seq_decoder.forward(encoder_out, target_tokens)
assert auto_regressive_seq_decoder.get_metrics()["BLEU"] == 1.388809517005903e-11
assert auto_regressive_seq_decoder.get_metrics()["em"] == 0.0
assert auto_regressive_seq_decoder.get_metrics()["f1"] == 1 / 3
| allennlp-models-main | tests/generation/modules/seq_decoders/auto_regressive_test.py |
import torch
from allennlp.common.testing import AllenNlpTestCase
from allennlp_models.generation import StackedSelfAttentionDecoderNet
class TestStackedSelfAttentionDecoderNet(AllenNlpTestCase):
def test_stacked_self_attention_decoder_net_init(self):
decoder_inout_dim = 10
decoder_net = StackedSelfAttentionDecoderNet(
decoding_dim=decoder_inout_dim,
target_embedding_dim=decoder_inout_dim,
feedforward_hidden_dim=20,
num_layers=2,
num_attention_heads=5,
)
batch_size = 5
time_steps = 10
encoded_state = torch.rand(batch_size, time_steps, decoder_inout_dim)
source_mask = torch.ones(batch_size, time_steps).bool()
source_mask[0, 7:] = 0
source_mask[1, 5:] = 0
encoder_out = {"source_mask": source_mask, "encoder_outputs": encoded_state}
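        # The self-attention decoder keeps no recurrent state, so its initial decoder
        # state is simply an empty dict.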
decoder_init_state = decoder_net.init_decoder_state(encoder_out)
assert decoder_init_state == {}
def test_stacked_self_attention_decoder_net_forward(self):
decoder_inout_dim = 10
decoder_net = StackedSelfAttentionDecoderNet(
decoding_dim=decoder_inout_dim,
target_embedding_dim=decoder_inout_dim,
feedforward_hidden_dim=20,
num_layers=2,
num_attention_heads=5,
)
batch_size = 5
time_steps = 10
encoded_state = torch.rand(batch_size, time_steps, decoder_inout_dim)
source_mask = torch.ones(batch_size, time_steps).bool()
source_mask[0, 7:] = 0
source_mask[1, 5:] = 0
prev_timesteps = 3
prev_step_prediction_embeded = torch.rand(batch_size, prev_timesteps, decoder_inout_dim)
next_state, decoded_vec = decoder_net(
{}, encoded_state, source_mask, prev_step_prediction_embeded
)
assert next_state == {}
assert list(decoded_vec.shape) == [batch_size, prev_timesteps, decoder_inout_dim]
| allennlp-models-main | tests/generation/modules/decoder_nets/stacked_self_attention_test.py |
allennlp-models-main | tests/generation/modules/decoder_nets/__init__.py |
|
import torch
from allennlp.common.testing import AllenNlpTestCase
from allennlp.modules.attention import DotProductAttention
from allennlp_models.generation.modules.decoder_nets.lstm_cell import LstmCellDecoderNet
class TestLstmCellDecoderNet(AllenNlpTestCase):
def test_lstm_cell_decoder_net_init(self):
decoder_inout_dim = 10
lstm_decoder_net = LstmCellDecoderNet(
decoding_dim=decoder_inout_dim,
target_embedding_dim=decoder_inout_dim,
attention=DotProductAttention(),
bidirectional_input=False,
)
batch_size = 5
time_steps = 10
encoded_state = torch.rand(batch_size, time_steps, decoder_inout_dim)
source_mask = torch.ones(batch_size, time_steps).bool()
source_mask[0, 7:] = 0
source_mask[1, 5:] = 0
encoder_out = {"source_mask": source_mask, "encoder_outputs": encoded_state}
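        # Unlike the stateless self-attention decoder, the LSTM-cell decoder returns a
        # non-empty initial state with per-example hidden and context vectors.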
decoder_init_state = lstm_decoder_net.init_decoder_state(encoder_out)
assert list(decoder_init_state["decoder_hidden"].shape) == [batch_size, decoder_inout_dim]
assert list(decoder_init_state["decoder_context"].shape) == [batch_size, decoder_inout_dim]
def test_lstm_cell_decoder_net_forward(self):
decoder_inout_dim = 10
lstm_decoder_net = LstmCellDecoderNet(
decoding_dim=decoder_inout_dim,
target_embedding_dim=decoder_inout_dim,
attention=DotProductAttention(),
bidirectional_input=True,
)
batch_size = 5
time_steps = 10
encoded_state = torch.rand(batch_size, time_steps, decoder_inout_dim)
source_mask = torch.ones(batch_size, time_steps).bool()
source_mask[0, 7:] = 0
source_mask[1, 5:] = 0
encoder_out = {"source_mask": source_mask, "encoder_outputs": encoded_state}
prev_step_prediction_embeded = torch.rand(batch_size, 1, decoder_inout_dim)
prev_state = lstm_decoder_net.init_decoder_state(encoder_out)
next_state, decoded_vec = lstm_decoder_net(
prev_state, encoded_state, source_mask, prev_step_prediction_embeded
)
assert list(next_state["decoder_hidden"].shape) == [batch_size, decoder_inout_dim]
assert list(next_state["decoder_context"].shape) == [batch_size, decoder_inout_dim]
assert list(decoded_vec.shape) == [batch_size, decoder_inout_dim]
def test_lstm_cell_decoder_net_forward_without_attention(self):
decoder_inout_dim = 10
lstm_decoder_net = LstmCellDecoderNet(
decoding_dim=decoder_inout_dim,
target_embedding_dim=decoder_inout_dim,
attention=None,
bidirectional_input=True,
)
batch_size = 5
time_steps = 10
encoded_state = torch.rand(batch_size, time_steps, decoder_inout_dim)
source_mask = torch.ones(batch_size, time_steps).bool()
source_mask[0, 7:] = 0
source_mask[1, 5:] = 0
encoder_out = {"source_mask": source_mask, "encoder_outputs": encoded_state}
prev_step_prediction_embeded = torch.rand(batch_size, 1, decoder_inout_dim)
prev_state = lstm_decoder_net.init_decoder_state(encoder_out)
next_state, decoded_vec = lstm_decoder_net(
prev_state, encoded_state, source_mask, prev_step_prediction_embeded
)
assert list(next_state["decoder_hidden"].shape) == [batch_size, decoder_inout_dim]
assert list(next_state["decoder_context"].shape) == [batch_size, decoder_inout_dim]
assert list(decoded_vec.shape) == [batch_size, decoder_inout_dim]
def test_lstm_cell_decoder_net_forward_without_bidirectionality(self):
decoder_inout_dim = 10
lstm_decoder_net = LstmCellDecoderNet(
decoding_dim=decoder_inout_dim,
target_embedding_dim=decoder_inout_dim,
attention=DotProductAttention(),
bidirectional_input=False,
)
batch_size = 5
time_steps = 10
encoded_state = torch.rand(batch_size, time_steps, decoder_inout_dim)
source_mask = torch.ones(batch_size, time_steps).bool()
source_mask[0, 7:] = 0
source_mask[1, 5:] = 0
encoder_out = {"source_mask": source_mask, "encoder_outputs": encoded_state}
prev_step_prediction_embeded = torch.rand(batch_size, 1, decoder_inout_dim)
prev_state = lstm_decoder_net.init_decoder_state(encoder_out)
next_state, decoded_vec = lstm_decoder_net(
prev_state, encoded_state, source_mask, prev_step_prediction_embeded
)
assert list(next_state["decoder_hidden"].shape) == [batch_size, decoder_inout_dim]
assert list(next_state["decoder_context"].shape) == [batch_size, decoder_inout_dim]
assert list(decoded_vec.shape) == [batch_size, decoder_inout_dim]
| allennlp-models-main | tests/generation/modules/decoder_nets/lstm_cell_test.py |
allennlp-models-main | tests/coref/__init__.py |
|
import torch
from allennlp.common.testing import (
multi_device,
AllenNlpTestCase,
global_distributed_metric,
run_distributed_test,
)
from allennlp_models.coref.metrics.conll_coref_scores import ConllCorefScores
class ConllCorefScoresTest(AllenNlpTestCase):
@multi_device
def test_get_predicted_clusters(self, device: str):
top_spans = torch.tensor([[0, 1], [4, 6], [8, 9]], device=device)
antecedent_indices = torch.tensor([[-1, -1, -1], [0, -1, -1], [0, 1, -1]], device=device)
predicted_antecedents = torch.tensor([-1, -1, 1], device=device)
clusters, mention_to_cluster = ConllCorefScores.get_predicted_clusters(
top_spans, antecedent_indices, predicted_antecedents, allow_singletons=False
)
assert len(clusters) == 1
assert set(clusters[0]) == {(4, 6), (8, 9)}
assert mention_to_cluster == {(4, 6): clusters[0], (8, 9): clusters[0]}
def test_metric_values(self):
top_spans = torch.tensor([[[0, 1], [4, 6], [8, 9]], [[0, 1], [4, 6], [8, 9]]])
antecedent_indices = torch.tensor(
[[[-1, -1, -1], [0, -1, -1], [0, 1, -1]], [[-1, -1, -1], [0, -1, -1], [0, 1, -1]]]
)
predicted_antecedents = torch.tensor([[-1, -1, 1], [-1, -1, 1]])
metadata_list = [{"clusters": [((4, 6), (8, 9))]}, {"clusters": [((0, 1), (4, 6))]}]
metric = ConllCorefScores()
metric(top_spans, antecedent_indices, predicted_antecedents, metadata_list)
values = metric.get_metric()
assert values[0] == values[1] == values[2] == 0.625
def test_distributed_metric_values(self):
top_spans = torch.tensor([[[0, 1], [4, 6], [8, 9]]])
antecedent_indices = torch.tensor([[[-1, -1, -1], [0, -1, -1], [0, 1, -1]]])
predicted_antecedents = torch.tensor([[-1, -1, 1]])
metadata_list = [[{"clusters": [((4, 6), (8, 9))]}], [{"clusters": [((0, 1), (4, 6))]}]]
metric_kwargs = {
"top_spans": [top_spans, top_spans],
"antecedent_indices": [antecedent_indices, antecedent_indices],
"predicted_antecedents": [predicted_antecedents, predicted_antecedents],
"metadata_list": metadata_list,
}
desired_values = (0.625, 0.625, 0.625)
run_distributed_test(
[-1, -1],
global_distributed_metric,
ConllCorefScores(),
metric_kwargs,
desired_values,
exact=True,
)
| allennlp-models-main | tests/coref/metrics/conll_coref_scores_test.py |
allennlp-models-main | tests/coref/metrics/__init__.py |
|
import torch
from allennlp.common.testing import (
AllenNlpTestCase,
global_distributed_metric,
run_distributed_test,
)
from allennlp_models.coref.metrics.mention_recall import MentionRecall
class MentionRecallTest(AllenNlpTestCase):
def test_mention_recall(self):
metric = MentionRecall()
batched_top_spans = torch.tensor([[[2, 4], [1, 3]], [[5, 6], [7, 8]]])
batched_metadata = [{"clusters": [[(2, 4), (3, 5)]]}, {"clusters": [[(5, 6), (7, 8)]]}]
metric(batched_top_spans, batched_metadata)
recall = metric.get_metric()
assert recall == 0.75
def test_distributed_mention_recall(self):
batched_top_spans = [torch.tensor([[[2, 4], [1, 3]]]), torch.tensor([[[5, 6], [7, 8]]])]
batched_metadata = [[{"clusters": [[(2, 4), (3, 5)]]}], [{"clusters": [[(5, 6), (7, 8)]]}]]
metric_kwargs = {
"batched_top_spans": batched_top_spans,
"batched_metadata": batched_metadata,
}
desired_values = 0.75
run_distributed_test(
[-1, -1],
global_distributed_metric,
MentionRecall(),
metric_kwargs,
desired_values,
exact=True,
)
| allennlp-models-main | tests/coref/metrics/mention_recall_test.py |
from typing import List, Tuple
from allennlp.common.util import ensure_list
from allennlp.common.testing import AllenNlpTestCase
from allennlp_models.coref import WinobiasReader
from tests import FIXTURES_ROOT
class TestWinobiasReader:
span_width = 5
def test_read_from_file(self):
conll_reader = WinobiasReader(max_span_width=self.span_width)
instances = ensure_list(conll_reader.read(str(FIXTURES_ROOT / "coref" / "winobias.sample")))
assert len(instances) == 2
fields = instances[0].fields
text = [x.text for x in fields["text"].tokens]
assert text == [
"The",
"designer",
"argued",
"with",
"the",
"developer",
"and",
"slapped",
"her",
"in",
"the",
"face",
".",
]
spans = fields["spans"].field_list
span_starts, span_ends = zip(*[(field.span_start, field.span_end) for field in spans])
candidate_mentions = self.check_candidate_mentions_are_well_defined(
span_starts, span_ends, text
)
gold_span_labels = fields["span_labels"]
gold_indices_with_ids = [(i, x) for i, x in enumerate(gold_span_labels.labels) if x != -1]
gold_mentions_with_ids: List[Tuple[List[str], int]] = [
(candidate_mentions[i], x) for i, x in gold_indices_with_ids
]
assert gold_mentions_with_ids == [(["the", "developer"], 0), (["her"], 0)]
fields = instances[1].fields
text = [x.text for x in fields["text"].tokens]
assert text == [
"The",
"salesperson",
"sold",
"some",
"books",
"to",
"the",
"librarian",
"because",
"she",
"was",
"trying",
"to",
"sell",
"them",
".",
]
spans = fields["spans"].field_list
span_starts, span_ends = zip(*[(field.span_start, field.span_end) for field in spans])
candidate_mentions = self.check_candidate_mentions_are_well_defined(
span_starts, span_ends, text
)
gold_span_labels = fields["span_labels"]
gold_indices_with_ids = [(i, x) for i, x in enumerate(gold_span_labels.labels) if x != -1]
gold_mentions_with_ids: List[Tuple[List[str], int]] = [
(candidate_mentions[i], x) for i, x in gold_indices_with_ids
]
assert gold_mentions_with_ids == [
(["The", "salesperson"], 0),
(["some", "books"], 1),
(["she"], 0),
(["them"], 1),
]
def check_candidate_mentions_are_well_defined(self, span_starts, span_ends, text):
candidate_mentions = []
for start, end in zip(span_starts, span_ends):
# Spans are inclusive.
text_span = text[start : (end + 1)]
candidate_mentions.append(text_span)
        # Check that we aren't considering zero-length spans and that all candidate
        # spans are no longer than the specified maximum span width.
assert all(self.span_width >= len(x) > 0 for x in candidate_mentions)
return candidate_mentions
| allennlp-models-main | tests/coref/dataset_readers/winobias_test.py |
from typing import List, Tuple
import pytest
from allennlp.common.util import ensure_list
from allennlp.common.testing import AllenNlpTestCase
from allennlp_models.coref import PrecoReader
from tests import FIXTURES_ROOT
class TestPrecoReader:
span_width = 5
@pytest.mark.parametrize("remove_singleton_clusters", (True, False))
def test_read_from_file(self, remove_singleton_clusters):
conll_reader = PrecoReader(
max_span_width=self.span_width, remove_singleton_clusters=remove_singleton_clusters
)
instances = ensure_list(conll_reader.read(str(FIXTURES_ROOT / "coref" / "preco.jsonl")))
assert len(instances) == 3
fields = instances[2].fields
text = [x.text for x in fields["text"].tokens]
assert text == [
"When",
"you",
"watch",
"TV",
"and",
"play",
"video",
"games",
"you",
"make",
"global",
"warming",
"worse",
"!",
"It",
"may",
"seem",
"hard",
"to",
"believe",
",",
"but",
"when",
"electricity",
"is",
"made",
",",
"so",
"are",
"greenhouse",
"gases",
".",
"This",
"means",
"that",
"every",
"time",
"you",
"use",
"electricity",
"you",
"help",
"make",
"global",
"warming",
"worse",
"!",
"Cars",
"are",
"also",
"making",
"global",
"warming",
"worse",
".",
"They",
"burn",
"fossil",
"fuels",
"in",
"their",
"engines",
",",
"and",
"send",
"lots",
"of",
"greenhouse",
"gases",
"into",
"the",
"air",
".",
"Global",
"warming",
"may",
"be",
"a",
"big",
"problem",
",",
"but",
"we",
"can",
"all",
"help",
"stop",
"it",
".",
"People",
"can",
"try",
"to",
"drive",
"their",
"cars",
"less",
".",
"Or",
"even",
"get",
"ones",
"that",
"run",
"on",
"sunlight",
"!",
"You",
"can",
"also",
"help",
".",
"Let",
"'s",
"try",
"one",
"of",
"these",
"top",
"ideas",
":",
"(",
"1",
")",
"Try",
"to",
"use",
"less",
"electricity",
".",
"Turn",
"off",
"lights",
",",
"your",
"television",
",",
"and",
"your",
"computer",
"when",
"you",
"'ve",
"stopped",
"using",
"them",
".",
"To",
"make",
"electricity",
",",
"fossil",
"fuels",
"are",
"burned",
"in",
"big",
"factories",
".",
"But",
"burning",
"fossil",
"fuels",
"also",
"makes",
"greenhouse",
"gases",
".",
"You",
"should",
"also",
"try",
"to",
"watch",
"less",
"TV",
".",
"(",
"2",
")",
"Plant",
"trees",
".",
"Not",
"only",
"is",
"it",
"a",
"fun",
"thing",
"to",
"do",
",",
"but",
"it",
"is",
"also",
"a",
"great",
"way",
"to",
"lower",
"the",
"number",
"of",
"greenhouse",
"gases",
"in",
"the",
"air",
".",
"Trees",
"take",
"carbon",
"dioxide",
"out",
"of",
"the",
"air",
"when",
"they",
"grow",
".",
"(",
"3",
")",
"Do",
"n't",
"throw",
"away",
"your",
"rubbish",
",",
"try",
"to",
"recycle",
"it",
".",
"If",
"rubbish",
"is",
"not",
"recycled",
",",
"it",
"is",
"put",
"in",
"the",
"ground",
".",
"There",
"it",
"rots",
"and",
"makes",
"a",
"greenhouse",
"gas",
"called",
"methane",
".",
"So",
"try",
"to",
"recycle",
"cans",
",",
"bottles",
",",
"plastic",
"bags",
"and",
"newspapers",
".",
"It",
"'ll",
"make",
"you",
"feel",
"great",
"!",
"And",
"it",
"'ll",
"help",
"the",
"Earth",
".",
]
spans = fields["spans"].field_list
span_starts, span_ends = zip(*[(field.span_start, field.span_end) for field in spans])
candidate_mentions = self.check_candidate_mentions_are_well_defined(
span_starts, span_ends, text
)
gold_span_labels = fields["span_labels"]
gold_indices_with_ids = [(i, x) for i, x in enumerate(gold_span_labels.labels) if x != -1]
gold_mentions_with_ids: List[Tuple[List[str], int]] = [
(candidate_mentions[i], x) for i, x in gold_indices_with_ids
]
assert (["you"], 0) in gold_mentions_with_ids
gold_mentions_with_ids.remove((["you"], 0))
assert (["you"], 0) in gold_mentions_with_ids
if not remove_singleton_clusters:
# Singleton mention
assert (["video", "games"], 2) in gold_mentions_with_ids
gold_mentions_with_ids.remove((["video", "games"], 2))
assert not any(_ for _, id_ in gold_mentions_with_ids if id_ == 2)
assert (["them"], 24) in gold_mentions_with_ids
# This is a span which exceeds our max_span_width, so it should not be considered.
assert (
["lights", ",", "your", "television", ",", "and", "your", "computer"],
24,
) not in gold_mentions_with_ids
else:
assert (["video", "games"], 2) not in gold_mentions_with_ids
def check_candidate_mentions_are_well_defined(self, span_starts, span_ends, text):
candidate_mentions = []
for start, end in zip(span_starts, span_ends):
# Spans are inclusive.
text_span = text[start : end + 1]
candidate_mentions.append(text_span)
        # Check that we aren't considering zero-length spans and that all candidate
        # spans are no longer than the specified maximum span width.
assert all(self.span_width >= len(x) > 0 for x in candidate_mentions)
return candidate_mentions
| allennlp-models-main | tests/coref/dataset_readers/preco_test.py |
allennlp-models-main | tests/coref/dataset_readers/__init__.py |
|
from typing import List, Tuple
from allennlp.data.tokenizers import PretrainedTransformerTokenizer
from allennlp.common.util import ensure_list
from allennlp.common.testing import AllenNlpTestCase
from allennlp_models.coref import ConllCorefReader
from tests import FIXTURES_ROOT
class TestCorefReader:
span_width = 5
def test_read_from_file(self):
conll_reader = ConllCorefReader(max_span_width=self.span_width)
instances = ensure_list(
conll_reader.read(str(FIXTURES_ROOT / "coref" / "coref.gold_conll"))
)
assert len(instances) == 4
fields = instances[0].fields
text = [x.text for x in fields["text"].tokens]
assert text == [
"In",
"the",
"summer",
"of",
"2005",
",",
"a",
"picture",
"that",
"people",
"have",
"long",
"been",
"looking",
"forward",
"to",
"started",
"emerging",
"with",
"frequency",
"in",
"various",
"major",
"Hong",
"Kong",
"media",
".",
"With",
"their",
"unique",
"charm",
",",
"these",
"well",
"-",
"known",
"cartoon",
"images",
"once",
"again",
"caused",
"Hong",
"Kong",
"to",
"be",
"a",
"focus",
"of",
"worldwide",
"attention",
".",
"The",
"world",
"'s",
"fifth",
"Disney",
"park",
"will",
"soon",
"open",
"to",
"the",
"public",
"here",
".",
]
spans = fields["spans"].field_list
span_starts, span_ends = zip(*[(field.span_start, field.span_end) for field in spans])
candidate_mentions = self.check_candidate_mentions_are_well_defined(
span_starts, span_ends, text
)
gold_span_labels = fields["span_labels"]
gold_indices_with_ids = [(i, x) for i, x in enumerate(gold_span_labels.labels) if x != -1]
gold_mentions_with_ids: List[Tuple[List[str], int]] = [
(candidate_mentions[i], x) for i, x in gold_indices_with_ids
]
assert (["Hong", "Kong"], 0) in gold_mentions_with_ids
gold_mentions_with_ids.remove((["Hong", "Kong"], 0))
assert (["Hong", "Kong"], 0) in gold_mentions_with_ids
assert (["their"], 1) in gold_mentions_with_ids
# This is a span which exceeds our max_span_width, so it should not be considered.
assert (
["these", "well", "-", "known", "cartoon", "images"],
1,
) not in gold_mentions_with_ids
fields = instances[2].fields
text = [x.text for x in fields["text"].tokens]
assert text == [
"The",
"area",
"of",
"Hong",
"Kong",
"is",
"only",
"one",
"thousand",
"-",
"plus",
"square",
"kilometers",
".",
"The",
"population",
"is",
"dense",
".",
"Natural",
"resources",
"are",
"relatively",
"scarce",
".",
"However",
",",
"the",
"clever",
"Hong",
"Kong",
"people",
"will",
"utilize",
"all",
"resources",
"they",
"have",
"created",
"for",
"developing",
"the",
"Hong",
"Kong",
"tourism",
"industry",
".",
]
spans = fields["spans"].field_list
span_starts, span_ends = zip(*[(field.span_start, field.span_end) for field in spans])
candidate_mentions = self.check_candidate_mentions_are_well_defined(
span_starts, span_ends, text
)
gold_span_labels = fields["span_labels"]
gold_indices_with_ids = [(i, x) for i, x in enumerate(gold_span_labels.labels) if x != -1]
gold_mentions_with_ids: List[Tuple[List[str], int]] = [
(candidate_mentions[i], x) for i, x in gold_indices_with_ids
]
assert (["Hong", "Kong"], 0) in gold_mentions_with_ids
gold_mentions_with_ids.remove((["Hong", "Kong"], 0))
assert (["Hong", "Kong"], 0) in gold_mentions_with_ids
assert (["they"], 1) in gold_mentions_with_ids
assert (["the", "clever", "Hong", "Kong", "people"], 1) in gold_mentions_with_ids
def test_wordpiece_modeling(self):
tokenizer = PretrainedTransformerTokenizer("bert-base-cased")
conll_reader = ConllCorefReader(
max_span_width=self.span_width, wordpiece_modeling_tokenizer=tokenizer
)
instances = ensure_list(
conll_reader.read(str(FIXTURES_ROOT / "coref" / "coref.gold_conll"))
)
assert len(instances) == 4
fields = instances[3].fields
text = [x.text for x in fields["text"].tokens]
assert text == [
"[CLS]",
"Hong",
"Kong",
"Wet",
"##land",
"Park",
",",
"which",
"is",
"currently",
"under",
"construction",
",",
"is",
"also",
"one",
"of",
"the",
"designated",
"new",
"projects",
"of",
"the",
"Hong",
"Kong",
"SA",
"##R",
"government",
"for",
"advancing",
"the",
"Hong",
"Kong",
"tourism",
"industry",
".",
"[SEP]",
]
spans = fields["spans"].field_list
span_starts, span_ends = zip(*[(field.span_start, field.span_end) for field in spans])
candidate_mentions = self.check_candidate_mentions_are_well_defined(
span_starts, span_ends, text
)
# Asserts special tokens aren't included in the spans
assert all(span_start > 0 for span_start in span_starts)
assert all(span_end < len(text) - 1 for span_end in span_ends)
gold_span_labels = fields["span_labels"]
gold_indices_with_ids = [(i, x) for i, x in enumerate(gold_span_labels.labels) if x != -1]
gold_mentions_with_ids: List[Tuple[List[str], int]] = [
(candidate_mentions[i], x) for i, x in gold_indices_with_ids
]
assert (["Hong", "Kong"], 0) in gold_mentions_with_ids
# Within span_width before wordpiece splitting but exceeds afterwards
assert (["the", "Hong", "Kong", "SA", "##R", "government"], 0) not in gold_mentions_with_ids
fields = instances[1].fields
text = [x.text for x in fields["text"].tokens]
spans = fields["spans"].field_list
span_starts, span_ends = zip(*[(field.span_start, field.span_end) for field in spans])
candidate_mentions = self.check_candidate_mentions_are_well_defined(
span_starts, span_ends, text
)
gold_span_labels = fields["span_labels"]
gold_indices_with_ids = [(i, x) for i, x in enumerate(gold_span_labels.labels) if x != -1]
gold_mentions_with_ids: List[Tuple[List[str], int]] = [
(candidate_mentions[i], x) for i, x in gold_indices_with_ids
]
# Prior to wordpiece tokenization, 's was one token; wordpiece tokenization splits it into 2
assert (["the", "city", "'", "s"], 0) in gold_mentions_with_ids
def check_candidate_mentions_are_well_defined(self, span_starts, span_ends, text):
candidate_mentions = []
for start, end in zip(span_starts, span_ends):
# Spans are inclusive.
text_span = text[start : end + 1]
candidate_mentions.append(text_span)
        # Check that we aren't considering zero-length spans and that all candidate
        # spans are no longer than the specified maximum span width.
assert all(self.span_width >= len(x) > 0 for x in candidate_mentions)
return candidate_mentions
def test_max_sentences(self):
conll_reader = ConllCorefReader(max_span_width=self.span_width)
instances = ensure_list(
conll_reader.read(str(FIXTURES_ROOT / "coref" / "coref.gold_conll"))
)
limited_conll_reader = ConllCorefReader(max_span_width=self.span_width, max_sentences=2)
limited_instances = ensure_list(
limited_conll_reader.read(str(FIXTURES_ROOT / "coref" / "coref.gold_conll"))
)
assert len(limited_instances) == len(instances) == 4
tokens_of = lambda instance: instance.fields["text"].tokens
text_of = lambda tokens: [token.text for token in tokens]
docs = [tokens_of(instance) for instance in instances]
limited_docs = [tokens_of(instance) for instance in limited_instances]
# Short ones; not truncated
assert limited_docs[1] == docs[1]
assert limited_docs[3] == docs[3]
# Truncation happened
assert len(limited_docs[0]) < len(docs[0])
assert len(limited_docs[2]) < len(docs[2])
assert "Disney" in text_of(docs[0]) and "Disney" not in text_of(limited_docs[0])
assert "tourism" in text_of(docs[2]) and "tourism" not in text_of(limited_docs[2])
# Truncated tokens are the prefixes
assert limited_docs[0] == docs[0][: len(limited_docs[0])]
assert limited_docs[2] == docs[2][: len(limited_docs[2])]
| allennlp-models-main | tests/coref/dataset_readers/coref_test.py |
allennlp-models-main | tests/coref/predictors/__init__.py |
|
import spacy
from allennlp.common.testing import AllenNlpTestCase
from allennlp.models.archival import load_archive
from allennlp.predictors import Predictor
from allennlp_models.coref import CorefPredictor
from tests import FIXTURES_ROOT
class TestCorefPredictor(AllenNlpTestCase):
def test_uses_named_inputs(self):
inputs = {
"document": "This is a single string document about a test. Sometimes it "
"contains coreferent parts."
}
archive = load_archive(FIXTURES_ROOT / "coref" / "serialization" / "model.tar.gz")
predictor = Predictor.from_archive(archive, "coreference_resolution")
result = predictor.predict_json(inputs)
self.assert_predict_result(result)
document = [
"This",
"is",
"a",
"single",
"string",
"document",
"about",
"a",
"test",
".",
"Sometimes",
"it",
"contains",
"coreferent",
"parts",
".",
]
result_doc_words = predictor.predict_tokenized(document)
self.assert_predict_result(result_doc_words)
@staticmethod
def assert_predict_result(result):
document = result["document"]
assert document == [
"This",
"is",
"a",
"single",
"string",
"document",
"about",
"a",
"test",
".",
"Sometimes",
"it",
"contains",
"coreferent",
"parts",
".",
]
clusters = result["clusters"]
assert isinstance(clusters, list)
for cluster in clusters:
assert isinstance(cluster, list)
for mention in cluster:
# Spans should be integer indices.
assert isinstance(mention[0], int)
assert isinstance(mention[1], int)
# Spans should be inside document.
assert 0 < mention[0] <= len(document)
assert 0 < mention[1] <= len(document)
def test_coref_resolved(self):
"""Tests I/O of coref_resolved method"""
document = "This is a test sentence."
archive = load_archive(FIXTURES_ROOT / "coref" / "serialization" / "model.tar.gz")
predictor = Predictor.from_archive(archive, "coreference_resolution")
result = predictor.coref_resolved(document)
assert isinstance(result, str)
def test_replace_corefs(self):
"""Tests core coref replacement logic"""
nlp = spacy.load("en_core_web_sm")
inputs = [
"This is a sentence with no coreferences.", # No coreferences
"Julie wants to buy fruit. That is what she loves.", # Single coreference / personal
"Charlie wants to buy a game, so he can play it with friends.", # Multiple coreferences / personal
"The woman reading a newspaper sat on the bench with her dog.", # Phrasal mention / possessive
"Canada stimulated the country's economy.", # Phrasal coreference / possessive
]
expected_clusters = [
[],
[[[0, 0], [9, 9]]],
[[[0, 0], [8, 8]], [[4, 5], [11, 11]]],
[[[0, 4], [10, 10]]],
[[[0, 0], [2, 4]]],
]
expected_outputs = [
"This is a sentence with no coreferences.",
"Julie wants to buy fruit. That is what Julie loves.",
"Charlie wants to buy a game, so Charlie can play a game with friends.",
"The woman reading a newspaper sat on the bench with The woman reading a newspaper's dog.",
"Canada stimulated Canada's economy.",
]
for i, text in enumerate(inputs):
clusters = expected_clusters[i]
if not clusters:
assert text == inputs[i]
continue
doc = nlp(text)
output = CorefPredictor.replace_corefs(doc, clusters)
assert output == expected_outputs[i]
def test_predictions_to_labeled_instances(self):
inputs = {
"document": "This is a single string document about a test. Sometimes it "
"contains coreferent parts."
}
archive = load_archive(FIXTURES_ROOT / "coref" / "serialization" / "model.tar.gz")
predictor = Predictor.from_archive(archive, "coreference_resolution")
instance = predictor._json_to_instance(inputs)
outputs = predictor._model.forward_on_instance(instance)
new_instances = predictor.predictions_to_labeled_instances(instance, outputs)
assert new_instances is not None
for new_instance in new_instances:
assert "span_labels" in new_instance
assert len(new_instance["span_labels"]) == 60 # 7 words in input
true_top_spans = set(tuple(span) for span in outputs["top_spans"])
pred_clust_spans = set()
for i, span in enumerate(outputs["top_spans"]):
if new_instance["span_labels"][i]:
pred_clust_spans.add(tuple(span))
assert true_top_spans == pred_clust_spans
| allennlp-models-main | tests/coref/predictors/coref_test.py |
allennlp-models-main | tests/coref/models/__init__.py |
|
import torch
from allennlp.common.testing import ModelTestCase
from allennlp_models import coref # noqa: F401
from tests import FIXTURES_ROOT
class CorefTest(ModelTestCase):
def setup_method(self):
super().setup_method()
self.set_up_model(
FIXTURES_ROOT / "coref" / "experiment.json",
FIXTURES_ROOT / "coref" / "coref.gold_conll",
)
def test_coref_model_can_train_save_and_load(self):
for coarse_to_fine in (True, False):
for inference_order in (1, 3):
self._test_coref_model_can_train_save_and_load(coarse_to_fine, inference_order)
def _test_coref_model_can_train_save_and_load(
self, coarse_to_fine: bool = False, inference_order: int = 1
):
# fmt: off
overrides = (
"{"
+ '"model.coarse_to_fine": ' + f"{str(coarse_to_fine).lower()}" + ","
+ '"model.inference_order": ' + f"{inference_order}"
+ "}"
)
# fmt: on
self.ensure_model_can_train_save_and_load(
self.param_file,
overrides=overrides,
# Due to numerical instability, this scalar tensor might sometimes
# have zero gradient.
gradients_to_ignore={"_attentive_span_extractor._global_attention._module.bias"},
)
self.teardown_method()
self.setup_method()
def test_coref_bert_model_can_train_save_and_load(self):
self.set_up_model(
FIXTURES_ROOT / "coref" / "coref_albert_small.jsonnet",
FIXTURES_ROOT / "coref" / "coref.gold_conll",
)
self.ensure_model_can_train_save_and_load(
self.param_file,
gradients_to_ignore={
"_text_field_embedder.token_embedder_tokens._matched_embedder"
".transformer_model.pooler.weight",
"_text_field_embedder.token_embedder_tokens._matched_embedder"
".transformer_model.pooler.bias",
"_attentive_span_extractor._global_attention._module.bias",
},
)
def test_decode(self):
spans = torch.LongTensor([[1, 2], [3, 4], [3, 7], [5, 6], [14, 56], [17, 80]])
antecedent_indices = torch.LongTensor(
[
[0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0],
[1, 0, 0, 0, 0, 0],
[2, 1, 0, 0, 0, 0],
[3, 2, 1, 0, 0, 0],
[4, 3, 2, 1, 0, 0],
]
).unsqueeze(0)
spans = spans.unsqueeze(0)
antecedent_indices = antecedent_indices
# Indices into `antecedent_indices` indicating the predicted antecedent
# index in `top_spans`.
predicted_antecedents = torch.LongTensor([-1, 0, -1, -1, 1, 3])
predicted_antecedents = predicted_antecedents.unsqueeze(0)
output_dict = {
"top_spans": spans,
"antecedent_indices": antecedent_indices,
"predicted_antecedents": predicted_antecedents,
}
output = self.model.make_output_human_readable(output_dict)
clusters = output["clusters"][0]
gold1 = [(1, 2), (3, 4), (17, 80)]
gold2 = [(3, 7), (14, 56)]
assert len(clusters) == 2
assert gold1 in clusters
assert gold2 in clusters
| allennlp-models-main | tests/coref/models/coref_test.py |
allennlp-models-main | tests/coref/interpret/__init__.py |
|
from pytest import approx
from allennlp.common.testing import AllenNlpTestCase
from allennlp.interpret.saliency_interpreters import SimpleGradient
from allennlp.models.archival import load_archive
from allennlp.predictors import Predictor
from tests import FIXTURES_ROOT
class TestInterpret(AllenNlpTestCase):
def test_simple_gradient_coref(self):
inputs = {
"document": "This is a single string document about a test. Sometimes it "
"contains coreferent parts."
}
archive = load_archive(FIXTURES_ROOT / "coref" / "serialization" / "model.tar.gz")
predictor = Predictor.from_archive(archive, "coreference_resolution")
interpreter = SimpleGradient(predictor)
interpretation = interpreter.saliency_interpret_from_json(inputs)
assert interpretation is not None
assert "instance_1" in interpretation
assert "grad_input_1" in interpretation["instance_1"]
grad_input_1 = interpretation["instance_1"]["grad_input_1"]
# There are 16 tokens in the input. This gets translated into 22 wordpieces, but we need to
# compute gradients on whatever the model considered its "input", which in this case is
# tokens, because the model uses a mismatched tokenizer / embedder.
assert len(grad_input_1) == 16
        # Running the interpreter twice on the same input should produce identical gradients.
repeat_interpretation = interpreter.saliency_interpret_from_json(inputs)
repeat_grad_input_1 = repeat_interpretation["instance_1"]["grad_input_1"]
for grad, repeat_grad in zip(grad_input_1, repeat_grad_input_1):
assert grad == approx(repeat_grad)
| allennlp-models-main | tests/coref/interpret/interpret_test.py |
#!/usr/bin/env python
"""
Ensures models are automatically found by allennlp.
"""
import logging
from allennlp.common.plugins import import_plugins
from allennlp.models import Model
logging.basicConfig(level=logging.INFO)
import_plugins()
Model.by_name("copynet_seq2seq")
| allennlp-models-main | scripts/ensure_models_found.py |
"""
Run this script to update the list of pre-trained models in the README based on the current model cards.
"""
from typing import List
import json
import glob
AUTO_GENERATED_SECTION_START = "<!-- This section is automatically generated"
AUTO_GENERATED_SECTION_END = "<!-- End automatically generated section -->"
def main():
with open("README.md") as readme_file:
readme_lines = readme_file.readlines()
section_start_idx = next(
(i for i, l in enumerate(readme_lines) if l.startswith(AUTO_GENERATED_SECTION_START))
)
section_end_idx = next(
(i for i, l in enumerate(readme_lines) if l.startswith(AUTO_GENERATED_SECTION_END))
)
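    # Build one bullet entry per model card (skipping the template) and splice the list
    # between the auto-generated markers in the README.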
model_list: List[str] = ["\n"]
for model_card_path in sorted(glob.glob("allennlp_models/modelcards/*.json")):
if model_card_path.endswith("modelcard-template.json"):
continue
with open(model_card_path) as model_card_file:
model_card = json.load(model_card_file)
model_id = model_card["id"]
description = model_card["model_details"]["short_description"]
model_list.append(
f"- [`{model_id}`](https://github.com/allenai/allennlp-models/tree/main/"
f"{model_card_path}) - {description}\n"
)
model_list.append("\n")
readme_lines = (
readme_lines[: section_start_idx + 1] + model_list + readme_lines[section_end_idx:]
)
with open("README.md", "w") as readme_file:
readme_file.writelines(readme_lines)
if __name__ == "__main__":
main()
| allennlp-models-main | scripts/update_readme_model_list.py |
#!/usr/bin/env python
"""
This script is used to populate the table of contents for the API in the mkdocs config file.
"""
import argparse
from pathlib import Path
from typing import Any, List
from ruamel.yaml import YAML
from allennlp_models.version import VERSION
API_TOC_KEY = "Models"
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument("target_yaml", help="Path to the target mkdocs config file.")
parser.add_argument("source_yaml", help="Path to the mkdocs skeleton config file.")
parser.add_argument("docs_root", help="The root of the markdown docs folder.")
parser.add_argument(
"api_docs_path", help="The root of the API docs within the markdown docs root folder."
)
parser.add_argument("--docs-version", type=str, default=f"v{VERSION}")
return parser.parse_args()
def build_api_toc(source_path: Path, docs_root: Path):
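    # Recursively walk the generated API docs tree: sub-directories become nested
    # sub-sections and each markdown file becomes an individual nav entry.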
nav_entries: List[Any] = []
for child in source_path.iterdir():
if child.is_dir():
nav_subsection = build_api_toc(child, docs_root)
elif child.suffix == ".md":
nav_subsection = str(child.relative_to(docs_root))
nav_entries.append({child.stem: nav_subsection})
nav_entries.sort(key=lambda x: list(x)[0], reverse=False)
return nav_entries
def main():
yaml = YAML()
opts = parse_args()
source_yaml = yaml.load(Path(opts.source_yaml))
nav_entries = build_api_toc(Path(opts.api_docs_path), Path(opts.docs_root))
# Add version to name.
source_yaml["site_name"] = f"AllenNLP Models {opts.docs_version}"
# Find the yaml sub-object corresponding to the API table of contents.
site_nav = source_yaml["nav"]
for nav_obj in site_nav:
if API_TOC_KEY in nav_obj:
break
nav_obj[API_TOC_KEY] = nav_entries
with open(opts.target_yaml, "w") as f:
yaml.dump(source_yaml, f)
print(f"{opts.target_yaml} created")
if __name__ == "__main__":
main()
| allennlp-models-main | scripts/build_docs_config.py |
#!/usr/bin/env python
"""
Ensures allennlp and models versions are the same.
"""
from allennlp.version import VERSION as CORE_VERSION
from allennlp_models.version import VERSION as MODELS_VERSION
assert CORE_VERSION == MODELS_VERSION, f"core: {CORE_VERSION}, models: {MODELS_VERSION}"
| allennlp-models-main | scripts/ensure_versions_match.py |
#!/usr/bin/env python3
import argparse
from typing import Dict
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument("version_type", choices=["stable", "latest", "current"])
parser.add_argument("--minimal", action="store_true", default=False)
parser.add_argument("--as-range", action="store_true", default=False)
return parser.parse_args()
def post_process(version: str, minimal: bool = False, as_range: bool = False):
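    # Normalize a version string, e.g. post_process("v2.1.0") -> "v2.1.0",
    # post_process("v2.1.0", minimal=True) -> "2.1.0", and
    # post_process("v2.1.0", as_range=True) -> ">=2.1.0,<2.2".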
assert not (minimal and as_range)
if version.startswith("v"):
version = version[1:]
if as_range:
major, minor, *_ = version.split(".")
return f">={version},<{major}.{int(minor)+1}"
return version if minimal else f"v{version}"
def get_current_version() -> str:
VERSION: Dict[str, str] = {}
with open("allennlp_models/version.py", "r") as version_file:
exec(version_file.read(), VERSION)
return VERSION["VERSION"]
def get_latest_version() -> str:
    # Import this here so this requirement isn't mandatory when we just want to
    # call `get_current_version`.
import requests
resp = requests.get("https://api.github.com/repos/allenai/allennlp-models/tags")
return resp.json()[0]["name"]
def get_stable_version() -> str:
import requests
resp = requests.get("https://api.github.com/repos/allenai/allennlp-models/releases/latest")
return resp.json()["tag_name"]
def main() -> None:
opts = parse_args()
if opts.version_type == "stable":
print(post_process(get_stable_version(), opts.minimal, opts.as_range))
elif opts.version_type == "latest":
print(post_process(get_latest_version(), opts.minimal, opts.as_range))
elif opts.version_type == "current":
print(post_process(get_current_version(), opts.minimal, opts.as_range))
else:
raise NotImplementedError
if __name__ == "__main__":
main()
| allennlp-models-main | scripts/get_version.py |
import argparse
import random
from utils import read_csv, write_array2tsv, head_based_split, count_relation
def load_conceptnet(args):
random.seed(args.random_seed)
cn_train_file = args.data_folder + args.data_file
cn_train_data = read_csv(cn_train_file, delimiter="\t")
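    # Reorder the columns from (relation, head, tail, ...) to (head, relation, tail).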
train_data = [[l[1], l[0], l[2]] for l in cn_train_data]
count_relation(train_data)
original_test_data = []
if args.include_original_test:
cn_test_file = args.data_folder + "test.txt"
cn_test_data = read_csv(cn_test_file, delimiter="\t")
original_test_data = [[l[1], l[0], l[2]] for l in cn_test_data if float(l[3]) == 1.0]
if args.sanity_check:
assert len(original_test_data) == 1200
(train, dev, test) = head_based_split(train_data, args.dev_size, args.test_size, args.head_size_threshold)
return train, dev, original_test_data + test
def main():
# Parse args
parser = argparse.ArgumentParser()
parser.add_argument('--data-folder', type=str, help='Path to folder containing the data',
default="./data/conceptnet/")
parser.add_argument('--data-file', type=str, help='Dataset filename', default="train300k.txt")
parser.add_argument('--dev-size', type=int, default=5000, help='Dev size')
parser.add_argument('--test-size', type=int, default=30000, help='Test size')
    parser.add_argument('--head-size-threshold', type=int, default=500,
                        help='Maximum number of tuples a head is involved in, '
                             'in order to be a candidate for the dev/test set')
parser.add_argument('--random-seed', type=int, default=30, help='Random seed')
parser.add_argument('--sanity-check', action='store_true',
help='If specified, perform sanity check during split creation')
parser.add_argument('--include-original-test', action='store_true',
help='If specified, include the original 1.2k test set')
args = parser.parse_args()
# Load ConceptNet data
(train, dev, test) = load_conceptnet(args)
# Write tsv files
folder = args.data_folder
write_array2tsv(folder + "train.tsv", train)
write_array2tsv(folder + "dev.tsv", dev)
write_array2tsv(folder + "test.tsv", test)
if __name__ == "__main__":
main()
| comet-atomic-2020-master | split/split_conceptnet.py |
from utils import read_csv, write_tsv
def tuple_key(d):
return d[0] + d[1] + d[2]
def main():
folder = "./data/transomcs/"
file = folder + "TransOMCS_full.txt"
data = read_csv(file, delimiter="\t")
confidences = {}
for d in data:
key = tuple_key(d)
confidences[key] = float(d[3])
human_eval_file = folder + "human_evaluation_tuples.tsv"
tuples = read_csv(human_eval_file, delimiter="\t", skip_header=True)
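    # Keep only human-evaluation tuples whose confidence in the full TransOMCS release is
    # at least 0.5; the remaining tuples are written out separately as dropped.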
updated_t = [{"head_event": t[0], "relation": t[1], "tail_event": t[2]} for t in tuples if confidences[tuple_key(t)] >= 0.5]
dropped = [{"head_event": t[0], "relation": t[1], "tail_event": t[2]} for t in tuples if confidences[tuple_key(t)] < 0.5]
output_file = folder + "human_evaluation_tuples_v2.tsv"
write_tsv(output_file, updated_t)
output_file = folder + "dropped_human_evaluation_tuples_v2.tsv"
write_tsv(output_file, dropped)
if __name__ == "__main__":
main() | comet-atomic-2020-master | split/filter_human_eval_tuples_with_updated_transomcs.py |
import argparse
import random
from utils import read_csv, write_array2tsv
def load_atomic(args):
random.seed(args.random_seed)
atomic_split_folder = args.data_folder + "original_split/"
atomic_file = args.data_folder + args.data_file
atomic_data = read_csv(atomic_file, delimiter="\t", skip_header=True)
atomic_train_file = atomic_split_folder + "v4_atomic_trn.csv"
atomic_train = read_csv(atomic_train_file)
atomic_dev_file = atomic_split_folder + "v4_atomic_dev.csv"
atomic_dev = read_csv(atomic_dev_file)
atomic_test_file = atomic_split_folder + "v4_atomic_tst.csv"
atomic_test = read_csv(atomic_test_file)
atomic_train_events = set([l[0] for l in atomic_train])
atomic_dev_events = set([l[0] for l in atomic_dev])
atomic_test_events = set([l[0] for l in atomic_test])
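    # Reuse the original ATOMIC head-event split: each tuple is assigned to the split
    # whose set of head events contains its head.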
atomic_train = [l for l in atomic_data if l[0] in atomic_train_events]
atomic_dev = [l for l in atomic_data if l[0] in atomic_dev_events]
atomic_test = [l for l in atomic_data if l[0] in atomic_test_events]
if args.sanity_check:
nb_train = 0
nb_dev = 0
nb_test = 0
nb_other = 0
for d in atomic_data:
event = d[0]
if event in atomic_train_events:
nb_train += 1
else:
if event in atomic_dev_events:
nb_dev += 1
else:
if event in atomic_test_events:
nb_test += 1
else:
nb_other += 1
assert nb_other == 0
return atomic_train, atomic_dev, atomic_test
def main():
# Parse args
parser = argparse.ArgumentParser()
parser.add_argument('--data-folder', type=str, help='Path to folder containing the data',
default="./data/atomic/")
parser.add_argument('--data-file', type=str, help='Dataset filename', default="atomic_v1.tsv")
parser.add_argument('--random-seed', type=int, default=30, help='Random seed')
parser.add_argument('--sanity-check', action='store_true',
help='If specified, perform sanity check during split creation')
args = parser.parse_args()
# Load ATOMIC data
(train, dev, test) = load_atomic(args)
# Write tsv files
folder = args.data_folder
write_array2tsv(folder + "train.tsv", train)
write_array2tsv(folder + "dev.tsv", dev)
write_array2tsv(folder + "test.tsv", test)
if __name__ == "__main__":
main()
| comet-atomic-2020-master | split/split_atomic.py |
comet-atomic-2020-master | split/__init__.py |
|
import argparse
import random
from utils import read_csv, write_array2tsv, head_based_split
def load_transomcs(args):
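    # matplotlib is imported inside the function because it is only needed for the
    # optional sanity-check histogram of tuple confidences below.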
import matplotlib.pyplot as plt
random.seed(args.random_seed)
data_file = args.data_folder + args.data_file
data = read_csv(data_file, delimiter="\t")
selection = [[l[0], l[1], l[2]] for l in data if float(l[3]) >= args.confidence_threshold
and l[1] not in args.excluded_relations]
if args.sanity_check:
confs = [float(l[3]) for l in data]
plt.hist(confs, density=False, bins=30)
plt.yscale("log")
plt.ylabel('Counts')
plt.xlabel('Confidence')
plt.show()
(train, dev, test) = head_based_split(data=selection,
dev_size=args.dev_size,
test_size=args.test_size,
head_size_threshold=args.head_size_threshold)
return train, dev, test
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--data-folder', type=str, help='Path to folder containing the data',
default="./data/transomcs/")
parser.add_argument('--data-file', type=str, help='Dataset filename', default="TransOMCS_full.txt")
parser.add_argument('--random-seed', type=int, default=30, help='Random seed')
parser.add_argument('--dev-size', type=int, default=10000, help='Dev size')
parser.add_argument('--test-size', type=int, default=100000, help='Test size')
parser.add_argument('--head-size-threshold', type=int, default=500, help='Maximum number of tuples a head is involved in, '
'in order to be a candidate for the dev/test set')
    parser.add_argument('--confidence-threshold', type=float, default=0.5,
                        help='Confidence threshold for TransOMCS tuples')
parser.add_argument('--excluded-relations', default=["DefinedAs", "LocatedNear"], help='Relations to exclude')
parser.add_argument('--sanity-check', action='store_true',
help='If specified, perform sanity check during split creation')
args = parser.parse_args()
# Load TransOMCS data
(train, dev, test) = load_transomcs(args)
# Write tsv files
folder = args.data_folder
write_array2tsv(folder + "train.tsv", train)
write_array2tsv(folder + "dev.tsv", dev)
write_array2tsv(folder + "test.tsv", test)
if __name__ == "__main__":
main()
| comet-atomic-2020-master | split/split_transomcs.py |
import json
import sys
import csv
import operator
import random
def read_csv(input_file, quotechar='"', delimiter=",", skip_header=False):
"""Reads a tab separated value file."""
with open(input_file, "r") as f:
reader = csv.reader(f, delimiter=delimiter, quotechar=quotechar, quoting=csv.QUOTE_ALL, skipinitialspace=True)
lines = []
for line in reader:
if sys.version_info[0] == 2:
line = list(unicode(cell, 'utf-8') for cell in line)
lines.append(line)
if skip_header:
lines = lines[1:]
return lines
def write_tsv(output_file, data, header=False):
keys = list(data[0].keys())
with open(output_file, 'w') as f:
w = csv.DictWriter(f, keys, delimiter='\t', lineterminator='\n')
if header:
w.writeheader()
for r in data:
entry = {k: r[k] for k in keys}
w.writerow(entry)
def write_array2tsv(output_file, data, header=False):
keys = range(len(data[0]))
with open(output_file, 'w') as f:
w = csv.DictWriter(f, keys, delimiter='\t', lineterminator='\n')
if header:
w.writeheader()
for r in data:
entry = {k: r[k] for k in keys}
w.writerow(entry)
def write_csv(filename, data, fieldnames):
with open(filename, 'w', newline='') as csvfile:
writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
writer.writeheader()
for d in data:
formatted_d = {}
for key, val in d.items():
formatted_d[key] = json.dumps(val)
writer.writerow(formatted_d)
def read_jsonl(filename):
data = []
with open(filename, "r") as f:
for line in f:
data.append(json.loads(line))
return data
def write_items(output_file, items):
with open(output_file, 'w') as f:
for concept in items:
f.write(concept + "\n")
f.close()
def write_jsonl(f, d):
write_items(f, [json.dumps(r) for r in d])
def count_relation(d):
relation_count = {}
prefix_count = {}
head_count = {}
for l in d:
r = l[1]
if r not in relation_count.keys():
relation_count[r] = 0
relation_count[r] += 1
prefix = l[0]+l[1]
if prefix not in prefix_count.keys():
prefix_count[prefix] = 0
prefix_count[prefix] += 1
head = l[0]
if head not in head_count.keys():
head_count[head] = 0
head_count[head] += 1
sorted_relation_count = dict(sorted(relation_count.items(), key=operator.itemgetter(1), reverse=True))
sorted_prefix_count = dict(sorted(prefix_count.items(), key=operator.itemgetter(1), reverse=True))
sorted_head_count = dict(sorted(head_count.items(), key=operator.itemgetter(1), reverse=True))
print("Relations:")
for r in sorted_relation_count.keys():
print(r, sorted_relation_count[r])
print("\nPrefixes:")
print("uniq prefixes: ", len(sorted_prefix_count.keys()))
i = 0
for r in sorted_prefix_count.keys():
print(r, sorted_prefix_count[r])
i += 1
if i > 20:
break
print("\nHeads:")
i = 0
for r in sorted_head_count.keys():
print(r, sorted_head_count[r])
i += 1
if i > 20:
break
def get_head_set(d):
return set([l[0] for l in d])
def head_based_split(data, dev_size, test_size, head_size_threshold=500, dev_heads=[], test_heads=[]):
"""
:param data: the tuples to split according to the heads, where the head is the first element of each tuple
:param dev_size: target size of the dev set
:param test_size: target size of the test set
:param head_size_threshold: Maximum number of tuples a head can be involved in,
                                in order to be considered for the dev/test set
:param dev_heads: heads that are forced to belong to the dev set
:param test_heads: heads that are forced to belong to the test set
:return:
"""
head_count = {}
for l in data:
head = l[0]
if head not in head_count.keys():
head_count[head] = 0
head_count[head] += 1
remaining_heads = dict(head_count)
test_selected_heads = {}
test_head_total_count = 0
for h in test_heads:
if h in remaining_heads:
c = remaining_heads[h]
test_selected_heads[h] = c
test_head_total_count += c
remaining_heads.pop(h)
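    # Randomly move whole heads (and all of their tuples) into the test set until the
    # requested test size is reached, skipping heads that appear in too many tuples.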
while test_head_total_count < test_size:
h = random.sample(remaining_heads.keys(), 1)[0]
c = remaining_heads[h]
if c < head_size_threshold:
test_selected_heads[h] = c
test_head_total_count += c
remaining_heads.pop(h)
test = [l for l in data if l[0] in test_selected_heads.keys()]
dev_selected_heads = {}
dev_head_total_count = 0
for h in dev_heads:
if h in remaining_heads:
c = remaining_heads[h]
dev_selected_heads[h] = c
dev_head_total_count += c
remaining_heads.pop(h)
while dev_head_total_count < dev_size:
h = random.sample(remaining_heads.keys(), 1)[0]
c = remaining_heads[h]
if c < head_size_threshold:
dev_selected_heads[h] = c
dev_head_total_count += c
remaining_heads.pop(h)
dev = [l for l in data if l[0] in dev_selected_heads.keys()]
dev_test_heads = set(list(dev_selected_heads.keys()) + list(test_selected_heads.keys()))
train = [l for l in data if l[0] not in dev_test_heads]
return train, dev, test
def remove_prefix(text, prefix):
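    # `text.startswith(prefix) and len(prefix)` evaluates to 0 when the prefix is absent
    # and to len(prefix) when present, so the slice strips the prefix only if it is there.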
return text[text.startswith(prefix) and len(prefix):]
| comet-atomic-2020-master | split/utils.py |
import argparse
import random
from utils import read_csv, write_jsonl
def sample_kg(args):
random.seed(args.random_seed)
data_file = args.input_file
data = read_csv(data_file, delimiter="\t")
prefixes = {}
for l in data:
prefix = l[0] + " " + l[1]
if prefix not in prefixes.keys():
prefixes[prefix] = {"head": l[0], "relation":l[1], "tails": []}
prefixes[prefix]["tails"].append(l[2])
excluded_relations = [
"HasPainIntensity",
"LocatedNear",
"LocationOfAction",
"DesireOf",
"NotMadeOf",
"InheritsFrom",
"InstanceOf",
"RelatedTo",
"SymbolOf",
"CreatedBy",
"NotHasA",
"NotIsA",
"NotHasProperty",
"NotCapableOf",
"IsA",
"DefinedAs"
]
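    # Report prefix counts before and after dropping the excluded relations, then sample
    # uniformly from all prefixes and report how many sampled entries survive the filter.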
print(len(list(prefixes.keys())))
rel_prefixes = [p for p in prefixes.keys() if prefixes[p]["relation"] not in excluded_relations]
print(len(rel_prefixes))
sampled_prefixes = random.sample(list(prefixes.keys()), args.sample_size)
samples = [prefixes[k] for k in sampled_prefixes]
rel_samples = [s for s in samples if s["relation"] not in excluded_relations]
print(len(rel_samples))
return samples
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--input-file', type=str, help='Dataset filename')
parser.add_argument('--output-file', type=str, help='Dataset filename')
parser.add_argument('--random-seed', type=int, default=30, help='Random seed')
parser.add_argument('--sample-size', type=int, default=5000, help='Dev size')
args = parser.parse_args()
# Load KG data
samples = sample_kg(args)
# Write tsv files
write_jsonl(args.output_file, samples)
if __name__ == "__main__":
main()
| comet-atomic-2020-master | split/sample_prefixes.py |
import argparse
import random
from utils import read_csv, write_array2tsv, head_based_split, get_head_set
def load_atomic2020(args):
random.seed(args.random_seed)
atomic2020_v1_file = args.data_folder + "atomic_original_tuples.tsv"
atomic2020_addl_file = args.data_folder + "atomic_additional_tuples.tsv"
atomic2020_cn_file = args.data_folder + "atomic_conceptnet_tuples.tsv"
v1_data = read_csv(atomic2020_v1_file, delimiter="\t", skip_header=True)
addl_data = read_csv(atomic2020_addl_file, delimiter="\t", skip_header=True)
cn_data_with_id = read_csv(atomic2020_cn_file, delimiter="\t", skip_header=True)
cn_data = [l[1:] for l in cn_data_with_id]
# Atomic split
atomic_train = read_csv(args.atomic_split + "train.tsv", delimiter="\t", skip_header=False)
atomic_dev = read_csv(args.atomic_split + "dev.tsv", delimiter="\t", skip_header=False)
atomic_test = read_csv(args.atomic_split + "test.tsv", delimiter="\t", skip_header=False)
atomic_train_events = get_head_set(atomic_train)
atomic_dev_events = get_head_set(atomic_dev)
atomic_test_events = get_head_set(atomic_test)
v1_data_train = [l for l in v1_data if l[0] in atomic_train_events]
v1_data_dev = [l for l in v1_data if l[0] in atomic_dev_events]
v1_data_test = [l for l in v1_data if l[0] in atomic_test_events]
assert len(v1_data) == len(v1_data_train) + len(v1_data_dev) + len(v1_data_test)
# CN split
cn_train = read_csv(args.conceptnet_split + "train.tsv", delimiter="\t", skip_header=False)
cn_dev = read_csv(args.conceptnet_split + "dev.tsv", delimiter="\t", skip_header=False)
cn_test = read_csv(args.conceptnet_split + "test.tsv", delimiter="\t", skip_header=False)
cn_train_heads = get_head_set(cn_train)
cn_dev_heads = get_head_set(cn_dev)
cn_test_heads = get_head_set(cn_test)
cn_data_train = [l for l in cn_data if l[0] in cn_train_heads]
cn_data_dev = [l for l in cn_data if l[0] in cn_dev_heads]
cn_data_test = [l for l in cn_data if l[0] in cn_test_heads]
# Additional tuples split
(addl_train, addl_dev, addl_test) = head_based_split(addl_data,
dev_size=args.dev_size,
test_size=args.test_size,
head_size_threshold=args.head_size_threshold,
dev_heads=atomic_dev_events,
test_heads=atomic_test_events)
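    # Re-assign additional tuples whose heads already appear in the ConceptNet dev/test
    # splits so that those heads do not leak across splits.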
new_addl_train = []
new_addl_dev = []
new_addl_test = addl_test
for l in addl_train:
h = l[0]
if h in cn_dev_heads:
new_addl_dev.append(l)
else:
if h in cn_test_heads:
new_addl_test.append(l)
else:
new_addl_train.append(l)
for l in addl_dev:
h = l[0]
if h in cn_test_heads:
new_addl_test.append(l)
else:
new_addl_dev.append(l)
train = v1_data_train + cn_data_train + new_addl_train
dev = v1_data_dev + cn_data_dev + new_addl_dev
test = v1_data_test + cn_data_test + new_addl_test
return train, dev, test
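# Note on the split logic above: original ATOMIC tuples follow the existing ATOMIC
# head-based split, ConceptNet-derived tuples follow the existing ConceptNet split, and
# the additional tuples are head-based-split themselves, after which any additional tuple
# whose head already occurs in the ConceptNet dev/test heads is moved into dev/test so
# that no head string leaks between the training and evaluation splits.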
def main():
# Parse args
parser = argparse.ArgumentParser()
parser.add_argument('--data-folder', type=str, help='Path to folder containing the data',
default="./data/atomic2020/")
parser.add_argument('--atomic-split', type=str, help='Path to folder containing the ATOMIC split',
default="./data/atomic/")
parser.add_argument('--conceptnet-split', type=str, help='Path to folder containing the ConceptNet split',
default="./data/conceptnet/")
parser.add_argument('--data-file', type=str, help='Dataset filename', default="atomic_v1.tsv")
parser.add_argument('--dev-size', type=int, default=20000, help='Dev size')
parser.add_argument('--test-size', type=int, default=50000, help='Test size')
parser.add_argument('--head-size-threshold', type=int, default=500, help='Maximum number of tuples a head is involved in, '
'in order to be a candidate for the dev/test set')
parser.add_argument('--random-seed', type=int, default=30, help='Random seed')
parser.add_argument('--sanity-check', action='store_true',
help='If specified, perform sanity check during split creation')
args = parser.parse_args()
# Load ATOMIC 2020 data
(train, dev, test) = load_atomic2020(args)
# Write tsv files
folder = args.data_folder
write_array2tsv(folder + "train.tsv", train)
write_array2tsv(folder + "dev.tsv", dev)
write_array2tsv(folder + "test.tsv", test)
if __name__ == "__main__":
main()
| comet-atomic-2020-master | split/split_atomic2020.py |
comet-atomic-2020-master | mosaic/__init__.py |
|
# Importing stock libraries
import numpy as np
import pandas as pd
import torch
import torch.nn.functional as F
from torch.utils.data import Dataset, DataLoader, RandomSampler, SequentialSampler
# Importing the T5 modules from huggingface/transformers
from transformers import T5Tokenizer, T5ForConditionalGeneration
# Import os for env variables via Beaker
import os
# WandB – Import the wandb library
import wandb
from torch import cuda
import logging
logger = logging.getLogger(__name__)
def log_eval(epoch, tokenizer, model, device, loader, sample_limit=5000, model_class="t5"):
model.eval()
total_loss = 0
loss_count = 0
with torch.no_grad():
for _, data in enumerate(loader, 0):
y = data['target_ids'].to(device, dtype = torch.long)
y_ids = y[:, :-1].contiguous()
lm_labels = y[:, 1:].clone().detach()
lm_labels[y[:, 1:] == tokenizer.pad_token_id] = -100
ids = data['source_ids'].to(device, dtype = torch.long)
mask = data['source_mask'].to(device, dtype = torch.long)
if model_class == "t5":
outputs = model(input_ids = ids, attention_mask = mask, decoder_input_ids=y_ids, lm_labels=lm_labels)
else:
outputs = model(input_ids = ids, attention_mask = mask, labels=ids)
loss = outputs[0]
total_loss += loss.item()
loss_count += 1
wandb.log({"Eval Loss": total_loss / loss_count})
logger.info("Eval Loss: {}".format(total_loss / loss_count)) | comet-atomic-2020-master | mosaic/infra/logging.py |
# Importing stock libraries
import numpy as np
import pandas as pd
import torch
import torch.nn.functional as F
from torch.utils.data import Dataset, DataLoader, RandomSampler, SequentialSampler
# Importing the T5 modules from huggingface/transformers
from transformers import T5Tokenizer, T5ForConditionalGeneration
# Import os for env variables via Beaker
import os
# WandB – Import the wandb library
import wandb
import logging
from tqdm import tqdm
logger = logging.getLogger("modeling")
from mosaic.infra.logging import log_eval
def train(epoch, tokenizer, model, device, loader, optimizer, val_loader=None, model_class="t5",
save_dir="/models"):
model.train()
batch_count = len(loader)
for iteration, data in tqdm(enumerate(loader, 0)):
y = data['target_ids'].to(device, dtype=torch.long)
y_ids = y[:, :-1].contiguous()
lm_labels = y[:, 1:].clone().detach()
lm_labels[y[:, 1:] == tokenizer.pad_token_id] = -100
ids = data['source_ids'].to(device, dtype=torch.long)
mask = data['source_mask'].to(device, dtype=torch.long)
if model_class == "t5":
outputs = model(input_ids=ids, attention_mask=mask, decoder_input_ids=y_ids,
lm_labels=lm_labels)
else:
outputs = model(input_ids=ids, attention_mask=mask, labels=ids)
loss = outputs[0]
if iteration % 100 == 0:
wandb.log({"Training Loss": loss.item(), "Epoch": epoch,
"Batches left": batch_count - iteration})
batches_left = batch_count - iteration
logger.info(
f'\nEpoch: {epoch}, Iteration: {iteration}, Loss: {loss.item()}, Batches left: {batches_left}')
if iteration % 500 == 0:
logger.info(f'\nEpoch: {epoch}, Loss: {loss.item()}, BatchesLeft: {batches_left}')
if iteration % 5000 == 0:
model.save_pretrained(save_dir + "/iter_{}_model".format(iteration))
tokenizer.save_pretrained(save_dir + "/iter_{}_tokenizer".format(iteration))
optimizer.zero_grad()
loss.backward()
optimizer.step()
        if iteration % 100 == 0 and val_loader is not None:
log_eval(epoch, tokenizer, model, device, val_loader, model_class=model_class)
model.train()
def validate(epoch, tokenizer, model, device, loader):
model.eval()
predictions = []
actuals = []
sources = []
with torch.no_grad():
for _, data in enumerate(loader, 0):
y = data['target_ids'].to(device, dtype=torch.long)
ids = data['source_ids'].to(device, dtype=torch.long)
mask = data['source_mask'].to(device, dtype=torch.long)
generated_ids = model.generate(
input_ids=ids,
attention_mask=mask,
do_sample=True,
max_length=int(os.environ['OUT_LEN']),
num_beams=5,
top_k=50,
top_p=0.95
)
preds = [
tokenizer.decode(g, skip_special_tokens=True, clean_up_tokenization_spaces=True) for
g in generated_ids]
target = [
tokenizer.decode(t, skip_special_tokens=True, clean_up_tokenization_spaces=True) for
t in y]
source = [
tokenizer.decode(s, skip_special_tokens=True, clean_up_tokenization_spaces=True) for
s in ids]
if _ % 100 == 0:
logger.info(f'Completed {_}')
sources.extend(source)
predictions.extend(preds)
actuals.extend(target)
return sources, predictions, actuals
def beam_generations(tokenizer, model, device, loader, top_k=40):
# This method assumes batch size of 1
model.eval()
predictions = []
actuals = []
sources = []
records = []
with torch.no_grad():
for _, data in enumerate(loader, 0):
y = data['target_ids'].to(device, dtype=torch.long)
ids = data['source_ids'].to(device, dtype=torch.long)
mask = data['source_mask'].to(device, dtype=torch.long)
generated_ids = model.generate(
input_ids=ids,
attention_mask=mask,
temperature=1.0,
do_sample=False,
max_length=int(os.environ['OUT_LEN']),
top_p=0.9,
top_k=top_k,
repetition_penalty=1.0,
num_return_sequences=10 if top_k > 1 else 1,
num_beams=10
)
preds = [tokenizer.decode(g, clean_up_tokenization_spaces=True) for g in generated_ids]
try:
target = [tokenizer.decode(t, clean_up_tokenization_spaces=True) for t in y]
            except Exception:
target = ['']
source = [tokenizer.decode(s, clean_up_tokenization_spaces=True) for s in ids]
records.append({
'source': source[0],
'target': target[0],
'generations': preds
})
if _ % 100 == 0:
logger.info(f'Completed {_}')
return records
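# Illustrative usage sketch for beam_generations (the output filename below is a
# placeholder, not something this module defines):
#
#   import json
#   records = beam_generations(tokenizer, model, device, val_loader, top_k=1)
#   with open("generations.jsonl", "w") as f:
#       for r in records:  # each r is {'source': ..., 'target': ..., 'generations': [...]}
#           f.write(json.dumps(r) + "\n")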
#
# def batch_greedy_generate(tokenizer, model, dataloader, device, max_num_tokens_to_produce=20):
#
# model.eval()
# with torch.no_grad():
# for _, data in enumerate(dataloader, 0):
# input_ids = data['source_ids'].to(device, dtype = torch.long)
# attn_mask = data['source_mask'].to(device, dtype = torch.long)
#
# pad_token_id = tokenizer.pad_token_id
# eos_token_id = tokenizer.eos_token_id
# eos_not_in_sents = torch.ones(input_ids.shape[0]).long()
#
# last_non_masked_idx = torch.sum(attn_mask, dim=1) - 1
#
# start_idx = inp_idx = (last_non_masked_idx).view(-1, 1).repeat(1, tokenizer.vocab_size).unsqueeze(1)
# past = None
# seq_len = input_ids.size(1)
# position_ids = torch.tensor([list(range(seq_len)) for i in range(input_ids.shape[0])])
# for i, position_ids_slice in enumerate(position_ids):
# position_ids_slice[last_non_masked_idx[i]:] = position_ids_slice[last_non_masked_idx[i]]
#
# for step in range(max_num_tokens_to_produce):
# outputs = model(input_ids, attention_mask=attn_mask, position_ids=position_ids)
#
# if step == 0:
# next_token_logits = outputs[0].gather(1, start_idx).squeeze(1)
# else:
# next_token_logits = outputs[0][:, -1, :]
#
# next_tokens = torch.argmax(next_token_logits, dim=-1)
#
# # this updates which sentences have not seen an <EOS> token so far
# # if one <EOS> token was seen the sentence is finished
# eos_not_in_sents.mul_(next_tokens.ne(eos_token_id).long())
#
# # either append a padding token here if <EOS> has been seen or append next token
# tokens_to_add = next_tokens * (eos_not_in_sents) + pad_token_id * (1 - eos_not_in_sents)
#
# # Update input_ids, attn_mask and position_ids
# input_ids = torch.cat([input_ids, tokens_to_add.unsqueeze(-1)], dim=-1)
# attn_mask = torch.cat([attn_mask, torch.ones((attn_mask.shape[0], 1)).long()], dim=1)
# position_ids = torch.cat([position_ids, (position_ids[:, -1] + 1).unsqueeze(-1)], dim=1)
#
| comet-atomic-2020-master | mosaic/infra/modeling.py |
# Importing stock libraries
import numpy as np
import pandas as pd
import torch
import torch.nn.functional as F
from torch.utils.data import Dataset, DataLoader, RandomSampler, SequentialSampler
# Importing the T5 modules from huggingface/transformers
from transformers import T5Tokenizer, T5ForConditionalGeneration
# Import os for env variables via Beaker
import os
# WandB – Import the wandb library
import wandb
from torch import cuda
import logging
logger = logging.getLogger("gpt2-comet")
logging.basicConfig(level=logging.DEBUG)
class KGDataset(Dataset):
def __init__(self, dataframe, tokenizer, source_len, summ_len, model="t5", is_eval=False):
self.tokenizer = tokenizer
self.data = dataframe
self.source_len = source_len
self.summ_len = summ_len
self.text = self.data.head_event
self.ctext = self.data.tail_event
self.model = model
self.is_eval = is_eval
def __len__(self):
return len(self.text)
def __getitem__(self, index):
text = str(self.text[index])
text = ' '.join(text.split())
ctext = str(self.ctext[index])
ctext = ' '.join(ctext.split())
if self.model == "t5":
source = self.tokenizer.batch_encode_plus([text], pad_to_max_length=True, max_length=self.source_len, return_tensors='pt', truncation=True)
target = self.tokenizer.batch_encode_plus([ctext], pad_to_max_length=True, max_length=self.summ_len, return_tensors='pt', truncation=True)
# source_for_len = self.tokenizer.batch_encode_plus([text], max_length=100, truncation=True)
# target_for_len = self.tokenizer.batch_encode_plus([ctext], max_length=100, truncation=True)
else:
if self.is_eval:
source = self.tokenizer.batch_encode_plus([text], pad_to_max_length=False, max_length=self.source_len, return_tensors='pt', truncation=True)
target = self.tokenizer.batch_encode_plus([ctext], pad_to_max_length=False, max_length=self.summ_len, return_tensors='pt', truncation=True)
else:
source = self.tokenizer.batch_encode_plus([text + ' ' + ctext], pad_to_max_length=True, max_length=self.source_len + self.summ_len, return_tensors='pt', truncation=True)
target = source
if index < 5:
logger.info("Source: {}".format(self.tokenizer.batch_decode(source['input_ids'])))
logger.info("Target: {}".format(self.tokenizer.batch_decode(target['input_ids'])))
source_ids = source['input_ids'].squeeze()
source_mask = source['attention_mask'].squeeze()
target_ids = target['input_ids'].squeeze()
target_mask = target['attention_mask'].squeeze()
return {
'source_ids': source_ids.to(dtype=torch.long),
'source_mask': source_mask.to(dtype=torch.long),
'target_ids': target_ids.to(dtype=torch.long),
'target_ids_y': target_mask.to(dtype=torch.long)
} | comet-atomic-2020-master | mosaic/datasets/KGDataset.py |
comet-atomic-2020-master | mosaic/datasets/__init__.py |
|
from nltk import pos_tag, word_tokenize
from nltk.corpus import stopwords
from nltk.stem import WordNetLemmatizer
import csv
import argparse
import os
exact2tokenized = {}
tokenized2pos = {}
pos2content = {}
def main():
print('\n#######################')
print('Preprocess Part 2')
print('#######################')
data_dir = [(os.path.join(args.data_dir,'atomic2020_exact.tsv'),'atomic2020'),
(os.path.join(args.data_dir,'conceptnet_exact.tsv'),'conceptnet'),
(os.path.join(args.data_dir,'transomcs_exact.tsv'),'transomcs'),
(os.path.join(args.data_dir,'atomic_exact.tsv'),'atomic')
]
sw = set(stopwords.words('english'))
sw.add('x')
sw.add('y')
sw.add('z')
ltz = WordNetLemmatizer()
for f, kb in data_dir:
print('{}: processing file'.format(kb))
d = '/'.join(f.split('/')[:-1]) + '/'
with open(f) as fin, open(d+kb+'_processed.tsv','w') as fout:
reader = csv.reader(fin, delimiter='\t')
writer = csv.writer(fout, delimiter='\t')
lcnt = 0
for line in reader:
if lcnt == 0: # skipping header
lcnt = 1
continue
out_line = process(line, sw, ltz)
writer.writerow(out_line)
def process(line, sw, ltz):
head = line[3]
head_tokens = convert_to_tokens(head)
head_pos = convert_to_pos(head_tokens)
head_content = convert_to_content(head_pos, sw, ltz)
tail = line[4]
tail_tokens = convert_to_tokens(tail)
tail_pos = convert_to_pos(tail_tokens)
tail_content = convert_to_content(tail_pos, sw, ltz)
return line + [head_content, tail_content]
def convert_to_tokens(exact):
if exact in exact2tokenized:
return exact2tokenized[exact]
else:
return tokenize(exact)
def convert_to_pos(tokens):
str_x = list2str(tokens)
if str_x in tokenized2pos:
return tokenized2pos[str_x]
else:
return postag(tokens)
def convert_to_content(pos, sw, ltz):
str_x = list_of_tuple2str(pos)
if str_x in pos2content:
return pos2content[str_x]
else:
return get_content_words(pos, sw, ltz)
def get_content_words(pos_tagged, sw, ltz):
save = []
stop_words = []
for (word, pos) in pos_tagged:
if word in sw:
stop_words.append(word)
continue
if pos[:2] == 'NN':
lemmatized = ltz.lemmatize(word, pos="n")
elif pos[:2] == 'VB':
lemmatized = ltz.lemmatize(word, pos="v")
else:
lemmatized = word
save.append(lemmatized)
if len(save) == 0:
rv = '|'.join(stop_words)
elif len(save) == 1:
rv = save[0]
else:
rv = '|'.join(save)
pos2content[list_of_tuple2str(pos_tagged)] = rv
return rv
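# Illustrative example for get_content_words: given POS-tagged input such as
# [('goes', 'VBZ'), ('to', 'TO'), ('the', 'DT'), ('mall', 'NN')] and the stopword set
# built in main(), stopwords are dropped, verbs and nouns are lemmatized, and the
# remaining content words are joined with '|', yielding "go|mall". When every token is a
# stopword, the stopwords themselves are joined instead so the field is never empty.
# Results are memoized in pos2content.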
def postag(tokenized):
p = pos_tag(tokenized)
tokenized2pos[list2str(tokenized)] = p
return p
def tokenize(exact):
t = word_tokenize(exact)
exact2tokenized[exact] = t
return t
def list_of_tuple2str(lot):
return ",".join("(%s,%s)" % tup for tup in lot)
def list2str(l):
return ",".join(l)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('data_dir', help="directory with KBs in tab separated data files and results from preprocess_kb_triples_part1")
args = parser.parse_args()
main() | comet-atomic-2020-master | human_eval/coverage/preprocess_kb_triples_part2.py |
import pandas as pd
from collections import OrderedDict
import csv
import os
import argparse
def main():
print('\n#######################')
print('Calculate Coverage')
print('#######################')
# OUTPUT DIR
output_dir = os.path.join(args.data_dir, 'output-x')
print("Outputting matches to %s"%output_dir)
if not os.path.exists(output_dir):
os.makedirs(output_dir)
# INPUT FILENAMES
atomic2020_file = os.path.join(args.data_dir, 'atomic2020_processed.tsv')
cn_file = os.path.join(args.data_dir, 'conceptnet_processed.tsv')
atomic_file = os.path.join(args.data_dir, 'atomic_processed.tsv')
trans_omcs_file = os.path.join(args.data_dir, 'transomcs_processed.tsv')
print('\nReading atomic2020')
dict_a2 = read_into_odict(atomic2020_file)
print('Reading conceptnet')
dict_cn = read_into_odict(cn_file)
print('Reading atomic')
dict_a1 = read_into_odict(atomic_file)
print('Reading transOMCS')
dict_to = read_into_odict(trans_omcs_file)
kbs = [('atomic',dict_a1), ('conceptnet',dict_cn), ('atomic2020', dict_a2), ('transomcs',dict_to)]
kbs_vs = [('atomic',dict_a1), ('conceptnet',dict_cn), ('atomic2020', dict_a2), ('transomcs',dict_to)]
print("\nKB PAIR,TUPLE MATCH COUNTS")
for (kb1,d1) in kbs:
for (kb2,d2) in kbs_vs:
if kb1 == kb2:
continue
name = kb1+'-'+kb2
calculate_hrt(d1, d2, mappings[name], name, output_dir)
def calculate_hrt(d1, d2, relation_mappings, pair_name, output_dir, direction='hrt'):
hrt_match = []
hr_match = []
hrt_match_cnt = 0
hr_match_cnt = 0
no_match = []
for relation1 in d1:
if relation1 not in relation_mappings:
continue
mappings = relation_mappings[relation1]
for head1, tails1 in d1[relation1].items():
for relation2 in mappings:
if relation2 not in d2:
continue
for head2, tails2 in d2[relation2].items():
if head2 < head1:
continue
elif head2 == head1:
hr_match_cnt += 1
hr = [relation1, relation2, head1, head2]
hr_match.append(hr)
if direction == 'hrt':
for tail1 in tails1:
for tail2 in tails2:
if tail2 < tail1:
continue
elif tail2 == tail1:
hrt_match_cnt += 1
hrt_match.append(hr + [tail1, tail2])
break
else:
no_match.append(hr + [tail1, tail2])
break
else:
break
print("%s,%s"%(pair_name, hrt_match_cnt))
with open(os.path.join(output_dir,pair_name+'-'+direction+'-match.csv'),'w') as fout:
writer = csv.writer(fout)
writer.writerows(hrt_match)
def read_into_odict(af,direction='hrt'):
if direction == 'trh':
index_by = 6
other_col = 5
else:
index_by = 5
other_col = 6
df_a = pd.read_csv(af, header=None, index_col=[1, index_by], sep='\t')
df = df_a.sort_index()
df.fillna('none',inplace=True)
return df.groupby(level=0).apply(lambda df: df.sort_index().xs(df.name)[other_col].sort_values(ascending=True).groupby(level=0).agg(list).to_dict(OrderedDict)).to_dict(into=OrderedDict)
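# Note on the structure returned by read_into_odict: a nested ordered dict of the form
# {relation: {head_content: [tail_content, ...]}} (keyed by tail_content instead when
# direction='trh'), where column 1 of the processed tsv is the relation and columns 5/6
# are the lemmatized head/tail content strings from preprocess_kb_triples_part2. Heads
# and tails are kept in sorted order, which is what lets calculate_hrt above stop its
# inner scans early once head2/tail2 sort past head1/tail1.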
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('data_dir', help="directory with KBs in tab separated data files and results from preprocess_kb_triples_part2.")
args = parser.parse_args()
# KB LABEL MAPPINGS
atomic_cn_mapping = {
'ObjectUse': ['UsedFor', 'ReceivesAction'],
'HasProperty': ['HasProperty', 'HasA'],
'MadeUpOf': ['PartOf', 'MadeOf', 'HasA', 'ReceivesAction'],
'AtLocation': ['AtLocation', 'ReceivesAction'],
'CapableOf': ['CapableOf', 'HasPrerequisite'],
'Desires': ['Desires'],
'NotDesires': ['NotDesires'],
'xIntent': ['MotivatedByGoal'],
'xReason': ['MotivatedByGoal'],
'xNeed': ['HasPrerequisite'],
'xWant': ['CausesDesire'],
'HasSubEvent': ['HasSubevent', 'HasFirstSubevent', 'HasLastSubevent'],
'Causes': ['Causes', 'ReceivesAction'],
'xEffect': ['Causes'],
'HinderedBy': ['ObstructedBy']
}
cn_atomic_mapping = {
'UsedFor': ['ObjectUse'],
'ReceivesAction':['Causes','ObjectUse','MadeUpOf','AtLocation'],
'HasProperty': ['HasProperty'],
'HasA': ['HasProperty','MadeUpOf'],
'PartOf': ['MadeUpOf'],
'MadeOf': ['MadeUpOf'],
'AtLocation': ['AtLocation'],
'CapableOf': ['CapableOf'],
'HasPrerequisite': ['xNeed','CapableOf'],
'Desires': ['Desires'],
'NotDesires': ['NotDesires'],
'MotivatedByGoal': ['xIntent','xReason'],
'CausesDesire': ['xWant'],
'HasSubevent': ['HasSubEvent'],
'HasFirstSubevent': ['HasSubEvent'],
'HasLastSubevent': ['HasSubEvent'],
'Causes': ['Causes','xEffect'],
'ObstructedBy': ['HinderedBy']
}
atomic_atomic2020_mapping = {
'xIntent': ['xIntent'],
'xNeed': ['xNeed','xReason'],
'xAttr': ['xAttr'],
'xReact': ['xReact'],
'xWant': ['xWant'],
'xEffect': ['xEffect'],
'oReact': ['oReact'],
'oWant': ['oWant'],
'oEffect': ['oEffect']
}
atomic2020_atomic_mapping = {
'xIntent': ['xIntent'],
'xNeed': ['xNeed'],
'xReason': ['xNeed'],
'xAttr': ['xAttr'],
'xReact': ['xReact'],
'xWant': ['xWant'],
'xEffect': ['xEffect'],
'oReact': ['oReact'],
'oWant': ['oWant'],
'oEffect': ['oEffect']
}
conceptnet_labels = {
'AtLocation':['AtLocation'],
'CapableOf':['CapableOf'],
'Causes':['Causes'],
'CausesDesire':['CausesDesire'],
'Desires':['Desires'],
'HasA':['HasA'],
'HasFirstSubevent':['HasFirstSubevent'],
'HasLastSubevent':['HasLastSubevent'],
'HasPrerequisite':['HasPrerequisite'],
'HasProperty':['HasProperty'],
'HasSubevent':['HasSubevent'],
'MadeOf':['MadeOf'],
'MotivatedByGoal':['MotivatedByGoal'],
'NotDesires':['NotDesires'],
'PartOf':['PartOf'],
'ReceivesAction':['ReceivesAction'],
'UsedFor':['UsedFor'],
}
mappings = {
'atomic-conceptnet': atomic_cn_mapping,
'atomic-atomic2020': atomic_atomic2020_mapping,
'atomic-transomcs': atomic_cn_mapping,
'conceptnet-atomic': cn_atomic_mapping,
'conceptnet-atomic2020': cn_atomic_mapping,
'conceptnet-transomcs': conceptnet_labels,
'atomic2020-atomic': atomic2020_atomic_mapping,
'atomic2020-conceptnet': atomic_cn_mapping,
'atomic2020-transomcs': atomic_cn_mapping,
'transomcs-atomic': cn_atomic_mapping,
'transomcs-conceptnet': conceptnet_labels,
'transomcs-atomic2020': cn_atomic_mapping,
}
main()
| comet-atomic-2020-master | human_eval/coverage/calculate_coverage.py |
import pandas as pd
import string
import argparse
import os
str2exact = {}
def main():
print('\n#######################')
print('Preprocess Part 1')
print('#######################')
data_dir = [(os.path.join(args.data_dir,'atomic2020.tsv'),'atomic2020'),
(os.path.join(args.data_dir,'conceptnet.tsv'),'conceptnet'),
(os.path.join(args.data_dir,'transomcs.tsv'),'transomcs'),
(os.path.join(args.data_dir,'atomic.tsv'),'atomic')
]
conceptnet_label_whitelist = {
'AtLocation':None,#
'CapableOf':None,#
'Causes':None,#
'CausesDesire':None,#
'Desires':None,#
'HasA':None,#
'HasFirstSubevent':None,#
'HasLastSubevent':None,#
'HasPrerequisite':None,#
'HasProperty':None,#
'HasSubevent':None,#
'MadeOf':None,#
'MotivatedByGoal':None,#
'NotDesires':None,#
'PartOf':None,#
'ReceivesAction':None,#
'UsedFor':None,#
'ObstructedBy':None
}
for f,kb in data_dir:
print('\n{}: reading file'.format(kb))
df_all = pd.read_csv(f, sep='\t')
before_size = len(df_all)
df_all.drop_duplicates(inplace=True)
before_uniq = len(df_all)
if kb.startswith('atomic'):
df = df_all.copy()
else:
df = df_all[df_all['relation'].isin(conceptnet_label_whitelist)].copy()
print('{}: processing head'.format(kb))
df['head_exact'] = df[['head','relation']].apply(lambda x: str2exact[x['head']] if x['head'] in str2exact else clean_str(x['head'], kb, x['relation']), axis=1)
print('{}: processing tail'.format(kb))
df['tail_exact'] = df[['tail','relation']].apply(lambda x: str2exact[x['tail']] if x['tail'] in str2exact else clean_str(x['tail'], kb, x['relation']), axis=1)
print('{}: writing processed file'.format(kb))
d = '/'.join(f.split('/')[:-1]) + '/'
df[['head', 'relation', 'tail', 'head_exact', 'tail_exact']].to_csv(d + kb + '_exact.tsv', index=False, sep='\t')
def clean_str(s_raw,kb, relation):
if pd.isnull(s_raw):
s_raw = ''
s = s_raw.lower()
if kb[:6] == 'atomic' and 'person' in s:
s = s.replace('personx','person')
s = s.replace('persony','person')
s = s.replace('personz','person')
s = s.strip().translate(str.maketrans('', '', string.punctuation))
l = s.split()
if not l:
rv = ''
elif kb[:6] == 'atomic' and (relation[0] in ["o","x"] or relation in ['isFilledBy', 'HinderedBy', 'isBefore', 'isAfter']) and l[0][:6]=='person':
rv = ' '.join(l[1:])
else:
rv = ' '.join(l)
str2exact[s_raw] = rv
return rv
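# Illustrative example for clean_str: it lowercases, maps PersonX/Y/Z to "person", strips
# punctuation, and for ATOMIC-style relations (x*/o*, isFilledBy, HinderedBy, isBefore,
# isAfter) drops a leading "person" token, so
#   clean_str("PersonX goes to the mall.", "atomic", "xWant")  ->  "goes to the mall"
# Results are memoized in str2exact, keyed by the raw input string.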
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('data_dir', help="directory with KBs in tab separated data files. Required headers and columns: [head, relation, tail]")
args = parser.parse_args()
main()
| comet-atomic-2020-master | human_eval/coverage/preprocess_kb_triples_part1.py |
import argparse
import logging
import os
from pathlib import Path
from typing import Any, Dict
import pytorch_lightning as pl
from pytorch_lightning.utilities import rank_zero_info
from transformers import (
AdamW,
AutoConfig,
AutoModel,
AutoModelForPreTraining,
AutoModelForQuestionAnswering,
AutoModelForSeq2SeqLM,
AutoModelForSequenceClassification,
AutoModelForTokenClassification,
AutoModelWithLMHead,
AutoTokenizer,
PretrainedConfig,
PreTrainedTokenizer,
get_linear_schedule_with_warmup,
)
logger = logging.getLogger(__name__)
MODEL_MODES = {
"base": AutoModel,
"sequence-classification": AutoModelForSequenceClassification,
"question-answering": AutoModelForQuestionAnswering,
"pretraining": AutoModelForPreTraining,
"token-classification": AutoModelForTokenClassification,
"language-modeling": AutoModelWithLMHead,
"summarization": AutoModelForSeq2SeqLM,
"translation": AutoModelForSeq2SeqLM,
}
class BaseTransformer(pl.LightningModule):
def __init__(
self,
hparams: argparse.Namespace,
num_labels=None,
mode="base",
config=None,
tokenizer=None,
model=None,
**config_kwargs
):
"""Initialize a model, tokenizer and config."""
super().__init__()
# TODO: move to self.save_hyperparameters()
# self.save_hyperparameters()
# can also expand arguments into trainer signature for easier reading
self.hparams = hparams
self.step_count = 0
self.tfmr_ckpts = {}
self.output_dir = Path(self.hparams.output_dir)
cache_dir = self.hparams.cache_dir if self.hparams.cache_dir else None
if config is None:
self.config = AutoConfig.from_pretrained(
self.hparams.config_name if self.hparams.config_name else self.hparams.model_name_or_path,
**({"num_labels": num_labels} if num_labels is not None else {}),
cache_dir=cache_dir,
**config_kwargs,
)
else:
self.config: PretrainedConfig = config
if tokenizer is None:
self.tokenizer = AutoTokenizer.from_pretrained(
self.hparams.tokenizer_name if self.hparams.tokenizer_name else self.hparams.model_name_or_path,
cache_dir=cache_dir,
)
else:
self.tokenizer: PreTrainedTokenizer = tokenizer
self.model_type = MODEL_MODES[mode]
if model is None:
self.model = self.model_type.from_pretrained(
self.hparams.model_name_or_path,
from_tf=bool(".ckpt" in self.hparams.model_name_or_path),
config=self.config,
cache_dir=cache_dir,
)
else:
self.model = model
def load_hf_checkpoint(self, *args, **kwargs):
self.model = self.model_type.from_pretrained(*args, **kwargs)
def configure_optimizers(self):
"Prepare optimizer and schedule (linear warmup and decay)"
model = self.model
no_decay = ["bias", "LayerNorm.weight"]
optimizer_grouped_parameters = [
{
"params": [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)],
"weight_decay": self.hparams.weight_decay,
},
{
"params": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)],
"weight_decay": 0.0,
},
]
optimizer = AdamW(optimizer_grouped_parameters, lr=self.hparams.learning_rate, eps=self.hparams.adam_epsilon)
self.opt = optimizer
scheduler = get_linear_schedule_with_warmup(
self.opt, num_warmup_steps=self.hparams.warmup_steps, num_training_steps=self.total_steps
)
scheduler = {"scheduler": scheduler, "interval": "step", "frequency": 1}
return [optimizer], [scheduler]
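    # Note on the parameter groups above: biases and LayerNorm weights are excluded from
    # weight decay (the usual AdamW practice); all remaining parameters are decayed with
    # self.hparams.weight_decay.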
def test_step(self, batch, batch_nb):
return self.validation_step(batch, batch_nb)
def test_epoch_end(self, outputs):
return self.validation_end(outputs)
def setup(self, step):
train_batch_size = self.hparams.train_batch_size
dataloader = self.get_dataloader("train", train_batch_size)
self.train_loader = dataloader
self.total_steps = (
(len(dataloader.dataset) // (train_batch_size * max(1, self.hparams.gpus)))
// self.hparams.accumulate_grad_batches
* float(self.hparams.max_epochs)
)
def train_dataloader(self):
return self.train_loader
def val_dataloader(self):
return self.get_dataloader("dev", self.hparams.eval_batch_size)
def test_dataloader(self):
return self.get_dataloader("test", self.hparams.eval_batch_size)
def _feature_file(self, mode):
return os.path.join(
self.hparams.data_dir,
"cached_{}_{}_{}".format(
mode,
list(filter(None, self.hparams.model_name_or_path.split("/"))).pop(),
str(self.hparams.max_seq_length),
),
)
@pl.utilities.rank_zero_only
def on_save_checkpoint(self, checkpoint: Dict[str, Any]) -> None:
save_path = self.output_dir.joinpath("best_tfmr")
save_path.mkdir(exist_ok=True)
self.model.config.save_step = self.step_count
self.model.save_pretrained(save_path)
self.tokenizer.save_pretrained(save_path)
self.tfmr_ckpts[self.step_count] = save_path
@staticmethod
def add_model_specific_args(parser, root_dir):
parser.add_argument(
"--model_name_or_path",
default=None,
type=str,
required=True,
help="Path to pretrained model or model identifier from huggingface.co/models",
)
parser.add_argument(
"--config_name", default="", type=str, help="Pretrained config name or path if not the same as model_name"
)
parser.add_argument(
"--tokenizer_name",
default=None,
type=str,
help="Pretrained tokenizer name or path if not the same as model_name",
)
parser.add_argument(
"--cache_dir",
default="",
type=str,
help="Where do you want to store the pre-trained models downloaded from s3",
)
parser.add_argument("--learning_rate", default=5e-5, type=float, help="The initial learning rate for Adam.")
parser.add_argument("--weight_decay", default=0.0, type=float, help="Weight decay if we apply some.")
parser.add_argument("--adam_epsilon", default=1e-8, type=float, help="Epsilon for Adam optimizer.")
parser.add_argument("--warmup_steps", default=0, type=int, help="Linear warmup over warmup_steps.")
parser.add_argument("--num_workers", default=4, type=int, help="kwarg passed to DataLoader")
parser.add_argument("--num_train_epochs", dest="max_epochs", default=3, type=int)
parser.add_argument("--train_batch_size", default=32, type=int)
parser.add_argument("--eval_batch_size", default=32, type=int)
class LoggingCallback(pl.Callback):
def on_batch_end(self, trainer, pl_module):
lrs = {f"lr_group_{i}": lr for i, lr in enumerate(self.lr_scheduler.get_lr())}
pl_module.logger.log_metrics(lrs)
def on_validation_end(self, trainer: pl.Trainer, pl_module: pl.LightningModule):
rank_zero_info("***** Validation results *****")
metrics = trainer.callback_metrics
# Log results
for key in sorted(metrics):
if key not in ["log", "progress_bar"]:
rank_zero_info("{} = {}\n".format(key, str(metrics[key])))
def on_test_end(self, trainer: pl.Trainer, pl_module: pl.LightningModule):
rank_zero_info("***** Test results *****")
metrics = trainer.callback_metrics
# Log and save results to file
output_test_results_file = os.path.join(pl_module.hparams.output_dir, "test_results.txt")
with open(output_test_results_file, "w") as writer:
for key in sorted(metrics):
if key not in ["log", "progress_bar"]:
rank_zero_info("{} = {}\n".format(key, str(metrics[key])))
writer.write("{} = {}\n".format(key, str(metrics[key])))
def add_generic_args(parser, root_dir) -> None:
# TODO(SS): allow all pl args? parser = pl.Trainer.add_argparse_args(parser)
parser.add_argument(
"--output_dir",
default=None,
type=str,
required=True,
help="The output directory where the model predictions and checkpoints will be written.",
)
parser.add_argument(
"--fp16",
action="store_true",
help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit",
)
parser.add_argument(
"--fp16_opt_level",
type=str,
default="O2",
help="For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."
"See details at https://nvidia.github.io/apex/amp.html",
)
parser.add_argument("--n_tpu_cores", dest="tpu_cores", type=int, default=0)
parser.add_argument("--max_grad_norm", dest="gradient_clip_val", default=1.0, type=float, help="Max gradient norm")
parser.add_argument("--do_train", action="store_true", help="Whether to run training.")
parser.add_argument("--do_predict", action="store_true", help="Whether to run predictions on the test set.")
parser.add_argument(
"--gradient_accumulation_steps",
dest="accumulate_grad_batches",
type=int,
default=1,
help="Number of updates steps to accumulate before performing a backward/update pass.",
)
parser.add_argument("--seed", type=int, default=42, help="random seed for initialization")
def generic_train(
model: BaseTransformer,
args: argparse.Namespace,
early_stopping_callback=False,
logger=True, # can pass WandbLogger() here
extra_callbacks=[],
checkpoint_callback=None,
logging_callback=None,
**extra_train_kwargs
):
pl.seed_everything(args.seed)
# init model
odir = Path(model.hparams.output_dir)
odir.mkdir(exist_ok=True)
# add custom checkpoints
if checkpoint_callback is None:
checkpoint_callback = pl.callbacks.ModelCheckpoint(
filepath=args.output_dir, prefix="checkpoint", monitor="val_loss", mode="min", save_top_k=1
)
if logging_callback is None:
logging_callback = LoggingCallback()
train_params = {}
# TODO: remove with PyTorch 1.6 since pl uses native amp
if args.fp16:
train_params["precision"] = 16
train_params["amp_level"] = args.fp16_opt_level
if args.gpus > 1:
train_params["distributed_backend"] = "ddp"
trainer = pl.Trainer.from_argparse_args(
args,
weights_summary=None,
callbacks=[logging_callback] + extra_callbacks,
logger=logger,
checkpoint_callback=checkpoint_callback,
early_stop_callback=early_stopping_callback,
**train_params,
)
if args.do_train:
trainer.fit(model)
return trainer
| comet-atomic-2020-master | models/comet_atomic2020_bart/lightning_base.py |
import itertools
import json
import linecache
import os
import pickle
import warnings
from logging import getLogger
from pathlib import Path
from typing import Callable, Dict, Iterable, List
import git
import numpy as np
import torch
from rouge_score import rouge_scorer, scoring
from sacrebleu import corpus_bleu
from torch import nn
from torch.utils.data import Dataset, Sampler
from transformers import BartTokenizer
def encode_line(tokenizer, line, max_length, pad_to_max_length=True, return_tensors="pt"):
extra_kw = {"add_prefix_space": True} if isinstance(tokenizer, BartTokenizer) else {}
return tokenizer(
[line],
max_length=max_length,
padding="max_length" if pad_to_max_length else None,
truncation=True,
return_tensors=return_tensors,
**extra_kw,
)
def lmap(f: Callable, x: Iterable) -> List:
"""list(map(f, x))"""
return list(map(f, x))
def calculate_bleu_score(output_lns, refs_lns, **kwargs) -> dict:
"""Uses sacrebleu's corpus_bleu implementation."""
return {"bleu": corpus_bleu(output_lns, [refs_lns], **kwargs).score}
def trim_batch(
input_ids, pad_token_id, attention_mask=None,
):
"""Remove columns that are populated exclusively by pad_token_id"""
keep_column_mask = input_ids.ne(pad_token_id).any(dim=0)
if attention_mask is None:
return input_ids[:, keep_column_mask]
else:
return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask])
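# Illustrative example for trim_batch, assuming pad_token_id = 0:
#   input_ids = tensor([[5, 6, 0, 0],
#                       [7, 0, 0, 0]])
#   trim_batch(input_ids, 0)  ->  tensor([[5, 6],
#                                         [7, 0]])
# Only columns that are padding in every row of the batch are removed, so real tokens are
# untouched while the sequences passed to the model get shorter.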
class Seq2SeqDataset(Dataset):
def __init__(
self,
tokenizer,
data_dir,
max_source_length,
max_target_length,
type_path="train",
n_obs=None,
src_lang=None,
tgt_lang=None,
prefix="",
):
super().__init__()
self.src_file = Path(data_dir).joinpath(type_path + ".source")
self.tgt_file = Path(data_dir).joinpath(type_path + ".target")
self.src_lens = self.get_char_lens(self.src_file)
self.max_source_length = max_source_length
self.max_target_length = max_target_length
assert min(self.src_lens) > 0, f"found empty line in {self.src_file}"
self.tokenizer = tokenizer
self.prefix = prefix
if n_obs is not None:
self.src_lens = self.src_lens[:n_obs]
self.pad_token_id = self.tokenizer.pad_token_id
self.src_lang = src_lang
self.tgt_lang = tgt_lang
def __len__(self):
return len(self.src_lens)
def __getitem__(self, index) -> Dict[str, torch.Tensor]:
index = index + 1 # linecache starts at 1
source_line = self.prefix + linecache.getline(str(self.src_file), index).rstrip("\n")
tgt_line = linecache.getline(str(self.tgt_file), index).rstrip("\n")
assert source_line, f"empty source line for index {index}"
assert tgt_line, f"empty tgt line for index {index}"
source_inputs = encode_line(self.tokenizer, source_line, self.max_source_length)
target_inputs = encode_line(self.tokenizer, tgt_line, self.max_target_length)
source_ids = source_inputs["input_ids"].squeeze()
target_ids = target_inputs["input_ids"].squeeze()
src_mask = source_inputs["attention_mask"].squeeze()
return {
"input_ids": source_ids,
"attention_mask": src_mask,
"decoder_input_ids": target_ids,
}
@staticmethod
def get_char_lens(data_file):
return [len(x) for x in Path(data_file).open().readlines()]
@staticmethod
def trim_seq2seq_batch(batch, pad_token_id) -> tuple:
y = trim_batch(batch["decoder_input_ids"], pad_token_id)
source_ids, source_mask = trim_batch(batch["input_ids"], pad_token_id, attention_mask=batch["attention_mask"])
return source_ids, source_mask, y
def collate_fn(self, batch) -> Dict[str, torch.Tensor]:
input_ids = torch.stack([x["input_ids"] for x in batch])
masks = torch.stack([x["attention_mask"] for x in batch])
target_ids = torch.stack([x["decoder_input_ids"] for x in batch])
pad_token_id = self.pad_token_id
y = trim_batch(target_ids, pad_token_id)
source_ids, source_mask = trim_batch(input_ids, pad_token_id, attention_mask=masks)
batch = {
"input_ids": source_ids,
"attention_mask": source_mask,
"decoder_input_ids": y,
}
return batch
def make_sortish_sampler(self, batch_size):
return SortishSampler(self.src_lens, batch_size)
class MBartDataset(Seq2SeqDataset):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
if self.max_source_length != self.max_target_length:
warnings.warn(
f"Mbart will ignore max_target_length = {self.max_target_length} and use {self.max_source_length} for both sides."
)
def __getitem__(self, index) -> Dict[str, str]:
index = index + 1 # linecache starts at 1
source_line = self.prefix + linecache.getline(str(self.src_file), index).rstrip("\n")
tgt_line = linecache.getline(str(self.tgt_file), index).rstrip("\n")
assert source_line, f"empty source line for index {index}"
assert tgt_line, f"empty tgt line for index {index}"
return {
"tgt_texts": source_line,
"src_texts": tgt_line,
}
def collate_fn(self, batch) -> Dict[str, torch.Tensor]:
batch_encoding = self.tokenizer.prepare_translation_batch(
[x["src_texts"] for x in batch],
src_lang=self.src_lang,
tgt_texts=[x["tgt_texts"] for x in batch],
tgt_lang=self.tgt_lang,
max_length=self.max_source_length,
)
return batch_encoding.data
class SortishSampler(Sampler):
"Go through the text data by order of src length with a bit of randomness. From fastai repo."
def __init__(self, data, batch_size):
self.data, self.bs = data, batch_size
def key(self, i):
return self.data[i]
def __len__(self) -> int:
return len(self.data)
def __iter__(self):
idxs = np.random.permutation(len(self.data))
sz = self.bs * 50
ck_idx = [idxs[i : i + sz] for i in range(0, len(idxs), sz)]
sort_idx = np.concatenate([sorted(s, key=self.key, reverse=True) for s in ck_idx])
sz = self.bs
ck_idx = [sort_idx[i : i + sz] for i in range(0, len(sort_idx), sz)]
max_ck = np.argmax([self.key(ck[0]) for ck in ck_idx]) # find the chunk with the largest key,
ck_idx[0], ck_idx[max_ck] = ck_idx[max_ck], ck_idx[0] # then make sure it goes first.
        sort_idx = np.concatenate(np.random.permutation(ck_idx[1:])) if len(ck_idx) > 1 else np.array([], dtype=int)
sort_idx = np.concatenate((ck_idx[0], sort_idx))
return iter(sort_idx)
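# Note on SortishSampler: indices are shuffled, grouped into mega-chunks of 50 * batch_size,
# each mega-chunk is sorted by source length (longest first), and the result is re-chunked
# into batch-sized pieces that are shuffled again, with the chunk containing the longest
# example placed first. Batches therefore hold similarly-sized sequences (less padding)
# while the batch order still varies between epochs.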
logger = getLogger(__name__)
def use_task_specific_params(model, task):
"""Update config with summarization specific params."""
task_specific_params = model.config.task_specific_params
if task_specific_params is not None:
pars = task_specific_params.get(task, {})
logger.info(f"using task specific params for {task}: {pars}")
model.config.update(pars)
def pickle_load(path):
"""pickle.load(path)"""
with open(path, "rb") as f:
return pickle.load(f)
def pickle_save(obj, path):
"""pickle.dump(obj, path)"""
with open(path, "wb") as f:
return pickle.dump(obj, f)
def flatten_list(summary_ids: List[List]):
return [x for x in itertools.chain.from_iterable(summary_ids)]
def save_git_info(folder_path: str) -> None:
"""Save git information to output_dir/git_log.json"""
repo_infos = get_git_info()
save_json(repo_infos, os.path.join(folder_path, "git_log.json"))
def save_json(content, path):
with open(path, "w") as f:
json.dump(content, f, indent=4)
def load_json(path):
with open(path) as f:
return json.load(f)
def get_git_info():
repo = git.Repo(search_parent_directories=True)
repo_infos = {
"repo_id": str(repo),
"repo_sha": str(repo.head.object.hexsha),
"repo_branch": str(repo.active_branch),
}
return repo_infos
ROUGE_KEYS = ["rouge1", "rouge2", "rougeL"]
def calculate_rouge(output_lns: List[str], reference_lns: List[str], use_stemmer=True) -> Dict:
scorer = rouge_scorer.RougeScorer(ROUGE_KEYS, use_stemmer=use_stemmer)
aggregator = scoring.BootstrapAggregator()
for reference_ln, output_ln in zip(reference_lns, output_lns):
scores = scorer.score(reference_ln, output_ln)
aggregator.add_scores(scores)
result = aggregator.aggregate()
return {k: v.mid.fmeasure for k, v in result.items()}
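# Illustrative usage for calculate_rouge:
#   calculate_rouge(["the cat sat on the mat"], ["the cat sat on a mat"])
# returns a dict with keys "rouge1", "rouge2" and "rougeL", each holding the mid f-measure
# of the bootstrap aggregate computed above.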
def freeze_params(model: nn.Module):
for par in model.parameters():
par.requires_grad = False
def grad_status(model: nn.Module) -> Iterable:
return (par.requires_grad for par in model.parameters())
def any_requires_grad(model: nn.Module) -> bool:
return any(grad_status(model))
def assert_all_frozen(model):
model_grads: List[bool] = list(grad_status(model))
n_require_grad = sum(lmap(int, model_grads))
npars = len(model_grads)
assert not any(model_grads), f"{n_require_grad/npars:.1%} of {npars} weights require grad"
def assert_not_all_frozen(model):
model_grads: List[bool] = list(grad_status(model))
npars = len(model_grads)
assert any(model_grads), f"none of {npars} weights require grad"
| comet-atomic-2020-master | models/comet_atomic2020_bart/utils.py |
import logging
import os
from pathlib import Path
import numpy as np
import pytorch_lightning as pl
import torch
from pytorch_lightning.callbacks import ModelCheckpoint
from pytorch_lightning.utilities import rank_zero_only
def count_trainable_parameters(model):
model_parameters = filter(lambda p: p.requires_grad, model.parameters())
params = sum([np.prod(p.size()) for p in model_parameters])
return params
logger = logging.getLogger(__name__)
class Seq2SeqLoggingCallback(pl.Callback):
@rank_zero_only
def _write_logs(
self, trainer: pl.Trainer, pl_module: pl.LightningModule, type_path: str, save_generations=True
) -> None:
logger.info(f"***** {type_path} results at step {trainer.global_step:05d} *****")
metrics = trainer.callback_metrics
trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ["log", "progress_bar", "preds"]})
# Log results
od = Path(pl_module.hparams.output_dir)
if type_path == "test":
results_file = od / "test_results.txt"
generations_file = od / "test_generations.txt"
else:
# this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json
# If people want this it will be easy enough to add back.
results_file = od / f"{type_path}_results/{trainer.global_step:05d}.txt"
generations_file = od / f"{type_path}_generations/{trainer.global_step:05d}.txt"
results_file.parent.mkdir(exist_ok=True)
generations_file.parent.mkdir(exist_ok=True)
with open(results_file, "a+") as writer:
for key in sorted(metrics):
if key in ["log", "progress_bar", "preds"]:
continue
val = metrics[key]
if isinstance(val, torch.Tensor):
val = val.item()
msg = f"{key}: {val:.6f}\n"
writer.write(msg)
if not save_generations:
return
if "preds" in metrics:
content = "\n".join(metrics["preds"])
generations_file.open("w+").write(content)
@rank_zero_only
def on_train_start(self, trainer, pl_module):
try:
npars = pl_module.model.model.num_parameters()
except AttributeError:
npars = pl_module.model.num_parameters()
n_trainable_pars = count_trainable_parameters(pl_module)
# mp stands for million parameters
trainer.logger.log_metrics({"n_params": npars, "mp": npars / 1e6, "grad_mp": n_trainable_pars / 1e6})
@rank_zero_only
def on_test_end(self, trainer: pl.Trainer, pl_module: pl.LightningModule):
return self._write_logs(trainer, pl_module, "test")
def get_checkpoint_callback(output_dir, metric):
"""Saves the best model by validation ROUGE2 score."""
if metric == "rouge2":
exp = "{val_avg_rouge2:.4f}-{step_count}"
elif metric == "bleu":
exp = "{val_avg_bleu:.4f}-{step_count}"
else:
raise NotImplementedError(
f"seq2seq callbacks only support rouge2 and bleu, got {metric}, You can make your own by adding to this function."
)
checkpoint_callback = ModelCheckpoint(
filepath=os.path.join(output_dir, exp),
monitor=f"val_{metric}",
mode="max",
save_top_k=1,
period=0, # maybe save a checkpoint every time val is run, not just end of epoch.
)
return checkpoint_callback
| comet-atomic-2020-master | models/comet_atomic2020_bart/callbacks.py |
import json
import torch
import argparse
from tqdm import tqdm
from pathlib import Path
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
from utils import calculate_rouge, use_task_specific_params, calculate_bleu_score, trim_batch
def chunks(lst, n):
"""Yield successive n-sized chunks from lst."""
for i in range(0, len(lst), n):
yield lst[i : i + n]
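# Illustrative example for chunks:
#   list(chunks([1, 2, 3, 4, 5], 2))  ->  [[1, 2], [3, 4], [5]]
# The Comet wrapper below uses it to feed queries to the model in fixed-size batches
# (self.batch_size is set to 1 in __init__).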
class Comet:
def __init__(self, model_path):
self.device = "cuda" if torch.cuda.is_available() else "cpu"
self.model = AutoModelForSeq2SeqLM.from_pretrained(model_path).to(self.device)
self.tokenizer = AutoTokenizer.from_pretrained(model_path)
task = "summarization"
use_task_specific_params(self.model, task)
self.batch_size = 1
self.decoder_start_token_id = None
def generate(
self,
queries,
decode_method="beam",
num_generate=5,
):
with torch.no_grad():
examples = queries
decs = []
for batch in list(chunks(examples, self.batch_size)):
batch = self.tokenizer(batch, return_tensors="pt", truncation=True, padding="max_length").to(self.device)
input_ids, attention_mask = trim_batch(**batch, pad_token_id=self.tokenizer.pad_token_id)
summaries = self.model.generate(
input_ids=input_ids,
attention_mask=attention_mask,
decoder_start_token_id=self.decoder_start_token_id,
num_beams=num_generate,
num_return_sequences=num_generate,
)
dec = self.tokenizer.batch_decode(summaries, skip_special_tokens=True, clean_up_tokenization_spaces=False)
decs.append(dec)
return decs
all_relations = [
"AtLocation",
"CapableOf",
"Causes",
"CausesDesire",
"CreatedBy",
"DefinedAs",
"DesireOf",
"Desires",
"HasA",
"HasFirstSubevent",
"HasLastSubevent",
"HasPainCharacter",
"HasPainIntensity",
"HasPrerequisite",
"HasProperty",
"HasSubEvent",
"HasSubevent",
"HinderedBy",
"InheritsFrom",
"InstanceOf",
"IsA",
"LocatedNear",
"LocationOfAction",
"MadeOf",
"MadeUpOf",
"MotivatedByGoal",
"NotCapableOf",
"NotDesires",
"NotHasA",
"NotHasProperty",
"NotIsA",
"NotMadeOf",
"ObjectUse",
"PartOf",
"ReceivesAction",
"RelatedTo",
"SymbolOf",
"UsedFor",
"isAfter",
"isBefore",
"isFilledBy",
"oEffect",
"oReact",
"oWant",
"xAttr",
"xEffect",
"xIntent",
"xNeed",
"xReact",
"xReason",
"xWant",
]
if __name__ == "__main__":
# sample usage (reproducing AAAI)
print("model loading ...")
comet = Comet("./comet-atomic_2020_BART_aaai")
comet.model.zero_grad()
print("model loaded")
queries = []
head = "PersonX pleases ___ to make"
rel = "xWant"
query = "{} {}".format(head, rel)
queries.append(query)
print(queries)
results = comet.generate(queries, decode_method="greedy", num_generate=1)
print(results)
# sample usage (reproducing demo)
print("model loading ...")
comet = Comet("./comet-atomic_2020_BART")
comet.model.zero_grad()
print("model loaded")
queries = []
head = "PersonX pleases ___ to make"
rel = "xWant"
query = "{} {} [GEN]".format(head, rel)
queries.append(query)
print(queries)
results = comet.generate(queries, decode_method="beam", num_generate=5)
print(results)
| comet-atomic-2020-master | models/comet_atomic2020_bart/generation_example.py |
import argparse
import glob
import logging
import os
import time
import warnings
from collections import defaultdict
from pathlib import Path
from typing import Dict, List, Tuple
import numpy as np
import pytorch_lightning as pl
import torch
from torch.utils.data import DataLoader
from lightning_base import BaseTransformer, add_generic_args, generic_train
from transformers import MBartTokenizer, get_linear_schedule_with_warmup
try:
from .utils import (
assert_all_frozen,
use_task_specific_params,
lmap,
flatten_list,
pickle_save,
save_git_info,
save_json,
freeze_params,
calculate_rouge,
get_git_info,
ROUGE_KEYS,
calculate_bleu_score,
Seq2SeqDataset,
MBartDataset,
)
from .callbacks import Seq2SeqLoggingCallback, get_checkpoint_callback
except ImportError:
from utils import (
Seq2SeqDataset,
MBartDataset,
assert_all_frozen,
use_task_specific_params,
lmap,
flatten_list,
pickle_save,
save_git_info,
save_json,
freeze_params,
calculate_rouge,
get_git_info,
ROUGE_KEYS,
calculate_bleu_score,
)
from callbacks import Seq2SeqLoggingCallback, get_checkpoint_callback
logger = logging.getLogger(__name__)
class SummarizationModule(BaseTransformer):
mode = "summarization"
loss_names = ["loss"]
metric_names = ROUGE_KEYS
val_metric = "rouge2"
def __init__(self, hparams, **kwargs):
super().__init__(hparams, num_labels=None, mode=self.mode, **kwargs)
#use_task_specific_params(self.model, "summarization")
#save_git_info(self.hparams.output_dir)
#self.metrics_save_path = Path("/results/metrics.json")
self.metrics_save_path = Path(self.output_dir) / "metrics.json"
self.hparams_save_path = Path(self.output_dir) / "hparams.pkl"
pickle_save(self.hparams, self.hparams_save_path)
self.step_count = 0
self.metrics = defaultdict(list)
self.dataset_kwargs: dict = dict(
data_dir=self.hparams.data_dir,
max_source_length=self.hparams.max_source_length,
prefix=self.model.config.prefix or "",
)
n_observations_per_split = {
"train": self.hparams.n_train,
"val": self.hparams.n_val,
"test": self.hparams.n_test,
}
self.n_obs = {k: v if v >= 0 else None for k, v in n_observations_per_split.items()}
self.target_lens = {
"train": self.hparams.max_target_length,
"val": self.hparams.val_max_target_length,
"test": self.hparams.test_max_target_length,
}
assert self.target_lens["train"] <= self.target_lens["val"], f"target_lens: {self.target_lens}"
assert self.target_lens["train"] <= self.target_lens["test"], f"target_lens: {self.target_lens}"
if self.hparams.freeze_embeds:
self.freeze_embeds()
if self.hparams.freeze_encoder:
freeze_params(self.model.get_encoder())
assert_all_frozen(self.model.get_encoder())
#self.hparams.git_sha = get_git_info()["repo_sha"]
try:
self.num_workers = hparams.num_workers
except AttributeError:
self.num_workers = 2
self.decoder_start_token_id = None
self.dataset_class = Seq2SeqDataset
def freeze_embeds(self):
"""Freeze token embeddings and positional embeddings for bart, just token embeddings for t5."""
try:
freeze_params(self.model.model.shared)
for d in [self.model.model.encoder, self.model.model.decoder]:
freeze_params(d.embed_positions)
freeze_params(d.embed_tokens)
except AttributeError:
freeze_params(self.model.shared)
for d in [self.model.encoder, self.model.decoder]:
freeze_params(d.embed_tokens)
def forward(self, input_ids, **kwargs):
return self.model(input_ids, **kwargs)
def ids_to_clean_text(self, generated_ids: List[int]):
gen_text = self.tokenizer.batch_decode(
generated_ids, skip_special_tokens=True, clean_up_tokenization_spaces=True
)
return lmap(str.strip, gen_text)
def _step(self, batch: dict) -> Tuple:
pad_token_id = self.tokenizer.pad_token_id
source_ids, source_mask, y = batch["input_ids"], batch["attention_mask"], batch["decoder_input_ids"]
y_ids = y[:, :-1].contiguous()
lm_labels = y[:, 1:].clone()
lm_labels[y[:, 1:] == pad_token_id] = -100
outputs = self(source_ids, attention_mask=source_mask, decoder_input_ids=y_ids, labels=lm_labels,)
loss = outputs[0]
return (loss,)
def training_step(self, batch, batch_idx) -> Dict:
loss_tensors = self._step(batch)
logs = {name: loss for name, loss in zip(self.loss_names, loss_tensors)}
return {"loss": loss_tensors[0], "log": logs}
def validation_step(self, batch, batch_idx) -> Dict:
return self._generative_step(batch)
def validation_epoch_end(self, outputs, prefix="val") -> Dict:
self.step_count += 1
losses = {k: torch.stack([x[k] for x in outputs]).mean() for k in self.loss_names}
loss = losses["loss"]
rouges = {k: np.array([x[k] for x in outputs]).mean() for k in self.metric_names + ["gen_time", "summ_len"]}
rouge_tensor: torch.FloatTensor = torch.tensor(rouges[self.val_metric]).type_as(loss)
rouges.update({k: v.item() for k, v in losses.items()})
losses.update(rouges)
metrics = {f"{prefix}_avg_{k}": x for k, x in losses.items()}
metrics["avg_rouge1"] = losses['rouge1']
metrics["step_count"] = self.step_count
self.save_metrics(metrics, prefix) # writes to self.metrics_save_path
preds = flatten_list([x["preds"] for x in outputs])
return {"log": metrics, "preds": preds, f"{prefix}_loss": loss, f"{prefix}_{self.val_metric}": rouge_tensor}
def save_metrics(self, latest_metrics, type_path) -> None:
self.metrics[type_path].append(latest_metrics)
save_json(self.metrics, self.metrics_save_path)
def calc_generative_metrics(self, preds, target) -> Dict:
return calculate_rouge(preds, target)
def _generative_step(self, batch: dict) -> dict:
pad_token_id = self.tokenizer.pad_token_id
source_ids, source_mask, y = Seq2SeqDataset.trim_seq2seq_batch(batch, pad_token_id)
t0 = time.time()
generated_ids = self.model.generate(
input_ids=source_ids,
attention_mask=source_mask,
use_cache=True,
decoder_start_token_id=self.decoder_start_token_id,
)
gen_time = (time.time() - t0) / source_ids.shape[0]
preds = self.ids_to_clean_text(generated_ids)
target = self.ids_to_clean_text(y)
loss_tensors = self._step(batch)
base_metrics = {name: loss for name, loss in zip(self.loss_names, loss_tensors)}
rouge: Dict = self.calc_generative_metrics(preds, target)
summ_len = np.mean(lmap(len, generated_ids))
base_metrics.update(gen_time=gen_time, summ_len=summ_len, preds=preds, target=target, **rouge)
return base_metrics
def test_step(self, batch, batch_idx):
return self._generative_step(batch)
def test_epoch_end(self, outputs):
return self.validation_epoch_end(outputs, prefix="test")
def get_dataset(self, type_path) -> Seq2SeqDataset:
n_obs = self.n_obs[type_path]
max_target_length = self.target_lens[type_path]
dataset = self.dataset_class(
self.tokenizer,
type_path=type_path,
n_obs=n_obs,
max_target_length=max_target_length,
**self.dataset_kwargs,
)
return dataset
def get_dataloader(self, type_path: str, batch_size: int, shuffle: bool = False) -> DataLoader:
dataset = self.get_dataset(type_path)
sampler = None
if self.hparams.sortish_sampler and type_path == "train":
assert self.hparams.gpus <= 1
sampler = dataset.make_sortish_sampler(batch_size)
shuffle = False
dataloader = DataLoader(
dataset,
batch_size=batch_size,
collate_fn=dataset.collate_fn,
shuffle=shuffle,
num_workers=self.num_workers,
sampler=sampler,
)
return dataloader
def train_dataloader(self) -> DataLoader:
dataloader = self.get_dataloader("train", batch_size=self.hparams.train_batch_size, shuffle=True)
t_total = (
(len(dataloader.dataset) // (self.hparams.train_batch_size * max(1, self.hparams.gpus)))
// self.hparams.accumulate_grad_batches
* float(self.hparams.max_epochs)
)
scheduler = get_linear_schedule_with_warmup(
self.opt, num_warmup_steps=self.hparams.warmup_steps, num_training_steps=t_total
)
        if max(scheduler.get_last_lr()) <= 0:
            warnings.warn("All learning rates are 0")
self.lr_scheduler = scheduler
return dataloader
def val_dataloader(self) -> DataLoader:
return self.get_dataloader("val", batch_size=self.hparams.eval_batch_size)
def test_dataloader(self) -> DataLoader:
return self.get_dataloader("test", batch_size=self.hparams.eval_batch_size)
@staticmethod
def add_model_specific_args(parser, root_dir):
BaseTransformer.add_model_specific_args(parser, root_dir)
add_generic_args(parser, root_dir)
parser.add_argument(
"--max_source_length",
default=48,
type=int,
help="The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded.",
)
parser.add_argument(
"--max_target_length",
default=24,
type=int,
help="The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded.",
)
parser.add_argument(
"--val_max_target_length",
default=24,
type=int,
help="The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded.",
)
parser.add_argument(
"--test_max_target_length",
default=24,
type=int,
help="The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded.",
)
parser.add_argument(
"--data_dir",
type=str,
required=True,
help="The input data dir. Should contain train.source, train.target, val.source, val.target, test.source, test.target",
)
parser.add_argument("--freeze_encoder", action="store_true")
parser.add_argument("--freeze_embeds", action="store_true")
parser.add_argument("--sortish_sampler", action="store_true", default=False)
parser.add_argument("--logger_name", type=str, choices=["default", "wandb", "wandb_shared"], default="default")
parser.add_argument("--n_train", type=int, default=-1, required=False, help="# examples. -1 means use all.")
parser.add_argument("--n_val", type=int, default=500, required=False, help="# examples. -1 means use all.")
parser.add_argument("--n_test", type=int, default=-1, required=False, help="# examples. -1 means use all.")
parser.add_argument(
"--task", type=str, default="summarization", required=False, help="# examples. -1 means use all."
)
parser.add_argument("--src_lang", type=str, default="", required=False)
parser.add_argument("--tgt_lang", type=str, default="", required=False)
parser.add_argument("--atomic", action="store_true")
return parser
class TranslationModule(SummarizationModule):
mode = "translation"
loss_names = ["loss"]
metric_names = ["bleu"]
val_metric = "bleu"
def __init__(self, hparams, **kwargs):
super().__init__(hparams, **kwargs)
self.dataset_kwargs["src_lang"] = hparams.src_lang
self.dataset_kwargs["tgt_lang"] = hparams.tgt_lang
if self.model.config.decoder_start_token_id is None and isinstance(self.tokenizer, MBartTokenizer):
self.decoder_start_token_id = self.tokenizer.lang_code_to_id[hparams.tgt_lang]
if isinstance(self.tokenizer, MBartTokenizer):
self.dataset_class = MBartDataset
def calc_generative_metrics(self, preds, target) -> dict:
return calculate_bleu_score(preds, target)
def main(args, model=None) -> SummarizationModule:
Path(args.output_dir).mkdir(exist_ok=True)
if len(os.listdir(args.output_dir)) > 3 and args.do_train:
raise ValueError("Output directory ({}) already exists and is not empty.".format(args.output_dir))
if model is None:
if args.task == "summarization":
model: SummarizationModule = SummarizationModule(args)
else:
model: SummarizationModule = TranslationModule(args)
### add atomic relation tokens
if args.atomic:
print("Special tokens are added.")
additional_tokens_list = [
"AtLocation",
"CapableOf",
"Causes",
"CausesDesire",
"CreatedBy",
"DefinedAs",
"DesireOf",
"Desires",
"HasA",
"HasFirstSubevent",
"HasLastSubevent",
"HasPainCharacter",
"HasPainIntensity",
"HasPrerequisite",
"HasProperty",
"HasSubEvent",
"HasSubevent",
"HinderedBy",
"InheritsFrom",
"InstanceOf",
"IsA",
"LocatedNear",
"LocationOfAction",
"MadeOf",
"MadeUpOf",
"MotivatedByGoal",
"NotCapableOf",
"NotDesires",
"NotHasA",
"NotHasProperty",
"NotIsA",
"NotMadeOf",
"ObjectUse",
"PartOf",
"ReceivesAction",
"RelatedTo",
"SymbolOf",
"UsedFor",
"isAfter",
"isBefore",
"isFilledBy",
"oEffect",
"oReact",
"oWant",
"xAttr",
"xEffect",
"xIntent",
"xNeed",
"xReact",
"xReason",
"xWant",
]
num_added_toks = model.tokenizer.add_tokens(additional_tokens_list)
model.model.resize_token_embeddings(len(model.tokenizer))
dataset = Path(args.data_dir).name
if (
args.logger_name == "default"
or args.fast_dev_run
or str(args.output_dir).startswith("/tmp")
or str(args.output_dir).startswith("/var")
):
logger = True # don't pollute wandb logs unnecessarily
elif args.logger_name == "wandb":
from pytorch_lightning.loggers import WandbLogger
logger = WandbLogger(name=model.output_dir.name, project=dataset)
elif args.logger_name == "wandb_shared":
from pytorch_lightning.loggers import WandbLogger
logger = WandbLogger(name=model.output_dir.name, project=f"hf_{dataset}")
trainer: pl.Trainer = generic_train(
model,
args,
logging_callback=Seq2SeqLoggingCallback(),
checkpoint_callback=get_checkpoint_callback(args.output_dir, model.val_metric),
logger=logger,
)
pickle_save(model.hparams, model.output_dir / "hparams.pkl")
if not args.do_predict:
return model
model.hparams.test_checkpoint = ""
checkpoints = list(sorted(glob.glob(os.path.join(args.output_dir, "*.ckpt"), recursive=True)))
if checkpoints:
model.hparams.test_checkpoint = checkpoints[-1]
trainer.resume_from_checkpoint = checkpoints[-1]
trainer.logger.log_hyperparams(model.hparams)
trainer.test(model)
return model
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser = pl.Trainer.add_argparse_args(parser)
parser = SummarizationModule.add_model_specific_args(parser, os.getcwd())
args = parser.parse_args()
main(args)
| comet-atomic-2020-master | models/comet_atomic2020_bart/finetune.py |
import argparse
import gc
import os
from pathlib import Path
from typing import List
import pytorch_lightning as pl
import torch
from torch import nn
from torch.nn import functional as F
from lightning_base import generic_train
from transformers import AdamW, BartConfig, BartForConditionalGeneration, T5Config, T5ForConditionalGeneration
try:
from .finetune import SummarizationModule
from .finetune import main as ft_main
from .initialization_utils import init_student, copy_layers
from .utils import use_task_specific_params, pickle_load, freeze_params, assert_all_frozen, any_requires_grad
except ImportError:
from finetune import SummarizationModule
from finetune import main as ft_main
from initialization_utils import init_student, copy_layers
from utils import use_task_specific_params, pickle_load, freeze_params, assert_all_frozen, any_requires_grad
class BartSummarizationDistiller(SummarizationModule):
loss_names = ["loss", "ce_loss", "mlm_loss", "enc_mse_loss", "hid_loss_enc", "hid_loss_dec"]
def __init__(self, hparams):
assert Path(hparams.data_dir).exists()
student, student_cfg, teacher = self.pre_init(hparams)
super().__init__(hparams, model=student, config=student_cfg)
self.teacher = teacher
use_task_specific_params(self.teacher, "summarization")
freeze_params(self.teacher)
self.sanity_check_gradients()
self.ce_loss_fct = nn.KLDivLoss(reduction="batchmean")
self.temperature = 2.0
self.alpha_mlm = hparams.alpha_mlm
self.alpha_ce = hparams.alpha_ce
self.alpha_hid = hparams.alpha_hid
# self.alpha_cos = hparams.alpha_cos
self.alpha_encoder_loss = self.hparams.alpha_encoder_loss
gc.collect()
torch.cuda.empty_cache()
def sanity_check_gradients(self):
assert_all_frozen(self.teacher)
assert_all_frozen(self.model.model.decoder.embed_tokens)
assert_all_frozen(self.model.model.encoder.embed_tokens)
if self.different_encoder:
assert any_requires_grad(self.model.model.encoder)
else:
freeze_params(self.model.model.encoder)
del self.teacher.model.encoder
def pre_init(self, hparams):
self.output_dir = Path(hparams.output_dir)
self.output_dir.mkdir(exist_ok=True)
teacher = BartForConditionalGeneration.from_pretrained(hparams.teacher).eval()
student_updates = {
"decoder_layers": hparams.student_decoder_layers,
"encoder_layers": hparams.student_encoder_layers,
}
if hparams.length_penalty != -1:
student_updates["length_penalty"] = hparams.length_penalty
d_layers_to_copy = get_layers_to_copy(student_updates["decoder_layers"], teacher.config.decoder_layers)
e_layers_to_copy: List = get_layers_to_copy(student_updates["encoder_layers"], teacher.config.encoder_layers)
hparams.d_layer_to_copy = d_layers_to_copy
hparams.e_layer_to_copy = e_layers_to_copy
kw = teacher.config.to_diff_dict()
kw.update(student_updates)
# Copy weights
student_cfg = BartConfig(**kw)
student = BartForConditionalGeneration(student_cfg)
student, _ = init_student(student, teacher)
save_dir = self.output_dir.joinpath("student")
self.copy_to_student(d_layers_to_copy, e_layers_to_copy, hparams, student, teacher)
student.save_pretrained(save_dir)
hparams.model_name_or_path = str(save_dir)
return student, student_cfg, teacher
def copy_to_student(self, d_layers_to_copy, e_layers_to_copy, hparams, student, teacher):
if teacher.config.model_type == "t5":
return self.copy_t5_to_student(d_layers_to_copy, e_layers_to_copy, hparams, student, teacher)
self.different_encoder: bool = hparams.student_encoder_layers != teacher.config.encoder_layers
self.different_decoder = hparams.student_decoder_layers != teacher.config.decoder_layers
if self.different_decoder:
copy_layers(teacher.model.decoder.layers, student.model.decoder.layers, d_layers_to_copy)
if self.different_encoder:
copy_layers(teacher.model.encoder.layers, student.model.encoder.layers, e_layers_to_copy)
def copy_t5_to_student(self, d_layers_to_copy, e_layers_to_copy, hparams, student, teacher):
self.different_encoder: bool = hparams.student_encoder_layers != teacher.config.num_layers
self.different_decoder = hparams.student_decoder_layers != teacher.config.num_layers
if self.different_decoder:
copy_layers(teacher.decoder.block, student.decoder.block, d_layers_to_copy)
if self.different_encoder:
copy_layers(teacher.encoder.block, student.encoder.block, e_layers_to_copy)
def calc_mse_loss(self, teacher_outputs: torch.Tensor, student_outputs: torch.Tensor, mask) -> torch.FloatTensor:
if mask is not None:
# mask has False at padding_idx
sel_mask = mask[:, :, None].expand_as(student_outputs).bool()
s_logits_slct = torch.masked_select(student_outputs, sel_mask)
t_logits_slct = torch.masked_select(teacher_outputs, sel_mask)
else:
t_logits_slct = teacher_outputs
s_logits_slct = student_outputs
return F.mse_loss(s_logits_slct, t_logits_slct)
def calc_ce_loss(self, mask, s_logits, t_logits):
if mask is not None:
# mask has False at padding_idx
sel_mask = mask[:, :, None].expand_as(s_logits)
s_logits_slct = torch.masked_select(
s_logits, sel_mask
) # (bs * seq_length * voc_size) modulo the 1s in mask
t_logits_slct = torch.masked_select(
t_logits, sel_mask
) # (bs * seq_length * voc_size) modulo the 1s in mask
else:
t_logits_slct = t_logits
s_logits_slct = s_logits # (bs * seq_length * voc_size) modulo the 1s in mask
s_logits_slct = s_logits_slct.view(-1, s_logits.size(-1)) # (bs * seq_length, voc_size) modulo the 1s in mask
t_logits_slct = t_logits_slct.view(-1, s_logits.size(-1)) # (bs * seq_length, voc_size) modulo the 1s in mask
assert t_logits_slct.size() == s_logits_slct.size()
loss_ce = (
self.ce_loss_fct(
F.log_softmax(s_logits_slct / self.temperature, dim=-1),
F.softmax(t_logits_slct / self.temperature, dim=-1),
)
* (self.temperature) ** 2
)
return loss_ce, s_logits_slct, t_logits_slct
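# Note on the distillation term above: it is the standard temperature-scaled KL
# objective -- student log-probabilities at temperature T are matched against
# teacher probabilities at the same T, and the result is rescaled by T**2 to keep
# gradient magnitudes comparable across temperatures. A rough standalone sketch
# with dummy tensors (shapes are illustrative only, not the repo's actual sizes):
#   s, t = torch.randn(8, 50265), torch.randn(8, 50265)   # (bs * seq_len, vocab)
#   T = 2.0
#   kl = nn.KLDivLoss(reduction="batchmean")(
#       F.log_softmax(s / T, dim=-1), F.softmax(t / T, dim=-1)
#   ) * T ** 2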
def configure_optimizers(self):
"Prepare optimizer and schedule (linear warmup and decay)"
model = self.model
no_decay = ["bias", "LayerNorm.weight"]
optimizer_grouped_parameters = [
{
"params": [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)],
"weight_decay": self.hparams.weight_decay,
},
{
"params": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)],
"weight_decay": 0.0,
},
]
optimizer = AdamW(optimizer_grouped_parameters, lr=self.hparams.learning_rate, eps=self.hparams.adam_epsilon)
self.opt = optimizer
return [optimizer]
@staticmethod
def add_model_specific_args(parser, root_dir):
SummarizationModule.add_model_specific_args(parser, root_dir)
parser.add_argument("--teacher", default="facebook/bart-large-cnn", type=str)
parser.add_argument("--alpha_ce", default=0.8, type=float)
parser.add_argument("--alpha_mlm", default=0.2, type=float)
# parser.add_argument("--alpha_cos", default=0.0, type=float)
parser.add_argument("--alpha_encoder_loss", default=0.0, type=float)
parser.add_argument("--alpha_hid", default=0.0, type=float, required=False)
parser.add_argument("--student_decoder_layers", default=12, type=int, required=False)
parser.add_argument("--student_encoder_layers", default=12, type=int, required=False)
parser.add_argument("--no_teacher", action="store_true", default=False)
parser.add_argument("--length_penalty", type=float, default=-1)
return parser
def _step(self, batch):
# assert is_frozen(self.teacher)
pad_token_id = self.tokenizer.pad_token_id
input_ids, src_mask, y = batch["input_ids"], batch["attention_mask"], batch["decoder_input_ids"]
decoder_input_ids = y[:, :-1].contiguous()
labels = y[:, 1:].clone()
labels[y[:, 1:] == pad_token_id] = -100
# noinspection PyCallingNonCallable
sloss, slogits, dec_hidden, enc_outputs, enc_hidden_state = self(
input_ids,
attention_mask=src_mask,
decoder_input_ids=decoder_input_ids,
labels=labels,
output_hidden_states=True,
output_attentions=False,
)
def zero_tensor():
return torch.tensor(0.0).type_as(sloss)
loss_encoder, hid_loss_enc, hid_loss_dec = zero_tensor(), zero_tensor(), zero_tensor()
if self.different_encoder:
with torch.no_grad():
teacher_enc_outputs, teacher_enc_hid, _ = self.teacher.model.encoder(
input_ids, attention_mask=src_mask, output_hidden_states=True
)
if self.hparams.alpha_encoder_loss > 0:
loss_encoder = self.calc_mse_loss(enc_outputs, teacher_enc_outputs, src_mask)
hid_loss_enc = self.calc_hidden_loss(
src_mask, enc_hidden_state, teacher_enc_hid, self.hparams.e_layer_to_copy
)
teacher_enc_outputs = (enc_outputs,)
assert isinstance(teacher_enc_outputs, tuple), type(teacher_enc_outputs)
with torch.no_grad():
tloss, tlogits, tdec_hidden, _ = self.teacher(
input_ids,
attention_mask=src_mask,
encoder_outputs=teacher_enc_outputs,
decoder_input_ids=decoder_input_ids,
lm_labels=labels,
output_hidden_states=True,
)
dec_mask = decoder_input_ids.ne(pad_token_id)
loss_ce, s_logits_slct, t_logits_slct = self.calc_ce_loss(dec_mask, slogits, tlogits)
if self.alpha_hid > 0:
hid_loss_dec = self.calc_hidden_loss(dec_mask, dec_hidden, tdec_hidden, self.hparams.d_layer_to_copy)
blended_loss = (
self.alpha_ce * loss_ce
+ self.alpha_mlm * sloss
+ self.hparams.alpha_encoder_loss * loss_encoder
+ self.hparams.alpha_hid * (hid_loss_enc + hid_loss_dec)
)
return blended_loss, loss_ce, sloss, loss_encoder, hid_loss_enc, hid_loss_dec
def calc_hidden_loss(self, attention_mask, hidden_states, hidden_states_T, matches):
assert not isinstance(
hidden_states, torch.Tensor
), f"expected list or tuple for hidden_states, got tensor of shape {hidden_states.shape}"
assert not isinstance(
hidden_states_T, torch.Tensor
), f"expected list or tuple for hidden_states_T, got tensor of shape {hidden_states_T.shape}"
mask = attention_mask.to(hidden_states[0])
valid_count = mask.sum() * hidden_states[0].size(-1)
hidden_losses = [
(F.mse_loss(hidden_states[i], hidden_states_T[j], reduction="none") * mask.unsqueeze(-1)).sum()
/ valid_count
for i, j in enumerate(matches)
]
return sum(hidden_losses)
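# Note on the hidden-state term above: `matches` is the student-to-teacher layer
# map (hparams.e_layer_to_copy / d_layer_to_copy), so student hidden state i is
# regressed onto teacher hidden state matches[i] with an MSE that is masked by
# the attention mask and normalized by (#non-pad tokens * hidden size).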
class T5SummarizationDistiller(BartSummarizationDistiller):
def pre_init(self, hparams):
raise NotImplementedError("T5 Distillation does not work yet")
self.output_dir = Path(hparams.output_dir)
self.output_dir.mkdir(exist_ok=True)
teacher = T5ForConditionalGeneration.from_pretrained(hparams.teacher)
n_layer = hparams.student_decoder_layers
assert n_layer == hparams.student_encoder_layers # TODO(SS): relax this constraint so that we can do 12-6.
d_layers_to_copy = get_layers_to_copy(n_layer, len(teacher.decoder.block))
e_layers_to_copy: List = get_layers_to_copy(n_layer, len(teacher.encoder.block))
student_updates = {"num_layers": n_layer}
hparams.d_layer_to_copy = d_layers_to_copy
hparams.e_layer_to_copy = e_layers_to_copy
kw = teacher.config.to_diff_dict()
kw.update(student_updates)
# Copy weights
student_cfg = T5Config(**kw)
student = T5ForConditionalGeneration(student_cfg)
student, _ = init_student(student, teacher)
self.copy_to_student(d_layers_to_copy, e_layers_to_copy, hparams, student, teacher)
Path(hparams.output_dir).mkdir(exist_ok=True)
task_specific_params = student.config.task_specific_params
if task_specific_params is not None:
student.config.update(task_specific_params.get("summarization", {})) # TODO: dont hardcode
save_dir = self.output_dir.joinpath("student")
save_dir.mkdir(exist_ok=True)
student.save_pretrained(save_dir)
hparams.model_name_or_path = str(save_dir)
return student, student_cfg, teacher
def freeze_embeds(self):
freeze_params(self.model.shared)
for d in [self.model.encoder, self.model.decoder]:
freeze_params(d.embed_tokens)
def sanity_check_gradients(self):
"""T5"""
assert_all_frozen(self.teacher)
assert_all_frozen(self.model.decoder.embed_tokens)
assert_all_frozen(self.model.encoder.embed_tokens)
if self.different_encoder:
assert any_requires_grad(self.model.encoder)
else:
freeze_params(self.model.encoder)
del self.teacher.model.encoder
if self.different_decoder:
assert any_requires_grad(self.model.decoder)
else:
freeze_params(self.model.decoder) # TODO(SS): very suspicious
def _step(self, batch):
pad_token_id = self.tokenizer.pad_token_id
source_ids, source_mask, y = batch["input_ids"], batch["attention_mask"], batch["decoder_input_ids"]
decoder_input_ids = y[:, :-1].contiguous()
labels = y[:, 1:].clone()
labels[y[:, 1:] == pad_token_id] = -100
# noinspection PyCallingNonCallable
dec_mask = decoder_input_ids.ne(pad_token_id)
sloss, slogits, dec_hidden, enc_outputs, enc_hidden_state = self(
source_ids,
attention_mask=source_mask,
decoder_input_ids=decoder_input_ids,
labels=labels,
output_hidden_states=True,
output_attentions=False,
use_cache=False,
)
def zero_tensor():
return torch.tensor(0.0).type_as(sloss)
loss_encoder, hid_loss_enc, hid_loss_dec = zero_tensor(), zero_tensor(), zero_tensor()
if self.different_encoder:
with torch.no_grad():
teacher_enc_outputs, teacher_enc_hid = self.teacher.encoder(
source_ids, attention_mask=source_mask, output_hidden_states=True, use_cache=False,
)
if self.hparams.alpha_encoder_loss > 0:
loss_encoder = self.calc_mse_loss(enc_outputs, teacher_enc_outputs, source_mask)
hid_loss_enc = self.calc_hidden_loss(
source_mask, enc_hidden_state, teacher_enc_hid, self.hparams.e_layer_to_copy
)
teacher_enc_outputs = (enc_outputs,)
assert isinstance(teacher_enc_outputs, tuple), type(teacher_enc_outputs)
with torch.no_grad():
tloss, tlogits, tdec_hidden, _ = self.teacher(
source_ids,
attention_mask=source_mask,
encoder_outputs=teacher_enc_outputs,
decoder_input_ids=decoder_input_ids,
lm_labels=labels,
output_hidden_states=True,
use_cache=False,
)
loss_ce, s_logits_slct, t_logits_slct = self.calc_ce_loss(dec_mask, slogits, tlogits)
if self.alpha_hid > 0:
hid_loss_dec = self.calc_hidden_loss(dec_mask, dec_hidden, tdec_hidden, self.hparams.d_layer_to_copy)
blended_loss = (
self.alpha_ce * loss_ce
+ self.alpha_mlm * sloss
+ self.hparams.alpha_encoder_loss * loss_encoder
+ self.hparams.alpha_hid * (hid_loss_enc + hid_loss_dec)
)
return blended_loss, loss_ce, sloss, loss_encoder, hid_loss_enc, hid_loss_dec
def create_module(args):
t5 = "t5" in args.model_name_or_path
if args.no_teacher:
assert not args.enc_only
module_cls = SummarizationModule
elif t5:
module_cls = T5SummarizationDistiller
elif args.enc_only:
raise ValueError("Deleted that")
else:
module_cls = BartSummarizationDistiller
args.setup_cls: str = module_cls.__name__
model = module_cls(args)
return model
def evaluate_checkpoint(ckpt_path: Path, dest_dir=None):
exp_dir = ckpt_path.parent
if dest_dir is None:
dest_dir = exp_dir
clash = list(dest_dir.glob("test_generations*"))
if clash:
print(f"SKIPPING to avoid overwriting {clash}")
ckpt = torch.load(ckpt_path, map_location="cpu")
if "hparams" in ckpt:
args = argparse.Namespace(**ckpt["hparams"])
else:
args = argparse.Namespace(**pickle_load(exp_dir / "hparams.pkl"))
args.resume_from_checkpoint = str(ckpt_path)
args.do_train = False
args.output_dir = str(dest_dir)
args.n_gpu = 1
args.eval_batch_size = 16
Path(args.output_dir).mkdir(exist_ok=True)
model = create_module(args)
trainer: pl.Trainer = generic_train(model, args, early_stopping_callback=False)
trainer.test(model)
def get_layers_to_copy(n_to_get, tot):
all_layers = list(range(tot))
if tot == 12: # Alternating for special cases
layers_to_copy = { # maps num layers in student -> which teacher layers to copy
1: [0],
2: [0, 6],
3: [0, 6, 11],
4: [0, 4, 8, 11],
6: [0, 2, 4, 7, 9, 11],
9: [0, 1, 2, 4, 5, 7, 9, 10, 11],
12: all_layers,
}
return layers_to_copy[n_to_get]
else:
return all_layers[:n_to_get] # TODO: better version on theseus-bart branch
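# Illustrative outputs, derived directly from the table and slice above:
#   get_layers_to_copy(6, 12)  -> [0, 2, 4, 7, 9, 11]   # spread over a 12-layer teacher
#   get_layers_to_copy(12, 12) -> [0, 1, ..., 11]        # copy every layer
#   get_layers_to_copy(3, 6)   -> [0, 1, 2]              # non-12-layer teachers: first n layers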
def distill_main(args):
Path(args.output_dir).mkdir(exist_ok=True)
if len(os.listdir(args.output_dir)) > 3 and args.do_train:
raise ValueError("Output directory ({}) already exists and is not empty.".format(args.output_dir))
model = create_module(args)
return ft_main(args, model=model)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser = pl.Trainer.add_argparse_args(parser)
parser = BartSummarizationDistiller.add_model_specific_args(parser, os.getcwd())
args = parser.parse_args()
distill_main(args)
| comet-atomic-2020-master | models/comet_atomic2020_bart/distillation.py |
# Importing stock libraries
import json
from typing import List
import numpy as np
import pandas as pd
import torch
import torch.nn.functional as F
from torch.utils.data import Dataset, DataLoader, RandomSampler, SequentialSampler
# Importing the T5 modules from huggingface/transformers
from transformers import T5Tokenizer, T5ForConditionalGeneration, GPT2Tokenizer, GPT2Model, \
GPT2LMHeadModel
# Import os for env variables via Beaker
import os
import tqdm
# WandB – Import the wandb library
import wandb
from torch import cuda
device = 'cuda' if cuda.is_available() else 'cpu'
import inflect
inflection_engine = inflect.engine()
import spacy
nlp = spacy.load("en")
# Print for allenai beaker verification
print(device)
print(torch.cuda.device_count())
KGS_TO_EVAL = ["atomic", "atomic2020", "conceptnet", "transomcs"]
def write_items(items: List[str], output_file):
with open(output_file, 'w') as f:
for item in items:
f.write(str(item) + "\n")
f.close()
def article(word):
return "an" if word[0] in ['a', 'e', 'i', 'o', 'u'] else "a"
def posessive(word):
if inflection_engine.singular_noun(word) is False:
return "have"
else:
return "has"
def vp_present_participle(phrase):
doc = nlp(phrase)
return ' '.join([
inflection_engine.present_participle(token.text) if token.pos_ == "VERB" and token.tag_ != "VBG" else token.text  # skip verbs already in present-participle form
for token in doc
])
def fact_to_prompt(kg, fact):
head = fact['head']
relation = fact['relation']
tail = fact['tails'][0]
if kg == "conceptnet" or kg == "transomcs":
if relation == "AtLocation":
prompt = "You are likely to find {} {} in {} ".format(
article(head), head, article(tail)
)
elif relation == "CapableOf":
prompt = "{} can ".format(head)
elif relation == "CausesDesire":
prompt = "{} would make you want to ".format(head)
elif relation == "Causes":
prompt = "Sometimes {} causes ".format(head)
elif relation == "CreatedBy":
prompt = "{} is created by".format(head)
elif relation == "Desires":
prompt = "{} {} desires".format(article(head), head)
elif relation == "HasA":
prompt = "{} {} ".format(head, posessive(head))
elif relation == "HasPrerequisite":
prompt = "{} requires ".format(vp_present_participle(head))
elif relation == "HasProperty":
prompt = "{} is ".format(head)
elif relation == "MotivatedByGoal":
prompt = "You would {} because you are ".format(head)
elif relation == "ReceivesAction":
prompt = "{} can be ".format(head)
elif relation == "UsedFor":
prompt = "{} {} is for ".format(article(head).upper(), head)
elif relation == "HasFirstSubevent" or relation == "HasSubevent" or relation == "HasLastSubevent":
prompt = "While {}, you would ".format(vp_present_participle(head))
elif relation == "InheritsFrom":
prompt = "{} inherits from".format(head)
elif relation == "PartOf":
prompt = "{} {} is a part of {} ".format(article(head).upper(), head, article(tail))
elif relation == "IsA":
prompt = "{} is {} ".format(head, article(tail))
elif relation == "InstanceOf":
prompt = "{} is an instance of".format(head)
elif relation == "MadeOf":
prompt = "{} is made of".format(head)
elif relation == "DefinedAs":
prompt = "{} is defined as ".format(head)
elif relation == "NotCapableOf":
prompt = "{} is not capable of".format(head)
elif relation == "NotDesires":
prompt = "{} {} does not desire".format(article(head), head)
elif relation == "NotHasA":
prompt = "{} does not have a".format(head)
elif relation == "NotHasProperty" or relation == "NotIsA":
prompt = "{} is not".format(head)
elif relation == "NotMadeOf":
prompt = "{} is not made of".format(head)
elif relation == "SymbolOf":
prompt = "{} is a symbol of".format(head)
else:
raise Exception(relation)
elif kg == "atomic" or kg == "atomic2020":
if relation == "AtLocation":
prompt = "You are likely to find {} {} in {} ".format(
article(head), head, article(tail)
)
elif relation == "CapableOf":
prompt = "{} can ".format(head)
elif relation == "Causes":
prompt = "Sometimes {} causes ".format(head)
elif relation == "Desires":
prompt = "{} {} desires".format(article(head), head)
elif relation == "HasProperty":
prompt = "{} is ".format(head)
elif relation == "HasSubEvent":
prompt = "While {}, you would ".format(vp_present_participle(head))
elif relation == "HinderedBy":
prompt = "{}. This would not happen if"
elif relation == "MadeUpOf":
prompt = "{} {} contains".format(article(head), head)
elif relation == "NotDesires":
prompt = "{} {} does not desire".format(article(head), head)
elif relation == "ObjectUse":
prompt = "{} {} can be used for".format(article(head), head)
elif relation == "isAfter":
prompt = "{}. Before that, ".format(head)
elif relation == "isBefore":
prompt = "{}. After that, ".format(head)
elif relation == "isFilledBy":
prompt = "{} is filled by".format(head) #TODO
elif relation == "oEffect":
prompt = "{}. The effect on others will be".format(head)
elif relation == "oReact":
prompt = "{}. As a result, others feel".format(head)
elif relation == "oWant":
prompt = "{}. After, others will want to".format(head)
elif relation == "xAttr":
prompt = "{}. PersonX is".format(head)
elif relation == "xEffect":
prompt = "{}. The effect on PersonX will be".format(head)
elif relation == "xIntent":
prompt = "{}. PersonX did this to".format(head)
elif relation == "xNeed":
prompt = "{}. Before, PersonX needs to".format(head)
elif relation == "xReact":
prompt = "{}. PersonX will be".format(head)
elif relation == "xReason":
prompt = "{}. PersonX did this because".format(head)
elif relation == "xWant":
prompt = "{}. After, PersonX will want to".format(head)
else:
raise Exception("Invalid relation for KG '{}': {}".format(kg, relation))
return prompt.strip()
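# Illustrative call, using one of the templates defined above (the fact dict is a
# made-up example, not taken from the dataset):
#   fact_to_prompt("atomic2020", {"head": "PersonX buys a gift",
#                                 "relation": "xIntent",
#                                 "tails": ["to be nice"]})
#   -> "PersonX buys a gift. PersonX did this to"
# Only templates that prepend an article (e.g. AtLocation, or PartOf/IsA in the
# ConceptNet branch) actually consult the first tail.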
def read_jsonl_lines(input_file: str) -> List[dict]:
with open(input_file) as f:
lines = f.readlines()
return [json.loads(l.strip()) for l in lines]
def find_nth(haystack, needle, n):
start = haystack.find(needle)
while start >= 0 and n > 1:
start = haystack.find(needle, start+len(needle))
n -= 1
return start if start != -1 else None
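# Illustrative behaviour of find_nth (character offset of the n-th occurrence,
# or None when there are fewer than n occurrences):
#   find_nth("a.b.c", ".", 1) -> 1
#   find_nth("a.b.c", ".", 2) -> 3
#   find_nth("a.b.c", ".", 3) -> None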
def main():
wandb.init(project="gpt2_zeroshot")
config = wandb.config
config.GPT2_MODEL = str(os.environ.get('GPT2_MODEL', "gpt2"))
config.OUTPUT_DIR = str(os.environ.get('OUTPUT_DIR', "data/gpt2-zeroshot/output/"))
config.MODEL_SAVE_LOCATION = str(os.environ.get('MODEL_SAVE_LOCATION', "models/"))
config.SEED = int(os.environ.get("SEED", 42))
config.IN_LEN = 75
config.SUMMARY_LEN = 35
config.DATA_PATH = "data/gpt2-zeroshot" if "DATA_PATH" not in os.environ else os.environ["DATA_PATH"]
config.TOP_K = int(os.environ.get('TOP_K', "1"))
config.TOP_P = float(os.environ.get('TOP_P', "0.9"))
config.NUM_BEAMS = int(os.environ.get('NUM_BEAMS', "10"))
config.NUM_SEQUENCES = int(os.environ.get('NUM_SEQUENCES', "10"))
config.STOP_TOKEN = "."
torch.manual_seed(config.SEED) # pytorch random seed
np.random.seed(config.SEED) # numpy random seed
torch.backends.cudnn.deterministic = True
model_name = config.GPT2_MODEL
tokenizer = GPT2Tokenizer.from_pretrained(model_name)
model = GPT2LMHeadModel.from_pretrained(model_name)
model.to(device)
wandb.watch(model, log="all")
for kg in KGS_TO_EVAL:
fname = os.path.join(config.DATA_PATH, kg, "test_sampled_prefixes.jsonl")
print("\n\n===== Evaluating {} ===== {} \n\n".format(kg, fname))
generations_for_fact = []
with open(fname) as f:
for line_idx, fact in tqdm.tqdm(enumerate(f)):
prompt = fact_to_prompt(kg, json.loads(fact))
input_ids = tokenizer.encode(prompt, add_special_tokens=False, return_tensors="pt")
generations = model.generate(
input_ids=input_ids.to(device),
max_length=input_ids.size(1) + 10,
temperature=1.0,
top_k=config.TOP_K,
top_p=config.TOP_P,
repetition_penalty=1.0,
do_sample=True,
num_return_sequences=config.NUM_SEQUENCES,
num_beams=config.NUM_BEAMS
)
if len(generations.shape) > 2:
generations.squeeze_()
text_generations = []
for gen in generations:
gen = gen.tolist()
text = tokenizer.decode(gen, clean_up_tokenization_spaces=True)
text = text[:find_nth(text, config.STOP_TOKEN, 1)] if config.STOP_TOKEN not in prompt else text[:find_nth(text, config.STOP_TOKEN, 2)]
text_generations.append(text)
generations_for_fact.append({
"idx": line_idx,
"fact": json.loads(fact),
"prompt": prompt,
"generations": text_generations
})
write_items([json.dumps(r) for r in generations_for_fact], os.path.join(config.OUTPUT_DIR, "{}-zeroshot-generations.jsonl".format(kg)))
model.save_pretrained(config.MODEL_SAVE_LOCATION)
if __name__ == '__main__':
main()
| comet-atomic-2020-master | models/gpt2_zeroshot/gpt2-zeroshot.py |
# Importing stock libraries
import numpy as np
import pandas as pd
import torch
import torch.nn.functional as F
from torch.utils.data import Dataset, DataLoader, RandomSampler, SequentialSampler
import json
from typing import List
# Importing the GPT2 modules from huggingface/transformers
from transformers import GPT2LMHeadModel, GPT2Tokenizer
# Import os for env variables via Beaker
import os
# WandB – Import the wandb library
import wandb
import logging
from torch import cuda
from split.utils import write_items
from optparse import OptionParser
device = 'cuda' if cuda.is_available() else 'cpu'
logger = logging.getLogger("gpt2-comet")
logging.basicConfig(level=logging.DEBUG)
# logger.info for allenai beaker verification
logger.info(device)
logger.info(torch.cuda.device_count())
from mosaic.infra.modeling import train, validate, beam_generations
from mosaic.datasets.KGDataset import KGDataset
DEBUG = False
NUM_INST = 100
def read_jsonl_lines(input_file: str) -> List[dict]:
with open(input_file) as f:
lines = f.readlines()
return [json.loads(l.strip()) for l in lines]
def main():
wandb.init(project="gpt2_comet_atomic")
config = wandb.config
config.TRAIN_BATCH_SIZE = int(os.environ.get("TRAIN_BATCH_SIZE", 2))
config.VALID_BATCH_SIZE = int(os.environ.get("VALID_BATCH_SIZE", 2))
config.TRAIN_EPOCHS = int(os.environ.get("TRAIN_EPOCHS", 3))
config.VAL_EPOCHS = int(os.environ.get("VAL_EPOCHS", 1))
config.LEARNING_RATE = float(os.environ.get("LEARNING_RATE", "1e-5"))
config.SEED = int(os.environ.get("SEED", 42))
config.IN_LEN = int(os.environ.get("IN_LEN", 16))
config.OUT_LEN = int(os.environ.get("OUT_LEN", 34))
config.SUMMARY_LEN = 0 # Used for t5
config.OUT_DIR = os.environ.get("OUT_DIR", "/models")
config.DO_TRAIN = os.environ.get("DO_TRAIN", "False") == "True"
config.DO_PRED = os.environ.get("DO_PRED", "True") == "True"
config.PRED_FILE = str(os.environ.get("PRED_FILE", ""))
config.TOP_K = int(os.environ.get("TOP_K", 40))
config.PRED_BATCH = 64
config.TOKENIZER = os.environ.get('TOKENIZER', "gpt2-xl")
torch.manual_seed(config.SEED) # pytorch random seed
np.random.seed(config.SEED) # numpy random seed
torch.backends.cudnn.deterministic = True
model_name = "gpt2" if 'GPT2_MODEL' not in os.environ else os.environ['GPT2_MODEL']
try:
tokenizer = GPT2Tokenizer.from_pretrained(model_name)
except:
tokenizer = GPT2Tokenizer.from_pretrained(config.TOKENIZER)
tokenizer.add_special_tokens({
'eos_token': '[EOS]',
'additional_special_tokens': [
'LocationOfAction',
'HinderedBy',
'HasFirstSubevent',
'NotHasProperty',
'NotHasA',
'HasA',
'AtLocation',
'NotCapableOf',
'CausesDesire',
'HasPainCharacter',
'NotDesires',
'MadeUpOf',
'InstanceOf',
'SymbolOf',
'xReason',
'isAfter',
'HasPrerequisite',
'UsedFor',
'MadeOf',
'MotivatedByGoal',
'Causes',
'oEffect',
'CreatedBy',
'ReceivesAction',
'NotMadeOf',
'xWant',
'PartOf',
'DesireOf',
'HasPainIntensity',
'xAttr',
'DefinedAs',
'oReact',
'xIntent',
'HasSubevent',
'oWant',
'HasProperty',
'IsA',
'HasSubEvent',
'LocatedNear',
'Desires',
'isFilledBy',
'isBefore',
'InheritsFrom',
'xNeed',
'xEffect',
'xReact',
'HasLastSubevent',
'RelatedTo',
'CapableOf',
'NotIsA',
'ObjectUse',
'[GEN]'
]
})
tokenizer.add_special_tokens({'pad_token': '[PAD]'})
train_dataset = pd.read_csv(
os.environ.get('TRAIN_DATA_PATH', "/tmp/gpt2data/atomic_train.tsv"),
encoding='latin-1', sep="\t")
if DEBUG:
train_dataset = train_dataset.head(NUM_INST)
# train_dataset = train_dataset[['head_event', 'tail_event', 'relation']]
train_dataset.head_event = train_dataset.head_event + ' ' + train_dataset.relation \
+ " [GEN]"
train_dataset.tail_event = train_dataset.tail_event + ' [EOS]'
logger.info(train_dataset.head())
logger.info(train_dataset.tail_event)
val_dataset = pd.read_csv(os.environ.get('DEV_DATA_PATH', "/tmp/gpt2data/atomic_dev.tsv"), encoding='latin-1', sep="\t")
if DEBUG:
val_dataset = val_dataset.head(NUM_INST)
val_dataset = val_dataset[['head_event', 'tail_event', 'relation']]
val_dataset.head_event = val_dataset.head_event + ' ' + val_dataset.relation + " [GEN]"
val_dataset.tail_event = val_dataset.tail_event + ' [EOS]'
logger.info(val_dataset.tail_event)
logger.info(val_dataset.head())
test_dataset = pd.read_csv(os.environ.get('TEST_DATA_PATH', "/tmp/gpt2data/atomic_test.tsv"), encoding='latin-1', sep="\t")
if DEBUG:
test_dataset = test_dataset.head(NUM_INST)
test_dataset = test_dataset[['head_event', 'tail_event', 'relation']]
test_dataset.head_event = test_dataset.head_event + ' ' + test_dataset.relation \
+ " [GEN]"
test_dataset.tail_event = test_dataset.tail_event + ' [EOS]'
logger.info(test_dataset.tail_event)
logger.info(test_dataset.head())
val_dataset_mini = pd.read_csv(os.environ.get('DEV_DATA_PATH', "/tmp/gpt2data/atomic_dev.tsv"), encoding='latin-1', sep="\t")
if DEBUG:
val_dataset_mini = val_dataset_mini.head(5)
val_dataset_mini = val_dataset_mini.sample(n=min(int(val_dataset_mini.size / 3), 100),
random_state=config.SEED)
val_dataset_mini = val_dataset_mini[['head_event', 'tail_event', 'relation']]
val_dataset_mini.head_event = val_dataset_mini.head_event + ' ' + val_dataset_mini.relation + " [GEN]"
val_dataset_mini.tail_event = val_dataset_mini.tail_event + ' [EOS]'
logger.info(val_dataset_mini.tail_event)
logger.info(val_dataset_mini.head())
logger.info("TRAIN Dataset tuple count: {}".format(train_dataset.shape))
logger.info("DEV Dataset tuple_count: {}".format(val_dataset.shape))
logger.info("DEV MINI Dataset tuple_count: {}".format(val_dataset_mini.shape))
training_set = KGDataset(train_dataset, tokenizer, config.OUT_LEN, config.SUMMARY_LEN, model="gpt2")
val_set = KGDataset(val_dataset, tokenizer, config.IN_LEN, config.OUT_LEN - config.IN_LEN, model="gpt2", is_eval=True)
val_set_mini = KGDataset(val_dataset.head(2000), tokenizer, config.IN_LEN, config.OUT_LEN - config.IN_LEN, model="gpt2", is_eval=True)
test_set = KGDataset(test_dataset, tokenizer, config.IN_LEN, config.OUT_LEN - config.IN_LEN, model="gpt2", is_eval=True)
train_params = {
'batch_size': config.TRAIN_BATCH_SIZE,
'shuffle': True,
'num_workers': 0
}
val_params = {
'batch_size': 1,
'shuffle': False,
'num_workers': 0
}
training_loader = DataLoader(training_set, **train_params, drop_last=True)
val_loader = DataLoader(val_set, **val_params, drop_last=True)
test_loader = DataLoader(test_set, **val_params, drop_last=True)
val_loader_mini = DataLoader(val_set_mini, **val_params, drop_last=True)
logging.info("Loading model from {}".format(model_name))
model = GPT2LMHeadModel.from_pretrained(model_name, use_cdn=False)
logging.info("Move model to device {}".format(device))
model = model.to(device)
model.resize_token_embeddings(len(tokenizer))
optimizer = torch.optim.Adam(params=model.parameters(), lr=config.LEARNING_RATE)
wandb.watch(model, log="all")
if config.DO_TRAIN:
logger.info('Initiating Fine-Tuning for the model on our dataset')
for epoch in range(config.TRAIN_EPOCHS):
train(epoch, tokenizer, model, device, training_loader, optimizer, val_loader_mini, model_class="gpt2")
model.save_pretrained('{}/checkpoint_{}'.format(config.OUT_DIR, epoch))
tokenizer.save_pretrained('{}/checkpoint_{}'.format(config.OUT_DIR, epoch))
model.save_pretrained('/models')
if config.DO_PRED:
if config.PRED_FILE.endswith("jsonl"):
records = read_jsonl_lines(config.PRED_FILE)
pred_dataset = pd.DataFrame.from_records(records)
pred_dataset = pred_dataset.rename(columns={"head": "head_event", "tails": "tail_event"})
pred_dataset = pred_dataset.explode('tail_event')
else:
pred_dataset = pd.read_csv(config.PRED_FILE, encoding='latin-1', sep="\t")
if DEBUG:
pred_dataset = pred_dataset.head(NUM_INST)
pred_dataset = pred_dataset.drop_duplicates(['head_event', 'relation'], ignore_index=True)
pred_dataset.head_event = pred_dataset.head_event + ' ' + pred_dataset.relation + " [GEN]"
pred_dataset.tail_event = pred_dataset.tail_event + ' [EOS]'
logger.info(pred_dataset.tail_event)
logger.info(pred_dataset.head())
pred_set = KGDataset(pred_dataset, tokenizer, config.IN_LEN, config.OUT_LEN - config.IN_LEN, model="gpt2", is_eval=True)
pred_loader = DataLoader(pred_set, **val_params, drop_last=False)
pred_generations = beam_generations(tokenizer, model, device, pred_loader, top_k=config.TOP_K)
write_items(os.path.join(config.OUT_DIR, "pred_generations.jsonl"),
[json.dumps(r) for r in pred_generations])
# Resave the model to keep generations and model associated
model.save_pretrained('/models')
tokenizer.save_pretrained('/models')
if __name__ == '__main__':
parser = OptionParser()
parser.add_option("-t", "--test_install",
action="store_true", default=False,
help="Test install, without running any modeling code.")
(options, args) = parser.parse_args()
if not options.test_install:
main()
| comet-atomic-2020-master | models/comet_atomic2020_gpt2/comet_gpt2.py |
import csv
relations = set()
with open('../old_data/atomic2020_train.tsv') as file:
reader = csv.DictReader(file, delimiter='\t')
for row in reader:
relations.add(row['relation'])
with open('../old_data/atomic_train.tsv') as file:
reader = csv.DictReader(file, delimiter='\t')
for row in reader:
relations.add(row['relation'])
with open('../old_data/conceptnet_train.tsv') as file:
reader = csv.DictReader(file, delimiter='\t')
for row in reader:
relations.add(row['relation'])
with open('../data/transomcs_train.tsv') as file:
reader = csv.DictReader(file, delimiter='\t')
for row in reader:
relations.add(row['relation'])
print({'additional_special_tokens': list(relations)}) | comet-atomic-2020-master | scripts/retrieve_special_tokens.py |
import sys
import csv
writer = csv.DictWriter(open('../data/atomic_test.tsv', 'w'), delimiter='\t', fieldnames=['relation', 'head_event', 'tail_event'])
writer.writeheader()
with open(sys.argv[1]) as file:
reader = csv.DictReader(file, delimiter='\t', fieldnames=['head', 'relation', 'tail', 'id1', 'id2', 'score'])
for row in reader:
writer.writerow({'relation': row['relation'], 'head_event': row['head'], 'tail_event': row['tail']}) | comet-atomic-2020-master | scripts/convert_atomic.py |
import sys
import csv
input_name = sys.argv[1].split("/")[-1]
dataset_name = input_name.split("_")[0]
split_name = input_name.split("_")[-1].split(".")[0]
writer = csv.DictWriter(open(f'../data/{dataset_name}_{split_name}.tsv', 'w'), delimiter='\t', fieldnames=['relation', 'head_event', 'tail_event'])
writer.writeheader()
with open(sys.argv[1]) as file:
reader = csv.DictReader(file, delimiter='\t', fieldnames=['head', 'relation', 'tail'])
for row in reader:
writer.writerow({'relation': row['relation'], 'head_event': row['head'], 'tail_event': row['tail']}) | comet-atomic-2020-master | scripts/convert_ronan.py |
import pandas as pd
import transformers
import os
from statistics import mean, stdev
from transformers import T5Tokenizer
train_dataset = pd.read_csv('../data/atomic2020_train.tsv', encoding='latin-1', sep="\t")
train_dataset = train_dataset[['head_event','tail_event','relation']]
train_dataset.head_event = train_dataset.head_event + ' ' + train_dataset.relation + " [EOS]"
train_dataset.tail_event = train_dataset.tail_event + ' [EOS]'
tokenizer = T5Tokenizer.from_pretrained('t5-large')
tokenizer.add_special_tokens({'eos_token': '[EOS]', 'additional_special_tokens': ['LocationOfAction', 'HinderedBy', 'HasFirstSubevent', 'NotHasProperty', 'NotHasA', 'HasA', 'AtLocation', 'NotCapableOf', 'CausesDesire', 'HasPainCharacter', 'NotDesires', 'MadeUpOf', 'InstanceOf', 'SymbolOf', 'xReason', 'isAfter', 'HasPrerequisite', 'UsedFor', 'MadeOf', 'MotivatedByGoal', 'Causes', 'oEffect', 'CreatedBy', 'ReceivesAction', 'NotMadeOf', 'xWant', 'PartOf', 'DesireOf', 'HasPainIntensity', 'xAttr', 'DefinedAs', 'oReact', 'xIntent', 'HasSubevent', 'oWant', 'HasProperty', 'IsA', 'HasSubEvent', 'LocatedNear', 'Desires', 'isFilledBy', 'isBefore', 'InheritsFrom', 'xNeed', 'xEffect', 'xReact', 'HasLastSubevent', 'RelatedTo', 'CapableOf', 'NotIsA', 'ObjectUse']})
writer_source = open('lens_source.txt', 'w')
writer_target = open('lens_target.txt', 'w')
lens_source = []
lens_target = []
for _, row in train_dataset.iterrows():
text = str(row["head_event"])
text = ' '.join(text.split())
ctext = str(row["tail_event"])
ctext = ' '.join(ctext.split())
source = tokenizer.batch_encode_plus([text])
target = tokenizer.batch_encode_plus([ctext])
writer_source.write(str(len(source["input_ids"][0])) + "\n")
writer_target.write(str(len(target["input_ids"][0])) + "\n")
lens_source.append(len(source["input_ids"][0]))
lens_target.append(len(target["input_ids"][0]))
for lens_list in [lens_source, lens_target]:
results = {
"mean": mean(lens_list),
"min": min(lens_list),
"stdev": stdev(lens_list),
"max": max(lens_list)
}
print(results) | comet-atomic-2020-master | scripts/calculate_max_len.py |
import sys
import csv
writer = csv.DictWriter(open('../data/conceptnet_dev.tsv', 'w'), delimiter='\t', fieldnames=['relation', 'head', 'tail'])
writer.writeheader()
with open(sys.argv[1]) as file:
reader = csv.DictReader(file, delimiter='\t', fieldnames=['relation', 'head', 'tail', 'score'])
for row in reader:
writer.writerow({'relation': row['relation'], 'head': row['head'], 'tail': row['tail']}) | comet-atomic-2020-master | scripts/convert_conceptnet.py |
import json
import sys
import csv
import operator
import random
def read_csv(input_file, quotechar='"', delimiter=",", skip_header=False):
"""Reads a tab separated value file."""
with open(input_file, "r") as f:
reader = csv.reader(f, delimiter=delimiter, quotechar=quotechar, quoting=csv.QUOTE_ALL, skipinitialspace=True)
lines = []
for line in reader:
if sys.version_info[0] == 2:
line = list(unicode(cell, 'utf-8') for cell in line)
lines.append(line)
if skip_header:
lines = lines[1:]
return lines
def write_tsv(output_file, data, header=False):
keys = list(data[0].keys())
with open(output_file, 'w') as f:
w = csv.DictWriter(f, keys, delimiter='\t', lineterminator='\n')
if header:
w.writeheader()
for r in data:
entry = {k: r[k] for k in keys}
w.writerow(entry)
def write_array2tsv(output_file, data, header=False):
keys = range(len(data[0]))
with open(output_file, 'w') as f:
w = csv.DictWriter(f, keys, delimiter='\t', lineterminator='\n')
if header:
w.writeheader()
for r in data:
entry = {k: r[k] for k in keys}
w.writerow(entry)
def write_csv(filename, data, fieldnames):
with open(filename, 'w', newline='') as csvfile:
writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
writer.writeheader()
for d in data:
formatted_d = {}
for key, val in d.items():
formatted_d[key] = json.dumps(val)
writer.writerow(formatted_d)
def read_jsonl(filename):
data = []
with open(filename, "r") as f:
for line in f:
data.append(json.loads(line))
return data
def write_items(output_file, items):
with open(output_file, 'w') as f:
for concept in items:
f.write(concept + "\n")
f.close()
def write_jsonl(f, d):
write_items(f, [json.dumps(r) for r in d])
def count_relation(d):
relation_count = {}
prefix_count = {}
head_count = {}
for l in d:
r = l[1]
if r not in relation_count.keys():
relation_count[r] = 0
relation_count[r] += 1
prefix = l[0]+l[1]
if prefix not in prefix_count.keys():
prefix_count[prefix] = 0
prefix_count[prefix] += 1
head = l[0]
if head not in head_count.keys():
head_count[head] = 0
head_count[head] += 1
sorted_relation_count = dict(sorted(relation_count.items(), key=operator.itemgetter(1), reverse=True))
sorted_prefix_count = dict(sorted(prefix_count.items(), key=operator.itemgetter(1), reverse=True))
sorted_head_count = dict(sorted(head_count.items(), key=operator.itemgetter(1), reverse=True))
print("Relations:")
for r in sorted_relation_count.keys():
print(r, sorted_relation_count[r])
print("\nPrefixes:")
print("uniq prefixes: ", len(sorted_prefix_count.keys()))
i = 0
for r in sorted_prefix_count.keys():
print(r, sorted_prefix_count[r])
i += 1
if i > 20:
break
print("\nHeads:")
i = 0
for r in sorted_head_count.keys():
print(r, sorted_head_count[r])
i += 1
if i > 20:
break
def get_head_set(d):
return set([l[0] for l in d])
def head_based_split(data, dev_size, test_size, head_size_threshold=500, dev_heads=[], test_heads=[]):
"""
:param data: the tuples to split according to the heads, where the head is the first element of each tuple
:param dev_size: target size of the dev set
:param test_size: target size of the test set
:param head_size_threshold: Maximum number of tuples a head can be involved in,
in order to be considered for the dev/test set'
:param dev_heads: heads that are forced to belong to the dev set
:param test_heads: heads that are forced to belong to the test set
:return:
"""
head_count = {}
for l in data:
head = l[0]
if head not in head_count.keys():
head_count[head] = 0
head_count[head] += 1
remaining_heads = dict(head_count)
test_selected_heads = {}
test_head_total_count = 0
for h in test_heads:
if h in remaining_heads:
c = remaining_heads[h]
test_selected_heads[h] = c
test_head_total_count += c
remaining_heads.pop(h)
while test_head_total_count < test_size:
h = random.sample(remaining_heads.keys(), 1)[0]
c = remaining_heads[h]
if c < head_size_threshold:
test_selected_heads[h] = c
test_head_total_count += c
remaining_heads.pop(h)
test = [l for l in data if l[0] in test_selected_heads.keys()]
dev_selected_heads = {}
dev_head_total_count = 0
for h in dev_heads:
if h in remaining_heads:
c = remaining_heads[h]
dev_selected_heads[h] = c
dev_head_total_count += c
remaining_heads.pop(h)
while dev_head_total_count < dev_size:
h = random.sample(remaining_heads.keys(), 1)[0]
c = remaining_heads[h]
if c < head_size_threshold:
dev_selected_heads[h] = c
dev_head_total_count += c
remaining_heads.pop(h)
dev = [l for l in data if l[0] in dev_selected_heads.keys()]
dev_test_heads = set(list(dev_selected_heads.keys()) + list(test_selected_heads.keys()))
train = [l for l in data if l[0] not in dev_test_heads]
return train, dev, test
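# A minimal usage sketch (file name and sizes are assumptions for illustration,
# not values from the repo); each tuple is (head, relation, tail, ...):
#   data = read_csv("kb_triples.tsv", delimiter="\t", skip_header=True)
#   train, dev, test = head_based_split(data, dev_size=1000, test_size=1000)
#   assert get_head_set(dev).isdisjoint(get_head_set(train))
#   assert get_head_set(test).isdisjoint(get_head_set(train))
# Splitting by head keeps every tuple sharing a head event in exactly one split,
# so dev/test heads are never observed during training.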
def remove_prefix(text, prefix):
return text[text.startswith(prefix) and len(prefix):] | comet-atomic-2020-master | system_eval/utils.py |
import argparse
import numpy as np
from nltk.translate.bleu_score import sentence_bleu
from utils import read_jsonl, remove_prefix, write_jsonl
from evaluation.eval import QGEvalCap
from tabulate import tabulate
import json
import os
from collections import defaultdict
import random
def get_reference_sentences(filename):
result = []
with open(filename) as file:
for line in file:
result.append([x.strip() for x in line.split('\t')[1].split('|')])
return result
def postprocess(sentence):
return sentence
def get_heads_and_relations(filename):
result = []
with open(filename) as file:
for line in file:
line = line.split('\t')[0]
head_event = line.split('@@')[0].strip()
relation = line.split('@@')[1].strip()
to_add = {
'head': head_event,
'relation': relation
}
result.append(to_add)
return result
def get_hypothesises(filename):
result = []
import json
with open(filename) as file:
for line in file:
result.append(json.loads(line)["greedy"])
return result
def preprocess_generations(args):
input_file = args.input_file
outfile_path = os.path.join(os.path.dirname(input_file), os.path.basename(input_file).split('.')[0] + "_gens.jsonl")
outfile = open(outfile_path, 'w')
references_list = get_reference_sentences('test.tsv')
heads_relations = get_heads_and_relations('test.tsv')
hypothesises = get_hypothesises(args.input_file)
idx = 0
total_bleu_1 = 0
total_bleu_2 = 0
total_bleu_3 = 0
total_bleu_4 = 0
relation_bleu_1 = defaultdict(lambda: defaultdict(int))
count = 0
for head_relation, references, hypothesis in zip(heads_relations, references_list, hypothesises):
bleu_1 = sentence_bleu(references, hypothesis, weights=[1.0])
bleu_2 = sentence_bleu(references, hypothesis, weights=[0.5, 0.5])
bleu_3 = sentence_bleu(references, hypothesis, weights=[0.34, 0.33, 0.33])
bleu_4 = sentence_bleu(references, hypothesis)
result = {
'generation': postprocess(hypothesis),
'references': [postprocess(reference) for reference in references],
'input': head_relation
}
if hypothesis != 'none':
total_bleu_1 += bleu_1
total_bleu_2 += bleu_2
total_bleu_3 += bleu_3
total_bleu_4 += bleu_4
relation_bleu_1[head_relation["relation"]]["total"] += bleu_1
relation_bleu_1[head_relation["relation"]]["count"] += 1
count += 1
outfile.write(json.dumps(result) + "\n")
print('gens non-none', count)
outfile_scores = open(os.path.join(os.path.dirname(input_file), os.path.basename(input_file).split('.')[0] + "_scores.jsonl"), 'w')
summary = {
'bleu1': total_bleu_1 / count,
'bleu2': total_bleu_2 / count,
'bleu3': total_bleu_3 / count,
'bleu4': total_bleu_4 / count
}
for relation in relation_bleu_1:
summary[relation] = relation_bleu_1[relation]["total"] / relation_bleu_1[relation]["count"]
outfile_scores.write(json.dumps(summary) + "\n")
excel_str = ""
for key in summary:
excel_str += str(key) + '\t'
outfile_scores.write(excel_str.strip())
outfile_scores.write("\n")
excel_str = ""
for key in summary:
excel_str += str(summary[key]) + '\t'
outfile_scores.write(excel_str.strip())
print(f"Saved gens in {outfile_path}")
return(os.path.abspath(outfile_path))
def get_tuple(l):
gens = [l["generation"]]
head = l["input"]["head"]
tails = l["references"]
relation = l["input"]["relation"]
return {"head": head, "relation": relation, "tails": tails, "generations": gens}
def get2(l):
return list(zip(*l))[1]
def topk_eval(model_name, data, k):
topk_gts = {}
topk_res = {}
instances = []
topk_exact_match = []
topk_exact_match_not_none = []
topk_bleu_score = []
topk_is_head = []
for i, l in enumerate(data):
t = get_tuple(l)
gens = t["generations"]
tails = t["tails"]
head = t["head"]
for (j, g) in enumerate(gens[:k]):
instance = t.copy()
instance["generation"] = g
instances.append(instance)
key = str(i) + "_" + str(j)
topk_gts[key] = tails
topk_res[key] = [g]
if g in tails:
topk_exact_match.append((l, 1))
if g != "none":
topk_exact_match_not_none.append((l, 1))
else:
topk_exact_match.append((l, 0))
if g != "none":
topk_exact_match_not_none.append((l, 0))
if g == head:
topk_is_head.append((l, 1))
else:
topk_is_head.append((l, 0))
QGEval = QGEvalCap(model_name, topk_gts, topk_res)
score, scores = QGEval.evaluate()
return score, scores, instances
def eval(data_file, model_name):
data = read_jsonl(data_file)
if len(data) == 0:
return None
return topk_eval(model_name, data, k=1)
def toRow(name, results, columns):
return [name] + [format(float(results[c]), '#.3f') for c in columns]
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--input_file', type=str, help='Results file on ATOMIC2020 test set')
args = parser.parse_args()
generations_file = preprocess_generations(args)
input_file = generations_file
expts = [
[input_file, os.path.basename(input_file).split('.')[0]]
]
scores_per_model = []
add_column = True
for f, m in expts:
result_file = './results/{}_scores.jsonl'.format(m)
s, scores, instances = eval(f, model_name=m)
if s == None:
print("Skipping ", m)
continue
for k in scores.keys():
assert len(scores[k]) == len(instances)
results = {"model": m, "scores": s, "all_scores": scores, "instances": instances}
write_jsonl(result_file, [results])
scores_per_model.append(results)
columns = list(results["scores"].keys())
s_row = toRow(results["model"], results["scores"], columns)
if add_column:
rows = [[""] + columns]
add_column = False
rows.append(s_row)
import datetime
date = datetime.datetime.now().strftime("%Y%m%d%H%M%S")
print(scores_per_model)
write_jsonl('./results/scores_{}.jsonl'.format(date), scores_per_model)
print(tabulate(rows, headers='firstrow', tablefmt='latex', floatfmt='#.3f'))
print(tabulate(rows, tablefmt='tsv', floatfmt='#.3f'))
if __name__ == "__main__":
main()
| comet-atomic-2020-master | system_eval/automatic_eval.py |
comet-atomic-2020-master | system_eval/evaluation/__init__.py |
|
from evaluation.bleu.bleu import Bleu
from evaluation.meteor.meteor_nltk import Meteor
from evaluation.rouge.rouge import Rouge
from evaluation.cider.cider import Cider
from evaluation.bert_score.bert_score import BertScore
from collections import defaultdict
from argparse import ArgumentParser
import sys
import json
#reload(sys)
#sys.setdefaultencoding('utf-8')
class QGEvalCap:
def __init__(self, model_key, gts, res, results_file=None):
self.gts = gts
self.res = res
self.results_file = results_file
self.model_key = model_key
def evaluate(self):
output = []
scorers = [
(Bleu(4), ["Bleu_1", "Bleu_2", "Bleu_3", "Bleu_4"]),
(Meteor(),"METEOR"),
(Rouge(), "ROUGE_L"),
(Cider(), "CIDEr"),
(BertScore(), "Bert Score")
]
# =================================================
# Compute scores
# =================================================
score_dict = {}
scores_dict = {}
#scores_dict["model_key"] = self.model_key
for scorer, method in scorers:
# print 'computing %s score...'%(scorer.method())
score, scores = scorer.compute_score(self.gts, self.res)
if type(method) == list:
for sc, scs, m in zip(score, scores, method):
#print("%s: %0.5f"%(m, sc))
output.append(sc)
score_dict[m] = str(sc)
scores_dict[m] = list(scs)
else:
#print("%s: %0.5f"%(method, score))
output.append(score)
score_dict[method] = score
scores_dict[method] = list(scores)
if self.results_file != None:
with open(self.results_file, "a") as f:
f.write(json.dumps(score_dict)+"\n")
return score_dict, scores_dict
def eval(model_key, sources, references, predictions, results_file=None):
"""
Given a filename, calculate the metric scores for that prediction file
isDin: boolean value to check whether input file is DirectIn.txt
"""
pairs = []
for tup in sources:
pair = {}
pair['tokenized_sentence'] = tup
pairs.append(pair)
cnt = 0
for line in references:
pairs[cnt]['tokenized_question'] = line
cnt += 1
output = predictions
for idx, pair in enumerate(pairs):
pair['prediction'] = output[idx]
## eval
from evaluation.eval import QGEvalCap
import json
from json import encoder
encoder.FLOAT_REPR = lambda o: format(o, '.4f')
res = defaultdict(lambda: [])
gts = defaultdict(lambda: [])
for pair in pairs[:]:
key = pair['tokenized_sentence']
#res[key] = [pair['prediction']]
res[key] = pair['prediction']
## gts
gts[key].append(pair['tokenized_question'])
QGEval = QGEvalCap(model_key, gts, res, results_file)
return QGEval.evaluate()
def preprocess(file_name, keys):
with open(file_name) as f:
data = f.readlines()
generations = [json.loads(elem) for elem in data]
predictions = {}
references = {}
sources = {}
keys_list = keys if keys is not None else list(generations[0]["generations"].keys())
for key in keys_list:
references[key] = []
predictions[key] = []
sources[key] = []
for elem in generations:
label = elem["label"]
hyp = elem["hyp"+label]
for key in keys_list:
if key in elem["generations"]:
references[key].append(hyp)
predictions[key].append(elem["generations"][key])
sources[key].append((elem["obs1"], elem["obs2"]))
return sources, references, predictions
if __name__ == "__main__":
parser = ArgumentParser()
parser.add_argument("-gen_file", "--gen_file", dest="gen_file", help="generations file with gold/references")
parser.add_argument("--keys", type=str, default=None, help="comma-separated list of model keys")
parser.add_argument("--results_file", default="eval_results.jsonl")
args = parser.parse_args()
print("scores: \n")
keys=None
if args.keys:
keys = args.keys.split(",")
sources, references, predictions = preprocess(args.gen_file, keys)
for key in references.keys():
print("\nEvaluating %s" %key)
eval(key, sources[key], references[key], predictions[key], args.results_file)
| comet-atomic-2020-master | system_eval/evaluation/eval.py |
# Filename: cider.py
#
# Description: Describes the class to compute the CIDEr (Consensus-Based Image Description Evaluation) Metric
# by Vedantam, Zitnick, and Parikh (http://arxiv.org/abs/1411.5726)
#
# Creation Date: Sun Feb 8 14:16:54 2015
#
# Authors: Ramakrishna Vedantam <[email protected]> and Tsung-Yi Lin <[email protected]>
from evaluation.cider.cider_scorer import CiderScorer
import pdb
class Cider:
"""
Main Class to compute the CIDEr metric
"""
def __init__(self, test=None, refs=None, n=4, sigma=6.0):
# set cider to sum over 1 to 4-grams
self._n = n
# set the standard deviation parameter for gaussian penalty
self._sigma = sigma
def compute_score(self, gts, res):
"""
Main function to compute CIDEr score
:param hypo_for_image (dict) : dictionary with key <image> and value <tokenized hypothesis / candidate sentence>
ref_for_image (dict) : dictionary with key <image> and value <tokenized reference sentence>
:return: cider (float) : computed CIDEr score for the corpus
"""
assert(gts.keys() == res.keys())
imgIds = gts.keys()
cider_scorer = CiderScorer(n=self._n, sigma=self._sigma)
for id in imgIds:
hypo = res[id]
ref = gts[id]
# Sanity check.
assert(type(hypo) is list)
assert(len(hypo) == 1)
assert(type(ref) is list)
assert(len(ref) > 0)
cider_scorer += (hypo[0], ref)
(score, scores) = cider_scorer.compute_score()
return score, scores
def method(self):
return "CIDEr"
| comet-atomic-2020-master | system_eval/evaluation/cider/cider.py |
__author__ = 'tylin'
| comet-atomic-2020-master | system_eval/evaluation/cider/__init__.py |