python_code (stringlengths 0–187k) | repo_name (stringlengths 8–46) | file_path (stringlengths 6–135)
---|---|---|
from allennlp_demo.atis_parser.api import AtisParserModelEndpoint
from allennlp_demo.common.testing import ModelEndpointTestCase
class TestAtisParserModelEndpoint(ModelEndpointTestCase):
endpoint = AtisParserModelEndpoint()
predict_input = {"utterance": "show me the flights from detroit to westchester county"}
| allennlp-demo-main | api/allennlp_demo/atis_parser/test_api.py |
| allennlp-demo-main | api/allennlp_demo/roberta_snli/__init__.py |
import os
from allennlp_demo.common import config, http
class RobertaSnliModelEndpoint(http.ModelEndpoint):
def __init__(self):
c = config.Model.from_file(os.path.join(os.path.dirname(__file__), "model.json"))
super().__init__(c)
if __name__ == "__main__":
endpoint = RobertaSnliModelEndpoint()
endpoint.run()
| allennlp-demo-main | api/allennlp_demo/roberta_snli/api.py |
from allennlp_demo.common.testing import ModelEndpointTestCase
from allennlp_demo.roberta_snli.api import RobertaSnliModelEndpoint
class TestRobertaSnliModelEndpoint(ModelEndpointTestCase):
endpoint = RobertaSnliModelEndpoint()
predict_input = {
"hypothesis": "Two women are sitting on a blanket near some rocks talking about politics.",
"premise": "Two women are wandering along the shore drinking iced tea.",
}
| allennlp-demo-main | api/allennlp_demo/roberta_snli/test_api.py |
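For quick local debugging, the same inputs that ModelEndpointTestCase feeds through the test above can be passed directly to the endpoint's predict() method. This is a minimal sketch, assuming only that http.ModelEndpoint exposes predict() (the vilbert_vqa endpoint further down overrides exactly that method):

from allennlp_demo.roberta_snli.api import RobertaSnliModelEndpoint

if __name__ == "__main__":
    endpoint = RobertaSnliModelEndpoint()
    result = endpoint.predict(
        {
            "premise": "Two women are wandering along the shore drinking iced tea.",
            "hypothesis": "Two women are sitting on a blanket near some rocks talking about politics.",
        }
    )
    # The exact keys depend on the underlying AllenNLP predictor; we only assume the
    # result is a JSON-serializable dict.
    print(result)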
| allennlp-demo-main | api/allennlp_demo/nmn_drop/__init__.py |
import os
from allennlp.common.util import import_submodules
from allennlp_demo.common import config, http
class NMNDropModelEndpoint(http.ModelEndpoint):
def __init__(self):
import_submodules("semqa")
c = config.Model.from_file(os.path.join(os.path.dirname(__file__), "model.json"))
super().__init__(c)
if __name__ == "__main__":
endpoint = NMNDropModelEndpoint()
endpoint.run()
| allennlp-demo-main | api/allennlp_demo/nmn_drop/api.py |
from overrides import overrides
from allennlp_demo.nmn_drop.api import NMNDropModelEndpoint
from allennlp_demo.common.testing import RcModelEndpointTestCase
class TestNMNDropModelEndpoint(RcModelEndpointTestCase):
endpoint = NMNDropModelEndpoint()
@overrides
def check_predict_result(self, result):
assert result["answer"] is not None
assert len(result["inputs"]) > 0
assert len(result["program_execution"]) > 0
| allennlp-demo-main | api/allennlp_demo/nmn_drop/test_api.py |
| allennlp-demo-main | api/allennlp_demo/vilbert_vqa/__init__.py |
import os
import tempfile
from base64 import standard_b64decode
from allennlp.common.util import JsonDict
from allennlp_demo.common import config, http
class VilbertVqaModelEndpoint(http.ModelEndpoint):
def __init__(self):
c = config.Model.from_file(os.path.join(os.path.dirname(__file__), "model.json"))
super().__init__(c)
def predict(self, inputs: JsonDict):
result = None
image_url = inputs.get("image_url")
if image_url is not None:
result = super().predict({"question": inputs["question"], "image": image_url})
else:
image = inputs.get("image")
if image is not None:
image_base64 = image["image_base64"]
if image_base64 is not None:
with tempfile.NamedTemporaryFile(prefix=f"{self.__class__.__name__}-") as f:
f.write(standard_b64decode(image_base64))
f.flush()
result = super().predict({"question": inputs["question"], "image": f.name})
if result is None:
raise ValueError("No image found in request.")
results = [
{"answer": token, "confidence": score * 100}
for token, score in result["tokens"].items()
if not token.startswith("@@")
]
results.sort(key=lambda x: -x["confidence"])
return results[:45] # Jon only wants the first 45 results.
def load_interpreters(self):
# The interpreters don't work with this model right now.
return {}
def load_attackers(self):
# The attackers don't work with this model right now.
return {}
if __name__ == "__main__":
endpoint = VilbertVqaModelEndpoint()
endpoint.run()
| allennlp-demo-main | api/allennlp_demo/vilbert_vqa/api.py |
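The predict() override above accepts either an image_url or an inline base64 upload. A sketch of the two request shapes it handles (the local file name is a placeholder):

from base64 import standard_b64encode

url_request = {
    "question": "What game are they playing?",
    "image_url": "https://storage.googleapis.com/allennlp-public-data/vqav2/baseball.jpg",
}

# Hypothetical local image; the endpoint decodes image_base64 into a temporary file
# before delegating to the underlying predictor.
with open("some_local_image.jpg", "rb") as f:
    upload_request = {
        "question": "What game are they playing?",
        "image": {"image_base64": standard_b64encode(f.read()).decode("ascii")},
    }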
from overrides import overrides
from allennlp_demo.common.testing import RcModelEndpointTestCase
from allennlp_demo.vilbert_vqa.api import VilbertVqaModelEndpoint
class TestVilbertVqaModelEndpoint(RcModelEndpointTestCase):
endpoint = VilbertVqaModelEndpoint()
predict_input = {
"question": "What game are they playing?",
"image_url": "https://storage.googleapis.com/allennlp-public-data/vqav2/baseball.jpg",
}
@overrides
def check_predict_result(self, result):
assert len(result) > 0
for answer in result:
assert "answer" in answer
assert "confidence" in answer
assert 0.0 <= answer["confidence"] <= 100.0
| allennlp-demo-main | api/allennlp_demo/vilbert_vqa/test_api.py |
| allennlp-demo-main | api/allennlp_demo/binary_gender_bias_mitigated_roberta_snli/__init__.py |
import os
from allennlp_demo.common import config, http
class BinaryGenderBiasMitigatedRobertaSnliModelEndpoint(http.ModelEndpoint):
def __init__(self):
c = config.Model.from_file(os.path.join(os.path.dirname(__file__), "model.json"))
super().__init__(c)
if __name__ == "__main__":
endpoint = BinaryGenderBiasMitigatedRobertaSnliModelEndpoint()
endpoint.run()
| allennlp-demo-main | api/allennlp_demo/binary_gender_bias_mitigated_roberta_snli/api.py |
from allennlp_demo.common.testing import ModelEndpointTestCase
from allennlp_demo.binary_gender_bias_mitigated_roberta_snli.api import (
BinaryGenderBiasMitigatedRobertaSnliModelEndpoint,
)
class TestBinaryGenderBiasMitigatedRobertaSnliModelEndpoint(ModelEndpointTestCase):
endpoint = BinaryGenderBiasMitigatedRobertaSnliModelEndpoint()
predict_input = {
"premise": "An accountant can afford a computer.",
"hypothesis": "A gentleman can afford a computer.",
}
| allennlp-demo-main | api/allennlp_demo/binary_gender_bias_mitigated_roberta_snli/test_api.py |
| allennlp-demo-main | api/allennlp_demo/coref/__init__.py |
import os
from allennlp_demo.common import config, http
class CorefModelEndpoint(http.ModelEndpoint):
def __init__(self):
c = config.Model.from_file(os.path.join(os.path.dirname(__file__), "model.json"))
super().__init__(c)
if __name__ == "__main__":
endpoint = CorefModelEndpoint()
endpoint.run()
| allennlp-demo-main | api/allennlp_demo/coref/api.py |
from allennlp_demo.common.testing import ModelEndpointTestCase
from allennlp_demo.coref.api import CorefModelEndpoint
class TestCorefModelEndpoint(ModelEndpointTestCase):
endpoint = CorefModelEndpoint()
predict_input = {"document": "The woman reading a newspaper sat on the bench with her dog."}
| allennlp-demo-main | api/allennlp_demo/coref/test_api.py |
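The api.py/test_api.py pairs above all follow the same shape: subclass http.ModelEndpoint, load model.json from the module directory, and pair the endpoint with a ModelEndpointTestCase that sets endpoint and predict_input. A hypothetical new endpoint (all names and fields below are placeholders) would therefore look like:

import os
from allennlp_demo.common import config, http
from allennlp_demo.common.testing import ModelEndpointTestCase


class MyTaskModelEndpoint(http.ModelEndpoint):  # hypothetical endpoint name
    def __init__(self):
        c = config.Model.from_file(os.path.join(os.path.dirname(__file__), "model.json"))
        super().__init__(c)


class TestMyTaskModelEndpoint(ModelEndpointTestCase):
    endpoint = MyTaskModelEndpoint()
    predict_input = {"some_field": "example input"}  # placeholder predictor inputs


if __name__ == "__main__":
    MyTaskModelEndpoint().run()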
"""
Script to verify that all demo models are covered by CI in our GitHub Actions workflow.
"""
import yaml
import os
import logging
from typing import Iterable
WORKFLOW_FILE_PATH = ".github/workflows/api_ci.yml"
logging.basicConfig()
# These are endpoints we have tests for that aren't models; this script skips them.
NON_MODEL_ENDPOINTS = {"tasks", "model_cards"}
def find_models() -> Iterable[str]:
for name in os.listdir("api/allennlp_demo"):
if name.startswith("."):
continue
path = os.path.join("api/allennlp_demo/", name)
if not os.path.isdir(path):
continue
config_path = os.path.join(path, "model.json")
if not os.path.isfile(config_path):
continue
yield name
def main():
with open(WORKFLOW_FILE_PATH) as workflow_file:
workflow = yaml.load(workflow_file, Loader=yaml.FullLoader)
tested_models = set(
workflow["jobs"]["endpoint_test"]["strategy"]["matrix"]["model"]
)
all_models = set(find_models())
for model in all_models:
assert model in tested_models, (
f"test for '{model}' model not found in {WORKFLOW_FILE_PATH}. "
f"Did you forget to add '{model}' to the 'Endpoint Test' model matrix?"
)
for model in tested_models:
if model in NON_MODEL_ENDPOINTS:
continue
assert model in all_models, (
f"'{model}' is in the GitHub Actions 'Endpoint Test' job, but does not "
f"appear to correspond to an actual demo model in 'api/allennlp_demo'. "
f"Did you forget to delete '{model}' from the 'Endpoint Test' model matrix?"
)
if __name__ == "__main__":
main()
| allennlp-demo-main | dev/check_models_ci.py |
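check_models_ci.py assumes the GitHub Actions workflow lists the tested models under jobs.endpoint_test.strategy.matrix.model. A self-contained sketch of that structure (the model names below are placeholders, not the real matrix):

import yaml

EXAMPLE_WORKFLOW = """
jobs:
  endpoint_test:
    strategy:
      matrix:
        model:
          - atis_parser
          - roberta_snli
          - tasks  # non-model endpoint, skipped via NON_MODEL_ENDPOINTS
"""

workflow = yaml.safe_load(EXAMPLE_WORKFLOW)
tested_models = set(workflow["jobs"]["endpoint_test"]["strategy"]["matrix"]["model"])
assert "atis_parser" in tested_models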
from setuptools import setup, find_packages
import os
# PEP0440 compatible formatted version, see:
# https://www.python.org/dev/peps/pep-0440/
#
# release markers:
# X.Y
# X.Y.Z # For bugfix releases
#
# pre-release markers:
# X.YaN # Alpha release
# X.YbN # Beta release
# X.YrcN # Release Candidate
# X.Y # Final release
# version.py defines the VERSION and VERSION_SHORT variables.
# We use exec here so we don't import allennlp_semparse whilst setting up.
VERSION = {}
with open("allennlp_semparse/version.py") as version_file:
exec(version_file.read(), VERSION)
# Load requirements.txt with a special case for allennlp so we can handle
# cross-library integration testing.
with open("requirements.txt") as requirements_file:
import re
def requirement_is_allennlp(req: str) -> bool:
if req == "allennlp":
return True
if re.match(r"^allennlp[>=<]", req):
return True
if re.match(r"^(git\+)?(https|ssh)://(git@)?github\.com/.*/allennlp\.git", req):
return True
return False
def fix_url_dependencies(req: str) -> str:
"""Pip and setuptools disagree about how URL dependencies should be handled."""
m = re.match(
r"^(git\+)?(https|ssh)://(git@)?github\.com/([\w-]+)/(?P<name>[\w-]+)\.git", req
)
if m is None:
return req
else:
return f"{m.group('name')} @ {req}"
install_requirements = []
allennlp_requirements = []
for line in requirements_file:
line = line.strip()
if line.startswith("#") or len(line) <= 0:
continue
if requirement_is_allennlp(line):
allennlp_requirements.append(line)
else:
install_requirements.append(line)
assert len(allennlp_requirements) == 1
allennlp_override = os.environ.get("ALLENNLP_VERSION_OVERRIDE")
if allennlp_override is not None:
if len(allennlp_override) > 0:
allennlp_requirements = [allennlp_override]
else:
allennlp_requirements = []
install_requirements.extend(allennlp_requirements)
install_requirements = [fix_url_dependencies(req) for req in install_requirements]
setup(
name="allennlp_semparse",
version=VERSION["VERSION"],
description=(
"A framework for building semantic parsers (including neural "
"module networks) with AllenNLP, built by the authors of AllenNLP"
),
long_description=open("README.md").read(),
long_description_content_type="text/markdown",
classifiers=[
"Intended Audience :: Science/Research",
"Development Status :: 3 - Alpha",
"License :: OSI Approved :: Apache Software License",
"Programming Language :: Python :: 3.6",
"Topic :: Scientific/Engineering :: Artificial Intelligence",
],
keywords="allennlp NLP deep learning machine reading semantic parsing parsers",
url="https://github.com/allenai/allennlp-semparse",
author="Allen Institute for Artificial Intelligence",
author_email="[email protected]",
license="Apache",
packages=find_packages(exclude=["*.tests", "*.tests.*", "tests.*", "tests"]),
install_requires=install_requirements,
include_package_data=True,
python_requires=">=3.6.1",
zip_safe=False,
)
| allennlp-semparse-master | setup.py |
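setup.py above exec()s allennlp_semparse/version.py instead of importing the package, so installation never triggers package imports. The real file is not shown here; a minimal sketch of the variables it is expected to define (the version numbers are placeholders):

# Hypothetical allennlp_semparse/version.py; only VERSION and VERSION_SHORT are assumed,
# since setup.py reads VERSION["VERSION"] after the exec().
_MAJOR = "0"
_MINOR = "0"
_PATCH = "1"

VERSION_SHORT = f"{_MAJOR}.{_MINOR}"
VERSION = f"{_MAJOR}.{_MINOR}.{_PATCH}"

Setting the ALLENNLP_VERSION_OVERRIDE environment variable before installation replaces the pinned allennlp requirement (an empty string drops it entirely), which is what the cross-library integration-testing comment in setup.py refers to.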
from .semparse_test_case import SemparseTestCase, ModelTestCase
| allennlp-semparse-master | tests/__init__.py |
import pathlib
from allennlp.common.testing import AllenNlpTestCase, ModelTestCase as AllenNlpModelTestCase
# These imports are to get all of the items registered that we need.
from allennlp_semparse import models, dataset_readers, predictors
ROOT = (pathlib.Path(__file__).parent / "..").resolve()
class SemparseTestCase(AllenNlpTestCase):
PROJECT_ROOT = ROOT
MODULE_ROOT = PROJECT_ROOT / "allennlp_semparse"
TOOLS_ROOT = None # just removing the reference from super class
TESTS_ROOT = PROJECT_ROOT / "tests"
FIXTURES_ROOT = PROJECT_ROOT / "test_fixtures"
class ModelTestCase(AllenNlpModelTestCase):
PROJECT_ROOT = ROOT
MODULE_ROOT = PROJECT_ROOT / "allennlp_semparse"
TOOLS_ROOT = None # just removing the reference from super class
TESTS_ROOT = PROJECT_ROOT / "tests"
FIXTURES_ROOT = PROJECT_ROOT / "test_fixtures"
| allennlp-semparse-master | tests/semparse_test_case.py |
from .. import SemparseTestCase
from allennlp_semparse.dataset_readers import NlvrDatasetReader
from allennlp_semparse.domain_languages import NlvrLanguage
class TestNlvrDatasetReader(SemparseTestCase):
def test_reader_reads_ungrouped_data(self):
test_file = str(self.FIXTURES_ROOT / "data" / "nlvr" / "sample_ungrouped_data.jsonl")
dataset = NlvrDatasetReader().read(test_file)
instances = list(dataset)
assert len(instances) == 3
instance = instances[0]
assert instance.fields.keys() == {
"sentence",
"agenda",
"worlds",
"actions",
"labels",
"identifier",
"metadata",
}
sentence_tokens = instance.fields["sentence"].tokens
expected_tokens = [
"There",
"is",
"a",
"circle",
"closely",
"touching",
"a",
"corner",
"of",
"a",
"box",
".",
]
assert [t.text for t in sentence_tokens] == expected_tokens
actions = [action.rule for action in instance.fields["actions"].field_list]
assert len(actions) == 115
agenda = [item.sequence_index for item in instance.fields["agenda"].field_list]
agenda_strings = [actions[rule_id] for rule_id in agenda]
assert set(agenda_strings) == set(
[
"<Set[Object]:Set[Object]> -> circle",
"<Set[Object]:bool> -> object_exists",
"<Set[Object]:Set[Object]> -> touch_corner",
]
)
worlds = [world_field.as_tensor({}) for world_field in instance.fields["worlds"].field_list]
assert isinstance(worlds[0], NlvrLanguage)
label = instance.fields["labels"].field_list[0].label
assert label == "true"
def test_agenda_indices_are_correct(self):
reader = NlvrDatasetReader()
test_file = str(self.FIXTURES_ROOT / "data" / "nlvr" / "sample_ungrouped_data.jsonl")
dataset = reader.read(test_file)
instances = list(dataset)
instance = instances[0]
sentence_tokens = instance.fields["sentence"].tokens
sentence = " ".join([t.text for t in sentence_tokens])
agenda = [item.sequence_index for item in instance.fields["agenda"].field_list]
actions = [action.rule for action in instance.fields["actions"].field_list]
agenda_actions = [actions[i] for i in agenda]
world = instance.fields["worlds"].field_list[0].as_tensor({})
expected_agenda_actions = world.get_agenda_for_sentence(sentence)
assert expected_agenda_actions == agenda_actions
def test_reader_reads_grouped_data(self):
test_file = str(self.FIXTURES_ROOT / "data" / "nlvr" / "sample_grouped_data.jsonl")
dataset = NlvrDatasetReader().read(test_file)
instances = list(dataset)
assert len(instances) == 2
instance = instances[0]
assert instance.fields.keys() == {
"sentence",
"agenda",
"worlds",
"actions",
"labels",
"identifier",
"metadata",
}
sentence_tokens = instance.fields["sentence"].tokens
expected_tokens = [
"There",
"is",
"a",
"circle",
"closely",
"touching",
"a",
"corner",
"of",
"a",
"box",
".",
]
assert [t.text for t in sentence_tokens] == expected_tokens
actions = [action.rule for action in instance.fields["actions"].field_list]
assert len(actions) == 115
agenda = [item.sequence_index for item in instance.fields["agenda"].field_list]
agenda_strings = [actions[rule_id] for rule_id in agenda]
assert set(agenda_strings) == set(
[
"<Set[Object]:Set[Object]> -> circle",
"<Set[Object]:Set[Object]> -> touch_corner",
"<Set[Object]:bool> -> object_exists",
]
)
worlds = [world_field.as_tensor({}) for world_field in instance.fields["worlds"].field_list]
assert all([isinstance(world, NlvrLanguage) for world in worlds])
labels = [label.label for label in instance.fields["labels"].field_list]
assert labels == ["true", "false", "true", "false"]
def test_reader_reads_processed_data(self):
# Processed data contains action sequences that yield the correct denotations, obtained from
# an offline search.
test_file = str(self.FIXTURES_ROOT / "data" / "nlvr" / "sample_processed_data.jsonl")
dataset = NlvrDatasetReader().read(test_file)
instances = list(dataset)
assert len(instances) == 2
instance = instances[0]
assert instance.fields.keys() == {
"sentence",
"target_action_sequences",
"worlds",
"actions",
"labels",
"identifier",
"metadata",
}
all_action_sequence_indices = instance.fields["target_action_sequences"].field_list
assert len(all_action_sequence_indices) == 20
action_sequence_indices = [
item.sequence_index for item in all_action_sequence_indices[0].field_list
]
actions = [action.rule for action in instance.fields["actions"].field_list]
action_sequence = [actions[rule_id] for rule_id in action_sequence_indices]
assert action_sequence == [
"@start@ -> bool",
"bool -> [<Set[Object]:bool>, Set[Object]]",
"<Set[Object]:bool> -> object_exists",
"Set[Object] -> [<Set[Object]:Set[Object]>, Set[Object]]",
"<Set[Object]:Set[Object]> -> touch_corner",
"Set[Object] -> [<Set[Object]:Set[Object]>, Set[Object]]",
"<Set[Object]:Set[Object]> -> circle",
"Set[Object] -> all_objects",
]
| allennlp-semparse-master | tests/dataset_readers/nlvr_test.py |
from allennlp.common.file_utils import cached_path
from allennlp_semparse.dataset_readers import AtisDatasetReader
from .. import SemparseTestCase
from allennlp_semparse.parsimonious_languages.worlds import AtisWorld
class TestAtisReader(SemparseTestCase):
def test_atis_keep_unparseable(self):
database_file = cached_path("https://allennlp.s3.amazonaws.com/datasets/atis/atis.db")
reader = AtisDatasetReader(database_file=database_file, keep_if_unparseable=True)
instance = reader.text_to_instance(
utterances=["show me the one way flights from detroit me to westchester county"],
sql_query_labels=["this is not a query that can be parsed"],
)
# If we have a query that can't be parsed, we check that it only has one element in the list
# of index fields and that index is the padding index, -1.
assert len(instance.fields["target_action_sequence"].field_list) == 1
assert instance.fields["target_action_sequence"].field_list[0].sequence_index == -1
def test_atis_read_from_file(self):
data_path = SemparseTestCase.FIXTURES_ROOT / "data" / "atis" / "sample.json"
database_file = "https://allennlp.s3.amazonaws.com/datasets/atis/atis.db"
reader = AtisDatasetReader(database_file=database_file)
instances = list(reader.read(str(data_path)))
assert len(instances) == 13
instance = instances[0]
assert set(instance.fields.keys()) == {
"utterance",
"actions",
"world",
"sql_queries",
"target_action_sequence",
"linking_scores",
}
assert [t.text for t in instance.fields["utterance"].tokens] == [
"show",
"me",
"the",
"one",
"way",
"flights",
"from",
"detroit",
"to",
"westchester",
"county",
]
assert isinstance(instance.fields["world"].as_tensor({}), AtisWorld)
world = instance.fields["world"].metadata
assert set(world.valid_actions["number"]) == {
'number -> ["1"]',
'number -> ["0"]',
'number -> ["41"]',
'number -> ["60"]',
}
assert world.linked_entities["string"]["airport_airport_code_string -> [\"'DTW'\"]"][2] == [
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
] # ``detroit`` -> ``DTW``
assert world.linked_entities["string"]["flight_stop_stop_airport_string -> [\"'DTW'\"]"][
2
] == [
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
] # ``detroit`` -> ``DTW``
assert world.linked_entities["string"]["city_city_code_string -> [\"'DDTT'\"]"][2] == [
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
] # ``detroit`` -> ``DDTT``
assert world.linked_entities["string"]["fare_basis_economy_string -> [\"'NO'\"]"][2] == [
0,
0,
0,
1,
1,
0,
0,
0,
0,
0,
0,
] # ``one way`` -> ``NO``
assert world.linked_entities["string"][
"city_city_name_string -> [\"'WESTCHESTER COUNTY'\"]"
][2] == [
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
1,
] # ``westchester county`` -> ``WESTCHESTER COUNTY``
assert world.linked_entities["string"]["city_city_code_string -> [\"'HHPN'\"]"][2] == [
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
1,
] # ``westchester county`` -> ``HHPN``
| allennlp-semparse-master | tests/dataset_readers/atis_test.py |
| allennlp-semparse-master | tests/dataset_readers/__init__.py |
import pytest
from .. import SemparseTestCase
from allennlp_semparse.dataset_readers.grammar_based_text2sql import (
GrammarBasedText2SqlDatasetReader,
)
@pytest.mark.skip(reason="Mark will fix in a nearby PR.")
class TestGrammarBasedText2SqlDatasetReader(SemparseTestCase):
def setup_method(self):
super().setup_method()
self.data_path = str(self.FIXTURES_ROOT / "data" / "text2sql" / "*.json")
self.schema = str(self.FIXTURES_ROOT / "data" / "text2sql" / "restaurants-schema.csv")
self.database = str(self.FIXTURES_ROOT / "data" / "text2sql" / "restaurants.db")
self.reader = GrammarBasedText2SqlDatasetReader(self.schema, self.database)
def test_reader_can_read_data_with_entity_pre_linking(self):
instances = self.reader.read(self.data_path)
instances = list(instances)
assert len(instances) == 5
fields = instances[0].fields
token_field = fields["tokens"]
tokens = [t.text for t in token_field.tokens]
assert tokens == [
"how",
"many",
"buttercup",
"kitchen",
"are",
"there",
"in",
"san",
"francisco",
"?",
]
action_sequence = fields["action_sequence"].field_list
indices = [x.sequence_index for x in action_sequence]
assert indices == [
93,
75,
78,
88,
86,
82,
39,
113,
48,
42,
2,
46,
91,
90,
102,
92,
90,
103,
118,
34,
5,
112,
21,
102,
23,
13,
34,
5,
116,
95,
16,
34,
5,
112,
21,
103,
30,
13,
34,
5,
112,
21,
102,
30,
16,
34,
5,
112,
21,
103,
27,
13,
39,
116,
96,
]
action_fields = fields["valid_actions"].field_list
production_rules = [(x.rule, x.is_global_rule) for x in action_fields]
assert production_rules == [
('arg_list -> [expr, ",", arg_list]', True),
("arg_list -> [expr]", True),
('arg_list_or_star -> ["*"]', True),
("arg_list_or_star -> [arg_list]", True),
('between_expr -> [value, "BETWEEN", value, "AND", value]', True),
("binary_expr -> [value, binaryop, expr]", True),
('binaryop -> ["*"]', True),
('binaryop -> ["+"]', True),
('binaryop -> ["-"]', True),
('binaryop -> ["/"]', True),
('binaryop -> ["<"]', True),
('binaryop -> ["<="]', True),
('binaryop -> ["<>"]', True),
('binaryop -> ["="]', True),
('binaryop -> [">"]', True),
('binaryop -> [">="]', True),
('binaryop -> ["AND"]', True),
('binaryop -> ["LIKE"]', True),
('binaryop -> ["OR"]', True),
('boolean -> ["false"]', True),
('boolean -> ["true"]', True),
('col_ref -> [table_name, ".", column_name]', True),
("col_ref -> [table_name]", True),
('column_name -> ["CITY_NAME"]', True),
('column_name -> ["COUNTY"]', True),
('column_name -> ["FOOD_TYPE"]', True),
('column_name -> ["HOUSE_NUMBER"]', True),
('column_name -> ["NAME"]', True),
('column_name -> ["RATING"]', True),
('column_name -> ["REGION"]', True),
('column_name -> ["RESTAURANT_ID"]', True),
('column_name -> ["STREET_NAME"]', True),
('expr -> ["(", query, ")"]', True),
("expr -> [between_expr]", True),
("expr -> [binary_expr]", True),
("expr -> [in_expr]", True),
("expr -> [like_expr]", True),
("expr -> [null_check_expr]", True),
("expr -> [unary_expr]", True),
("expr -> [value]", True),
('fname -> ["ALL"]', True),
('fname -> ["AVG"]', True),
('fname -> ["COUNT"]', True),
('fname -> ["MAX"]', True),
('fname -> ["MIN"]', True),
('fname -> ["SUM"]', True),
('from_clause -> ["FROM", source]', True),
('function -> [fname, "(", "DISTINCT", arg_list_or_star, ")"]', True),
('function -> [fname, "(", arg_list_or_star, ")"]', True),
('group_clause -> [expr, "," group_clause]', True),
("group_clause -> [expr]", True),
('groupby_clause -> ["GROUP", "BY" group_clause, "HAVING", expr]', True),
('groupby_clause -> ["GROUP", "BY" group_clause]', True),
('in_expr -> [value, "IN", expr]', True),
('in_expr -> [value, "IN", string_set]', True),
('in_expr -> [value, "NOT", "IN", expr]', True),
('in_expr -> [value, "NOT", "IN", string_set]', True),
('like_expr -> [value, "LIKE", string]', True),
('limit -> ["LIMIT", number]', True),
('null_check_expr -> [col_ref, "IS", "NOT", "NULL"]', True),
('null_check_expr -> [col_ref, "IS", "NULL"]', True),
('order_clause -> [ordering_term, "," order_clause]', True),
("order_clause -> [ordering_term]", True),
('orderby_clause -> ["ORDER", "BY" order_clause]', True),
('ordering -> ["ASC"]', True),
('ordering -> ["DESC"]', True),
("ordering_term -> [expr ordering]", True),
("ordering_term -> [expr]", True),
('parenval -> ["(", expr, ")"]', True),
("query -> [select_core groupby_clause, limit]", True),
("query -> [select_core groupby_clause, orderby_clause, limit]", True),
("query -> [select_core groupby_clause, orderby_clause]", True),
("query -> [select_core groupby_clause]", True),
("query -> [select_core orderby_clause, limit]", True),
("query -> [select_core orderby_clause]", True),
("query -> [select_core]", True),
("sel_res_all_star -> ['*']", True),
('sel_res_tab_star -> [table_name ".*"]', True),
("select_core -> [select_with_distinct select_results from_clause where_clause]", True),
("select_core -> [select_with_distinct select_results from_clause]", True),
("select_core -> [select_with_distinct select_results where_clause]", True),
("select_core -> [select_with_distinct select_results]", True),
("select_result -> [expr]", True),
("select_result -> [sel_res_all_star]", True),
("select_result -> [sel_res_tab_star]", True),
('select_results -> [select_result, ",", select_results]', True),
("select_results -> [select_result]", True),
('select_with_distinct -> ["SELECT", "DISTINCT"]', True),
('select_with_distinct -> ["SELECT"]', True),
('single_source -> ["(", query, ")"]', True),
("single_source -> [table_name]", True),
('source -> [single_source, ",", source]', True),
("source -> [single_source]", True),
('statement -> [query, ";"]', True),
("statement -> [query]", True),
("string -> [\"'city_name0'\"]", True),
("string -> [\"'name0'\"]", True),
("string -> [~\"'.*?'\"iu]", True),
('string_set -> ["(", string_set_vals, ")"]', True),
('string_set_vals -> [string, ",", string_set_vals]', True),
("string_set_vals -> [string]", True),
('table_name -> ["GEOGRAPHIC"]', True),
('table_name -> ["LOCATION"]', True),
('table_name -> ["RESTAURANT"]', True),
("unary_expr -> [unaryop expr]", True),
('unaryop -> ["+"]', True),
('unaryop -> ["-"]', True),
('unaryop -> ["NOT"]', True),
('unaryop -> ["not"]', True),
('value -> ["2.5"]', True),
('value -> ["YEAR(CURDATE())"]', True),
("value -> [boolean]", True),
("value -> [col_ref]", True),
("value -> [function]", True),
("value -> [number]", True),
("value -> [parenval]", True),
("value -> [string]", True),
('where_clause -> ["WHERE", expr where_conj]', True),
('where_clause -> ["WHERE", expr]', True),
('where_conj -> ["AND", expr where_conj]', True),
('where_conj -> ["AND", expr]', True),
]
| allennlp-semparse-master | tests/dataset_readers/grammar_based_text2sql_test.py |
from allennlp.common import Params
from .. import SemparseTestCase
from allennlp_semparse.dataset_readers import WikiTablesDatasetReader
from allennlp_semparse.domain_languages import WikiTablesLanguage
def assert_dataset_correct(dataset):
instances = list(dataset)
assert len(instances) == 2
instance = instances[0]
assert instance.fields.keys() == {
"question",
"metadata",
"table",
"world",
"actions",
"target_action_sequences",
"target_values",
}
question_tokens = [
"what",
"was",
"the",
"last",
"year",
"where",
"this",
"team",
"was",
"a",
"part",
"of",
"the",
"usl",
"a",
"-",
"league",
"?",
]
assert [t.text for t in instance.fields["question"].tokens] == question_tokens
assert instance.fields["metadata"].as_tensor({})["question_tokens"] == question_tokens
# The content of this will be tested indirectly by checking the actions; we'll just make
# sure we get a WikiTablesLanguage object in here.
assert isinstance(instance.fields["world"].as_tensor({}), WikiTablesLanguage)
action_fields = instance.fields["actions"].field_list
actions = [action_field.rule for action_field in action_fields]
# We should have been able to read all of the logical forms in the file. If one of them can't
# be parsed, or the action sequences can't be mapped correctly, the DatasetReader will skip the
# logical form, log an error, and keep going (i.e., it won't crash).
num_action_sequences = len(instance.fields["target_action_sequences"].field_list)
assert num_action_sequences == 10
# We should have sorted the logical forms by length. This is the action sequence
# corresponding to the shortest logical form in the examples _by tree size_, which is _not_ the
# first one in the file, or the shortest logical form by _string length_. It's also a totally
# made up logical form, just to demonstrate that we're sorting things correctly.
action_sequence = instance.fields["target_action_sequences"].field_list[0]
action_indices = [action.sequence_index for action in action_sequence.field_list]
actions = [actions[i] for i in action_indices]
assert actions == [
"@start@ -> Number",
"Number -> [<List[Row],NumberColumn:Number>, List[Row], NumberColumn]",
"<List[Row],NumberColumn:Number> -> average",
"List[Row] -> [<List[Row]:List[Row]>, List[Row]]",
"<List[Row]:List[Row]> -> last",
"List[Row] -> [<List[Row],StringColumn,List[str]:List[Row]>, List[Row], StringColumn, List[str]]",
"<List[Row],StringColumn,List[str]:List[Row]> -> filter_in",
"List[Row] -> all_rows",
"StringColumn -> string_column:league",
"List[str] -> string:usl_a_league",
"NumberColumn -> number_column:year",
]
class TestWikiTablesDatasetReader(SemparseTestCase):
def test_reader_reads(self):
offline_search_directory = (
self.FIXTURES_ROOT / "data" / "wikitables" / "action_space_walker_output"
)
params = {
"tables_directory": self.FIXTURES_ROOT / "data" / "wikitables",
"offline_logical_forms_directory": offline_search_directory,
}
reader = WikiTablesDatasetReader.from_params(Params(params))
dataset = reader.read(self.FIXTURES_ROOT / "data" / "wikitables" / "sample_data.examples")
assert_dataset_correct(dataset)
def test_reader_reads_with_lfs_in_tarball(self):
offline_search_directory = (
self.FIXTURES_ROOT
/ "data"
/ "wikitables"
/ "action_space_walker_output_with_single_tarball"
)
params = {
"tables_directory": self.FIXTURES_ROOT / "data" / "wikitables",
"offline_logical_forms_directory": offline_search_directory,
}
reader = WikiTablesDatasetReader.from_params(Params(params))
dataset = reader.read(self.FIXTURES_ROOT / "data" / "wikitables" / "sample_data.examples")
assert_dataset_correct(dataset)
| allennlp-semparse-master | tests/dataset_readers/wikitables_test.py |
from allennlp.common.util import ensure_list
from .. import SemparseTestCase
from allennlp_semparse.dataset_readers import TemplateText2SqlDatasetReader
class TestTemplateText2SqlDatasetReader(SemparseTestCase):
def test_reader(self):
reader = TemplateText2SqlDatasetReader()
instances = reader.read(
str(SemparseTestCase.FIXTURES_ROOT / "data" / "text2sql" / "*.json")
)
instances = ensure_list(instances)
fields = instances[0].fields
tokens = [t.text for t in fields["tokens"].tokens]
tags = fields["slot_tags"].labels
assert tokens == [
"how",
"many",
"buttercup",
"kitchen",
"are",
"there",
"in",
"san",
"francisco",
"?",
]
assert tags == ["O", "O", "name0", "name0", "O", "O", "O", "city_name0", "city_name0", "O"]
assert (
fields["template"].label
== "SELECT COUNT ( * ) FROM LOCATION AS LOCATIONalias0 , RESTAURANT "
"AS RESTAURANTalias0 WHERE LOCATIONalias0 . CITY_NAME = 'city_name0' "
"AND RESTAURANTalias0 . ID = LOCATIONalias0 . RESTAURANT_ID AND "
"RESTAURANTalias0 . NAME = 'name0' ;"
)
fields = instances[1].fields
tokens = [t.text for t in fields["tokens"].tokens]
tags = fields["slot_tags"].labels
assert tokens == [
"how",
"many",
"chinese",
"restaurants",
"are",
"there",
"in",
"the",
"bay",
"area",
"?",
]
assert tags == ["O", "O", "food_type0", "O", "O", "O", "O", "O", "region0", "region0", "O"]
assert (
fields["template"].label
== "SELECT COUNT ( * ) FROM GEOGRAPHIC AS GEOGRAPHICalias0 , RESTAURANT AS "
"RESTAURANTalias0 WHERE GEOGRAPHICalias0 . REGION = 'region0' AND "
"RESTAURANTalias0 . CITY_NAME = GEOGRAPHICalias0 . CITY_NAME AND "
"RESTAURANTalias0 . FOOD_TYPE = 'food_type0' ;"
)
fields = instances[2].fields
tokens = [t.text for t in fields["tokens"].tokens]
tags = fields["slot_tags"].labels
assert tokens == [
"how",
"many",
"places",
"for",
"chinese",
"food",
"are",
"there",
"in",
"the",
"bay",
"area",
"?",
]
assert tags == [
"O",
"O",
"O",
"O",
"food_type0",
"O",
"O",
"O",
"O",
"O",
"region0",
"region0",
"O",
]
assert (
fields["template"].label
== "SELECT COUNT ( * ) FROM GEOGRAPHIC AS GEOGRAPHICalias0 , RESTAURANT AS "
"RESTAURANTalias0 WHERE GEOGRAPHICalias0 . REGION = 'region0' AND "
"RESTAURANTalias0 . CITY_NAME = GEOGRAPHICalias0 . CITY_NAME AND "
"RESTAURANTalias0 . FOOD_TYPE = 'food_type0' ;"
)
fields = instances[3].fields
tokens = [t.text for t in fields["tokens"].tokens]
tags = fields["slot_tags"].labels
assert tokens == [
"how",
"many",
"chinese",
"places",
"are",
"there",
"in",
"the",
"bay",
"area",
"?",
]
assert tags == ["O", "O", "food_type0", "O", "O", "O", "O", "O", "region0", "region0", "O"]
assert (
fields["template"].label
== "SELECT COUNT ( * ) FROM GEOGRAPHIC AS GEOGRAPHICalias0 , RESTAURANT AS "
"RESTAURANTalias0 WHERE GEOGRAPHICalias0 . REGION = 'region0' AND "
"RESTAURANTalias0 . CITY_NAME = GEOGRAPHICalias0 . CITY_NAME AND "
"RESTAURANTalias0 . FOOD_TYPE = 'food_type0' ;"
)
fields = instances[4].fields
tokens = [t.text for t in fields["tokens"].tokens]
tags = fields["slot_tags"].labels
assert tokens == [
"how",
"many",
"places",
"for",
"chinese",
"are",
"there",
"in",
"the",
"bay",
"area",
"?",
]
assert tags == [
"O",
"O",
"O",
"O",
"food_type0",
"O",
"O",
"O",
"O",
"region0",
"region0",
"O",
]
assert (
fields["template"].label
== "SELECT COUNT ( * ) FROM GEOGRAPHIC AS GEOGRAPHICalias0 , RESTAURANT AS "
"RESTAURANTalias0 WHERE GEOGRAPHICalias0 . REGION = 'region0' AND "
"RESTAURANTalias0 . CITY_NAME = GEOGRAPHICalias0 . CITY_NAME AND "
"RESTAURANTalias0 . FOOD_TYPE = 'food_type0' ;"
)
| allennlp-semparse-master | tests/dataset_readers/template_text2sql_test.py |
| allennlp-semparse-master | tests/parsimonious_languages/__init__.py |
| allennlp-semparse-master | tests/parsimonious_languages/contexts/__init__.py |
| allennlp-semparse-master | tests/parsimonious_languages/worlds/__init__.py |
from datetime import datetime
import json
from parsimonious.expressions import Literal, Sequence
from allennlp.common.file_utils import cached_path
from ... import SemparseTestCase
from allennlp_semparse.parsimonious_languages.contexts.atis_tables import (
get_approximate_times,
pm_map_match_to_query_value,
)
from allennlp_semparse.parsimonious_languages.worlds.atis_world import AtisWorld
class TestAtisWorld(SemparseTestCase):
def setup_method(self):
super().setup_method()
test_filename = self.FIXTURES_ROOT / "data" / "atis" / "sample.json"
self.data = open(test_filename).readlines()
self.database_file = cached_path("https://allennlp.s3.amazonaws.com/datasets/atis/atis.db")
def test_atis_global_actions(self):
world = AtisWorld(utterances=[])
valid_actions = world.valid_actions
assert set(valid_actions.keys()) == {
"agg",
"agg_func",
"agg_results",
"aircraft_aircraft_code_string",
"aircraft_basic_type_string",
"aircraft_manufacturer_string",
"aircraft_propulsion_string",
"airline_airline_code_string",
"airline_airline_name_string",
"airport_airport_code_string",
"airport_airport_name_string",
"biexpr",
"binaryop",
"boolean",
"city_city_code_string",
"city_city_name_string",
"city_state_code_string",
"class_of_service_booking_class_string",
"class_of_service_class_description_string",
"col",
"col_ref",
"col_refs",
"condition",
"conditions",
"conj",
"days_day_name_string",
"days_days_code_string",
"distinct",
"fare_basis_booking_class_string",
"fare_basis_class_type_string",
"fare_basis_economy_string",
"fare_basis_fare_basis_code_string",
"fare_fare_basis_code_string",
"fare_one_direction_cost",
"fare_restriction_code_string",
"fare_round_trip_cost",
"fare_round_trip_required_string",
"flight_airline_code_string",
"flight_flight_days_string",
"flight_number",
"flight_stop_stop_airport_string",
"food_service_compartment_string",
"food_service_meal_description_string",
"ground_service_transport_type_string",
"group_by_clause",
"in_clause",
"number",
"pos_value",
"query",
"restriction_restriction_code_string",
"select_results",
"state_state_code_string",
"state_state_name_string",
"statement",
"table_name",
"table_refs",
"ternaryexpr",
"time_range_end",
"time_range_start",
"value",
"where_clause",
}
assert set(valid_actions["statement"]) == {'statement -> [query, ";"]'}
assert set(valid_actions["query"]) == {
'query -> ["(", "SELECT", distinct, select_results, "FROM", table_refs, '
'where_clause, ")"]',
'query -> ["(", "SELECT", distinct, select_results, "FROM", table_refs, '
'where_clause, group_by_clause, ")"]',
'query -> ["SELECT", distinct, select_results, "FROM", table_refs, ' "where_clause]",
}
assert set(valid_actions["select_results"]) == {
"select_results -> [agg]",
"select_results -> [col_refs]",
}
assert set(valid_actions["agg"]) == {
'agg -> [agg_func, "(", col, ")"]',
'agg -> [agg_func, "(", col_ref, ")"]',
}
assert set(valid_actions["agg_func"]) == {
'agg_func -> ["COUNT"]',
'agg_func -> ["MAX"]',
'agg_func -> ["MIN"]',
}
assert set(valid_actions["col_refs"]) == {
"col_refs -> [col_ref]",
'col_refs -> [col_ref, ",", col_refs]',
}
assert set(valid_actions["table_refs"]) == {
"table_refs -> [table_name]",
'table_refs -> [table_name, ",", table_refs]',
}
assert set(valid_actions["where_clause"]) == {
'where_clause -> ["WHERE", "(", conditions, ")"]',
'where_clause -> ["WHERE", conditions]',
}
assert set(valid_actions["conditions"]) == {
'conditions -> ["(", conditions, ")", conj, conditions]',
'conditions -> ["(", conditions, ")"]',
'conditions -> ["NOT", conditions]',
'conditions -> [condition, conj, "(", conditions, ")"]',
"conditions -> [condition, conj, conditions]",
"conditions -> [condition]",
}
assert set(valid_actions["condition"]) == {
"condition -> [biexpr]",
"condition -> [in_clause]",
"condition -> [ternaryexpr]",
}
assert set(valid_actions["in_clause"]) == {'in_clause -> [col_ref, "IN", query]'}
assert set(valid_actions["biexpr"]) == {
'biexpr -> ["aircraft", ".", "aircraft_code", binaryop, '
"aircraft_aircraft_code_string]",
'biexpr -> ["aircraft", ".", "basic_type", binaryop, ' "aircraft_basic_type_string]",
'biexpr -> ["aircraft", ".", "manufacturer", binaryop, '
"aircraft_manufacturer_string]",
'biexpr -> ["aircraft", ".", "propulsion", binaryop, ' "aircraft_propulsion_string]",
'biexpr -> ["airline", ".", "airline_code", binaryop, ' "airline_airline_code_string]",
'biexpr -> ["airline", ".", "airline_name", binaryop, ' "airline_airline_name_string]",
'biexpr -> ["airport", ".", "airport_code", binaryop, ' "airport_airport_code_string]",
'biexpr -> ["airport", ".", "airport_name", binaryop, ' "airport_airport_name_string]",
'biexpr -> ["city", ".", "city_code", binaryop, city_city_code_string]',
'biexpr -> ["city", ".", "city_name", binaryop, city_city_name_string]',
'biexpr -> ["city", ".", "state_code", binaryop, city_state_code_string]',
'biexpr -> ["class_of_service", ".", "booking_class", binaryop, '
"class_of_service_booking_class_string]",
'biexpr -> ["class_of_service", ".", "class_description", binaryop, '
"class_of_service_class_description_string]",
'biexpr -> ["days", ".", "day_name", binaryop, days_day_name_string]',
'biexpr -> ["days", ".", "days_code", binaryop, days_days_code_string]',
'biexpr -> ["fare", ".", "fare_basis_code", binaryop, ' "fare_fare_basis_code_string]",
'biexpr -> ["fare", ".", "one_direction_cost", binaryop, ' "fare_one_direction_cost]",
'biexpr -> ["fare", ".", "restriction_code", binaryop, '
"fare_restriction_code_string]",
'biexpr -> ["fare", ".", "round_trip_cost", binaryop, fare_round_trip_cost]',
'biexpr -> ["fare", ".", "round_trip_required", binaryop, '
"fare_round_trip_required_string]",
'biexpr -> ["fare_basis", ".", "booking_class", binaryop, '
"fare_basis_booking_class_string]",
'biexpr -> ["fare_basis", ".", "class_type", binaryop, '
"fare_basis_class_type_string]",
'biexpr -> ["fare_basis", ".", "economy", binaryop, ' "fare_basis_economy_string]",
'biexpr -> ["fare_basis", ".", "fare_basis_code", binaryop, '
"fare_basis_fare_basis_code_string]",
'biexpr -> ["flight", ".", "airline_code", binaryop, ' "flight_airline_code_string]",
'biexpr -> ["flight", ".", "flight_days", binaryop, ' "flight_flight_days_string]",
'biexpr -> ["flight", ".", "flight_number", binaryop, flight_number]',
'biexpr -> ["flight_stop", ".", "stop_airport", binaryop, '
"flight_stop_stop_airport_string]",
'biexpr -> ["food_service", ".", "compartment", binaryop, '
"food_service_compartment_string]",
'biexpr -> ["food_service", ".", "meal_description", binaryop, '
"food_service_meal_description_string]",
'biexpr -> ["ground_service", ".", "transport_type", binaryop, '
"ground_service_transport_type_string]",
'biexpr -> ["restriction", ".", "restriction_code", binaryop, '
"restriction_restriction_code_string]",
'biexpr -> ["state", ".", "state_code", binaryop, state_state_code_string]',
'biexpr -> ["state", ".", "state_name", binaryop, state_state_name_string]',
"biexpr -> [col_ref, binaryop, value]",
"biexpr -> [value, binaryop, value]",
}
assert set(valid_actions["binaryop"]) == {
'binaryop -> ["*"]',
'binaryop -> ["+"]',
'binaryop -> ["-"]',
'binaryop -> ["/"]',
'binaryop -> ["<"]',
'binaryop -> ["<="]',
'binaryop -> ["="]',
'binaryop -> [">"]',
'binaryop -> [">="]',
'binaryop -> ["IS"]',
}
assert set(valid_actions["ternaryexpr"]) == {
'ternaryexpr -> [col_ref, "BETWEEN", time_range_start, "AND", time_range_end]',
'ternaryexpr -> [col_ref, "NOT", "BETWEEN", time_range_start, "AND", '
"time_range_end]",
}
assert set(valid_actions["value"]) == {
'value -> ["NOT", pos_value]',
"value -> [pos_value]",
}
assert set(valid_actions["pos_value"]) == {
'pos_value -> ["ALL", query]',
'pos_value -> ["ANY", query]',
'pos_value -> ["NULL"]',
"pos_value -> [agg_results]",
"pos_value -> [boolean]",
"pos_value -> [col_ref]",
"pos_value -> [number]",
}
assert set(valid_actions["agg_results"]) == {
(
'agg_results -> ["(", "SELECT", distinct, agg, "FROM", table_name, '
'where_clause, ")"]'
),
'agg_results -> ["SELECT", distinct, agg, "FROM", table_name, where_clause]',
}
assert set(valid_actions["boolean"]) == {'boolean -> ["true"]', 'boolean -> ["false"]'}
assert set(valid_actions["conj"]) == {'conj -> ["OR"]', 'conj -> ["AND"]'}
assert set(valid_actions["distinct"]) == {'distinct -> [""]', 'distinct -> ["DISTINCT"]'}
assert set(valid_actions["number"]) == {
'number -> ["0"]',
'number -> ["1"]',
'number -> ["60"]',
'number -> ["41"]',
}
assert set(valid_actions["col_ref"]) == {
'col_ref -> ["*"]',
"col_ref -> [agg]",
'col_ref -> ["aircraft", ".", "aircraft_code"]',
'col_ref -> ["aircraft", ".", "aircraft_description"]',
'col_ref -> ["aircraft", ".", "basic_type"]',
'col_ref -> ["aircraft", ".", "capacity"]',
'col_ref -> ["aircraft", ".", "manufacturer"]',
'col_ref -> ["aircraft", ".", "pressurized"]',
'col_ref -> ["aircraft", ".", "propulsion"]',
'col_ref -> ["aircraft", ".", "wide_body"]',
'col_ref -> ["airline", ".", "airline_code"]',
'col_ref -> ["airline", ".", "airline_name"]',
'col_ref -> ["airport", ".", "airport_code"]',
'col_ref -> ["airport", ".", "airport_location"]',
'col_ref -> ["airport", ".", "airport_name"]',
'col_ref -> ["airport", ".", "country_name"]',
'col_ref -> ["airport", ".", "minimum_connect_time"]',
'col_ref -> ["airport", ".", "state_code"]',
'col_ref -> ["airport", ".", "time_zone_code"]',
'col_ref -> ["airport_service", ".", "airport_code"]',
'col_ref -> ["airport_service", ".", "city_code"]',
'col_ref -> ["airport_service", ".", "direction"]',
'col_ref -> ["airport_service", ".", "miles_distant"]',
'col_ref -> ["airport_service", ".", "minutes_distant"]',
'col_ref -> ["city", ".", "city_code"]',
'col_ref -> ["city", ".", "city_name"]',
'col_ref -> ["city", ".", "country_name"]',
'col_ref -> ["city", ".", "state_code"]',
'col_ref -> ["city", ".", "time_zone_code"]',
'col_ref -> ["class_of_service", ".", "booking_class"]',
'col_ref -> ["class_of_service", ".", "class_description"]',
'col_ref -> ["class_of_service", ".", "rank"]',
'col_ref -> ["date_day", ".", "day_name"]',
'col_ref -> ["days", ".", "day_name"]',
'col_ref -> ["days", ".", "days_code"]',
'col_ref -> ["equipment_sequence", ".", "aircraft_code"]',
'col_ref -> ["equipment_sequence", ".", "aircraft_code_sequence"]',
'col_ref -> ["fare", ".", "fare_airline"]',
'col_ref -> ["fare", ".", "fare_basis_code"]',
'col_ref -> ["fare", ".", "fare_id"]',
'col_ref -> ["fare", ".", "from_airport"]',
'col_ref -> ["fare", ".", "one_direction_cost"]',
'col_ref -> ["fare", ".", "restriction_code"]',
'col_ref -> ["fare", ".", "round_trip_cost"]',
'col_ref -> ["fare", ".", "round_trip_required"]',
'col_ref -> ["fare", ".", "to_airport"]',
'col_ref -> ["fare_basis", ".", "basis_days"]',
'col_ref -> ["fare_basis", ".", "booking_class"]',
'col_ref -> ["fare_basis", ".", "class_type"]',
'col_ref -> ["fare_basis", ".", "discounted"]',
'col_ref -> ["fare_basis", ".", "economy"]',
'col_ref -> ["fare_basis", ".", "fare_basis_code"]',
'col_ref -> ["fare_basis", ".", "night"]',
'col_ref -> ["fare_basis", ".", "premium"]',
'col_ref -> ["fare_basis", ".", "season"]',
'col_ref -> ["flight", ".", "aircraft_code_sequence"]',
'col_ref -> ["flight", ".", "airline_code"]',
'col_ref -> ["flight", ".", "airline_flight"]',
'col_ref -> ["flight", ".", "arrival_time"]',
'col_ref -> ["flight", ".", "connections"]',
'col_ref -> ["flight", ".", "departure_time"]',
'col_ref -> ["flight", ".", "dual_carrier"]',
'col_ref -> ["flight", ".", "flight_days"]',
'col_ref -> ["flight", ".", "flight_id"]',
'col_ref -> ["flight", ".", "flight_number"]',
'col_ref -> ["flight", ".", "from_airport"]',
'col_ref -> ["flight", ".", "meal_code"]',
'col_ref -> ["flight", ".", "stops"]',
'col_ref -> ["flight", ".", "time_elapsed"]',
'col_ref -> ["flight", ".", "to_airport"]',
'col_ref -> ["flight_fare", ".", "fare_id"]',
'col_ref -> ["flight_fare", ".", "flight_id"]',
'col_ref -> ["flight_leg", ".", "flight_id"]',
'col_ref -> ["flight_leg", ".", "leg_flight"]',
'col_ref -> ["flight_leg", ".", "leg_number"]',
'col_ref -> ["flight_stop", ".", "arrival_airline"]',
'col_ref -> ["flight_stop", ".", "arrival_flight_number"]',
'col_ref -> ["flight_stop", ".", "arrival_time"]',
'col_ref -> ["flight_stop", ".", "departure_airline"]',
'col_ref -> ["flight_stop", ".", "departure_flight_number"]',
'col_ref -> ["flight_stop", ".", "departure_time"]',
'col_ref -> ["flight_stop", ".", "flight_id"]',
'col_ref -> ["flight_stop", ".", "stop_airport"]',
'col_ref -> ["flight_stop", ".", "stop_days"]',
'col_ref -> ["flight_stop", ".", "stop_number"]',
'col_ref -> ["flight_stop", ".", "stop_time"]',
'col_ref -> ["food_service", ".", "compartment"]',
'col_ref -> ["food_service", ".", "meal_code"]',
'col_ref -> ["food_service", ".", "meal_description"]',
'col_ref -> ["food_service", ".", "meal_number"]',
'col_ref -> ["ground_service", ".", "airport_code"]',
'col_ref -> ["ground_service", ".", "city_code"]',
'col_ref -> ["ground_service", ".", "ground_fare"]',
'col_ref -> ["ground_service", ".", "transport_type"]',
'col_ref -> ["month", ".", "month_name"]',
'col_ref -> ["month", ".", "month_number"]',
'col_ref -> ["restriction", ".", "advance_purchase"]',
'col_ref -> ["restriction", ".", "application"]',
'col_ref -> ["restriction", ".", "maximum_stay"]',
'col_ref -> ["restriction", ".", "minimum_stay"]',
'col_ref -> ["restriction", ".", "no_discounts"]',
'col_ref -> ["restriction", ".", "restriction_code"]',
'col_ref -> ["restriction", ".", "saturday_stay_required"]',
'col_ref -> ["restriction", ".", "stopovers"]',
'col_ref -> ["state", ".", "country_name"]',
'col_ref -> ["state", ".", "state_code"]',
'col_ref -> ["state", ".", "state_name"]',
}
assert set(valid_actions["table_name"]) == {
'table_name -> ["aircraft"]',
'table_name -> ["airline"]',
'table_name -> ["airport"]',
'table_name -> ["airport_service"]',
'table_name -> ["city"]',
'table_name -> ["class_of_service"]',
'table_name -> ["date_day"]',
'table_name -> ["days"]',
'table_name -> ["equipment_sequence"]',
'table_name -> ["fare"]',
'table_name -> ["fare_basis"]',
'table_name -> ["flight"]',
'table_name -> ["flight_fare"]',
'table_name -> ["flight_leg"]',
'table_name -> ["flight_stop"]',
'table_name -> ["food_service"]',
'table_name -> ["ground_service"]',
'table_name -> ["month"]',
'table_name -> ["restriction"]',
'table_name -> ["state"]',
}
def test_atis_local_actions(self):
# Check that the triggers activate correctly
world = AtisWorld(["show me the flights from denver at 12 o'clock"])
assert set(world.valid_actions["number"]) == {
'number -> ["0"]',
'number -> ["1"]',
'number -> ["60"]',
'number -> ["41"]',
'number -> ["1200"]',
'number -> ["2400"]',
}
world = AtisWorld(
[
"show me the flights from denver at 12 o'clock",
"show me the delta or united flights in afternoon",
]
)
assert set(world.valid_actions["number"]) == {
'number -> ["0"]',
'number -> ["1"]',
'number -> ["60"]',
'number -> ["41"]',
'number -> ["1200"]',
'number -> ["2400"]',
}
world = AtisWorld(
[
"i would like one coach reservation for \
may ninth from pittsburgh to atlanta leaving \
pittsburgh before 10 o'clock in morning 1991 \
august twenty sixth"
]
)
assert set(world.valid_actions["number"]) == {
'number -> ["0"]',
'number -> ["1"]',
'number -> ["60"]',
'number -> ["41"]',
'number -> ["1200"]',
'number -> ["2200"]',
'number -> ["1000"]',
}
assert set(world.valid_actions["time_range_start"]) == {'time_range_start -> ["0"]'}
assert set(world.valid_actions["time_range_end"]) == {
'time_range_end -> ["1200"]',
'time_range_end -> ["800"]',
}
assert set(world.valid_actions["day_number"]) == {
'day_number -> ["26"]',
'day_number -> ["9"]',
}
assert set(world.valid_actions["month_number"]) == {
'month_number -> ["5"]',
'month_number -> ["8"]',
}
assert set(world.valid_actions["year_number"]) == {'year_number -> ["1991"]'}
def test_atis_simple_action_sequence(self):
world = AtisWorld(
[("give me all flights from boston to " "philadelphia next week arriving after lunch")]
)
action_sequence = world.get_action_sequence(
(
"(SELECT DISTINCT city . city_code , city . city_name "
"FROM city WHERE ( city.city_name = 'BOSTON' ) );"
)
)
assert action_sequence == [
'statement -> [query, ";"]',
'query -> ["(", "SELECT", distinct, select_results, "FROM", table_refs, '
'where_clause, ")"]',
'distinct -> ["DISTINCT"]',
"select_results -> [col_refs]",
'col_refs -> [col_ref, ",", col_refs]',
'col_ref -> ["city", ".", "city_code"]',
"col_refs -> [col_ref]",
'col_ref -> ["city", ".", "city_name"]',
"table_refs -> [table_name]",
'table_name -> ["city"]',
'where_clause -> ["WHERE", "(", conditions, ")"]',
"conditions -> [condition]",
"condition -> [biexpr]",
'biexpr -> ["city", ".", "city_name", binaryop, city_city_name_string]',
'binaryop -> ["="]',
"city_city_name_string -> [\"'BOSTON'\"]",
]
action_sequence = world.get_action_sequence(
(
"( SELECT airport_service . airport_code "
"FROM airport_service "
"WHERE airport_service . city_code IN ( "
"SELECT city . city_code FROM city "
"WHERE city.city_name = 'BOSTON' ) ) ;"
)
)
assert action_sequence == [
'statement -> [query, ";"]',
'query -> ["(", "SELECT", distinct, select_results, "FROM", table_refs, '
'where_clause, ")"]',
'distinct -> [""]',
"select_results -> [col_refs]",
"col_refs -> [col_ref]",
'col_ref -> ["airport_service", ".", "airport_code"]',
"table_refs -> [table_name]",
'table_name -> ["airport_service"]',
'where_clause -> ["WHERE", conditions]',
"conditions -> [condition]",
"condition -> [in_clause]",
'in_clause -> [col_ref, "IN", query]',
'col_ref -> ["airport_service", ".", "city_code"]',
'query -> ["(", "SELECT", distinct, select_results, "FROM", table_refs, '
'where_clause, ")"]',
'distinct -> [""]',
"select_results -> [col_refs]",
"col_refs -> [col_ref]",
'col_ref -> ["city", ".", "city_code"]',
"table_refs -> [table_name]",
'table_name -> ["city"]',
'where_clause -> ["WHERE", conditions]',
"conditions -> [condition]",
"condition -> [biexpr]",
'biexpr -> ["city", ".", "city_name", binaryop, city_city_name_string]',
'binaryop -> ["="]',
"city_city_name_string -> [\"'BOSTON'\"]",
]
action_sequence = world.get_action_sequence(
(
"( SELECT airport_service . airport_code "
"FROM airport_service WHERE airport_service . city_code IN "
"( SELECT city . city_code FROM city "
"WHERE city.city_name = 'BOSTON' ) AND 1 = 1) ;"
)
)
assert action_sequence == [
'statement -> [query, ";"]',
'query -> ["(", "SELECT", distinct, select_results, "FROM", table_refs, '
'where_clause, ")"]',
'distinct -> [""]',
"select_results -> [col_refs]",
"col_refs -> [col_ref]",
'col_ref -> ["airport_service", ".", "airport_code"]',
"table_refs -> [table_name]",
'table_name -> ["airport_service"]',
'where_clause -> ["WHERE", conditions]',
"conditions -> [condition, conj, conditions]",
"condition -> [in_clause]",
'in_clause -> [col_ref, "IN", query]',
'col_ref -> ["airport_service", ".", "city_code"]',
'query -> ["(", "SELECT", distinct, select_results, "FROM", table_refs, '
'where_clause, ")"]',
'distinct -> [""]',
"select_results -> [col_refs]",
"col_refs -> [col_ref]",
'col_ref -> ["city", ".", "city_code"]',
"table_refs -> [table_name]",
'table_name -> ["city"]',
'where_clause -> ["WHERE", conditions]',
"conditions -> [condition]",
"condition -> [biexpr]",
'biexpr -> ["city", ".", "city_name", binaryop, city_city_name_string]',
'binaryop -> ["="]',
"city_city_name_string -> [\"'BOSTON'\"]",
'conj -> ["AND"]',
"conditions -> [condition]",
"condition -> [biexpr]",
"biexpr -> [value, binaryop, value]",
"value -> [pos_value]",
"pos_value -> [number]",
'number -> ["1"]',
'binaryop -> ["="]',
"value -> [pos_value]",
"pos_value -> [number]",
'number -> ["1"]',
]
world = AtisWorld(
[("give me all flights from boston to " "philadelphia next week arriving after lunch")]
)
action_sequence = world.get_action_sequence(
(
"( SELECT DISTINCT flight.flight_id "
"FROM flight WHERE "
"( flight . from_airport IN "
"( SELECT airport_service . airport_code "
"FROM airport_service WHERE airport_service . city_code IN "
"( SELECT city . city_code "
"FROM city "
"WHERE city.city_name = 'BOSTON' )))) ;"
)
)
assert action_sequence == [
'statement -> [query, ";"]',
'query -> ["(", "SELECT", distinct, select_results, "FROM", table_refs, '
'where_clause, ")"]',
'distinct -> ["DISTINCT"]',
"select_results -> [col_refs]",
"col_refs -> [col_ref]",
'col_ref -> ["flight", ".", "flight_id"]',
"table_refs -> [table_name]",
'table_name -> ["flight"]',
'where_clause -> ["WHERE", "(", conditions, ")"]',
"conditions -> [condition]",
"condition -> [in_clause]",
'in_clause -> [col_ref, "IN", query]',
'col_ref -> ["flight", ".", "from_airport"]',
'query -> ["(", "SELECT", distinct, select_results, "FROM", table_refs, '
'where_clause, ")"]',
'distinct -> [""]',
"select_results -> [col_refs]",
"col_refs -> [col_ref]",
'col_ref -> ["airport_service", ".", "airport_code"]',
"table_refs -> [table_name]",
'table_name -> ["airport_service"]',
'where_clause -> ["WHERE", conditions]',
"conditions -> [condition]",
"condition -> [in_clause]",
'in_clause -> [col_ref, "IN", query]',
'col_ref -> ["airport_service", ".", "city_code"]',
'query -> ["(", "SELECT", distinct, select_results, "FROM", table_refs, '
'where_clause, ")"]',
'distinct -> [""]',
"select_results -> [col_refs]",
"col_refs -> [col_ref]",
'col_ref -> ["city", ".", "city_code"]',
"table_refs -> [table_name]",
'table_name -> ["city"]',
'where_clause -> ["WHERE", conditions]',
"conditions -> [condition]",
"condition -> [biexpr]",
'biexpr -> ["city", ".", "city_name", binaryop, city_city_name_string]',
'binaryop -> ["="]',
"city_city_name_string -> [\"'BOSTON'\"]",
]
def test_atis_long_action_sequence(self):
world = AtisWorld(
[
(
"what is the earliest flight in morning "
"1993 june fourth from boston to pittsburgh"
)
]
)
action_sequence = world.get_action_sequence(
"( SELECT DISTINCT flight.flight_id "
"FROM flight "
"WHERE ( flight.departure_time = ( "
"SELECT MIN ( flight.departure_time ) "
"FROM flight "
"WHERE ( flight.departure_time BETWEEN 0 AND 1200 AND "
"( flight . from_airport IN ( "
"SELECT airport_service . airport_code "
"FROM airport_service WHERE airport_service . city_code "
"IN ( "
"SELECT city . city_code "
"FROM city WHERE city.city_name = 'BOSTON' )) "
"AND flight . to_airport IN ( "
"SELECT airport_service . airport_code "
"FROM airport_service "
"WHERE airport_service . city_code IN ( "
"SELECT city . city_code "
"FROM city "
"WHERE city.city_name = 'PITTSBURGH' )) ) ) ) AND "
"( flight.departure_time BETWEEN 0 AND 1200 AND "
"( flight . from_airport IN ( "
"SELECT airport_service . airport_code "
"FROM airport_service "
"WHERE airport_service . city_code IN ( "
"SELECT city . city_code "
"FROM city WHERE city.city_name = 'BOSTON' )) "
"AND flight . to_airport IN ( "
"SELECT airport_service . airport_code "
"FROM airport_service WHERE airport_service . city_code IN ( "
"SELECT city . city_code "
"FROM city "
"WHERE city.city_name = 'PITTSBURGH' )) ) ) ) ) ;"
)
assert action_sequence == [
'statement -> [query, ";"]',
'query -> ["(", "SELECT", distinct, select_results, "FROM", table_refs, '
'where_clause, ")"]',
'distinct -> ["DISTINCT"]',
"select_results -> [col_refs]",
"col_refs -> [col_ref]",
'col_ref -> ["flight", ".", "flight_id"]',
"table_refs -> [table_name]",
'table_name -> ["flight"]',
'where_clause -> ["WHERE", "(", conditions, ")"]',
"conditions -> [condition, conj, conditions]",
"condition -> [biexpr]",
"biexpr -> [col_ref, binaryop, value]",
'col_ref -> ["flight", ".", "departure_time"]',
'binaryop -> ["="]',
"value -> [pos_value]",
"pos_value -> [agg_results]",
'agg_results -> ["(", "SELECT", distinct, agg, "FROM", table_name, '
'where_clause, ")"]',
'distinct -> [""]',
'agg -> [agg_func, "(", col_ref, ")"]',
'agg_func -> ["MIN"]',
'col_ref -> ["flight", ".", "departure_time"]',
'table_name -> ["flight"]',
'where_clause -> ["WHERE", "(", conditions, ")"]',
"conditions -> [condition, conj, conditions]",
"condition -> [ternaryexpr]",
'ternaryexpr -> [col_ref, "BETWEEN", time_range_start, "AND", time_range_end]',
'col_ref -> ["flight", ".", "departure_time"]',
'time_range_start -> ["0"]',
'time_range_end -> ["1200"]',
'conj -> ["AND"]',
'conditions -> ["(", conditions, ")"]',
"conditions -> [condition, conj, conditions]",
"condition -> [in_clause]",
'in_clause -> [col_ref, "IN", query]',
'col_ref -> ["flight", ".", "from_airport"]',
'query -> ["(", "SELECT", distinct, select_results, "FROM", table_refs, '
'where_clause, ")"]',
'distinct -> [""]',
"select_results -> [col_refs]",
"col_refs -> [col_ref]",
'col_ref -> ["airport_service", ".", "airport_code"]',
"table_refs -> [table_name]",
'table_name -> ["airport_service"]',
'where_clause -> ["WHERE", conditions]',
"conditions -> [condition]",
"condition -> [in_clause]",
'in_clause -> [col_ref, "IN", query]',
'col_ref -> ["airport_service", ".", "city_code"]',
'query -> ["(", "SELECT", distinct, select_results, "FROM", table_refs, '
'where_clause, ")"]',
'distinct -> [""]',
"select_results -> [col_refs]",
"col_refs -> [col_ref]",
'col_ref -> ["city", ".", "city_code"]',
"table_refs -> [table_name]",
'table_name -> ["city"]',
'where_clause -> ["WHERE", conditions]',
"conditions -> [condition]",
"condition -> [biexpr]",
'biexpr -> ["city", ".", "city_name", binaryop, city_city_name_string]',
'binaryop -> ["="]',
"city_city_name_string -> [\"'BOSTON'\"]",
'conj -> ["AND"]',
"conditions -> [condition]",
"condition -> [in_clause]",
'in_clause -> [col_ref, "IN", query]',
'col_ref -> ["flight", ".", "to_airport"]',
'query -> ["(", "SELECT", distinct, select_results, "FROM", table_refs, '
'where_clause, ")"]',
'distinct -> [""]',
"select_results -> [col_refs]",
"col_refs -> [col_ref]",
'col_ref -> ["airport_service", ".", "airport_code"]',
"table_refs -> [table_name]",
'table_name -> ["airport_service"]',
'where_clause -> ["WHERE", conditions]',
"conditions -> [condition]",
"condition -> [in_clause]",
'in_clause -> [col_ref, "IN", query]',
'col_ref -> ["airport_service", ".", "city_code"]',
'query -> ["(", "SELECT", distinct, select_results, "FROM", table_refs, '
'where_clause, ")"]',
'distinct -> [""]',
"select_results -> [col_refs]",
"col_refs -> [col_ref]",
'col_ref -> ["city", ".", "city_code"]',
"table_refs -> [table_name]",
'table_name -> ["city"]',
'where_clause -> ["WHERE", conditions]',
"conditions -> [condition]",
"condition -> [biexpr]",
'biexpr -> ["city", ".", "city_name", binaryop, city_city_name_string]',
'binaryop -> ["="]',
"city_city_name_string -> [\"'PITTSBURGH'\"]",
'conj -> ["AND"]',
'conditions -> ["(", conditions, ")"]',
"conditions -> [condition, conj, conditions]",
"condition -> [ternaryexpr]",
'ternaryexpr -> [col_ref, "BETWEEN", time_range_start, "AND", time_range_end]',
'col_ref -> ["flight", ".", "departure_time"]',
'time_range_start -> ["0"]',
'time_range_end -> ["1200"]',
'conj -> ["AND"]',
'conditions -> ["(", conditions, ")"]',
"conditions -> [condition, conj, conditions]",
"condition -> [in_clause]",
'in_clause -> [col_ref, "IN", query]',
'col_ref -> ["flight", ".", "from_airport"]',
'query -> ["(", "SELECT", distinct, select_results, "FROM", table_refs, '
'where_clause, ")"]',
'distinct -> [""]',
"select_results -> [col_refs]",
"col_refs -> [col_ref]",
'col_ref -> ["airport_service", ".", "airport_code"]',
"table_refs -> [table_name]",
'table_name -> ["airport_service"]',
'where_clause -> ["WHERE", conditions]',
"conditions -> [condition]",
"condition -> [in_clause]",
'in_clause -> [col_ref, "IN", query]',
'col_ref -> ["airport_service", ".", "city_code"]',
'query -> ["(", "SELECT", distinct, select_results, "FROM", table_refs, '
'where_clause, ")"]',
'distinct -> [""]',
"select_results -> [col_refs]",
"col_refs -> [col_ref]",
'col_ref -> ["city", ".", "city_code"]',
"table_refs -> [table_name]",
'table_name -> ["city"]',
'where_clause -> ["WHERE", conditions]',
"conditions -> [condition]",
"condition -> [biexpr]",
'biexpr -> ["city", ".", "city_name", binaryop, city_city_name_string]',
'binaryop -> ["="]',
"city_city_name_string -> [\"'BOSTON'\"]",
'conj -> ["AND"]',
"conditions -> [condition]",
"condition -> [in_clause]",
'in_clause -> [col_ref, "IN", query]',
'col_ref -> ["flight", ".", "to_airport"]',
'query -> ["(", "SELECT", distinct, select_results, "FROM", table_refs, '
'where_clause, ")"]',
'distinct -> [""]',
"select_results -> [col_refs]",
"col_refs -> [col_ref]",
'col_ref -> ["airport_service", ".", "airport_code"]',
"table_refs -> [table_name]",
'table_name -> ["airport_service"]',
'where_clause -> ["WHERE", conditions]',
"conditions -> [condition]",
"condition -> [in_clause]",
'in_clause -> [col_ref, "IN", query]',
'col_ref -> ["airport_service", ".", "city_code"]',
'query -> ["(", "SELECT", distinct, select_results, "FROM", table_refs, '
'where_clause, ")"]',
'distinct -> [""]',
"select_results -> [col_refs]",
"col_refs -> [col_ref]",
'col_ref -> ["city", ".", "city_code"]',
"table_refs -> [table_name]",
'table_name -> ["city"]',
'where_clause -> ["WHERE", conditions]',
"conditions -> [condition]",
"condition -> [biexpr]",
'biexpr -> ["city", ".", "city_name", binaryop, city_city_name_string]',
'binaryop -> ["="]',
"city_city_name_string -> [\"'PITTSBURGH'\"]",
]
def test_atis_from_json(self):
line = json.loads(self.data[0])
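        # For each turn, build a world from all utterances up to and including that turn, and
        # check that the turn's gold SQL can be mapped to an action sequence under its grammar.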
for utterance_idx in range(len(line["interaction"])):
world = AtisWorld(
[
interaction["utterance"]
for interaction in line["interaction"][: utterance_idx + 1]
]
)
action_sequence = world.get_action_sequence(line["interaction"][utterance_idx]["sql"])
assert action_sequence is not None
def test_time_extraction(self):
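        # Approximate times are a half-hour window on either side of the given time.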
approximate_times = get_approximate_times([1900])
assert approximate_times == [1830, 1930]
approximate_times = get_approximate_times([515])
assert approximate_times == [445, 545]
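        # 'pm' matches should be converted to 24-hour (military) time values.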
pm_times = [
pm_map_match_to_query_value(string)
for string in ["12pm", "1pm", "830pm", "1230pm", "115pm"]
]
assert pm_times == [[1200], [1300], [2030], [1230], [1315]]
def test_atis_helper_methods(self):
world = AtisWorld(
[
(
"what is the earliest flight in morning "
"1993 june fourth from boston to pittsburgh"
)
]
)
assert world.dates == [datetime(1993, 6, 4, 0, 0)]
assert world._get_numeric_database_values("time_range_end") == ["800", "1200"]
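        # The helper should interleave whitespace (`ws`) expressions between (and after) the
        # given grammar members.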
assert world._get_sequence_with_spacing(
world.grammar,
[
world.grammar["col_ref"],
Literal("BETWEEN"),
world.grammar["time_range_start"],
Literal("AND"),
world.grammar["time_range_end"],
],
) == Sequence(
world.grammar["col_ref"],
world.grammar["ws"],
Literal("BETWEEN"),
world.grammar["ws"],
world.grammar["time_range_start"],
world.grammar["ws"],
Literal("AND"),
world.grammar["ws"],
world.grammar["time_range_end"],
world.grammar["ws"],
)
world = AtisWorld(["i plan to travel on the tenth of 1993 july"])
assert world.dates == [datetime(1993, 7, 10, 0, 0)]
| allennlp-semparse-master | tests/parsimonious_languages/worlds/atis_world_test.py |
import sqlite3
from parsimonious import Grammar, ParseError
import pytest
from ... import SemparseTestCase
from allennlp_semparse.parsimonious_languages.worlds.text2sql_world import Text2SqlWorld
from allennlp_semparse.parsimonious_languages.contexts.sql_context_utils import (
format_grammar_string,
)
from allennlp_semparse.parsimonious_languages.contexts.sql_context_utils import SqlVisitor
class TestText2SqlWorld(SemparseTestCase):
def setup_method(self):
super().setup_method()
self.schema = str(self.FIXTURES_ROOT / "data" / "text2sql" / "restaurants-schema.csv")
self.database_path = str(self.FIXTURES_ROOT / "data" / "text2sql" / "restaurants.db")
def test_untyped_grammar_has_no_string_or_number_references(self):
world = Text2SqlWorld(self.schema, use_untyped_entities=True)
grammar_dictionary = world.base_grammar_dictionary
for key, value in grammar_dictionary.items():
assert key not in {"number", "string"}
# We don't check for string directly here because
# string_set is a valid non-terminal.
assert all(["number" not in production for production in value])
assert all(["string)" not in production for production in value])
assert all(["string " not in production for production in value])
assert all(["(string " not in production for production in value])
def test_world_modifies_unconstrained_grammar_correctly(self):
world = Text2SqlWorld(self.schema)
grammar_dictionary = world.base_grammar_dictionary
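        # The table and column names from the restaurants schema should appear directly as
        # terminal productions in the grammar.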
assert grammar_dictionary["table_name"] == ['"RESTAURANT"', '"LOCATION"', '"GEOGRAPHIC"']
assert grammar_dictionary["column_name"] == [
'"STREET_NAME"',
'"RESTAURANT_ID"',
'"REGION"',
'"RATING"',
'"NAME"',
'"HOUSE_NUMBER"',
'"FOOD_TYPE"',
'"COUNTY"',
'"CITY_NAME"',
]
def test_world_modifies_grammar_with_global_values_for_dataset(self):
world = Text2SqlWorld(self.schema)
grammar_dictionary = world.base_grammar_dictionary
# Should have added 2.5 because it is a global value
# for the restaurants dataset.
assert grammar_dictionary["value"] == [
'"2.5"',
"parenval",
'"YEAR(CURDATE())"',
"number",
"boolean",
"function",
"col_ref",
"string",
]
def test_variable_free_world_cannot_parse_as_statements(self):
world = Text2SqlWorld(self.schema)
grammar_dictionary = world.base_grammar_dictionary
        # Iterate over the production lists themselves (not the dict items) so that we
        # actually check that no right-hand side contains an "AS" alias terminal.
        for productions in grammar_dictionary.values():
            assert all('"AS"' not in production for production in productions)
sql_with_as = [
"SELECT",
"COUNT",
"(",
"*",
")",
"FROM",
"LOCATION",
"AS",
"LOCATIONalias0",
",",
"RESTAURANT",
"WHERE",
"LOCATION",
".",
"CITY_NAME",
"=",
"'city_name0'",
"AND",
"RESTAURANT",
".",
"NAME",
"=",
"LOCATION",
".",
"RESTAURANT_ID",
"AND",
"RESTAURANT",
".",
"NAME",
"=",
"'name0'",
";",
]
grammar = Grammar(format_grammar_string(world.base_grammar_dictionary))
sql_visitor = SqlVisitor(grammar)
with pytest.raises(ParseError):
sql_visitor.parse(" ".join(sql_with_as))
sql = [
"SELECT",
"COUNT",
"(",
"*",
")",
"FROM",
"LOCATION",
",",
"RESTAURANT",
"WHERE",
"LOCATION",
".",
"CITY_NAME",
"=",
"'city_name0'",
"AND",
"RESTAURANT",
".",
"NAME",
"=",
"LOCATION",
".",
"RESTAURANT_ID",
"AND",
"RESTAURANT",
".",
"NAME",
"=",
"'name0'",
";",
]
# Without the AS we should still be able to parse it.
sql_visitor = SqlVisitor(grammar)
sql_visitor.parse(" ".join(sql))
def test_grammar_from_world_can_parse_statements(self):
world = Text2SqlWorld(self.schema)
sql = [
"SELECT",
"COUNT",
"(",
"*",
")",
"FROM",
"LOCATION",
",",
"RESTAURANT",
"WHERE",
"LOCATION",
".",
"CITY_NAME",
"=",
"'city_name0'",
"AND",
"RESTAURANT",
".",
"NAME",
"=",
"LOCATION",
".",
"RESTAURANT_ID",
"AND",
"RESTAURANT",
".",
"NAME",
"=",
"'name0'",
";",
]
grammar = Grammar(format_grammar_string(world.base_grammar_dictionary))
sql_visitor = SqlVisitor(grammar)
sql_visitor.parse(" ".join(sql))
def test_world_identifies_non_global_rules(self):
world = Text2SqlWorld(self.schema)
assert not world.is_global_rule("value -> [\"'food_type0'\"]")
def test_grammar_from_world_can_produce_entities_as_values(self):
world = Text2SqlWorld(self.schema)
sql = [
"SELECT",
"COUNT",
"(",
"*",
")",
"FROM",
"LOCATION",
",",
"RESTAURANT",
"WHERE",
"LOCATION",
".",
"CITY_NAME",
"=",
"'city_name0'",
"AND",
"RESTAURANT",
".",
"NAME",
"=",
"LOCATION",
".",
"RESTAURANT_ID",
"AND",
"RESTAURANT",
".",
"NAME",
"=",
"'name0'",
";",
]
entities = {
"city_name0": {"text": "San fran", "type": "location"},
"name0": {"text": "Matt Gardinios Pizza", "type": "restaurant"},
}
action_sequence, actions = world.get_action_sequence_and_all_actions(sql, entities)
assert "string -> [\"'city_name0'\"]" in action_sequence
assert "string -> [\"'name0'\"]" in action_sequence
assert "string -> [\"'city_name0'\"]" in actions
assert "string -> [\"'name0'\"]" in actions
def test_world_adds_values_from_tables(self):
connection = sqlite3.connect(self.database_path)
cursor = connection.cursor()
world = Text2SqlWorld(self.schema, cursor=cursor, use_prelinked_entities=False)
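        # With a database cursor and no pre-linked entities, the grammar should be augmented
        # with the literal numbers and strings that actually occur in the database tables.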
assert world.base_grammar_dictionary["number"] == [
'"229"',
'"228"',
'"227"',
'"226"',
'"225"',
'"5"',
'"4"',
'"3"',
'"2"',
'"1"',
'"833"',
'"430"',
'"242"',
'"135"',
'"1103"',
]
assert world.base_grammar_dictionary["string"] == [
'"tommy\'s"',
'"rod\'s hickory pit restaurant"',
'"lyons restaurant"',
'"jamerican cuisine"',
'"denny\'s restaurant"',
'"american"',
'"vallejo"',
'"w. el camino real"',
'"el camino real"',
'"e. el camino real"',
'"church st"',
'"broadway"',
'"sunnyvale"',
'"san francisco"',
'"san carlos"',
'"american canyon"',
'"alviso"',
'"albany"',
'"alamo"',
'"alameda"',
'"unknown"',
'"santa clara county"',
'"contra costa county"',
'"alameda county"',
'"bay area"',
]
| allennlp-semparse-master | tests/parsimonious_languages/worlds/text2sql_world_test.py |
allennlp-semparse-master | tests/parsimonious_languages/executors/__init__.py |
|
from ... import SemparseTestCase
from allennlp_semparse.parsimonious_languages.executors import SqlExecutor
class TestSqlExecutor(SemparseTestCase):
def setup_method(self):
super().setup_method()
self._database_file = "https://allennlp.s3.amazonaws.com/datasets/atis/atis.db"
def test_sql_accuracy_is_scored_correctly(self):
sql_query_label = (
"( SELECT airport_service . airport_code "
"FROM airport_service "
"WHERE airport_service . city_code IN ( "
"SELECT city . city_code FROM city "
"WHERE city.city_name = 'BOSTON' ) ) ;"
)
executor = SqlExecutor(self._database_file)
postprocessed_sql_query_label = executor.postprocess_query_sqlite(sql_query_label)
# If the predicted query and the label are the same, then we should get 1.
assert (
executor.evaluate_sql_query(
postprocessed_sql_query_label, [postprocessed_sql_query_label]
)
== 1
)
predicted_sql_query = (
"( SELECT airport_service . airport_code "
"FROM airport_service "
"WHERE airport_service . city_code IN ( "
"SELECT city . city_code FROM city "
"WHERE city.city_name = 'SEATTLE' ) ) ;"
)
postprocessed_predicted_sql_query = executor.postprocess_query_sqlite(predicted_sql_query)
# If the predicted query and the label are different we should get 0.
assert (
executor.evaluate_sql_query(
postprocessed_predicted_sql_query, [postprocessed_sql_query_label]
)
== 0
)
| allennlp-semparse-master | tests/parsimonious_languages/executors/sql_executor_test.py |
import json
import os
from .. import SemparseTestCase
from allennlp.models.archival import load_archive
from allennlp.predictors import Predictor
class TestNlvrParserPredictor(SemparseTestCase):
def setup_method(self):
super().setup_method()
self.inputs = {
"worlds": [
[
[
{
"y_loc": 80,
"type": "triangle",
"color": "#0099ff",
"x_loc": 80,
"size": 20,
}
],
[{"y_loc": 80, "type": "square", "color": "Yellow", "x_loc": 13, "size": 20}],
[{"y_loc": 67, "type": "triangle", "color": "Yellow", "x_loc": 35, "size": 10}],
],
[
[{"y_loc": 8, "type": "square", "color": "Yellow", "x_loc": 57, "size": 30}],
[{"y_loc": 43, "type": "square", "color": "#0099ff", "x_loc": 70, "size": 30}],
[{"y_loc": 59, "type": "square", "color": "Yellow", "x_loc": 47, "size": 10}],
],
],
"identifier": "fake_id",
"sentence": "Each grey box contains atleast one yellow object touching the edge",
}
def test_predictor_with_coverage_parser(self):
archive_dir = self.FIXTURES_ROOT / "nlvr_coverage_semantic_parser" / "serialization"
archive = load_archive(os.path.join(archive_dir, "model.tar.gz"))
predictor = Predictor.from_archive(archive, "nlvr-parser")
result = predictor.predict_json(self.inputs)
assert "logical_form" in result
assert "denotations" in result
# result['denotations'] is a list corresponding to k-best logical forms, where k is 1 by
# default.
assert len(result["denotations"][0]) == 2 # Because there are two worlds in the input.
def test_predictor_with_direct_parser(self):
archive_dir = self.FIXTURES_ROOT / "nlvr_direct_semantic_parser" / "serialization"
archive = load_archive(os.path.join(archive_dir, "model.tar.gz"))
predictor = Predictor.from_archive(archive, "nlvr-parser")
result = predictor.predict_json(self.inputs)
assert "logical_form" in result
assert "denotations" in result
# result['denotations'] is a list corresponding to k-best logical forms, where k is 1 by
# default.
assert len(result["denotations"][0]) == 2 # Because there are two worlds in the input.
def test_predictor_with_string_input(self):
archive_dir = self.FIXTURES_ROOT / "nlvr_coverage_semantic_parser" / "serialization"
archive = load_archive(os.path.join(archive_dir, "model.tar.gz"))
predictor = Predictor.from_archive(archive, "nlvr-parser")
self.inputs["worlds"] = json.dumps(self.inputs["worlds"])
result = predictor.predict_json(self.inputs)
assert "logical_form" in result
assert "denotations" in result
# result['denotations'] is a list corresponding to k-best logical forms, where k is 1 by
# default.
assert len(result["denotations"][0]) == 2 # Because there are two worlds in the input.
def test_predictor_with_single_world(self):
archive_dir = self.FIXTURES_ROOT / "nlvr_coverage_semantic_parser" / "serialization"
archive = load_archive(os.path.join(archive_dir, "model.tar.gz"))
predictor = Predictor.from_archive(archive, "nlvr-parser")
self.inputs["structured_rep"] = self.inputs["worlds"][0]
del self.inputs["worlds"]
result = predictor.predict_json(self.inputs)
assert "logical_form" in result
assert "denotations" in result
# result['denotations'] is a list corresponding to k-best logical forms, where k is 1 by
# default.
assert len(result["denotations"][0]) == 1 # Because there is one world in the input.
def test_predictor_with_single_world_and_string_input(self):
archive_dir = self.FIXTURES_ROOT / "nlvr_coverage_semantic_parser" / "serialization"
archive = load_archive(os.path.join(archive_dir, "model.tar.gz"))
predictor = Predictor.from_archive(archive, "nlvr-parser")
self.inputs["structured_rep"] = json.dumps(self.inputs["worlds"][0])
del self.inputs["worlds"]
result = predictor.predict_json(self.inputs)
assert "logical_form" in result
assert "denotations" in result
# result['denotations'] is a list corresponding to k-best logical forms, where k is 1 by
# default.
assert len(result["denotations"][0]) == 1 # Because there is one world in the input.
| allennlp-semparse-master | tests/predictors/nlvr_parser_test.py |
allennlp-semparse-master | tests/predictors/__init__.py |
|
import pytest
from .. import SemparseTestCase
from allennlp.models.archival import load_archive
from allennlp.predictors import Predictor
class TestWikiTablesParserPredictor(SemparseTestCase):
def test_uses_named_inputs(self):
inputs = {"question": "names", "table": "name\tdate\nmatt\t2017\npradeep\t2018"}
archive_path = self.FIXTURES_ROOT / "wikitables" / "serialization" / "model.tar.gz"
archive = load_archive(archive_path)
predictor = Predictor.from_archive(archive, "wikitables-parser")
result = predictor.predict_json(inputs)
action_sequence = result.get("best_action_sequence")
if action_sequence:
# We don't currently disallow endless loops in the decoder, and an untrained seq2seq
# model will easily get itself into a loop. An endless loop isn't a finished logical
# form, so decoding doesn't return any finished states, which means no actions. So,
# sadly, we don't have a great test here. This is just testing that the predictor
# runs, basically.
assert len(action_sequence) > 1
assert all([isinstance(action, str) for action in action_sequence])
logical_form = result.get("logical_form")
assert logical_form is not None
def test_answer_present(self):
inputs = {
"question": "Who is 18 years old?",
"table": "Name\tAge\nShallan\t16\nKaladin\t18",
}
archive_path = self.FIXTURES_ROOT / "wikitables" / "serialization" / "model.tar.gz"
archive = load_archive(archive_path)
predictor = Predictor.from_archive(archive, "wikitables-parser")
result = predictor.predict_json(inputs)
answer = result.get("answer")
assert answer is not None
def test_interactive_beam_search(self):
inputs = {
"question": "Who is 18 years old?",
"table": "Name\tAge\nShallan\t16\nKaladin\t18",
}
archive_path = self.FIXTURES_ROOT / "wikitables" / "serialization" / "model.tar.gz"
archive = load_archive(archive_path)
predictor = Predictor.from_archive(archive, "wikitables-parser")
# This is not the start of the best sequence, but it will be once we force it.
initial_tokens = [
"@start@ -> Number",
"Number -> [<List[Row],NumberColumn:Number>, List[Row], NumberColumn]",
]
# First let's try an unforced one. Its initial tokens should not be ours.
result = predictor.predict_json(inputs)
best_action_sequence = result["best_action_sequence"]
assert best_action_sequence
assert best_action_sequence[:2] != initial_tokens
# Now let's try forcing it down the path of `initial_sequence`
inputs["initial_sequence"] = initial_tokens
result = predictor.predict_json(inputs)
best_action_sequence = result["best_action_sequence"]
assert best_action_sequence[:2] == initial_tokens
# Should get choices back from beam search
beam_search_choices = result["choices"]
# Make sure that our forced choices appear as beam_search_choices.
for choices, initial_token in zip(beam_search_choices, initial_tokens):
assert any(token == initial_token for _, token in choices)
# Should get back beams too
beam_snapshots = result["beam_snapshots"]
assert len(beam_snapshots) == 1
assert 0 in beam_snapshots
beams = beam_snapshots[0]
for idx, (beam, action) in enumerate(zip(beams, best_action_sequence)):
# First beam should have 1-element sequences, etc...
assert all(len(sequence) == idx + 1 for _, sequence in beam)
assert any(sequence[-1] == action for _, sequence in beam)
def test_answer_present_with_batch_predict(self):
inputs = [
{"question": "Who is 18 years old?", "table": "Name\tAge\nShallan\t16\nKaladin\t18"}
]
archive_path = self.FIXTURES_ROOT / "wikitables" / "serialization" / "model.tar.gz"
archive = load_archive(archive_path)
predictor = Predictor.from_archive(archive, "wikitables-parser")
result = predictor.predict_batch_json(inputs)
answer = result[0].get("answer")
assert answer is not None
| allennlp-semparse-master | tests/predictors/wikitables_parser_test.py |
from flaky import flaky
from .. import SemparseTestCase
from allennlp.models.archival import load_archive
from allennlp.predictors import Predictor
class TestAtisParserPredictor(SemparseTestCase):
@flaky
def test_atis_parser_uses_named_inputs(self):
inputs = {"utterance": "show me the flights to seattle"}
archive_path = self.FIXTURES_ROOT / "atis" / "serialization" / "model.tar.gz"
archive = load_archive(archive_path)
predictor = Predictor.from_archive(archive, "atis-parser")
result = predictor.predict_json(inputs)
action_sequence = result.get("best_action_sequence")
if action_sequence:
            # An untrained model will likely get into a loop and not produce any finished states.
# When the model gets into a loop it will not produce any valid SQL, so we don't get
# any actions. This basically just tests if the model runs.
assert len(action_sequence) > 1
assert all([isinstance(action, str) for action in action_sequence])
predicted_sql_query = result.get("predicted_sql_query")
assert predicted_sql_query is not None
@flaky
def test_atis_parser_predicted_sql_present(self):
inputs = {"utterance": "show me flights to seattle"}
archive_path = self.FIXTURES_ROOT / "atis" / "serialization" / "model.tar.gz"
archive = load_archive(archive_path)
predictor = Predictor.from_archive(archive, "atis-parser")
result = predictor.predict_json(inputs)
predicted_sql_query = result.get("predicted_sql_query")
assert predicted_sql_query is not None
@flaky
def test_atis_parser_batch_predicted_sql_present(self):
inputs = [{"utterance": "show me flights to seattle"}]
archive_path = self.FIXTURES_ROOT / "atis" / "serialization" / "model.tar.gz"
archive = load_archive(archive_path)
predictor = Predictor.from_archive(archive, "atis-parser")
result = predictor.predict_batch_json(inputs)
predicted_sql_query = result[0].get("predicted_sql_query")
assert predicted_sql_query is not None
| allennlp-semparse-master | tests/predictors/atis_parser_test.py |
from .. import ModelTestCase
from allennlp_semparse.state_machines.states import GrammarStatelet
from allennlp_semparse.models.text2sql_parser import Text2SqlParser
from allennlp_semparse.parsimonious_languages.worlds.text2sql_world import Text2SqlWorld
class TestText2SqlParser(ModelTestCase):
def setup_method(self):
super().setup_method()
self.set_up_model(
str(self.FIXTURES_ROOT / "text2sql" / "experiment.json"),
str(self.FIXTURES_ROOT / "data" / "text2sql" / "restaurants_tiny.json"),
)
self.schema = str(self.FIXTURES_ROOT / "data" / "text2sql" / "restaurants-schema.csv")
def test_model_can_train_save_and_load(self):
self.ensure_model_can_train_save_and_load(self.param_file)
def test_grammar_statelet(self):
valid_actions = None
world = Text2SqlWorld(self.schema)
sql = ["SELECT", "COUNT", "(", "*", ")", "FROM", "LOCATION", ",", "RESTAURANT", ";"]
action_sequence, valid_actions = world.get_action_sequence_and_all_actions(sql)
grammar_state = GrammarStatelet(
["statement"], valid_actions, Text2SqlParser.is_nonterminal, reverse_productions=True
)
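        # Applying the full gold action sequence should leave the grammar state with an empty
        # nonterminal stack, i.e. the derivation is complete.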
for action in action_sequence:
grammar_state = grammar_state.take_action(action)
assert grammar_state._nonterminal_stack == []
| allennlp-semparse-master | tests/models/text2sql_parser_test.py |
allennlp-semparse-master | tests/models/__init__.py |
|
from flaky import flaky
from ... import ModelTestCase
class TestWikiTablesVariableFreeErm(ModelTestCase):
def setup_method(self):
super().setup_method()
config_path = self.FIXTURES_ROOT / "wikitables" / "experiment-erm.json"
data_path = self.FIXTURES_ROOT / "data" / "wikitables" / "sample_data.examples"
self.set_up_model(config_path, data_path)
@flaky
def test_model_can_train_save_and_load(self):
# We have very few embedded actions on our agenda, and so it's rare that this parameter
# actually gets used. We know this parameter works from our NLVR ERM test, so it's easier
# to just ignore it here than to try to finagle the test to make it so this has a non-zero
# gradient.
ignore = {"_decoder_step._checklist_multiplier"}
self.ensure_model_can_train_save_and_load(self.param_file, gradients_to_ignore=ignore)
| allennlp-semparse-master | tests/models/wikitables/wikitables_erm_semantic_parser_test.py |
allennlp-semparse-master | tests/models/wikitables/__init__.py |
|
from collections import namedtuple
from flaky import flaky
from numpy.testing import assert_almost_equal
import torch
from allennlp.common import Params
from ... import ModelTestCase
class TestWikiTablesMmlSemanticParser(ModelTestCase):
def setup_method(self):
super().setup_method()
config_path = self.FIXTURES_ROOT / "wikitables" / "experiment.json"
data_path = self.FIXTURES_ROOT / "data" / "wikitables" / "sample_data.examples"
self.set_up_model(config_path, data_path)
@flaky
def test_model_can_train_save_and_load(self):
self.ensure_model_can_train_save_and_load(self.param_file)
def test_make_output_human_readable(self):
model_batch = self.dataset.as_tensor_dict(self.dataset.get_padding_lengths())
self.model.training = False
forward_output = self.model(**model_batch)
decode_output = self.model.make_output_human_readable(forward_output)
assert "predicted_actions" in decode_output
def test_get_neighbor_indices(self):
worlds, num_entities = self.get_fake_worlds()
tensor = torch.LongTensor([])
neighbor_indices = self.model._get_neighbor_indices(worlds, num_entities, tensor)
        # Check that the shape is correct (dimension 2 has size num_neighbors), that
        # -1 is used for padding, and that the neighbor indices themselves are correct.
assert_almost_equal(
neighbor_indices.data.numpy(),
[
[[-1, -1], [4, -1], [4, -1], [5, -1], [1, 2], [3, -1]],
[[-1, -1], [2, -1], [1, -1], [-1, -1], [-1, -1], [-1, -1]],
],
)
def test_get_type_vector(self):
worlds, num_entities = self.get_fake_worlds()
tensor = torch.LongTensor([])
type_vector, _ = self.model._get_type_vector(worlds, num_entities, tensor)
        # Verify that the appropriate types are present and that padding is used for
        # non-existent entities.
assert_almost_equal(type_vector.data.numpy(), [[0, 0, 0, 3, 1, 4], [0, 0, 1, 0, 0, 0]])
def test_get_linking_probabilities(self):
worlds, num_entities = self.get_fake_worlds()
# (batch_size, num_question_tokens, num_entities)
linking_scores = [
[[-2, 1, 0, -3, 2, -2], [4, -1, 5, -3, 4, 3]],
[[0, 1, 8, 10, 10, 4], [3, 2, -1, -2, 1, -6]],
]
linking_scores = torch.FloatTensor(linking_scores)
question_mask = torch.LongTensor([[1, 1], [1, 0]])
_, entity_type_dict = self.model._get_type_vector(worlds, num_entities, linking_scores)
# (batch_size, num_question_tokens, num_entities)
entity_probability = self.model._get_linking_probabilities(
worlds, linking_scores, question_mask, entity_type_dict
)
# The following properties in entity_probability are tested for by true_probability:
# (1) It has all 0.0 probabilities when there is no question token, as seen for the
# second word in the second batch.
# (2) It has 0.0 probabilities when an entity is masked, as seen in the last three entities
# for the second batch instance.
# (3) The probabilities for entities of the same type with the same question token should
# sum to at most 1, but not necessarily 1, because some probability mass goes to the
# null entity. We have four entity types here, so each row should sum to at most 4,
# and that number will approach 4 as the unnormalized linking scores for each entity
# get higher.
true_probability = [
[
[0.02788338, 0.56005275, 0.2060319, 0.880797, 0.04742587, 0.11920291],
[0.26714143, 0.00179998, 0.7261657, 0.98201376, 0.04742587, 0.95257413],
],
[[0.21194156, 0.57611686, 0.99966466, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0]],
]
assert_almost_equal(entity_probability.detach().cpu().numpy(), true_probability)
def get_fake_worlds(self):
# Generate a toy WikitablesWorld.
FakeTable = namedtuple("FakeTable", ["entities", "neighbors"])
FakeWorld = namedtuple("FakeWorld", ["table_graph"])
entities = [
["-1", "2010", "2012", "string:bmw", "date_column:year", "string_column:make"],
["-1", "2012", "date_column:year"],
]
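        # Neighbors in the table graph: each column neighbors its cell values and vice versa,
        # and the null entity (-1) has no neighbors.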
neighbors = [
{
"2010": ["date_column:year"],
"2012": ["date_column:year"],
"string:bmw": ["string_column:make"],
"date_column:year": ["2010", "2012"],
"string_column:make": ["string:bmw"],
"-1": [],
},
{"2012": ["date_column:year"], "date_column:year": ["2012"], "-1": []},
]
worlds = [
FakeWorld(FakeTable(entity_list, entity2neighbors))
for entity_list, entity2neighbors in zip(entities, neighbors)
]
num_entities = max([len(entity_list) for entity_list in entities])
return worlds, num_entities
| allennlp-semparse-master | tests/models/wikitables/wikitables_mml_semantic_parser_test.py |
allennlp-semparse-master | tests/models/nlvr/__init__.py |
|
from ... import ModelTestCase
class TestNlvrDirectSemanticParser(ModelTestCase):
def setup_method(self):
super().setup_method()
self.set_up_model(
self.FIXTURES_ROOT / "nlvr_direct_semantic_parser" / "experiment.json",
self.FIXTURES_ROOT / "data" / "nlvr" / "sample_processed_data.jsonl",
)
def test_model_can_train_save_and_load(self):
self.ensure_model_can_train_save_and_load(self.param_file)
| allennlp-semparse-master | tests/models/nlvr/nlvr_direct_semantic_parser_test.py |
from numpy.testing import assert_almost_equal
import torch
import pytest
from allennlp.common import Params
from ... import ModelTestCase
from allennlp.data import Vocabulary
from allennlp.models import Model
from allennlp.models.archival import load_archive
class TestNlvrCoverageSemanticParser(ModelTestCase):
def setup_method(self):
super().setup_method()
self.set_up_model(
self.FIXTURES_ROOT / "nlvr_coverage_semantic_parser" / "experiment.json",
self.FIXTURES_ROOT / "data" / "nlvr" / "sample_grouped_data.jsonl",
)
def test_model_can_train_save_and_load(self):
self.ensure_model_can_train_save_and_load(self.param_file)
def test_ungrouped_model_can_train_save_and_load(self):
self.ensure_model_can_train_save_and_load(
self.FIXTURES_ROOT / "nlvr_coverage_semantic_parser" / "ungrouped_experiment.json"
)
def test_mml_initialized_model_can_train_save_and_load(self):
self.ensure_model_can_train_save_and_load(
self.FIXTURES_ROOT / "nlvr_coverage_semantic_parser" / "mml_init_experiment.json"
)
def test_get_checklist_info(self):
# Creating a fake all_actions field where actions 0, 2 and 4 are terminal productions.
all_actions = [
("<Set[Object]:Set[Object]> -> top", True, None),
("fake_action", True, None),
("Color -> color_black", True, None),
("fake_action2", True, None),
("int -> 6", True, None),
]
# Of the actions above, those at indices 0 and 4 are on the agenda, and there are padding
# indices at the end.
test_agenda = torch.Tensor([[0], [4], [-1], [-1]])
checklist_info = self.model._get_checklist_info(test_agenda, all_actions)
target_checklist, terminal_actions, checklist_mask = checklist_info
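        # The terminal actions are those at indices 0, 2 and 4; the target checklist marks which
        # of them appear on the agenda (0 and 4 do, 2 does not).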
assert_almost_equal(target_checklist.data.numpy(), [[1], [0], [1]])
assert_almost_equal(terminal_actions.data.numpy(), [[0], [2], [4]])
assert_almost_equal(checklist_mask.data.numpy(), [[1], [1], [1]])
def test_initialize_weights_from_archive(self):
original_model_parameters = self.model.named_parameters()
original_model_weights = {
name: parameter.data.clone().numpy() for name, parameter in original_model_parameters
}
mml_model_archive_file = (
self.FIXTURES_ROOT / "nlvr_direct_semantic_parser" / "serialization" / "model.tar.gz"
)
archive = load_archive(mml_model_archive_file)
archived_model_parameters = archive.model.named_parameters()
self.model._initialize_weights_from_archive(archive)
changed_model_parameters = dict(self.model.named_parameters())
for name, archived_parameter in archived_model_parameters:
archived_weight = archived_parameter.data.numpy()
original_weight = original_model_weights[name]
changed_weight = changed_model_parameters[name].data.numpy()
# We want to make sure that the weights in the original model have indeed been changed
# after a call to ``_initialize_weights_from_archive``.
with pytest.raises(AssertionError, match="Arrays are not almost equal"):
assert_almost_equal(original_weight, changed_weight)
# This also includes the sentence token embedder. Those weights will be the same
# because the two models have the same vocabulary.
assert_almost_equal(archived_weight, changed_weight)
def test_get_vocab_index_mapping(self):
mml_model_archive_file = (
self.FIXTURES_ROOT / "nlvr_direct_semantic_parser" / "serialization" / "model.tar.gz"
)
archive = load_archive(mml_model_archive_file)
mapping = self.model._get_vocab_index_mapping(archive.model.vocab)
expected_mapping = [(i, i) for i in range(16)]
assert mapping == expected_mapping
new_vocab = Vocabulary()
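        # Build a new, smaller vocabulary containing only a few of the original tokens, so that
        # the mapping from the archived model's indices to the new indices is non-trivial.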
def copy_token_at_index(i):
token = self.vocab.get_token_from_index(i, "tokens")
new_vocab.add_token_to_namespace(token, "tokens")
copy_token_at_index(5)
copy_token_at_index(7)
copy_token_at_index(10)
mapping = self.model._get_vocab_index_mapping(new_vocab)
# Mapping of indices from model vocabulary to new vocabulary. 0 and 1 are padding and unk
# tokens.
assert mapping == [(0, 0), (1, 1), (5, 2), (7, 3), (10, 4)]
| allennlp-semparse-master | tests/models/nlvr/nlvr_coverage_semantic_parser_test.py |
from numpy.testing import assert_almost_equal
import torch
from allennlp.common import Params
from allennlp_semparse.models.atis.atis_semantic_parser import AtisSemanticParser
from allennlp_semparse.parsimonious_languages.worlds import AtisWorld
from allennlp_semparse.state_machines.states import GrammarStatelet
from ... import SemparseTestCase
class TestAtisGrammarStatelet(SemparseTestCase):
def test_atis_grammar_statelet(self):
world = AtisWorld(
[("give me all flights from boston to " "philadelphia next week arriving after lunch")]
)
action_sequence = [
'statement -> [query, ";"]',
'query -> ["(", "SELECT", distinct, select_results, "FROM", table_refs, '
'where_clause, ")"]',
'distinct -> ["DISTINCT"]',
"select_results -> [col_refs]",
'col_refs -> [col_ref, ",", col_refs]',
'col_ref -> ["city", ".", "city_code"]',
"col_refs -> [col_ref]",
'col_ref -> ["city", ".", "city_name"]',
"table_refs -> [table_name]",
'table_name -> ["city"]',
'where_clause -> ["WHERE", "(", conditions, ")"]',
"conditions -> [condition]",
"condition -> [biexpr]",
'biexpr -> ["city", ".", "city_name", binaryop, city_city_name_string]',
'binaryop -> ["="]',
"city_city_name_string -> [\"'BOSTON'\"]",
]
grammar_state = GrammarStatelet(
["statement"], world.valid_actions, AtisSemanticParser.is_nonterminal
)
for action in action_sequence:
grammar_state = grammar_state.take_action(action)
assert grammar_state._nonterminal_stack == []
| allennlp-semparse-master | tests/models/atis/atis_grammar_statelet_test.py |
allennlp-semparse-master | tests/models/atis/__init__.py |
|
from flaky import flaky
from ... import ModelTestCase
from allennlp_semparse.parsimonious_languages.contexts.sql_context_utils import (
action_sequence_to_sql,
)
class TestAtisSemanticParser(ModelTestCase):
def setup_method(self):
super().setup_method()
self.set_up_model(
str(self.FIXTURES_ROOT / "atis" / "experiment.json"),
str(self.FIXTURES_ROOT / "data" / "atis" / "sample.json"),
)
@flaky
def test_atis_model_can_train_save_and_load(self):
self.ensure_model_can_train_save_and_load(self.param_file)
def test_action_sequence_to_sql(self):
action_sequence = [
'statement -> [query, ";"]',
'query -> ["(", "SELECT", distinct, select_results, "FROM", table_refs, '
'where_clause, ")"]',
'distinct -> ["DISTINCT"]',
"select_results -> [col_refs]",
'col_refs -> [col_ref, ",", col_refs]',
'col_ref -> ["city", ".", "city_code"]',
"col_refs -> [col_ref]",
'col_ref -> ["city", ".", "city_name"]',
"table_refs -> [table_name]",
'table_name -> ["city"]',
'where_clause -> ["WHERE", "(", conditions, ")"]',
"conditions -> [condition]",
"condition -> [biexpr]",
'biexpr -> ["city", ".", "city_name", binaryop, city_city_name_string]',
'binaryop -> ["="]',
"city_city_name_string -> [\"'BOSTON'\"]",
]
sql_query = action_sequence_to_sql(action_sequence)
assert (
sql_query == "( SELECT DISTINCT city . city_code , city . city_name "
"FROM city WHERE ( city . city_name = 'BOSTON' ) ) ;"
)
| allennlp-semparse-master | tests/models/atis/atis_semantic_parser_test.py |
allennlp-semparse-master | tests/models/quarel/__init__.py |
|
"""
We define a simple deterministic decoder here that takes steps to add integers to a list. At
each step, the decoder takes the last integer in the list, and adds either 1 or 2 to produce the
next element that will be added to the list. We initialize the list with the value 0 (or whatever
you pick), and we say that a sequence is finished when the last element is 4. We define the score
of a state as the negative of the number of elements (excluding the initial value) in the action
history.
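For example, starting from 0, the sequences [2, 4] (score -2) and [1, 2, 4] (score -3) are both
finished, and [2, 4] is the highest-scoring one.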
"""
from collections import defaultdict
from typing import List, Set, Dict
import torch
from allennlp_semparse.state_machines import State, TransitionFunction
class SimpleState(State["SimpleState"]):
def __init__(
self,
batch_indices: List[int],
action_history: List[List[int]],
score: List[torch.Tensor],
start_values: List[int] = None,
) -> None:
super().__init__(batch_indices, action_history, score)
self.start_values = start_values or [0] * len(batch_indices)
def is_finished(self) -> bool:
return self.action_history[0][-1] == 4
@classmethod
def combine_states(cls, states) -> "SimpleState":
batch_indices = [batch_index for state in states for batch_index in state.batch_indices]
action_histories = [
action_history for state in states for action_history in state.action_history
]
scores = [score for state in states for score in state.score]
start_values = [start_value for state in states for start_value in state.start_values]
return SimpleState(batch_indices, action_histories, scores, start_values)
def __repr__(self):
return f"{self.action_history}"
class SimpleTransitionFunction(TransitionFunction[SimpleState]):
def __init__(
self, valid_actions: Set[int] = None, include_value_in_score: bool = False
) -> None:
# The default allowed actions are adding 1 or 2 to the last element.
self._valid_actions = valid_actions or {1, 2}
        # If True, we will add a small multiple of the action taken to the score, to encourage
# getting higher numbers first (and to differentiate action sequences).
self._include_value_in_score = include_value_in_score
def take_step(
self, state: SimpleState, max_actions: int = None, allowed_actions: List[Set] = None
) -> List[SimpleState]:
indexed_next_states: Dict[int, List[SimpleState]] = defaultdict(list)
if not allowed_actions:
allowed_actions = [None] * len(state.batch_indices)
for batch_index, action_history, score, start_value, actions in zip(
state.batch_indices,
state.action_history,
state.score,
state.start_values,
allowed_actions,
):
prev_action = action_history[-1] if action_history else start_value
for action in self._valid_actions:
next_item = int(prev_action + action)
if actions and next_item not in actions:
continue
new_history = action_history + [next_item]
# For every action taken, we reduce the score by 1.
new_score = score - 1
if self._include_value_in_score:
new_score += 0.01 * next_item
new_state = SimpleState([batch_index], [new_history], [new_score])
indexed_next_states[batch_index].append(new_state)
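        # For each batch index, sort the candidate next states by score (highest first) and
        # optionally truncate to `max_actions` states.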
next_states: List[SimpleState] = []
for batch_next_states in indexed_next_states.values():
sorted_next_states = [(-state.score[0].data[0], state) for state in batch_next_states]
sorted_next_states.sort(key=lambda x: x[0])
if max_actions is not None:
sorted_next_states = sorted_next_states[:max_actions]
next_states.extend(state[1] for state in sorted_next_states)
return next_states
| allennlp-semparse-master | tests/state_machines/simple_transition_system.py |
import torch
from .. import SemparseTestCase
from allennlp_semparse.state_machines import ConstrainedBeamSearch
from .simple_transition_system import SimpleState, SimpleTransitionFunction
class TestConstrainedBeamSearch(SemparseTestCase):
def test_search(self):
# The simple transition system starts at some number, adds one or two at each state, and
# tries to get to 4. The highest scoring path has the shortest length and the highest
# numbers (so always add two, unless you're at 3). From -3, there are lots of possible
# sequences: [-2, -1, 0, 1, 2, 3, 4], [-1, 1, 3, 4], ... We'll specify a few of those up
# front as "allowed", and use that to test the constrained beam search implementation.
initial_state = SimpleState([0], [[]], [torch.Tensor([0.0])], [-3])
beam_size = 3
allowed_sequences = torch.Tensor(
[
[
[-2, -1, 0, 1, 2, 3, 4],
[-2, 0, 2, 4, -1, -1, -1],
[-1, 1, 3, 4, -1, -1, -1],
[-2, -1, 0, 1, 2, 4, -1],
[-1, 0, 1, 2, 3, 4, -1],
[-1, 1, 2, 3, 4, -1, -1],
]
]
)
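        # The mask marks the real (non-padded) entries in each allowed sequence above.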
mask = torch.Tensor(
[
[
[1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 0, 0, 0],
[1, 1, 1, 1, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 0],
[1, 1, 1, 1, 1, 1, 0],
[1, 1, 1, 1, 1, 0, 0],
]
]
)
beam_search = ConstrainedBeamSearch(beam_size, allowed_sequences, mask)
# Including the value in the score will make us pick states that have higher numbers first.
# So with a beam size of 3, we'll get all of the states that start with `-1` after the
# first step, even though in the end one of the states that starts with `-2` is better than
# two of the states that start with `-1`.
decoder_step = SimpleTransitionFunction(include_value_in_score=True)
best_states = beam_search.search(initial_state, decoder_step)
assert len(best_states) == 1
assert best_states[0][0].action_history[0] == [-1, 1, 3, 4]
assert best_states[0][1].action_history[0] == [-1, 1, 2, 3, 4]
assert best_states[0][2].action_history[0] == [-1, 0, 1, 2, 3, 4]
# With a beam size of 6, we should get the other allowed path of length 4 as the second
# best result.
beam_size = 6
beam_search = ConstrainedBeamSearch(beam_size, allowed_sequences, mask)
decoder_step = SimpleTransitionFunction(include_value_in_score=True)
best_states = beam_search.search(initial_state, decoder_step)
assert len(best_states) == 1
assert best_states[0][0].action_history[0] == [-1, 1, 3, 4]
assert best_states[0][1].action_history[0] == [-2, 0, 2, 4]
| allennlp-semparse-master | tests/state_machines/constrained_beam_search_test.py |
allennlp-semparse-master | tests/state_machines/__init__.py |
|
import torch
from .. import SemparseTestCase
from allennlp_semparse.state_machines import util
class TestStateMachinesUtil(SemparseTestCase):
def test_create_allowed_transitions(self):
targets = torch.Tensor(
[[[2, 3, 4], [1, 3, 4], [1, 2, 4]], [[3, 4, 0], [2, 3, 4], [0, 0, 0]]]
)
target_mask = torch.Tensor(
[[[1, 1, 1], [1, 1, 1], [1, 1, 1]], [[1, 1, 0], [1, 1, 1], [0, 0, 0]]]
)
prefix_tree = util.construct_prefix_tree(targets, target_mask)
# There were two instances in this batch.
assert len(prefix_tree) == 2
# The first instance had six valid action sequence prefixes.
assert len(prefix_tree[0]) == 6
assert prefix_tree[0][()] == {1, 2}
assert prefix_tree[0][(1,)] == {2, 3}
assert prefix_tree[0][(1, 2)] == {4}
assert prefix_tree[0][(1, 3)] == {4}
assert prefix_tree[0][(2,)] == {3}
assert prefix_tree[0][(2, 3)] == {4}
# The second instance had four valid action sequence prefixes.
assert len(prefix_tree[1]) == 4
assert prefix_tree[1][()] == {2, 3}
assert prefix_tree[1][(2,)] == {3}
assert prefix_tree[1][(2, 3)] == {4}
assert prefix_tree[1][(3,)] == {4}
| allennlp-semparse-master | tests/state_machines/util_test.py |
import torch
from allennlp.common import Params
from .. import SemparseTestCase
from allennlp_semparse.state_machines import BeamSearch
from .simple_transition_system import SimpleState, SimpleTransitionFunction
class TestBeamSearch(SemparseTestCase):
def test_search(self):
beam_search = BeamSearch.from_params(Params({"beam_size": 4}))
initial_state = SimpleState(
[0, 1, 2, 3],
[[], [], [], []],
[torch.Tensor([0.0]), torch.Tensor([0.0]), torch.Tensor([0.0]), torch.Tensor([0.0])],
[-3, 1, -20, 5],
)
decoder_step = SimpleTransitionFunction(include_value_in_score=True)
best_states = beam_search.search(
5, initial_state, decoder_step, keep_final_unfinished_states=False
)
# Instance with batch index 2 needed too many steps to finish, and batch index 3 had no
# path to get to a finished state. (See the simple transition system definition; goal is
# to end up at 4, actions are either add one or two to starting value.)
assert len(best_states) == 2
assert best_states[0][0].action_history[0] == [-1, 1, 3, 4]
assert best_states[1][0].action_history[0] == [3, 4]
best_states = beam_search.search(
5, initial_state, decoder_step, keep_final_unfinished_states=True
)
# Now we're keeping final unfinished states, which allows a "best state" for the instances
        # that didn't have one before. The previous best states for the instances that finish
        # don't change, because the score for taking another step is always negative at these
# values.
assert len(best_states) == 4
assert best_states[0][0].action_history[0] == [-1, 1, 3, 4]
assert best_states[1][0].action_history[0] == [3, 4]
assert best_states[2][0].action_history[0] == [-18, -16, -14, -12, -10]
assert best_states[3][0].action_history[0] == [7, 9, 11, 13, 15]
def test_constraints(self):
# The simple transition system starts at some number, adds one or two at each state, and
# tries to get to 4. The highest scoring path has the shortest length and the highest
# numbers (so always add two, unless you're at 3). From -3, there are lots of possible
# sequences: [-2, -1, 0, 1, 2, 3, 4], [-1, 1, 3, 4], ... We'll specify a few of those up
# front as "allowed", and use that to test the constrained beam search implementation.
initial_state = SimpleState([0], [[]], [torch.Tensor([0.0])], [-3])
beam_size = 3
initial_sequence = torch.Tensor([-2, -1, 0, 1])
beam_search = BeamSearch(beam_size, initial_sequence=initial_sequence)
decoder_step = SimpleTransitionFunction(include_value_in_score=True)
best_states = beam_search.search(7, initial_state, decoder_step)
assert len(best_states) == 1
# After the constraint runs out, we generate [3], [2],
# then we generate [3, 5], [3, 4], [2, 4], the latter two of which are finished,
# then we generate [3, 5, 7], [3, 5, 6], and we're out of steps, so we keep the former
assert best_states[0][0].action_history[0] == [-2, -1, 0, 1, 3, 4]
assert best_states[0][1].action_history[0] == [-2, -1, 0, 1, 2, 4]
assert best_states[0][2].action_history[0] == [-2, -1, 0, 1, 3, 5, 7]
# Now set the beam size to 6, we generate [3], [2]
# then [3, 5], [2, 3], [3, 4], [2, 4] (the latter two of which are finished)
# then [3, 5, 6], [3, 5, 7], [2, 3, 5], [2, 3, 4] (the last is finished)
beam_size = 6
beam_search = BeamSearch(
beam_size, initial_sequence=initial_sequence, keep_beam_details=True
)
decoder_step = SimpleTransitionFunction(include_value_in_score=True)
best_states = beam_search.search(
7, initial_state, decoder_step, keep_final_unfinished_states=False
)
assert len(best_states) == 1
assert len(best_states[0]) == 3
assert best_states[0][0].action_history[0] == [-2, -1, 0, 1, 3, 4]
assert best_states[0][1].action_history[0] == [-2, -1, 0, 1, 2, 4]
assert best_states[0][2].action_history[0] == [-2, -1, 0, 1, 2, 3, 4]
# Check that beams are correct
best_action_sequence = best_states[0][0].action_history[0]
beam_snapshots = beam_search.beam_snapshots
assert len(beam_snapshots) == 1
beam_snapshots0 = beam_snapshots.get(0)
assert beam_snapshots0 is not None
for i, beam in enumerate(beam_snapshots0):
assert all(len(sequence) == i + 1 for _, sequence in beam)
if i < len(best_action_sequence):
assert any(sequence[-1] == best_action_sequence[i] for _, sequence in beam)
| allennlp-semparse-master | tests/state_machines/beam_search_test.py |
allennlp-semparse-master | tests/state_machines/trainers/__init__.py |
|
import math
from numpy.testing import assert_almost_equal
import torch
from ... import SemparseTestCase
from allennlp_semparse.state_machines.trainers import MaximumMarginalLikelihood
from ..simple_transition_system import SimpleState, SimpleTransitionFunction
class TestMaximumMarginalLikelihood(SemparseTestCase):
def setup_method(self):
super().setup_method()
self.initial_state = SimpleState(
[0, 1], [[], []], [torch.Tensor([0.0]), torch.Tensor([0.0])], [0, 1]
)
self.decoder_step = SimpleTransitionFunction()
self.targets = torch.Tensor(
[[[2, 3, 4], [1, 3, 4], [1, 2, 4]], [[3, 4, 0], [2, 3, 4], [0, 0, 0]]]
)
self.target_mask = torch.Tensor(
[[[1, 1, 1], [1, 1, 1], [1, 1, 1]], [[1, 1, 0], [1, 1, 1], [0, 0, 0]]]
)
self.supervision = (self.targets, self.target_mask)
        # No beam size is given, so the search over the target sequences is exhaustive.
self.trainer = MaximumMarginalLikelihood()
def test_decode(self):
decoded_info = self.trainer.decode(self.initial_state, self.decoder_step, self.supervision)
        # Our loss is the negative log of the sum of exp(score) over the target sequences
        # (i.e., the negative marginal log-likelihood). The score for each sequence in our
        # simple transition system is just `-sequence_length`.
instance0_loss = math.log(math.exp(-3) * 3) # all three sequences have length 3
instance1_loss = math.log(math.exp(-2) + math.exp(-3)) # one has length 2, one has length 3
expected_loss = -(instance0_loss + instance1_loss) / 2
assert_almost_equal(decoded_info["loss"].data.numpy(), expected_loss)
| allennlp-semparse-master | tests/state_machines/trainers/maximum_marginal_likelihood_test.py |
import torch
import numpy as np
from numpy.testing import assert_almost_equal
from ... import SemparseTestCase
from allennlp_semparse.state_machines.trainers import ExpectedRiskMinimization
from ..simple_transition_system import SimpleState, SimpleTransitionFunction
class TestExpectedRiskMinimization(SemparseTestCase):
def setup_method(self):
super().setup_method()
self.initial_state = SimpleState([0], [[0]], [torch.Tensor([0.0])])
self.decoder_step = SimpleTransitionFunction()
# Cost is the number of odd elements in the action history.
self.supervision = lambda state: torch.Tensor(
[sum([x % 2 != 0 for x in state.action_history[0]])]
)
# High beam size ensures exhaustive search.
self.trainer = ExpectedRiskMinimization(
beam_size=100, normalize_by_length=False, max_decoding_steps=10
)
def test_get_finished_states(self):
finished_states = self.trainer._get_finished_states(self.initial_state, self.decoder_step)
state_info = [(state.action_history[0], state.score[0].item()) for state in finished_states]
# There will be exactly five finished states with the following paths. Each score is the
# negative of one less than the number of elements in the action history.
assert len(finished_states) == 5
assert ([0, 2, 4], -2) in state_info
assert ([0, 1, 2, 4], -3) in state_info
assert ([0, 1, 3, 4], -3) in state_info
assert ([0, 2, 3, 4], -3) in state_info
assert ([0, 1, 2, 3, 4], -4) in state_info
def test_decode(self):
decoded_info = self.trainer.decode(self.initial_state, self.decoder_step, self.supervision)
# The best state corresponds to the shortest path.
best_state = decoded_info["best_final_states"][0][0]
assert best_state.action_history[0] == [0, 2, 4]
# The scores and costs corresponding to the finished states will be
# [0, 2, 4] : -2, 0
# [0, 1, 2, 4] : -3, 1
# [0, 1, 3, 4] : -3, 2
# [0, 2, 3, 4] : -3, 1
# [0, 1, 2, 3, 4] : -4, 2
        # This is the normalization factor used when re-normalizing the probabilities over the beam.
partition = np.exp(-2) + np.exp(-3) + np.exp(-3) + np.exp(-3) + np.exp(-4)
expected_loss = (
(np.exp(-2) * 0)
+ (np.exp(-3) * 1)
+ (np.exp(-3) * 2)
+ (np.exp(-3) * 1)
+ (np.exp(-4) * 2)
) / partition
assert_almost_equal(decoded_info["loss"].data.numpy(), expected_loss)
| allennlp-semparse-master | tests/state_machines/trainers/expected_risk_minimization_test.py |
from numpy.testing import assert_almost_equal
import torch
from ... import SemparseTestCase
from allennlp.modules import Attention
from allennlp_semparse.nltk_languages.type_declarations.type_declaration import is_nonterminal
from allennlp_semparse.state_machines.states import GrammarBasedState, GrammarStatelet, RnnStatelet
from allennlp_semparse.state_machines.transition_functions import BasicTransitionFunction
class TestBasicTransitionFunction(SemparseTestCase):
def setup_method(self):
super().setup_method()
self.decoder_step = BasicTransitionFunction(
encoder_output_dim=2,
action_embedding_dim=2,
input_attention=Attention.by_name("dot_product")(),
add_action_bias=False,
)
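        # Build a single grouped state with three group elements: two from batch instance 0 and
        # one from batch instance 1, each with its own (possibly empty) action history.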
batch_indices = [0, 1, 0]
action_history = [[1], [3, 4], []]
score = [torch.FloatTensor([x]) for x in [0.1, 1.1, 2.2]]
hidden_state = torch.FloatTensor([[i, i] for i in range(len(batch_indices))])
memory_cell = torch.FloatTensor([[i, i] for i in range(len(batch_indices))])
previous_action_embedding = torch.FloatTensor([[i, i] for i in range(len(batch_indices))])
attended_question = torch.FloatTensor([[i, i] for i in range(len(batch_indices))])
# This maps non-terminals to valid actions, where the valid actions are grouped by _type_.
# We have "global" actions, which are from the global grammar, and "linked" actions, which
# are instance-specific and are generated based on question attention. Each action type
# has a tuple which is (input representation, output representation, action ids).
valid_actions = {
"e": {
"global": (
torch.FloatTensor([[0, 0], [-1, -1], [-2, -2]]),
torch.FloatTensor([[-1, -1], [-2, -2], [-3, -3]]),
[0, 1, 2],
),
"linked": (
torch.FloatTensor([[0.1, 0.2, 0.3], [0.4, 0.5, 0.6]]),
torch.FloatTensor([[3, 3], [4, 4]]),
[3, 4],
),
},
"d": {
"global": (torch.FloatTensor([[0, 0]]), torch.FloatTensor([[-1, -1]]), [0]),
"linked": (
torch.FloatTensor([[-0.1, -0.2, -0.3], [-0.4, -0.5, -0.6], [-0.7, -0.8, -0.9]]),
torch.FloatTensor([[5, 5], [6, 6], [7, 7]]),
[1, 2, 3],
),
},
}
grammar_state = [
GrammarStatelet([nonterminal], valid_actions, is_nonterminal)
for _, nonterminal in zip(batch_indices, ["e", "d", "e"])
]
self.encoder_outputs = torch.FloatTensor(
[[[1, 2], [3, 4], [5, 6]], [[10, 11], [12, 13], [14, 15]]]
)
self.encoder_output_mask = torch.FloatTensor([[1, 1, 1], [1, 1, 0]])
self.possible_actions = [
[
("e -> f", False, None),
("e -> g", True, None),
("e -> h", True, None),
("e -> i", True, None),
("e -> j", True, None),
],
[
("d -> q", True, None),
("d -> g", True, None),
("d -> h", True, None),
("d -> i", True, None),
],
]
rnn_state = []
for i in range(len(batch_indices)):
rnn_state.append(
RnnStatelet(
hidden_state[i],
memory_cell[i],
previous_action_embedding[i],
attended_question[i],
self.encoder_outputs,
self.encoder_output_mask,
)
)
self.state = GrammarBasedState(
batch_indices=batch_indices,
action_history=action_history,
score=score,
rnn_state=rnn_state,
grammar_state=grammar_state,
possible_actions=self.possible_actions,
)
def test_take_step(self):
new_states = self.decoder_step.take_step(
self.state, max_actions=1, allowed_actions=[{2, 3}, {0}, {4}]
)
assert len(new_states) == 2
new_state = new_states[0]
assert new_state.batch_indices == [0]
# We're not going to try to guess which action was taken (or set model weights so that we
# know which action will be taken); we'll just check that we got one of the actions we were
# expecting.
expected_possibilities = set([((4,), ("j",)), ((1, 2), ("h",)), ((1, 3), ("i",))])
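# Each possibility is (new action history, new nonterminal stack): group 2 (empty history)
# can only take its allowed action 4 ('e -> j'), while group 0 (history [1]) can take
# allowed action 2 ('e -> h') or 3 ('e -> i').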
actual = (
tuple(new_state.action_history[0]),
tuple(new_state.grammar_state[0]._nonterminal_stack),
)
assert actual in expected_possibilities
# These should just be copied from the prior state, no matter which action we took.
assert_almost_equal(
new_state.rnn_state[0].encoder_outputs.cpu().numpy(), self.encoder_outputs.cpu().numpy()
)
assert_almost_equal(
new_state.rnn_state[0].encoder_output_mask.cpu().numpy(),
self.encoder_output_mask.cpu().numpy(),
)
assert new_state.possible_actions == self.possible_actions
new_state = new_states[1]
# For batch instance 1, we should have selected action 0 from group index 1 - there was
# only one allowed action.
assert new_state.batch_indices == [1]
# These two have values taken from what's defined in setup_method() - the prior action
# history ([3, 4]) and the nonterminals corresponding to the action we picked ('q').
assert new_state.action_history == [[3, 4, 0]]
assert new_state.grammar_state[0]._nonterminal_stack == ["q"]
# And these should just be copied from the prior state.
assert_almost_equal(
new_state.rnn_state[0].encoder_outputs.cpu().numpy(), self.encoder_outputs.cpu().numpy()
)
assert_almost_equal(
new_state.rnn_state[0].encoder_output_mask.cpu().numpy(),
self.encoder_output_mask.cpu().numpy(),
)
assert new_state.possible_actions == self.possible_actions
| allennlp-semparse-master | tests/state_machines/transition_functions/basic_transition_function_test.py |
allennlp-semparse-master | tests/state_machines/transition_functions/__init__.py |
|
import pytest
from ... import SemparseTestCase
from allennlp_semparse.state_machines.states import GrammarStatelet
def is_nonterminal(symbol: str) -> bool:
if symbol == "identity":
return False
if "lambda " in symbol:
return False
if symbol in {"x", "y", "z"}:
return False
return True
class TestGrammarStatelet(SemparseTestCase):
def test_is_finished_just_uses_nonterminal_stack(self):
state = GrammarStatelet(["s"], {}, is_nonterminal)
assert not state.is_finished()
state = GrammarStatelet([], {}, is_nonterminal)
assert state.is_finished()
def test_get_valid_actions_uses_top_of_stack(self):
s_actions = object()
t_actions = object()
e_actions = object()
state = GrammarStatelet(["s"], {"s": s_actions, "t": t_actions}, is_nonterminal)
assert state.get_valid_actions() == s_actions
state = GrammarStatelet(["t"], {"s": s_actions, "t": t_actions}, is_nonterminal)
assert state.get_valid_actions() == t_actions
state = GrammarStatelet(
["e"], {"s": s_actions, "t": t_actions, "e": e_actions}, is_nonterminal
)
assert state.get_valid_actions() == e_actions
def test_take_action_crashes_with_mismatched_types(self):
with pytest.raises(AssertionError):
state = GrammarStatelet(["s"], {}, is_nonterminal)
state.take_action("t -> identity")
| allennlp-semparse-master | tests/state_machines/states/grammar_statelet_test.py |
allennlp-semparse-master | tests/state_machines/states/__init__.py |
|
import pytest
import torch
from numpy.testing import assert_almost_equal
from ... import SemparseTestCase
from allennlp_semparse.state_machines.states import LambdaGrammarStatelet
def is_nonterminal(symbol: str) -> bool:
if symbol == "identity":
return False
if "lambda " in symbol:
return False
if symbol in {"x", "y", "z"}:
return False
return True
class TestLambdaGrammarStatelet(SemparseTestCase):
def test_is_finished_just_uses_nonterminal_stack(self):
state = LambdaGrammarStatelet(["s"], {}, {}, {}, is_nonterminal)
assert not state.is_finished()
state = LambdaGrammarStatelet([], {}, {}, {}, is_nonterminal)
assert state.is_finished()
def test_get_valid_actions_uses_top_of_stack(self):
s_actions = object()
t_actions = object()
e_actions = object()
state = LambdaGrammarStatelet(
["s"], {}, {"s": s_actions, "t": t_actions}, {}, is_nonterminal
)
assert state.get_valid_actions() == s_actions
state = LambdaGrammarStatelet(
["t"], {}, {"s": s_actions, "t": t_actions}, {}, is_nonterminal
)
assert state.get_valid_actions() == t_actions
state = LambdaGrammarStatelet(
["e"], {}, {"s": s_actions, "t": t_actions, "e": e_actions}, {}, is_nonterminal
)
assert state.get_valid_actions() == e_actions
def test_get_valid_actions_adds_lambda_productions(self):
state = LambdaGrammarStatelet(
["s"],
{("s", "x"): ["s"]},
{"s": {"global": (torch.Tensor([1, 1]), torch.Tensor([2, 2]), [1, 2])}},
{"s -> x": (torch.Tensor([5]), torch.Tensor([6]), 5)},
is_nonterminal,
)
actions = state.get_valid_actions()
assert_almost_equal(actions["global"][0].cpu().numpy(), [1, 1, 5])
assert_almost_equal(actions["global"][1].cpu().numpy(), [2, 2, 6])
assert actions["global"][2] == [1, 2, 5]
# We're doing this assert twice to make sure we haven't accidentally modified the state.
actions = state.get_valid_actions()
assert_almost_equal(actions["global"][0].cpu().numpy(), [1, 1, 5])
assert_almost_equal(actions["global"][1].cpu().numpy(), [2, 2, 6])
assert actions["global"][2] == [1, 2, 5]
def test_get_valid_actions_adds_lambda_productions_only_for_correct_type(self):
state = LambdaGrammarStatelet(
["t"],
{("s", "x"): ["t"]},
{
"s": {"global": (torch.Tensor([1, 1]), torch.Tensor([2, 2]), [1, 2])},
"t": {"global": (torch.Tensor([3, 3]), torch.Tensor([4, 4]), [3, 4])},
},
{"s -> x": (torch.Tensor([5]), torch.Tensor([6]), 5)},
is_nonterminal,
)
actions = state.get_valid_actions()
assert_almost_equal(actions["global"][0].cpu().numpy(), [3, 3])
assert_almost_equal(actions["global"][1].cpu().numpy(), [4, 4])
assert actions["global"][2] == [3, 4]
# We're doing this assert twice to make sure we haven't accidentally modified the state.
actions = state.get_valid_actions()
assert_almost_equal(actions["global"][0].cpu().numpy(), [3, 3])
assert_almost_equal(actions["global"][1].cpu().numpy(), [4, 4])
assert actions["global"][2] == [3, 4]
def test_take_action_gives_correct_next_states_with_non_lambda_productions(self):
# state.take_action() doesn't read or change these objects, it just passes them through, so
# we'll use some sentinels to be sure of that.
valid_actions = object()
context_actions = object()
state = LambdaGrammarStatelet(["s"], {}, valid_actions, context_actions, is_nonterminal)
next_state = state.take_action("s -> [t, r]")
expected_next_state = LambdaGrammarStatelet(
["r", "t"], {}, valid_actions, context_actions, is_nonterminal
)
assert next_state.__dict__ == expected_next_state.__dict__
state = LambdaGrammarStatelet(
["r", "t"], {}, valid_actions, context_actions, is_nonterminal
)
next_state = state.take_action("t -> identity")
expected_next_state = LambdaGrammarStatelet(
["r"], {}, valid_actions, context_actions, is_nonterminal
)
assert next_state.__dict__ == expected_next_state.__dict__
def test_take_action_crashes_with_mismatched_types(self):
with pytest.raises(AssertionError):
state = LambdaGrammarStatelet(["s"], {}, {}, {}, is_nonterminal)
state.take_action("t -> identity")
def test_take_action_gives_correct_next_states_with_lambda_productions(self):
# state.take_action() doesn't read or change these objects, it just passes them through, so
# we'll use some sentinels to be sure of that.
valid_actions = object()
context_actions = object()
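# The second constructor argument of LambdaGrammarStatelet (empty here) is the lambda stack:
# after a production like '<s,d> -> [lambda x, d]', the entry ('s', 'x') records which
# nonterminals are still inside that lambda's body, and (as the later steps show) the
# variable drops out of scope once its list empties.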
state = LambdaGrammarStatelet(
["t", "<s,d>"], {}, valid_actions, context_actions, is_nonterminal
)
next_state = state.take_action("<s,d> -> [lambda x, d]")
expected_next_state = LambdaGrammarStatelet(
["t", "d"], {("s", "x"): ["d"]}, valid_actions, context_actions, is_nonterminal
)
assert next_state.__dict__ == expected_next_state.__dict__
state = expected_next_state
next_state = state.take_action("d -> [<s,r>, d]")
expected_next_state = LambdaGrammarStatelet(
["t", "d", "<s,r>"],
{("s", "x"): ["d", "<s,r>"]},
valid_actions,
context_actions,
is_nonterminal,
)
assert next_state.__dict__ == expected_next_state.__dict__
state = expected_next_state
next_state = state.take_action("<s,r> -> [lambda y, r]")
expected_next_state = LambdaGrammarStatelet(
["t", "d", "r"],
{("s", "x"): ["d", "r"], ("s", "y"): ["r"]},
valid_actions,
context_actions,
is_nonterminal,
)
assert next_state.__dict__ == expected_next_state.__dict__
state = expected_next_state
next_state = state.take_action("r -> identity")
expected_next_state = LambdaGrammarStatelet(
["t", "d"], {("s", "x"): ["d"]}, valid_actions, context_actions, is_nonterminal
)
assert next_state.__dict__ == expected_next_state.__dict__
state = expected_next_state
next_state = state.take_action("d -> x")
expected_next_state = LambdaGrammarStatelet(
["t"], {}, valid_actions, context_actions, is_nonterminal
)
assert next_state.__dict__ == expected_next_state.__dict__
| allennlp-semparse-master | tests/state_machines/states/lambda_grammar_statelet_test.py |
from typing import Set
from .. import SemparseTestCase
from allennlp_semparse import DomainLanguage, ActionSpaceWalker, predicate
class Object:
pass
class FakeLanguageWithAssertions(DomainLanguage):
@predicate
def object_exists(self, items: Set[Object]) -> bool:
return True
@predicate
def black(self, items: Set[Object]) -> Set[Object]:
return items
@predicate
def triangle(self, items: Set[Object]) -> Set[Object]:
return items
@predicate
def touch_wall(self, items: Set[Object]) -> Set[Object]:
return items
@predicate
def all_objects(self) -> Set[Object]:
return set()
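# With the predicates above, every complete logical form the walker can produce has the shape
# (object_exists (<chain of set filters> all_objects)), which keeps the expected outputs in the
# tests below easy to enumerate.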
class TestActionSpaceWalker(SemparseTestCase):
def setup_method(self):
super().setup_method()
self.world = FakeLanguageWithAssertions(start_types={bool})
self.walker = ActionSpaceWalker(self.world, max_path_length=10)
def test_get_logical_forms_with_agenda(self):
black_logical_forms = self.walker.get_logical_forms_with_agenda(
["<Set[Object]:Set[Object]> -> black"]
)
# These are all the possible logical forms with black
assert len(black_logical_forms) == 25
shortest_logical_form = self.walker.get_logical_forms_with_agenda(
["<Set[Object]:Set[Object]> -> black"], 1
)[0]
# This is the shortest complete logical form with black
assert shortest_logical_form == "(object_exists (black all_objects))"
agenda = [
"<Set[Object]:Set[Object]> -> black",
"<Set[Object]:Set[Object]> -> triangle",
"<Set[Object]:Set[Object]> -> touch_wall",
]
black_triangle_touch_forms = self.walker.get_logical_forms_with_agenda(agenda)
# Permutations of the three functions. There will not be repetitions of any functions
# because we limit the length of paths to 10 above.
assert set(black_triangle_touch_forms) == set(
[
"(object_exists (black (triangle (touch_wall all_objects))))",
"(object_exists (black (touch_wall (triangle all_objects))))",
"(object_exists (triangle (black (touch_wall all_objects))))",
"(object_exists (triangle (touch_wall (black all_objects))))",
"(object_exists (touch_wall (black (triangle all_objects))))",
"(object_exists (touch_wall (triangle (black all_objects))))",
]
)
def test_get_logical_forms_with_agenda_and_partial_match(self):
black_logical_forms = self.walker.get_logical_forms_with_agenda(
["<Set[Object]:Set[Object]> -> black"]
)
# These are all the possible logical forms with black
assert len(black_logical_forms) == 25
shortest_logical_form = self.walker.get_logical_forms_with_agenda(
["<Set[Object]:Set[Object]> -> black"], 1
)[0]
# This is the shortest complete logical form with black
assert shortest_logical_form == "(object_exists (black all_objects))"
agenda = [
"<Set[Object]:Set[Object]> -> black",
"<Set[Object]:Set[Object]> -> triangle",
"<Set[Object]:Set[Object]> -> touch_wall",
]
black_triangle_touch_forms = self.walker.get_logical_forms_with_agenda(
agenda, allow_partial_match=True
)
# The first six logical forms will contain permutations of all three functions.
assert set(black_triangle_touch_forms[:6]) == set(
[
"(object_exists (black (triangle (touch_wall all_objects))))",
"(object_exists (black (touch_wall (triangle all_objects))))",
"(object_exists (triangle (black (touch_wall all_objects))))",
"(object_exists (triangle (touch_wall (black all_objects))))",
"(object_exists (touch_wall (black (triangle all_objects))))",
"(object_exists (touch_wall (triangle (black all_objects))))",
]
)
# The next six will be the shortest six with two agenda items.
assert set(black_triangle_touch_forms[6:12]) == set(
[
"(object_exists (black (triangle all_objects)))",
"(object_exists (black (touch_wall all_objects)))",
"(object_exists (triangle (black all_objects)))",
"(object_exists (triangle (touch_wall all_objects)))",
"(object_exists (touch_wall (black all_objects)))",
"(object_exists (touch_wall (triangle all_objects)))",
]
)
# After a bunch of longer logical forms with two agenda items, we have the shortest three
# with one agenda item.
assert set(black_triangle_touch_forms[30:33]) == set(
[
"(object_exists (black all_objects))",
"(object_exists (triangle all_objects))",
"(object_exists (touch_wall all_objects))",
]
)
def test_get_logical_forms_with_empty_agenda_returns_all_logical_forms(self, caplog):
empty_agenda_logical_forms = self.walker.get_logical_forms_with_agenda(
[], allow_partial_match=True
)
first_four_logical_forms = empty_agenda_logical_forms[:4]
assert set(first_four_logical_forms) == {
"(object_exists all_objects)",
"(object_exists (black all_objects))",
"(object_exists (touch_wall all_objects))",
"(object_exists (triangle all_objects))",
}
assert "Agenda is empty! Returning all paths instead." in caplog.text
def test_get_logical_forms_with_unmatched_agenda_returns_all_logical_forms(self, caplog):
agenda = ["<Set[Object]:Set[Object]> -> purple"]
empty_agenda_logical_forms = self.walker.get_logical_forms_with_agenda(
agenda, allow_partial_match=True
)
first_four_logical_forms = empty_agenda_logical_forms[:4]
assert set(first_four_logical_forms) == {
"(object_exists all_objects)",
"(object_exists (black all_objects))",
"(object_exists (touch_wall all_objects))",
"(object_exists (triangle all_objects))",
}
assert "Agenda items not in any of the paths found. Returning all paths." in caplog.text
empty_set = self.walker.get_logical_forms_with_agenda(agenda, allow_partial_match=False)
assert empty_set == []
def test_get_logical_forms_with_agenda_ignores_null_set_item(self, caplog):
agenda = [
"<Set[Object]:Set[Object]> -> yellow",
"<Set[Object]:Set[Object]> -> black",
"<Set[Object]:Set[Object]> -> triangle",
"<Set[Object]:Set[Object]> -> touch_wall",
]
yellow_black_triangle_touch_forms = self.walker.get_logical_forms_with_agenda(agenda)
# Permutations of the three functions, after ignoring yellow. There will not be repetitions
# of any functions because we limit the length of paths to 10 above.
assert set(yellow_black_triangle_touch_forms) == set(
[
"(object_exists (black (triangle (touch_wall all_objects))))",
"(object_exists (black (touch_wall (triangle all_objects))))",
"(object_exists (triangle (black (touch_wall all_objects))))",
"(object_exists (triangle (touch_wall (black all_objects))))",
"(object_exists (touch_wall (black (triangle all_objects))))",
"(object_exists (touch_wall (triangle (black all_objects))))",
]
)
log = "<Set[Object]:Set[Object]> -> yellow is not in any of the paths found! Ignoring it."
assert log in caplog.text
def test_get_all_logical_forms(self):
# get_all_logical_forms should sort logical forms by length.
ten_shortest_logical_forms = self.walker.get_all_logical_forms(max_num_logical_forms=10)
shortest_logical_form = ten_shortest_logical_forms[0]
assert shortest_logical_form == "(object_exists all_objects)"
length_three_logical_forms = ten_shortest_logical_forms[1:4]
assert set(length_three_logical_forms) == {
"(object_exists (black all_objects))",
"(object_exists (touch_wall all_objects))",
"(object_exists (triangle all_objects))",
}
| allennlp-semparse-master | tests/common/action_space_walker_test.py |
allennlp-semparse-master | tests/common/__init__.py |
|
from .. import SemparseTestCase
from allennlp_semparse.common import util
class TestSemparseUtil(SemparseTestCase):
def test_lisp_to_nested_expression(self):
logical_form = "((reverse fb:row.row.year) (fb:row.row.league fb:cell.usl_a_league))"
expression = util.lisp_to_nested_expression(logical_form)
assert expression == [
["reverse", "fb:row.row.year"],
["fb:row.row.league", "fb:cell.usl_a_league"],
]
logical_form = "(count (and (division 1) (tier (!= null))))"
expression = util.lisp_to_nested_expression(logical_form)
assert expression == ["count", ["and", ["division", "1"], ["tier", ["!=", "null"]]]]
| allennlp-semparse-master | tests/common/util_test.py |
import pytest
from .. import SemparseTestCase
from allennlp_semparse.common import Date, ExecutionError
class TestDate(SemparseTestCase):
def test_date_comparison_works(self):
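# A field value of -1 in a Date means that field is unknown; comparisons are only defined when
# enough fields are known, which is what the cases below exercise.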
assert Date(2013, 12, 31) > Date(2013, 12, 30)
assert Date(2013, 12, 31) == Date(2013, 12, -1)
assert Date(2013, -1, -1) >= Date(2013, 12, 31)
assert (Date(2013, 12, -1) > Date(2013, 12, 31)) is False
with pytest.raises(ExecutionError, match="only compare Dates with Dates"):
assert (Date(2013, 12, 31) > 2013) is False
with pytest.raises(ExecutionError, match="only compare Dates with Dates"):
assert (Date(2013, 12, 31) >= 2013) is False
with pytest.raises(ExecutionError, match="only compare Dates with Dates"):
assert Date(2013, 12, 31) != 2013
assert (Date(2018, 1, 1) >= Date(-1, 2, 1)) is False
assert (Date(2018, 1, 1) < Date(-1, 2, 1)) is False
# When year is unknown in both cases, we can compare months and days.
assert Date(-1, 2, 1) < Date(-1, 2, 3)
# If both year and month are unknown in both cases, the comparison is undefined, and both
# < and >= return False.
assert (Date(-1, -1, 1) < Date(-1, -1, 3)) is False
assert (Date(-1, -1, 1) >= Date(-1, -1, 3)) is False
# Same when year is known, but months are not.
assert (Date(2018, -1, 1) < Date(2018, -1, 3)) is False
assert (Date(2018, -1, 1) >= Date(2018, -1, 3)) is False
| allennlp-semparse-master | tests/common/date_test.py |
allennlp-semparse-master | tests/common/wikitables/__init__.py |
|
from ... import SemparseTestCase
from allennlp.data.tokenizers import SpacyTokenizer
from allennlp_semparse.common import Date
from allennlp_semparse.common.wikitables import TableQuestionContext
class TestTableQuestionContext(SemparseTestCase):
def setup_method(self):
super().setup_method()
self.tokenizer = SpacyTokenizer(pos_tags=True)
def test_table_data(self):
question = "what was the attendance when usl a league played?"
question_tokens = self.tokenizer.tokenize(question)
test_file = f"{self.FIXTURES_ROOT}/data/wikitables/sample_table.tagged"
table_question_context = TableQuestionContext.read_from_file(test_file, question_tokens)
assert table_question_context.table_data == [
{
"date_column:year": Date(2001, -1, -1),
"number_column:year": 2001.0,
"string_column:year": "2001",
"number_column:division": 2.0,
"string_column:division": "2",
"string_column:league": "usl_a_league",
"string_column:regular_season": "4th_western",
"number_column:regular_season": 4.0,
"string_column:playoffs": "quarterfinals",
"string_column:open_cup": "did_not_qualify",
"number_column:open_cup": None,
"number_column:avg_attendance": 7169.0,
"string_column:avg_attendance": "7_169",
},
{
"date_column:year": Date(2005, -1, -1),
"number_column:year": 2005.0,
"string_column:year": "2005",
"number_column:division": 2.0,
"string_column:division": "2",
"string_column:league": "usl_first_division",
"string_column:regular_season": "5th",
"number_column:regular_season": 5.0,
"string_column:playoffs": "quarterfinals",
"string_column:open_cup": "4th_round",
"number_column:open_cup": 4.0,
"number_column:avg_attendance": 6028.0,
"string_column:avg_attendance": "6_028",
},
]
def test_table_data_from_untagged_file(self):
question = "what was the attendance when usl a league played?"
question_tokens = self.tokenizer.tokenize(question)
test_file = f"{self.FIXTURES_ROOT}/data/wikitables/sample_table.tsv"
table_lines = [line.strip() for line in open(test_file).readlines()]
table_question_context = TableQuestionContext.read_from_lines(table_lines, question_tokens)
# The content of the table represented by the untagged file we are reading here is the same as in the
# tagged file above, except that it has a "Score" column instead of an "Avg. Attendance" column, a change
# made to exercise the num2 extraction logic. Values that do not get extracted are also shown below, but
# commented out.
assert table_question_context.table_data == [
{
"number_column:year": 2001.0,
# The value extraction logic we have for untagged lines does
# not extract this value as a date.
# 'date_column:year': Date(2001, -1, -1),
"string_column:year": "2001",
"number_column:division": 2.0,
"string_column:division": "2",
"string_column:league": "usl_a_league",
"string_column:regular_season": "4th_western",
# We only check for strings that are entirely numbers. So 4.0
# will not be extracted.
# 'number_column:regular_season': 4.0,
"string_column:playoffs": "quarterfinals",
"string_column:open_cup": "did_not_qualify",
# 'number_column:open_cup': None,
"number_column:score": 20.0,
"num2_column:score": 30.0,
"string_column:score": "20_30",
},
{
"number_column:year": 2005.0,
# 'date_column:year': Date(2005, -1, -1),
"string_column:year": "2005",
"number_column:division": 2.0,
"string_column:division": "2",
"string_column:league": "usl_first_division",
"string_column:regular_season": "5th",
# Same here as in the "regular season" column for the first row.
# 5.0 will not be extracted from "5th".
# 'number_column:regular_season': 5.0,
"string_column:playoffs": "quarterfinals",
"string_column:open_cup": "4th_round",
# 'number_column:open_cup': 4.0,
"number_column:score": 50.0,
"num2_column:score": 40.0,
"string_column:score": "50_40",
},
]
def test_number_extraction(self):
question = """how many players on the 191617 illinois fighting illini men's basketball team
had more than 100 points scored?"""
question_tokens = self.tokenizer.tokenize(question)
test_file = f"{self.FIXTURES_ROOT}/data/corenlp_processed_tables/TEST-7.table"
table_question_context = TableQuestionContext.read_from_file(test_file, question_tokens)
_, number_entities = table_question_context.get_entities_from_question()
assert number_entities == [("191617", 5), ("100", 16)]
def test_date_extraction(self):
question = "how many laps did matt kenset complete on february 26, 2006."
question_tokens = self.tokenizer.tokenize(question)
test_file = f"{self.FIXTURES_ROOT}/data/corenlp_processed_tables/TEST-8.table"
table_question_context = TableQuestionContext.read_from_file(test_file, question_tokens)
_, number_entities = table_question_context.get_entities_from_question()
assert number_entities == [("2", 8), ("26", 9), ("2006", 11)]
def test_date_extraction_2(self):
question = """how many different players scored for the san jose earthquakes during their
1979 home opener against the timbers?"""
question_tokens = self.tokenizer.tokenize(question)
test_file = f"{self.FIXTURES_ROOT}/data/corenlp_processed_tables/TEST-6.table"
table_question_context = TableQuestionContext.read_from_file(test_file, question_tokens)
_, number_entities = table_question_context.get_entities_from_question()
assert number_entities == [("1979", 12)]
def test_multiword_entity_extraction(self):
question = "was the positioning better the year of the france venue or the year of the south korea venue?"
question_tokens = self.tokenizer.tokenize(question)
test_file = f"{self.FIXTURES_ROOT}/data/corenlp_processed_tables/TEST-3.table"
table_question_context = TableQuestionContext.read_from_file(test_file, question_tokens)
entities, _ = table_question_context.get_entities_from_question()
assert entities == [
("string:france", ["string_column:venue"]),
("string:south_korea", ["string_column:venue"]),
]
def test_rank_number_extraction(self):
question = "what was the first tamil-language film in 1943?"
question_tokens = self.tokenizer.tokenize(question)
test_file = f"{self.FIXTURES_ROOT}/data/corenlp_processed_tables/TEST-1.table"
table_question_context = TableQuestionContext.read_from_file(test_file, question_tokens)
_, numbers = table_question_context.get_entities_from_question()
assert numbers == [("1", 3), ("1943", 9)]
def test_null_extraction(self):
question = "on what date did the eagles score the least points?"
question_tokens = self.tokenizer.tokenize(question)
test_file = f"{self.FIXTURES_ROOT}/data/corenlp_processed_tables/TEST-2.table"
table_question_context = TableQuestionContext.read_from_file(test_file, question_tokens)
entities, numbers = table_question_context.get_entities_from_question()
# "Eagles" does not appear in the table.
assert entities == []
assert numbers == []
def test_numerical_column_type_extraction(self):
question = """how many players on the 191617 illinois fighting illini men's basketball team
had more than 100 points scored?"""
question_tokens = self.tokenizer.tokenize(question)
test_file = f"{self.FIXTURES_ROOT}/data/corenlp_processed_tables/TEST-7.table"
table_question_context = TableQuestionContext.read_from_file(test_file, question_tokens)
column_names = table_question_context.column_names
assert "number_column:games_played" in column_names
assert "number_column:field_goals" in column_names
assert "number_column:free_throws" in column_names
assert "number_column:points" in column_names
def test_date_column_type_extraction_1(self):
question = "how many were elected?"
question_tokens = self.tokenizer.tokenize(question)
test_file = f"{self.FIXTURES_ROOT}/data/corenlp_processed_tables/TEST-5.table"
table_question_context = TableQuestionContext.read_from_file(test_file, question_tokens)
column_names = table_question_context.column_names
assert "date_column:first_elected" in column_names
def test_date_column_type_extraction_2(self):
question = "how many were elected?"
question_tokens = self.tokenizer.tokenize(question)
test_file = f"{self.FIXTURES_ROOT}/data/corenlp_processed_tables/TEST-9.table"
table_question_context = TableQuestionContext.read_from_file(test_file, question_tokens)
column_names = table_question_context.column_names
assert "date_column:date_of_appointment" in column_names
assert "date_column:date_of_election" in column_names
def test_string_column_types_extraction(self):
question = "how many were elected?"
question_tokens = self.tokenizer.tokenize(question)
test_file = f"{self.FIXTURES_ROOT}/data/corenlp_processed_tables/TEST-10.table"
table_question_context = TableQuestionContext.read_from_file(test_file, question_tokens)
column_names = table_question_context.column_names
assert "string_column:birthplace" in column_names
assert "string_column:advocate" in column_names
assert "string_column:notability" in column_names
assert "string_column:name" in column_names
def test_number_and_entity_extraction(self):
question = "other than m1 how many notations have 1 in them?"
question_tokens = self.tokenizer.tokenize(question)
test_file = f"{self.FIXTURES_ROOT}/data/corenlp_processed_tables/TEST-11.table"
table_question_context = TableQuestionContext.read_from_file(test_file, question_tokens)
string_entities, number_entities = table_question_context.get_entities_from_question()
assert string_entities == [
("string:m1", ["string_column:notation"]),
("string:1", ["string_column:position"]),
]
assert number_entities == [("1", 2), ("1", 7)]
def test_get_knowledge_graph(self):
question = "other than m1 how many notations have 1 in them?"
question_tokens = self.tokenizer.tokenize(question)
test_file = f"{self.FIXTURES_ROOT}/data/corenlp_processed_tables/TEST-11.table"
table_question_context = TableQuestionContext.read_from_file(test_file, question_tokens)
knowledge_graph = table_question_context.get_table_knowledge_graph()
entities = knowledge_graph.entities
# -1 is not in entities because there are no date columns in the table.
assert sorted(entities) == [
"1",
"number_column:notation",
"number_column:position",
"string:1",
"string:m1",
"string_column:mnemonic",
"string_column:notation",
"string_column:position",
"string_column:short_name",
"string_column:swara",
]
neighbors = knowledge_graph.neighbors
# Each number extracted from the question will have all number and date columns as
# neighbors. Each string entity extracted from the question will only have the corresponding
# column as the neighbor.
neighbors_with_sets = {key: set(value) for key, value in neighbors.items()}
assert neighbors_with_sets == {
"1": {"number_column:position", "number_column:notation"},
"string_column:mnemonic": set(),
"string_column:short_name": set(),
"string_column:swara": set(),
"number_column:position": {"1"},
"number_column:notation": {"1"},
"string:m1": {"string_column:notation"},
"string:1": {"string_column:position"},
"string_column:notation": {"string:m1"},
"string_column:position": {"string:1"},
}
entity_text = knowledge_graph.entity_text
assert entity_text == {
"1": "1",
"string:m1": "m1",
"string:1": "1",
"string_column:notation": "notation",
"number_column:notation": "notation",
"string_column:mnemonic": "mnemonic",
"string_column:short_name": "short name",
"string_column:swara": "swara",
"number_column:position": "position",
"string_column:position": "position",
}
def test_knowledge_graph_has_correct_neighbors(self):
question = "when was the attendance greater than 5000?"
question_tokens = self.tokenizer.tokenize(question)
test_file = f"{self.FIXTURES_ROOT}/data/wikitables/sample_table.tagged"
table_question_context = TableQuestionContext.read_from_file(test_file, question_tokens)
knowledge_graph = table_question_context.get_table_knowledge_graph()
neighbors = knowledge_graph.neighbors
# '5000' is neighbors with number and date columns. '-1' is in entities because there is a
# date column, which is its only neighbor.
assert set(neighbors.keys()) == {
"date_column:year",
"number_column:year",
"string_column:year",
"number_column:division",
"string_column:division",
"string_column:league",
"string_column:regular_season",
"number_column:regular_season",
"string_column:playoffs",
"string_column:open_cup",
"number_column:open_cup",
"number_column:avg_attendance",
"string_column:avg_attendance",
"5000",
"-1",
}
assert set(neighbors["date_column:year"]) == {"5000", "-1"}
assert neighbors["number_column:year"] == ["5000"]
assert neighbors["string_column:year"] == []
assert neighbors["number_column:division"] == ["5000"]
assert neighbors["string_column:division"] == []
assert neighbors["string_column:league"] == []
assert neighbors["string_column:regular_season"] == []
assert neighbors["number_column:regular_season"] == ["5000"]
assert neighbors["string_column:playoffs"] == []
assert neighbors["string_column:open_cup"] == []
assert neighbors["number_column:open_cup"] == ["5000"]
assert neighbors["number_column:avg_attendance"] == ["5000"]
assert neighbors["string_column:avg_attendance"] == []
assert set(neighbors["5000"]) == {
"date_column:year",
"number_column:year",
"number_column:division",
"number_column:avg_attendance",
"number_column:regular_season",
"number_column:open_cup",
}
assert neighbors["-1"] == ["date_column:year"]
| allennlp-semparse-master | tests/common/wikitables/table_question_context_test.py |
allennlp-semparse-master | tests/common/sql/__init__.py |
|
import json
from ... import SemparseTestCase
from allennlp_semparse.common.sql import text2sql_utils
class TestText2SqlUtils(SemparseTestCase):
def setup_method(self):
super().setup_method()
self.data = self.FIXTURES_ROOT / "data" / "text2sql" / "restaurants_tiny.json"
def test_process_sql_data_blob(self):
data = json.load(open(str(self.data)))
dataset = text2sql_utils.process_sql_data([data[0]])
dataset = list(dataset)
sql_data = dataset[0]
# Check that question de-duplication happens by default
# (otherwise there would be more than 1 dataset element).
assert len(dataset) == 1
assert sql_data.text == [
"how",
"many",
"buttercup",
"kitchen",
"are",
"there",
"in",
"san",
"francisco",
"?",
]
assert sql_data.text_with_variables == [
"how",
"many",
"name0",
"are",
"there",
"in",
"city_name0",
"?",
]
assert sql_data.sql == [
"SELECT",
"COUNT",
"(",
"*",
")",
"FROM",
"LOCATION",
"AS",
"LOCATIONalias0",
",",
"RESTAURANT",
"AS",
"RESTAURANTalias0",
"WHERE",
"LOCATIONalias0",
".",
"CITY_NAME",
"=",
"'city_name0'",
"AND",
"RESTAURANTalias0",
".",
"ID",
"=",
"LOCATIONalias0",
".",
"RESTAURANT_ID",
"AND",
"RESTAURANTalias0",
".",
"NAME",
"=",
"'name0'",
";",
]
assert sql_data.text_variables == {
"city_name0": "san francisco",
"name0": "buttercup kitchen",
}
assert sql_data.sql_variables == {
"city_name0": {"text": "san francisco", "type": "city_name"},
"name0": {"text": "buttercup kitchen", "type": "name"},
}
dataset = text2sql_utils.process_sql_data([data[1]])
correct_text = [
[
[
"how",
"many",
"chinese",
"restaurants",
"are",
"there",
"in",
"the",
"bay",
"area",
"?",
],
[
"how",
"many",
"food_type0",
"restaurants",
"are",
"there",
"in",
"the",
"region0",
"?",
],
],
[
[
"how",
"many",
"places",
"for",
"chinese",
"food",
"are",
"there",
"in",
"the",
"bay",
"area",
"?",
],
[
"how",
"many",
"places",
"for",
"food_type0",
"food",
"are",
"there",
"in",
"the",
"region0",
"?",
],
],
[
[
"how",
"many",
"chinese",
"places",
"are",
"there",
"in",
"the",
"bay",
"area",
"?",
],
[
"how",
"many",
"food_type0",
"places",
"are",
"there",
"in",
"the",
"region0",
"?",
],
],
[
[
"how",
"many",
"places",
"for",
"chinese",
"are",
"there",
"in",
"the",
"bay",
"area",
"?",
],
[
"how",
"many",
"places",
"for",
"food_type0",
"are",
"there",
"in",
"the",
"region0",
"?",
],
],
]
for i, sql_data in enumerate(dataset):
assert sql_data.sql == [
"SELECT",
"COUNT",
"(",
"*",
")",
"FROM",
"GEOGRAPHIC",
"AS",
"GEOGRAPHICalias0",
",",
"RESTAURANT",
"AS",
"RESTAURANTalias0",
"WHERE",
"GEOGRAPHICalias0",
".",
"REGION",
"=",
"'region0'",
"AND",
"RESTAURANTalias0",
".",
"CITY_NAME",
"=",
"GEOGRAPHICalias0",
".",
"CITY_NAME",
"AND",
"RESTAURANTalias0",
".",
"FOOD_TYPE",
"=",
"'food_type0'",
";",
]
assert sql_data.text_variables == {"region0": "bay area", "food_type0": "chinese"}
assert sql_data.sql_variables == {
"region0": {"text": "bay area", "type": "region"},
"food_type0": {"text": "chinese", "type": "food_type"},
}
assert sql_data.text == correct_text[i][0]
assert sql_data.text_with_variables == correct_text[i][1]
def test_process_sql_data_can_yield_all_queries(self):
data = json.load(open(str(self.data)))
dataset = text2sql_utils.process_sql_data([data[0]], use_all_queries=True)
dataset = list(dataset)
assert len(dataset) == 3
def test_replace_variables(self):
sentence = ["how", "many", "name0", "are", "there", "in", "city_name0", "?"]
sentence_variables = {"city_name0": "san francisco", "name0": "buttercup kitchen"}
tokens, tags = text2sql_utils.replace_variables(sentence, sentence_variables)
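# replace_variables expands each variable into its surface form and returns, for every output
# token, a tag naming the variable it came from ('O' for tokens that are not part of any
# variable).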
assert tokens == [
"how",
"many",
"buttercup",
"kitchen",
"are",
"there",
"in",
"san",
"francisco",
"?",
]
assert tags == ["O", "O", "name0", "name0", "O", "O", "O", "city_name0", "city_name0", "O"]
def test_clean_and_split_sql(self):
sql = (
"SELECT COUNT( * ) FROM LOCATION AS LOCATIONalias0 , RESTAURANT AS RESTAURANTalias0 "
'WHERE LOCATIONalias0.CITY_NAME = "city_name0" AND RESTAURANTalias0.ID = '
'LOCATIONalias0.RESTAURANT_ID AND RESTAURANTalias0.NAME = "name0" ;'
)
cleaned = text2sql_utils.clean_and_split_sql(sql)
assert cleaned == [
"SELECT",
"COUNT",
"(",
"*",
")",
"FROM",
"LOCATION",
"AS",
"LOCATIONalias0",
",",
"RESTAURANT",
"AS",
"RESTAURANTalias0",
"WHERE",
"LOCATIONalias0",
".",
"CITY_NAME",
"=",
"'city_name0'",
"AND",
"RESTAURANTalias0",
".",
"ID",
"=",
"LOCATIONalias0",
".",
"RESTAURANT_ID",
"AND",
"RESTAURANTalias0",
".",
"NAME",
"=",
"'name0'",
";",
]
def test_clean_unneeded_aliases(self):
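# An alias is "unneeded" when it merely renames a single table (e.g. LOCATIONalias0 for
# LOCATION): such aliases are dropped and their usages rewritten to the table name, while
# aliases over expressions like MAX(...) are kept, as the last assertion below checks.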
sql = [
"SELECT",
"COUNT",
"(",
"*",
")",
"FROM",
"LOCATION",
"AS",
"LOCATIONalias0",
",",
"RESTAURANT",
"AS",
"RESTAURANTalias0",
"WHERE",
"LOCATIONalias0",
".",
"CITY_NAME",
"=",
"'city_name0'",
"AND",
"RESTAURANTalias0",
".",
"ID",
"=",
"LOCATIONalias0",
".",
"RESTAURANT_ID",
"AND",
"RESTAURANTalias0",
".",
"NAME",
"=",
"'name0'",
";",
]
cleaned = text2sql_utils.clean_unneeded_aliases(sql)
assert cleaned == [
"SELECT",
"COUNT",
"(",
"*",
")",
"FROM",
"LOCATION",
",",
"RESTAURANT",
"WHERE",
"LOCATION",
".",
"CITY_NAME",
"=",
"'city_name0'",
"AND",
"RESTAURANT",
".",
"ID",
"=",
"LOCATION",
".",
"RESTAURANT_ID",
"AND",
"RESTAURANT",
".",
"NAME",
"=",
"'name0'",
";",
]
# Check we don't mangle decimal numbers:
assert text2sql_utils.clean_unneeded_aliases(["2.5"]) == ["2.5"]
# Check we don't remove non-trivial aliases:
sql = [
"SELECT",
"COUNT",
"(",
"*",
")",
"FROM",
"MAX",
"(",
"LOCATION",
".",
"ID",
")",
"AS",
"LOCATIONalias0",
";",
]
assert text2sql_utils.clean_unneeded_aliases(sql) == sql
def test_read_database_schema(self):
schema = text2sql_utils.read_dataset_schema(
self.FIXTURES_ROOT / "data" / "text2sql" / "restaurants-schema.csv"
)
# Make it easier to compare:
schema = {
k: [(x.name, x.column_type, x.is_primary_key) for x in v] for k, v in schema.items()
}
assert schema == {
"RESTAURANT": [
("RESTAURANT_ID", "int(11)", True),
("NAME", "varchar(255)", False),
("FOOD_TYPE", "varchar(255)", False),
("CITY_NAME", "varchar(255)", False),
("RATING", '"decimal(1', False),
],
"LOCATION": [
("RESTAURANT_ID", "int(11)", True),
("HOUSE_NUMBER", "int(11)", False),
("STREET_NAME", "varchar(255)", False),
("CITY_NAME", "varchar(255)", False),
],
"GEOGRAPHIC": [
("CITY_NAME", "varchar(255)", True),
("COUNTY", "varchar(255)", False),
("REGION", "varchar(255)", False),
],
}
def test_resolve_primary_keys_in_schema(self):
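# The generic 'ID' column on LOCATION should be resolved to that table's primary key from the
# schema, i.e. RESTAURANT_ID.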
schema = text2sql_utils.read_dataset_schema(
self.FIXTURES_ROOT / "data" / "text2sql" / "restaurants-schema.csv"
)
sql = [
"SELECT",
"COUNT",
"(",
"*",
")",
"FROM",
"MAX",
"(",
"LOCATION",
".",
"ID",
")",
"AS",
"LOCATIONalias0",
";",
]
resolved = text2sql_utils.resolve_primary_keys_in_schema(sql, schema)
assert resolved == [
"SELECT",
"COUNT",
"(",
"*",
")",
"FROM",
"MAX",
"(",
"LOCATION",
".",
"RESTAURANT_ID",
")",
"AS",
"LOCATIONalias0",
";",
]
| allennlp-semparse-master | tests/common/sql/text2sql_utils_test.py |
from typing import List
import pytest
from .. import SemparseTestCase
from allennlp.data.tokenizers import Token
from allennlp.data.tokenizers import SpacyTokenizer
from allennlp_semparse.common import Date, ExecutionError
from allennlp_semparse.common.wikitables import TableQuestionContext
from allennlp_semparse.domain_languages.wikitables_language import WikiTablesLanguage
from .domain_language_test import check_productions_match
class TestWikiTablesLanguage(SemparseTestCase):
# TODO(mattg, pradeep): Add tests for the ActionSpaceWalker as well.
def setup_method(self):
super().setup_method()
# Adding a bunch of random tokens in here so we get them as constants in the language.
question_tokens = [
Token(x)
for x in [
"what",
"was",
"the",
"last",
"year",
"2013",
"?",
"quarterfinals",
"a_league",
"2010",
"8000",
"did_not_qualify",
"2001",
"2",
"23",
"2005",
"1",
"2002",
"usl_a_league",
"usl_first_division",
]
]
self.table_file = self.FIXTURES_ROOT / "data" / "wikitables" / "sample_table.tagged"
self.table_context = TableQuestionContext.read_from_file(self.table_file, question_tokens)
self.language = WikiTablesLanguage(self.table_context)
def _get_world_with_question_tokens(self, tokens: List[Token]) -> WikiTablesLanguage:
table_context = TableQuestionContext.read_from_file(self.table_file, tokens)
world = WikiTablesLanguage(table_context)
return world
def _get_world_with_question_tokens_and_table_file(
self, tokens: List[Token], table_file: str
) -> WikiTablesLanguage:
table_context = TableQuestionContext.read_from_file(table_file, tokens)
world = WikiTablesLanguage(table_context)
return world
def test_execute_fails_with_unknown_function(self):
logical_form = "(unknown_function all_rows string_column:league)"
with pytest.raises(ExecutionError):
self.language.execute(logical_form)
def test_execute_works_with_select(self):
logical_form = "(select_string all_rows string_column:league)"
cell_list = self.language.execute(logical_form)
assert set(cell_list) == {"usl_a_league", "usl_first_division"}
def test_execute_works_with_select_number(self):
logical_form = "(select_number all_rows number_column:division)"
selected_number = self.language.execute(logical_form)
assert selected_number == 2.0
def test_execute_works_with_argmax(self):
logical_form = (
"(select_string (argmax all_rows number_column:avg_attendance) string_column:league)"
)
cell_list = self.language.execute(logical_form)
assert cell_list == ["usl_a_league"]
def test_execute_works_with_argmax_on_dates(self):
logical_form = "(select_string (argmax all_rows date_column:year) string_column:league)"
cell_list = self.language.execute(logical_form)
assert cell_list == ["usl_first_division"]
def test_execute_works_with_argmin(self):
logical_form = (
"(select_date (argmin all_rows number_column:avg_attendance) date_column:year)"
)
cell_list = self.language.execute(logical_form)
assert cell_list == Date(2005, 3, -1)
def test_execute_works_with_argmin_on_dates(self):
logical_form = "(select_string (argmin all_rows date_column:year) string_column:league)"
cell_list = self.language.execute(logical_form)
assert cell_list == ["usl_a_league"]
def test_execute_works_with_filter_number_greater(self):
# Selecting cell values from all rows that have attendance greater than the min value of
# attendance.
logical_form = """(select_string (filter_number_greater all_rows number_column:avg_attendance
(min_number all_rows number_column:avg_attendance)) string_column:league)"""
cell_value_list = self.language.execute(logical_form)
assert cell_value_list == ["usl_a_league"]
# Replacing the filter value with an invalid value.
logical_form = """(select_string (filter_number_greater all_rows number_column:avg_attendance
all_rows) string_column:league)"""
with pytest.raises(ExecutionError):
self.language.execute(logical_form)
# Replacing the filter value with an invalid value.
logical_form = """(select_string (filter_number_greater all_rows number_column:avg_attendance
string:usl_first_division) string_column:league)"""
with pytest.raises(ExecutionError):
self.language.execute(logical_form)
def test_execute_works_with_filter_date_greater(self):
# Selecting cell values from all rows that have date greater than 2002.
logical_form = """(select_string (filter_date_greater all_rows date_column:year
(date 2002 -1 -1)) string_column:league)"""
cell_value_list = self.language.execute(logical_form)
assert cell_value_list == ["usl_first_division"]
# Replacing the filter value with an invalid value.
logical_form = """(select_string (filter_date_greater all_rows date_column:year
2005) string_column:league)"""
with pytest.raises(ExecutionError):
self.language.execute(logical_form)
def test_execute_works_with_filter_number_greater_equals(self):
# Counting rows that have attendance greater than or equal to the min value of attendance.
logical_form = """(count (filter_number_greater_equals all_rows number_column:avg_attendance
(min_number all_rows number_column:avg_attendance)))"""
count_result = self.language.execute(logical_form)
assert count_result == 2
# Replacing the filter value with an invalid value.
logical_form = (
"""(count (filter_number_greater all_rows number_column:avg_attendance all_rows))"""
)
with pytest.raises(ExecutionError):
self.language.execute(logical_form)
# Replacing the filter value with an invalid value.
logical_form = """(count (filter_number_greater all_rows number_column:avg_attendance
string:usl_a_league))"""
with pytest.raises(ExecutionError):
self.language.execute(logical_form)
def test_execute_works_with_filter_date_greater_equals(self):
# Selecting cell values from all rows that have date greater than or equal to 2005 February
# 1st.
logical_form = """(select_string (filter_date_greater_equals all_rows date_column:year
(date 2005 2 1)) string_column:league)"""
cell_value_list = self.language.execute(logical_form)
assert cell_value_list == ["usl_first_division"]
# Replacing the filter value with an invalid value.
logical_form = """(select_string (filter_date_greater_equals all_rows date_column:year
2005) string_column:league)"""
with pytest.raises(ExecutionError):
self.language.execute(logical_form)
def test_execute_works_with_filter_number_lesser(self):
# Selecting cell values from all rows that have attendance less than the max value of attendance.
logical_form = """(select_string (filter_number_lesser all_rows number_column:avg_attendance
(max_number all_rows number_column:avg_attendance)) string_column:league)"""
cell_value_list = self.language.execute(logical_form)
assert cell_value_list == ["usl_first_division"]
def test_execute_works_with_filter_date_lesser(self):
# Selecting cell values from all rows that have date less than 2005 January
logical_form = """(select_string (filter_date_lesser all_rows date_column:year
(date 2005 1 -1)) string_column:league)"""
cell_value_list = self.language.execute(logical_form)
assert cell_value_list == ["usl_a_league"]
# Replacing the filter value with an invalid value.
logical_form = """(select_string (filter_date_lesser all_rows date_column:year
2005) string_column:league)"""
with pytest.raises(ExecutionError):
self.language.execute(logical_form)
def test_execute_works_with_filter_number_lesser_equals(self):
# Counting rows that have avg attendance less than or equal to 8000.
logical_form = (
"""(count (filter_number_lesser_equals all_rows number_column:avg_attendance 8000))"""
)
count_result = self.language.execute(logical_form)
assert count_result == 2
def test_execute_works_with_filter_date_lesser_equals(self):
# Selecting cell values from all rows that have date less than or equal to 2001 February 23
logical_form = """(select_string (filter_date_lesser_equals all_rows date_column:year
(date 2001 2 23)) string_column:league)"""
cell_value_list = self.language.execute(logical_form)
assert cell_value_list == ["usl_a_league"]
# Replacing the filter value with an invalid value.
logical_form = """(select_string (filter_date_lesser_equals all_rows date_column:year
2005) string_column:league)"""
with pytest.raises(ExecutionError):
self.language.execute(logical_form)
def test_execute_works_with_filter_number_equals(self):
# Counting rows that have avg attendance equal to 8000.
logical_form = (
"""(count (filter_number_equals all_rows number_column:avg_attendance 8000))"""
)
count_result = self.language.execute(logical_form)
assert count_result == 0
def test_execute_works_with_filter_date_equals(self):
# Selecting cell values from all rows that have date equal to 2001
logical_form = """(select_string (filter_date_equals all_rows date_column:year
(date 2001 -1 -1)) string_column:league)"""
cell_value_list = self.language.execute(logical_form)
assert cell_value_list == ["usl_a_league"]
# Replacing the filter value with an invalid value.
logical_form = """(select_string (filter_date_equals all_rows date_column:year
2005) string_column:league)"""
with pytest.raises(ExecutionError):
self.language.execute(logical_form)
def test_execute_works_with_filter_number_not_equals(self):
# Counting rows that have avg attendance not equal to 8000.
logical_form = (
"""(count (filter_number_not_equals all_rows number_column:avg_attendance 8000))"""
)
count_result = self.language.execute(logical_form)
assert count_result == 2
def test_execute_works_with_filter_date_not_equals(self):
# Selecting cell values from all rows that have date not equal to 2001
logical_form = """(select_string (filter_date_not_equals all_rows date_column:year
(date 2001 -1 -1)) string_column:league)"""
cell_value_list = self.language.execute(logical_form)
assert cell_value_list == ["usl_first_division"]
# Replacing the filter value with an invalid value.
logical_form = """(select_string (filter_date_not_equals all_rows date_column:year
2005) string_column:league)"""
with pytest.raises(ExecutionError):
self.language.execute(logical_form)
def test_execute_works_with_filter_in(self):
# Selecting "regular season" from rows that have "did not qualify" in "open cup" column.
logical_form = """(select_string (filter_in all_rows string_column:open_cup string:did_not_qualify)
string_column:regular_season)"""
cell_list = self.language.execute(logical_form)
assert cell_list == ["4th_western"]
def test_execute_works_with_select_nested_in_filter_in(self):
logical_form = """(filter_in all_rows string_column:regular_season (select_string (first all_rows)
string_column:regular_season))"""
row_list = self.language.execute(logical_form)
assert row_list == self.language.execute("(first all_rows)")
def test_execute_works_with_filter_not_in(self):
# Selecting "regular season" from rows that do not have "did not qualify" in "open cup" column.
logical_form = """(select_string (filter_not_in all_rows string_column:open_cup string:did_not_qualify)
string_column:regular_season)"""
cell_list = self.language.execute(logical_form)
assert cell_list == ["5th"]
# Replacing the filter value with an invalid value.
logical_form = """(select_string (filter_not_in all_rows string_column:open_cup 2000)
string_column:regular_season)"""
with pytest.raises(ExecutionError):
self.language.execute(logical_form)
def test_execute_works_with_first(self):
# Selecting "regular season" from the first row.
logical_form = """(select_string (first all_rows) string_column:regular_season)"""
cell_list = self.language.execute(logical_form)
assert cell_list == ["4th_western"]
def test_execute_logs_warning_with_first_on_empty_list(self, caplog):
# Selecting "regular season" from the first row where year is greater than 2010.
logical_form = """(select_string (first (filter_date_greater all_rows date_column:year
(date 2010 -1 -1)))
string_column:regular_season)"""
self.language.execute(logical_form)
assert "Trying to get first row from an empty list" in caplog.text
def test_execute_works_with_last(self):
# Selecting "regular season" from the last row where year is not equal to 2010.
logical_form = """(select_string (last (filter_date_not_equals all_rows date_column:year
(date 2010 -1 -1)))
string_column:regular_season)"""
cell_list = self.language.execute(logical_form)
assert cell_list == ["5th"]
def test_execute_logs_warning_with_last_on_empty_list(self, caplog):
# Selecting "regular season" from the last row where year is greater than 2010.
logical_form = """(select_string (last (filter_date_greater all_rows date_column:year
(date 2010 -1 -1)))
string_column:regular_season)"""
self.language.execute(logical_form)
assert "Trying to get last row from an empty list" in caplog.text
def test_execute_works_with_previous(self):
# Selecting "regular season" from the row before last where year is not equal to 2010.
logical_form = """(select_string (previous (last (filter_date_not_equals
all_rows date_column:year (date 2010 -1 -1))))
string_column:regular_season)"""
cell_list = self.language.execute(logical_form)
assert cell_list == ["4th_western"]
def test_execute_works_with_next(self):
# Selecting "regular season" from the row after first where year is not equal to 2010.
logical_form = """(select_string (next (first (filter_date_not_equals
all_rows date_column:year (date 2010 -1 -1))))
string_column:regular_season)"""
cell_list = self.language.execute(logical_form)
assert cell_list == ["5th"]
def test_execute_works_with_max_date(self):
logical_form = """(max_date all_rows date_column:year)"""
cell_list = self.language.execute(logical_form)
assert str(cell_list) == "2005"
def test_execute_works_with_min_date(self):
logical_form = """(min_date all_rows date_column:year)"""
cell_list = self.language.execute(logical_form)
assert str(cell_list) == "2001"
def test_execute_works_with_mode_number(self):
# Most frequent division value.
logical_form = """(mode_number all_rows number_column:division)"""
cell_list = self.language.execute(logical_form)
assert cell_list == 2.0
logical_form = """(mode_number
(filter_in all_rows string_column:league string:a_league)
number_column:division)"""
cell_list = self.language.execute(logical_form)
assert cell_list == 2.0
def test_execute_works_with_mode_string(self):
logical_form = """(mode_string all_rows string_column:league)"""
cell_list = self.language.execute(logical_form)
# Returns the string values with frequency 1 (which is the max frequency)
assert cell_list == ["usl_a_league", "usl_first_division"]
def test_execute_works_with_mode_date(self):
logical_form = """(mode_date all_rows date_column:year)"""
cell_list = self.language.execute(logical_form)
assert str(cell_list) == "2001"
def test_execute_works_with_same_as(self):
# Select the "league" from all the rows that have the same value under "playoffs" as the
# row that has the string "a league" under "league".
logical_form = """(select_string (same_as (filter_in all_rows string_column:league string:a_league)
string_column:playoffs)
string_column:league)"""
cell_list = self.language.execute(logical_form)
assert cell_list == ["usl_a_league", "usl_first_division"]
def test_execute_works_with_sum(self):
# Get total "avg attendance".
logical_form = """(sum all_rows number_column:avg_attendance)"""
sum_value = self.language.execute(logical_form)
assert sum_value == 13197
# Total "avg attendance" where "playoffs" has "quarterfinals"
logical_form = """(sum (filter_in all_rows string_column:playoffs string:quarterfinals)
number_column:avg_attendance)"""
sum_value = self.language.execute(logical_form)
assert sum_value == 13197
def test_execute_works_with_average(self):
# Get average "avg attendance".
logical_form = """(average all_rows number_column:avg_attendance)"""
avg_value = self.language.execute(logical_form)
assert avg_value == 6598.5
# Average "avg attendance" where "playoffs" has "quarterfinals"
logical_form = """(average (filter_in all_rows string_column:playoffs string:quarterfinals)
number_column:avg_attendance)"""
avg_value = self.language.execute(logical_form)
assert avg_value == 6598.5
def test_execute_works_with_diff(self):
# Difference in "avg attendance" between rows with "usl_a_league" and "usl_first_division"
# in "league" columns.
logical_form = """(diff (filter_in all_rows string_column:league string:usl_a_league)
(filter_in all_rows string_column:league string:usl_first_division)
number_column:avg_attendance)"""
avg_value = self.language.execute(logical_form)
assert avg_value == 1141
def test_execute_fails_with_diff_on_non_numerical_columns(self):
logical_form = """(diff (filter_in all_rows string_column:league string:usl_a_league)
(filter_in all_rows string_column:league string:usl_first_division)
string_column:league)"""
with pytest.raises(ExecutionError):
self.language.execute(logical_form)
def test_number_comparison_works(self):
# TableQuestionContext normalizes all strings according to some rules. We want to ensure
# that the original numerical values of number cells are being correctly processed here.
tokens = SpacyTokenizer().tokenize("when was the attendance the highest?")
tagged_file = self.FIXTURES_ROOT / "data" / "corenlp_processed_tables" / "TEST-2.table"
language = self._get_world_with_question_tokens_and_table_file(tokens, tagged_file)
result = language.execute(
"(select_date (argmax all_rows number_column:attendance) date_column:date)"
)
assert result == Date(-1, 11, 10)
def test_evaluate_logical_form(self):
logical_form = """(select_string (same_as (filter_in all_rows string_column:league string:a_league)
string_column:playoffs)
string_column:league)"""
assert self.language.evaluate_logical_form(
logical_form, ["USL A-League", "USL First Division"]
)
def test_evaluate_logical_form_with_invalid_logical_form(self):
logical_form = """(select_string (same_as (filter_in all_rows string_column:league INVALID_CONSTANT)
string_column:playoffs)
string_column:league)"""
assert not self.language.evaluate_logical_form(
logical_form, ["USL A-League", "USL First Division"]
)
def test_get_nonterminal_productions_all_column_types(self):
# This test is long, but worth it. These are all of the valid actions in the grammar, and
# we want to be sure they are what we expect.
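# Function types are written as <arg_1,...,arg_n:return_type>, so e.g.
# <List[Row],StringColumn:List[str]> is a function taking a list of rows and a string column
# and returning a list of strings.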
productions = self.language.get_nonterminal_productions()
assert set(productions.keys()) == {
"@start@",
"<List[Row],StringColumn:List[str]>",
"<List[Row],DateColumn:Date>",
"<List[Row],NumberColumn,Number:List[Row]>",
"<List[Row],ComparableColumn:List[Row]>",
"<List[Row],Column:List[Row]>",
"<List[Row],List[Row],NumberColumn:Number>",
"<List[Row],StringColumn,List[str]:List[Row]>",
"<Number,Number,Number:Date>",
"<List[Row],DateColumn,Date:List[Row]>",
"<List[Row],NumberColumn:Number>",
"<List[Row]:List[Row]>",
"<List[Row],StringColumn:List[str]>",
"<List[Row]:Number>",
"List[str]",
"List[Row]",
"Date",
"Number",
"StringColumn",
"NumberColumn",
"ComparableColumn",
"Column",
"DateColumn",
"List[str]",
}
check_productions_match(productions["@start@"], ["Date", "Number", "List[str]"])
check_productions_match(
productions["<List[Row],StringColumn:List[str]>"], ["select_string", "mode_string"]
)
check_productions_match(
productions["<List[Row],DateColumn:Date>"],
["select_date", "max_date", "min_date", "mode_date"],
)
check_productions_match(
productions["<List[Row],NumberColumn,Number:List[Row]>"],
[
"filter_number_equals",
"filter_number_greater",
"filter_number_greater_equals",
"filter_number_lesser",
"filter_number_lesser_equals",
"filter_number_not_equals",
],
)
check_productions_match(
productions["<List[Row],ComparableColumn:List[Row]>"], ["argmax", "argmin"]
)
check_productions_match(productions["<List[Row],Column:List[Row]>"], ["same_as"])
check_productions_match(productions["<List[Row],List[Row],NumberColumn:Number>"], ["diff"])
check_productions_match(
productions["<List[Row],StringColumn,List[str]:List[Row]>"],
["filter_in", "filter_not_in"],
)
check_productions_match(productions["<Number,Number,Number:Date>"], ["date"])
check_productions_match(
productions["<List[Row],DateColumn,Date:List[Row]>"],
[
"filter_date_equals",
"filter_date_greater",
"filter_date_greater_equals",
"filter_date_lesser",
"filter_date_lesser_equals",
"filter_date_not_equals",
],
)
check_productions_match(
productions["<List[Row],NumberColumn:Number>"],
["average", "max_number", "min_number", "sum", "select_number", "mode_number"],
)
check_productions_match(
productions["<List[Row]:List[Row]>"], ["first", "last", "next", "previous"]
)
check_productions_match(productions["<List[Row]:Number>"], ["count"])
check_productions_match(
productions["List[Row]"],
[
"all_rows",
"[<List[Row],Column:List[Row]>, List[Row], Column]",
"[<List[Row],DateColumn,Date:List[Row]>, List[Row], DateColumn, Date]",
"[<List[Row],ComparableColumn:List[Row]>, List[Row], ComparableColumn]",
"[<List[Row],NumberColumn,Number:List[Row]>, List[Row], NumberColumn, Number]",
"[<List[Row],StringColumn,List[str]:List[Row]>, List[Row], StringColumn, List[str]]",
"[<List[Row]:List[Row]>, List[Row]]",
],
)
check_productions_match(
productions["Date"],
[
"[<Number,Number,Number:Date>, Number, Number, Number]",
"[<List[Row],DateColumn:Date>, List[Row], DateColumn]",
],
)
# Some of the number productions are instance-specific, and some of them are from the
# grammar.
check_productions_match(
productions["Number"],
[
"2001",
"2002",
"2005",
"2010",
"2013",
"-1",
"1",
"2",
"23",
"8000",
"[<List[Row],NumberColumn:Number>, List[Row], NumberColumn]",
"[<List[Row],List[Row],NumberColumn:Number>, List[Row], List[Row], NumberColumn]",
"[<List[Row]:Number>, List[Row]]",
],
)
        # These are the columns in the table, and are instance-specific.
check_productions_match(
productions["StringColumn"],
[
"string_column:league",
"string_column:playoffs",
"string_column:open_cup",
"string_column:year",
"string_column:division",
"string_column:avg_attendance",
"string_column:regular_season",
],
)
check_productions_match(productions["DateColumn"], ["date_column:year"])
check_productions_match(
productions["NumberColumn"],
[
"number_column:avg_attendance",
"number_column:open_cup",
"number_column:regular_season",
"number_column:division",
"number_column:year",
],
)
check_productions_match(
productions["ComparableColumn"],
[
"date_column:year",
"number_column:avg_attendance",
"number_column:open_cup",
"number_column:regular_season",
"number_column:division",
"number_column:year",
],
)
check_productions_match(
productions["Column"],
[
"string_column:league",
"string_column:playoffs",
"string_column:open_cup",
"string_column:year",
"string_column:division",
"string_column:avg_attendance",
"string_column:regular_season",
"date_column:year",
"number_column:avg_attendance",
"number_column:open_cup",
"number_column:regular_season",
"number_column:division",
"number_column:year",
],
)
# Strings come from the question - any span in the question that shows up as a cell in the
# table is a valid string production.
check_productions_match(
productions["List[str]"],
[
"string:quarterfinals",
"string:did_not_qualify",
"string:a_league",
"string:usl_first_division",
"string:usl_a_league",
"string:1",
"string:2",
"string:2005",
"string:2001",
"[<List[Row],StringColumn:List[str]>, List[Row], StringColumn]",
],
)
def test_world_processes_logical_forms_correctly(self):
logical_form = (
"(select_date (filter_in all_rows string_column:league string:usl_a_league)"
" date_column:year)"
)
action_sequence = self.language.logical_form_to_action_sequence(logical_form)
assert self.language.action_sequence_to_logical_form(action_sequence) == logical_form
def test_world_gets_correct_actions(self):
logical_form = """(select_date (filter_in all_rows string_column:league string:usl_a_league)
date_column:year)"""
expected_sequence = [
"@start@ -> Date",
"Date -> [<List[Row],DateColumn:Date>, List[Row], DateColumn]",
"<List[Row],DateColumn:Date> -> select_date",
"List[Row] -> [<List[Row],StringColumn,List[str]:List[Row]>, List[Row], StringColumn, List[str]]",
"<List[Row],StringColumn,List[str]:List[Row]> -> filter_in",
"List[Row] -> all_rows",
"StringColumn -> string_column:league",
"List[str] -> string:usl_a_league",
"DateColumn -> date_column:year",
]
assert self.language.logical_form_to_action_sequence(logical_form) == expected_sequence
def test_world_processes_logical_forms_with_number_correctly(self):
logical_form = (
"(select_date (filter_number_greater all_rows number_column:avg_attendance 8000) "
"date_column:year)"
)
action_sequence = self.language.logical_form_to_action_sequence(logical_form)
assert self.language.action_sequence_to_logical_form(action_sequence) == logical_form
def test_world_processes_logical_forms_with_date_correctly(self):
logical_form = (
"(select_date (filter_date_greater all_rows date_column:year (date 2013 -1 -1)) "
"date_column:year)"
)
action_sequence = self.language.logical_form_to_action_sequence(logical_form)
assert self.language.action_sequence_to_logical_form(action_sequence) == logical_form
def test_world_processes_logical_forms_with_generic_function_correctly(self):
logical_form = "(select_string (argmax all_rows date_column:year) string_column:league)"
action_sequence = self.language.logical_form_to_action_sequence(logical_form)
assert self.language.action_sequence_to_logical_form(action_sequence) == logical_form
def test_get_agenda(self):
tokens = [
Token(x)
for x in [
"what",
"was",
"the",
"difference",
"in",
"attendance",
"between",
"years",
"2001",
"and",
"2005",
"?",
]
]
world = self._get_world_with_question_tokens(tokens)
# "year" column does not match because "years" occurs in the question.
assert set(world.get_agenda()) == {
"Number -> 2001",
"Number -> 2005",
"List[str] -> string:2005",
"List[str] -> string:2001",
"<List[Row],DateColumn,Date:List[Row]> -> filter_date_equals",
"<List[Row],List[Row],NumberColumn:Number> -> diff",
}
# Conservative agenda does not have strings and numbers because they have multiple types.
assert set(world.get_agenda(conservative=True)) == {
"<List[Row],List[Row],NumberColumn:Number> -> diff",
"<List[Row],DateColumn,Date:List[Row]> -> filter_date_equals",
}
tokens = [
Token(x)
for x in [
"what",
"was",
"the",
"total",
"avg.",
"attendance",
"in",
"years",
"2001",
"and",
"2005",
"?",
]
]
world = self._get_world_with_question_tokens(tokens)
assert set(world.get_agenda()) == {
"Number -> 2001",
"Number -> 2005",
"List[str] -> string:2005",
"List[str] -> string:2001",
"<List[Row],NumberColumn:Number> -> sum",
"<List[Row],DateColumn,Date:List[Row]> -> filter_date_equals",
"StringColumn -> string_column:avg_attendance",
"NumberColumn -> number_column:avg_attendance",
}
# Conservative disallows "sum" for the question word "total" too.
assert set(world.get_agenda(conservative=True)) == {
"<List[Row],DateColumn,Date:List[Row]> -> filter_date_equals"
}
tokens = [Token(x) for x in ["what", "was", "the", "average", "avg.", "attendance", "?"]]
world = self._get_world_with_question_tokens(tokens)
assert set(world.get_agenda()) == {
"<List[Row],NumberColumn:Number> -> average",
"StringColumn -> string_column:avg_attendance",
"NumberColumn -> number_column:avg_attendance",
}
assert set(world.get_agenda(conservative=True)) == {
"<List[Row],NumberColumn:Number> -> average"
}
tokens = [Token(x) for x in ["what", "was", "the", "largest", "avg.", "attendance", "?"]]
world = self._get_world_with_question_tokens(tokens)
assert set(world.get_agenda()) == {
"<List[Row],ComparableColumn:List[Row]> -> argmax",
"StringColumn -> string_column:avg_attendance",
"NumberColumn -> number_column:avg_attendance",
}
assert set(world.get_agenda(conservative=True)) == {
"<List[Row],ComparableColumn:List[Row]> -> argmax"
}
tokens = [Token(x) for x in ["when", "was", "the", "least", "avg.", "attendance", "?"]]
world = self._get_world_with_question_tokens(tokens)
assert set(world.get_agenda()) == {
"<List[Row],ComparableColumn:List[Row]> -> argmin",
"StringColumn -> string_column:avg_attendance",
"<List[Row],DateColumn:Date> -> select_date",
"NumberColumn -> number_column:avg_attendance",
}
assert set(world.get_agenda(conservative=True)) == {
"<List[Row],ComparableColumn:List[Row]> -> argmin",
"<List[Row],DateColumn:Date> -> select_date",
}
tokens = [
Token(x)
for x in [
"what",
"was",
"the",
"attendance",
"after",
"the",
"time",
"with",
"the",
"least",
"avg.",
"attendance",
"?",
]
]
world = self._get_world_with_question_tokens(tokens)
assert set(world.get_agenda()) == {
"<List[Row],ComparableColumn:List[Row]> -> argmin",
"StringColumn -> string_column:avg_attendance",
"<List[Row]:List[Row]> -> next",
"NumberColumn -> number_column:avg_attendance",
}
# conservative disallows "after" mapping to "next"
assert set(world.get_agenda(conservative=True)) == {
"<List[Row],ComparableColumn:List[Row]> -> argmin"
}
tokens = [
Token(x)
for x in [
"what",
"was",
"the",
"attendance",
"below",
"the",
"row",
"with",
"the",
"least",
"avg.",
"attendance",
"?",
]
]
world = self._get_world_with_question_tokens(tokens)
assert set(world.get_agenda()) == {
"<List[Row],ComparableColumn:List[Row]> -> argmin",
"StringColumn -> string_column:avg_attendance",
"<List[Row]:List[Row]> -> next",
"NumberColumn -> number_column:avg_attendance",
}
assert set(world.get_agenda(conservative=True)) == {
"<List[Row],ComparableColumn:List[Row]> -> argmin",
"<List[Row]:List[Row]> -> next",
}
tokens = [
Token(x)
for x in [
"what",
"was",
"the",
"attendance",
"before",
"the",
"time",
"with",
"the",
"least",
"avg.",
"attendance",
"?",
]
]
world = self._get_world_with_question_tokens(tokens)
assert set(world.get_agenda()) == {
"<List[Row],ComparableColumn:List[Row]> -> argmin",
"StringColumn -> string_column:avg_attendance",
"<List[Row]:List[Row]> -> previous",
"NumberColumn -> number_column:avg_attendance",
}
# conservative disallows "before" mapping to "previous"
assert set(world.get_agenda(conservative=True)) == {
"<List[Row],ComparableColumn:List[Row]> -> argmin"
}
tokens = [
Token(x)
for x in [
"what",
"was",
"the",
"attendance",
"above",
"the",
"row",
"with",
"the",
"least",
"avg.",
"attendance",
"?",
]
]
world = self._get_world_with_question_tokens(tokens)
assert set(world.get_agenda()) == {
"<List[Row],ComparableColumn:List[Row]> -> argmin",
"StringColumn -> string_column:avg_attendance",
"<List[Row]:List[Row]> -> previous",
"NumberColumn -> number_column:avg_attendance",
}
assert set(world.get_agenda(conservative=True)) == {
"<List[Row],ComparableColumn:List[Row]> -> argmin",
"<List[Row]:List[Row]> -> previous",
}
tokens = [
Token(x)
for x in [
"when",
"was",
"the",
"avg.",
"attendance",
"same",
"as",
"when",
"the",
"league",
"was",
"usl",
"a",
"league",
"?",
]
]
world = self._get_world_with_question_tokens(tokens)
assert set(world.get_agenda()) == {
"StringColumn -> string_column:avg_attendance",
"NumberColumn -> number_column:avg_attendance",
"StringColumn -> string_column:league",
"List[str] -> string:usl_a_league",
"<List[Row],Column:List[Row]> -> same_as",
"<List[Row],DateColumn:Date> -> select_date",
}
assert set(world.get_agenda(conservative=True)) == {
"StringColumn -> string_column:league",
"List[str] -> string:usl_a_league",
"<List[Row],Column:List[Row]> -> same_as",
"<List[Row],DateColumn:Date> -> select_date",
}
tokens = [Token(x) for x in ["what", "is", "the", "least", "avg.", "attendance", "?"]]
world = self._get_world_with_question_tokens(tokens)
assert set(world.get_agenda()) == {
"<List[Row],NumberColumn:Number> -> min_number",
"StringColumn -> string_column:avg_attendance",
"NumberColumn -> number_column:avg_attendance",
}
assert set(world.get_agenda(conservative=True)) == {
"<List[Row],NumberColumn:Number> -> min_number"
}
tokens = [Token(x) for x in ["when", "did", "the", "team", "not", "qualify", "?"]]
world = self._get_world_with_question_tokens(tokens)
assert set(world.get_agenda()) == {
"<List[Row],DateColumn:Date> -> select_date",
"List[str] -> string:qualify",
}
assert set(world.get_agenda(conservative=True)) == {
"<List[Row],DateColumn:Date> -> select_date",
"List[str] -> string:qualify",
}
tokens = [
Token(x)
for x in ["when", "was", "the", "avg.", "attendance", "at", "least", "7000", "?"]
]
world = self._get_world_with_question_tokens(tokens)
assert set(world.get_agenda()) == {
"<List[Row],NumberColumn,Number:List[Row]> -> filter_number_greater_equals",
"<List[Row],DateColumn:Date> -> select_date",
"NumberColumn -> number_column:avg_attendance",
"StringColumn -> string_column:avg_attendance",
"Number -> 7000",
}
assert set(world.get_agenda(conservative=True)) == {
"<List[Row],NumberColumn,Number:List[Row]> -> filter_number_greater_equals",
"<List[Row],DateColumn:Date> -> select_date",
"Number -> 7000",
}
tokens = [
Token(x)
for x in ["when", "was", "the", "avg.", "attendance", "more", "than", "7000", "?"]
]
world = self._get_world_with_question_tokens(tokens)
assert set(world.get_agenda()) == {
"<List[Row],NumberColumn,Number:List[Row]> -> filter_number_greater",
"<List[Row],DateColumn:Date> -> select_date",
"NumberColumn -> number_column:avg_attendance",
"StringColumn -> string_column:avg_attendance",
"Number -> 7000",
}
assert set(world.get_agenda(conservative=True)) == {
"<List[Row],NumberColumn,Number:List[Row]> -> filter_number_greater",
"<List[Row],DateColumn:Date> -> select_date",
"Number -> 7000",
}
tokens = [
Token(x)
for x in ["when", "was", "the", "avg.", "attendance", "at", "most", "7000", "?"]
]
world = self._get_world_with_question_tokens(tokens)
assert set(world.get_agenda()) == {
"<List[Row],NumberColumn,Number:List[Row]> -> filter_number_lesser_equals",
"<List[Row],DateColumn:Date> -> select_date",
"NumberColumn -> number_column:avg_attendance",
"StringColumn -> string_column:avg_attendance",
"Number -> 7000",
}
assert set(world.get_agenda(conservative=True)) == {
"<List[Row],NumberColumn,Number:List[Row]> -> filter_number_lesser_equals",
"<List[Row],DateColumn:Date> -> select_date",
"Number -> 7000",
}
tokens = [
Token(x)
for x in ["when", "was", "the", "avg.", "attendance", "no", "more", "than", "7000", "?"]
]
world = self._get_world_with_question_tokens(tokens)
assert set(world.get_agenda()) == {
"<List[Row],NumberColumn,Number:List[Row]> -> filter_number_lesser_equals",
"<List[Row],DateColumn:Date> -> select_date",
"NumberColumn -> number_column:avg_attendance",
"StringColumn -> string_column:avg_attendance",
"Number -> 7000",
}
assert set(world.get_agenda(conservative=True)) == {
"<List[Row],NumberColumn,Number:List[Row]> -> filter_number_lesser_equals",
"<List[Row],DateColumn:Date> -> select_date",
"Number -> 7000",
}
tokens = [Token(x) for x in ["what", "was", "the", "top", "year", "?"]]
world = self._get_world_with_question_tokens(tokens)
assert set(world.get_agenda()) == {
"<List[Row]:List[Row]> -> first",
"StringColumn -> string_column:year",
"NumberColumn -> number_column:year",
"DateColumn -> date_column:year",
}
assert set(world.get_agenda(conservative=True)) == {"<List[Row]:List[Row]> -> first"}
tokens = [
Token(x) for x in ["what", "was", "the", "year", "in", "the", "bottom", "row", "?"]
]
world = self._get_world_with_question_tokens(tokens)
assert set(world.get_agenda()) == {
"<List[Row]:List[Row]> -> last",
"StringColumn -> string_column:year",
"NumberColumn -> number_column:year",
"DateColumn -> date_column:year",
}
assert set(world.get_agenda(conservative=True)) == {"<List[Row]:List[Row]> -> last"}
| allennlp-semparse-master | tests/domain_languages/wikitables_language_test.py |
from typing import Callable, List
import pytest
from .. import SemparseTestCase
from allennlp_semparse.common import ExecutionError, ParsingError
from allennlp_semparse import DomainLanguage, predicate, predicate_with_side_args
class Arithmetic(DomainLanguage):
def __init__(
self, allow_function_currying: bool = False, allow_function_composition: bool = False
):
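        # `start_types` restricts what a complete logical form may evaluate to, so every program
        # in this toy language must bottom out in an int.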
super().__init__(
start_types={int},
allowed_constants={
# We unfortunately have to explicitly enumerate all allowed constants in the
# grammar. Because we'll be inducing a grammar for this language for use with a
# semantic parser, we need the grammar to be finite, which means we can't allow
# arbitrary constants (you can't parameterize an infinite categorical
# distribution). So our Arithmetic language will have to only operate on simple
# numbers.
"1": 1,
"2": 2,
"3": 3,
"4": 4,
"5": 5,
"6": 6,
"7": 7,
"8": 8,
"9": 9,
"10": 10,
"20": 20,
"-5": -5,
"-2": -2,
},
allow_function_currying=allow_function_currying,
allow_function_composition=allow_function_composition,
)
@predicate
def add(self, num1: int, num2: int) -> int:
return num1 + num2
@predicate
def sum(self, numbers: List[int]) -> int:
return sum(numbers)
# Unfortunately, to make lists, we need to have some function with a fixed number of list
# elements that we can predict. No variable number of arguments - that gives us an infinite
# number of production rules in our grammar.
@predicate
def list1(self, num1: int) -> List[int]:
return [num1]
@predicate
def list2(self, num1: int, num2: int) -> List[int]:
return [num1, num2]
@predicate
def list3(self, num1: int, num2: int, num3: int) -> List[int]:
return [num1, num2, num3]
@predicate
def list4(self, num1: int, num2: int, num3: int, num4: int) -> List[int]:
return [num1, num2, num3, num4]
@predicate
def subtract(self, num1: int, num2: int) -> int:
return num1 - num2
@predicate
def power(self, num1: int, num2: int) -> int:
return num1**num2
@predicate
def multiply(self, num1: int, num2: int) -> int:
return num1 * num2
@predicate
def divide(self, num1: int, num2: int) -> int:
return num1 // num2
@predicate
def halve(self, num1: int) -> int:
return num1 // 2
@predicate
def three(self) -> int:
return 3
@predicate
def three_less(self, function: Callable[[int, int], int]) -> Callable[[int, int], int]:
"""
Wraps a function into a new function that always returns three less than what the original
function would. Totally senseless function that's just here to test higher-order
functions.
"""
def new_function(num1: int, num2: int) -> int:
return function(num1, num2) - 3
return new_function
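    # For example, (three_less add) applied to 2 and 3 yields add(2, 3) - 3 == 2.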
@predicate
def append(self, list_: List[int], num: int) -> List[int]:
return list_ + [num]
def not_a_predicate(self) -> int:
return 5
def check_productions_match(actual_rules: List[str], expected_right_sides: List[str]):
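    # Order-insensitive check that the right-hand sides of a list of "LHS -> RHS" production
    # strings are exactly the expected ones.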
actual_right_sides = [rule.split(" -> ")[1] for rule in actual_rules]
assert set(actual_right_sides) == set(expected_right_sides)
class TestDomainLanguage(SemparseTestCase):
def setup_method(self):
super().setup_method()
self.language = Arithmetic()
self.curried_language = Arithmetic(
allow_function_currying=True, allow_function_composition=True
)
def test_constant_logical_form(self):
assert self.language.execute("5") == 5
assert self.language.execute("2") == 2
assert self.language.execute("20") == 20
assert self.language.execute("3") == 3
with pytest.raises(ExecutionError, match="Unrecognized constant"):
self.language.execute('"add"')
def test_error_message_with_wrong_arguments(self):
with pytest.raises(ExecutionError):
self.language.execute("(add)")
with pytest.raises(ExecutionError):
self.language.execute("(add 2)")
def test_not_all_functions_are_predicates(self):
        # `not_a_predicate` is not decorated with @predicate, so it does not execute to 5; it is
        # treated as an (unrecognized) constant instead.
with pytest.raises(ExecutionError, match="Unrecognized constant"):
self.language.execute("not_a_predicate")
def test_basic_logical_form(self):
assert self.language.execute("three") == 3
assert self.language.execute("(add 2 3)") == 5
assert self.language.execute("(subtract 2 3)") == -1
assert self.language.execute("(halve 20)") == 10
def test_list_types(self):
assert self.language.execute("(sum (list1 2))") == 2
assert self.language.execute("(sum (list2 2 3))") == 5
assert self.language.execute("(sum (list4 2 10 -2 -5))") == 5
assert self.language.execute("(sum (list4 2 three (halve 4) (add -5 -2)))") == 0
def test_nested_logical_form(self):
assert self.language.execute("(add 2 (subtract 4 2))") == 4
assert self.language.execute("(halve (multiply (divide 9 3) (power 2 3)))") == 12
def test_higher_order_logical_form(self):
assert self.language.execute("((three_less add) 2 (subtract 4 2))") == 1
def test_execute_action_sequence(self):
# Repeats tests from above, but using `execute_action_sequence` instead of `execute`.
logical_form = "(add 2 (subtract 4 2))"
action_sequence = self.language.logical_form_to_action_sequence(logical_form)
assert self.language.execute_action_sequence(action_sequence) == 4
logical_form = "(halve (multiply (divide 9 3) (power 2 3)))"
action_sequence = self.language.logical_form_to_action_sequence(logical_form)
assert self.language.execute_action_sequence(action_sequence) == 12
logical_form = "((three_less add) 2 (subtract 4 2))"
action_sequence = self.language.logical_form_to_action_sequence(logical_form)
assert self.language.execute_action_sequence(action_sequence) == 1
logical_form = "((three_less add) three (subtract 4 2))"
action_sequence = self.language.logical_form_to_action_sequence(logical_form)
assert self.language.execute_action_sequence(action_sequence) == 2
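    # In logical forms, `*` is the language's function-composition operator: ((* f g) x)
    # evaluates f(g(x)). The next few tests exercise it directly and via action sequences.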
def test_execute_function_composition(self):
assert self.curried_language.execute("((* halve halve) 8)") == 2
assert self.curried_language.execute("((* sum list1) 8)") == 8
assert self.curried_language.execute("(multiply 4 ((* sum list1) 6))") == 24
assert self.curried_language.execute("(halve ((* halve halve) 8))") == 1
assert self.curried_language.execute("((* (* halve halve) (three_less multiply)) 2 4)") == 1
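    # Currying: applying a function to fewer arguments than it expects yields a new function over
    # the remaining ones. Judging by the cases below, the supplied argument binds to whichever
    # parameter its type matches (an int handed to `append` fills `num`, a list fills `list_`).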
def test_execute_function_currying(self):
assert self.curried_language.execute("((multiply 3) 6)") == 18
assert self.curried_language.execute("(sum ((list2 1) 7))") == 8
assert self.curried_language.execute("((append 3) (list1 2))") == [2, 3]
assert self.curried_language.execute("((append (list1 4)) 6)") == [4, 6]
assert self.curried_language.execute("((list3 1 2) 3)") == [1, 2, 3]
def test_execute_action_sequence_function_composition(self):
# Repeats tests from above, but using `execute_action_sequence` instead of `execute`.
logical_form = "((* halve halve) 8)"
action_sequence = self.curried_language.logical_form_to_action_sequence(logical_form)
assert self.curried_language.execute_action_sequence(action_sequence) == 2
logical_form = "((* sum list1) 8)"
action_sequence = self.curried_language.logical_form_to_action_sequence(logical_form)
assert self.curried_language.execute_action_sequence(action_sequence) == 8
logical_form = "(multiply 4 ((* sum list1) 6))"
action_sequence = self.curried_language.logical_form_to_action_sequence(logical_form)
assert self.curried_language.execute_action_sequence(action_sequence) == 24
logical_form = "(halve ((* halve halve) 8))"
action_sequence = self.curried_language.logical_form_to_action_sequence(logical_form)
assert self.curried_language.execute_action_sequence(action_sequence) == 1
def test_execute_action_sequence_function_currying(self):
# Repeats tests from above, but using `execute_action_sequence` instead of `execute`.
logical_form = "((multiply 3) 6)"
action_sequence = self.curried_language.logical_form_to_action_sequence(logical_form)
assert self.curried_language.execute_action_sequence(action_sequence) == 18
logical_form = "(sum ((list3 1 2) 7))"
action_sequence = self.curried_language.logical_form_to_action_sequence(logical_form)
assert self.curried_language.execute_action_sequence(action_sequence) == 10
def test_currying_composed_functions(self):
# Testing all of our operations (conversion and execution) for currying composed functions.
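        # (((* sum list3) 1 2) 7) composes sum with list3, curries in 1 and 2, and applies the
        # result to 7, i.e. sum(list3(1, 2, 7)) == 1 + 2 + 7 == 10.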
logical_form = "(((* sum list3) 1 2) 7)"
action_sequence = [
"@start@ -> int",
"int -> [<int:int>, int]",
"<int:int> -> [<int,int,int:int>, int, int]",
"<int,int,int:int> -> [*, <List[int]:int>, <int,int,int:List[int]>]",
"<List[int]:int> -> sum",
"<int,int,int:List[int]> -> list3",
"int -> 1",
"int -> 2",
"int -> 7",
]
generated_logical_form = self.curried_language.action_sequence_to_logical_form(
action_sequence
)
assert generated_logical_form == logical_form
generated_action_sequence = self.curried_language.logical_form_to_action_sequence(
logical_form
)
assert generated_action_sequence == action_sequence
assert self.curried_language.execute(logical_form) == 10
assert self.curried_language.execute_action_sequence(action_sequence) == 10
def test_get_nonterminal_productions(self):
valid_actions = self.language.get_nonterminal_productions()
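        # Keys are the grammar's nonterminals; angle-bracket keys are function types written as
        # <arg1,...:return>, e.g. "<int,int:int>" for add/subtract and the higher-order
        # "<<int,int:int>:<int,int:int>>" for three_less.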
assert set(valid_actions.keys()) == {
"@start@",
"int",
"List[int]",
"<int:int>",
"<int,int:int>",
"<List[int]:int>",
"<List[int],int:List[int]>",
"<int:List[int]>",
"<int,int:List[int]>",
"<int,int,int:List[int]>",
"<int,int,int,int:List[int]>",
"<<int,int:int>:<int,int:int>>",
}
check_productions_match(valid_actions["@start@"], ["int"])
check_productions_match(
valid_actions["int"],
[
"[<int,int:int>, int, int]",
"[<int:int>, int]",
"[<List[int]:int>, List[int]]",
"three",
"1",
"2",
"3",
"4",
"5",
"6",
"7",
"8",
"9",
"10",
"20",
"-5",
"-2",
],
)
check_productions_match(
valid_actions["List[int]"],
[
"[<int:List[int]>, int]",
"[<int,int:List[int]>, int, int]",
"[<int,int,int:List[int]>, int, int, int]",
"[<int,int,int,int:List[int]>, int, int, int, int]",
"[<List[int],int:List[int]>, List[int], int]",
],
)
check_productions_match(valid_actions["<int:int>"], ["halve"])
check_productions_match(
valid_actions["<int,int:int>"],
[
"[<<int,int:int>:<int,int:int>>, <int,int:int>]",
"add",
"subtract",
"multiply",
"divide",
"power",
],
)
check_productions_match(valid_actions["<List[int],int:List[int]>"], ["append"])
check_productions_match(valid_actions["<List[int]:int>"], ["sum"])
check_productions_match(valid_actions["<int:List[int]>"], ["list1"])
check_productions_match(valid_actions["<int,int:List[int]>"], ["list2"])
check_productions_match(valid_actions["<int,int,int:List[int]>"], ["list3"])
check_productions_match(valid_actions["<int,int,int,int:List[int]>"], ["list4"])
check_productions_match(valid_actions["<<int,int:int>:<int,int:int>>"], ["three_less"])
def test_get_nonterminal_productions_curried_language_and_function_composition(self):
valid_actions = self.curried_language.get_nonterminal_productions()
assert set(valid_actions.keys()) == {
"@start@",
"int",
"List[int]",
"<int:int>",
"<int,int:int>",
"<List[int]:int>",
"<int:List[int]>",
"<int,int:List[int]>",
"<int,int,int:List[int]>",
"<int,int,int,int:List[int]>",
"<List[int],int:List[int]>",
"<<int,int:int>:<int,int:int>>",
# Types induced by allowing function composition
"<List[int]:List[int]>", # also induced from currying
"<int,int,int,int:int>",
"<int,int,int:int>",
"<List[int],int:int>",
}
check_productions_match(valid_actions["@start@"], ["int"])
check_productions_match(
valid_actions["int"],
[
"[<int,int:int>, int, int]",
"[<int:int>, int]",
"[<List[int]:int>, List[int]]",
"three",
"1",
"2",
"3",
"4",
"5",
"6",
"7",
"8",
"9",
"10",
"20",
"-5",
"-2",
],
)
check_productions_match(
valid_actions["List[int]"],
[
"[<int:List[int]>, int]",
"[<int,int:List[int]>, int, int]",
"[<int,int,int:List[int]>, int, int, int]",
"[<int,int,int,int:List[int]>, int, int, int, int]",
"[<List[int],int:List[int]>, List[int], int]",
],
)
check_productions_match(
valid_actions["<int:int>"],
[
"halve",
# Production due to function composition
"[*, <int:int>, <int:int>]",
"[*, <List[int]:int>, <int:List[int]>]",
# Production due to function currying
"[<int,int:int>, int]",
],
)
check_productions_match(
valid_actions["<int,int:int>"],
[
"[<<int,int:int>:<int,int:int>>, <int,int:int>]",
"add",
"subtract",
"multiply",
"divide",
"power",
# Production due to function composition
"[*, <int:int>, <int,int:int>]",
"[*, <List[int]:int>, <int,int:List[int]>]",
],
)
check_productions_match(
valid_actions["<List[int]:int>"],
[
"sum",
# Production due to function composition
"[*, <int:int>, <List[int]:int>]",
"[*, <List[int]:int>, <List[int]:List[int]>]",
],
)
check_productions_match(
valid_actions["<int:List[int]>"],
[
"list1",
# Production due to function composition
"[*, <int:List[int]>, <int:int>]",
"[*, <List[int]:List[int]>, <int:List[int]>]",
# Production due to function currying
"[<List[int],int:List[int]>, List[int]]",
"[<int,int:List[int]>, int]",
"[<int,int,int:List[int]>, int, int]",
"[<int,int,int,int:List[int]>, int, int, int]",
],
)
check_productions_match(
valid_actions["<List[int],int:List[int]>"],
["append", "[*, <List[int]:List[int]>, <List[int],int:List[int]>]"],
)
check_productions_match(
valid_actions["<int,int:List[int]>"],
[
"list2",
"[*, <int:List[int]>, <int,int:int>]",
"[*, <List[int]:List[int]>, <int,int:List[int]>]",
],
)
check_productions_match(
valid_actions["<int,int,int:List[int]>"],
["list3", "[*, <List[int]:List[int]>, <int,int,int:List[int]>]"],
)
check_productions_match(
valid_actions["<int,int,int,int:List[int]>"],
["list4", "[*, <List[int]:List[int]>, <int,int,int,int:List[int]>]"],
)
check_productions_match(
valid_actions["<<int,int:int>:<int,int:int>>"],
["three_less", "[*, <<int,int:int>:<int,int:int>>, <<int,int:int>:<int,int:int>>]"],
)
# Production due to function composition
check_productions_match(
valid_actions["<List[int]:List[int]>"],
[
"[*, <int:List[int]>, <List[int]:int>]",
"[*, <List[int]:List[int]>, <List[int]:List[int]>]",
"[<List[int],int:List[int]>, int]",
],
)
check_productions_match(
valid_actions["<int,int,int,int:int>"],
[
"[*, <List[int]:int>, <int,int,int,int:List[int]>]",
],
)
check_productions_match(
valid_actions["<int,int,int:int>"],
[
"[*, <List[int]:int>, <int,int,int:List[int]>]",
],
)
check_productions_match(
valid_actions["<List[int],int:int>"],
[
"[*, <List[int]:int>, <List[int],int:List[int]>]",
],
)
def test_logical_form_to_action_sequence(self):
action_sequence = self.language.logical_form_to_action_sequence("(add 2 3)")
assert action_sequence == [
"@start@ -> int",
"int -> [<int,int:int>, int, int]",
"<int,int:int> -> add",
"int -> 2",
"int -> 3",
]
action_sequence = self.language.logical_form_to_action_sequence(
"(halve (subtract 8 three))"
)
assert action_sequence == [
"@start@ -> int",
"int -> [<int:int>, int]",
"<int:int> -> halve",
"int -> [<int,int:int>, int, int]",
"<int,int:int> -> subtract",
"int -> 8",
"int -> three",
]
logical_form = "(halve (multiply (divide 9 three) (power 2 3)))"
action_sequence = self.language.logical_form_to_action_sequence(logical_form)
assert action_sequence == [
"@start@ -> int",
"int -> [<int:int>, int]",
"<int:int> -> halve",
"int -> [<int,int:int>, int, int]",
"<int,int:int> -> multiply",
"int -> [<int,int:int>, int, int]",
"<int,int:int> -> divide",
"int -> 9",
"int -> three",
"int -> [<int,int:int>, int, int]",
"<int,int:int> -> power",
"int -> 2",
"int -> 3",
]
def test_logical_form_to_action_sequence_with_higher_order_functions(self):
action_sequence = self.language.logical_form_to_action_sequence("((three_less add) 2 3)")
assert action_sequence == [
"@start@ -> int",
"int -> [<int,int:int>, int, int]",
"<int,int:int> -> [<<int,int:int>:<int,int:int>>, <int,int:int>]",
"<<int,int:int>:<int,int:int>> -> three_less",
"<int,int:int> -> add",
"int -> 2",
"int -> 3",
]
def test_logical_form_to_action_sequence_with_function_composition(self):
action_sequence = self.curried_language.logical_form_to_action_sequence(
"((* halve halve) 8)"
)
assert action_sequence == [
"@start@ -> int",
"int -> [<int:int>, int]",
"<int:int> -> [*, <int:int>, <int:int>]",
"<int:int> -> halve",
"<int:int> -> halve",
"int -> 8",
]
action_sequence = self.curried_language.logical_form_to_action_sequence("((* sum list1) 8)")
assert action_sequence == [
"@start@ -> int",
"int -> [<int:int>, int]",
"<int:int> -> [*, <List[int]:int>, <int:List[int]>]",
"<List[int]:int> -> sum",
"<int:List[int]> -> list1",
"int -> 8",
]
# Trying a mix of regular composition and function-composition
action_sequence = self.curried_language.logical_form_to_action_sequence(
"(halve ((* halve halve) 8))"
)
assert action_sequence == [
"@start@ -> int",
"int -> [<int:int>, int]",
"<int:int> -> halve",
"int -> [<int:int>, int]",
"<int:int> -> [*, <int:int>, <int:int>]",
"<int:int> -> halve",
"<int:int> -> halve",
"int -> 8",
]
        # The idea is to execute multiply(4, sum(list3(2, 4)(6))): list3 is curried with 2 and 4,
        # composed with sum, and the composed function is then applied to 6.
action_sequence = self.curried_language.logical_form_to_action_sequence(
"(multiply 4 ((* sum (list3 2 4)) 6))"
)
assert action_sequence == [
"@start@ -> int",
"int -> [<int,int:int>, int, int]",
"<int,int:int> -> multiply",
"int -> 4",
"int -> [<int:int>, int]",
"<int:int> -> [*, <List[int]:int>, <int:List[int]>]",
"<List[int]:int> -> sum",
"<int:List[int]> -> [<int,int,int:List[int]>, int, int]",
"<int,int,int:List[int]> -> list3",
"int -> 2",
"int -> 4",
"int -> 6",
]
action_sequence = self.curried_language.logical_form_to_action_sequence(
"((* (* halve halve) (three_less multiply)) 2 4)"
)
assert action_sequence == [
"@start@ -> int",
"int -> [<int,int:int>, int, int]",
"<int,int:int> -> [*, <int:int>, <int,int:int>]",
"<int:int> -> [*, <int:int>, <int:int>]",
"<int:int> -> halve",
"<int:int> -> halve",
"<int,int:int> -> [<<int,int:int>:<int,int:int>>, <int,int:int>]",
"<<int,int:int>:<int,int:int>> -> three_less",
"<int,int:int> -> multiply",
"int -> 2",
"int -> 4",
]
def test_logical_form_to_action_sequence_with_function_currying(self):
action_sequence = self.curried_language.logical_form_to_action_sequence("((multiply 3) 6)")
assert action_sequence == [
"@start@ -> int",
"int -> [<int:int>, int]",
"<int:int> -> [<int,int:int>, int]",
"<int,int:int> -> multiply",
"int -> 3",
"int -> 6",
]
action_sequence = self.curried_language.logical_form_to_action_sequence(
"(sum ((list3 1 2) 7))"
)
assert action_sequence == [
"@start@ -> int",
"int -> [<List[int]:int>, List[int]]",
"<List[int]:int> -> sum",
"List[int] -> [<int:List[int]>, int]",
"<int:List[int]> -> [<int,int,int:List[int]>, int, int]",
"<int,int,int:List[int]> -> list3",
"int -> 1",
"int -> 2",
"int -> 7",
]
def test_action_sequence_to_logical_form(self):
logical_form = "(add 2 3)"
action_sequence = self.language.logical_form_to_action_sequence(logical_form)
recovered_logical_form = self.language.action_sequence_to_logical_form(action_sequence)
assert recovered_logical_form == logical_form
logical_form = "(halve (multiply (divide 9 three) (power 2 3)))"
action_sequence = self.language.logical_form_to_action_sequence(logical_form)
recovered_logical_form = self.language.action_sequence_to_logical_form(action_sequence)
assert recovered_logical_form == logical_form
logical_form = "((three_less add) 2 3)"
action_sequence = self.language.logical_form_to_action_sequence(logical_form)
recovered_logical_form = self.language.action_sequence_to_logical_form(action_sequence)
assert recovered_logical_form == logical_form
def test_action_sequence_to_logical_form_with_function_composition(self):
logical_form = "((* halve halve) 8)"
action_sequence = self.curried_language.logical_form_to_action_sequence(logical_form)
recovered_logical_form = self.curried_language.action_sequence_to_logical_form(
action_sequence
)
assert recovered_logical_form == logical_form
logical_form = "((* sum list1) 8)"
action_sequence = self.curried_language.logical_form_to_action_sequence(logical_form)
recovered_logical_form = self.curried_language.action_sequence_to_logical_form(
action_sequence
)
assert recovered_logical_form == logical_form
logical_form = "(multiply 4 ((* sum list1) 6))"
action_sequence = self.curried_language.logical_form_to_action_sequence(logical_form)
recovered_logical_form = self.curried_language.action_sequence_to_logical_form(
action_sequence
)
assert recovered_logical_form == logical_form
logical_form = "(halve ((* halve halve) 8))"
action_sequence = self.curried_language.logical_form_to_action_sequence(logical_form)
recovered_logical_form = self.curried_language.action_sequence_to_logical_form(
action_sequence
)
assert recovered_logical_form == logical_form
def test_action_sequence_to_logical_form_with_function_currying(self):
logical_form = "((multiply 3) 6)"
action_sequence = self.curried_language.logical_form_to_action_sequence(logical_form)
recovered_logical_form = self.curried_language.action_sequence_to_logical_form(
action_sequence
)
assert recovered_logical_form == logical_form
logical_form = "(sum ((list3 1 2) 7))"
action_sequence = self.curried_language.logical_form_to_action_sequence(logical_form)
recovered_logical_form = self.curried_language.action_sequence_to_logical_form(
action_sequence
)
assert recovered_logical_form == logical_form
def test_logical_form_parsing_fails_on_bad_inputs(self):
# We don't catch all type inconsistencies in the code, but we _do_ catch some. If we add
# more that we catch, this is a good place to test for them.
with pytest.raises(ParsingError, match="Wrong number of arguments"):
self.language.logical_form_to_action_sequence("(halve 2 3)")
with pytest.raises(ParsingError, match="Wrong number of arguments"):
self.language.logical_form_to_action_sequence("(add 3)")
with pytest.raises(ParsingError, match="unallowed start type"):
self.language.logical_form_to_action_sequence("add")
with pytest.raises(ParsingError, match="Zero-arg function or constant"):
self.language.logical_form_to_action_sequence("(sum (3 2))")
with pytest.raises(ParsingError, match="did not have expected type"):
self.language.logical_form_to_action_sequence("(sum (add 2 3))")
def test_execution_with_side_arguments(self):
class SideArgumentLanguage(DomainLanguage):
def __init__(self) -> None:
super().__init__(start_types={int}, allowed_constants={"1": 1, "2": 2, "3": 3})
@predicate_with_side_args(["num2"])
def add(self, num1: int, num2: int) -> int:
return num1 + num2
@predicate_with_side_args(["num"])
def current_number(self, num: int) -> int:
return num
language = SideArgumentLanguage()
# (add 1)
action_sequence = [
"@start@ -> int",
"int -> [<int:int>, int]",
"<int:int> -> add",
"int -> 1",
]
# For each action in the action sequence, we pass state. We only actually _use_ the state
# when the action we've predicted at that step needs the state. In this case, the third
# action will get {'num2': 3} passed to the `add()` function.
state = [{"num2": 1}, {"num2": 2}, {"num2": 3}, {"num2": 4}]
assert language.execute_action_sequence(action_sequence, state) == 4
# (add current_number)
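        # By the same step-indexing as above, `add` (the third action) receives num2=3 and
        # `current_number` (the fourth action) receives num=8, so the result is 8 + 3 = 11.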
action_sequence = [
"@start@ -> int",
"int -> [<int:int>, int]",
"<int:int> -> add",
"int -> current_number",
]
state = [
{"num2": 1, "num": 5},
{"num2": 2, "num": 6},
{"num2": 3, "num": 7},
{"num2": 4, "num": 8},
]
assert language.execute_action_sequence(action_sequence, state) == 11
| allennlp-semparse-master | tests/domain_languages/domain_language_test.py |
allennlp-semparse-master | tests/domain_languages/__init__.py |
|
import json
from .. import SemparseTestCase
from allennlp_semparse.domain_languages import NlvrLanguage
from allennlp_semparse.domain_languages.nlvr_language import Box
class TestNlvrLanguage(SemparseTestCase):
def setup_method(self):
super().setup_method()
test_filename = self.FIXTURES_ROOT / "data" / "nlvr" / "sample_ungrouped_data.jsonl"
data = [json.loads(line)["structured_rep"] for line in open(test_filename).readlines()]
box_lists = [
[Box(object_reps, i) for i, object_reps in enumerate(box_rep)] for box_rep in data
]
self.languages = [NlvrLanguage(boxes) for boxes in box_lists]
# y_loc increases as we go down from top to bottom, and x_loc from left to right. That is,
# the origin is at the top-left corner.
custom_rep = [
[
{"y_loc": 79, "size": 20, "type": "triangle", "x_loc": 27, "color": "Yellow"},
{"y_loc": 55, "size": 10, "type": "circle", "x_loc": 47, "color": "Black"},
],
[
{"y_loc": 44, "size": 30, "type": "square", "x_loc": 10, "color": "#0099ff"},
{"y_loc": 74, "size": 30, "type": "square", "x_loc": 40, "color": "Yellow"},
],
[{"y_loc": 60, "size": 10, "type": "triangle", "x_loc": 12, "color": "#0099ff"}],
]
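        # In words: box 0 holds a yellow triangle and a black circle, box 1 holds a "#0099ff"
        # square and a yellow square, and box 2 holds a single "#0099ff" triangle; the tests
        # below treat "#0099ff" as blue.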
self.custom_language = NlvrLanguage(
[Box(object_rep, i) for i, object_rep in enumerate(custom_rep)]
)
def test_logical_form_with_assert_executes_correctly(self):
executor = self.languages[0]
# Utterance is "There is a circle closely touching a corner of a box." and label is "True".
logical_form_true = "(object_count_greater_equals (touch_corner (circle (all_objects))) 1)"
assert executor.execute(logical_form_true) is True
logical_form_false = "(object_count_equals (touch_corner (circle (all_objects))) 9)"
assert executor.execute(logical_form_false) is False
def test_logical_form_with_box_filter_executes_correctly(self):
executor = self.languages[2]
# Utterance is "There is a box without a blue item." and label is "False".
logical_form = "(box_exists (member_color_none_equals all_boxes color_blue))"
assert executor.execute(logical_form) is False
def test_logical_form_with_box_filter_within_object_filter_executes_correctly(self):
executor = self.languages[2]
# Utterance is "There are at least three blue items in boxes with blue items" and label
# is "True".
logical_form = "(object_count_greater_equals \
(object_in_box (member_color_any_equals all_boxes color_blue)) 3)"
assert executor.execute(logical_form) is True
def test_logical_form_with_same_color_executes_correctly(self):
executor = self.languages[1]
# Utterance is "There are exactly two blocks of the same color." and label is "True".
logical_form = "(object_count_equals (same_color all_objects) 2)"
assert executor.execute(logical_form) is True
def test_logical_form_with_same_shape_executes_correctly(self):
executor = self.languages[0]
# Utterance is "There are less than three black objects of the same shape" and label is "False".
logical_form = "(object_count_lesser (same_shape (black (all_objects))) 3)"
assert executor.execute(logical_form) is False
def test_logical_form_with_touch_wall_executes_correctly(self):
executor = self.languages[0]
# Utterance is "There are two black circles touching a wall" and label is "False".
logical_form = "(object_count_greater_equals (touch_wall (black (circle (all_objects)))) 2)"
assert executor.execute(logical_form) is False
def test_logical_form_with_not_executes_correctly(self):
executor = self.languages[2]
# Utterance is "There are at most two medium triangles not touching a wall." and label is "True".
logical_form = (
"(object_count_lesser_equals ((negate_filter touch_wall) "
"(medium (triangle (all_objects)))) 2)"
)
assert executor.execute(logical_form) is True
def test_logical_form_with_color_comparison_executes_correctly(self):
executor = self.languages[0]
# Utterance is "The color of the circle touching the wall is black." and label is "True".
logical_form = "(object_color_all_equals (circle (touch_wall (all_objects))) color_black)"
assert executor.execute(logical_form) is True
def test_spatial_relations_return_objects_in_the_same_box(self):
# "above", "below", "top", "bottom" are relations defined only for objects within the same
# box. So they should not return objects from other boxes.
# Asserting that the color of the objects above the yellow triangle is only black (it is not
# yellow or blue, which are colors of objects from other boxes)
assert (
self.custom_language.execute(
"(object_color_all_equals (above (yellow (triangle all_objects)))" " color_black)"
)
is True
)
# Asserting that the only shape below the blue square is a square.
assert (
self.custom_language.execute(
"(object_shape_all_equals (below (blue (square all_objects)))" " shape_square)"
)
is True
)
        # Asserting that the shape of the object at the bottom of the box containing a circle is a triangle.
logical_form = (
"(object_shape_all_equals (bottom (object_in_box"
" (member_shape_any_equals all_boxes shape_circle))) shape_triangle)"
)
assert self.custom_language.execute(logical_form) is True
# Asserting the shape of the object at the top of the box with all squares is a square (!).
logical_form = (
"(object_shape_all_equals (top (object_in_box"
" (member_shape_all_equals all_boxes shape_square))) shape_square)"
)
assert self.custom_language.execute(logical_form) is True
def test_touch_object_executes_correctly(self):
# Assert that there is a yellow square touching a blue square.
assert (
self.custom_language.execute(
"(object_exists (yellow (square (touch_object (blue " "(square all_objects))))))"
)
is True
)
# Assert that the triangle does not touch the circle (they are out of vertical range).
assert (
self.custom_language.execute(
"(object_shape_none_equals (touch_object (triangle all_objects))" " shape_circle)"
)
is True
)
def test_spatial_relations_with_objects_from_different_boxes(self):
        # When the objects are from different boxes, top and bottom should return objects from
        # their respective boxes.
# There are triangles in two boxes, so top should return the top objects from both boxes.
assert (
self.custom_language.execute(
"(object_count_equals (top (object_in_box (member_shape_any_equals "
"all_boxes shape_triangle))) 2)"
)
is True
)
def test_same_and_different_execute_correctly(self):
# All the objects in the box with two objects of the same shape are squares.
assert (
self.custom_language.execute(
"(object_shape_all_equals "
"(object_in_box (member_shape_same (member_count_equals all_boxes 2)))"
" shape_square)"
)
is True
)
# There is a circle in the box with objects of different shapes.
assert (
self.custom_language.execute(
"(object_shape_any_equals (object_in_box "
"(member_shape_different all_boxes)) shape_circle)"
)
is True
)
def test_get_action_sequence_handles_multi_arg_functions(self):
language = self.languages[0]
# box_color_filter
logical_form = "(box_exists (member_color_all_equals all_boxes color_blue))"
action_sequence = language.logical_form_to_action_sequence(logical_form)
assert "Set[Box] -> [<Set[Box],Color:Set[Box]>, Set[Box], Color]" in action_sequence
# box_shape_filter
logical_form = "(box_exists (member_shape_all_equals all_boxes shape_square))"
action_sequence = language.logical_form_to_action_sequence(logical_form)
assert "Set[Box] -> [<Set[Box],Shape:Set[Box]>, Set[Box], Shape]" in action_sequence
# box_count_filter
logical_form = "(box_exists (member_count_equals all_boxes 3))"
action_sequence = language.logical_form_to_action_sequence(logical_form)
assert "Set[Box] -> [<Set[Box],int:Set[Box]>, Set[Box], int]" in action_sequence
# assert_color
logical_form = "(object_color_all_equals all_objects color_blue)"
action_sequence = language.logical_form_to_action_sequence(logical_form)
assert "bool -> [<Set[Object],Color:bool>, Set[Object], Color]" in action_sequence
# assert_shape
logical_form = "(object_shape_all_equals all_objects shape_square)"
action_sequence = language.logical_form_to_action_sequence(logical_form)
assert "bool -> [<Set[Object],Shape:bool>, Set[Object], Shape]" in action_sequence
# assert_box_count
logical_form = "(box_count_equals all_boxes 1)"
action_sequence = language.logical_form_to_action_sequence(logical_form)
assert "bool -> [<Set[Box],int:bool>, Set[Box], int]" in action_sequence
# assert_object_count
logical_form = "(object_count_equals all_objects 1)"
action_sequence = language.logical_form_to_action_sequence(logical_form)
assert "bool -> [<Set[Object],int:bool>, Set[Object], int]" in action_sequence
def test_logical_form_with_object_filter_returns_correct_action_sequence(self):
language = self.languages[0]
logical_form = "(object_color_all_equals (circle (touch_wall all_objects)) color_black)"
action_sequence = language.logical_form_to_action_sequence(logical_form)
assert action_sequence == [
"@start@ -> bool",
"bool -> [<Set[Object],Color:bool>, Set[Object], Color]",
"<Set[Object],Color:bool> -> object_color_all_equals",
"Set[Object] -> [<Set[Object]:Set[Object]>, Set[Object]]",
"<Set[Object]:Set[Object]> -> circle",
"Set[Object] -> [<Set[Object]:Set[Object]>, Set[Object]]",
"<Set[Object]:Set[Object]> -> touch_wall",
"Set[Object] -> all_objects",
"Color -> color_black",
]
def test_logical_form_with_negate_filter_returns_correct_action_sequence(self):
language = self.languages[0]
logical_form = "(object_exists ((negate_filter touch_wall) all_objects))"
action_sequence = language.logical_form_to_action_sequence(logical_form)
negate_filter_production = (
"<Set[Object]:Set[Object]> -> "
"[<<Set[Object]:Set[Object]>:<Set[Object]:Set[Object]>>, "
"<Set[Object]:Set[Object]>]"
)
assert action_sequence == [
"@start@ -> bool",
"bool -> [<Set[Object]:bool>, Set[Object]]",
"<Set[Object]:bool> -> object_exists",
"Set[Object] -> [<Set[Object]:Set[Object]>, Set[Object]]",
negate_filter_production,
"<<Set[Object]:Set[Object]>:<Set[Object]:Set[Object]>> -> negate_filter",
"<Set[Object]:Set[Object]> -> touch_wall",
"Set[Object] -> all_objects",
]
def test_logical_form_with_box_filter_returns_correct_action_sequence(self):
language = self.languages[0]
logical_form = "(box_exists (member_color_none_equals all_boxes color_blue))"
action_sequence = language.logical_form_to_action_sequence(logical_form)
assert action_sequence == [
"@start@ -> bool",
"bool -> [<Set[Box]:bool>, Set[Box]]",
"<Set[Box]:bool> -> box_exists",
"Set[Box] -> [<Set[Box],Color:Set[Box]>, Set[Box], Color]",
"<Set[Box],Color:Set[Box]> -> member_color_none_equals",
"Set[Box] -> all_boxes",
"Color -> color_blue",
]
def test_get_agenda_for_sentence(self):
language = self.languages[0]
agenda = language.get_agenda_for_sentence("there is a tower with exactly two yellow blocks")
assert set(agenda) == set(
["Color -> color_yellow", "<Set[Box]:bool> -> box_exists", "int -> 2"]
)
agenda = language.get_agenda_for_sentence(
"There is at most one yellow item closely touching " "the bottom of a box."
)
assert set(agenda) == set(
[
"<Set[Object]:Set[Object]> -> yellow",
"<Set[Object]:Set[Object]> -> touch_bottom",
"int -> 1",
]
)
agenda = language.get_agenda_for_sentence(
"There is at most one yellow item closely touching " "the right wall of a box."
)
assert set(agenda) == set(
[
"<Set[Object]:Set[Object]> -> yellow",
"<Set[Object]:Set[Object]> -> touch_right",
"int -> 1",
]
)
agenda = language.get_agenda_for_sentence(
"There is at most one yellow item closely touching " "the left wall of a box."
)
assert set(agenda) == set(
[
"<Set[Object]:Set[Object]> -> yellow",
"<Set[Object]:Set[Object]> -> touch_left",
"int -> 1",
]
)
agenda = language.get_agenda_for_sentence(
"There is at most one yellow item closely touching " "a wall of a box."
)
assert set(agenda) == set(
[
"<Set[Object]:Set[Object]> -> yellow",
"<Set[Object]:Set[Object]> -> touch_wall",
"int -> 1",
]
)
agenda = language.get_agenda_for_sentence("There is exactly one square touching any edge")
assert set(agenda) == set(
[
"<Set[Object]:Set[Object]> -> square",
"<Set[Object]:Set[Object]> -> touch_wall",
"int -> 1",
]
)
agenda = language.get_agenda_for_sentence(
"There is exactly one square not touching any edge"
)
assert set(agenda) == set(
[
"<Set[Object]:Set[Object]> -> square",
"<Set[Object]:Set[Object]> -> touch_wall",
"int -> 1",
"<<Set[Object]:Set[Object]>:<Set[Object]:Set[Object]>> -> negate_filter",
]
)
agenda = language.get_agenda_for_sentence(
"There is only 1 tower with 1 blue block at the base"
)
assert set(agenda) == set(
[
"<Set[Object]:Set[Object]> -> blue",
"int -> 1",
"<Set[Object]:Set[Object]> -> bottom",
"int -> 1",
]
)
agenda = language.get_agenda_for_sentence(
"There is only 1 tower that has 1 blue block at the top"
)
assert set(agenda) == set(
[
"<Set[Object]:Set[Object]> -> blue",
"int -> 1",
"<Set[Object]:Set[Object]> -> top",
"int -> 1",
"Set[Box] -> all_boxes",
]
)
agenda = language.get_agenda_for_sentence(
"There is exactly one square touching the blue " "triangle"
)
assert set(agenda) == set(
[
"<Set[Object]:Set[Object]> -> square",
"<Set[Object]:Set[Object]> -> blue",
"<Set[Object]:Set[Object]> -> triangle",
"<Set[Object]:Set[Object]> -> touch_object",
"int -> 1",
]
)
def test_get_agenda_for_sentence_correctly_adds_object_filters(self):
# In logical forms that contain "box_exists" at the top, there can never be object filtering
# operations like "blue", "square" etc. In those cases, strings like "blue" and "square" in
# sentences should map to "color_blue" and "shape_square" respectively.
language = self.languages[0]
agenda = language.get_agenda_for_sentence(
"there is a box with exactly two yellow triangles " "touching the top edge"
)
assert "<Set[Object]:Set[Object]> -> yellow" not in agenda
assert "Color -> color_yellow" in agenda
assert "<Set[Object]:Set[Object]> -> triangle" not in agenda
assert "Shape -> shape_triangle" in agenda
assert "<Set[Object]:Set[Object]> -> touch_top" not in agenda
agenda = language.get_agenda_for_sentence(
"there are exactly two yellow triangles touching the" " top edge"
)
assert "<Set[Object]:Set[Object]> -> yellow" in agenda
assert "Color -> color_yellow" not in agenda
assert "<Set[Object]:Set[Object]> -> triangle" in agenda
assert "Shape -> shape_triangle" not in agenda
assert "<Set[Object]:Set[Object]> -> touch_top" in agenda
| allennlp-semparse-master | tests/domain_languages/nlvr_language_test.py |
allennlp-semparse-master | tests/fields/__init__.py |
|
from collections import defaultdict
from numpy.testing import assert_almost_equal
from .. import SemparseTestCase
from allennlp.data import Vocabulary
from allennlp.data.fields import ListField
from allennlp_semparse.fields import ProductionRuleField
class TestProductionRuleField(SemparseTestCase):
def setup_method(self):
super(TestProductionRuleField, self).setup_method()
self.vocab = Vocabulary()
self.s_rule_index = self.vocab.add_token_to_namespace(
"S -> [NP, VP]", namespace="rule_labels"
)
self.np_index = self.vocab.add_token_to_namespace("NP -> test", namespace="rule_labels")
def test_field_counts_vocab_items_correctly(self):
field = ProductionRuleField("S -> [NP, VP]", is_global_rule=True)
namespace_token_counts = defaultdict(lambda: defaultdict(int))
field.count_vocab_items(namespace_token_counts)
assert namespace_token_counts["rule_labels"]["S -> [NP, VP]"] == 1
field = ProductionRuleField("S -> [NP, VP]", is_global_rule=False)
namespace_token_counts = defaultdict(lambda: defaultdict(int))
field.count_vocab_items(namespace_token_counts)
assert namespace_token_counts["rule_labels"]["S -> [NP, VP]"] == 0
def test_index_converts_field_correctly(self):
field = ProductionRuleField("S -> [NP, VP]", is_global_rule=True)
field.index(self.vocab)
assert field._rule_id == self.s_rule_index
def test_padding_lengths_are_computed_correctly(self):
field = ProductionRuleField("S -> [NP, VP]", is_global_rule=True)
field.index(self.vocab)
assert field.get_padding_lengths() == {}
def test_as_tensor_produces_correct_output(self):
field = ProductionRuleField("S -> [NP, VP]", is_global_rule=True)
field.index(self.vocab)
tensor_tuple = field.as_tensor(field.get_padding_lengths())
assert isinstance(tensor_tuple, tuple)
assert len(tensor_tuple) == 4
assert tensor_tuple[0] == "S -> [NP, VP]"
assert tensor_tuple[1] is True
assert_almost_equal(tensor_tuple[2].detach().cpu().numpy(), [self.s_rule_index])
field = ProductionRuleField("S -> [NP, VP]", is_global_rule=False)
field.index(self.vocab)
tensor_tuple = field.as_tensor(field.get_padding_lengths())
assert isinstance(tensor_tuple, tuple)
assert len(tensor_tuple) == 4
assert tensor_tuple[0] == "S -> [NP, VP]"
assert tensor_tuple[1] is False
assert tensor_tuple[2] is None
def test_batch_tensors_does_not_modify_list(self):
field = ProductionRuleField("S -> [NP, VP]", is_global_rule=True)
field.index(self.vocab)
padding_lengths = field.get_padding_lengths()
tensor_dict1 = field.as_tensor(padding_lengths)
field = ProductionRuleField("NP -> test", is_global_rule=True)
field.index(self.vocab)
padding_lengths = field.get_padding_lengths()
tensor_dict2 = field.as_tensor(padding_lengths)
tensor_list = [tensor_dict1, tensor_dict2]
assert field.batch_tensors(tensor_list) == tensor_list
def test_doubly_nested_field_works(self):
field1 = ProductionRuleField("S -> [NP, VP]", is_global_rule=True)
field2 = ProductionRuleField("NP -> test", is_global_rule=True)
field3 = ProductionRuleField("VP -> eat", is_global_rule=False)
list_field = ListField([ListField([field1, field2, field3]), ListField([field1, field2])])
list_field.index(self.vocab)
padding_lengths = list_field.get_padding_lengths()
tensors = list_field.as_tensor(padding_lengths)
assert isinstance(tensors, list)
assert len(tensors) == 2
assert isinstance(tensors[0], list)
assert len(tensors[0]) == 3
assert isinstance(tensors[1], list)
assert len(tensors[1]) == 3
tensor_tuple = tensors[0][0]
assert tensor_tuple[0] == "S -> [NP, VP]"
assert tensor_tuple[1] is True
assert_almost_equal(tensor_tuple[2].detach().cpu().numpy(), [self.s_rule_index])
tensor_tuple = tensors[0][1]
assert tensor_tuple[0] == "NP -> test"
assert tensor_tuple[1] is True
assert_almost_equal(tensor_tuple[2].detach().cpu().numpy(), [self.np_index])
tensor_tuple = tensors[0][2]
assert tensor_tuple[0] == "VP -> eat"
assert tensor_tuple[1] is False
assert tensor_tuple[2] is None
tensor_tuple = tensors[1][0]
assert tensor_tuple[0] == "S -> [NP, VP]"
assert tensor_tuple[1] is True
assert_almost_equal(tensor_tuple[2].detach().cpu().numpy(), [self.s_rule_index])
tensor_tuple = tensors[1][1]
assert tensor_tuple[0] == "NP -> test"
assert tensor_tuple[1] is True
assert_almost_equal(tensor_tuple[2].detach().cpu().numpy(), [self.np_index])
# This item was just padding.
tensor_tuple = tensors[1][2]
assert tensor_tuple[0] == ""
assert tensor_tuple[1] is False
assert tensor_tuple[2] is None
def test_production_rule_field_can_print(self):
field = ProductionRuleField("S -> [NP, VP]", is_global_rule=True)
print(field)
| allennlp-semparse-master | tests/fields/production_rule_field_test.py |
from collections import defaultdict
import pytest
from numpy.testing import assert_almost_equal
import torch
from .. import SemparseTestCase
from allennlp.common.checks import ConfigurationError
from allennlp.data import Vocabulary
from allennlp.data.token_indexers import SingleIdTokenIndexer, TokenCharactersIndexer
from allennlp.data.tokenizers import SpacyTokenizer
from allennlp_semparse.common.wikitables import TableQuestionContext
from allennlp_semparse.fields import KnowledgeGraphField
class TestKnowledgeGraphField(SemparseTestCase):
def setup_method(self):
self.tokenizer = SpacyTokenizer(pos_tags=True)
self.utterance = self.tokenizer.tokenize("where is mersin?")
self.token_indexers = {"tokens": SingleIdTokenIndexer("tokens")}
table_file = self.FIXTURES_ROOT / "data" / "wikitables" / "tables" / "341.tagged"
self.graph = TableQuestionContext.read_from_file(
table_file, self.utterance
).get_table_knowledge_graph()
self.vocab = Vocabulary()
self.name_index = self.vocab.add_token_to_namespace("name", namespace="tokens")
self.in_index = self.vocab.add_token_to_namespace("in", namespace="tokens")
self.english_index = self.vocab.add_token_to_namespace("english", namespace="tokens")
self.location_index = self.vocab.add_token_to_namespace("location", namespace="tokens")
self.mersin_index = self.vocab.add_token_to_namespace("mersin", namespace="tokens")
self.oov_index = self.vocab.get_token_index("random OOV string", namespace="tokens")
self.edirne_index = self.oov_index
self.field = KnowledgeGraphField(
self.graph, self.utterance, self.token_indexers, self.tokenizer
)
super().setup_method()
def test_count_vocab_items(self):
namespace_token_counts = defaultdict(lambda: defaultdict(int))
self.field.count_vocab_items(namespace_token_counts)
assert namespace_token_counts["tokens"] == {
"name": 1,
"in": 2,
"english": 2,
"location": 1,
"mersin": 1,
}
def test_get_padding_lengths_raises_if_not_indexed(self):
with pytest.raises(ConfigurationError):
self.field.get_padding_lengths()
def test_padding_lengths_are_computed_correctly(self):
self.field.index(self.vocab)
assert self.field.get_padding_lengths() == {
"num_entities": 3,
"num_utterance_tokens": 4,
"num_fields": 3,
"list_tokens___tokens": 3,
}
self.field._token_indexers["token_characters"] = TokenCharactersIndexer(
min_padding_length=1
)
self.field.index(self.vocab)
assert self.field.get_padding_lengths() == {
"num_entities": 3,
"num_utterance_tokens": 4,
"num_fields": 3,
"list_tokens___tokens": 3,
"list_token_characters___token_characters": 3,
"list_token_characters___num_token_characters": 8,
}
def test_as_tensor_produces_correct_output(self):
self.field.index(self.vocab)
padding_lengths = self.field.get_padding_lengths()
padding_lengths["num_utterance_tokens"] += 1
padding_lengths["num_entities"] += 1
padding_lengths["num_fields"] += 1
tensor_dict = self.field.as_tensor(padding_lengths)
assert tensor_dict.keys() == {"text", "linking"}
expected_text_tensor = [
[self.mersin_index, 0, 0],
[self.location_index, self.in_index, self.english_index],
[self.name_index, self.in_index, self.english_index],
[0, 0, 0],
]
assert_almost_equal(
tensor_dict["text"]["tokens"]["tokens"].detach().cpu().numpy(), expected_text_tensor
)
linking_tensor = tensor_dict["linking"].detach().cpu().numpy()
expected_linking_tensor = [
[
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0], # string:mersin, "where"
[0, 0, 0, 0, 0, -1.5, 0, 0, 0, 0], # string:mersin, "is"
[0, 1, 1, 1, 1, 1, 0, 0, 1, 1], # string:mersin, "mersin"
[0, 0, 0, 0, 0, -5, 0, 0, 0, 0], # string:mersin, "?"
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
], # string:mersin, padding
[
[0, 0, 0, 0, 0, -2.6, 0, 0, 0, 0], # string_column:name_in_english, "where"
[0, 0, 0, 0, 0, -7.5, 0, 0, 0, 0], # string_column:name_in_english, "is"
[0, 0, 0, 0, 0, -1.8333, 1, 1, 0, 0], # string_column:..in_english, "mersin"
[0, 0, 0, 0, 0, -18, 0, 0, 0, 0], # string_column:name_in_english, "?"
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
], # string_column:name_in_english, padding
[
[0, 0, 0, 0, 0, -1.6, 0, 0, 0, 0], # string_..:location_in_english, "where"
[0, 0, 0, 0, 0, -5.5, 0, 0, 0, 0], # string_column:location_in_english, "is"
[0, 0, 0, 0, 0, -1, 0, 0, 0, 0], # string_column:location_in_english, "mersin"
[0, 0, 0, 0, 0, -14, 0, 0, 0, 0], # string_column:location_in_english, "?"
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
], # string_column:location_in_english, padding
[
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0], # padding, "where"
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0], # padding, "is"
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0], # padding, "mersin"
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0], # padding, "?"
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
],
] # padding, padding
for entity_index, entity_features in enumerate(expected_linking_tensor):
for question_index, feature_vector in enumerate(entity_features):
assert_almost_equal(
linking_tensor[entity_index, question_index],
feature_vector,
decimal=4,
err_msg=f"{entity_index} {question_index}",
)
def test_lemma_feature_extractor(self):
utterance = self.tokenizer.tokenize("Names in English")
field = KnowledgeGraphField(self.graph, self.utterance, self.token_indexers, self.tokenizer)
entity = "string_column:name_in_english"
lemma_feature = field._contains_lemma_match(
entity, field._entity_text_map[entity], utterance[0], 0, utterance
)
assert lemma_feature == 1
def test_span_overlap_fraction(self):
utterance = self.tokenizer.tokenize("what is the name in english of mersin?")
field = KnowledgeGraphField(self.graph, self.utterance, self.token_indexers, self.tokenizer)
entity = "string_column:name_in_english"
entity_text = field._entity_text_map[entity]
feature_values = [
field._span_overlap_fraction(entity, entity_text, token, i, utterance)
for i, token in enumerate(utterance)
]
assert feature_values == [0, 0, 0, 1, 1, 1, 0, 0, 0]
def test_batch_tensors(self):
self.field.index(self.vocab)
padding_lengths = self.field.get_padding_lengths()
tensor_dict1 = self.field.as_tensor(padding_lengths)
tensor_dict2 = self.field.as_tensor(padding_lengths)
batched_tensor_dict = self.field.batch_tensors([tensor_dict1, tensor_dict2])
assert batched_tensor_dict.keys() == {"text", "linking"}
expected_single_tensor = [
[self.mersin_index, 0, 0],
[self.location_index, self.in_index, self.english_index],
[self.name_index, self.in_index, self.english_index],
]
expected_batched_tensor = [expected_single_tensor, expected_single_tensor]
assert_almost_equal(
batched_tensor_dict["text"]["tokens"]["tokens"].detach().cpu().numpy(),
expected_batched_tensor,
)
expected_linking_tensor = torch.stack([tensor_dict1["linking"], tensor_dict2["linking"]])
assert_almost_equal(
batched_tensor_dict["linking"].detach().cpu().numpy(),
expected_linking_tensor.detach().cpu().numpy(),
)
def test_field_initialized_with_empty_constructor(self):
try:
self.field.empty_field()
except AssertionError as e:
pytest.fail(str(e), pytrace=True)
| allennlp-semparse-master | tests/fields/knowledge_graph_field_test.py |
allennlp-semparse-master | tests/nltk_languages/__init__.py |
|
allennlp-semparse-master | tests/nltk_languages/contexts/__init__.py |
|
from ... import SemparseTestCase
from allennlp_semparse.nltk_languages.worlds.world import World
class FakeWorldWithoutRecursion(World):
def all_possible_actions(self):
# The logical forms this grammar allows are
# (unary_function argument)
# (binary_function argument argument)
actions = [
"@start@ -> t",
"t -> [<e,t>, e]",
"<e,t> -> unary_function",
"<e,t> -> [<e,<e,t>>, e]",
"<e,<e,t>> -> binary_function",
"e -> argument",
]
return actions
class FakeWorldWithRecursion(FakeWorldWithoutRecursion):
def all_possible_actions(self):
# In addition to the forms allowed by ``FakeWorldWithoutRecursion``, this world allows
# (unary_function (identity .... (argument)))
# (binary_function (identity .... (argument)) (identity .... (argument)))
actions = super(FakeWorldWithRecursion, self).all_possible_actions()
actions.extend(["e -> [<e,e>, e]", "<e,e> -> identity"])
return actions
class TestWorld(SemparseTestCase):
def setup_method(self):
super().setup_method()
self.world_without_recursion = FakeWorldWithoutRecursion()
self.world_with_recursion = FakeWorldWithRecursion()
def test_get_paths_to_root_without_recursion(self):
argument_paths = self.world_without_recursion.get_paths_to_root("e -> argument")
assert argument_paths == [
["e -> argument", "t -> [<e,t>, e]", "@start@ -> t"],
["e -> argument", "<e,t> -> [<e,<e,t>>, e]", "t -> [<e,t>, e]", "@start@ -> t"],
]
unary_function_paths = self.world_without_recursion.get_paths_to_root(
"<e,t> -> unary_function"
)
assert unary_function_paths == [
["<e,t> -> unary_function", "t -> [<e,t>, e]", "@start@ -> t"]
]
binary_function_paths = self.world_without_recursion.get_paths_to_root(
"<e,<e,t>> -> binary_function"
)
assert binary_function_paths == [
[
"<e,<e,t>> -> binary_function",
"<e,t> -> [<e,<e,t>>, e]",
"t -> [<e,t>, e]",
"@start@ -> t",
]
]
def test_get_paths_to_root_with_recursion(self):
argument_paths = self.world_with_recursion.get_paths_to_root("e -> argument")
# Argument now has 4 paths, and the two new paths are with the identity function occurring
# (only once) within unary and binary functions.
assert argument_paths == [
["e -> argument", "t -> [<e,t>, e]", "@start@ -> t"],
["e -> argument", "<e,t> -> [<e,<e,t>>, e]", "t -> [<e,t>, e]", "@start@ -> t"],
["e -> argument", "e -> [<e,e>, e]", "t -> [<e,t>, e]", "@start@ -> t"],
[
"e -> argument",
"e -> [<e,e>, e]",
"<e,t> -> [<e,<e,t>>, e]",
"t -> [<e,t>, e]",
"@start@ -> t",
],
]
identity_paths = self.world_with_recursion.get_paths_to_root("<e,e> -> identity")
# Two identity paths, one through each of unary and binary function productions.
assert identity_paths == [
["<e,e> -> identity", "e -> [<e,e>, e]", "t -> [<e,t>, e]", "@start@ -> t"],
[
"<e,e> -> identity",
"e -> [<e,e>, e]",
"<e,t> -> [<e,<e,t>>, e]",
"t -> [<e,t>, e]",
"@start@ -> t",
],
]
| allennlp-semparse-master | tests/nltk_languages/worlds/world_test.py |
allennlp-semparse-master | tests/nltk_languages/worlds/__init__.py |
|
allennlp-semparse-master | tests/nltk_languages/type_declarations/__init__.py |
|
from ... import SemparseTestCase
from allennlp_semparse.nltk_languages.type_declarations import type_declaration as types
from allennlp_semparse.nltk_languages.type_declarations.type_declaration import (
ANY_TYPE,
BinaryOpType,
ComplexType,
NamedBasicType,
UnaryOpType,
)
ROW_TYPE = NamedBasicType("row")
CELL_TYPE = NamedBasicType("cell")
class TestTypeDeclaration(SemparseTestCase):
def test_basic_types_conflict_on_names(self):
type_a = NamedBasicType("A")
type_b = NamedBasicType("B")
assert type_a.resolve(type_b) is None
def test_unary_ops_resolve_correctly(self):
unary_type = UnaryOpType()
# Resolution should fail against a basic type
assert unary_type.resolve(ROW_TYPE) is None
# Resolution should fail against a complex type where the argument and return types are not same
assert unary_type.resolve(ComplexType(CELL_TYPE, ROW_TYPE)) is None
# Resolution should resolve ANY_TYPE given the other type
resolution = unary_type.resolve(ComplexType(ANY_TYPE, ROW_TYPE))
assert resolution == UnaryOpType(ROW_TYPE)
resolution = unary_type.resolve(ComplexType(CELL_TYPE, ANY_TYPE))
assert resolution == UnaryOpType(CELL_TYPE)
reverse_type = ComplexType(
ComplexType(CELL_TYPE, ROW_TYPE), ComplexType(CELL_TYPE, ROW_TYPE)
)
resolution = unary_type.resolve(reverse_type)
assert resolution == UnaryOpType(ComplexType(CELL_TYPE, ROW_TYPE))
def test_binary_ops_resolve_correctly(self):
binary_type = BinaryOpType()
# Resolution must fail against a basic type and a complex type that returns a basic type
assert binary_type.resolve(CELL_TYPE) is None
assert binary_type.resolve(ComplexType(CELL_TYPE, ROW_TYPE)) is None
# Resolution must fail against incompatible types
complex_type = ComplexType(ANY_TYPE, ComplexType(CELL_TYPE, ROW_TYPE))
assert binary_type.resolve(complex_type) is None
complex_type = ComplexType(ROW_TYPE, ComplexType(CELL_TYPE, ANY_TYPE))
assert binary_type.resolve(complex_type) is None
complex_type = ComplexType(ROW_TYPE, ComplexType(ANY_TYPE, CELL_TYPE))
assert binary_type.resolve(complex_type) is None
# Resolution must resolve any types appropriately
complex_type = ComplexType(ROW_TYPE, ComplexType(ANY_TYPE, ROW_TYPE))
assert binary_type.resolve(complex_type) == BinaryOpType(ROW_TYPE)
complex_type = ComplexType(ROW_TYPE, ComplexType(ANY_TYPE, ANY_TYPE))
assert binary_type.resolve(complex_type) == BinaryOpType(ROW_TYPE)
complex_type = ComplexType(ANY_TYPE, ComplexType(ROW_TYPE, ANY_TYPE))
assert binary_type.resolve(complex_type) == BinaryOpType(ROW_TYPE)
def test_get_valid_actions(self):
type_r = NamedBasicType("R")
type_d = NamedBasicType("D")
type_e = NamedBasicType("E")
name_mapping = {"sample_function": "F"}
# <e,<r,<d,r>>>
type_signatures = {
"F": ComplexType(type_e, ComplexType(type_r, ComplexType(type_d, type_r)))
}
basic_types = {type_r, type_d, type_e}
valid_actions = types.get_valid_actions(name_mapping, type_signatures, basic_types)
assert len(valid_actions) == 3
assert valid_actions["<e,<r,<d,r>>>"] == ["<e,<r,<d,r>>> -> sample_function"]
assert valid_actions["r"] == ["r -> [<e,<r,<d,r>>>, e, r, d]"]
assert valid_actions["@start@"] == ["@start@ -> d", "@start@ -> e", "@start@ -> r"]
def test_get_valid_actions_with_placeholder_type(self):
type_r = NamedBasicType("R")
type_d = NamedBasicType("D")
type_e = NamedBasicType("E")
name_mapping = {"sample_function": "F"}
# <#1,#1>
type_signatures = {"F": UnaryOpType()}
basic_types = {type_r, type_d, type_e}
valid_actions = types.get_valid_actions(name_mapping, type_signatures, basic_types)
assert len(valid_actions) == 5
assert valid_actions["<#1,#1>"] == ["<#1,#1> -> sample_function"]
assert valid_actions["e"] == ["e -> [<#1,#1>, e]"]
assert valid_actions["r"] == ["r -> [<#1,#1>, r]"]
assert valid_actions["d"] == ["d -> [<#1,#1>, d]"]
assert valid_actions["@start@"] == ["@start@ -> d", "@start@ -> e", "@start@ -> r"]
def test_get_valid_actions_with_any_type(self):
type_r = NamedBasicType("R")
type_d = NamedBasicType("D")
type_e = NamedBasicType("E")
name_mapping = {"sample_function": "F"}
# The purpose of this test is to ensure that ANY_TYPE gets substituted by every possible basic type,
# to simulate an intermediate step while getting actions for a placeholder type.
# I do not foresee defining a function type with ANY_TYPE. We should just use a ``PlaceholderType``
# instead.
# <?,r>
type_signatures = {"F": ComplexType(ANY_TYPE, type_r)}
basic_types = {type_r, type_d, type_e}
valid_actions = types.get_valid_actions(name_mapping, type_signatures, basic_types)
assert len(valid_actions) == 5
assert valid_actions["<d,r>"] == ["<d,r> -> sample_function"]
assert valid_actions["<e,r>"] == ["<e,r> -> sample_function"]
assert valid_actions["<r,r>"] == ["<r,r> -> sample_function"]
assert valid_actions["r"] == ["r -> [<d,r>, d]", "r -> [<e,r>, e]", "r -> [<r,r>, r]"]
assert valid_actions["@start@"] == ["@start@ -> d", "@start@ -> e", "@start@ -> r"]
| allennlp-semparse-master | tests/nltk_languages/type_declarations/type_declaration_test.py |
_MAJOR = "0"
_MINOR = "0"
_REVISION = "4"
VERSION_SHORT = "{0}.{1}".format(_MAJOR, _MINOR)
VERSION = "{0}.{1}.{2}".format(_MAJOR, _MINOR, _REVISION)
| allennlp-semparse-master | allennlp_semparse/version.py |
from allennlp_semparse.common.action_space_walker import ActionSpaceWalker
from allennlp_semparse.domain_languages.domain_language import (
DomainLanguage,
predicate,
predicate_with_side_args,
)
| allennlp-semparse-master | allennlp_semparse/__init__.py |
"""
Reader for WikitableQuestions (https://github.com/ppasupat/WikiTableQuestions/releases/tag/v1.0.2).
"""
import logging
from typing import Dict, List, Any
import os
import gzip
import tarfile
from allennlp.data import DatasetReader
from allennlp.data.fields import Field, TextField, MetadataField, ListField, IndexField
from allennlp.data.instance import Instance
from allennlp.data.token_indexers import SingleIdTokenIndexer
from allennlp.data.token_indexers.token_indexer import TokenIndexer
from allennlp.data.tokenizers import Tokenizer, SpacyTokenizer
from allennlp_semparse.common import ParsingError
from allennlp_semparse.common.wikitables import TableQuestionContext
from allennlp_semparse.domain_languages import WikiTablesLanguage
from allennlp_semparse.fields import KnowledgeGraphField, ProductionRuleField
logger = logging.getLogger(__name__)
def parse_example_line(lisp_string: str) -> Dict:
"""
Training data in WikitableQuestions comes with examples in the form of lisp strings in the format:
(example (id <example-id>)
(utterance <question>)
(context (graph tables.TableKnowledgeGraph <table-filename>))
(targetValue (list (description <answer1>) (description <answer2>) ...)))
We parse such strings and return the parsed information here.
"""
id_piece, rest = lisp_string.split(') (utterance "')
example_id = id_piece.split("(id ")[1]
question, rest = rest.split('") (context (graph tables.TableKnowledgeGraph ')
table_filename, rest = rest.split(")) (targetValue (list")
target_value_strings = rest.strip().split("(description")
target_values = []
for string in target_value_strings:
string = string.replace(")", "").replace('"', "").strip()
if string != "":
target_values.append(string)
return {
"id": example_id,
"question": question,
"table_filename": table_filename,
"target_values": target_values,
}
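# A minimal usage sketch of ``parse_example_line``; the example line below is invented
# for illustration and is not taken from the real dataset.
def _parse_example_line_demo() -> None:
    line = (
        '(example (id nt-0) (utterance "what was the last year?") '
        "(context (graph tables.TableKnowledgeGraph csv/204-csv/590.csv)) "
        '(targetValue (list (description "2004"))))'
    )
    parsed = parse_example_line(line)
    assert parsed["id"] == "nt-0"
    assert parsed["question"] == "what was the last year?"
    assert parsed["table_filename"] == "csv/204-csv/590.csv"
    assert parsed["target_values"] == ["2004"]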
@DatasetReader.register("wikitables")
class WikiTablesDatasetReader(DatasetReader):
"""
This ``DatasetReader`` takes WikiTableQuestions ``*.examples`` files and converts them into
``Instances`` suitable for use with the ``WikiTablesSemanticParser``.
The ``*.examples`` files have pointers in them to two other files: a file that contains an
associated table for each question, and a file that has pre-computed, possible logical forms.
Because of how the ``DatasetReader`` API works, we need to take base directories for those
other files in the constructor.
We initialize the dataset reader with paths to the tables directory and the directory where offline search
output is stored if you are training. While testing, you can either provide existing table
filenames or if your question is about a new table, provide the content of the table as a dict
(See :func:`TableQuestionContext.read_from_json` for the expected format). If you are
doing the former, you still need to provide a ``tables_directory`` path here.
We lowercase the question and all table text, because the questions in the data are typically
all lowercase, anyway. This makes it so that any live demo that you put up will have questions
that match the data this was trained on. Lowercasing the table text makes matching the
lowercased question text easier.
Parameters
----------
tables_directory : ``str``, optional
Prefix for the path to the directory in which the tables reside. For example,
``*.examples`` files contain paths like ``csv/204-csv/590.csv``, and we will use the corresponding
``tagged`` files by manipulating the paths in the examples files. This is the directory that
contains the ``csv`` and ``tagged`` directories. This is only optional for ``Predictors`` (i.e., in a
demo), where you're only calling :func:`text_to_instance`.
offline_logical_forms_directory : ``str``, optional
Directory that contains all the gzipped offline search output files. We assume the filenames match the
example IDs (e.g.: ``nt-0.gz``). This is required for training a model, but not required
for prediction.
max_offline_logical_forms : ``int``, optional (default=10)
We will use the first ``max_offline_logical_forms`` logical forms as our target label. Only
applicable if ``offline_logical_forms_directory`` is given.
keep_if_no_logical_forms : ``bool``, optional (default=False)
If ``True``, we will keep instances we read that don't have offline search output. If you want to
compute denotation accuracy on the full dataset, you should set this to ``True``.
Otherwise, your accuracy numbers will only reflect the subset of the data that has offline search
output.
tokenizer : ``Tokenizer``, optional
Tokenizer to use for the questions. Will default to ``SpacyTokenizer()`` with Spacy's tagger
enabled, as we use lemma matches as features for entity linking.
question_token_indexers : ``Dict[str, TokenIndexer]``, optional
Token indexers for questions. Will default to ``{"tokens": SingleIdTokenIndexer()}``.
table_token_indexers : ``Dict[str, TokenIndexer]``, optional
Token indexers for table entities. Will default to ``question_token_indexers`` (though you
very likely want to use something different for these, as you can't rely on having an
embedding for every table entity at test time).
use_table_for_vocab : ``bool`` (optional, default=False)
If ``True``, we will include table cell text in vocabulary creation. The original parser
did not do this, because the same table can appear multiple times, messing with vocab
counts, and making you include lots of rare entities in your vocab.
max_table_tokens : ``int``, optional
If given, we will only keep this number of total table tokens. This bounds the memory
usage of the table representations, truncating cells with really long text. We specify a
total number of tokens, not a max cell text length, because the number of table entities
varies.
output_agendas : ``bool``, (optional, default=False)
Should we output agenda fields? This needs to be true if you want to train a coverage based
parser.
"""
def __init__(
self,
tables_directory: str = None,
offline_logical_forms_directory: str = None,
max_offline_logical_forms: int = 10,
keep_if_no_logical_forms: bool = False,
tokenizer: Tokenizer = None,
question_token_indexers: Dict[str, TokenIndexer] = None,
table_token_indexers: Dict[str, TokenIndexer] = None,
use_table_for_vocab: bool = False,
max_table_tokens: int = None,
output_agendas: bool = False,
**kwargs,
) -> None:
super().__init__(**kwargs)
self._tables_directory = tables_directory
self._offline_logical_forms_directory = offline_logical_forms_directory
self._max_offline_logical_forms = max_offline_logical_forms
self._keep_if_no_logical_forms = keep_if_no_logical_forms
self._tokenizer = tokenizer or SpacyTokenizer(pos_tags=True)
self._question_token_indexers = question_token_indexers or {
"tokens": SingleIdTokenIndexer()
}
self._table_token_indexers = table_token_indexers or self._question_token_indexers
self._use_table_for_vocab = use_table_for_vocab
self._max_table_tokens = max_table_tokens
self._output_agendas = output_agendas
def _read(self, file_path: str):
# Checking if there is a single tarball with all the logical forms. If so, untaring it
# first.
if self._offline_logical_forms_directory:
tarball_with_all_lfs: str = None
for filename in os.listdir(self._offline_logical_forms_directory):
if filename.endswith(".tar.gz"):
tarball_with_all_lfs = os.path.join(
self._offline_logical_forms_directory, filename
)
break
if tarball_with_all_lfs is not None:
logger.info(
f"Found a tarball in offline logical forms directory: {tarball_with_all_lfs}"
)
logger.info(
"Assuming it contains logical forms for all questions and un-taring it."
)
# If you're running this with beaker, the input directory will be read-only and we
# cannot untar the files in the directory itself. So we will do so in /tmp, but that
# means the new offline logical forms directory will be /tmp.
self._offline_logical_forms_directory = "/tmp/"
tarfile.open(tarball_with_all_lfs, mode="r:gz").extractall(
path=self._offline_logical_forms_directory
)
with open(file_path, "r") as data_file:
num_missing_logical_forms = 0
num_lines = 0
num_instances = 0
for line in data_file.readlines():
line = line.strip("\n")
if not line:
continue
num_lines += 1
parsed_info = parse_example_line(line)
question = parsed_info["question"]
# We want the tagged file, but the ``*.examples`` files typically point to CSV.
table_filename = os.path.join(
self._tables_directory, parsed_info["table_filename"].replace("csv", "tagged")
)
if self._offline_logical_forms_directory:
logical_forms_filename = os.path.join(
self._offline_logical_forms_directory, parsed_info["id"] + ".gz"
)
try:
                        logger.debug(logical_forms_filename)
logical_forms_file = gzip.open(logical_forms_filename)
logical_forms = []
for logical_form_line in logical_forms_file:
logical_forms.append(logical_form_line.strip().decode("utf-8"))
except FileNotFoundError:
logger.debug(
f'Missing search output for instance {parsed_info["id"]}; skipping...'
)
logical_forms = None
num_missing_logical_forms += 1
if not self._keep_if_no_logical_forms:
continue
else:
logical_forms = None
table_lines = [line.split("\t") for line in open(table_filename).readlines()]
instance = self.text_to_instance(
question=question,
table_lines=table_lines,
target_values=parsed_info["target_values"],
offline_search_output=logical_forms,
)
if instance is not None:
num_instances += 1
yield instance
if self._offline_logical_forms_directory:
logger.info(
f"Missing logical forms for {num_missing_logical_forms} out of {num_lines} instances"
)
logger.info(f"Kept {num_instances} instances")
def text_to_instance(
self, # type: ignore
question: str,
table_lines: List[List[str]],
target_values: List[str] = None,
offline_search_output: List[str] = None,
) -> Instance:
"""
Reads text inputs and makes an instance. We pass the ``table_lines`` to ``TableQuestionContext``, and that
method accepts this field either as lines from CoreNLP processed tagged files that come with the dataset,
or simply in a tsv format where each line corresponds to a row and the cells are tab-separated.
Parameters
----------
question : ``str``
Input question
table_lines : ``List[List[str]]``
The table content optionally preprocessed by CoreNLP. See ``TableQuestionContext.read_from_lines``
for the expected format.
target_values : ``List[str]``, optional
Target values for the denotations the logical forms should execute to. Not required for testing.
offline_search_output : ``List[str]``, optional
List of logical forms, produced by offline search. Not required during test.
"""
tokenized_question = self._tokenizer.tokenize(question.lower())
question_field = TextField(tokenized_question, self._question_token_indexers)
metadata: Dict[str, Any] = {"question_tokens": [x.text for x in tokenized_question]}
table_context = TableQuestionContext.read_from_lines(table_lines, tokenized_question)
world = WikiTablesLanguage(table_context)
world_field = MetadataField(world)
        # Note: Not passing any feature extractors when instantiating the field below. This will make
# it use all the available extractors.
table_field = KnowledgeGraphField(
table_context.get_table_knowledge_graph(),
tokenized_question,
self._table_token_indexers,
tokenizer=self._tokenizer,
include_in_vocab=self._use_table_for_vocab,
max_table_tokens=self._max_table_tokens,
)
production_rule_fields: List[Field] = []
for production_rule in world.all_possible_productions():
_, rule_right_side = production_rule.split(" -> ")
is_global_rule = not world.is_instance_specific_entity(rule_right_side)
field = ProductionRuleField(production_rule, is_global_rule=is_global_rule)
production_rule_fields.append(field)
action_field = ListField(production_rule_fields)
fields = {
"question": question_field,
"metadata": MetadataField(metadata),
"table": table_field,
"world": world_field,
"actions": action_field,
}
if target_values is not None:
target_values_field = MetadataField(target_values)
fields["target_values"] = target_values_field
# We'll make each target action sequence a List[IndexField], where the index is into
# the action list we made above. We need to ignore the type here because mypy doesn't
# like `action.rule` - it's hard to tell mypy that the ListField is made up of
# ProductionRuleFields.
action_map = {
action.rule: i for i, action in enumerate(action_field.field_list)
} # type: ignore
if offline_search_output:
action_sequence_fields: List[Field] = []
for logical_form in offline_search_output:
try:
action_sequence = world.logical_form_to_action_sequence(logical_form)
index_fields: List[Field] = []
for production_rule in action_sequence:
index_fields.append(IndexField(action_map[production_rule], action_field))
action_sequence_fields.append(ListField(index_fields))
except ParsingError as error:
logger.debug(f"Parsing error: {error.message}, skipping logical form")
logger.debug(f"Question was: {question}")
logger.debug(f"Logical form was: {logical_form}")
logger.debug(f"Table info was: {table_lines}")
continue
except KeyError as error:
logger.debug(f"Missing production rule: {error.args}, skipping logical form")
logger.debug(f"Question was: {question}")
logger.debug(f"Table info was: {table_lines}")
logger.debug(f"Logical form was: {logical_form}")
continue
except: # noqa
logger.error(logical_form)
raise
if len(action_sequence_fields) >= self._max_offline_logical_forms:
break
if not action_sequence_fields:
# This is not great, but we're only doing it when we're passed logical form
# supervision, so we're expecting labeled logical forms, but we can't actually
# produce the logical forms. We should skip this instance. Note that this affects
# _dev_ and _test_ instances, too, so your metrics could be over-estimates on the
# full test data.
return None
fields["target_action_sequences"] = ListField(action_sequence_fields)
if self._output_agendas:
agenda_index_fields: List[Field] = []
for agenda_string in world.get_agenda(conservative=True):
agenda_index_fields.append(IndexField(action_map[agenda_string], action_field))
if not agenda_index_fields:
agenda_index_fields = [IndexField(-1, action_field)]
fields["agenda"] = ListField(agenda_index_fields)
return Instance(fields)
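# A hedged sketch of wiring this reader up for training; the three path arguments are
# placeholders supplied by the caller, not paths from the real dataset.
def _wikitables_reader_demo(
    tables_directory: str, offline_logical_forms_directory: str, examples_file: str
):
    reader = WikiTablesDatasetReader(
        tables_directory=tables_directory,
        offline_logical_forms_directory=offline_logical_forms_directory,
        max_offline_logical_forms=10,
        keep_if_no_logical_forms=False,
    )
    return list(reader.read(examples_file))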
| allennlp-semparse-master | allennlp_semparse/dataset_readers/wikitables.py |
import json
from typing import Dict, List
import logging
from copy import deepcopy
from parsimonious.exceptions import ParseError
from allennlp.common.file_utils import cached_path
from allennlp.data import DatasetReader
from allennlp.data.fields import Field, ArrayField, ListField, IndexField, TextField, MetadataField
from allennlp.data.instance import Instance
from allennlp.data.token_indexers import SingleIdTokenIndexer, TokenIndexer
from allennlp.data.tokenizers import Tokenizer, SpacyTokenizer
from allennlp_semparse.fields import ProductionRuleField
from allennlp_semparse.parsimonious_languages.worlds.atis_world import AtisWorld
from allennlp_semparse.parsimonious_languages.contexts.atis_sql_table_context import (
NUMERIC_NONTERMINALS,
)
logger = logging.getLogger(__name__)
END_OF_UTTERANCE_TOKEN = "@@EOU@@"
def _lazy_parse(text: str):
for interaction in text.split("\n"):
if interaction:
yield json.loads(interaction)
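# A small sketch of what ``_lazy_parse`` yields for newline-delimited JSON; the two
# records below are invented for illustration.
def _lazy_parse_demo() -> None:
    text = '{"id": "interaction-1", "interaction": []}\n{"id": "interaction-2", "interaction": []}'
    parsed_ids = [record["id"] for record in _lazy_parse(text)]
    assert parsed_ids == ["interaction-1", "interaction-2"]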
@DatasetReader.register("atis")
class AtisDatasetReader(DatasetReader):
"""
This ``DatasetReader`` takes json files and converts them into ``Instances`` for the
``AtisSemanticParser``.
Each line in the file is a JSON object that represent an interaction in the ATIS dataset
that has the following keys and values:
```
"id": The original filepath in the LDC corpus
"interaction": <list where each element represents a turn in the interaction>
"scenario": A code that refers to the scenario that served as the prompt for this interaction
"ut_date": Date of the interaction
"zc09_path": Path that was used in the original paper `Learning Context-Dependent Mappings from
Sentences to Logical Form
<https://www.semanticscholar.org/paper/Learning-Context-Dependent-Mappings-from-Sentences-Zettlemoyer-Collins/44a8fcee0741139fa15862dc4b6ce1e11444878f>'_ by Zettlemoyer and Collins (ACL/IJCNLP 2009)
```
Each element in the ``interaction`` list has the following keys and values:
```
"utterance": Natural language input
"sql": A list of SQL queries that the utterance maps to, it could be multiple SQL queries
or none at all.
```
Parameters
----------
token_indexers : ``Dict[str, TokenIndexer]``, optional
Token indexers for the utterances. Will default to ``{"tokens": SingleIdTokenIndexer()}``.
tokenizer : ``Tokenizer``, optional
Tokenizer to use for the utterances. Will default to ``SpacyTokenizer()`` with Spacy's tagger
enabled.
database_file: ``str``, optional
        The path to the directory containing the sqlite database file. We query the sqlite database to find the strings
that are allowed.
    num_turns_to_concatenate: ``int``, optional
The number of utterances to concatenate as the conversation context.
""" # noqa
def __init__(
self,
token_indexers: Dict[str, TokenIndexer] = None,
keep_if_unparseable: bool = False,
tokenizer: Tokenizer = None,
database_file: str = None,
num_turns_to_concatenate: int = 1,
**kwargs,
) -> None:
super().__init__(**kwargs)
self._keep_if_unparseable = keep_if_unparseable
self._token_indexers = token_indexers or {"tokens": SingleIdTokenIndexer()}
self._tokenizer = tokenizer or SpacyTokenizer()
self._database_file = database_file
self._num_turns_to_concatenate = num_turns_to_concatenate
def _read(self, file_path: str):
# if `file_path` is a URL, redirect to the cache
file_path = cached_path(file_path)
with open(file_path) as atis_file:
logger.info("Reading ATIS instances from dataset at : %s", file_path)
for line in _lazy_parse(atis_file.read()):
utterances = []
for current_interaction in line["interaction"]:
if not current_interaction["utterance"] or not current_interaction["sql"]:
continue
utterances.append(current_interaction["utterance"])
sql_query_labels = [
query for query in current_interaction["sql"].split("\n") if query
]
instance = self.text_to_instance(deepcopy(utterances), sql_query_labels)
if not instance:
continue
yield instance
def text_to_instance( # type: ignore
self, utterances: List[str], sql_query_labels: List[str] = None
) -> Instance:
"""
Parameters
----------
utterances: ``List[str]``, required.
List of utterances in the interaction, the last element is the current utterance.
sql_query_labels: ``List[str]``, optional
The SQL queries that are given as labels during training or validation.
"""
if self._num_turns_to_concatenate:
utterances[-1] = f" {END_OF_UTTERANCE_TOKEN} ".join(
utterances[-self._num_turns_to_concatenate :]
)
utterance = utterances[-1]
action_sequence: List[str] = []
if not utterance:
return None
world = AtisWorld(utterances=utterances)
if sql_query_labels:
# If there are multiple sql queries given as labels, we use the shortest
# one for training.
sql_query = min(sql_query_labels, key=len)
try:
action_sequence = world.get_action_sequence(sql_query)
except ParseError:
action_sequence = []
logger.debug("Parsing error")
tokenized_utterance = self._tokenizer.tokenize(utterance.lower())
utterance_field = TextField(tokenized_utterance, self._token_indexers)
production_rule_fields: List[Field] = []
for production_rule in world.all_possible_actions():
nonterminal, _ = production_rule.split(" ->")
# The whitespaces are not semantically meaningful, so we filter them out.
production_rule = " ".join(
[token for token in production_rule.split(" ") if token != "ws"]
)
field = ProductionRuleField(production_rule, self._is_global_rule(nonterminal))
production_rule_fields.append(field)
action_field = ListField(production_rule_fields)
action_map = {
action.rule: i for i, action in enumerate(action_field.field_list) # type: ignore
}
index_fields: List[Field] = []
world_field = MetadataField(world)
fields = {
"utterance": utterance_field,
"actions": action_field,
"world": world_field,
"linking_scores": ArrayField(world.linking_scores),
}
if sql_query_labels is not None:
fields["sql_queries"] = MetadataField(sql_query_labels)
if self._keep_if_unparseable or action_sequence:
for production_rule in action_sequence:
index_fields.append(IndexField(action_map[production_rule], action_field))
if not action_sequence:
index_fields = [IndexField(-1, action_field)]
action_sequence_field = ListField(index_fields)
fields["target_action_sequence"] = action_sequence_field
else:
# If we are given a SQL query, but we are unable to parse it, and we do not specify explicitly
                # to keep it, then we will skip it.
return None
return Instance(fields)
@staticmethod
def _is_global_rule(nonterminal: str) -> bool:
if nonterminal in NUMERIC_NONTERMINALS:
return False
elif nonterminal.endswith("string"):
return False
return True
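# A brief sketch of how ``_is_global_rule`` separates reusable grammar rules from
# instance-specific ones; the nonterminal names below are illustrative.
def _is_global_rule_demo() -> None:
    # Database string values are linked per utterance, so they are not global rules.
    assert not AtisDatasetReader._is_global_rule("city_city_name_string")
    # Ordinary grammar nonterminals are global.
    assert AtisDatasetReader._is_global_rule("query")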
| allennlp-semparse-master | allennlp_semparse/dataset_readers/atis.py |
from typing import Any, Dict, List
import json
import logging
from allennlp.common.util import JsonDict
from allennlp.data import DatasetReader
from allennlp.data.fields import Field, TextField, ListField, IndexField, LabelField, MetadataField
from allennlp.data.instance import Instance
from allennlp.data.token_indexers import TokenIndexer, SingleIdTokenIndexer
from allennlp.data.tokenizers import Tokenizer, SpacyTokenizer
from allennlp_semparse.domain_languages import NlvrLanguage
from allennlp_semparse.domain_languages.nlvr_language import Box
from allennlp_semparse.fields import ProductionRuleField
logger = logging.getLogger(__name__)
@DatasetReader.register("nlvr")
class NlvrDatasetReader(DatasetReader):
"""
``DatasetReader`` for the NLVR domain. In addition to the usual methods for reading files and
instances from text, this class contains a method for creating an agenda of actions that each
sentence triggers, if needed. Note that we deal with the version of the dataset with structured
representations of the synthetic images instead of the actual images themselves.
We support multiple data formats here:
1) The original json version of the NLVR dataset (http://lic.nlp.cornell.edu/nlvr/) where the
format of each line in the jsonl file is
```
"sentence": <sentence>,
"label": <true/false>,
"identifier": <id>,
"evals": <dict containing all annotations>,
"structured_rep": <list of three box representations, where each box is a list of object
representation dicts, containing fields "x_loc", "y_loc", "color", "type", "size">
```
2) A grouped version (constructed using ``scripts/nlvr/group_nlvr_worlds.py``) where we group
all the worlds that a sentence appears in. We use the fields ``sentence``, ``label`` and
``structured_rep``. And the format of the grouped files is
```
"sentence": <sentence>,
"labels": <list of labels corresponding to worlds the sentence appears in>
"identifier": <id that is only the prefix from the original data>
"worlds": <list of structured representations>
```
3) A processed version that contains action sequences that lead to the correct denotations (or
not), using some search. This format is very similar to the grouped format, and has the
following extra field
```
"correct_sequences": <list of lists of action sequences corresponding to logical forms that
evaluate to the correct denotations>
```
Parameters
----------
tokenizer : ``Tokenizer`` (optional)
The tokenizer used for sentences in NLVR. Default is ``SpacyTokenizer``
sentence_token_indexers : ``Dict[str, TokenIndexer]`` (optional)
Token indexers for tokens in input sentences.
Default is ``{"tokens": SingleIdTokenIndexer()}``
nonterminal_indexers : ``Dict[str, TokenIndexer]`` (optional)
Indexers for non-terminals in production rules. The default is to index terminals and
non-terminals in the same way, but you may want to change it.
Default is ``{"tokens": SingleIdTokenIndexer("rule_labels")}``
terminal_indexers : ``Dict[str, TokenIndexer]`` (optional)
Indexers for terminals in production rules. The default is to index terminals and
non-terminals in the same way, but you may want to change it.
Default is ``{"tokens": SingleIdTokenIndexer("rule_labels")}``
output_agendas : ``bool`` (optional)
        If preparing data for a trainer that uses agendas, set this flag and the dataset reader will
output agendas.
"""
def __init__(
self,
tokenizer: Tokenizer = None,
sentence_token_indexers: Dict[str, TokenIndexer] = None,
nonterminal_indexers: Dict[str, TokenIndexer] = None,
terminal_indexers: Dict[str, TokenIndexer] = None,
output_agendas: bool = True,
**kwargs,
) -> None:
super().__init__(**kwargs)
self._tokenizer = tokenizer or SpacyTokenizer()
self._sentence_token_indexers = sentence_token_indexers or {
"tokens": SingleIdTokenIndexer()
}
self._nonterminal_indexers = nonterminal_indexers or {
"tokens": SingleIdTokenIndexer("rule_labels")
}
self._terminal_indexers = terminal_indexers or {
"tokens": SingleIdTokenIndexer("rule_labels")
}
self._output_agendas = output_agendas
def _read(self, file_path: str):
with open(file_path, "r") as data_file:
logger.info("Reading instances from lines in file: %s", file_path)
for line in data_file:
line = line.strip("\n")
if not line:
continue
data = json.loads(line)
sentence = data["sentence"]
identifier = data["identifier"] if "identifier" in data else data["id"]
if "worlds" in data:
# This means that we are reading grouped nlvr data. There will be multiple
# worlds and corresponding labels per sentence.
labels = data["labels"]
structured_representations = data["worlds"]
else:
# We will make lists of labels and structured representations, each with just
# one element for consistency.
labels = [data["label"]]
structured_representations = [data["structured_rep"]]
target_sequences: List[List[str]] = None
# TODO(pradeep): The processed file also has incorrect sequences as well, which are
# needed if we want to define some sort of a hinge-loss based trainer. Deal with
# them.
if "correct_sequences" in data:
# We are reading the processed file and these are the "correct" logical form
# sequences. See ``scripts/nlvr/get_nlvr_logical_forms.py``.
target_sequences = data["correct_sequences"]
instance = self.text_to_instance(
sentence, structured_representations, labels, target_sequences, identifier
)
if instance is not None:
yield instance
def text_to_instance(
self, # type: ignore
sentence: str,
structured_representations: List[List[List[JsonDict]]],
labels: List[str] = None,
target_sequences: List[List[str]] = None,
identifier: str = None,
) -> Instance:
"""
Parameters
----------
sentence : ``str``
The query sentence.
structured_representations : ``List[List[List[JsonDict]]]``
A list of Json representations of all the worlds. See expected format in this class' docstring.
labels : ``List[str]`` (optional)
List of string representations of the labels (true or false) corresponding to the
``structured_representations``. Not required while testing.
target_sequences : ``List[List[str]]`` (optional)
List of target action sequences for each element which lead to the correct denotation in
worlds corresponding to the structured representations.
identifier : ``str`` (optional)
The identifier from the dataset if available.
"""
worlds = []
for structured_representation in structured_representations:
boxes = {
Box(object_list, box_id)
for box_id, object_list in enumerate(structured_representation)
}
worlds.append(NlvrLanguage(boxes))
tokenized_sentence = self._tokenizer.tokenize(sentence)
sentence_field = TextField(tokenized_sentence, self._sentence_token_indexers)
production_rule_fields: List[Field] = []
instance_action_ids: Dict[str, int] = {}
# TODO(pradeep): Assuming that possible actions are the same in all worlds. This may change
# later.
for production_rule in worlds[0].all_possible_productions():
instance_action_ids[production_rule] = len(instance_action_ids)
field = ProductionRuleField(production_rule, is_global_rule=True)
production_rule_fields.append(field)
action_field = ListField(production_rule_fields)
worlds_field = ListField([MetadataField(world) for world in worlds])
metadata: Dict[str, Any] = {"sentence_tokens": [x.text for x in tokenized_sentence]}
fields: Dict[str, Field] = {
"sentence": sentence_field,
"worlds": worlds_field,
"actions": action_field,
"metadata": MetadataField(metadata),
}
if identifier is not None:
fields["identifier"] = MetadataField(identifier)
# Depending on the type of supervision used for training the parser, we may want either
# target action sequences or an agenda in our instance. We check if target sequences are
# provided, and include them if they are. If not, we'll get an agenda for the sentence, and
# include that in the instance.
if target_sequences:
action_sequence_fields: List[Field] = []
for target_sequence in target_sequences:
index_fields = ListField(
[
IndexField(instance_action_ids[action], action_field)
for action in target_sequence
]
)
action_sequence_fields.append(index_fields)
# TODO(pradeep): Define a max length for this field.
fields["target_action_sequences"] = ListField(action_sequence_fields)
elif self._output_agendas:
# TODO(pradeep): Assuming every world gives the same agenda for a sentence. This is true
# now, but may change later too.
agenda = worlds[0].get_agenda_for_sentence(sentence)
assert agenda, "No agenda found for sentence: %s" % sentence
# agenda_field contains indices into actions.
agenda_field = ListField(
[IndexField(instance_action_ids[action], action_field) for action in agenda]
)
fields["agenda"] = agenda_field
if labels:
labels_field = ListField(
[LabelField(label, label_namespace="denotations") for label in labels]
)
fields["labels"] = labels_field
return Instance(fields)
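# A hedged sketch of one line in the grouped-data format that ``_read`` expects (see the
# class docstring); every value below is invented, and real worlds contain three boxes each.
def _grouped_nlvr_line_demo() -> str:
    line = {
        "sentence": "There is a yellow object.",
        "labels": ["true", "false"],
        "identifier": "1304",
        "worlds": [
            [[{"x_loc": 21, "y_loc": 21, "color": "Yellow", "type": "circle", "size": 10}]],
            [[{"x_loc": 40, "y_loc": 15, "color": "Black", "type": "square", "size": 20}]],
        ],
    }
    return json.dumps(line)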
| allennlp-semparse-master | allennlp_semparse/dataset_readers/nlvr.py |
from allennlp_semparse.dataset_readers.atis import AtisDatasetReader
from allennlp_semparse.dataset_readers.grammar_based_text2sql import (
GrammarBasedText2SqlDatasetReader,
)
from allennlp_semparse.dataset_readers.nlvr import NlvrDatasetReader
from allennlp_semparse.dataset_readers.template_text2sql import TemplateText2SqlDatasetReader
from allennlp_semparse.dataset_readers.wikitables import WikiTablesDatasetReader
| allennlp-semparse-master | allennlp_semparse/dataset_readers/__init__.py |
from typing import Dict, List
import logging
import json
import glob
import os
import sqlite3
from allennlp.common.file_utils import cached_path
from allennlp.common.checks import ConfigurationError
from allennlp.data import DatasetReader
from allennlp.data.fields import TextField, Field, ListField, IndexField
from allennlp.data.instance import Instance
from allennlp.data.tokenizers import Token
from allennlp.data.token_indexers import TokenIndexer, SingleIdTokenIndexer
from allennlp_semparse.common.sql import text2sql_utils as util
from allennlp_semparse.fields import ProductionRuleField
from allennlp_semparse.parsimonious_languages.worlds.text2sql_world import Text2SqlWorld
logger = logging.getLogger(__name__)
@DatasetReader.register("grammar_based_text2sql")
class GrammarBasedText2SqlDatasetReader(DatasetReader):
"""
Reads text2sql data from
`"Improving Text to SQL Evaluation Methodology" <https://arxiv.org/abs/1806.09029>`_
for a type constrained semantic parser.
Parameters
----------
schema_path : ``str``, required.
The path to the database schema.
database_path : ``str``, optional (default = None)
The path to a database.
use_all_sql : ``bool``, optional (default = False)
Whether to use all of the sql queries which have identical semantics,
or whether to just use the first one.
remove_unneeded_aliases : ``bool``, (default = True)
Whether or not to remove table aliases in the SQL which
are not required.
use_prelinked_entities : ``bool``, (default = True)
Whether or not to use the pre-linked entities in the text2sql data.
use_untyped_entities : ``bool``, (default = True)
Whether or not to attempt to infer the pre-linked entity types.
token_indexers : ``Dict[str, TokenIndexer]``, optional (default=``{"tokens": SingleIdTokenIndexer()}``)
We use this to define the input representation for the text. See :class:`TokenIndexer`.
Note that the `output` tags will always correspond to single token IDs based on how they
are pre-tokenised in the data file.
cross_validation_split_to_exclude : ``int``, optional (default = None)
Some of the text2sql datasets are very small, so you may need to do cross validation.
Here, you can specify a integer corresponding to a split_{int}.json file not to include
in the training set.
    keep_if_unparseable : ``bool``, optional (default = True)
Whether or not to keep examples that we can't parse using the grammar.
"""
def __init__(
self,
schema_path: str,
database_file: str = None,
use_all_sql: bool = False,
remove_unneeded_aliases: bool = True,
use_prelinked_entities: bool = True,
use_untyped_entities: bool = True,
token_indexers: Dict[str, TokenIndexer] = None,
cross_validation_split_to_exclude: int = None,
keep_if_unparseable: bool = True,
**kwargs,
) -> None:
super().__init__(**kwargs)
self._token_indexers = token_indexers or {"tokens": SingleIdTokenIndexer()}
self._use_all_sql = use_all_sql
self._remove_unneeded_aliases = remove_unneeded_aliases
self._use_prelinked_entities = use_prelinked_entities
self._keep_if_unparsable = keep_if_unparseable
if not self._use_prelinked_entities:
raise ConfigurationError(
"The grammar based text2sql dataset reader "
"currently requires the use of entity pre-linking."
)
self._cross_validation_split_to_exclude = str(cross_validation_split_to_exclude)
if database_file is not None:
database_file = cached_path(database_file)
connection = sqlite3.connect(database_file)
self._cursor = connection.cursor()
else:
self._cursor = None
self._schema_path = schema_path
self._world = Text2SqlWorld(
schema_path,
self._cursor,
use_prelinked_entities=use_prelinked_entities,
use_untyped_entities=use_untyped_entities,
)
def _read(self, file_path: str):
"""
This dataset reader consumes the data from
https://github.com/jkkummerfeld/text2sql-data/tree/master/data
formatted using ``scripts/reformat_text2sql_data.py``.
Parameters
----------
file_path : ``str``, required.
For this dataset reader, file_path can either be a path to a file `or` a
path to a directory containing json files. The reason for this is because
some of the text2sql datasets require cross validation, which means they are split
up into many small files, for which you only want to exclude one.
"""
files = [
p
for p in glob.glob(file_path)
if self._cross_validation_split_to_exclude not in os.path.basename(p)
]
schema = util.read_dataset_schema(self._schema_path)
for path in files:
with open(cached_path(path), "r") as data_file:
data = json.load(data_file)
for sql_data in util.process_sql_data(
data,
use_all_sql=self._use_all_sql,
remove_unneeded_aliases=self._remove_unneeded_aliases,
schema=schema,
):
linked_entities = sql_data.sql_variables if self._use_prelinked_entities else None
instance = self.text_to_instance(
sql_data.text_with_variables, linked_entities, sql_data.sql
)
if instance is not None:
yield instance
def text_to_instance(
self, # type: ignore
query: List[str],
prelinked_entities: Dict[str, Dict[str, str]] = None,
sql: List[str] = None,
) -> Instance:
fields: Dict[str, Field] = {}
tokens = TextField([Token(t) for t in query], self._token_indexers)
fields["tokens"] = tokens
if sql is not None:
action_sequence, all_actions = self._world.get_action_sequence_and_all_actions(
sql, prelinked_entities
)
if action_sequence is None and self._keep_if_unparsable:
                logger.debug("Parse error")
action_sequence = []
elif action_sequence is None:
return None
index_fields: List[Field] = []
production_rule_fields: List[Field] = []
for production_rule in all_actions:
nonterminal, _ = production_rule.split(" ->")
production_rule = " ".join(production_rule.split(" "))
field = ProductionRuleField(
production_rule, self._world.is_global_rule(nonterminal), nonterminal=nonterminal
)
production_rule_fields.append(field)
valid_actions_field = ListField(production_rule_fields)
fields["valid_actions"] = valid_actions_field
action_map = {
action.rule: i # type: ignore
for i, action in enumerate(valid_actions_field.field_list)
}
for production_rule in action_sequence:
index_fields.append(IndexField(action_map[production_rule], valid_actions_field))
if not action_sequence:
index_fields = [IndexField(-1, valid_actions_field)]
action_sequence_field = ListField(index_fields)
fields["action_sequence"] = action_sequence_field
return Instance(fields)
| allennlp-semparse-master | allennlp_semparse/dataset_readers/grammar_based_text2sql.py |
from typing import Dict, List
import logging
import json
import glob
import os
from allennlp.common.file_utils import cached_path
from allennlp.data import DatasetReader
from allennlp.data.fields import TextField, Field, SequenceLabelField, LabelField
from allennlp.data.instance import Instance
from allennlp.data.tokenizers import Token
from allennlp.data.token_indexers import TokenIndexer, SingleIdTokenIndexer
from allennlp_semparse.common.sql import text2sql_utils
logger = logging.getLogger(__name__)
@DatasetReader.register("template_text2sql")
class TemplateText2SqlDatasetReader(DatasetReader):
"""
Reads text2sql data for the sequence tagging and template prediction baseline
from `"Improving Text to SQL Evaluation Methodology" <https://arxiv.org/abs/1806.09029>`_.
Parameters
----------
use_all_sql : ``bool``, optional (default = False)
Whether to use all of the sql queries which have identical semantics,
or whether to just use the first one.
token_indexers : ``Dict[str, TokenIndexer]``, optional (default=``{"tokens": SingleIdTokenIndexer()}``)
We use this to define the input representation for the text. See :class:`TokenIndexer`.
Note that the `output` tags will always correspond to single token IDs based on how they
are pre-tokenised in the data file.
cross_validation_split_to_exclude : ``int``, optional (default = None)
Some of the text2sql datasets are very small, so you may need to do cross validation.
        Here, you can specify an integer corresponding to a split_{int}.json file not to include
in the training set.
"""
def __init__(
self,
use_all_sql: bool = False,
token_indexers: Dict[str, TokenIndexer] = None,
cross_validation_split_to_exclude: int = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self._token_indexers = token_indexers or {"tokens": SingleIdTokenIndexer()}
self._use_all_sql = use_all_sql
self._cross_validation_split_to_exclude = str(cross_validation_split_to_exclude)
def _read(self, file_path: str):
"""
This dataset reader consumes the data from
https://github.com/jkkummerfeld/text2sql-data/tree/master/data
formatted using ``scripts/reformat_text2sql_data.py``.
Parameters
----------
file_path : ``str``, required.
For this dataset reader, file_path can either be a path to a file `or` a
path to a directory containing json files. The reason for this is because
some of the text2sql datasets require cross validation, which means they are split
up into many small files, for which you only want to exclude one.
"""
files = [
p
for p in glob.glob(file_path)
if self._cross_validation_split_to_exclude not in os.path.basename(p)
]
for path in files:
with open(cached_path(path), "r") as data_file:
data = json.load(data_file)
for sql_data in text2sql_utils.process_sql_data(data, self._use_all_sql):
template = " ".join(sql_data.sql)
yield self.text_to_instance(sql_data.text, sql_data.variable_tags, template)
def text_to_instance(
self, # type: ignore
query: List[str],
slot_tags: List[str] = None,
sql_template: str = None,
) -> Instance:
fields: Dict[str, Field] = {}
tokens = TextField([Token(t) for t in query], self._token_indexers)
fields["tokens"] = tokens
if slot_tags is not None and sql_template is not None:
slot_field = SequenceLabelField(slot_tags, tokens, label_namespace="slot_tags")
template = LabelField(sql_template, label_namespace="template_labels")
fields["slot_tags"] = slot_field
fields["template"] = template
return Instance(fields)
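# A minimal sketch of building a single tagged instance with this reader; the query,
# slot tags, and template below are invented placeholders, not real text2sql data.
def _template_reader_demo():
    reader = TemplateText2SqlDatasetReader()
    return reader.text_to_instance(
        query=["show", "me", "flights", "to", "city_name0"],
        slot_tags=["O", "O", "O", "O", "city_name0"],
        sql_template="SELECT * FROM flights WHERE to_city = 'city_name0'",
    )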
| allennlp-semparse-master | allennlp_semparse/dataset_readers/template_text2sql.py |
allennlp-semparse-master | allennlp_semparse/parsimonious_languages/__init__.py |
|
from typing import List, Dict, Callable, Set
from datetime import datetime, timedelta
import re
from collections import defaultdict
from nltk import ngrams
from allennlp.data.tokenizers import Token
TWELVE_TO_TWENTY_FOUR = 1200
HOUR_TO_TWENTY_FOUR = 100
HOURS_IN_DAY = 2400
AROUND_RANGE = 30
MINS_IN_HOUR = 60
APPROX_WORDS = ["about", "around", "approximately"]
WORDS_PRECEDING_TIME = ["at", "between", "to", "before", "after"]
def pm_map_match_to_query_value(match: str):
if len(match.rstrip("pm")) < 3: # This will match something like ``5pm``.
if match.startswith("12"):
return [int(match.rstrip("pm")) * HOUR_TO_TWENTY_FOUR]
else:
return [int(match.rstrip("pm")) * HOUR_TO_TWENTY_FOUR + TWELVE_TO_TWENTY_FOUR]
else: # This will match something like ``530pm``.
if match.startswith("12"):
return [int(match.rstrip("pm"))]
else:
return [int(match.rstrip("pm")) + TWELVE_TO_TWENTY_FOUR]
def am_map_match_to_query_value(match: str):
if len(match.rstrip("am")) < 3:
return [int(match.rstrip("am")) * HOUR_TO_TWENTY_FOUR]
else:
return [int(match.rstrip("am"))]
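# For example, these helpers map regex matches onto 24-hour query values:
# >>> pm_map_match_to_query_value("5pm")
# [1700]
# >>> pm_map_match_to_query_value("1230pm")
# [1230]
# >>> am_map_match_to_query_value("830am")
# [830]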
def get_times_from_utterance(
utterance: str,
char_offset_to_token_index: Dict[int, int],
indices_of_approximate_words: Set[int],
) -> Dict[str, List[int]]:
"""
Given an utterance, we get the numbers that correspond to times and convert them to
values that may appear in the query. For example: convert ``7pm`` to ``1900``.
"""
pm_linking_dict = _time_regex_match(
r"\d+pm",
utterance,
char_offset_to_token_index,
pm_map_match_to_query_value,
indices_of_approximate_words,
)
am_linking_dict = _time_regex_match(
r"\d+am",
utterance,
char_offset_to_token_index,
am_map_match_to_query_value,
indices_of_approximate_words,
)
oclock_linking_dict = _time_regex_match(
r"\d+ o'clock",
utterance,
char_offset_to_token_index,
lambda match: digit_to_query_time(match.rstrip(" o'clock")),
indices_of_approximate_words,
)
hours_linking_dict = _time_regex_match(
r"\d+ hours",
utterance,
char_offset_to_token_index,
lambda match: [int(match.rstrip(" hours"))],
indices_of_approximate_words,
)
times_linking_dict: Dict[str, List[int]] = defaultdict(list)
linking_dicts = [pm_linking_dict, am_linking_dict, oclock_linking_dict, hours_linking_dict]
for linking_dict in linking_dicts:
for key, value in linking_dict.items():
times_linking_dict[key].extend(value)
return times_linking_dict
def get_date_from_utterance(tokenized_utterance: List[Token], year: int = 1993) -> List[datetime]:
"""
When the year is not explicitly mentioned in the utterance, the query assumes that
it is 1993 so we do the same here. If there is no mention of the month or day then
we do not return any dates from the utterance.
"""
dates = []
utterance = " ".join([token.text for token in tokenized_utterance])
year_result = re.findall(r"199[0-4]", utterance)
if year_result:
year = int(year_result[0])
trigrams = ngrams([token.text for token in tokenized_utterance], 3)
for month, tens, digit in trigrams:
# This will match something like ``september twenty first``.
day = " ".join([tens, digit])
if month in MONTH_NUMBERS and day in DAY_NUMBERS:
try:
dates.append(datetime(year, MONTH_NUMBERS[month], DAY_NUMBERS[day]))
except ValueError:
print("invalid month day")
bigrams = ngrams([token.text for token in tokenized_utterance], 2)
for month, day in bigrams:
if month in MONTH_NUMBERS and day in DAY_NUMBERS:
# This will match something like ``september first``.
try:
dates.append(datetime(year, MONTH_NUMBERS[month], DAY_NUMBERS[day]))
except ValueError:
print("invalid month day")
fivegrams = ngrams([token.text for token in tokenized_utterance], 5)
for tens, digit, _, year_match, month in fivegrams:
# This will match something like ``twenty first of 1993 july``.
day = " ".join([tens, digit])
if month in MONTH_NUMBERS and day in DAY_NUMBERS and year_match.isdigit():
try:
dates.append(datetime(int(year_match), MONTH_NUMBERS[month], DAY_NUMBERS[day]))
except ValueError:
print("invalid month day")
if month in MONTH_NUMBERS and digit in DAY_NUMBERS and year_match.isdigit():
try:
dates.append(datetime(int(year_match), MONTH_NUMBERS[month], DAY_NUMBERS[digit]))
except ValueError:
print("invalid month day")
return dates
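# For example, with the default year, a tokenized utterance containing
# "september twenty first" yields ``datetime(1993, 9, 21)`` via the trigram pass,
# while "july second" combined with an explicit "1991" elsewhere in the utterance
# yields ``datetime(1991, 7, 2)`` via the bigram pass.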
def get_numbers_from_utterance(
utterance: str, tokenized_utterance: List[Token]
) -> Dict[str, List[int]]:
"""
Given an utterance, this function finds all the numbers that are in the action space. Since we need to
keep track of linking scores, we represent the numbers as a dictionary, where the keys are the string
representation of the number and the values are lists of the token indices that trigger that number.
"""
# When we use a regex to find numbers or strings, we need a mapping from
# the character offset to the index of the token that triggered it.
char_offset_to_token_index = {
token.idx: token_index for token_index, token in enumerate(tokenized_utterance)
}
# We want to look up later for each time whether it appears after a word
# such as "about" or "approximately".
indices_of_approximate_words = {
index for index, token in enumerate(tokenized_utterance) if token.text in APPROX_WORDS
}
indices_of_words_preceding_time = {
index
for index, token in enumerate(tokenized_utterance)
if token.text in WORDS_PRECEDING_TIME
}
indices_of_am_pm = {
index for index, token in enumerate(tokenized_utterance) if token.text in {"am", "pm"}
}
number_linking_dict: Dict[str, List[int]] = defaultdict(list)
for token_index, token in enumerate(tokenized_utterance):
if token.text.isdigit():
if (
token_index - 1 in indices_of_words_preceding_time
and token_index + 1 not in indices_of_am_pm
):
for time in digit_to_query_time(token.text):
number_linking_dict[str(time)].append(token_index)
times_linking_dict = get_times_from_utterance(
utterance, char_offset_to_token_index, indices_of_approximate_words
)
for key, value in times_linking_dict.items():
number_linking_dict[key].extend(value)
for index, token in enumerate(tokenized_utterance):
for number in NUMBER_TRIGGER_DICT.get(token.text, []):
if index - 1 in indices_of_approximate_words:
for approx_time in get_approximate_times([int(number)]):
number_linking_dict[str(approx_time)].append(index)
else:
number_linking_dict[number].append(index)
return number_linking_dict
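# For example, in the tokenized utterance ``["flights", "after", "5"]`` the digit
# token follows a word in ``WORDS_PRECEDING_TIME`` and is not followed by "am"/"pm",
# so both values from ``digit_to_query_time("5")`` are linked to it, giving
# ``{"500": [2], "1700": [2]}``.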
def get_time_range_start_from_utterance(
utterance: str, tokenized_utterance: List[Token]
) -> Dict[str, List[int]]:
late_indices = {
index for index, token in enumerate(tokenized_utterance) if token.text == "late"
}
time_range_start_linking_dict: Dict[str, List[int]] = defaultdict(list)
for token_index, token in enumerate(tokenized_utterance):
for time in TIME_RANGE_START_DICT.get(token.text, []):
if token_index - 1 not in late_indices:
time_range_start_linking_dict[str(time)].append(token_index)
bigrams = ngrams([token.text for token in tokenized_utterance], 2)
for bigram_index, bigram in enumerate(bigrams):
for time in TIME_RANGE_START_DICT.get(" ".join(bigram), []):
time_range_start_linking_dict[str(time)].extend([bigram_index, bigram_index + 1])
return time_range_start_linking_dict
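# For example, for the tokenized utterance ``["late", "afternoon"]`` the unigram
# "afternoon" is suppressed because it is preceded by "late", and the bigram
# "late afternoon" links "1600" to both token indices, giving ``{"1600": [0, 1]}``.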
def get_time_range_end_from_utterance(
utterance: str, tokenized_utterance: List[Token]
) -> Dict[str, List[int]]:
early_indices = {
index for index, token in enumerate(tokenized_utterance) if token.text == "early"
}
time_range_end_linking_dict: Dict[str, List[int]] = defaultdict(list)
for token_index, token in enumerate(tokenized_utterance):
for time in TIME_RANGE_END_DICT.get(token.text, []):
if token_index - 1 not in early_indices:
time_range_end_linking_dict[str(time)].append(token_index)
bigrams = ngrams([token.text for token in tokenized_utterance], 2)
for bigram_index, bigram in enumerate(bigrams):
for time in TIME_RANGE_END_DICT.get(" ".join(bigram), []):
time_range_end_linking_dict[str(time)].extend([bigram_index, bigram_index + 1])
return time_range_end_linking_dict
def get_costs_from_utterance(
utterance: str, tokenized_utterance: List[Token]
) -> Dict[str, List[int]]:
dollars_indices = {
index
for index, token in enumerate(tokenized_utterance)
if token.text == "dollars" or token.text == "dollar"
}
costs_linking_dict: Dict[str, List[int]] = defaultdict(list)
for token_index, token in enumerate(tokenized_utterance):
if token_index + 1 in dollars_indices and token.text.isdigit():
costs_linking_dict[token.text].append(token_index)
return costs_linking_dict
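# For example, for the tokenized utterance ``["fares", "under", "200", "dollars"]``
# the digit token immediately precedes "dollars", so the result is ``{"200": [2]}``.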
def get_flight_numbers_from_utterance(
utterance: str, tokenized_utterance: List[Token]
) -> Dict[str, List[int]]:
indices_words_preceding_flight_number = {
index
for index, token in enumerate(tokenized_utterance)
if token.text in {"flight", "number"}
or token.text.upper() in AIRLINE_CODE_LIST
or token.text.lower() in AIRLINE_CODES.keys()
}
indices_words_succeeding_flight_number = {
index for index, token in enumerate(tokenized_utterance) if token.text == "flight"
}
flight_numbers_linking_dict: Dict[str, List[int]] = defaultdict(list)
for token_index, token in enumerate(tokenized_utterance):
if token.text.isdigit():
if token_index - 1 in indices_words_preceding_flight_number:
flight_numbers_linking_dict[token.text].append(token_index)
if token_index + 1 in indices_words_succeeding_flight_number:
flight_numbers_linking_dict[token.text].append(token_index)
return flight_numbers_linking_dict
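# For example, for the tokenized utterance ``["delta", "flight", "257"]`` the digit
# token is preceded by the word "flight" (and by an airline name), so the result
# links the flight number to its token index: ``{"257": [2]}``.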
def digit_to_query_time(digit: str) -> List[int]:
"""
Given a digit in the utterance, return a list of the times that it corresponds to.
"""
if len(digit) > 2:
return [int(digit), int(digit) + TWELVE_TO_TWENTY_FOUR]
elif int(digit) % 12 == 0:
return [0, 1200, 2400]
return [
int(digit) * HOUR_TO_TWENTY_FOUR,
(int(digit) * HOUR_TO_TWENTY_FOUR + TWELVE_TO_TWENTY_FOUR) % HOURS_IN_DAY,
]
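# For example:
# >>> digit_to_query_time("7")
# [700, 1900]
# >>> digit_to_query_time("12")
# [0, 1200, 2400]
# >>> digit_to_query_time("830")
# [830, 2030]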
def get_approximate_times(times: List[int]) -> List[int]:
"""
Given a list of times that follow a word such as ``about``,
we return a list of times that could appear in the query as a result
of this. For example if ``about 7pm`` appears in the utterance, then
we also want to add ``1830`` and ``1930``.
"""
approximate_times = []
for time in times:
hour = int(time / HOUR_TO_TWENTY_FOUR) % 24
minute = time % HOUR_TO_TWENTY_FOUR
approximate_time = datetime.now()
approximate_time = approximate_time.replace(hour=hour, minute=minute)
start_time_range = approximate_time - timedelta(minutes=30)
end_time_range = approximate_time + timedelta(minutes=30)
approximate_times.extend(
[
start_time_range.hour * HOUR_TO_TWENTY_FOUR + start_time_range.minute,
end_time_range.hour * HOUR_TO_TWENTY_FOUR + end_time_range.minute,
]
)
return approximate_times
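# For example, ``get_approximate_times([1900])`` returns ``[1830, 1930]``: the times
# 30 minutes before and after 7pm.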
def _time_regex_match(
regex: str,
utterance: str,
char_offset_to_token_index: Dict[int, int],
map_match_to_query_value: Callable[[str], List[int]],
indices_of_approximate_words: Set[int],
) -> Dict[str, List[int]]:
r"""
Given a regex for matching times in the utterance, we want to convert the matches
to the values that appear in the query and the token indices they correspond to.
``char_offset_to_token_index`` is a dictionary that maps from the character offset to
the token index; we use this to look up which token a regex match corresponds to.
``indices_of_approximate_words`` are the token indices of the words such as ``about`` or
``approximately``. We use this to check if a regex match is preceded by one of these words.
If it is, we also want to add the times that define this approximate time range.
``map_match_to_query_value`` is a function that converts the regex matches to the
values that appear in the query. For example, we may pass in a regex such as ``\d+pm``
that matches times such as ``7pm``. ``map_match_to_query_value`` would be a function that
takes ``7pm`` as input and returns ``1900``.
"""
linking_scores_dict: Dict[str, List[int]] = defaultdict(list)
number_regex = re.compile(regex)
for match in number_regex.finditer(utterance):
query_values = map_match_to_query_value(match.group())
# If the time appears after a word like ``about`` then we also add
# the times that mark the start and end of the allowed range.
approximate_times = []
if char_offset_to_token_index.get(match.start(), 0) - 1 in indices_of_approximate_words:
approximate_times.extend(get_approximate_times(query_values))
query_values.extend(approximate_times)
if match.start() in char_offset_to_token_index:
for query_value in query_values:
linking_scores_dict[str(query_value)].extend(
[
char_offset_to_token_index[match.start()],
char_offset_to_token_index[match.start()] + 1,
]
)
return linking_scores_dict
def get_trigger_dict(
trigger_lists: List[List[str]], trigger_dicts: List[Dict[str, List[str]]]
) -> Dict[str, List[str]]:
merged_trigger_dict: Dict[str, List[str]] = defaultdict(list)
for trigger_list in trigger_lists:
for trigger in trigger_list:
merged_trigger_dict[trigger.lower()].append(trigger)
for trigger_dict in trigger_dicts:
for key, value in trigger_dict.items():
merged_trigger_dict[key.lower()].extend(value)
return merged_trigger_dict
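# For example, merging the trigger list ``["BOSTON"]`` with the trigger dict
# ``{"BOSTON": ["BBOS"]}`` gives ``{"boston": ["BOSTON", "BBOS"]}``: keys are
# lowercased and the triggered values are accumulated.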
def convert_to_string_list_value_dict(trigger_dict: Dict[str, int]) -> Dict[str, List[str]]:
return {key: [str(value)] for key, value in trigger_dict.items()}
AIRLINE_CODES = {
"alaska": ["AS"],
"alliance": ["3J"],
"alpha": ["7V"],
"america west": ["HP"],
"american": ["AA"],
"american trans": ["TZ"],
"argentina": ["AR"],
"atlantic": ["DH"],
"atlantic.": ["EV"],
"braniff.": ["BE"],
"british": ["BA"],
"business": ["HQ"],
"canada": ["AC"],
"canadian": ["CP"],
"carnival": ["KW"],
"christman": ["SX"],
"colgan": ["9L"],
"comair": ["OH"],
"continental": ["CO"],
"czecho": ["OK"],
"delta": ["DL"],
"eastern": ["EA"],
"express": ["9E"],
"grand": ["QD"],
"lufthansa": ["LH"],
"mesaba": ["XJ"],
"mgm": ["MG"],
"midwest": ["YX"],
"nation": ["NX"],
"nationair": ["NX"],
"northeast": ["2V"],
"northwest": ["NW"],
"ontario": ["GX"],
"ontario express": ["9X"],
"precision": ["RP"],
"royal": ["AT"],
"sabena": ["SN"],
"sky": ["OO"],
"south": ["WN"],
"states": ["9N"],
"thai": ["TG"],
"tower": ["FF"],
"twa": ["TW"],
"united": ["UA"],
"us": ["US"],
"west": ["OE"],
"wisconson": ["ZW"],
"world": ["RZ"],
}
CITY_CODES = {
"ATLANTA": ["MATL"],
"BALTIMORE": ["BBWI"],
"BOSTON": ["BBOS"],
"BURBANK": ["BBUR"],
"CHARLOTTE": ["CCLT"],
"CHICAGO": ["CCHI"],
"CINCINNATI": ["CCVG"],
"CLEVELAND": ["CCLE"],
"COLUMBUS": ["CCMH"],
"DALLAS": ["DDFW"],
"DENVER": ["DDEN"],
"DETROIT": ["DDTT"],
"FORT WORTH": ["FDFW"],
"HOUSTON": ["HHOU"],
"KANSAS CITY": ["MMKC"],
"LAS VEGAS": ["LLAS"],
"LONG BEACH": ["LLGB"],
"LOS ANGELES": ["LLAX"],
"MEMPHIS": ["MMEM"],
"MIAMI": ["MMIA"],
"MILWAUKEE": ["MMKE"],
"MINNEAPOLIS": ["MMSP"],
"MONTREAL": ["YYMQ"],
"NASHVILLE": ["BBNA"],
"NEW YORK": ["NNYC"],
"NEWARK": ["JNYC"],
"OAKLAND": ["OOAK"],
"ONTARIO": ["OONT"],
"ORLANDO": ["OORL"],
"PHILADELPHIA": ["PPHL"],
"PHOENIX": ["PPHX"],
"PITTSBURGH": ["PPIT"],
"SALT LAKE CITY": ["SSLC"],
"SAN DIEGO": ["SSAN"],
"SAN FRANCISCO": ["SSFO"],
"SAN JOSE": ["SSJC"],
"SEATTLE": ["SSEA"],
"ST. LOUIS": ["SSTL"],
"ST. PAUL": ["SMSP"],
"ST. PETERSBURG": ["STPA"],
"TACOMA": ["TSEA"],
"TAMPA": ["TTPA"],
"TORONTO": ["YYTO"],
"WASHINGTON": ["WWAS"],
"WESTCHESTER COUNTY": ["HHPN"],
}
MONTH_NUMBERS = {
"january": 1,
"february": 2,
"march": 3,
"april": 4,
"may": 5,
"june": 6,
"july": 7,
"august": 8,
"september": 9,
"october": 10,
"november": 11,
"december": 12,
}
GROUND_SERVICE = {
"air taxi": ["AIR TAXI OPERATION"],
"car": ["RENTAL CAR"],
"limo": ["LIMOUSINE"],
"limousine": ["LIMOUSINE"],
"rapid": ["RAPID TRANSIT"],
"rental": ["RENTAL CAR"],
"taxi": ["TAXI"],
}
MISC_STR = {
"every day": ["DAILY"],
"saint petersburg": ["ST. PETERSBURG"],
"saint louis": ["ST. LOUIS"],
}
DAY_NUMBERS = {
"first": 1,
"second": 2,
"third": 3,
"fourth": 4,
"fifth": 5,
"sixth": 6,
"seventh": 7,
"eighth": 8,
"ninth": 9,
"tenth": 10,
"eleventh": 11,
"twelfth": 12,
"thirteenth": 13,
"fourteenth": 14,
"fifteenth": 15,
"sixteenth": 16,
"seventeenth": 17,
"eighteenth": 18,
"nineteenth": 19,
"twentieth": 20,
"twenty first": 21,
"twenty second": 22,
"twenty third": 23,
"twenty fourth": 24,
"twenty fifth": 25,
"twenty sixth": 26,
"twenty seventh": 27,
"twenty eighth": 28,
"twenty ninth": 29,
"thirtieth": 30,
"thirty first": 31,
}
MISC_TIME_TRIGGERS = {
"lunch": ["1400"],
"noon": ["1200"],
"early evening": ["1800", "2000"],
"morning": ["0", "1200"],
"night": ["1800", "2400"],
}
TIME_RANGE_START_DICT = {
"morning": ["0"],
"mornings": ["1200"],
"afternoon": ["1200"],
"afternoons": ["1200"],
"after noon": ["1200"],
"late afternoon": ["1600"],
"evening": ["1800"],
"late evening": ["2000"],
}
TIME_RANGE_END_DICT = {
"early morning": ["800"],
"morning": ["1200", "800"],
"mornings": ["1200", "800"],
"early afternoon": ["1400"],
"afternoon": ["1800"],
"afternoons": ["1800"],
"after noon": ["1800"],
"evening": ["2200"],
}
ALL_TABLES = {
"aircraft": [
"aircraft_code",
"aircraft_description",
"capacity",
"manufacturer",
"basic_type",
"propulsion",
"wide_body",
"pressurized",
],
"airline": ["airline_name", "airline_code"],
"airport": [
"airport_code",
"airport_name",
"airport_location",
"state_code",
"country_name",
"time_zone_code",
"minimum_connect_time",
],
"airport_service": [
"city_code",
"airport_code",
"miles_distant",
"direction",
"minutes_distant",
],
"city": ["city_code", "city_name", "state_code", "country_name", "time_zone_code"],
"class_of_service": ["booking_class", "rank", "class_description"],
"date_day": ["day_name"],
"days": ["days_code", "day_name"],
"equipment_sequence": ["aircraft_code_sequence", "aircraft_code"],
"fare": [
"fare_id",
"from_airport",
"to_airport",
"fare_basis_code",
"fare_airline",
"restriction_code",
"one_direction_cost",
"round_trip_cost",
"round_trip_required",
],
"fare_basis": [
"fare_basis_code",
"booking_class",
"class_type",
"premium",
"economy",
"discounted",
"night",
"season",
"basis_days",
],
"flight": [
"flight_id",
"flight_days",
"from_airport",
"to_airport",
"departure_time",
"arrival_time",
"airline_flight",
"airline_code",
"flight_number",
"aircraft_code_sequence",
"meal_code",
"stops",
"connections",
"dual_carrier",
"time_elapsed",
],
"flight_fare": ["flight_id", "fare_id"],
"flight_leg": ["flight_id", "leg_number", "leg_flight"],
"flight_stop": [
"flight_id",
"stop_number",
"stop_days",
"stop_airport",
"arrival_time",
"arrival_airline",
"arrival_flight_number",
"departure_time",
"departure_airline",
"departure_flight_number",
"stop_time",
],
"food_service": ["meal_code", "meal_number", "compartment", "meal_description"],
"ground_service": ["city_code", "airport_code", "transport_type", "ground_fare"],
"month": ["month_number", "month_name"],
"restriction": [
"restriction_code",
"advance_purchase",
"stopovers",
"saturday_stay_required",
"minimum_stay",
"maximum_stay",
"application",
"no_discounts",
],
"state": ["state_code", "state_name", "country_name"],
}
TABLES_WITH_STRINGS = {
"airline": ["airline_code", "airline_name"],
"city": ["city_name", "state_code", "city_code"],
"fare": ["round_trip_required", "fare_basis_code", "restriction_code"],
"flight": ["airline_code", "flight_days"],
"flight_stop": ["stop_airport"],
"airport": ["airport_code", "airport_name"],
"state": ["state_name", "state_code"],
"fare_basis": ["fare_basis_code", "class_type", "economy", "booking_class"],
"class_of_service": ["booking_class", "class_description"],
"aircraft": ["basic_type", "manufacturer", "aircraft_code", "propulsion"],
"restriction": ["restriction_code"],
"ground_service": ["transport_type"],
"days": ["day_name", "days_code"],
"food_service": ["meal_description", "compartment"],
}
DAY_OF_WEEK = ["MONDAY", "TUESDAY", "WEDNESDAY", "THURSDAY", "FRIDAY", "SATURDAY", "SUNDAY"]
FARE_BASIS_CODE = [
"B",
"BH",
"BHW",
"BHX",
"BL",
"BLW",
"BLX",
"BN",
"BOW",
"BOX",
"BW",
"BX",
"C",
"CN",
"F",
"FN",
"H",
"HH",
"HHW",
"HHX",
"HL",
"HLW",
"HLX",
"HOW",
"HOX",
"J",
"K",
"KH",
"KL",
"KN",
"LX",
"M",
"MH",
"ML",
"MOW",
"P",
"Q",
"QH",
"QHW",
"QHX",
"QLW",
"QLX",
"QO",
"QOW",
"QOX",
"QW",
"QX",
"S",
"U",
"V",
"VHW",
"VHX",
"VW",
"VX",
"Y",
"YH",
"YL",
"YN",
"YW",
"YX",
]
MEALS = ["BREAKFAST", "LUNCH", "SNACK", "DINNER"]
RESTRICT_CODES = [
"AP/2",
"AP/6",
"AP/12",
"AP/20",
"AP/21",
"AP/57",
"AP/58",
"AP/60",
"AP/75",
"EX/9",
"EX/13",
"EX/14",
"EX/17",
"EX/19",
]
STATES = [
"ARIZONA",
"CALIFORNIA",
"COLORADO",
"DISTRICT OF COLUMBIA",
"FLORIDA",
"GEORGIA",
"ILLINOIS",
"INDIANA",
"MASSACHUSETTS",
"MARYLAND",
"MICHIGAN",
"MINNESOTA",
"MISSOURI",
"NORTH CAROLINA",
"NEW JERSEY",
"NEVADA",
"NEW YORK",
"OHIO",
"ONTARIO",
"PENNSYLVANIA",
"QUEBEC",
"TENNESSEE",
"TEXAS",
"UTAH",
"WASHINGTON",
"WISCONSIN",
]
STATE_CODES = [
"TN",
"MA",
"CA",
"MD",
"IL",
"OH",
"NC",
"CO",
"TX",
"MI",
"NY",
"IN",
"NJ",
"NV",
"GA",
"FL",
"MO",
"WI",
"MN",
"PA",
"AZ",
"WA",
"UT",
"DC",
"PQ",
"ON",
]
DAY_OF_WEEK_DICT = {"weekdays": ["MONDAY", "TUESDAY", "WEDNESDAY", "THURSDAY", "FRIDAY"]}
YES_NO = {"one way": ["NO"], "economy": ["YES"]}
CITY_AIRPORT_CODES = {
"atlanta": ["ATL"],
"boston": ["BOS"],
"baltimore": ["BWI"],
"charlotte": ["CLT"],
"dallas": ["DFW"],
"detroit": ["DTW"],
"houston": ["IAH"],
"la guardia": ["LGA"],
"love field": ["DAL"],
"los angeles": ["LAX"],
"oakland": ["OAK"],
"philadelphia": ["PHL"],
"pittsburgh": ["PIT"],
"san francisco": ["SFO"],
"toronto": ["YYZ"],
}
AIRPORT_CODES = [
"ATL",
"NA",
"OS",
"UR",
"WI",
"CLE",
"CLT",
"CMH",
"CVG",
"DAL",
"DCA",
"DEN",
"DET",
"DFW",
"DTW",
"EWR",
"HOU",
"HPN",
"IAD",
"IAH",
"IND",
"JFK",
"LAS",
"LAX",
"LGA",
"LG",
"MCI",
"MCO",
"MDW",
"MEM",
"MIA",
"MKE",
"MSP",
"OAK",
"ONT",
"ORD",
"PHL",
"PHX",
"PIE",
"PIT",
"SAN",
"SEA",
"SFO",
"SJC",
"SLC",
"STL",
"TPA",
"YKZ",
"YMX",
"YTZ",
"YUL",
"YYZ",
]
AIRLINE_CODE_LIST = [
"AR",
"3J",
"AC",
"9X",
"ZW",
"AS",
"7V",
"AA",
"TZ",
"HP",
"DH",
"EV",
"BE",
"BA",
"HQ",
"CP",
"KW",
"SX",
"9L",
"OH",
"CO",
"OK",
"DL",
"9E",
"QD",
"LH",
"XJ",
"MG",
"YX",
"NX",
"2V",
"NW",
"RP",
"AT",
"SN",
"OO",
"WN",
"TG",
"FF",
"9N",
"TW",
"RZ",
"UA",
"US",
"OE",
"EA",
]
CITIES = [
"NASHVILLE",
"BOSTON",
"BURBANK",
"BALTIMORE",
"CHICAGO",
"CLEVELAND",
"CHARLOTTE",
"COLUMBUS",
"CINCINNATI",
"DENVER",
"DALLAS",
"DETROIT",
"FORT WORTH",
"HOUSTON",
"WESTCHESTER COUNTY",
"INDIANAPOLIS",
"NEWARK",
"LAS VEGAS",
"LOS ANGELES",
"LONG BEACH",
"ATLANTA",
"MEMPHIS",
"MIAMI",
"KANSAS CITY",
"MILWAUKEE",
"MINNEAPOLIS",
"NEW YORK",
"OAKLAND",
"ONTARIO",
"ORLANDO",
"PHILADELPHIA",
"PHOENIX",
"PITTSBURGH",
"ST. PAUL",
"SAN DIEGO",
"SEATTLE",
"SAN FRANCISCO",
"SAN JOSE",
"SALT LAKE CITY",
"ST. LOUIS",
"ST. PETERSBURG",
"TACOMA",
"TAMPA",
"WASHINGTON",
"MONTREAL",
"TORONTO",
]
CITY_CODE_LIST = [
"BBNA",
"BBOS",
"BBUR",
"BBWI",
"CCHI",
"CCLE",
"CCLT",
"CCMH",
"CCVG",
"DDEN",
"DDFW",
"DDTT",
"FDFW",
"HHOU",
"HHPN",
"IIND",
"JNYC",
"LLAS",
"LLAX",
"LLGB",
"MATL",
"MMEM",
"MMIA",
"MMKC",
"MMKE",
"MMSP",
"NNYC",
"OOAK",
"OONT",
"OORL",
"PPHL",
"PPHX",
"PPIT",
"SMSP",
"SSAN",
"SSEA",
"SSFO",
"SSJC",
"SSLC",
"SSTL",
"STPA",
"TSEA",
"TTPA",
"WWAS",
"YYMQ",
"YYTO",
]
CLASS = ["COACH", "BUSINESS", "FIRST", "THRIFT", "STANDARD", "SHUTTLE"]
AIRCRAFT_MANUFACTURERS = ["BOEING", "MCDONNELL DOUGLAS", "FOKKER"]
AIRCRAFT_BASIC_CODES = ["DC9", "737", "767", "747", "DC10", "757", "MD80"]
DAY_OF_WEEK_INDEX = {idx: [day] for idx, day in enumerate(DAY_OF_WEEK)}
TRIGGER_LISTS = [
CITIES,
AIRPORT_CODES,
STATES,
STATE_CODES,
FARE_BASIS_CODE,
CLASS,
AIRLINE_CODE_LIST,
DAY_OF_WEEK,
CITY_CODE_LIST,
MEALS,
RESTRICT_CODES,
AIRCRAFT_MANUFACTURERS,
AIRCRAFT_BASIC_CODES,
]
TRIGGER_DICTS = [
CITY_AIRPORT_CODES,
AIRLINE_CODES,
CITY_CODES,
GROUND_SERVICE,
DAY_OF_WEEK_DICT,
YES_NO,
MISC_STR,
]
ATIS_TRIGGER_DICT = get_trigger_dict(TRIGGER_LISTS, TRIGGER_DICTS)
NUMBER_TRIGGER_DICT: Dict[str, List[str]] = get_trigger_dict([], [MISC_TIME_TRIGGERS])
| allennlp-semparse-master | allennlp_semparse/parsimonious_languages/contexts/atis_tables.py |
from allennlp_semparse.parsimonious_languages.contexts.atis_sql_table_context import (
AtisSqlTableContext,
)
| allennlp-semparse-master | allennlp_semparse/parsimonious_languages/contexts/__init__.py |
"""
An ``AtisSqlTableContext`` represents the SQL context in which an utterance appears
for the Atis dataset, with the grammar and the valid actions.
"""
from typing import List, Dict, Tuple
import sqlite3
from copy import deepcopy
from parsimonious.grammar import Grammar
from allennlp.common.file_utils import cached_path
from allennlp_semparse.parsimonious_languages.contexts.sql_context_utils import (
initialize_valid_actions,
format_grammar_string,
format_action,
)
# This is the base definition of the SQL grammar in a simplified sort of
# EBNF notation, and represented as a dictionary. The keys are the nonterminals and the values
# are the possible expansions of the nonterminal where each element in the list is one possible expansion.
# Rules that differ only in capitalization of keywords are mapped to the same action by
# the ``SqlVisitor``. The nonterminal of the first rule is the starting symbol.
# In addition to the grammar here, we add ``col_ref``, ``table_name`` based on the tables
# that ``SqlTableContext`` is initialized with. ``number`` is initialized to
# be empty and later on updated based on the utterances. ``biexpr`` is altered based on the
# database to column references with strings that are allowed to appear in that column.
# We then create additional nonterminals for each column that may be used as a string constraint
# in the query.
# For example, to include city names as strings:
#
# grammar_dictionary['biexpr'] = \
# ['( "city" ws "." ws "city_name" binop ws city_city_name_strings )', ...
# grammar_dictionary['city_city_name_strings'] = ['"NASHVILLE"', '"BOSTON"', ...
GRAMMAR_DICTIONARY = {}
GRAMMAR_DICTIONARY["statement"] = ['query ws ";" ws']
GRAMMAR_DICTIONARY["query"] = [
'(ws "(" ws "SELECT" ws distinct ws select_results ws '
'"FROM" ws table_refs ws where_clause ws group_by_clause ws ")" ws)',
'(ws "(" ws "SELECT" ws distinct ws select_results ws '
'"FROM" ws table_refs ws where_clause ws ")" ws)',
'(ws "SELECT" ws distinct ws select_results ws ' '"FROM" ws table_refs ws where_clause ws)',
]
GRAMMAR_DICTIONARY["select_results"] = ["col_refs", "agg"]
GRAMMAR_DICTIONARY["agg"] = [
'( agg_func ws "(" ws col_ref ws ")" )',
'(agg_func ws "(" ws col ws ")" )',
]
GRAMMAR_DICTIONARY["agg_func"] = ['"MIN"', '"min"', '"MAX"', '"max"', '"COUNT"', '"count"']
GRAMMAR_DICTIONARY["col_refs"] = ['(col_ref ws "," ws col_refs)', "(col_ref)"]
GRAMMAR_DICTIONARY["table_refs"] = ['(table_name ws "," ws table_refs)', "(table_name)"]
GRAMMAR_DICTIONARY["where_clause"] = [
'("WHERE" ws "(" ws conditions ws ")" ws)',
'("WHERE" ws conditions ws)',
]
GRAMMAR_DICTIONARY["group_by_clause"] = ['("GROUP" ws "BY" ws col_ref)']
GRAMMAR_DICTIONARY["conditions"] = [
"(condition ws conj ws conditions)",
'(condition ws conj ws "(" ws conditions ws ")")',
'("(" ws conditions ws ")" ws conj ws conditions)',
'("(" ws conditions ws ")")',
'("not" ws conditions ws )',
'("NOT" ws conditions ws )',
"condition",
]
GRAMMAR_DICTIONARY["condition"] = ["in_clause", "ternaryexpr", "biexpr"]
GRAMMAR_DICTIONARY["in_clause"] = ['(ws col_ref ws "IN" ws query ws)']
GRAMMAR_DICTIONARY["biexpr"] = ["( col_ref ws binaryop ws value)", "(value ws binaryop ws value)"]
GRAMMAR_DICTIONARY["binaryop"] = [
'"+"',
'"-"',
'"*"',
'"/"',
'"="',
'">="',
'"<="',
'">"',
'"<"',
'"is"',
'"IS"',
]
GRAMMAR_DICTIONARY["ternaryexpr"] = [
'(col_ref ws "not" ws "BETWEEN" ws value ws "AND" ws value ws)',
'(col_ref ws "NOT" ws "BETWEEN" ws value ws "AND" ws value ws)',
'(col_ref ws "BETWEEN" ws value ws "AND" ws value ws)',
]
GRAMMAR_DICTIONARY["value"] = ['("not" ws pos_value)', '("NOT" ws pos_value)', "(pos_value)"]
GRAMMAR_DICTIONARY["pos_value"] = [
'("ALL" ws query)',
'("ANY" ws query)',
"number",
"boolean",
"col_ref",
"agg_results",
'"NULL"',
]
GRAMMAR_DICTIONARY["agg_results"] = [
'(ws "(" ws "SELECT" ws distinct ws agg ws ' '"FROM" ws table_name ws where_clause ws ")" ws)',
'(ws "SELECT" ws distinct ws agg ws "FROM" ws table_name ws where_clause ws)',
]
GRAMMAR_DICTIONARY["boolean"] = ['"true"', '"false"']
GRAMMAR_DICTIONARY["ws"] = [r'~"\s*"i']
GRAMMAR_DICTIONARY["conj"] = ['"AND"', '"OR"']
GRAMMAR_DICTIONARY["distinct"] = ['("DISTINCT")', '("")']
GRAMMAR_DICTIONARY["number"] = ['""']
KEYWORDS = [
'"SELECT"',
'"FROM"',
'"MIN"',
'"MAX"',
'"COUNT"',
'"WHERE"',
'"NOT"',
'"IN"',
'"LIKE"',
'"IS"',
'"BETWEEN"',
'"AND"',
'"ALL"',
'"ANY"',
'"NULL"',
'"OR"',
'"DISTINCT"',
]
NUMERIC_NONTERMINALS = [
"number",
"time_range_start",
"time_range_end",
"fare_round_trip_cost",
"fare_one_direction_cost",
"flight_number",
"day_number",
"month_number",
"year_number",
]
class AtisSqlTableContext:
"""
An ``AtisSqlTableContext`` represents the SQL context with a grammar of SQL and the valid actions
based on the schema of the tables that it represents.
Parameters
----------
all_tables: ``Dict[str, List[str]]``
A dictionary representing the SQL tables in the dataset; the keys are the names of the tables
and they map to lists of the table's column names.
tables_with_strings: ``Dict[str, List[str]]``
A dictionary representing the SQL tables that we want to generate strings for. The keys are the
names of the tables that map to lists of the table's column names.
database_file : ``str``, optional
The path to the sqlite database file (a URL is also accepted and cached locally). We query the
sqlite database to find the strings
that are allowed.
"""
def __init__(
self,
all_tables: Dict[str, List[str]] = None,
tables_with_strings: Dict[str, List[str]] = None,
database_file: str = None,
) -> None:
self.all_tables = all_tables
self.tables_with_strings = tables_with_strings
if database_file:
self.database_file = cached_path(database_file)
self.connection = sqlite3.connect(self.database_file)
self.cursor = self.connection.cursor()
grammar_dictionary, strings_list = self.create_grammar_dict_and_strings()
self.grammar_dictionary: Dict[str, List[str]] = grammar_dictionary
self.strings_list: List[Tuple[str, str]] = strings_list
self.grammar_string: str = self.get_grammar_string()
self.grammar: Grammar = Grammar(self.grammar_string)
self.valid_actions: Dict[str, List[str]] = initialize_valid_actions(self.grammar, KEYWORDS)
if database_file:
self.connection.close()
def get_grammar_dictionary(self) -> Dict[str, List[str]]:
return self.grammar_dictionary
def get_valid_actions(self) -> Dict[str, List[str]]:
return self.valid_actions
def create_grammar_dict_and_strings(self) -> Tuple[Dict[str, List[str]], List[Tuple[str, str]]]:
grammar_dictionary = deepcopy(GRAMMAR_DICTIONARY)
strings_list = []
if self.all_tables:
grammar_dictionary["table_name"] = sorted(
[f'"{table}"' for table in list(self.all_tables.keys())], reverse=True
)
grammar_dictionary["col_ref"] = ['"*"', "agg"]
all_columns = []
for table, columns in self.all_tables.items():
grammar_dictionary["col_ref"].extend(
[f'("{table}" ws "." ws "{column}")' for column in columns]
)
all_columns.extend(columns)
grammar_dictionary["col_ref"] = sorted(grammar_dictionary["col_ref"], reverse=True)
grammar_dictionary["col"] = sorted(
[f'"{column}"' for column in all_columns], reverse=True
)
biexprs = []
if self.tables_with_strings:
for table, columns in self.tables_with_strings.items():
biexprs.extend(
[
f'("{table}" ws "." ws "{column}" ws binaryop ws {table}_{column}_string)'
for column in columns
]
)
for column in columns:
self.cursor.execute(f"SELECT DISTINCT {table} . {column} FROM {table}")
results = self.cursor.fetchall()
# Almost all the query values are in the database, so we hardcode the rare exception here.
if table == "flight" and column == "airline_code":
results.append(("EA",))
strings_list.extend(
[
(
format_action(
f"{table}_{column}_string",
str(row[0]),
is_string="number" not in column,
is_number="number" in column,
),
str(row[0]),
)
for row in results
]
)
if column.endswith("number"):
grammar_dictionary[f"{table}_{column}_string"] = sorted(
[f'"{str(row[0])}"' for row in results], reverse=True
)
else:
grammar_dictionary[f"{table}_{column}_string"] = sorted(
[f"\"'{str(row[0])}'\"" for row in results], reverse=True
)
grammar_dictionary["biexpr"] = sorted(biexprs, reverse=True) + [
"( col_ref ws binaryop ws value)",
"(value ws binaryop ws value)",
]
return grammar_dictionary, strings_list
def get_grammar_string(self):
return format_grammar_string(self.grammar_dictionary)
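# A minimal usage sketch (mirroring how ``AtisWorld`` constructs this context; the
# database file is used to expand the string-valued columns into the grammar):
#
#   from allennlp_semparse.parsimonious_languages.contexts import atis_tables
#   context = AtisSqlTableContext(
#       atis_tables.ALL_TABLES,
#       atis_tables.TABLES_WITH_STRINGS,
#       "https://allennlp.s3.amazonaws.com/datasets/atis/atis.db",
#   )
#   valid_actions = context.get_valid_actions()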
| allennlp-semparse-master | allennlp_semparse/parsimonious_languages/contexts/atis_sql_table_context.py |
"""
A ``Text2SqlTableContext`` represents the SQL context in which an utterance appears
for the any of the text2sql datasets, with the grammar and the valid actions.
"""
from typing import List, Dict
from sqlite3 import Cursor
from allennlp_semparse.common.sql.text2sql_utils import TableColumn
from allennlp_semparse.common.sql.text2sql_utils import column_has_numeric_type
from allennlp_semparse.common.sql.text2sql_utils import column_has_string_type
GRAMMAR_DICTIONARY = {}
GRAMMAR_DICTIONARY["statement"] = ['(query ws ";")', "(query ws)"]
GRAMMAR_DICTIONARY["query"] = [
"(ws select_core ws groupby_clause ws orderby_clause ws limit)",
"(ws select_core ws groupby_clause ws orderby_clause)",
"(ws select_core ws groupby_clause ws limit)",
"(ws select_core ws orderby_clause ws limit)",
"(ws select_core ws groupby_clause)",
"(ws select_core ws orderby_clause)",
"(ws select_core)",
]
GRAMMAR_DICTIONARY["select_core"] = [
"(select_with_distinct ws select_results ws from_clause ws where_clause)",
"(select_with_distinct ws select_results ws from_clause)",
"(select_with_distinct ws select_results ws where_clause)",
"(select_with_distinct ws select_results)",
]
GRAMMAR_DICTIONARY["select_with_distinct"] = ['(ws "SELECT" ws "DISTINCT")', '(ws "SELECT")']
GRAMMAR_DICTIONARY["select_results"] = [
'(ws select_result ws "," ws select_results)',
"(ws select_result)",
]
GRAMMAR_DICTIONARY["select_result"] = [
'"*"',
'(table_name ws ".*")',
'(expr ws "AS" wsp name)',
"expr",
'(col_ref ws "AS" wsp name)',
]
GRAMMAR_DICTIONARY["from_clause"] = ['ws "FROM" ws source']
GRAMMAR_DICTIONARY["source"] = ['(ws single_source ws "," ws source)', "(ws single_source)"]
GRAMMAR_DICTIONARY["single_source"] = ["source_table", "source_subq"]
GRAMMAR_DICTIONARY["source_table"] = ['(table_name ws "AS" wsp name)', "table_name"]
GRAMMAR_DICTIONARY["source_subq"] = [
'("(" ws query ws ")" ws "AS" ws name)',
'("(" ws query ws ")")',
]
GRAMMAR_DICTIONARY["limit"] = ['("LIMIT" ws "1")', '("LIMIT" ws number)']
GRAMMAR_DICTIONARY["where_clause"] = [
'(ws "WHERE" wsp expr ws where_conj)',
'(ws "WHERE" wsp expr)',
]
GRAMMAR_DICTIONARY["where_conj"] = ['(ws "AND" wsp expr ws where_conj)', '(ws "AND" wsp expr)']
GRAMMAR_DICTIONARY["groupby_clause"] = [
'(ws "GROUP" ws "BY" ws group_clause ws "HAVING" ws expr)',
'(ws "GROUP" ws "BY" ws group_clause)',
]
GRAMMAR_DICTIONARY["group_clause"] = ['(ws expr ws "," ws group_clause)', "(ws expr)"]
GRAMMAR_DICTIONARY["orderby_clause"] = ['ws "ORDER" ws "BY" ws order_clause']
GRAMMAR_DICTIONARY["order_clause"] = ['(ordering_term ws "," ws order_clause)', "ordering_term"]
GRAMMAR_DICTIONARY["ordering_term"] = ["(ws expr ws ordering)", "(ws expr)"]
GRAMMAR_DICTIONARY["ordering"] = ['(ws "ASC")', '(ws "DESC")']
GRAMMAR_DICTIONARY["col_ref"] = ['(table_name ws "." ws column_name)', "table_name"]
GRAMMAR_DICTIONARY["table_name"] = ["name"]
GRAMMAR_DICTIONARY["column_name"] = ["name"]
GRAMMAR_DICTIONARY["ws"] = [r'~"\s*"i']
GRAMMAR_DICTIONARY["wsp"] = [r'~"\s+"i']
GRAMMAR_DICTIONARY["name"] = [r'~"[a-zA-Z]\w*"i']
GRAMMAR_DICTIONARY["expr"] = [
"in_expr",
# Like expressions.
'(value wsp "LIKE" wsp string)',
# Between expressions.
'(value ws "BETWEEN" wsp value ws "AND" wsp value)',
# Binary expressions.
"(value ws binaryop wsp expr)",
# Unary expressions.
"(unaryop ws expr)",
# Two types of null check expressions.
'(col_ref ws "IS" ws "NOT" ws "NULL")',
'(col_ref ws "IS" ws "NULL")',
"source_subq",
"value",
]
GRAMMAR_DICTIONARY["in_expr"] = [
'(value wsp "NOT" wsp "IN" wsp string_set)',
'(value wsp "IN" wsp string_set)',
'(value wsp "NOT" wsp "IN" wsp expr)',
'(value wsp "IN" wsp expr)',
]
GRAMMAR_DICTIONARY["value"] = [
"parenval",
'"YEAR(CURDATE())"',
"number",
"boolean",
"function",
"col_ref",
"string",
]
GRAMMAR_DICTIONARY["parenval"] = ['"(" ws expr ws ")"']
GRAMMAR_DICTIONARY["function"] = [
'(fname ws "(" ws "DISTINCT" ws arg_list_or_star ws ")")',
'(fname ws "(" ws arg_list_or_star ws ")")',
]
GRAMMAR_DICTIONARY["arg_list_or_star"] = ["arg_list", '"*"']
GRAMMAR_DICTIONARY["arg_list"] = ['(expr ws "," ws arg_list)', "expr"]
# TODO(MARK): Massive hack, remove and modify the grammar accordingly
GRAMMAR_DICTIONARY["number"] = [r'~"\d*\.?\d+"i', "'3'", "'4'"]
GRAMMAR_DICTIONARY["string_set"] = ['ws "(" ws string_set_vals ws ")"']
GRAMMAR_DICTIONARY["string_set_vals"] = ['(string ws "," ws string_set_vals)', "string"]
GRAMMAR_DICTIONARY["string"] = ["~\"'.*?'\"i"]
GRAMMAR_DICTIONARY["fname"] = ['"COUNT"', '"SUM"', '"MAX"', '"MIN"', '"AVG"', '"ALL"']
GRAMMAR_DICTIONARY["boolean"] = ['"true"', '"false"']
# TODO(MARK): This is not tight enough. AND/OR are strictly boolean value operators.
GRAMMAR_DICTIONARY["binaryop"] = [
'"+"',
'"-"',
'"*"',
'"/"',
'"="',
'"<>"',
'">="',
'"<="',
'">"',
'"<"',
'"AND"',
'"OR"',
'"LIKE"',
]
GRAMMAR_DICTIONARY["unaryop"] = ['"+"', '"-"', '"not"', '"NOT"']
GLOBAL_DATASET_VALUES: Dict[str, List[str]] = {
# These are used to check values are present, or numbers of authors.
"scholar": ["0", "1", "2"],
# 0 is used for "sea level", 750 is a "major" lake, and 150000 is a "major" city.
"geography": ["0", "750", "150000"],
# This defines what an "above average" restaurant is.
"restaurants": ["2.5"],
}
def update_grammar_with_tables(
grammar_dictionary: Dict[str, List[str]], schema: Dict[str, List[TableColumn]]
) -> None:
table_names = sorted([f'"{table}"' for table in list(schema.keys())], reverse=True)
grammar_dictionary["table_name"] = table_names
all_columns = set()
for table in schema.values():
all_columns.update([column.name for column in table])
sorted_columns = sorted([f'"{column}"' for column in all_columns], reverse=True)
grammar_dictionary["column_name"] = sorted_columns
def update_grammar_with_table_values(
grammar_dictionary: Dict[str, List[str]], schema: Dict[str, List[TableColumn]], cursor: Cursor
) -> None:
for table_name, columns in schema.items():
for column in columns:
cursor.execute(f"SELECT DISTINCT {table_name}.{column.name} FROM {table_name}")
results = [x[0] for x in cursor.fetchall()]
if column_has_string_type(column):
productions = sorted([f'"{str(result)}"' for result in results], reverse=True)
grammar_dictionary["string"].extend(productions)
elif column_has_numeric_type(column):
productions = sorted([f'"{str(result)}"' for result in results], reverse=True)
grammar_dictionary["number"].extend(productions)
def update_grammar_with_global_values(grammar_dictionary: Dict[str, List[str]], dataset_name: str):
values = GLOBAL_DATASET_VALUES.get(dataset_name, [])
values_for_grammar = [f'"{str(value)}"' for value in values]
grammar_dictionary["value"] = values_for_grammar + grammar_dictionary["value"]
def update_grammar_to_be_variable_free(grammar_dictionary: Dict[str, List[str]]):
"""
SQL is a predominantly variable-free language in terms of simple usage, in the
sense that most queries do not create references to variables which are not
already static tables in a dataset. However, it is possible to do this via
derived tables. If we don't require this functionality, we can tighten the
grammar, because we don't need to support aliased tables.
"""
# Tables in variable free grammars cannot be aliased, so we
# remove this functionality from the grammar.
grammar_dictionary["select_result"] = ['"*"', '(table_name ws ".*")', "expr"]
# Similarly, collapse the definition of a source table
# to not contain aliases and modify references to subqueries.
grammar_dictionary["single_source"] = ["table_name", '("(" ws query ws ")")']
del grammar_dictionary["source_subq"]
del grammar_dictionary["source_table"]
grammar_dictionary["expr"] = [
"in_expr",
'(value wsp "LIKE" wsp string)',
'(value ws "BETWEEN" wsp value ws "AND" wsp value)',
"(value ws binaryop wsp expr)",
"(unaryop ws expr)",
'(col_ref ws "IS" ws "NOT" ws "NULL")',
'(col_ref ws "IS" ws "NULL")',
# This used to be source_subq - now
# we don't need aliases, so we can collapse it to queries.
'("(" ws query ws ")")',
"value",
]
# Finally, remove the ability to reference an arbitrary name,
# because now we don't have aliased tables, we don't need
# to recognise new variables.
del grammar_dictionary["name"]
def update_grammar_with_untyped_entities(grammar_dictionary: Dict[str, List[str]]) -> None:
"""
Variables can be treated as numbers or strings if their type can be inferred -
however, that can be difficult, so instead, we can just treat them all as values
and be a bit looser on the typing we allow in our grammar. Here we just remove
all references to number and string from the grammar, replacing them with value.
"""
grammar_dictionary["string_set_vals"] = ['(value ws "," ws string_set_vals)', "value"]
grammar_dictionary["value"].remove("string")
grammar_dictionary["value"].remove("number")
grammar_dictionary["limit"] = ['("LIMIT" ws "1")', '("LIMIT" ws value)']
grammar_dictionary["expr"][1] = '(value wsp "LIKE" wsp value)'
del grammar_dictionary["string"]
del grammar_dictionary["number"]
def update_grammar_values_with_variables(
grammar_dictionary: Dict[str, List[str]], prelinked_entities: Dict[str, Dict[str, str]]
) -> None:
for variable, _ in prelinked_entities.items():
grammar_dictionary["value"] = [f"\"'{variable}'\""] + grammar_dictionary["value"]
def update_grammar_numbers_and_strings_with_variables(
grammar_dictionary: Dict[str, List[str]],
prelinked_entities: Dict[str, Dict[str, str]],
columns: Dict[str, TableColumn],
) -> None:
for variable, info in prelinked_entities.items():
variable_column = info["type"].upper()
matched_column = columns.get(variable_column, None)
if matched_column is not None:
# Try to infer the variable's type by matching it to a column in
# the database. If we can't, we just add it as a value.
if column_has_numeric_type(matched_column):
grammar_dictionary["number"] = [f"\"'{variable}'\""] + grammar_dictionary["number"]
elif column_has_string_type(matched_column):
grammar_dictionary["string"] = [f"\"'{variable}'\""] + grammar_dictionary["string"]
else:
grammar_dictionary["value"] = [f"\"'{variable}'\""] + grammar_dictionary["value"]
# Otherwise, try to infer by looking at the actual value:
else:
try:
# This is what happens if you try and do type inference
# in a grammar which parses _strings_ in _Python_.
# We're just seeing if the python interpreter can convert
# it to a float - if it can, we assume it's a number.
float(info["text"])
is_numeric = True
except ValueError:
is_numeric = False
if is_numeric:
grammar_dictionary["number"] = [f"\"'{variable}'\""] + grammar_dictionary["number"]
elif info["text"].replace(" ", "").isalpha():
grammar_dictionary["string"] = [f"\"'{variable}'\""] + grammar_dictionary["string"]
else:
grammar_dictionary["value"] = [f"\"'{variable}'\""] + grammar_dictionary["value"]
| allennlp-semparse-master | allennlp_semparse/parsimonious_languages/contexts/text2sql_table_context.py |
import re
from typing import List, Dict, Set
from collections import defaultdict
from sys import exc_info
from six import reraise
from parsimonious.expressions import Literal, OneOf, Sequence
from parsimonious.nodes import Node, NodeVisitor
from parsimonious.grammar import Grammar
from parsimonious.exceptions import VisitationError, UndefinedLabel
WHITESPACE_REGEX = re.compile(" wsp |wsp | wsp| ws |ws | ws")
def format_grammar_string(grammar_dictionary: Dict[str, List[str]]) -> str:
"""
Formats a dictionary of production rules into the string format expected
by the Parsimonious Grammar class.
"""
grammar_string = "\n".join(
[
f"{nonterminal} = {' / '.join(right_hand_side)}"
for nonterminal, right_hand_side in grammar_dictionary.items()
]
)
return grammar_string.replace("\\", "\\\\")
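# For example:
# >>> print(format_grammar_string({"query": ["(select_core)"], "boolean": ['"true"', '"false"']}))
# query = (select_core)
# boolean = "true" / "false"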
def initialize_valid_actions(
grammar: Grammar, keywords_to_uppercase: List[str] = None
) -> Dict[str, List[str]]:
"""
We initialize the valid actions with the global actions. These include the
valid actions that result from the grammar and also those that result from
the tables provided. The keys represent the nonterminals in the grammar
and the values are lists of the valid actions of that nonterminal.
"""
valid_actions: Dict[str, Set[str]] = defaultdict(set)
for key in grammar:
rhs = grammar[key]
# Sequence represents a series of expressions that match pieces of the text in order.
# Eg. A -> B C
if isinstance(rhs, Sequence):
valid_actions[key].add(
format_action(
key,
" ".join(rhs._unicode_members()),
keywords_to_uppercase=keywords_to_uppercase,
)
)
# OneOf represents a series of expressions, one of which matches the text.
# Eg. A -> B / C
elif isinstance(rhs, OneOf):
for option in rhs._unicode_members():
valid_actions[key].add(
format_action(key, option, keywords_to_uppercase=keywords_to_uppercase)
)
# A string literal, eg. "A"
elif isinstance(rhs, Literal):
if rhs.literal != "":
valid_actions[key].add(
format_action(
key, repr(rhs.literal), keywords_to_uppercase=keywords_to_uppercase
)
)
else:
valid_actions[key] = set()
valid_action_strings = {key: sorted(value) for key, value in valid_actions.items()}
return valid_action_strings
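# For example, a grammar built from ``boolean = "true" / "false"`` contributes
# ``{"boolean": ['boolean -> ["false"]', 'boolean -> ["true"]']}`` (each nonterminal's
# actions are returned in sorted order).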
def format_action(
nonterminal: str,
right_hand_side: str,
is_string: bool = False,
is_number: bool = False,
keywords_to_uppercase: List[str] = None,
) -> str:
"""
This function formats an action as it appears in models. It
splits productions based on the special `ws` and `wsp` rules,
which are used in grammars to denote whitespace, and then
rejoins these tokens into a formatted, comma-separated list.
Importantly, note that it `does not` split on spaces in
the grammar string, because these might not correspond
to spaces in the language the grammar recognises.
Parameters
----------
nonterminal : ``str``, required.
The nonterminal in the action.
right_hand_side : ``str``, required.
The right hand side of the action
(i.e the thing which is produced).
is_string : ``bool``, optional (default = False).
Whether the production produces a string.
If it does, it is formatted as ``nonterminal -> ['string']``
is_number : ``bool``, optional, (default = False).
Whether the production produces a number.
If it does, it is formatted as ``nonterminal -> ['number']``
keywords_to_uppercase: ``List[str]``, optional, (default = None)
Keywords in the grammar to uppercase. In the case of sql,
this might be SELECT, MAX etc.
"""
keywords_to_uppercase = keywords_to_uppercase or []
if right_hand_side.upper() in keywords_to_uppercase:
right_hand_side = right_hand_side.upper()
if is_string:
return f"{nonterminal} -> [\"'{right_hand_side}'\"]"
elif is_number:
return f'{nonterminal} -> ["{right_hand_side}"]'
else:
right_hand_side = right_hand_side.lstrip("(").rstrip(")")
child_strings = [token for token in WHITESPACE_REGEX.split(right_hand_side) if token]
child_strings = [
tok.upper() if tok.upper() in keywords_to_uppercase else tok for tok in child_strings
]
return f"{nonterminal} -> [{', '.join(child_strings)}]"
def action_sequence_to_sql(action_sequences: List[str]) -> str:
# Convert an action sequence like ['statement -> [query, ";"]', ...] to the
# SQL string.
query = []
for action in action_sequences:
nonterminal, right_hand_side = action.split(" -> ")
right_hand_side_tokens = right_hand_side[1:-1].split(", ")
if nonterminal == "statement":
query.extend(right_hand_side_tokens)
else:
for query_index, token in list(enumerate(query)):
if token == nonterminal:
query = query[:query_index] + right_hand_side_tokens + query[query_index + 1 :]
break
return " ".join([token.strip('"') for token in query])
class SqlVisitor(NodeVisitor):
"""
``SqlVisitor`` performs a depth-first traversal of the AST. It takes the parse tree
and gives us an action sequence that resulted in that parse. Since the visitor has mutable
state, we define a new ``SqlVisitor`` for each query. To get the action sequence, we create
a ``SqlVisitor`` and call parse on it, which returns a list of actions. Ex.
sql_visitor = SqlVisitor(grammar_string)
action_sequence = sql_visitor.parse(query)
Importantly, this ``SqlVisitor`` skips over ``ws`` and ``wsp`` nodes,
because they do not hold any meaning, and make an action sequence
much longer than it needs to be.
Parameters
----------
grammar : ``Grammar``
A Grammar object that we use to parse the text.
keywords_to_uppercase: ``List[str]``, optional, (default = None)
Keywords in the grammar to uppercase. In the case of sql,
this might be SELECT, MAX etc.
"""
def __init__(self, grammar: Grammar, keywords_to_uppercase: List[str] = None) -> None:
self.action_sequence: List[str] = []
self.grammar: Grammar = grammar
self.keywords_to_uppercase = keywords_to_uppercase or []
def generic_visit(self, node: Node, visited_children: List[None]) -> List[str]:
self.add_action(node)
if node.expr.name == "statement":
return self.action_sequence
return []
def add_action(self, node: Node) -> None:
"""
For each node, we accumulate the rules that generated its children in a list.
"""
if node.expr.name and node.expr.name not in ["ws", "wsp"]:
nonterminal = f"{node.expr.name} -> "
if isinstance(node.expr, Literal):
right_hand_side = f'["{node.text}"]'
else:
child_strings = []
for child in node.__iter__():
if child.expr.name in ["ws", "wsp"]:
continue
if child.expr.name != "":
child_strings.append(child.expr.name)
else:
child_right_side_string = child.expr._as_rhs().lstrip("(").rstrip(")")
child_right_side_list = [
tok for tok in WHITESPACE_REGEX.split(child_right_side_string) if tok
]
child_right_side_list = [
tok.upper() if tok.upper() in self.keywords_to_uppercase else tok
for tok in child_right_side_list
]
child_strings.extend(child_right_side_list)
right_hand_side = "[" + ", ".join(child_strings) + "]"
rule = nonterminal + right_hand_side
self.action_sequence = [rule] + self.action_sequence
def visit(self, node):
"""
See the ``NodeVisitor`` visit method. This just changes the order in which
we visit nonterminals from right-to-left to left-to-right.
"""
method = getattr(self, "visit_" + node.expr_name, self.generic_visit)
# Call that method, and show where in the tree it failed if it blows
# up.
try:
# Changing this to reverse here!
return method(node, [self.visit(child) for child in reversed(list(node))])
except (VisitationError, UndefinedLabel):
# Don't catch and re-wrap already-wrapped exceptions.
raise
except self.unwrapped_exceptions:
raise
except Exception:
# Catch any exception, and tack on a parse tree so it's easier to
# see where it went wrong.
exc_class, exc, traceback = exc_info()
reraise(VisitationError, VisitationError(exc, exc_class, node), traceback)
| allennlp-semparse-master | allennlp_semparse/parsimonious_languages/contexts/sql_context_utils.py |
from typing import List, Dict, Tuple, Set, Callable
from collections import defaultdict
from copy import copy
import numpy
from nltk import ngrams, bigrams
from parsimonious.grammar import Grammar
from parsimonious.expressions import Expression, OneOf, Sequence, Literal
from allennlp.data.tokenizers import Token, Tokenizer, SpacyTokenizer
from allennlp_semparse.parsimonious_languages.contexts import atis_tables
from allennlp_semparse.parsimonious_languages.contexts.atis_sql_table_context import (
AtisSqlTableContext,
KEYWORDS,
NUMERIC_NONTERMINALS,
)
from allennlp_semparse.parsimonious_languages.contexts.sql_context_utils import (
SqlVisitor,
format_action,
initialize_valid_actions,
)
def get_strings_from_utterance(tokenized_utterance: List[Token]) -> Dict[str, List[int]]:
"""
Based on the current utterance, return a dictionary where the keys are the strings in
the database that map to lists of the token indices that they are linked to.
"""
string_linking_scores: Dict[str, List[int]] = defaultdict(list)
for index, token in enumerate(tokenized_utterance):
for string in atis_tables.ATIS_TRIGGER_DICT.get(token.text.lower(), []):
string_linking_scores[string].append(index)
token_bigrams = bigrams([token.text for token in tokenized_utterance])
for index, token_bigram in enumerate(token_bigrams):
for string in atis_tables.ATIS_TRIGGER_DICT.get(" ".join(token_bigram).lower(), []):
string_linking_scores[string].extend([index, index + 1])
trigrams = ngrams([token.text for token in tokenized_utterance], 3)
for index, trigram in enumerate(trigrams):
if trigram[0] == "st":
natural_language_key = f"st. {trigram[2]}".lower()
else:
natural_language_key = " ".join(trigram).lower()
for string in atis_tables.ATIS_TRIGGER_DICT.get(natural_language_key, []):
string_linking_scores[string].extend([index, index + 1, index + 2])
return string_linking_scores
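# For example, a tokenized utterance containing the single word "boston" links every
# database string triggered by "boston" (the city name, its city code, and so on) to
# that token's index, while the bigram and trigram passes handle multi-word triggers
# such as "kansas city" and "st. louis".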
class AtisWorld:
"""
World representation for the Atis SQL domain. This class has a ``SqlTableContext`` which holds the base
grammar; it then augments this grammar by constraining each column to the values that are allowed in it.
Parameters
----------
utterances: ``List[str]``
A list of utterances in the interaction, the last element in this list is the
current utterance that we are interested in.
tokenizer: ``Tokenizer``, optional (default=``SpacyTokenizer()``)
We use this tokenizer to tokenize the utterances.
"""
database_file = "https://allennlp.s3.amazonaws.com/datasets/atis/atis.db"
sql_table_context = None
def __init__(self, utterances: List[str], tokenizer: Tokenizer = None) -> None:
if AtisWorld.sql_table_context is None:
AtisWorld.sql_table_context = AtisSqlTableContext(
atis_tables.ALL_TABLES, atis_tables.TABLES_WITH_STRINGS, AtisWorld.database_file
)
self.utterances: List[str] = utterances
self.tokenizer = tokenizer if tokenizer else SpacyTokenizer()
self.tokenized_utterances = [
self.tokenizer.tokenize(utterance) for utterance in self.utterances
]
self.dates = self._get_dates()
self.linked_entities = self._get_linked_entities()
entities, linking_scores = self._flatten_entities()
# This has shape (num_entities, num_utterance_tokens).
self.linking_scores: numpy.ndarray = linking_scores
self.entities: List[str] = entities
self.grammar: Grammar = self._update_grammar()
self.valid_actions = initialize_valid_actions(self.grammar, KEYWORDS)
def _update_grammar(self):
"""
We create a new ``Grammar`` object from the one in ``AtisSqlTableContext``, that also
has the new entities that are extracted from the utterance. Stitching together the expressions
to form the grammar is a little tedious here, but it is worth it because we don't have to create
a new grammar from scratch. Creating a new grammar is expensive because we have many production
rules that have all database values in the column on the right hand side. We update the expressions
bottom up, since the higher level expressions may refer to the lower level ones. For example, the
ternary expression will refer to the start and end times.
"""
# This will give us a shallow copy. We have to be careful here because the ``Grammar`` object
# contains ``Expression`` objects that have tuples containing the members of that expression.
# We have to create new sub-expression objects so that the original grammar is not mutated.
new_grammar = copy(AtisWorld.sql_table_context.grammar)
for numeric_nonterminal in NUMERIC_NONTERMINALS:
self._add_numeric_nonterminal_to_grammar(numeric_nonterminal, new_grammar)
self._update_expression_reference(new_grammar, "pos_value", "number")
ternary_expressions = [
self._get_sequence_with_spacing(
new_grammar,
[
new_grammar["col_ref"],
Literal("BETWEEN"),
new_grammar["time_range_start"],
Literal("AND"),
new_grammar["time_range_end"],
],
),
self._get_sequence_with_spacing(
new_grammar,
[
new_grammar["col_ref"],
Literal("NOT"),
Literal("BETWEEN"),
new_grammar["time_range_start"],
Literal("AND"),
new_grammar["time_range_end"],
],
),
self._get_sequence_with_spacing(
new_grammar,
[
new_grammar["col_ref"],
Literal("not"),
Literal("BETWEEN"),
new_grammar["time_range_start"],
Literal("AND"),
new_grammar["time_range_end"],
],
),
]
new_grammar["ternaryexpr"] = OneOf(*ternary_expressions, name="ternaryexpr")
self._update_expression_reference(new_grammar, "condition", "ternaryexpr")
new_binary_expressions = []
fare_round_trip_cost_expression = self._get_sequence_with_spacing(
new_grammar,
[
Literal("fare"),
Literal("."),
Literal("round_trip_cost"),
new_grammar["binaryop"],
new_grammar["fare_round_trip_cost"],
],
)
new_binary_expressions.append(fare_round_trip_cost_expression)
fare_one_direction_cost_expression = self._get_sequence_with_spacing(
new_grammar,
[
Literal("fare"),
Literal("."),
Literal("one_direction_cost"),
new_grammar["binaryop"],
new_grammar["fare_one_direction_cost"],
],
)
new_binary_expressions.append(fare_one_direction_cost_expression)
flight_number_expression = self._get_sequence_with_spacing(
new_grammar,
[
Literal("flight"),
Literal("."),
Literal("flight_number"),
new_grammar["binaryop"],
new_grammar["flight_number"],
],
)
new_binary_expressions.append(flight_number_expression)
if self.dates:
year_binary_expression = self._get_sequence_with_spacing(
new_grammar,
[
Literal("date_day"),
Literal("."),
Literal("year"),
new_grammar["binaryop"],
new_grammar["year_number"],
],
)
month_binary_expression = self._get_sequence_with_spacing(
new_grammar,
[
Literal("date_day"),
Literal("."),
Literal("month_number"),
new_grammar["binaryop"],
new_grammar["month_number"],
],
)
day_binary_expression = self._get_sequence_with_spacing(
new_grammar,
[
Literal("date_day"),
Literal("."),
Literal("day_number"),
new_grammar["binaryop"],
new_grammar["day_number"],
],
)
new_binary_expressions.extend(
[year_binary_expression, month_binary_expression, day_binary_expression]
)
new_binary_expressions = new_binary_expressions + list(new_grammar["biexpr"].members)
new_grammar["biexpr"] = OneOf(*new_binary_expressions, name="biexpr")
self._update_expression_reference(new_grammar, "condition", "biexpr")
return new_grammar
def _get_numeric_database_values(self, nonterminal: str) -> List[str]:
return sorted(
[
value[1]
for key, value in self.linked_entities["number"].items()
if value[0] == nonterminal
],
reverse=True,
)
def _add_numeric_nonterminal_to_grammar(self, nonterminal: str, new_grammar: Grammar) -> None:
numbers = self._get_numeric_database_values(nonterminal)
number_literals = [Literal(number) for number in numbers]
if number_literals:
new_grammar[nonterminal] = OneOf(*number_literals, name=nonterminal)
def _update_expression_reference(
self,
grammar: Grammar,
parent_expression_nonterminal: str,
child_expression_nonterminal: str,
) -> None:
"""
When we add a new expression, there may be other expressions that refer to
it, and we need to update those to point to the new expression.
"""
grammar[parent_expression_nonterminal].members = [
member
if member.name != child_expression_nonterminal
else grammar[child_expression_nonterminal]
for member in grammar[parent_expression_nonterminal].members
]
def _get_sequence_with_spacing(
self, new_grammar, expressions: List[Expression], name: str = ""
) -> Sequence:
"""
This is a helper method for generating sequences, since we often want a list of expressions
with whitespaces between them.
"""
expressions = [
subexpression
for expression in expressions
for subexpression in (expression, new_grammar["ws"])
]
return Sequence(*expressions, name=name)
def get_valid_actions(self) -> Dict[str, List[str]]:
return self.valid_actions
def add_dates_to_number_linking_scores(
self,
number_linking_scores: Dict[str, Tuple[str, str, List[int]]],
current_tokenized_utterance: List[Token],
) -> None:
month_reverse_lookup = {
str(number): string for string, number in atis_tables.MONTH_NUMBERS.items()
}
day_reverse_lookup = {
str(number): string for string, number in atis_tables.DAY_NUMBERS.items()
}
if self.dates:
for date in self.dates:
# Add the year linking score
entity_linking = [0 for token in current_tokenized_utterance]
for token_index, token in enumerate(current_tokenized_utterance):
if token.text == str(date.year):
entity_linking[token_index] = 1
action = format_action(
nonterminal="year_number",
right_hand_side=str(date.year),
is_number=True,
keywords_to_uppercase=KEYWORDS,
)
number_linking_scores[action] = ("year_number", str(date.year), entity_linking)
entity_linking = [0 for token in current_tokenized_utterance]
for token_index, token in enumerate(current_tokenized_utterance):
if token.text == month_reverse_lookup[str(date.month)]:
entity_linking[token_index] = 1
action = format_action(
nonterminal="month_number",
right_hand_side=str(date.month),
is_number=True,
keywords_to_uppercase=KEYWORDS,
)
number_linking_scores[action] = ("month_number", str(date.month), entity_linking)
entity_linking = [0 for token in current_tokenized_utterance]
for token_index, token in enumerate(current_tokenized_utterance):
if token.text == day_reverse_lookup[str(date.day)]:
entity_linking[token_index] = 1
for bigram_index, bigram in enumerate(
bigrams([token.text for token in current_tokenized_utterance])
):
if " ".join(bigram) == day_reverse_lookup[str(date.day)]:
entity_linking[bigram_index] = 1
entity_linking[bigram_index + 1] = 1
action = format_action(
nonterminal="day_number",
right_hand_side=str(date.day),
is_number=True,
keywords_to_uppercase=KEYWORDS,
)
number_linking_scores[action] = ("day_number", str(date.day), entity_linking)
def add_to_number_linking_scores(
self,
all_numbers: Set[str],
number_linking_scores: Dict[str, Tuple[str, str, List[int]]],
get_number_linking_dict: Callable[[str, List[Token]], Dict[str, List[int]]],
current_tokenized_utterance: List[Token],
nonterminal: str,
) -> None:
"""
        This is a helper method for adding different types of numbers (e.g. starting time ranges) as entities.
We first go through all utterances in the interaction and find the numbers of a certain type and add
them to the set ``all_numbers``, which is initialized with default values. We want to add all numbers
that occur in the interaction, and not just the current turn because the query could contain numbers
that were triggered before the current turn. For each entity, we then check if it is triggered by tokens
in the current utterance and construct the linking score.
"""
number_linking_dict: Dict[str, List[int]] = {}
for utterance, tokenized_utterance in zip(self.utterances, self.tokenized_utterances):
number_linking_dict = get_number_linking_dict(utterance, tokenized_utterance)
all_numbers.update(number_linking_dict.keys())
all_numbers_list: List[str] = sorted(all_numbers, reverse=True)
for number in all_numbers_list:
entity_linking = [0 for token in current_tokenized_utterance]
# ``number_linking_dict`` is for the last utterance here. If the number was triggered
# before the last utterance, then it will have linking scores of 0's.
for token_index in number_linking_dict.get(number, []):
if token_index < len(entity_linking):
entity_linking[token_index] = 1
action = format_action(
nonterminal, number, is_number=True, keywords_to_uppercase=KEYWORDS
)
number_linking_scores[action] = (nonterminal, number, entity_linking)
def _get_linked_entities(self) -> Dict[str, Dict[str, Tuple[str, str, List[int]]]]:
"""
        This method gets entities from the current utterance and finds which tokens they are linked to.
The entities are divided into two main groups, ``numbers`` and ``strings``. We rely on these
entities later for updating the valid actions and the grammar.
"""
current_tokenized_utterance = (
[] if not self.tokenized_utterances else self.tokenized_utterances[-1]
)
        # We generate a dictionary where the key is the type, e.g. ``number`` or ``string``.
# The value is another dictionary where the key is the action and the value is a tuple
# of the nonterminal, the string value and the linking score.
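        # Illustrative entry (hypothetical action string and scores):
        #   entity_linking_scores["number"]['time_range_end -> ["1200"]'] =
        #       ("time_range_end", "1200", [0, 0, 0, 1])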
entity_linking_scores: Dict[str, Dict[str, Tuple[str, str, List[int]]]] = {}
number_linking_scores: Dict[str, Tuple[str, str, List[int]]] = {}
string_linking_scores: Dict[str, Tuple[str, str, List[int]]] = {}
# Get time range start
self.add_to_number_linking_scores(
{"0"},
number_linking_scores,
atis_tables.get_time_range_start_from_utterance,
current_tokenized_utterance,
"time_range_start",
)
self.add_to_number_linking_scores(
{"1200"},
number_linking_scores,
atis_tables.get_time_range_end_from_utterance,
current_tokenized_utterance,
"time_range_end",
)
self.add_to_number_linking_scores(
{"0", "1", "60", "41"},
number_linking_scores,
atis_tables.get_numbers_from_utterance,
current_tokenized_utterance,
"number",
)
self.add_to_number_linking_scores(
{"0"},
number_linking_scores,
atis_tables.get_costs_from_utterance,
current_tokenized_utterance,
"fare_round_trip_cost",
)
self.add_to_number_linking_scores(
{"0"},
number_linking_scores,
atis_tables.get_costs_from_utterance,
current_tokenized_utterance,
"fare_one_direction_cost",
)
self.add_to_number_linking_scores(
{"0"},
number_linking_scores,
atis_tables.get_flight_numbers_from_utterance,
current_tokenized_utterance,
"flight_number",
)
self.add_dates_to_number_linking_scores(number_linking_scores, current_tokenized_utterance)
# Add string linking dict.
string_linking_dict: Dict[str, List[int]] = {}
for tokenized_utterance in self.tokenized_utterances:
string_linking_dict = get_strings_from_utterance(tokenized_utterance)
strings_list = AtisWorld.sql_table_context.strings_list
strings_list.append(("flight_airline_code_string -> [\"'EA'\"]", "EA"))
strings_list.append(("airline_airline_name_string-> [\"'EA'\"]", "EA"))
# We construct the linking scores for strings from the ``string_linking_dict`` here.
for string in strings_list:
entity_linking = [0 for token in current_tokenized_utterance]
# string_linking_dict has the strings and linking scores from the last utterance.
# If the string is not in the last utterance, then the linking scores will be all 0.
for token_index in string_linking_dict.get(string[1], []):
entity_linking[token_index] = 1
action = string[0]
string_linking_scores[action] = (action.split(" -> ")[0], string[1], entity_linking)
entity_linking_scores["number"] = number_linking_scores
entity_linking_scores["string"] = string_linking_scores
return entity_linking_scores
def _get_dates(self):
dates = []
for tokenized_utterance in self.tokenized_utterances:
dates.extend(atis_tables.get_date_from_utterance(tokenized_utterance))
return dates
    def _ignore_dates(self, query: str) -> str:
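        # Replace any digit that appears two tokens after a year/month_number/day_number
        # column reference (i.e. after the binary operator) with the corresponding value
        # of the first date extracted from the utterance.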
tokens = query.split(" ")
year_indices = [index for index, token in enumerate(tokens) if token.endswith("year")]
month_indices = [
index for index, token in enumerate(tokens) if token.endswith("month_number")
]
day_indices = [index for index, token in enumerate(tokens) if token.endswith("day_number")]
if self.dates:
for token_index, token in enumerate(tokens):
if token_index - 2 in year_indices and token.isdigit():
tokens[token_index] = str(self.dates[0].year)
if token_index - 2 in month_indices and token.isdigit():
tokens[token_index] = str(self.dates[0].month)
if token_index - 2 in day_indices and token.isdigit():
tokens[token_index] = str(self.dates[0].day)
return " ".join(tokens)
def get_action_sequence(self, query: str) -> List[str]:
query = self._ignore_dates(query)
sql_visitor = SqlVisitor(self.grammar, keywords_to_uppercase=KEYWORDS)
if query:
action_sequence = sql_visitor.parse(query)
return action_sequence
return []
def all_possible_actions(self) -> List[str]:
"""
Return a sorted list of strings representing all possible actions
of the form: nonterminal -> [right_hand_side]
"""
all_actions = set()
for _, action_list in self.valid_actions.items():
for action in action_list:
all_actions.add(action)
return sorted(all_actions)
def _flatten_entities(self) -> Tuple[List[str], numpy.ndarray]:
"""
When we first get the entities and the linking scores in ``_get_linked_entities``
        we represent them as dictionaries for easier updates to the grammar and valid actions.
In this method, we flatten them for the model so that the entities are represented as
a list, and the linking scores are a 2D numpy array of shape (num_entities, num_utterance_tokens).
"""
entities = []
linking_scores = []
for entity in sorted(self.linked_entities["number"]):
entities.append(entity)
linking_scores.append(self.linked_entities["number"][entity][2])
for entity in sorted(self.linked_entities["string"]):
entities.append(entity)
linking_scores.append(self.linked_entities["string"][entity][2])
return entities, numpy.array(linking_scores)
def __eq__(self, other):
if isinstance(self, other.__class__):
return all(
[
self.valid_actions == other.valid_actions,
numpy.array_equal(self.linking_scores, other.linking_scores),
self.utterances == other.utterances,
]
)
return False
| allennlp-semparse-master | allennlp_semparse/parsimonious_languages/worlds/atis_world.py |
from allennlp_semparse.parsimonious_languages.worlds.atis_world import AtisWorld
| allennlp-semparse-master | allennlp_semparse/parsimonious_languages/worlds/__init__.py |
from typing import List, Tuple, Dict
from copy import deepcopy
from sqlite3 import Cursor
import os
from parsimonious import Grammar
from parsimonious.exceptions import ParseError
from allennlp.common.checks import ConfigurationError
from allennlp_semparse.common.sql.text2sql_utils import read_dataset_schema
from allennlp_semparse.parsimonious_languages.contexts.sql_context_utils import (
    SqlVisitor,
    format_grammar_string,
    initialize_valid_actions,
)
from allennlp_semparse.parsimonious_languages.contexts.text2sql_table_context import (
    GRAMMAR_DICTIONARY,
    update_grammar_numbers_and_strings_with_variables,
    update_grammar_to_be_variable_free,
    update_grammar_values_with_variables,
    update_grammar_with_global_values,
    update_grammar_with_table_values,
    update_grammar_with_tables,
    update_grammar_with_untyped_entities,
)
class Text2SqlWorld:
"""
World representation for any of the Text2Sql datasets.
Parameters
----------
schema_path: ``str``
        A path to a schema file which we read into a dictionary
        representing the SQL tables in the dataset; the keys are the
        names of the tables, which map to lists of the tables' column names.
cursor : ``Cursor``, optional (default = None)
An optional cursor for a database, which is used to add
database values to the grammar.
    use_prelinked_entities : ``bool``, optional (default = True)
Whether or not to use the pre-linked entities from the text2sql data.
        We take this parameter here because it affects whether we need to add
table values to the grammar.
variable_free : ``bool``, optional (default = True)
Denotes whether the data being parsed by the grammar is variable free.
        If it is, the grammar is modified to be less expressive by removing
        elements which are unnecessary in that case.
use_untyped_entities : ``bool``, optional (default = False)
        Whether to add prelinked variables to the grammar as untyped values.
        If ``False``, we try to infer their types using the schema columns instead.
"""
def __init__(
self,
schema_path: str,
cursor: Cursor = None,
use_prelinked_entities: bool = True,
variable_free: bool = True,
use_untyped_entities: bool = False,
) -> None:
self.cursor = cursor
self.schema = read_dataset_schema(schema_path)
self.columns = {column.name: column for table in self.schema.values() for column in table}
self.dataset_name = os.path.basename(schema_path).split("-")[0]
self.use_prelinked_entities = use_prelinked_entities
self.variable_free = variable_free
self.use_untyped_entities = use_untyped_entities
# NOTE: This base dictionary should not be modified.
self.base_grammar_dictionary = self._initialize_grammar_dictionary(
deepcopy(GRAMMAR_DICTIONARY)
)
def get_action_sequence_and_all_actions(
self, query: List[str] = None, prelinked_entities: Dict[str, Dict[str, str]] = None
) -> Tuple[List[str], List[str]]:
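        # Returns the gold action sequence for ``query`` (``None`` if it fails to parse)
        # together with the sorted list of all actions allowed by the grammar once the
        # prelinked entities have been added.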
grammar_with_context = deepcopy(self.base_grammar_dictionary)
if not self.use_prelinked_entities and prelinked_entities is not None:
raise ConfigurationError(
"The Text2SqlWorld was specified to not use prelinked "
"entities, but prelinked entities were passed."
)
prelinked_entities = prelinked_entities or {}
if self.use_untyped_entities:
update_grammar_values_with_variables(grammar_with_context, prelinked_entities)
else:
update_grammar_numbers_and_strings_with_variables(
grammar_with_context, prelinked_entities, self.columns
)
grammar = Grammar(format_grammar_string(grammar_with_context))
valid_actions = initialize_valid_actions(grammar)
all_actions = set()
for action_list in valid_actions.values():
all_actions.update(action_list)
sorted_actions = sorted(all_actions)
sql_visitor = SqlVisitor(grammar)
try:
action_sequence = sql_visitor.parse(" ".join(query)) if query else []
except ParseError:
action_sequence = None
return action_sequence, sorted_actions
def _initialize_grammar_dictionary(
self, grammar_dictionary: Dict[str, List[str]]
) -> Dict[str, List[str]]:
# Add all the table and column names to the grammar.
update_grammar_with_tables(grammar_dictionary, self.schema)
if self.cursor is not None and not self.use_prelinked_entities:
# Now if we have strings in the table, we need to be able to
# produce them, so we find all of the strings in the tables here
# and create production rules from them. We only do this if
# we haven't pre-linked entities, because if we have, we don't
# need to be able to generate the values - just the placeholder
# symbols which link to them.
grammar_dictionary["number"] = []
grammar_dictionary["string"] = []
update_grammar_with_table_values(grammar_dictionary, self.schema, self.cursor)
# Finally, update the grammar with global, non-variable values
# found in the dataset, if present.
update_grammar_with_global_values(grammar_dictionary, self.dataset_name)
if self.variable_free:
update_grammar_to_be_variable_free(grammar_dictionary)
if self.use_untyped_entities:
update_grammar_with_untyped_entities(grammar_dictionary)
return grammar_dictionary
def is_global_rule(self, production_rule: str) -> bool:
if self.use_prelinked_entities:
            # We check the character at index -4 because a rule is not a global rule if a
            # variable index digit appears there, e.g. the 0 in 'value -> ["\'city_name0\'"]'.
if "value" in production_rule and production_rule[-4].isnumeric():
return False
return True
| allennlp-semparse-master | allennlp_semparse/parsimonious_languages/worlds/text2sql_world.py |