python_code (stringlengths 0-187k) | repo_name (stringlengths 8-46) | file_path (stringlengths 6-135) |
---|---|---|
# pylint: disable=no-self-use,invalid-name
from allennlp_rc.eval.squad_eval import normalize_answer as _normalize_answer_squad
from allennlp_rc.eval.orb_utils import get_metric_squad
from allennlp_rc.eval.orb_utils import get_metric_drop
from allennlp_rc.eval.squad2_eval import get_metric_score as get_metric_squad2
from allennlp_rc.eval.narrativeqa_eval import get_metric_score as get_metric_narrativeqa
from tests import FIXTURES_ROOT
import os
class TestSQUAD1:
def test_spaces_are_ignored(self):
assert _normalize_answer_squad("abcd") == _normalize_answer_squad("abcd ")
assert _normalize_answer_squad("abcd") == _normalize_answer_squad(" abcd ")
assert _normalize_answer_squad(" ABCD") == _normalize_answer_squad("ABCD")
def test_punctuations_are_ignored(self):
assert _normalize_answer_squad("T.J Howard") == _normalize_answer_squad("tj howard")
assert _normalize_answer_squad("7802") == _normalize_answer_squad("78.02")
def test_articles_are_ignored(self):
assert get_metric_squad("td", ["the td"]) == (1.0, 1.0)
assert get_metric_squad("the a NOT an ARTICLE the an a", ["NOT ARTICLE"]) == (1.0, 1.0)
def test_casing_is_ignored(self):
assert get_metric_squad("This was a triumph", ["tHIS Was A TRIUMPH"]) == (1.0, 1.0)
class TestDROP:
def test_articles_are_ignored(self):
assert get_metric_drop("td", ["the td"]) == (1.0, 1.0)
assert get_metric_drop("the a NOT an ARTICLE the an a", ["NOT ARTICLE"]) == (1.0, 1.0)
def test_casing_is_ignored(self):
assert get_metric_drop("This was a triumph", ["tHIS Was A TRIUMPH"]) == (1.0, 1.0)
def test_long_answers(self):
assert (
get_metric_drop(
"David Thomas",
[
"Thomas David Arquette Thomas David Arquette Thomas \
David Arquette Thomas David Arquette"
],
)
== (0.0, 0.8)
)
def test_span_order_is_ignored(self):
assert get_metric_drop(["athlete", "unprofessional"], [["unprofessional", "athlete"]]) == (
1.0,
1.0,
)
assert get_metric_drop(
["algebra", "arithmetic"], [["arithmetic", "algebra", "geometry"]]
) == (0.0, 0.67)
def test_word_order_is_not_ignored(self):
assert get_metric_drop(["athlete unprofessional"], [["unprofessional athlete"]]) == (
0.0,
1.0,
)
def test_bag_alignment_is_optimal(self):
assert get_metric_drop(
["Thomas Jefferson", "Thomas Davidson Arquette"], [["David Thomas", "Thomas Jefferson"]]
) == (0.0, 0.7)
assert get_metric_drop(
["Thomas David Arquette"], [["David Thomas", "Thomas Jefferson"]]
) == (0.0, 0.4)
def test_multiple_gold_spans(self):
assert get_metric_drop(
["Thomas David Arquette"],
[["David Thomas"], ["Thomas Jefferson"], ["David Thomas"], ["Thomas David"]],
) == (0.0, 0.8)
def test_long_gold_spans(self):
assert get_metric_drop(
["Thomas David Arquette"], [["David Thomas was eating an apple and fell to the ground"]]
) == (0.0, 0.33)
class TestNarrativeQA:
def test_ngrams(self):
assert get_metric_narrativeqa(
"David Thomas was eating an apple",
["David Thomas was eating an apple and fell to the ground"],
) == (0.43, 0.43, 0.57, 0.75, 1.0, 0.6)
assert get_metric_narrativeqa(
"David Thomas was eating an apple and fell to the ground",
["David Thomas was eating an apple", "he fell to the ground"],
) == (0.55, 0.38, 0.92, 0.75, 0.6, 1.0)
assert get_metric_narrativeqa(
"David Thomas was eating an apple and fell to the ground",
["David Thomas was eating an apple and fell to the ground"],
) == (1.0, 1.0, 1.0, 1.0, 1.0, 1.0)
class TestQuoREF:
def test_articles_are_ignored(self):
assert get_metric_drop("td", ["the td"]) == (1.0, 1.0)
assert get_metric_drop("the a NOT an ARTICLE the an a", ["NOT ARTICLE"]) == (1.0, 1.0)
def test_casing_is_ignored(self):
assert get_metric_drop("This was a triumph", ["tHIS Was A TRIUMPH"]) == (1.0, 1.0)
def test_long_answers(self):
assert (
get_metric_drop(
"David Thomas",
[
"Thomas David Arquette Thomas David Arquette Thomas \
David Arquette Thomas David Arquette"
],
)
== (0.0, 0.8)
)
def test_span_order_is_ignored(self):
assert get_metric_drop(["athlete", "unprofessional"], [["unprofessional", "athlete"]]) == (
1.0,
1.0,
)
assert get_metric_drop(
["algebra", "arithmetic"], [["arithmetic", "algebra", "geometry"]]
) == (0.0, 0.67)
def test_word_order_is_not_ignored(self):
assert get_metric_drop(["athlete unprofessional"], [["unprofessional athlete"]]) == (
0.0,
1.0,
)
def test_bag_alignment_is_optimal(self):
assert get_metric_drop(
["Thomas Jefferson", "Thomas Davidson Arquette"], [["David Thomas", "Thomas Jefferson"]]
) == (0.0, 0.7)
assert get_metric_drop(
["Thomas David Arquette"], [["David Thomas", "Thomas Jefferson"]]
) == (0.0, 0.4)
def test_multiple_gold_spans(self):
assert get_metric_drop(
["Thomas David Arquette"],
[["David Thomas"], ["Thomas Jefferson"], ["David Thomas"], ["Thomas David"]],
) == (0.0, 0.8)
def test_long_gold_spans(self):
assert get_metric_drop(
["Thomas David Arquette"], [["David Thomas was eating an apple and fell to the ground"]]
) == (0.0, 0.33)
class TestSQUAD2:
def test_impossible_answer(self):
assert get_metric_squad2("", ["news"]) == (0.0, 0.0)
assert get_metric_squad2("news", [""]) == (0.0, 0.0)
assert get_metric_squad2("", [""]) == (1.0, 1.0)
def test_functional_case(self):
assert get_metric_squad2("This was a triumph", ["a triumph"]) == (0.0, 0.5)
class TestIntegration:
def test_sample_results(self):
gold_file = FIXTURES_ROOT / "data" / "orb" / "sample_input.jsonl"
predictions_file = FIXTURES_ROOT / "data" / "orb" / "sample_predictions.json"
result = os.system(
f"python -m allennlp_rc.eval.orb_eval --dataset_file {gold_file} "
f"--prediction_file {predictions_file} --metrics_output_file /tmp/output.json"
)
assert result == 0
| allennlp-reading-comprehension-master | tests/eval/orb_eval_test.py |
| allennlp-reading-comprehension-master | tests/eval/__init__.py |
import os
from allennlp.common.testing import AllenNlpTestCase
from allennlp_rc.eval import quoref_eval
from tests import FIXTURES_ROOT
class TestQuorefEval(AllenNlpTestCase):
"""
The actual evaluation logic in Quoref's evaluation script is from DROP's script, and the
only additional thing that Quoref's script does is handling the data properly. So this class only tests the
data handling aspects. The tests we have for DROP are fairly comprehensive.
"""
def test_quoref_eval_with_original_data_format(self):
predictions_file = FIXTURES_ROOT / "data" / "quoref" / "quoref_sample_predictions.json"
gold_file = FIXTURES_ROOT / "data" / "quoref" / "quoref_sample.json"
metrics = quoref_eval.evaluate_prediction_file(predictions_file, gold_file)
assert metrics == (0.5, 0.625)
def test_quoref_eval_with_simple_format(self):
predictions_file = FIXTURES_ROOT / "data" / "quoref" / "quoref_sample_predictions.json"
gold_file = FIXTURES_ROOT / "data" / "quoref" / "quoref_sample_predictions.json"
metrics = quoref_eval.evaluate_prediction_file(predictions_file, gold_file)
assert metrics == (1.0, 1.0)
def test_quoref_eval_script(self):
predictions_file = FIXTURES_ROOT / "data" / "quoref" / "quoref_sample_predictions.json"
gold_file = FIXTURES_ROOT / "data" / "quoref" / "quoref_sample.json"
result = os.system(
f"python -m allennlp_rc.eval.quoref_eval --gold_path {gold_file} "
f"--prediction_path {predictions_file} --output_path /tmp/output.json"
)
assert result == 0
| allennlp-reading-comprehension-master | tests/eval/quoref_eval_test.py |
import io
from contextlib import redirect_stdout
from allennlp_rc.eval.drop_eval import _normalize_answer, get_metrics, evaluate_json
class TestDropEvalNormalize:
def test_number_parse(self):
assert _normalize_answer("12.0") == _normalize_answer("12.0 ")
assert _normalize_answer("12.0") == _normalize_answer("12.000")
assert _normalize_answer("12.0") == _normalize_answer("12")
assert _normalize_answer("12.0") == _normalize_answer(" 1.2e1 ")
def test_punctuations(self):
assert _normalize_answer("12.0 persons") == "12.0 persons"
assert _normalize_answer("S.K. Singh") == "sk singh"
class TestDropEvalGetMetrics:
def test_float_numbers(self):
assert get_metrics(["78"], ["78.0"]) == (1.0, 1.0)
def test_metric_is_length_aware(self):
# Overall F1 should be mean([1.0, 0.0]) = 0.5
assert get_metrics(predicted=["td"], gold=["td", "td"]) == (0.0, 0.5)
assert get_metrics("td", ["td", "td"]) == (0.0, 0.5)
# Overall F1 should be mean([1.0, 0.0]) = 0.5
assert get_metrics(predicted=["td", "td"], gold=["td"]) == (0.0, 0.5)
assert get_metrics(predicted=["td", "td"], gold="td") == (0.0, 0.5)
# F1 score is mean([0.0, 0.0, 1.0, 0.0, 0.0, 0.0])
assert get_metrics(
predicted=["the", "fat", "cat", "the fat", "fat cat", "the fat cat"], gold=["cat"]
) == (0.0, 0.17)
assert get_metrics(
predicted=["cat"], gold=["the", "fat", "cat", "the fat", "fat cat", "the fat cat"]
) == (0.0, 0.17)
# F1 score is mean([1.0, 0.5, 0.0, 0.0, 0.0, 0.0])
assert get_metrics(
predicted=["the", "fat", "cat", "the fat", "fat cat", "the fat cat"],
gold=["cat", "cat dog"],
) == (0.0, 0.25)
def test_articles_are_ignored(self):
assert get_metrics(["td"], ["the td"]) == (1.0, 1.0)
assert get_metrics(["the a NOT an ARTICLE the an a"], ["NOT ARTICLE"]) == (1.0, 1.0)
def test_f1_ignores_word_order(self):
assert get_metrics(["John Elton"], ["Elton John"]) == (0.0, 1.0)
assert get_metrics(["50 yard"], ["yard 50"]) == (0.0, 1.0)
assert get_metrics(["order word right"], ["right word order"]) == (0.0, 1.0)
def test_periods_commas_and_spaces_are_ignored(self):
assert get_metrics(["Per.i.o.d...."], [".P....e.r,,i;;;o...d,,"]) == (1.0, 1.0)
assert get_metrics(["Spa c e s "], [" Spa c e s"]) == (1.0, 1.0)
def test_splitting_on_hyphens(self):
assert get_metrics(["78-yard"], ["78 yard"]) == (1.0, 1.0)
assert get_metrics(["78 yard"], ["78-yard"]) == (1.0, 1.0)
assert get_metrics(["78"], ["78-yard"]) == (0.0, 0.67)
assert get_metrics(["78-yard"], ["78"]) == (0.0, 0.67)
def test_casing_is_ignored(self):
assert get_metrics(["This was a triumph"], ["tHIS Was A TRIUMPH"]) == (1.0, 1.0)
def test_overlap_in_correct_cases(self):
assert get_metrics(["Green bay packers"], ["Green bay packers"]) == (1.0, 1.0)
assert get_metrics(["Green bay", "packers"], ["Green bay", "packers"]) == (1.0, 1.0)
assert get_metrics(["Green", "bay", "packers"], ["Green", "bay", "packers"]) == (1.0, 1.0)
def test_simple_overlap_in_incorrect_cases(self):
assert get_metrics([""], ["army"]) == (0.0, 0.0)
assert get_metrics(["packers"], ["Green bay packers"]) == (0.0, 0.5)
assert get_metrics(["packers"], ["Green bay"]) == (0.0, 0.0)
# if the numbers in the span don't match f1 is 0
assert get_metrics(["yard"], ["36 yard td"]) == (0.0, 0.0)
assert get_metrics(["23 yards"], ["43 yards"]) == (0.0, 0.0)
# however, if the number matches, it's not given extra weight over the non-functional words
assert get_metrics(["56 yards"], ["56 yd"]) == (0.0, 0.5)
assert get_metrics(["26"], ["26 yard td"]) == (0.0, 0.5)
def test_multi_span_overlap_in_incorrect_cases(self):
# only consider bags with matching numbers if they are present
# F1 scores of: 1.0 2/3 0.0 0.0 0.0 0.0
# Average them to get F1 of 0.28
assert get_metrics(
["78-yard", "56", "28", "40", "44", "touchdown"],
["78-yard", "56 yard", "1 yard touchdown"],
) == (0.0, 0.28)
# two copies of same value will account for only one match (using optimal 1-1 bag alignment)
assert get_metrics(["23", "23 yard"], ["23-yard", "56 yards"]) == (0.0, 0.5)
# matching done at individual span level and not pooled into one global bag
assert get_metrics(["John Karman", "Joe Hardy"], ["Joe Karman", "John Hardy"]) == (0.0, 0.5)
# macro-averaging F1 over spans
assert get_metrics(
["ottoman", "Kantakouzenous"], ["ottoman", "army of Kantakouzenous"]
) == (0.0, 0.75)
def test_order_invariance(self):
assert get_metrics(["a"], ["a", "b"]) == (0, 0.5)
assert get_metrics(["b"], ["a", "b"]) == (0, 0.5)
assert get_metrics(["b"], ["b", "a"]) == (0, 0.5)
class TestDropEvalFunctional:
def test_json_loader(self):
annotation = {
"pid1": {
"qa_pairs": [
{
"answer": {"number": "1"},
"validated_answers": [{"number": "0"}],
"query_id": "qid1",
}
]
}
}
prediction = {"qid1": "1"}
assert evaluate_json(annotation, prediction) == (1.0, 1.0)
annotation = {
"pid1": {
"qa_pairs": [
{
"answer": {"spans": ["2"]},
"validated_answers": [{"number": "2"}],
"query_id": "qid1",
}
]
}
}
prediction = {"qid1": "2"}
assert evaluate_json(annotation, prediction) == (1.0, 1.0)
annotation = {
"pid1": {
"qa_pairs": [
{
"answer": {"spans": ["0"]},
"validated_answers": [{"number": "1"}, {"number": "2"}],
"query_id": "qid1",
}
]
}
}
prediction = {"qid1": "1"}
assert evaluate_json(annotation, prediction) == (1.0, 1.0)
annotation = {
"pid1": {
"qa_pairs": [
{
"answer": {"date": {"day": "17", "month": "August", "year": ""}},
"validated_answers": [{"spans": ["August"]}, {"number": "17"}],
"query_id": "qid1",
}
]
}
}
prediction = {"qid1": "17 August"}
assert evaluate_json(annotation, prediction) == (1.0, 1.0)
annotation = {
"pid1": {
"qa_pairs": [
{
"answer": {"spans": ["span1", "span2"]},
"validated_answers": [{"spans": ["span2"]}],
"query_id": "qid1",
}
]
}
}
prediction = {"qid1": "span1"}
assert evaluate_json(annotation, prediction) == (0.0, 0.5)
annotation = {
"pid1": {
"qa_pairs": [
{
"answer": {"spans": ["1"]},
"validated_answers": [{"number": "0"}],
"query_id": "qid1",
}
]
}
}
prediction = {"qid0": "2"}
assert evaluate_json(annotation, prediction) == (0.0, 0.0)
annotation = {
"pid1": {
"qa_pairs": [
{
"answer": {"spans": ["answer1"]},
"validated_answers": [{"spans": ["answer2"]}],
"query_id": "qid1",
}
]
}
}
prediction = {"qid1": "answer"}
assert evaluate_json(annotation, prediction) == (0.0, 0.0)
annotation = {
"pid1": {
"qa_pairs": [
{"answer": {"spans": ["answer1"]}, "query_id": "qid1"},
{"answer": {"spans": ["answer2"]}, "query_id": "qid2"},
]
}
}
prediction = {"qid1": "answer", "qid2": "answer2"}
assert evaluate_json(annotation, prediction) == (0.5, 0.5)
def test_type_partition_output(self):
annotation = {
"pid1": {
"qa_pairs": [
{
"answer": {"number": "5"},
"validated_answers": [{"spans": ["7-meters"]}],
"query_id": "qid1",
}
]
}
}
prediction = {"qid1": "5-yard"}
with io.StringIO() as buf, redirect_stdout(buf):
evaluate_json(annotation, prediction)
output = buf.getvalue()
lines = output.strip().split("\n")
assert lines[4] == "number: 1 (100.00%)"
annotation = {
"pid1": {
"qa_pairs": [
{
"answer": {"spans": ["7-meters"]},
"validated_answers": [{"number": "5"}],
"query_id": "qid1",
}
]
}
}
prediction = {"qid1": "5-yard"}
with io.StringIO() as buf, redirect_stdout(buf):
evaluate_json(annotation, prediction)
output = buf.getvalue()
lines = output.strip().split("\n")
assert lines[4] == "number: 1 (100.00%)"
| allennlp-reading-comprehension-master | tests/eval/drop_eval_test.py |
#!/usr/bin/env python
import glob
import logging
import os
import re
import shutil
from allennlp.commands.train import train_model_from_file
logger = logging.getLogger(__name__)
def train_fixture(config_prefix: str) -> None:
import allennlp_rc # noqa F401: Needed to register the registrables.
config_file = config_prefix + "experiment.json"
if not os.path.exists(config_file):
config_file = config_prefix + "experiment.jsonnet"
serialization_dir = config_prefix + "serialization"
# train_model_from_file doesn't like it if we have incomplete serialization
# directories, so remove them if they exist.
if os.path.exists(serialization_dir):
shutil.rmtree(serialization_dir)
# train the model
train_model_from_file(config_file, serialization_dir)
# remove unnecessary files
shutil.rmtree(os.path.join(serialization_dir, "log"))
for filename in glob.glob(os.path.join(serialization_dir, "*")):
if (
filename.endswith(".log")
or filename.endswith(".json")
or re.search(r"epoch_[0-9]+\.th$", filename)
):
os.remove(filename)
if __name__ == "__main__":
models = [
"bidaf",
"dialog_qa",
"naqanet",
"qanet",
]
for model in models:
train_fixture(f"test_fixtures/{model}/")
| allennlp-reading-comprehension-master | scripts/train_fixtures.py |
import json
import logging
import time
from typing import Iterable, List
from allennlp.common.checks import check_for_gpu
from allennlp.data import Instance
from allennlp.predictors import Predictor
from allennlp_rc.eval import SquadEmAndF1
from tqdm import tqdm
logger = logging.getLogger(__name__)
if __name__ == "__main__":
import allennlp_rc # noqa F401: Needed to register the registrables.
import argparse
logging.basicConfig(level=logging.INFO)
parser = argparse.ArgumentParser(description="Evaluation for SQuAD 1.1")
parser.add_argument("--cuda-device", type=int, default=-1)
parser.add_argument("--qa-model", type=str)
parser.add_argument(
"--input-file",
type=str,
default="https://allennlp.s3.amazonaws.com/datasets/squad/squad-dev-v1.1.json",
)
args = parser.parse_args()
# Read inputs
check_for_gpu(args.cuda_device)
predictor = Predictor.from_path(
args.qa_model, predictor_name="transformer_qa", cuda_device=args.cuda_device
)
instances = predictor._dataset_reader.read(args.input_file)
logger.info("Running on %d instances", len(instances))
# We have to make sure we put instances with the same qid all into the same batch.
def batch_instances_by_qid(instances: Iterable[Instance]) -> Iterable[List[Instance]]:
current_qid = None
current_batch = []
for instance in instances:
instance_qid = instance["metadata"]["id"]
if current_qid is None:
current_qid = instance_qid
if instance_qid == current_qid:
current_batch.append(instance)
else:
yield current_batch
current_batch = [instance]
current_qid = instance_qid
if len(current_batch) > 0:
yield current_batch
def make_batches(
instances: Iterable[Instance], batch_size: int = 64
) -> Iterable[List[Instance]]:
current_batch = []
for qid_instances in batch_instances_by_qid(instances):
if len(qid_instances) + len(current_batch) < batch_size:
current_batch.extend(qid_instances)
else:
if len(current_batch) > 0:
yield current_batch
current_batch = qid_instances
if len(current_batch) > 0:
yield current_batch
# Run model and evaluate results
last_logged_scores_time = time.monotonic()
ids_seen = set()
metric = SquadEmAndF1()
answers = {}
for batch in make_batches(tqdm(instances, desc="Evaluating instances")):
for result in predictor.predict_batch_instance(batch):
assert result["id"] not in ids_seen
ids_seen.add(result["id"])
metric(result["best_span_str"], result["answers"])
answers[result["id"]] = result["best_span_str"]
if time.monotonic() - last_logged_scores_time > 30:
exact_match, f1_score = metric.get_metric()
logger.info(json.dumps({"em": exact_match, "f1": f1_score}))
last_logged_scores_time = time.monotonic()
# Print results
exact_match, f1_score = metric.get_metric()
print(json.dumps(answers))
print(json.dumps({"em": exact_match, "f1": f1_score}))
| allennlp-reading-comprehension-master | scripts/transformer_qa_eval.py |
AGGRESSIVE_THRESHOLD = 4 #cluegiver becomes more aggressive with the ordering of the Best Clues list based on the score of the game. "Aggression" = more? fewer? words in Target_Words Group
NUM_CLUES = 10 #number of clues to return in get_next_clue
| codenames-master | config.py |
"""setup.py file for packaging ``codenames``"""
from setuptools import setup, find_packages
with open('readme.md', 'r') as readme_file:
readme = readme_file.read()
setup(
name='codenames',
version='0.0.1',
description="Codenames hackathon 2018 project!",
long_description=readme,
url='http://github.com/allenai/codenames',
author='Allen Institute for Artificial Intelligence',
author_email='[email protected]',
license='Apache',
packages=find_packages(),
python_requires='>=3.6',
zip_safe=False
)
| codenames-master | setup.py |
| codenames-master | tests/__init__.py |
from gensim.models import Word2Vec
from codenames.utils import file_utils
from codenames.utils.file_utils import read_lines
from random import choices
import random
words = [w.replace(" ", "_") for w in read_lines("codenames/gameplay/words.txt")]
all_sentences = []
for i in range(10000):
num_words = random.randint(4, 10)
all_sentences.append(
choices(words, k=num_words)
)
model = Word2Vec(
sentences=all_sentences,
size=300,
window=10,
workers=1,
negative=10,
min_count=50,
sg=1,
iter=10
)
model.wv.save_word2vec_format("tests/fixtures/model.txt")
model.save("tests/fixtures/model.dat") | codenames-master | tests/models/gensim_w2v.py |
| codenames-master | tests/models/__init__.py |
from unittest import TestCase
from codenames.guessers.heuristic_guesser import HeuristicGuesser
from codenames.embedding_handler import EmbeddingHandler
class TestHeuristicGuesser(TestCase):
def test_guess(self):
embedding_handler = EmbeddingHandler("tests/fixtures/sample_embedding.txt")
sample_board = ["boy", "girl", "woman", "man"]
guesser = HeuristicGuesser(sample_board, embedding_handler)
sample_state = [0, -1, -1, -1]
guesses = guesser.guess("boy", 1, sample_state, 0)
assert guesses == ['man', 'woman'] # count + 1
| codenames-master | tests/guessers/heuristic_guesser_test.py |
| codenames-master | tests/guessers/__init__.py |
from unittest import TestCase
from codenames.guessers.learned_guesser import LearnedGuesser
from codenames.embedding_handler import EmbeddingHandler
from codenames.guessers.policy.similarity_threshold import SimilarityThresholdPolicy
class TestLearnedGuesser(TestCase):
def test_guess(self):
embedding_handler = EmbeddingHandler("tests/fixtures/sample_embedding.txt")
sample_board = ["boy", "girl", "woman", "man"]
embed_size = list(embedding_handler.embedding.values())[0].shape[0]
policy = SimilarityThresholdPolicy(embed_size)
guesser = LearnedGuesser(sample_board, embedding_handler, policy, 0.1)
sample_state = [0, -1, -1, -1]
guesses = guesser.guess("boy", 1, sample_state, 0)
guesser.report_reward([10])
assert guesses == ['girl']
| codenames-master | tests/guessers/learned_guesser_test.py |
from unittest import TestCase
from codenames.guessers.learned_guesser import LearnedGuesser
from codenames.embedding_handler import EmbeddingHandler
from codenames.guessers.policy.similarity_threshold_game_state import SimilarityThresholdGameStatePolicy
class TestLearnedGuesser(TestCase):
def test_guess(self):
embedding_handler = EmbeddingHandler("tests/fixtures/sample_embedding.txt")
sample_board = ["boy", "girl", "woman", "man"]
embed_size = list(embedding_handler.embedding.values())[0].shape[0]
policy = SimilarityThresholdGameStatePolicy(embed_size)
guesser = LearnedGuesser(sample_board, embedding_handler, policy, 0.1)
sample_state = [0, -1, -1, -1]
guesses = guesser.guess("boy", 1, sample_state, 0)
orig_guesses = guesses.copy()
guesser.report_reward([10])
assert guesses == orig_guesses
| codenames-master | tests/guessers/learned_guesser_game_state_test.py |
| codenames-master | tests/clue_givers/__init__.py |
from unittest import TestCase
from codenames.clue_givers.wordnet_cluegiver import WordnetClueGiver
from codenames.utils.game_utils import Clue
class TestWordnetClueGiver(TestCase):
def test_clues(self):
test_board = ["woman", "man", "girl", "boy", "blue", "cat", "queen", "king"]
test_allIDs = [1, 2, 1, 2, -1, -1, 1, 2]
cg = WordnetClueGiver()
clues = cg.get_next_clue(test_board, test_allIDs, [1, 2, 1, 2, -1, -1, 1, 2], 3)
assert clues[0] == Clue(clue_word='female', intended_board_words=('girl', 'woman'), count=2)
| codenames-master | tests/clue_givers/wordnet_clue_giver_test.py |
import argparse
import os
from codenames.utils.file_utils import read_lines, read_lines_tokens
import spacy
from gensim.models import Word2Vec
import logging
def main(args):
corpus_location = args.corpus_location
save_dir = args.save_dir
workers = args.workers
output_weights_file = os.path.join(save_dir, "weights.txt")
model_file = os.path.join(save_dir, "model.pkl")
tokenized_lowercased_filename = os.path.join(os.path.dirname(corpus_location),
os.path.basename(corpus_location) + "-tokenized_lc.txt")
all_sentences = []
if not os.path.exists(tokenized_lowercased_filename):
logging.info("Writing to file: " + tokenized_lowercased_filename)
nlp = spacy.load("en")
with open(tokenized_lowercased_filename, "w") as tokenized_lc_file:
for line in read_lines(corpus_location):
sentence = line.split("\t")[1]
tokens = [w.text.lower() for w in nlp(sentence)]
sentence = ' '.join(tokens)
tokenized_lc_file.write(sentence)
tokenized_lc_file.write("\n")
all_sentences.append(sentence)
tokenized_lc_file.close()
else:
all_sentences = read_lines_tokens(tokenized_lowercased_filename)
logging.info("Found {} sentences".format(len(all_sentences)))
model = Word2Vec(
sentences=all_sentences,
size=300,
window=5,
workers=workers,
negative=5,
min_count=50,
sg=1,
iter=10
)
model.wv.save_word2vec_format(output_weights_file)
model.save(model_file)
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description='Process results from the final AMTI task')
parser.add_argument('--corpus_location',
type=str,
help='location of corpus')
parser.add_argument('--save_dir',
type=str,
help='location of training data')
parser.add_argument('--workers',
type=int,
help='number of workers')
args = parser.parse_args()
main(args)
| codenames-master | codenames/train_w2v.py |
import logging
from typing import List
from scipy.spatial.distance import cosine
import numpy as np
class EmbeddingHandler:
"""
Parameters
----------
embedding_file : `str`
Location of a text file containing embeddings in word2vec format.
"""
def __init__(self, embedding_file: str) -> None:
# str -> int
self.word_indices = {}
# int -> str
self.index_to_word = {}
# list of np arrays
embedding_weights = []
with open(embedding_file) as input_file:
idx = 0
expected_embedding_size = None
for line in input_file:
fields = line.strip().split()
if len(fields) == 2:
# This must be the first line with metadata.
expected_embedding_size = int(fields[1])
continue
if expected_embedding_size is not None and \
len(fields[1:]) != expected_embedding_size:
continue
word = fields[0].lower()
vector = np.asarray([float(x) for x in fields[1:]])
embedding_weights.append(vector)
self.word_indices[word] = idx
self.index_to_word[idx] = word
idx += 1
self.embedding_weights = np.asarray(embedding_weights)
logging.info("Found embeddings for {} words".format(len(self.embedding_weights)))
assert len(self.embedding_weights) == len(self.word_indices)
def get_word_vector(self, word: str) -> np.ndarray:
word = word.lower()
if word not in self.word_indices:
return None
return self.embedding_weights[self.word_indices[word]]
def sort_options_by_similarity(self,
clue: str,
options: List[str],
max_num_outputs: int = -1) -> List[str]:
"""
Takes a clue and returns `max_num_outputs` number of `options` sorted by similarity with
`clue`. If `max_num_outputs` is -1, returns all the options sorted by similarity.
"""
word = clue.lower()
if word not in self.word_indices:
return []
word_vector = self.get_word_vector(word)
option_vectors = []
for option in options:
if option in self.word_indices:
option_vectors.append(self.get_word_vector(option))
distances = [cosine(word_vector, option_vector) for option_vector in option_vectors]
sorted_options = [x[1] for x in sorted(zip(distances, options))]
if max_num_outputs == -1:
return sorted_options
return sorted_options[:max_num_outputs]
def get_embedding_by_index(self, idx):
return self.embedding_weights[idx]
def embed_words_list(self, words_list):
word_vectors = []
for pos_word in words_list:
v = self.get_word_vector(pos_word)
if v is not None:
word_vectors.append(v)
if len(word_vectors) == 0:
return None
return np.asarray(word_vectors)
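# A minimal usage sketch for EmbeddingHandler; the embedding path and the example words
# below are illustrative assumptions, not files or fixtures guaranteed to exist here.
def _embedding_handler_demo(embedding_file: str = "tests/fixtures/sample_embedding.txt") -> None:
    handler = EmbeddingHandler(embedding_file)
    # Look up a single word vector (returns None for out-of-vocabulary words).
    print(handler.get_word_vector("boy"))
    # Rank candidate board words by cosine similarity to the clue "boy".
    print(handler.sort_options_by_similarity("boy", ["man", "woman", "cat"], max_num_outputs=2))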
| codenames-master | codenames/embedding_handler.py |
import logging
logging.getLogger().setLevel(logging.INFO)
| codenames-master | codenames/__init__.py |
import random
import numpy as np
from scipy.spatial.distance import cosine
from embedding_handler import EmbeddingHandler
import pickle
from tqdm import tqdm
dataset_codes = {"CODENAMES": "./data/codenames_words.txt", "SCIENCE": "./data/science_words.txt",
"COMMON": "./data/common_nouns_extrinsic.txt", "PROPER": "./data/proper_nouns_extrinsic.txt"}
class Dataset():
'''
Initializes dataset from text file. Assumes dataset is a code found in dataset_codes
and that the text file has one word per line
'''
def __init__(self, dataset="CODENAMES"):
if dataset not in dataset_codes:
raise ValueError('Expected dataset to be one of ' + str(dataset_codes.keys()))
self.dataset = dataset
self.dataset_file = dataset_codes[dataset]
self.data = []
with open(self.dataset_file) as f:
for line in f.readlines():
self.data.append(line.lower().replace("\n", ""))
# These params are used for the shuffle without replacement case
self.dataset_loc = 0
self.shuffled_data = self.data.copy()
random.shuffle(self.shuffled_data)
self.dataset_size = len(self.data)
'''
The most basic random sampling from a given dataset. Assumes random samples WITH REPLACEMENT
between consecutive games. Optionally pass in the number of assassin, positive, and negative words.
By default, the number of neutral cards is determined by size-num_assassin-num_pos-num_neg.
If you want vocabulary to be restricted to words found in an EmbeddingHandler's embedding,
guesser/clue_givers parameters are used to create blacklists of words to exclude from the sampling
process.
Note that the return is of the format:
ASSASSIN;TEAM1;TEAM2;NEUTRAL
where each group consists of comma-separated words from the word list.
'''
def sample_random(self, size=25, num_assassin=1, num_pos=9, num_neg=8,
guesser: EmbeddingHandler = None,
clue_giver: EmbeddingHandler = None):
#This is the dataset restricted to valid words as per embeddings
restricted_dataset = self.data.copy()
if guesser != None:
restricted_dataset = [w for w in restricted_dataset if w in guesser.word_indices]
if clue_giver != None:
restricted_dataset = [w for w in restricted_dataset if w in clue_giver.word_indices]
board_words_str = ""
words = random.sample(restricted_dataset, size)
#Segments sampled words into gameboard's accepted format
board_words_str += ';'.join([','.join(words[0:num_assassin]), ','.join(words[num_assassin:num_assassin + num_pos]),
','.join(words[num_assassin + num_pos:num_assassin + num_pos + num_neg]),
','.join(words[num_assassin + num_pos + num_neg:])])
return board_words_str
'''
Random sampling from a given dataset. Assumes random samples WITHOUT REPLACEMENT between
consecutive games. Optionally pass in the number of assassin, positive, and negative words.
By default, the number of neutral cards is determined by size-num_assassin-num_pos-num_neg.
If you want vocabulary to be restricted to words found in an EmbeddingHandler's embedding,
guesser/clue_givers parameters are used to create blacklists of words to exclude from the sampling
process.
Note that the return is of the format:
ASSASSIN;TEAM1;TEAM2;NEUTRAL
where each group consists of comma-separated words from the word list.
'''
def sample_random_without_replacement(self, size=25, num_assassin=1, num_pos=9, num_neg=8,
guesser: EmbeddingHandler = None,
clue_giver: EmbeddingHandler = None):
blacklisted_dataset = []
if guesser != None:
blacklisted_dataset.extend([w for w in self.data if w not in guesser.word_indices])
if clue_giver != None:
blacklisted_dataset.extend([w for w in self.data if w not in clue_giver.word_indices])
board_words_str = ""
#Edge case for if we're at the end of our shuffled list
if self.dataset_loc + size > self.dataset_size:
words = []
words.extend([w for w in self.shuffled_data[self.dataset_loc:] if w not in blacklisted_dataset])
random.shuffle(self.shuffled_data)
self.dataset_loc = 0
while len(words) < size:
word = self.shuffled_data[self.dataset_loc]
if word not in blacklisted_dataset:
words.append(word)
self.dataset_loc += 1
#Normal case, just adds the next words from the shuffled dataset
else:
words = []
while len(words) < size:
word = self.shuffled_data[self.dataset_loc]
if word not in blacklisted_dataset:
words.append(word)
self.dataset_loc += 1
# Segments sampled words into gameboard's accepted format
board_words_str += ';'.join(
[','.join(words[0:num_assassin]), ','.join(words[num_assassin:num_assassin + num_pos]),
','.join(words[num_assassin + num_pos:num_assassin + num_pos + num_neg]),
','.join(words[num_assassin + num_pos + num_neg:])])
return board_words_str
'''
This samples a "challenge" dataset by returning a dataset containing clusters of similar words
(as defined by GloVe cosine similarity).
similarity: EmbeddingHandler is the embedding space we will use to calculate similarity.
If you want vocabulary to be restricted to words found in an EmbeddingHandler's embedding,
guesser/clue_givers parameters are used to create blacklists of words to exclude from the sampling
process.
In the default case: It selects a random word in the dataset as a "seed" word and
adds similar words (with cos distance less than Epsilon). If not enough words are chosen, another
seed word is added and the process continues until "size" words have been chosen.
If num_clusters is specified, there will be num_clusters seed words selected and words will be
randomly added from each cluster until size words are found.
Note that the return is of the format:
ASSASSIN;TEAM1;TEAM2;NEUTRAL
where each group consists of comma-separated words from the word list.
'''
#TODO discuss epsilon value
def sample_similar_embeddings(self, similarity: EmbeddingHandler, size=25, num_assassin=1, num_pos=9, num_neg=8, num_clusters=-1,
guesser: EmbeddingHandler = None,
clue_giver: EmbeddingHandler = None,
epsilon = 0.3):
#Return string
board_words_str = ""
blacklisted_dataset = []
if guesser != None:
blacklisted_dataset.extend([w for w in self.data if w not in guesser.word_indices])
if clue_giver != None:
blacklisted_dataset.extend([w for w in self.data if w not in clue_giver.word_indices])
#Since we are using `similarity`'s metric, each potential word has to be in similarity
blacklisted_dataset.extend([w for w in self.data if w not in similarity.word_indices])
if num_clusters != -1:
words = []
tries = 1000
counter = 0
while len(words) < size and counter < tries:
words = []
seed_words = []
while(len(seed_words) < num_clusters):
seed_word = random.sample(self.data, 1)[0]
if seed_word not in seed_words and seed_word not in blacklisted_dataset:
seed_words.append(seed_word)
words.extend([w for w in seed_words])
similar_words = []
for word in self.data:
if word in seed_words or word in blacklisted_dataset or word in similar_words:
continue
for seed_word in seed_words:
seed_embed = similarity.get_word_vector(seed_word)
if cosine(seed_embed, similarity.get_word_vector(word)) < epsilon:
similar_words.append(word)
if len(similar_words) >= size-num_clusters:
words.extend(random.sample(similar_words, size-num_clusters))
counter += 1
if len(words) < size:
raise ValueError("Cannot find enough words with the cluster/size combo. Try increasing num_clusters or"
"increasing epsilon")
random.shuffle(words)
else:
words = []
while len(words) < size:
seed_word = ""
while seed_word in words or seed_word == "" or seed_word in blacklisted_dataset:
seed_word = random.sample(self.data, 1)[0]
words.append(seed_word)
seed_embed = similarity.get_word_vector(seed_word)
for word in self.data:
if word == seed_word or word in blacklisted_dataset:
continue
if cosine(seed_embed, similarity.get_word_vector(word)) < epsilon:
words.append(word)
random.shuffle(words)
# Segments sampled words into gameboard's accepted format
board_words_str += ';'.join(
[','.join(words[0:num_assassin]), ','.join(words[num_assassin:num_assassin + num_pos]),
','.join(words[num_assassin + num_pos:num_assassin + num_pos + num_neg]),
','.join(words[num_assassin + num_pos + num_neg:])])
return board_words_str
def main():
guesser_embed = EmbeddingHandler("./data/uk_embeddings.txt")
section_sizes = [1000, 100, 100]
section_names = ['train', 'dev', 'test']
for dataset_name in dataset_codes.keys():
d = Dataset(dataset=dataset_name)
for section_size, section_name in zip(section_sizes, section_names):
section_filename = '{}_{}.games'.format(dataset_name.lower(), section_name)
samples_counter = 0
with open(section_filename, mode='wt') as section_file:
for i in tqdm(range(section_size)):
#sample = d.sample_similar_embeddings(similarity=guesser_embed,
# guesser=guesser_embed,
# clue_giver=guesser_embed,
# num_clusters=7)
sample = d.sample_random()
section_file.write('{}\n'.format(sample))
samples_counter += 1
print('finished writing {} samples of game data to {}'.format(section_size, section_filename))
if __name__ == "__main__":
main()
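# Illustrative sketch of consuming the board string documented in the sampling methods above
# ("ASSASSIN;TEAM1;TEAM2;NEUTRAL", comma-separated words per group). The helper name is an
# example for this sketch, not part of the gameplay code.
def parse_board_string(board_words_str: str):
    # e.g. board_words_str = Dataset("CODENAMES").sample_random()
    assassin, team1, team2, neutral = (group.split(",") for group in board_words_str.split(";"))
    return assassin, team1, team2, neutral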
| codenames-master | codenames/dataset.py |
from collections import namedtuple
from typing import List
from collections import namedtuple
UNREVEALED = -1
GOOD = 1
BAD = 2
CIVILIAN = 3
ASSASSIN = 0
Clue = namedtuple('Clue', ['clue_word', 'intended_board_words', 'count'])
DEFAULT_NUM_CLUES = 10
DEFAULT_NUM_TARGETS = 4
CIVILIAN_PENALTY = .0
ASSASSIN_PENALTY = .0
MULTIGROUP_PENALTY = .0
def get_available_choices(board: List[str],
game_state: List[int]) -> List[str]:
assert len(board) == len(game_state), "Invalid state!"
options = []
for string, state_id in zip(board, game_state):
if state_id == UNREVEALED:
options.append(string)
return options
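# Small usage sketch for the helpers above; the board words and states are examples.
# With UNREVEALED (-1) marking hidden words, only those words are returned:
#     get_available_choices(["boy", "girl", "man"], [UNREVEALED, GOOD, UNREVEALED])
#     -> ["boy", "man"]
# A Clue ties a clue word to the board words it is intended to cover:
#     Clue(clue_word="child", intended_board_words=("boy", "girl"), count=2)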
| codenames-master | codenames/utils/game_utils.py |
| codenames-master | codenames/utils/__init__.py |
from typing import List
def read_lines(input_file: str) -> List[str]:
with open(input_file) as f:
lines = f.readlines()
return [l.strip() for l in lines]
def read_lines_tokens(input_file: str) -> List[str]:
with open(input_file) as f:
lines = f.readlines()
return [l.strip().split(' ') for l in lines]
| codenames-master | codenames/utils/file_utils.py |
from typing import List
from overrides import overrides
import torch
from torch.distributions import Categorical
from codenames.guessers.guesser import Guesser
from codenames.guessers.policy.guesser_policy import GuesserPolicy
from codenames.embedding_handler import EmbeddingHandler
from codenames.utils import game_utils as util
from codenames.guessers.policy.similarity_threshold_game_state import SimilarityThresholdGameStatePolicy
class LearnedGuesser(Guesser):
def __init__(self,
embedding_handler: EmbeddingHandler,
policy: GuesserPolicy,
learning_rate: float,
train: bool=False) -> None:
self.policy = policy
self.guess_history = None
self.guess_log_probs = None
self.embedding_handler = embedding_handler
self.optimizer = torch.optim.Adam(policy.parameters(), lr=learning_rate)
self.train = train
@overrides
def guess(self,
board: List[str],
clue: str,
count: int,
game_state: List[int],
current_score: int) -> List[str]:
# Get vectors for clues and options
clue_vector = self.embedding_handler.get_word_vector(clue)
if clue_vector is None:
return []
option_vectors = []
known_options = []
for option in util.get_available_choices(board, game_state):
option_vector = self.embedding_handler.get_word_vector(option)
if option_vector is not None:
option_vectors.append(option_vector)
known_options.append(option)
if not option_vectors:
return []
# Checks type of policy to see if we should parameterize
if type(self.policy) == SimilarityThresholdGameStatePolicy:
# Parameterizes game state for policy
parameterized_game_state = []
for i in range(4):
parameterized_game_state.append(game_state.count(i))
# Sample from policy
policy_output = self.policy(torch.Tensor(clue_vector),
torch.Tensor(option_vectors),
torch.Tensor(parameterized_game_state))
else:
# Sample from policy
policy_output = self.policy(torch.Tensor(clue_vector),
torch.Tensor(option_vectors))
distribution = Categorical(policy_output)
predictions = distribution.sample(torch.Size((count,)))
# Return guesses
guesses = [known_options[int(prediction)] for prediction in predictions]
log_probs = [distribution.log_prob(prediction) for prediction in predictions]
# Since we sampled multiple terms, there can be repetitions. We need to return unique ones.
# We also need to ensure the order of guesses is not changed. Note that the following logic
# may return less than `count` number of guesses.
unique_guesses = []
unique_guesses_log_probs = []
seen_guesses = set()
for guess, log_prob in zip(guesses, log_probs):
if not guess in seen_guesses:
unique_guesses.append(guess)
unique_guesses_log_probs.append(log_prob)
seen_guesses.add(guess)
self.guess_history = unique_guesses
self.guess_log_probs = unique_guesses_log_probs
return unique_guesses
'''
save is a path specifying where to save the model, if we want to save it
'''
def report_reward(self,
rewards: List[int],
save: str=None) -> None:
if self.train:
if self.guess_log_probs is None:
raise RuntimeError("Haven't made any guesses yet!")
loss = torch.mul(torch.sum(torch.mul(torch.Tensor(rewards),
torch.stack(self.guess_log_probs))), -1)
self.optimizer.zero_grad()
loss.backward()
self.optimizer.step()
if save:
torch.save(self.policy, save)
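# A minimal training-loop sketch for LearnedGuesser; the embedding path, board, clue,
# and reward values below are illustrative assumptions.
def _learned_guesser_demo() -> None:
    handler = EmbeddingHandler("tests/fixtures/sample_embedding.txt")
    policy = SimilarityThresholdGameStatePolicy(handler.embedding_weights.shape[1])
    guesser = LearnedGuesser(handler, policy, learning_rate=0.1, train=True)
    # Guess up to 2 unrevealed words for the clue "child" on a fresh board.
    guesses = guesser.guess(["boy", "girl", "woman", "man"], "child", 2, [-1, -1, -1, -1], 0)
    # One reward per unique guess; report_reward applies a REINFORCE-style policy update.
    guesser.report_reward([1] * len(guesses))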
| codenames-master | codenames/guessers/learned_guesser.py |
from typing import List
from overrides import overrides
from codenames.embedding_handler import EmbeddingHandler
from codenames.guessers.guesser import Guesser
import codenames.utils.game_utils as util
class HeuristicGuesser(Guesser):
def __init__(self, embedding_handler: EmbeddingHandler):
self.embedding_handler = embedding_handler
@overrides
def guess(self,
board: List[str],
clue: str,
count: int,
game_state: List[int],
current_score: int) -> List[str]:
available_options = util.get_available_choices(board, game_state)
# Return 1 more than the count because the game rules allow it.
return self.embedding_handler.sort_options_by_similarity(clue,
available_options,
count + 1)
| codenames-master | codenames/guessers/heuristic_guesser.py |
| codenames-master | codenames/guessers/__init__.py |
from typing import List
class Guesser:
"""
Parameters
----------
board : `List[str]`
List of all words on the board in the current game
embedding_file : `str`
Location of pickled embeddings
"""
def guess(self,
board: List[str],
clue: str,
count: int,
game_state: List[int],
current_score: int) -> List[str]:
"""
Parameters
----------
clue : `str`
count : `int`
Max size of returned list
game_state : `List[int]`
List of same size as self.board, with each element showing the ids revealed so far (eg.
same team, opposite team, assasin, civilian etc.)
score : `int`
Current score
"""
raise NotImplementedError
def report_reward(self, rewards: List[int]) -> None:
pass
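# A minimal concrete Guesser sketch illustrating the interface above; the naive ranking
# below is a placeholder assumption, not one of the project's actual guessers.
class FirstAvailableGuesser(Guesser):
    def guess(self,
              board: List[str],
              clue: str,
              count: int,
              game_state: List[int],
              current_score: int) -> List[str]:
        # Return up to `count` still-unrevealed (-1) words, ignoring the clue entirely.
        unrevealed = [word for word, state in zip(board, game_state) if state == -1]
        return unrevealed[:count]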
| codenames-master | codenames/guessers/guesser.py |
import torch as torch
import torch.nn as nn
from codenames.guessers.policy.guesser_policy import GuesserPolicy
class SimilarityThresholdGameStatePolicy(GuesserPolicy, nn.Module):
'''
embed_size is the size of the word embeddings
'''
def __init__(self, embed_size, seed=42):
super(GuesserPolicy, self).__init__()
torch.manual_seed(seed)
self.embed_size = embed_size
#Similarity matrix
w = torch.empty(self.embed_size, self.embed_size)
nn.init.eye_(w)
self.W = nn.Parameter(w, requires_grad=True)
# Similarity threshold, computed by matmul-ing the parameterized game state with Wt.
# Wt has size 4 x 1 because there are 4 game-state parameters and 1 threshold value to output.
self.Wt = nn.Parameter(torch.rand(4,1), requires_grad = True)
'''
clue_vector has shape 1 x word_embed_size (represents the embedding of the clue word)
options_matrix has shape num_words_left_to_guess x word_embed_size (where num_words_left_to_guess is the
remaining unguessed words on the game board)
'''
def forward(self,
clue_vector: torch.Tensor,
options_matrix: torch.Tensor,
parameterized_game_state: torch.Tensor) -> torch.Tensor:
m = nn.Sigmoid()
predicted_similarities = m(torch.matmul(torch.matmul(clue_vector, self.W),
torch.t(options_matrix)))
calculated_threshold = m(torch.matmul(parameterized_game_state, self.Wt))
clamped_similarities = torch.clamp(predicted_similarities - calculated_threshold, 0.001, 1.0)
return clamped_similarities
#Main method included only for testing purposes
def main():
clue_vect = torch.empty(1,50)
options_vect = torch.empty(10,50)
game_state = torch.empty(1, 4)
policy = SimilarityThresholdGameStatePolicy(50)
policy.forward(clue_vect, options_vect, game_state)
if __name__ == "__main__":
main()
| codenames-master | codenames/guessers/policy/similarity_threshold_game_state.py |
import torch
class GuesserPolicy:
def __init__(self):
raise NotImplementedError
'''
Runs model forward to create a new policy for a given state.
Inputs should be specified by child instantiations.
'''
def forward(self) -> torch.Tensor:
raise NotImplementedError
| codenames-master | codenames/guessers/policy/guesser_policy.py |
| codenames-master | codenames/guessers/policy/__init__.py |
import torch as torch
import torch.nn as nn
from codenames.guessers.policy.guesser_policy import GuesserPolicy
class SimilarityThresholdPolicy(GuesserPolicy, nn.Module):
'''
embed_size is the size of the word embeddings
'''
def __init__(self, embed_size, seed=42):
super(GuesserPolicy, self).__init__()
torch.manual_seed(seed)
self.embed_size = embed_size
#Similarity matrix
w = torch.empty(self.embed_size, self.embed_size)
nn.init.eye_(w)
self.W = nn.Parameter(w, requires_grad = True)
#Similarity threshold
self.threshold = nn.Parameter(torch.Tensor([0.01]), requires_grad = True)
'''
clue_vector has shape 1 x word_embed_size (represents the embedding of the clue word)
options_matrix has shape num_words_left_to_guess x word_embed_size (where num_words_left_to_guess is the
remaining unguessed words on the game board)
'''
def forward(self,
clue_vector: torch.Tensor,
options_matrix: torch.Tensor) -> torch.Tensor:
m = nn.Sigmoid()
predicted_similarities = m(torch.matmul(torch.matmul(clue_vector, self.W),
torch.t(options_matrix)))
clamped_similarities = torch.clamp(predicted_similarities - self.threshold, 0.001, 1.0)
return clamped_similarities
#Main method included only for testing purposes
def main():
clue_vect = torch.empty(1,50)
options_vect = torch.empty(10,50)
policy = SimilarityThresholdPolicy(50)
policy.forward(clue_vect, options_vect)
if __name__ == "__main__":
main()
| codenames-master | codenames/guessers/policy/similarity_threshold.py |
#!/usr/bin/env python
# coding=utf-8
import argparse
import io
import os
import os.path
from functools import partial
from gameplay.config import config
# Maximum number of wikipedia articles to index per word. Can be
# overridden using the --max-size command-line argument.
max_index_size = 10000
def ingest(page, page_titles, depth=0, max_depth=1):
title = page.title()
if title in page_titles:
return page_titles
# Must be in one of the following namespaces:
# Main, Category, Portal, Book.
# https://en.wikipedia.org/wiki/Wikipedia:Namespace
if page.namespace() not in (0, 14, 100, 108,):
return page_titles
page_titles.add(title)
# Have we reached our target number of pages?
if len(page_titles) >= max_index_size:
raise StopIteration
# Explore children of this page?
if depth >= max_depth:
return page_titles
# Visit pages linked from this page.
for sub_page in page.linkedPages(total=max_index_size // 3):
ingest(sub_page, page_titles, depth + 1, max_depth)
# Visit pages that refer to or embed this page.
for sub_page in page.getReferences(total=max_index_size // 3):
ingest(sub_page, page_titles, depth + 1, max_depth)
return page_titles
def main():
global max_index_size
parser = argparse.ArgumentParser(
description='Create an index for the training corpus.',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--index-size', type=int, default=max_index_size,
help='Target number of pages per word.')
args = parser.parse_args()
max_index_size = args.index_size
# Read the word list into memory and format using wikimedia conventions.
# https://en.wikipedia.org/wiki/Wikipedia:Naming_conventions_(capitalization)
with open(config.word_list, 'r') as f:
words = [w.strip().capitalize() for w in f]
print('Read {0} words from {1}.'.format(len(words), config.word_list))
if not os.path.isdir(config.corpus_directory):
os.mkdir(config.corpus_directory)
# Use the english wikipedia with no user config and ignore warnings.
os.environ['PYWIKIBOT2_NO_USER_CONFIG'] = '2'
import pywikibot
site = pywikibot.Site('en', 'wikipedia')
for word in words:
out_name = os.path.join(config.corpus_directory, config.template['index'].format(word))
if os.path.isfile(out_name):
with io.open(out_name, 'r', encoding=config.encoding) as existing:
lines = sum(chunk.count('\n')
for chunk in iter(partial(existing.read, 2**16), ''))
print('File {0} already exists ({1} lines), skipping it.'
.format(out_name, lines))
else:
page_titles = set()
try:
# Try to ingest the page for this word directly.
page = pywikibot.Page(site, word)
page_titles = ingest(page, page_titles)
# Try to ingest a disambiguation page for this word.
if not page.isDisambig():
page = pywikibot.Page(site, word + ' (disambiguation)')
page_titles = ingest(page, page_titles)
# Try to ingest the results of a site-wide search for this word.
# Only include results in the Main namespace.
results = site.search(
searchstring=word, where='text', namespaces=[0])
for page in results:
page_titles = ingest(page, page_titles, depth=1)
except StopIteration:
# We normally get here once 10,000 pages have been ingested.
pass
# Save the set of all ingested page names for this word.
with io.open(out_name, 'w', encoding=config.encoding) as out:
for title in page_titles:
out.write(title + '\n')
print('Saved index of {0} pages to {1}.'
.format(len(page_titles), out_name))
if __name__ == '__main__':
main()
| codenames-master | codenames/gameplay/create_corpus_index.py |
#!/usr/bin/env python
# coding=utf-8
import json
import os
import os.path
import sys
CONFIG_FILE = os.path.dirname(__file__) + "/config.json"
class Config(object):
def __init__(self):
config_path = os.path.abspath(CONFIG_FILE)
if not os.path.isfile(config_path):
print('Error: can\'t find config file {0}.'.format(config_path))
sys.exit(1)
# noinspection PyBroadException
try:
with open(config_path, 'r') as f:
parameters = json.load(f)
self.__dict__ = parameters
except Exception:
print('Error: can\'t parse config file {0}.'.format(config_path))
sys.exit(1)
config = Config()
| codenames-master | codenames/gameplay/config.py |
#!/usr/bin/env python
import argparse
import gzip
import os.path
import re
import nltk.tokenize
from gameplay.config import config
def main():
parser = argparse.ArgumentParser(
description='Preprocess training corpus.',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('-o', '--output', type=str, default='freqs.dat',
help='Filename for saving word list frequencies.')
args = parser.parse_args()
heading = re.compile(r'=+ ([^=]+) =+\s*')
punctuation = (',', ';', ':', '.', '!', '?', '-', '%', '&', '$',
'(', ')', '[', ']', '{', '}', '``', "''")
# Read the word list and find any compound words since they must be
# treated as a single word during the learning step.
word_list = []
compound = {}
total_freq, cross_freq, corpus_stats = {}, {}, {}
with open(config.word_list, 'r') as f:
for word in f:
word_list.append(word.strip().capitalize())
word = word.strip().lower()
if ' ' in word:
compound[word] = word.replace(' ', '_')
freq_key = compound[word]
else:
freq_key = word
# Initialize frequency counters.
total_freq[freq_key] = cross_freq[freq_key] = 0
corpus_stats[freq_key] = (0, 0)
print('Wordlist contains {0} compound words:'.format(len(compound)))
print(list(compound.keys()))
for word in word_list:
freq_key = word.lower().replace(' ', '_')
in_name = os.path.join(config.corpus_directory, config.template['articles'].format(word))
if not os.path.exists(in_name):
print('Skipping missing file {0}'.format(in_name))
continue
out_name = os.path.join(config.corpus_directory, config.template['preprocess'].format(word))
num_sentences, num_words = 0, 0
with gzip.open(in_name, 'rb') as f_in:
# Read the whole file into memory.
content = f_in.read().decode(config.encoding)
# Remove headings.
content = re.sub(heading, '', content)
with gzip.open(out_name, 'wb') as f_out:
# Loop over sentences.
for sentence in nltk.tokenize.sent_tokenize(content):
words = []
for token in nltk.tokenize.word_tokenize(sentence):
# Ignore punctuation.
if token in punctuation:
continue
words.append(token.lower())
line = ' '.join(words)
# Replace ' ' with '_' in compound words.
for w in compound:
line = line.replace(w, compound[w])
# Update wordlist frequencies.
for w in line.split():
num_words += 1
if w in total_freq:
total_freq[w] += 1
if w != freq_key:
cross_freq[w] += 1
num_sentences += 1
# Save this sentence to the preprocessed output.
f_out.write(line.encode(config.encoding) + '\n')
print(word, num_sentences, num_words)
corpus_stats[freq_key] = (num_sentences, num_words)
# Save wordlist frequencies in decreasing order.
with open(args.output, 'w') as f_out:
print('WORD TOTFREQ XFREQ NSENT NWORD', file=f_out)
for w in sorted(total_freq, key=total_freq.get, reverse=True):
print('{0:11s} {1:8d} {2:8d} {3:8d} {4:8d}'.format(
w, total_freq[w], cross_freq[w], *corpus_stats[w]), file=f_out)
print('Saved wordlist frequencies to {0}'.format(args.output))
if __name__ == '__main__':
main()
| codenames-master | codenames/gameplay/preprocess_corpus.py |
#!/usr/bin/env python
import argparse
import gzip
import io
import multiprocessing
import os
import os.path
import random
import warnings
import wikipedia
from gameplay.config import config
dry_run = False
def fetch(word, min_size=5e6):
# Use a reproducible but different "random" shuffle for each word.
random.seed(word)
in_name = os.path.join(config.corpus_directory, config.template['index'].format(word))
out_name = os.path.join(config.corpus_directory, config.template['articles'].format(word))
# Has this word already been fetched?
if os.path.exists(out_name):
try:
# Check the GZIP structure and size.
with gzip.open(out_name, 'rb') as f_in:
# Uncompress the whole file into memory. This is relatively
# expensive, but is the only foolproof check.
content = f_in.read().decode(config.encoding)
size = len(content)
if size >= min_size:
return word, 0, 0, size
print('Good file "{0}" below minimum size: {1} < {2}.'
.format(out_name, size, min_size))
except Exception as e:
print('Bad file "{0}":: {1}'.format(out_name, e))
with io.open(in_name, 'r', encoding=config.encoding) as f_in:
# Read all page titles.
page_titles = [line.rstrip() for line in f_in]
# Generate a random order of page titles.
order = list(range(len(page_titles)))
random.shuffle(order)
print('Fetching from {0} pages for {1}.'.format(len(page_titles), word))
if dry_run:
return word, 0, 0, 0
total_size = 0
num_articles = 0
with gzip.open(out_name, 'wb') as f_out:
for article_index in order:
page_title = page_titles[article_index]
# Read this article's plain-text content.
with warnings.catch_warnings():
# Ignore warnings. The expected warnings are:
# requests.packages.urllib3.exceptions.SubjectAltNameWarning
# UserWarning
warnings.simplefilter('ignore')
try:
page = wikipedia.page(
page_title, auto_suggest=False, preload=False)
content = page.content
# Save this article's content.
f_out.write(content.encode(config.encoding))
total_size += len(content)
num_articles += 1
if total_size >= min_size:
break
except wikipedia.exceptions.DisambiguationError:
# Ignore disambiguation pages.
pass
except Exception as e:
print('Unexpected Error:: {0}'.format(e))
return word, len(page_titles), num_articles, total_size
def main():
global dry_run
parser = argparse.ArgumentParser(
description='Fetch indexed training corpus text.',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--nproc', type=int, default=20,
help='Number of processing pool workers to use.')
parser.add_argument('--dry-run', action='store_true',
help='Perform a dry run only.')
args = parser.parse_args()
dry_run = args.dry_run
# Read the word list into memory and format using wikimedia conventions.
# https://en.wikipedia.org/wiki/Wikipedia:Naming_conventions_(capitalization)
with open(config.word_list, 'r') as f:
words = [w.strip().capitalize() for w in f]
print('Read {0} words from {1}.'.format(len(words), config.word_list))
pool = multiprocessing.Pool(processes=args.nproc)
result = pool.map_async(fetch, words)
result.wait()
if __name__ == '__main__':
main()
| codenames-master | codenames/gameplay/fetch_corpus_text.py |
import logging
logging.getLogger().setLevel(logging.ERROR)
| codenames-master | codenames/gameplay/__init__.py |
#!/usr/bin/env python
import argparse
import re
from gameplay.engine import GameEngine
def main():
parser = argparse.ArgumentParser(
description='Play the CodeNames game.',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('-c', '--config', type=str, default='CHCH',
help='Config <spy1><team1><spy2><team2> using C,H.')
parser.add_argument('-x', '--expert', action='store_true',
help='Expert clues. For now implements \'unlimited\' only.')
parser.add_argument('--seed', type=int, default=None,
help='Random seed for reproducible games.')
parser.add_argument('--init', type=str, default=None,
help='Initialize words ASSASSIN;TEAM1;TEAM2;NEUTRAL')
args = parser.parse_args()
if not re.match('^[CH]{4}$', args.config):
print('Invalid configuration. Try HHHH or CHCH.')
return -1
d = dict(H='human', C='computer')
spy1 = d[args.config[0]]
team1 = d[args.config[1]]
spy2 = d[args.config[2]]
team2 = d[args.config[3]]
e = GameEngine(seed=args.seed, expert=args.expert)
e.play_game(spy1, team1, spy2, team2, init=args.init)
if __name__ == '__main__':
main()
| codenames-master | codenames/gameplay/play.py |
import warnings
import numpy as np
import nltk.stem.wordnet
import sklearn.cluster
class WordEmbedding(object):
def __init__(self, filename):
# Import gensim here so we can mute a UserWarning about the Pattern
# library not being installed.
with warnings.catch_warnings():
warnings.simplefilter('ignore', UserWarning)
import gensim.models.word2vec
# Load the model.
self.model = gensim.models.word2vec.Word2Vec.load(filename)
# Reduce the memory footprint since we will not be training.
self.model.init_sims(replace=True)
# Initialize a wordnet lemmatizer for stemming.
self.lemmatizer = nltk.stem.wordnet.WordNetLemmatizer()
def get_stem(self, word):
"""Return the stem of word.
"""
# Hardcode some stemming rules for the default CodeName words
# that the wordnet lemmatizer doesn't know about.
if word in ('pass', 'passing', 'passed',):
return 'pass'
if word in ('microscope', 'microscopy'):
return 'microscope'
if word in ('mexico', 'mexican', 'mexicans', 'mexicali'):
return 'mexico'
if word in ('theater', 'theatre', 'theaters', 'theatres',
'theatrical', 'theatricals'):
return 'theater'
        if word in ('alp', 'alps', 'alpine', 'alpinist'):
return 'alp'
        # Keep the return type consistent with the hardcoded rules above (ASCII str).
        return self.lemmatizer.lemmatize(word).encode('ascii', 'ignore').decode('ascii')
def get_clue(self, clue_words, pos_words, neg_words, veto_words,
veto_margin=0.2, num_search=100, verbose=0):
"""
"""
if verbose >= 2:
print('CLUE:', clue_words)
print(' POS:', pos_words)
print(' NEG:', neg_words)
print('VETO:', veto_words)
# Initialize the list of illegal clues.
illegal_words = list(pos_words) + list(neg_words) + list(veto_words)
illegal_stems = set([self.get_stem(word) for word in illegal_words])
# Get the internal indices and normalized vectors for each word.
clue_indices = [self.model.vocab[word].index for word in clue_words]
clue_vectors = self.model.syn0norm[clue_indices]
pos_indices = [self.model.vocab[word].index for word in pos_words]
pos_vectors = self.model.syn0norm[pos_indices]
neg_indices = [self.model.vocab[word].index for word in neg_words]
neg_vectors = self.model.syn0norm[neg_indices]
veto_indices = [self.model.vocab[word].index for word in veto_words]
veto_vectors = self.model.syn0norm[veto_indices]
# Find the normalized mean of the words in the clue group.
mean_vector = clue_vectors.mean(axis=0)
mean_vector /= np.sqrt(mean_vector.dot(mean_vector))
# Calculate the cosine distances between the mean vector and all
# the words in our vocabulary.
cosines = np.dot(self.model.syn0norm[:, np.newaxis],
mean_vector).reshape(-1)
# Sort the vocabulary by decreasing cosine similarity with the mean.
closest = np.argsort(cosines)[::-1]
# Select the clue whose minimum cosine from the words is largest
# (i.e., smallest maximum distance).
best_clue = None
max_min_cosine = -2.
for i in range(num_search):
clue_index = closest[i]
clue = self.model.index2word[clue_index]
# Ignore clues with the same stem as an illegal clue.
if self.get_stem(clue) in illegal_stems:
continue
# Ignore clues that are contained within an illegal clue or
# vice versa.
contained = False
for illegal in illegal_words:
if clue in illegal or illegal in clue:
contained = True
break
if contained:
continue
# Calculate the cosine similarity of this clue with all of the
# positive, negative and veto words.
clue_vector = self.model.syn0norm[clue_index]
clue_cosine = np.dot(clue_vectors[:, np.newaxis], clue_vector)
neg_cosine = np.dot(neg_vectors[:, np.newaxis], clue_vector)
veto_cosine = np.dot(veto_vectors[:, np.newaxis], clue_vector)
# Is this closer to all of the positive words than our previous best?
min_clue_cosine = np.min(clue_cosine)
if min_clue_cosine < max_min_cosine:
continue
# Are all positive words more similar than any negative words?
if list(neg_words):
max_neg_cosine = np.max(neg_cosine)
if max_neg_cosine >= min_clue_cosine:
# A negative word is likely to be selected before all the
# positive words.
if verbose >= 3:
neg_word = neg_words[np.argmax(neg_cosine)]
print('neg word {0} is a distractor (cosine={1:.4f})'
.format(neg_word, max_neg_cosine))
continue
# Is this word too similar to any of the veto words?
if list(veto_words):
max_veto_cosine = np.max(veto_cosine)
if max_veto_cosine >= min_clue_cosine - veto_margin:
# A veto word is too likely to be selected before all the
# positive words.
if verbose >= 2:
veto_word = veto_words[np.argmax(veto_cosine)]
print('veto word {0} is a distractor (cosine={1:.4f})'
.format(veto_word, max_veto_cosine))
continue
# If we get here, we have a new best clue.
max_min_cosine = min_clue_cosine
best_clue = clue
if verbose >= 1:
words = [w.upper() for w in clue_words]
print('{0} = {1} (min_cosine={2:.4f})'
.format('+'.join(words), clue, min_clue_cosine))
return best_clue, max_min_cosine
def get_clusters_kmeans(self, words):
"""Use the KMeans algorithm to find word clusters.
"""
words = np.asarray(words)
num_words = len(words)
X = np.empty((num_words, self.model.vector_size))
for i, word in enumerate(words):
X[i] = self.model.syn0norm[self.model.vocab[word].index]
for num_clusters in range(1, num_words):
kmeans = sklearn.cluster.KMeans(num_clusters).fit(X)
for label in set(kmeans.labels_):
members = words[kmeans.labels_ == label]
print('{0},{1}: {2}'.format(num_clusters, label, members))
def get_clusters_dbscan(self, words, min_sep=1.25):
"""Use the DBSCAN algorithm to find word clusters.
"""
# Calculate the distance matrix for the specified words.
words = np.asarray(words)
num_words = len(words)
distance = np.zeros((num_words, num_words))
for i1 in range(num_words):
for i2 in range(i1):
cosine = self.model.similarity(words[i1], words[i2])
distance[i1, i2] = distance[i2, i1] = np.arccos(cosine)
        # Initialize the cluster finder.
db = sklearn.cluster.DBSCAN(
eps=min_sep, min_samples=1, metric='precomputed', n_jobs=1)
db.fit(distance)
for label in set(db.labels_):
members = words[db.labels_ == label]
print('{0}: {1}'.format(label, members))
| codenames-master | codenames/gameplay/model.py |
import itertools
import re
import sys
import os
import platform
import numpy as np
from termcolor import colored
from codenames.gameplay.model import WordEmbedding
from codenames.gameplay.config import config
CLUE_PATTERN = r'^([a-zA-Z]+) ({0})$'
UNLIMITED = "unlimited"
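# For illustration: with the pattern above, a clue entered as 'ocean 2' is valid in
# normal play, and 'ocean unlimited' is additionally accepted in expert mode
# (the word 'ocean' is a hypothetical example, not necessarily on the board).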
# noinspection PyAttributeOutsideInit
class GameEngine(object):
def __init__(self, seed=None, expert=False):
# Load our word list if necessary.
# TODO: Max length of 11 is hardcoded here and in print_board()
with open(config.word_list) as f:
_words = [line.rstrip().lower().replace(' ', '_') for line in f.readlines()]
self.words = np.array(_words)
# Initialize our word embedding model if necessary.
self.model = WordEmbedding(config.embedding)
# Initialize random numbers.
self.generator = np.random.RandomState(seed=seed)
# Register expert mode
self.expert = expert
self.unfound_words = (set(), set())
# Useful regular expressions.
if self.expert:
self.valid_clue = re.compile(CLUE_PATTERN.format("[0-9]|" + UNLIMITED))
else:
self.valid_clue = re.compile(CLUE_PATTERN.format("[0-9]"))
def initialize_random_game(self, size=5):
self.size = size
# Shuffle the wordlist.
shuffle = self.generator.choice(
len(self.words), size * size, replace=False)
self.board = self.words[shuffle]
# Specify the layout for this game.
_assignments = self.generator.permutation(size * size)
self.owner = np.empty(size * size, int)
self.owner[_assignments[0]] = 0 # assassin
self.owner[_assignments[1:10]] = 1 # first player: 9 words
self.owner[_assignments[10:18]] = 2 # second player: 8 words
self.owner[_assignments[18:]] = 3 # bystander: 7 words
self.assassin_word = self.board[self.owner == 0]
        # No card assignments have been revealed yet.
self.assignment_not_revealed = np.ones_like(self.owner, dtype=bool)
self.num_turns = -1
def initialize_from_words(self, initial_words, size=5):
"""
The initial_words parameter should be in the format:
ASSASSIN;TEAM1;TEAM2;NEUTRAL
where each group consists of comma-separated words from the word list.
The total number of words must be <= size * size. Any missing words
are considered to be already covered and neutral.
"""
self.size = size
word_groups = initial_words.split(';')
if len(word_groups) != 4:
raise ValueError('Expected 4 groups separated by semicolon.')
board, owner, visible = [], [], []
for group_index, word_group in enumerate(word_groups):
words = word_group.split(',')
for word in words:
word = word.lower().replace(' ', '_')
if word not in self.words:
raise ValueError('Invalid word "{0}".'.format(word))
if word in board:
raise ValueError('Duplicate word "{0}".'.format(word))
board.append(word)
owner.append(group_index)
visible.append(True)
if len(board) > size * size:
raise ValueError('Too many words. Expected <= {0}.'.format(size * size))
# Add dummy hidden words if necessary.
while len(board) < size * size:
board.append('---')
owner.append(3)
visible.append(False)
self.board = np.array(board)
self.owner = np.array(owner)
self.assignment_not_revealed = np.array(visible)
# Perform a random shuffle of the board.
shuffle = self.generator.permutation(size * size)
self.board = self.board[shuffle]
self.owner = self.owner[shuffle]
self.assignment_not_revealed = self.assignment_not_revealed[shuffle]
self.assassin_word = self.board[self.owner == 0]
self.num_turns = -1
def print_board(self, spymaster=False, clear_screen=True, verbose=True):
if not verbose:
return
if clear_screen:
if platform.system() == 'Windows':
os.system('cls')
else:
sys.stdout.write(chr(27) + '[2J')
sys.stdout.write('Legend:\n')
sys.stdout.write(' ' + colored('unrevealed TEAM1 card ', 'blue'))
sys.stdout.write(' ' + colored(' revealed TEAM1 card ', 'white', 'on_blue') + '\n')
sys.stdout.write(' ' + colored('unrevealed TEAM2 card ', 'red'))
sys.stdout.write(' ' + colored(' revealed TEAM2 card ', 'white', 'on_red') + '\n')
sys.stdout.write(' ' + colored('unrevealed NEUTRAL card ', 'green'))
sys.stdout.write(' ' + colored(' revealed NEUTRAL card ', 'white', 'on_green') + '\n')
sys.stdout.write(' ' + colored('unrevealed ASSASSIN card ', 'grey'))
sys.stdout.write(' ' + colored(' revealed ASSASSIN card ', 'white', 'on_grey') + '\n\n')
sys.stdout.write('Board:\n')
board = self.board.reshape(self.size, self.size)
owner = self.owner.reshape(self.size, self.size)
visible = self.assignment_not_revealed.reshape(self.size, self.size)
for row in range(self.size):
for col in range(self.size):
word = board[row, col]
foreground_color = None
background_color = None
attrs = []
if not visible[row, col]:
foreground_color = 'white'
if owner[row, col] == 0:
background_color = 'on_grey'
attrs.append('bold')
elif owner[row, col] == 1:
background_color = 'on_blue'
elif owner[row, col] == 2:
background_color = 'on_red'
elif owner[row, col] == 3:
background_color = 'on_green'
else:
raise RuntimeError('invalid owner.')
else:
background_color = None
if owner[row, col] == 0:
foreground_color = 'grey'
attrs.append('bold')
elif owner[row, col] == 1:
foreground_color = 'blue'
elif owner[row, col] == 2:
foreground_color = 'red'
elif owner[row, col] == 3:
foreground_color = 'green'
else:
raise RuntimeError('invalid owner.')
if not spymaster or owner[row, col] in (0, 1, 2):
word = word.upper()
# format cell content
cell = '{:^11} '.format(word)
if background_color:
cell = colored(cell, foreground_color, background_color, attrs = attrs)
else:
cell = colored(cell, foreground_color, attrs = attrs)
sys.stdout.write(cell)
sys.stdout.write('\n')
sys.stdout.write('\n')
def play_computer_spymaster(self, gamma=1.0, verbose=True):
say('Thinking...')
sys.stdout.flush()
# Loop over all permutations of words.
num_words = len(self.player_words)
best_score, saved_clues = [], []
for count in range(num_words, 0, -1):
# Multiply similarity scores by this factor for any clue
# corresponding to this many words.
bonus_factor = count ** gamma
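            # For example, with the default gamma=1.0 a clue covering three words is
            # weighted three times as heavily as a single-word clue.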
for group in itertools.combinations(list(range(num_words)), count):
words = self.player_words[list(group)]
clue, score = self.model.get_clue(clue_words=words,
pos_words=self.player_words,
neg_words=np.concatenate((self.opponent_words, self.neutral_words)),
veto_words=self.assassin_word)
if clue:
best_score.append(score * bonus_factor)
saved_clues.append((clue, words))
num_clues = len(saved_clues)
order = sorted(range(num_clues), key=lambda k: best_score[k], reverse=True)
if verbose:
self.print_board(spymaster=True)
for i in order[:10]:
clue, words = saved_clues[i]
say('{0:.3f} {1} = {2}'.format(best_score[i], ' + '.join([w.upper() for w in words]), clue))
clue, words = saved_clues[order[0]]
self.unfound_words[self.player].update(words)
if self.expert and self._should_say_unlimited(nb_clue_words=len(words)):
return clue, UNLIMITED
else:
return clue, len(words)
def _should_say_unlimited(self, nb_clue_words, threshold_opponent=2):
"""
Announce "unlimited" if :
(1) the opposing team risks winning with their next clue,
(2) and our +1 guess isn't enough to catch up during this clue,
(3) but all the words hinted by the current and previous clues
are enough to catch up and win
"""
return (len(self.opponent_words) <= threshold_opponent # (1)
and nb_clue_words + 1 < len(self.player_words) # (2)
and self.unfound_words[self.player]
== set(self.player_words)) # (3)
def play_human_spymaster(self):
self.print_board(spymaster=True)
while True:
clue = ask('{0} Enter your clue: '.format(self.player_label))
matched = self.valid_clue.match(clue)
if matched:
word, count = matched.groups()
if count != UNLIMITED:
count = int(count)
return word, count
say('Invalid clue, should be WORD COUNT.')
def play_human_team(self, word, count):
num_guesses = 0
while (self.expert and count == UNLIMITED) or num_guesses < count + 1:
self.print_board(clear_screen=(num_guesses == 0))
say('{0} your clue is: {1} {2}'.format(self.player_label, word, count))
num_guesses += 1
while True:
guess = ask('{0} enter your guess #{1}: '.format(self.player_label, num_guesses))
guess = guess.strip().lower().replace(' ', '_')
if guess == '':
# Team does not want to make any more guesses.
return True
if guess in self.board[self.assignment_not_revealed]:
break
say('Invalid guess, should be a visible word.')
loc = np.where(self.board == guess)[0]
self.assignment_not_revealed[loc] = False
if guess == self.assassin_word:
                say('{0} You guessed the assassin - game over!'.format(self.player_label))
return False
if guess in self.player_words:
self.unfound_words[self.player].discard(guess)
if num_guesses == len(self.player_words):
say('{0} You won!!!'.format(self.player_label))
return False
else:
ask('{0} Congratulations, keep going! (hit ENTER)\n'.format(self.player_label))
else:
if guess in self.opponent_words:
ask('{0} Sorry, word from opposing team! (hit ENTER)\n'.format(self.player_label))
else:
ask('{0} Sorry, bystander! (hit ENTER)\n'.format(self.player_label))
break
return True
def next_turn(self):
self.num_turns += 1
self.player = self.num_turns % 2
self.opponent = (self.player + 1) % 2
self.player_label = '<>'[self.player] * 3
self.player_words = self.board[(self.owner == self.player + 1) & self.assignment_not_revealed]
self.opponent_words = self.board[(self.owner == self.opponent + 1) & self.assignment_not_revealed]
self.neutral_words = self.board[(self.owner == 3) & self.assignment_not_revealed]
def play_turn(self, spymaster='human', team='human'):
self.next_turn()
if spymaster == 'human':
word, count = self.play_human_spymaster()
else:
word, count = self.play_computer_spymaster()
if team == 'human':
ongoing = self.play_human_team(word, count)
else:
raise NotImplementedError()
return ongoing
def play_game(self, spymaster1='human', team1='human',
spymaster2='human', team2='human', init=None):
if init is None:
self.initialize_random_game()
else:
self.initialize_from_words(init)
while True:
if not self.play_turn(spymaster1, team1): break
if not self.play_turn(spymaster2, team2): break
def say(message):
    sys.stdout.write(message + '\n')
def ask(message):
try:
return input(message)
except KeyboardInterrupt:
say('\nBye.')
sys.exit(0)
| codenames-master | codenames/gameplay/engine.py |
#!/usr/bin/env python
import argparse
import warnings
import logging
import random
import gzip
import os.path
from gameplay.config import config
def main():
parser = argparse.ArgumentParser(
        description='Merge the training corpus and train word embeddings.',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--npass', type=int, default=1,
help='Perform this pass number (1-5).')
parser.add_argument('--num-epochs', type=int, default=5,
help='Number of training epochs to run per pass.')
parser.add_argument('--dimension', type=int, default=300,
help='Dimension of word vectors to learn.')
parser.add_argument('--min-count', type=int, default=150,
                        help='Ignore words with fewer occurrences.')
parser.add_argument('--max-distance', type=int, default=10,
help='Max distance between words within a sentence')
parser.add_argument('--workers', type=int, default=4,
help='Number of workers to distribute workload across.')
parser.add_argument('--log-level', type=str, default='INFO',
choices=('CRITICAL', 'ERROR', 'WARNING',
'INFO', 'DEBUG'),
help='Filter out log messages below this level.')
args = parser.parse_args()
# Configure logging.
logging.basicConfig(
format='%(asctime)s : %(levelname)s : %(message)s',
level=getattr(logging, args.log_level))
logger = logging.getLogger('learn')
# Look for an existing corpus for this pass.
corpus_name = 'corpus_{0}.gz'.format(args.npass)
if os.path.exists(corpus_name):
logger.info('Using corpus {0}'.format(corpus_name))
else:
# Read the wordlist into memory.
with open(config.word_list, 'r') as f:
wordlist = [w.strip().capitalize() for w in f]
logger.info('Read {0} words from {1}.'
.format(len(wordlist), config.word_list))
# Open the output corpus file for this pass.
f_out = gzip.open(corpus_name, 'wb')
# Perform a reproducible random shuffle of the wordlist.
logger.info('Shuffling the corpus for pass {0} into {1}...'
.format(args.npass, corpus_name))
random.seed(args.npass)
random.shuffle(wordlist)
# Split the wordlist into random pairs.
for i in range(0, len(wordlist), 2):
sentences = []
# Read content for the first word of this pair into memory.
in_name = os.path.join(
config.corpus_directory,
config.template['preprocess'].format(wordlist[i]))
with gzip.open(in_name, 'rb') as f_in:
for line in f_in:
sentences.append(line)
# The last "pair" might be a single.
if i < len(wordlist) - 1:
in_name = os.path.join(
config.corpus_directory,
config.template['preprocess'].format(wordlist[i+1]))
# Read content for the second word of this pair into memory.
with gzip.open(in_name, 'rb') as f_in:
for line in f_in:
sentences.append(line)
# Shuffle sentences for this pair of words into a random order.
sentence_order = list(range(len(sentences)))
random.shuffle(sentence_order)
# Save shuffled sentences to the output corpus file.
for j in sentence_order:
f_out.write(sentences[j])
logger.info('Added {0} sentences for ({1}, {2}).'.format(
len(sentences), wordlist[i], wordlist[i+1]))
f_out.close()
# Import gensim here so we can mute a UserWarning about the Pattern
# library not being installed.
with warnings.catch_warnings():
warnings.simplefilter('ignore', UserWarning)
import gensim.models.word2vec
# Use the training sentences for this pass.
sentences = gensim.models.word2vec.LineSentence(corpus_name)
# Calculate start and stop learning rates for this pass.
alpha_start = 0.025 - 0.005 * (args.npass - 1.) + 0.0001
alpha_stop = 0.025 - 0.005 * args.npass + 0.0001
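    # For example, pass 1 anneals the rate from 0.0251 down to 0.0201,
    # and pass 5 from 0.0051 down to 0.0001.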
if alpha_stop <= 0:
print('Invalid npass gives negative learning rate.')
return -1
logger.info('Learning rate: {0:.4f} -> {1:.4f}'
.format(alpha_start, alpha_stop))
if args.npass > 1:
# Load a previously trained model.
prev_name = '{0}.{1}'.format(config.embedding, args.npass - 1)
model = gensim.models.word2vec.Word2Vec.load(prev_name)
# Update parameters from the command line.
model.workers = args.workers
model.iter = args.num_epochs
model.alpha = alpha_start
model.min_alpha = alpha_stop
# Continue training.
model.train(sentences)
else:
# Train a new model.
model = gensim.models.word2vec.Word2Vec(
sentences, size=args.dimension, window=args.max_distance,
min_count=args.min_count, workers=args.workers,
alpha=alpha_start, min_alpha=alpha_stop,
sg=1, hs=1, iter=args.num_epochs)
# Save the updated model after this pass.
save_name = '{0}.{1}'.format(config.embedding, args.npass)
model.save(save_name)
if __name__ == '__main__':
main()
| codenames-master | codenames/gameplay/learn.py |
#!/usr/bin/env python
# coding=utf-8
import argparse
import glob
import os
from gameplay.model import WordEmbedding
from gameplay.config import config
def main():
parser = argparse.ArgumentParser(
description='Evaluate word embedding.',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--npass', type=int,
help='Evaluate this pass number (if not set: evaluate {0} if it is '
'present, or the last {0}.N in numerical order if not)'
.format(config.embedding))
parser.add_argument('--top-singles', type=int, default=10,
help='Show top single matches.')
parser.add_argument('--top-pairs', type=int, default=0,
help='Show top pair matches.')
parser.add_argument('--save-plots', type=str, default=None,
help='Save plots using this filename root.')
args = parser.parse_args()
if args.save_plots:
# Only import if needed and use background plotting.
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
if args.npass is not None:
evaluated_file = '{0}.{1}'.format(config.embedding, args.npass)
elif os.path.isfile(config.embedding):
evaluated_file = config.embedding
else:
all_suffixes = [f.split('.')[-1]
for f in glob.glob('{0}.*'.format(config.embedding))]
evaluated_file = '{0}.{1}'.format(config.embedding,
sorted(all_suffixes)[-1])
    if not os.path.isfile(evaluated_file):
        print('Embedding file {0} not found.'.format(evaluated_file))
        return -1
embedding = WordEmbedding(evaluated_file)
with open(config.word_list, 'r') as f:
words = [w.strip().lower().replace(' ', '_') for w in f]
if args.top_singles > 0:
best_score, saved_clues = [], []
for word in words:
clue, score = embedding.get_clue([word], [word], [], [])
if clue:
best_score.append(score)
saved_clues.append((word, clue))
num_clues = len(saved_clues)
order = sorted(
range(num_clues), key=lambda k: best_score[k], reverse=True)
for i in order[:args.top_singles]:
word, clue = saved_clues[i]
print('{0:.3f} {1} = {2}'.format(
best_score[i], word.upper(), clue))
if args.save_plots:
plt.hist(best_score, range=(0., 1.), bins=50)
plt.xlim(0., 1.)
plt.xlabel('Similarity Score')
plt.ylabel('Singles')
plt.yscale('log')
plt.grid()
plt.savefig(args.save_plots + '_singles.png')
plt.clf()
if args.top_pairs > 0:
best_score, saved_clues = [], []
for i1, word1 in enumerate(words):
for i2, word2 in enumerate(words[:i1]):
clue, score = embedding.get_clue(
[word1, word2], [word1, word2], [], [])
if clue:
best_score.append(score)
saved_clues.append(((i1, i2), clue))
num_clues = len(saved_clues)
order = sorted(
range(num_clues), key=lambda k: best_score[k], reverse=True)
for i in order[:args.top_pairs]:
i1, i2 = saved_clues[i][0]
clue = saved_clues[i][1]
print('{0:.3f} {1} + {2} = {3}'.format(
best_score[i], words[i1].upper(), words[i2].upper(), clue))
if args.save_plots:
plt.hist(best_score, range=(0., 1.), bins=50)
plt.xlim(0., 1.)
plt.xlabel('Similarity Score')
plt.ylabel('Pairs')
plt.yscale('log')
plt.grid()
plt.savefig(args.save_plots + '_pairs.png')
if __name__ == '__main__':
main()
| codenames-master | codenames/gameplay/evaluate.py |
import sys
import os
import re
import tqdm
import datetime
from collections import defaultdict
from random import choices, shuffle
from typing import List
from termcolor import colored
import numpy as np
import argparse
import torch
from codenames.clue_givers.giver import Giver, Clue
from codenames.clue_givers.heuristic_giver import HeuristicGiver
from codenames.clue_givers.wordnet_cluegiver import WordnetClueGiver
from codenames.embedding_handler import EmbeddingHandler
from codenames.guessers.guesser import Guesser
from codenames.guessers.heuristic_guesser import HeuristicGuesser
from codenames.guessers.learned_guesser import LearnedGuesser
from codenames.guessers.policy.similarity_threshold_game_state import SimilarityThresholdGameStatePolicy
from codenames.guessers.policy.similarity_threshold import SimilarityThresholdPolicy
from codenames.utils.game_utils import UNREVEALED, ASSASSIN, GOOD, BAD, Clue
from codenames.gameplay.engine import GameEngine
SCORE_CORRECT_GUESS = 1
SCORE_INCORRECT_GUESS = -1
SCORE_ASSASSIN_GUESS = -5
SCORE_CIVILIAN_GUESS = -1
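# For example, under these constants a turn with two correct guesses followed by a
# civilian guess nets 1 + 1 - 1 = +1 for team1.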
class GameWrapper:
def __init__(self, board_size, board_data=None):
'''
board_size: int, number of board_words = board_size * board_size
board_data: string, format as: ASSASSIN;TEAM1;TEAM2;NEUTRAL
where each group consists of comma-separated words from the word list.
'''
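        # For example, board_size=5 gives the standard 25-card board; board_data,
        # if provided, uses the same format accepted by GameEngine.initialize_from_words.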
self.engine = GameEngine()
# initialize board data.
        if board_data is None:
self.engine.initialize_random_game(size=board_size)
else:
self.engine.initialize_from_words(board_data, size=board_size)
# initialize game state.
self.game_state = [UNREVEALED] * (board_size * board_size)
# initialize score.
self.cumulative_score = 0
self.result = None
def is_game_over(self):
team1_has_words_left_to_guess, team2_has_words_left_to_guess = False, False
for i in range(len(self.engine.owner)):
# if the assassin is revealed, then it's game over.
if self.engine.owner[i] == ASSASSIN and not self.engine.assignment_not_revealed[i]:
self.result = "Assassin word guessed"
return True
# does team1/2 has any invisible words?
if self.engine.owner[i] == GOOD and self.engine.assignment_not_revealed[i]:
team1_has_words_left_to_guess = True
if self.engine.owner[i] == BAD and self.engine.assignment_not_revealed[i]:
team2_has_words_left_to_guess = True
# if all words of either team are visible, it's game over.
if not team1_has_words_left_to_guess:
self.result = "Team 1: Winner"
return True
if not team2_has_words_left_to_guess:
self.result = "Team 2: Winner"
return True
# if none of the above conditions apply, the game is not over.
return False
def is_valid_clue(self, clue_word: str):
return True
def _apply_team1_guess(self, guess):
idx = self.engine.board.tolist().index(guess)
self.engine.assignment_not_revealed[idx] = False
if idx == -1:
raise Exception
else:
self.game_state[idx] = self.engine.owner[idx]
if self.engine.owner[idx] == GOOD:
guess_reward = SCORE_CORRECT_GUESS
elif self.engine.owner[idx] == BAD:
guess_reward = SCORE_INCORRECT_GUESS
elif self.engine.owner[idx] == ASSASSIN:
guess_reward = SCORE_ASSASSIN_GUESS
else:
guess_reward = SCORE_CIVILIAN_GUESS
self.cumulative_score += guess_reward
return guess_reward
# This method executes the guesses of team1.
def apply_team1_guesses(self, clue: Clue, guessed_words: List[str]):
guess_list_rewards = []
if len(guessed_words) > int(clue.count) + 1:
raise Exception
for word in guessed_words:
guess_reward = self._apply_team1_guess(word)
guess_list_rewards.append(guess_reward)
if guess_reward <= 0:
break
# team1 just played, so we need to update the internal state of
# the engine pertaining to turn info.
self.engine.next_turn()
return guess_list_rewards + [0] * (len(guessed_words) - len(guess_list_rewards))
# The engine was designed for a 2-player experience. In the 1-player
# version of the game, there is an imaginary team2 which is pretty
# boring: it reveals one of the team2 cards in each turn. This method
# simulates the imaginary boring team2 for the 1-player version of the
# game.
def apply_team2_guesses(self):
team2_guess = None
for i in range(len(self.engine.board)):
# find the first word which belongs to team2 and is not revealed.
if self.engine.owner[i] == BAD and self.engine.assignment_not_revealed[i]:
# then reveal it.
self.game_state[i] = self.engine.owner[i]
self.engine.assignment_not_revealed[i] = False
team2_guess = self.engine.board[i]
break
# This should never happen.
if not team2_guess:
            raise RuntimeError('All cards which belong to TEAM2 have already '
                               'been revealed, why are we still playing?')
# team2 just played, so we need to update the internal state of
# the engine pertaining to turn info.
self.engine.next_turn()
return [team2_guess]
class RandomGiver(Giver):
'''
A clue giver who randomly picks the clue word from a vocabulary.
'''
def __init__(self, embedding_handler):
self.vocab = list(embedding_handler.word_indices.keys())
def get_next_clue(self,
board,
allIDs,
game_state,
score):
options = choices(self.vocab, k=3)
clues = []
for option in options:
clues.append(Clue(clue_word=option, intended_board_words=[], count=1))
return clues
class RandomGuesser(Guesser):
'''
A guesser who randomly picks among unrevealed board words.
'''
def guess(self, board, clue_word, count, game_state, cumulative_score):
unrevealed_words = []
for i in range(len(game_state)):
if game_state[i] == -1:
unrevealed_words.append(board[i])
return choices(unrevealed_words, k=count + 1)
def report_reward(self, reward):
pass
def _print(message, verbose):
if verbose:
sys.stdout.write(message)
def _input(message, verbose):
if verbose:
return input(message)
else:
return ''
def play_game(giver, guesser, board_size=5, board_data=None, verbose=True, saved_path=None):
'''
returns cumulative_team1_score: int, termination_condition: str, team1_turns: int
'''
_print('||| initializing all modules.\n', verbose=verbose)
game = GameWrapper(board_size, board_data)
_print('||| data: {}.\n'.format(list(zip(game.engine.board, game.engine.owner))),
verbose=verbose)
turn = 0
while not game.is_game_over():
if turn == 0:
game.engine.print_board(spymaster=True, verbose=verbose)
else:
game.engine.print_board(spymaster=True, verbose=verbose, clear_screen=False)
_input('\n||| press ENTER to see the next clue for team1.', verbose=verbose)
# get a list of clues.
board = game.engine.board.tolist()
clue_objects = giver.get_next_clue(board,
game.engine.owner,
game.game_state,
game.cumulative_score)
# find the first legal clue, then proceed.
first_valid_clue = None
for clue in clue_objects:
            if game.is_valid_clue(clue.clue_word):
first_valid_clue = clue
break
if first_valid_clue is None:
# All clues are illegal. Abandoning game!
game.result = 'All clues given were illegal.'
break
clue_word, clue_count = first_valid_clue.clue_word, first_valid_clue.count
# get guesses.
_print("||| team1's clue: ({}, {}); \tIntended target words: [{}]\n".format(clue.clue_word, clue.count, clue.intended_board_words), verbose=verbose)
guessed_words = guesser.guess(game.engine.board,
clue_word,
clue_count,
game.game_state,
game.cumulative_score)
_input(', press ENTER to see team1 guesses.\n', verbose=verbose)
guess_list_rewards = game.apply_team1_guesses(first_valid_clue, guessed_words)
rewards_out = []
for w, r in zip(guessed_words, guess_list_rewards):
if r < SCORE_CORRECT_GUESS:
break
rewards_out.append((w, r))
_print('||| rewards: {}\n'.format(rewards_out), verbose=verbose)
if saved_path and game.is_game_over():
guesser.report_reward(guess_list_rewards, saved_path)
else:
guesser.report_reward(guess_list_rewards)
# print the board after team1 plays this turn.
game.engine.print_board(spymaster=True, verbose=verbose)
_print("||| team1's clue: ({}, {}); \tIntended target words: [{}]\n".format(clue.clue_word, clue.count, clue.intended_board_words), verbose=verbose)
_print("||| team1's guesses: {}\n".format(list(zip(guessed_words, guess_list_rewards))), verbose=verbose)
if not game.is_game_over():
_input(", press ENTER to see team2's next move.", verbose=verbose)
team2_guessed_words = game.apply_team2_guesses()
# print the board again after team2 plays this turn.
game.engine.print_board(spymaster=True, verbose=verbose)
_print("||| team1's clue: ({}, {}).\n".format(clue.clue_word, clue.count), verbose=verbose)
_print("||| team1's guess: {}\n".format(list(zip(guessed_words, guess_list_rewards))), verbose=verbose)
_print("||| team2 revealed: {}\n".format(team2_guessed_words), verbose=verbose)
turn += 1
_print('\n||| termination condition: {}\n'.format(game.result), verbose=verbose)
_print('|||\n', verbose=verbose)
_print('||| =============== GAME OVER =================\n', verbose=verbose)
_print('||| =============== team1 score: {}\n'.format(game.cumulative_score), verbose=verbose)
assert game.result is not None
return game.cumulative_score, game.result, turn
def main(args):
guesser_embedding_handler = EmbeddingHandler(args.guesser_embeddings_file)
giver_embedding_handler = EmbeddingHandler(args.giver_embeddings_file)
if args.giver_type == "heuristic":
giver = HeuristicGiver(giver_embedding_handler)
elif args.giver_type == "random":
giver = RandomGiver(giver_embedding_handler)
elif args.giver_type == "wordnet":
giver = WordnetClueGiver()
else:
raise NotImplementedError
if args.game_data:
all_game_data = []
for line in open(args.game_data, mode='rt'):
# allow comments starting with # or %
if line.startswith("#"): continue
if line.startswith("%"): continue
line = line.strip()
words = re.split('[;,]', line)
if len(words) != args.board_size * args.board_size:
if args.verbose:
sys.stdout.write('WARNING: skipping game data |||{}||| due to a conflict with the specified board size: {}'.format(line, args.board_size))
continue
all_game_data.append(line.strip())
else:
# If game data were not specified, we'd like to generate (args.num_games) random
# games. The method `play_game` randomly samples words when the provided game data
# is set to None.
all_game_data = [None] * args.num_games
if args.guesser_type == "heuristic":
guesser = HeuristicGuesser(guesser_embedding_handler)
elif args.guesser_type == "random":
guesser = RandomGuesser()
elif args.guesser_type == "learned":
if args.load_model:
guesser = LearnedGuesser(guesser_embedding_handler,
policy=torch.load(args.load_model),
learning_rate=0.01,
train=False)
else:
guesser = LearnedGuesser(guesser_embedding_handler,
policy=SimilarityThresholdPolicy(300),
learning_rate=0.01,
train=True)
elif args.guesser_type == "learnedstate":
if args.load_model:
guesser = LearnedGuesser(guesser_embedding_handler,
policy=torch.load(args.load_model),
learning_rate=0.01,
train=False)
else:
guesser = LearnedGuesser(guesser_embedding_handler,
policy=SimilarityThresholdGameStatePolicy(300),
learning_rate=0.01,
train=True)
else:
raise NotImplementedError
# keep track of the results of each game.
all_scores = []
all_termination_conditions = defaultdict(int)
all_turns = []
num_positive_score = 0
start_time = datetime.datetime.now()
for i, board_data in tqdm.tqdm(enumerate(all_game_data), desc="games played: "):
saved_path = ""
        # Save a model checkpoint every 100 games (and after the final game, below).
        save_now = (i % 100 == 0)
if args.num_games is not None:
save_now = save_now or i == args.num_games - 1
if args.guesser_type == "learned" and save_now:
if not os.path.exists("./models"):
os.makedirs("./models")
saved_path = "./models/learned" + str(i)
score, termination_condition, turns = play_game(giver=giver, guesser=guesser,
board_size=args.board_size,
board_data=board_data,
verbose=args.interactive,
saved_path=saved_path)
if score > 0:
num_positive_score += 1
all_scores.append(score)
all_termination_conditions[termination_condition] += 1
all_turns.append(turns)
mean_score = sum(all_scores) / len(all_scores)
std_score = np.std(all_scores)
mean_turns = sum(all_turns) / len(all_turns)
std_turns = np.std(all_turns)
# log, for debugging purposes.
if args.verbose:
# this game's results
sys.stdout.write('||| last game score = {}, termination condition = {}, turns = {}\n'.format(score, termination_condition, turns))
# summary of all games' results
sys.stdout.write('|||\n')
sys.stdout.write("||| # of games played = {}, runtime = {}\n".format(len(all_scores), str(datetime.datetime.now() - start_time)))
sys.stdout.write(f"||| # of games won (by team1) = {num_positive_score}\n")
sys.stdout.write('||| avg. game score = {:.2f}, std. of game score = {:.2f}\n'.format(mean_score, std_score))
sys.stdout.write('||| avg. game turns = {:.2f}, std. of game turns = {:.2f}\n'.format(mean_turns, std_turns))
for _termination_condition, _count in all_termination_conditions.items():
sys.stdout.write('||| % of {}: {:.2f}\n'.format(_termination_condition, 1.0 * _count / len(all_scores)))
with open(args.experiment_name + '.experiment', mode='wt') as experiment_results_file:
experiment_results_file.write('name: {}\n'.format(args.experiment_name))
experiment_results_file.write('runtime: {}\n'.format(str(datetime.datetime.now() - start_time)))
experiment_results_file.write('time finished: {}\n'.format(str(datetime.datetime.now())))
experiment_results_file.write("# of games played: {}\n".format(len(all_scores)))
experiment_results_file.write(f"# of games won (by team1) = {num_positive_score}\n")
for _termination_condition, _count in all_termination_conditions.items():
experiment_results_file.write('% of {}: {:.2f}\n'.format(_termination_condition, 1.0 * _count / len(all_scores)))
experiment_results_file.write('avg. game turns = {:.2f}, std. of game turns = {:.2f}\n'.format(mean_turns, std_turns))
experiment_results_file.write('avg. game score = {:.2f}, std. of game score = {:.2f}\n'.format(mean_score, std_score))
if __name__ == "__main__":
argparser = argparse.ArgumentParser()
argparser.add_argument("--guesser", type=str, dest="guesser_type", default="heuristic")
argparser.add_argument("--giver", type=str, dest="giver_type", default="heuristic")
argparser.add_argument("--size", type=int, dest="board_size", default="5")
argparser.add_argument("--interactive", action="store_true")
argparser.add_argument("--num-games", type=int, help="Number of games to play")
argparser.add_argument("--game-data", type=str, default="data/codenames_dev.games")
argparser.add_argument("--guesser-embeddings-file", type=str, dest="guesser_embeddings_file",
default="data/uk_embeddings.txt")
argparser.add_argument("--giver-embeddings-file", type=str, dest="giver_embeddings_file",
default="data/uk_embeddings.txt")
argparser.add_argument("--load-model", dest="load_model", default=None,
help="Will not train if this argument is set.")
argparser.add_argument("--experiment-name", type=str, default="debug")
argparser.add_argument("--verbose", action="store_true")
args = argparser.parse_args()
main(args)
| codenames-master | codenames/gameplay/ai2_hack.py |
from typing import List
from codenames.utils.game_utils import Clue
class Giver:
def get_next_clue(self,
board: List[str],
allIDs: List[int],
game_state: List[int],
current_score: int) -> List[Clue]:
"""
Parameters
----------
board : `List[str]`
All the words on the board.
allIDs : `List[int]`
The true identities of all the words
game_state : `List[int]`
            List of the same size as board telling whether a word on the board is already revealed
        current_score : `int`
Current score
"""
raise NotImplementedError
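# For illustration, a concrete implementation (e.g. HeuristicGiver) is expected to
# return its candidate clues ordered best-first, each shaped like
#   Clue(clue_word='fruit', intended_board_words=['apple', 'pear'], count=2)
# (the words here are hypothetical and purely illustrative).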
| codenames-master | codenames/clue_givers/giver.py |
| codenames-master | codenames/clue_givers/__init__.py |
# from codenames.clue_givers.giver import Giver
import logging
import operator
from itertools import combinations, chain
from typing import List
import numpy as np
from random import choices
from codenames.clue_givers.giver import Giver
from codenames.embedding_handler import EmbeddingHandler
from codenames.utils.game_utils import Clue, DEFAULT_NUM_CLUES, UNREVEALED, GOOD, BAD, CIVILIAN, ASSASSIN, DEFAULT_NUM_TARGETS, CIVILIAN_PENALTY, ASSASSIN_PENALTY, MULTIGROUP_PENALTY
class HeuristicGiver(Giver):
def __init__(self,
embedding_handler: EmbeddingHandler,
blacklist: List[str] = [],
current_board: List[str] = None):
self.embedding_handler = embedding_handler
self.blacklist = blacklist
self.current_board = current_board
    ''' Returns a list of up to NUM_CLUES Clue candidates for a given group of words '''
def _get_clues(self, pos_words_subset, neg_words, civ_words, ass_words, aggressive, num_clues=DEFAULT_NUM_CLUES,MULTIGROUP_PENALTY=MULTIGROUP_PENALTY):
clues = []
count = len(pos_words_subset) - 1
pos_words_vectors = self.embedding_handler.embed_words_list(pos_words_subset)
neg_words_vectors = self.embedding_handler.embed_words_list(neg_words)
civ_words_vectors = self.embedding_handler.embed_words_list(civ_words)
ass_words_vectors = self.embedding_handler.embed_words_list(ass_words)
if pos_words_vectors is None or neg_words_vectors is None:
return None
mean_vector = pos_words_vectors.mean(axis=0)
mean_vector /= np.sqrt(mean_vector.dot(mean_vector))
dotproducts = np.dot(self.embedding_handler.embedding_weights, mean_vector).reshape(-1)
closest = np.argsort(dotproducts)[::-1]
'''Skew 'good clues' towards larger groups of target words'''
if aggressive:
if count <= 1:
MULTIGROUP_PENALTY += .1
elif count <= 3:
MULTIGROUP_PENALTY += .4
else:
MULTIGROUP_PENALTY += .7
for i in range(num_clues):
clue_index = closest[i]
clue_word = self.embedding_handler.index_to_word[clue_index]
clue_vector = self.embedding_handler.get_embedding_by_index(clue_index)
if clue_word in pos_words_subset:
continue
clue_pos_words_similarities = np.dot(pos_words_vectors, clue_vector)
clue_neg_words_similarities= np.dot(neg_words_vectors, clue_vector)
min_clue_cosine = np.min(clue_pos_words_similarities) + MULTIGROUP_PENALTY
#logging.info('potential clue : {}'.format(clue_word))
max_neg_cosine = np.max(clue_neg_words_similarities)
if max_neg_cosine >= min_clue_cosine:
continue
if civ_words_vectors is not None:
clue_civ_words_similarities = np.dot(civ_words_vectors, clue_vector)
max_civ_cosine = np.max(clue_civ_words_similarities)
if max_civ_cosine >= min_clue_cosine - CIVILIAN_PENALTY:
continue
if ass_words_vectors is not None:
max_ass_cosine = np.dot(ass_words_vectors,clue_vector)
if max_ass_cosine >= min_clue_cosine - ASSASSIN_PENALTY:
continue
clues.append((Clue(clue_word, pos_words_subset, count),
np.mean(clue_pos_words_similarities)))
if len(clues) == 0:
clue_index = closest[0]
clue_word = self.embedding_handler.index_to_word[clue_index]
clues.append((Clue(clue_word,pos_words_subset,count),np.mean(clue_pos_words_similarities)))
return clues
    '''List of Clues sorted by descending cosine similarity'''
def get_next_clue(self,
board: List[str],
allIDs: List[int],
game_state: List[int],
score: int):
if self.current_board != board:
self.blacklist = []
self.current_board = board
pos_words = [board[idx] for idx, val in enumerate(allIDs) if val == GOOD]
neg_words = [board[idx] for idx, val in enumerate(allIDs) if val == BAD]
civ_words = [board[idx] for idx, val in enumerate(allIDs) if val == CIVILIAN]
ass_words = [board[idx] for idx, val in enumerate(allIDs) if val == ASSASSIN]
available_targets = [word for word in pos_words if
game_state[board.index(word)] == UNREVEALED]
available_neg = [word for word in neg_words if
game_state[board.index(word)] == UNREVEALED]
available_civ = [word for word in civ_words if
game_state[board.index(word)] == UNREVEALED]
available_ass = [word for word in ass_words if
game_state[board.index(word)] == UNREVEALED]
num_revealed = 0
for idx, value in enumerate(game_state):
if value == -1:
num_revealed += 1
if num_revealed > len(game_state) / 2 and score < num_revealed:
aggressive = True
else:
aggressive = False
if len(available_targets) > DEFAULT_NUM_TARGETS:
num_words = DEFAULT_NUM_TARGETS
else:
num_words = len(available_targets)
clues_by_group = []
for count in range(num_words, 0, -1):
for group in combinations(range(num_words), count):
target_group = [available_targets[i] for i in group]
clues_for_group = self._get_clues(target_group, available_neg, available_civ, available_ass, aggressive)
if clues_for_group is not None:
clues_by_group.append(self._get_clues(target_group, available_neg, available_civ, available_ass, aggressive))
if len(clues_by_group) == 0:
options = choices(list(self.embedding_handler.word_indices.keys()), k=3)
for option in options:
clues_by_group.append(Clue(clue_word=option, intended_board_words=[], count=0))
else:
clues_by_group = list(chain.from_iterable(clues_by_group))
clues_by_group.sort(key=operator.itemgetter(1),reverse=True)
clues_by_group = [clue[0] for clue in clues_by_group]
filtered_clues_by_group = [clue for clue in clues_by_group if clue.clue_word not in self.blacklist]
self.blacklist.append(filtered_clues_by_group[0].clue_word)
return filtered_clues_by_group
| codenames-master | codenames/clue_givers/heuristic_giver.py |
from overrides import overrides
from gensim.test.utils import datapath, get_tmpfile
from gensim.models import KeyedVectors
from itertools import combinations, chain, permutations
from codenames.clue_givers.giver import Giver
import numpy as np
from codenames.utils.game_utils import get_available_choices, Clue
import operator
from typing import List
import logging
from nltk.corpus import wordnet as wn
class WordnetClueGiver(Giver):
    '''
    Clue giver that proposes clues based on WordNet hypernym relations between board
    words. It is constructed with only the number of clues to generate (NUM_CLUES);
    the board and the card identities are passed to get_next_clue at query time.
    '''
def __init__(self,
NUM_CLUES: int=10):
#self.board = board
super().__init__()#board, allIDs)
#positive words
#self.pos_words = [board[idx] for idx, val in enumerate(allIDs) if val == 1]
#negative words
# self.neg_words = [word for word in board if word not in self.pos_words]
#minimum number of clues we want our get_clues method to generate
self.NUM_CLUES = NUM_CLUES
'''
Internal method, used to get clues for a given group of pos and neg words.
Parameters
group : the list of positive words still in play
neg_words: the list of negative words still in play
    NUM_CLUES (instance attribute): the minimum number of clues to generate. Default = 10
epsilon: the weight we want put in the scoring function. Scoring function considers
both similarity and number of cards in a clue. Epsilon is the weight given to the
number of clues, and (1-Epsilon) is the weight given to the similarity of the clue word.
Default = 0.8
'''
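    # Scoring sketch (as implemented below): each candidate common hypernym accumulates,
    # per supporting synset pair,
    #   (1 - epsilon) * (len(synsetgroup) - pos) * path_similarity(hypernym, synset) + epsilon * num
    # and a clue's final score is the mean of its accumulated contributions.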
def _get_clues(self, group, neg_words, epsilon=0.8, safe_play=True):
clues = []
if safe_play:
# to play safe, just target one word at a time
num = 1
else:
#num starts off trying to give a hint for all positive words then moves down
num = len(group)
#potential permutations of positive cards of size num
potentials = list(permutations(group, num))
#placeholder variable for the location we are in our list of potential groups
place_in_potentials = 0
done = False
while not done:
#grabs potential group of positive clues
pot_clue_group = potentials[place_in_potentials]
#synsets are the positive card synsets. Restricted to the case where the first
#lemma is the seed word, otherwise we get weird clues.
synsets = []
for word in pot_clue_group:
curr_comm_synsets = wn.synsets(word)
curr_comm_synsets = [t for t in curr_comm_synsets if t.lemmas()[0].name() == word]
synsets.append(curr_comm_synsets)
#neg_synsets are the wordnet synsets for negative cards on the board
neg_synsets = []
for word in neg_words:
curr_comm_synsets = wn.synsets(word)
curr_comm_synsets = [t for t in curr_comm_synsets if t.lemmas()[0].name() == word]
neg_synsets.extend(curr_comm_synsets)
#dictionary variable, used to capture the score of a potential clue
clue_scorer = {}
#list used to store common synsets
common_synsets = []
pos1 = 0
pos2 = 0
#we need to iterate through synsets in two stages since we can only get pairwise comparisons
for synsetgroup1 in synsets:
common_synsets.append([])
pos1 = 0
for synset1 in synsetgroup1:
pos1 += 1
for synsetgroup2 in synsets:
pos2 = 0
curr_comm_synsets = []
for synset2 in synsetgroup2:
pos2 += 1
#if we aren't doing single word clues, we don't want to compare the same synsets
if synset1 == synset2 and num > 1:
continue
#the current common synsets for these two instances of synsets
curr_comm = synset1.common_hypernyms(synset2)
for common_ancester in curr_comm:
is_neg = False
#limits out synsets which are in negative word's hierarchy
for neg_synset in neg_synsets:
if common_ancester in neg_synset.common_hypernyms(neg_synset) or common_ancester == neg_synset:
is_neg = True
if not is_neg:
#gets similarity between clue + synset1
len1 = common_ancester.path_similarity(synset1)
curr_comm_synsets.append(common_ancester)
#adds synset to dict if not already there
if common_ancester not in clue_scorer:
clue_scorer[common_ancester] = []
                                    #adds the score for the synset to the dict, according to scoring function
clue_scorer[common_ancester].append((1-epsilon) * (len(synsetgroup1) - pos1) * len1 + (epsilon * num))
#want to do for second synset if not the same
if synset1 != synset2:
len2 = common_ancester.path_similarity(synset2)
clue_scorer[common_ancester].append((1-epsilon) * (len(synsetgroup2) - pos2) * len2 + (epsilon * num))
common_synsets[-1].extend(curr_comm_synsets)
#now need to determine the intersection among the pairs of similar words so we can be sure that a clue
#applies to all words
if len(common_synsets) < 1:
continue
intersect = set(common_synsets[0])
for s in common_synsets[1:]:
intersect.intersection_update(s)
#if this is true, we found a clue
if len(intersect) != 0:
#for each clue in the set,
for clue in intersect:
#Want to go through all lemmas since those are ways of expressing a synset
for lemma in clue.lemmas():
#filters out multi-words
if "_" not in lemma.name() and lemma.name() not in group:
#appends clue along with normalized score
clues.append((Clue(lemma.name(), pot_clue_group, num-1), float(np.sum(clue_scorer[clue])) / float(len(clue_scorer[clue]))))
#advances to the next group of num words
place_in_potentials += 1
#if we need to decrement num and get new permutations
if place_in_potentials >= len(potentials):
num -= 1
#break conditions, when we've tried all combos or reached our minimum
if num < 1 or len(clues) >= self.NUM_CLUES:
break
#if we don't break, we get new permutations and reset our place
potentials = list(permutations(group, num))
place_in_potentials = 0
return clues
'''
External method to get_next_clue, which gets a list of clues, orders them by similarity, and returns them.
Params:
game_state: List of ints representing the current state of play
score: score of the game
epsilon: optional epsilon param, default will be equal to the score scaled to the range 0-1.
Intuitively, this means if the score is higher, we will take more risks and put more focus on
multi-word clues.
Returns:
all_clues, a list of Clue type.
'''
def get_next_clue(self, board: List[str],
allIDs: List[int],
game_state: List[int],
score: int,
epsilon: float=-1.0):
pos_words = [board[idx] for idx, val in enumerate(allIDs) if val == 1]
# negative words
neg_words = [board[idx] for idx, val in enumerate(allIDs) if val != 1]
available_targets = [word for word in pos_words if game_state[board.index(word)] == -1]
active_neg_words = [word for word in neg_words if game_state[board.index(word)] == -1]
logging.info(available_targets)
#scales epsilon based on score
if epsilon == -1.0:
epsilon = (score - -8.0) / (9.0 - -8.0)
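            # e.g. a score of 0 maps to epsilon ~= 0.47 and the maximum score of 9
            # maps to 1.0, assuming scores range over [-8, 9] as this formula implies.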
#Gets clues
all_clues = self._get_clues(available_targets, active_neg_words)
#sorts by scoring function
all_clues.sort(key=operator.itemgetter(1))
all_clues = list(reversed(all_clues))
all_clues = [clue[0] for clue in all_clues]
return all_clues[:self.NUM_CLUES]
| codenames-master | codenames/clue_givers/wordnet_cluegiver.py |
from gensim.models import KeyedVectors
from itertools import combinations, chain
from codenames.embedding_handler import EmbeddingHandler
from codenames.clue_givers.giver import Giver
import numpy as np
from codenames.utils.game_utils import Clue
import operator
from typing import List
import logging
from sklearn.metrics.pairwise import cosine_similarity
class HeuristicGiver2(Giver):
def __init__(self, board: [str],
allIDs: List[int],
embeddinghandler: EmbeddingHandler,
NUM_CLUES: int=50):
        super().__init__()
        # The Giver base class no longer stores the board, so keep a copy here
        # (as an array, matching how get_next_clue uses self.board below).
        self.board = np.asarray(board)
self.assassin = [board[idx] for idx, val in enumerate(allIDs) if val == 0]
self.pos_words = [board[idx] for idx, val in enumerate(allIDs) if val == 1]
self.neg_words = [board[idx] for idx, val in enumerate(allIDs) if val == 2]
self.civilians = [board[idx] for idx, val in enumerate(allIDs) if val == 3]
self.embedding_handler = embeddinghandler
self.NUM_CLUES = NUM_CLUES
''' Returns list of n=NUM_CLUES of Clues for a given group of words'''
def _get_clues(self, group, assassin, neg_words, civilians, aggressive, NUM_CLUES):
clues = []
count = len(group)
clue_indices = [self.embedding_handler.vocab[word].index for word in group]
clue_vectors = self.embedding_handler.syn0[clue_indices]
neg_indices = [self.embedding_handler.vocab[word].index for word in neg_words]
neg_vectors = self.embedding_handler.syn0[neg_indices]
civilian_indices = [self.embedding_handler.vocab[word].index for word in civilians]
civilian_vectors = self.embedding_handler.syn0[civilian_indices]
assassin_indices = [self.embedding_handler.vocab[word].index for word in assassin]
assassin_vectors = self.embedding_handler.syn0[assassin_indices]
mean_vector = clue_vectors.mean(axis=0)
mean_vector /= np.sqrt(mean_vector.dot(mean_vector))
cosines = cosine_similarity(self.embedding_handler.syn0, mean_vector.reshape(1,-1)).flatten()
closest = np.argsort(cosines)[::-1]
civilian_penalty = .05
assassin_penalty = .08
'''Skew 'good clues' towards larger groups of target words'''
if aggressive and count > 1 and count < 3:
multigroup_penalty = .4
elif aggressive and count > 2:
multigroup_penalty = .8
else:
multigroup_penalty = 0
for i in range(NUM_CLUES):
clue = None
clue_index = closest[i]
clue = self.embedding_handler.index2word[clue_index]
if clue.lower() in group:
continue
clue_vector = self.embedding_handler.syn0[clue_index].reshape(1,-1)
clue_cosine = cosine_similarity(clue_vectors, clue_vector).flatten()
min_clue_cosine = np.min(clue_cosine) + multigroup_penalty
if neg_words:
neg_cosine = cosine_similarity(neg_vectors, clue_vector)
max_neg_cosine = np.max(neg_cosine)
if max_neg_cosine >= min_clue_cosine:
continue
if civilians:
civilian_cosine = cosine_similarity(civilian_vectors, clue_vector).flatten()
max_civ_cosine = np.max(civilian_cosine)
if max_civ_cosine >= min_clue_cosine - civilian_penalty:
continue
if assassin:
assassin_cosine = cosine_similarity(assassin_vectors, clue_vector).flatten()
max_ass_cosine = np.max(assassin_cosine)
if max_ass_cosine >= min_clue_cosine- assassin_penalty:
continue
clues.append((Clue(clue.lower(), group, count),min_clue_cosine))
return clues
def _unique_clues(self, ordered_list_of_clues):
clue_words = []
list_of_clues = []
for clue in ordered_list_of_clues:
if clue[0].clue_word not in clue_words:
clue_words.append(clue[0].clue_word)
list_of_clues.append(clue)
return list_of_clues
    '''List of Clues sorted by descending cosine similarity'''
def get_next_clue(self, game_state: List[int],
score: int):
available_targets = [word for word in self.pos_words if game_state[self.board.tolist().index(word)] == -1]
available_civilians = [word for word in self.civilians if game_state[self.board.tolist().index(word)] == -1]
available_neg_words = [word for word in self.neg_words if game_state[self.board.tolist().index(word)] == -1]
available_assassins = [word for word in self.assassin if game_state[self.board.tolist().index(word)] == -1]
num_revealed = 0
for idx, value in enumerate(game_state):
if value == -1:
num_revealed += 1
if num_revealed > len(game_state)/2 and score < num_revealed:
aggressive = True
else:
aggressive = False
all_clues = []
num_words = len(available_targets)
for count in range(num_words, 0, -1):
for group in combinations(range(num_words),count):
logging.info(group, self.neg_words)
target_group = [available_targets[i] for i in group]
all_clues.append(self._get_clues(available_targets, available_assassins, available_neg_words, available_civilians, aggressive, self.NUM_CLUES))
all_clues = list(chain.from_iterable(all_clues))
all_clues.sort(key=operator.itemgetter(1),reverse=True)
all_clues = self._unique_clues(all_clues)
all_clues = [clue[0] for clue in all_clues]
return all_clues
def main():
test_embed = KeyedVectors.load_word2vec_format('~/Downloads/GoogleNews-vectors-negative300-SLIM.bin',binary=True)
test_board = ["water", "notebook", "board", "boy", "shoe", "cat", "pear", "sandwich","chair","pants","phone","internet"]
test_allIDs = [1, 2, 2, 1, 3, 1, 2, 3,1,1,0,1]
    cg = HeuristicGiver2(test_board, test_allIDs, test_embed)
if __name__ == "__main__":
main()
| codenames-master | codenames/clue_givers/heuristic_giver2.py |
from typing import Optional, Dict, List, Tuple
import csv
import random
import spacy
import torch
import tqdm
# Load the spacy model
nlp = spacy.load('en_core_web_sm')
# Just some type aliases to make things cleaner
RawRow = List[str]
ProcessedRow = Tuple[str, List[str]]
def process(row: RawRow) -> ProcessedRow:
"""
TODO: implement
row is [category, text]
want to return (category, list_of_tokens)
use spacy ("nlp") to tokenize text, and then
use token.text to get just the string tokens.
"""
pass
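# For illustration (hypothetical row): process(["tech", "Apple launches new phone"])
# would be expected to return ("tech", ["Apple", "launches", "new", "phone"]).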
def load_data(filename: str) -> List[ProcessedRow]:
"""
TODO: implement
Read in the file, and use `process` on each line.
Make sure to use csv.reader!
"""
pass
# TODO: Load training data, validation data, and test data
training_data = ...
validation_data = ...
test_data = ...
# TODO: construct mappings idx_to_word and word_to_idx
# Hint: use a set to get unique words, then use `enumerate`
# to get a mapping word <-> index
idx_to_word = ...
word_to_idx = ...
# TODO: construct mapping idx_to_label and label_to_idx
idx_to_label = ...
label_to_idx = ...
class Model(torch.nn.Module):
def __init__(self,
word_to_idx: Dict[str, int],
label_to_idx: Dict[str, int],
embedding_dim: int = 100) -> None:
super().__init__()
# TODO: store passed in parameters
# TODO: create a torch.nn.Embedding layer.
# need to specify num_embeddings and embedding_dim
# TODO: create a torch.nn.Linear layer
# need to specify in_features and out_features
# TODO: create a loss function that's just torch.nn.CrossEntropyLoss
def forward(self,
tokens: List[str],
label: Optional[str] = None) -> Dict[str, torch.Tensor]:
# TODO: convert the tokens to a tensor of word indices
# TODO: use the embedding layer to embed the tokens
# TODO: take the mean of the embeddings along dimension 0 (sequence length)
# TODO: pass the encoding through the linear layer to get logits
if label is not None:
# TODO: find the corresponding label_id and stick it in a 1-D tensor
# TODO: use .unsqueeze(0) to add a batch dimension to the logits
# TODO: compute the loss
pass
# TODO: return a dict with the logits and (if we have it) the loss
pass
NUM_EPOCHS = 100
# instantiate the model
model = Model(word_to_idx, label_to_idx, 100)
# instantiate an optimizer
optimizer = torch.optim.Adagrad(model.parameters())
for epoch in range(NUM_EPOCHS):
print(f"epoch {epoch}")
# shuffle the training data
random.shuffle(training_data)
epoch_loss = 0.0
num_correct = 0
num_seen = 0
with tqdm.tqdm(training_data) as it:
# Set the model in train mode
model.train()
for label, text in it:
# TODO: zero out the gradients
# TODO: call the model on the inputs
# TODO: pull the loss out of the output
# TODO: add loss.item() to epoch_loss
# TODO: call .backward on the loss
# TODO: step the optimizer
# TODO: get the (actual) label_id and the predicted label_id
# hint: use torch.argmax for the second
# TODO: update num_seen and num_correct
# TODO: compute accuracy
# TODO: add accuracy and loss to the tqdm description
it.set_description(f"")
# Compute validation accuracy
# TODO: set the model to .eval() mode
# set num_correct and num_seen to 0
num_correct = 0
num_seen = 0
validation_loss = 0.0
with tqdm.tqdm(validation_data) as it:
for label, text in it:
# TODO: call the model on the inputs
# TODO: compute the actual label_id and the predicted label_id
# TODO: increment counters
# TODO: add accuracy and loss to the tqdm description
it.set_description(f"")
# TODO: evaluate accuracy on test dataset
| aiconf-allennlp-tutorial-master | by_hand.py |
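One way the `process` and `load_data` TODOs at the top of the exercise above might be filled in (a sketch, not the tutorial's official solution). It assumes each csv row is `[category, text]` and that the `en_core_web_sm` spacy model is installed; the path in the final comment mirrors the output of `process_data.py`.

from typing import List, Tuple
import csv

import spacy

nlp = spacy.load('en_core_web_sm')

RawRow = List[str]
ProcessedRow = Tuple[str, List[str]]


def process(row: RawRow) -> ProcessedRow:
    category, text = row
    # spacy tokenizes the text; token.text gives back the plain strings
    tokens = [token.text for token in nlp(text)]
    return category, tokens


def load_data(filename: str) -> List[ProcessedRow]:
    with open(filename) as f:
        return [process(row) for row in csv.reader(f) if row]


# e.g. training_data = load_data('data/bbc-train.csv')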
import csv
import pathlib
import os
import re
import tqdm
DATA_ROOT = pathlib.Path("data")
BBC_ROOT = DATA_ROOT / 'bbc'
train = []
validate = []
test = []
for category in os.listdir(BBC_ROOT):
path = BBC_ROOT / category
if os.path.isdir(path):
for fn in os.listdir(path):
with open(path / fn, errors="ignore") as f:
text = f.read()
lines = text.split("\n")
lines = [line for line in lines if line]
lines = lines[:2]
text = "\n\n".join(lines)
number = int(fn[:3])
if number % 5 < 3:
train.append((category, text))
elif number % 5 == 3:
validate.append((category, text))
else:
test.append((category, text))
for fn, instances in [(DATA_ROOT / 'bbc-train.csv', train),
(DATA_ROOT / 'bbc-validate.csv', validate),
(DATA_ROOT / 'bbc-test.csv', test)]:
print(fn)
with open(fn, 'wt') as f:
writer = csv.writer(f)
for row in tqdm.tqdm(instances):
writer.writerow(row)
| aiconf-allennlp-tutorial-master | process_data.py |
from allennlp.common.util import JsonDict
from allennlp.data import Instance
from allennlp.predictors import Predictor
from aiconf.reader import BBCReader
@Predictor.register("bbc")
class BBCPredictor(Predictor):
def _json_to_instance(self, json_dict: JsonDict) -> Instance:
# 1. we expect that the json_dict has a "text" field and possibly
# a "category" field, so extract those values
# 2. every predictor has a self._dataset_reader, so just use
# text_to_instance from there to return an instance
return Instance({})
| aiconf-allennlp-tutorial-master | aiconf/predictor.py |
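A possible way to fill in `_json_to_instance` above (a sketch, not the tutorial's official solution). It assumes the incoming JSON looks like {"text": ..., "category": ...} with "category" optional, and the class name `SketchBBCPredictor` is illustrative.

from allennlp.common.util import JsonDict
from allennlp.data import Instance
from allennlp.predictors import Predictor


class SketchBBCPredictor(Predictor):
    def _json_to_instance(self, json_dict: JsonDict) -> Instance:
        text = json_dict["text"]
        category = json_dict.get("category")   # optional at prediction time
        # every Predictor carries the dataset reader it was constructed with
        return self._dataset_reader.text_to_instance(text, category)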
import pathlib
from allennlp.common.testing import ModelTestCase
from allennlp.data.dataset import Batch
from allennlp.data.token_indexers import SingleIdTokenIndexer
from allennlp.data.vocabulary import Vocabulary
from allennlp.modules.text_field_embedders import BasicTextFieldEmbedder
from allennlp.modules.token_embedders import Embedding
from allennlp.modules.seq2vec_encoders import BagOfEmbeddingsEncoder
from aiconf.reader import BBCReader
from aiconf.model import BBCModel
FIXTURES_ROOT = pathlib.Path(__file__).parent / 'fixtures'
class PaperModelTest(ModelTestCase):
def setUp(self):
super().setUp()
# 1. call self.set_up_model with the path to the experiment config
# and the path to the test fixture
def test_simple_tagger_can_train_save_and_load(self):
# self.ensure_model_can_train_save_and_load(self.param_file)
pass
def test_forward_pass_runs_correctly(self):
# feel free to add extra tests here
pass
| aiconf-allennlp-tutorial-master | aiconf/model_test.py |
from aiconf.reader import BBCReader
from aiconf.model import BBCModel
from aiconf.predictor import BBCPredictor
| aiconf-allennlp-tutorial-master | aiconf/__init__.py |
from typing import Dict, Optional
from allennlp.data.vocabulary import Vocabulary
from allennlp.models import Model
from allennlp.modules.text_field_embedders import TextFieldEmbedder
from allennlp.modules.seq2vec_encoders import Seq2VecEncoder
from allennlp.nn.util import get_text_field_mask
from allennlp.training.metrics import CategoricalAccuracy
import torch
@Model.register("bbc")
class BBCModel(Model):
def __init__(self,
vocab: Vocabulary) -> None:
super().__init__(vocab)
# 1. We want the constructor to accept a TextFieldEmbedder
# and we need to save it as a class variable.
# 2. We want the constructor to accept a Seq2VecEncoder
# and we need to save it as a class variable
# 3. We need to construct the final linear layer, it should have
# in_features = the output dimension of the Seq2VecEncoder
# out_features = the number of classes we're predicting
# We can get the latter from the "labels" namespace of the vocabulary
# 4. We also need to instantiate a loss function for our model.
# Here we'll just use PyTorch's built in cross-entropy loss
# https://pytorch.org/docs/stable/nn.html#crossentropyloss
# 5. Finally, we want to track some metrics as we train, at the very
# least, categorical accuracy:
# https://allenai.github.io/allennlp-docs/api/allennlp.training.metrics.html#categorical-accuracy
# store them in a dictionary so that `self.get_metrics` works correctly.
def forward(self) -> Dict[str, torch.Tensor]:
# Our forward function needs to take arguments that correspond
# to the fields of our instance.
# 1. In this case we'll always have a "text" input
# 2. and we'll sometimes have a "category" input
# (so it should have a None default value for when we're doing inference)
# 3. our first step should be to apply our TextFieldEmbedder to the text input
# 4. then we should apply our Seq2VecEncoder to the embedded text
# We'll need to provide a _mask_ which we can get from util.get_text_field_mask
# 5. we can then apply apply our linear layer to the encoded text to get
# the logits corresponding to the predicted class probabilities
# 6. our outputs need to be in a dict, so create one that contains the logits
# 7. then, only if a `category` was provided,
# 7a. compute the loss and add it to the output
# 7b. update all the metrics
# 8. finally, return the output
return {}
def get_metrics(self, reset: bool = False) -> Dict[str, float]:
return {name: metric.get_metric(reset)
for name, metric in self.metrics.items()}
| aiconf-allennlp-tutorial-master | aiconf/model.py |
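A sketch of how the numbered TODOs in `BBCModel` above might be realized, not the tutorial's official solution. The class name `SketchBBCModel` and the "accuracy" metric key are illustrative; the constructor arguments follow the TODO comments (a `TextFieldEmbedder` and a `Seq2VecEncoder`).

from typing import Dict, Optional

import torch
from allennlp.data.vocabulary import Vocabulary
from allennlp.models import Model
from allennlp.modules.text_field_embedders import TextFieldEmbedder
from allennlp.modules.seq2vec_encoders import Seq2VecEncoder
from allennlp.nn.util import get_text_field_mask
from allennlp.training.metrics import CategoricalAccuracy


class SketchBBCModel(Model):
    def __init__(self,
                 vocab: Vocabulary,
                 text_field_embedder: TextFieldEmbedder,
                 encoder: Seq2VecEncoder) -> None:
        super().__init__(vocab)
        self.text_field_embedder = text_field_embedder
        self.encoder = encoder
        # one logit per label in the "labels" namespace
        self.classifier = torch.nn.Linear(in_features=encoder.get_output_dim(),
                                          out_features=vocab.get_vocab_size("labels"))
        self.loss = torch.nn.CrossEntropyLoss()
        self.metrics = {"accuracy": CategoricalAccuracy()}

    def forward(self,
                text: Dict[str, torch.Tensor],
                category: Optional[torch.Tensor] = None) -> Dict[str, torch.Tensor]:
        mask = get_text_field_mask(text)
        embedded = self.text_field_embedder(text)
        encoded = self.encoder(embedded, mask)
        logits = self.classifier(encoded)
        output = {"logits": logits}
        if category is not None:
            output["loss"] = self.loss(logits, category)
            for metric in self.metrics.values():
                metric(logits, category)
        return output

    def get_metrics(self, reset: bool = False) -> Dict[str, float]:
        return {name: metric.get_metric(reset) for name, metric in self.metrics.items()}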
import pathlib
from allennlp.common.testing import AllenNlpTestCase
from allennlp.data.token_indexers import SingleIdTokenIndexer
from aiconf.reader import BBCReader
FIXTURES_ROOT = pathlib.Path(__file__).parent / 'fixtures'
class ReaderTest(AllenNlpTestCase):
def test_reader(self):
token_indexers = {"tokens": SingleIdTokenIndexer()}
reader = BBCReader(token_indexers)
instances = reader.read(str(FIXTURES_ROOT / 'tiny.csv'))
# Some ideas of things to test:
# * test that there are 5 instances
# * test that there's one of each label
# * test that the first instance has the right values in its fields
| aiconf-allennlp-tutorial-master | aiconf/reader_test.py |
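A sketch of the assertions suggested in the comments above. The expected counts follow the test ideas listed there (five instances, one per label); the exact field values naturally depend on what `fixtures/tiny.csv` actually contains.

def check_instances(instances):
    # the tutorial comments suggest the fixture holds five instances,
    # one per BBC category
    assert len(instances) == 5
    labels = {instance.fields["category"].label for instance in instances}
    assert len(labels) == 5
    # the first instance should at least have a non-empty tokenized text field
    first_tokens = [token.text for token in instances[0].fields["text"].tokens]
    assert first_tokens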
from typing import Iterable, Dict, Optional
import gzip
import csv
from allennlp.common.file_utils import cached_path
from allennlp.data.dataset_readers import DatasetReader
from allennlp.data.fields import TextField, LabelField
from allennlp.data.instance import Instance
from allennlp.data.token_indexers import TokenIndexer
from allennlp.data.tokenizers import Tokenizer, WordTokenizer
@DatasetReader.register("bbc")
class BBCReader(DatasetReader):
def __init__(self,
token_indexers: Dict[str, TokenIndexer],
tokenizer: Tokenizer = WordTokenizer()) -> None:
super().__init__()
self.token_indexers = token_indexers
self.tokenizer = tokenizer
def text_to_instance(self, text: str, category: Optional[str] = None) -> Instance:
"""
1. tokenize text
2. create a TextField for the text
3. create a LabelField for the category (if provided)
4. return an Instance
"""
return Instance(fields={})
def _read(self, file_path: str) -> Iterable[Instance]:
"""
Here our data is a csv with rows [category, text], so we want to
1. read the csv file
2. pass the fields to text_to_instance
3. yield the instances
"""
yield Instance({})
| aiconf-allennlp-tutorial-master | aiconf/reader.py |
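One possible completion of `text_to_instance` and `_read` above (a sketch, not the tutorial's official solution); it assumes csv rows of the form `[category, text]` as described in the docstrings, and the class name `SketchBBCReader` is illustrative.

from typing import Dict, Iterable, Optional
import csv

from allennlp.common.file_utils import cached_path
from allennlp.data.dataset_readers import DatasetReader
from allennlp.data.fields import TextField, LabelField
from allennlp.data.instance import Instance
from allennlp.data.token_indexers import TokenIndexer
from allennlp.data.tokenizers import Tokenizer, WordTokenizer


class SketchBBCReader(DatasetReader):
    def __init__(self,
                 token_indexers: Dict[str, TokenIndexer],
                 tokenizer: Tokenizer = WordTokenizer()) -> None:
        super().__init__()
        self.token_indexers = token_indexers
        self.tokenizer = tokenizer

    def text_to_instance(self, text: str, category: Optional[str] = None) -> Instance:
        tokens = self.tokenizer.tokenize(text)
        fields = {"text": TextField(tokens, self.token_indexers)}
        if category is not None:
            # only labeled data carries a gold category
            fields["category"] = LabelField(category)
        return Instance(fields)

    def _read(self, file_path: str) -> Iterable[Instance]:
        with open(cached_path(file_path)) as data_file:
            for row in csv.reader(data_file):
                if not row:
                    continue
                category, text = row
                yield self.text_to_instance(text, category)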
| aiconf-allennlp-tutorial-master | aiconf/predictor_test.py |
"""
In order to create a package for pypi, you need to follow several steps.
1. Create a .pypirc in your home directory. It should look like this:
```
[distutils]
index-servers =
pypi
pypitest
[pypi]
repository=https://pypi.python.org/pypi
username=deep-qa
password= Get the password from LastPass.
[pypitest]
repository=https://testpypi.python.org/pypi
username=deep-qa
password= Get the password from LastPass.
```
run chmod 600 ~/.pypirc so only you can read/write it.
2. Update the RELEASE.md with the new features, bug fixes and api changes provided in this release.
3. Change the version in docs/conf.py and setup.py.
4. Commit these changes with the message: "Release: VERSION"
5. Add a tag in git to mark the release: "git tag VERSION -m'Adds tag VERSION for pypi' "
Push the tag to git: git push --tags origin master
6. Build both the sources and the wheel. Do not change anything in setup.py between
creating the wheel and the source distribution (obviously).
For the wheel, run: "python setup.py bdist_wheel" in the top level deep_qa directory.
(this will build a wheel for the python version you use to build it - make sure you use python 3.x).
For the sources, run: "python setup.py sdist"
You should now have a /dist directory with both .whl and .tar.gz source versions of deep_qa.
7. Check that everything looks correct by uploading the package to the pypi test server:
twine upload dist/* -r pypitest
    (pypi suggests using twine, as other methods upload files via plaintext.)
Check that you can install it in a virtualenv by running:
pip install -i https://testpypi.python.org/pypi deep_qa
8. Upload the final version to actual pypi:
twine upload dist/* -r pypi
9. Copy the release notes from RELEASE.md to the tag in github once everything is looking hunky-dory.
"""
from setuptools import setup, find_packages
try:
import pypandoc
long_description = pypandoc.convert('README.md', 'rst')
except(IOError, ImportError):
long_description = open('README.md').read()
setup(name='deep_qa',
version='0.1.1',
description='Using deep learning to answer Aristo\'s science questions',
long_description=long_description,
classifiers=[
'Development Status :: 3 - Alpha',
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python :: 3.5',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
],
keywords='deep_qa NLP deep learning machine reading',
url='https://github.com/allenai/deep_qa',
author='Matt Gardner',
author_email='[email protected]',
license='Apache',
packages=find_packages(),
install_requires=[
'keras==2.0.5',
'tensorflow>=1.0.1', # If you are using GPUs, you will need to install tensorflow-gpu.
'h5py',
'scikit-learn',
'grpcio',
'grpcio-tools',
'pyhocon',
'dill',
'typing',
'numpy',
'matplotlib',
'spacy',
'nltk',
'overrides'
],
setup_requires=['pytest-runner'],
tests_require=['pytest'],
include_package_data=True,
zip_safe=False)
| deep_qa-master | setup.py |
from typing import Dict, List, Tuple, Union
import sys
import logging
import os
import json
from copy import deepcopy
import random
import pyhocon
import numpy
# pylint: disable=wrong-import-position
from .common.params import Params, replace_none, ConfigurationError
from .common.tee_logger import TeeLogger
logger = logging.getLogger(__name__) # pylint: disable=invalid-name
def prepare_environment(params: Union[Params, dict]):
"""
Sets random seeds for reproducible experiments. This may not work as expected
if you use this from within a python project in which you have already imported Keras.
If you use the scripts/run_model.py entry point to training models with this library,
your experiments should be reproducible. If you are using this from your own project,
you will want to call this function before importing Keras.
Parameters
----------
params: Params object or dict, required.
A ``Params`` object or dict holding the json parameters.
"""
seed = params.pop("random_seed", 13370)
numpy_seed = params.pop("numpy_seed", 1337)
if "keras" in sys.modules:
logger.warning("You have already imported Keras in your code. If you are using DeepQA"
"functionality to set random seeds, they will have no effect, as code"
"prior to calling this function will be non-deterministic. We will not"
"the random seed here.")
seed = None
numpy_seed = None
if seed is not None:
random.seed(seed)
if numpy_seed is not None:
numpy.random.seed(numpy_seed)
from deep_qa.common.checks import log_keras_version_info
log_keras_version_info()
def run_model_from_file(param_path: str):
"""
A wrapper around the run_model function which loads json from a file.
Parameters
----------
param_path: str, required.
        A json parameter file specifying a DeepQA model.
"""
param_dict = pyhocon.ConfigFactory.parse_file(param_path)
run_model(param_dict)
def run_model(param_dict: Dict[str, any], model_class=None):
"""
This function is the normal entry point to DeepQA. Use this to run a DeepQA model in
your project. Note that if you care about exactly reproducible experiments,
you should avoid importing Keras before you import and use this function, as
Keras relies on random seeds which can be set in this function via a
JSON specification file.
Note that this function performs training and will also evaluate the trained
model on development and test sets if provided in the parameter json.
Parameters
----------
param_dict: Dict[str, any], required.
A parameter file specifying a DeepQaModel.
model_class: DeepQaModel, optional (default=None).
This option is useful if you have implemented a new model class which
is not one of the ones implemented in this library.
"""
params = Params(replace_none(param_dict))
prepare_environment(params)
# These have to be imported _after_ we set the random seed,
# because keras uses the numpy random seed.
from deep_qa.models import concrete_models
import tensorflow
from keras import backend as K
log_dir = params.get("model_serialization_prefix", None) # pylint: disable=no-member
if log_dir is not None:
sys.stdout = TeeLogger(log_dir + "_stdout.log", sys.stdout)
sys.stderr = TeeLogger(log_dir + "_stderr.log", sys.stderr)
handler = logging.FileHandler(log_dir + "_python_logging.log")
handler.setLevel(logging.INFO)
handler.setFormatter(logging.Formatter('%(asctime)s - %(levelname)s - %(name)s - %(message)s'))
logging.getLogger().addHandler(handler)
serialisation_params = deepcopy(params).as_dict(quiet=True)
with open(log_dir + "_model_params.json", "w") as param_file:
json.dump(serialisation_params, param_file)
num_threads = os.environ.get('OMP_NUM_THREADS')
config = {
"allow_soft_placement": True,
"log_device_placement": params.pop("log_device_placement", False)
}
if num_threads is not None:
config["intra_op_parallelism_threads"] = int(num_threads)
global_session = tensorflow.Session(config=tensorflow.ConfigProto(**config))
K.set_session(global_session)
if model_class is None:
model_type = params.pop_choice('model_class', concrete_models.keys())
model_class = concrete_models[model_type]
else:
if params.pop('model_class', None) is not None:
raise ConfigurationError("You have specified a local model class and passed a model_class argument"
"in the json specification. These options are mutually exclusive.")
model = model_class(params)
if model.can_train():
logger.info("Training model")
model.train()
K.clear_session()
else:
raise ConfigurationError("The supplied model does not have enough training inputs.")
def load_model(param_path: str, model_class=None):
"""
Loads and returns a model.
Parameters
----------
param_path: str, required
A json file specifying a DeepQaModel.
model_class: DeepQaModel, optional (default=None)
This option is useful if you have implemented a new model
class which is not one of the ones implemented in this library.
Returns
-------
A ``DeepQaModel`` instance.
"""
logger.info("Loading model from parameter file: %s", param_path)
param_dict = pyhocon.ConfigFactory.parse_file(param_path)
params = Params(replace_none(param_dict))
prepare_environment(params)
from deep_qa.models import concrete_models
if model_class is None:
model_type = params.pop_choice('model_class', concrete_models.keys())
model_class = concrete_models[model_type]
else:
if params.pop('model_class', None) is not None:
raise ConfigurationError("You have specified a local model class and passed a model_class argument"
"in the json specification. These options are mutually exclusive.")
model = model_class(params)
model.load_model()
return model
def score_dataset(param_path: str, dataset_files: List[str], model_class=None):
"""
Loads a model from a saved parameter path and scores a dataset with it, returning the
predictions.
Parameters
----------
param_path: str, required
A json file specifying a DeepQaModel.
dataset_files: List[str]
A list of dataset files to score, the same as you would have specified as ``train_files``
or ``test_files`` in your parameter file.
model_class: DeepQaModel, optional (default=None)
This option is useful if you have implemented a new model class which
is not one of the ones implemented in this library.
Returns
-------
predictions: numpy.array
Numpy array of model predictions in the format of model.outputs (typically one array, but
could be List[numpy.array] if your model has multiple outputs).
labels: numpy.array
The labels on the dataset, as read by the model. We return this so you can compute
whatever metrics you want, if the data was labeled.
"""
model = load_model(param_path, model_class=model_class)
dataset = model.load_dataset_from_files(dataset_files)
return model.score_dataset(dataset)
def evaluate_model(param_path: str, dataset_files: List[str]=None, model_class=None):
"""
Loads a model and evaluates it on some test set.
Parameters
----------
param_path: str, required
A json file specifying a DeepQaModel.
dataset_files: List[str], optional, (default=None)
A list of dataset files to evaluate on. If this is ``None``, we'll evaluate from the
``test_files`` parameter in the input files. If that's also ``None``, we'll crash.
model_class: DeepQaModel, optional (default=None)
This option is useful if you have implemented a new model class which
is not one of the ones implemented in this library.
Returns
-------
Numpy arrays of model predictions in the format of model.outputs.
"""
model = load_model(param_path, model_class=model_class)
if dataset_files is None:
dataset_files = model.test_files
model.evaluate_model(dataset_files)
def score_dataset_with_ensemble(param_paths: List[str],
dataset_files: List[str],
model_class=None) -> Tuple[numpy.array, numpy.array]:
"""
Loads all of the models specified in ``param_paths``, uses each of them to score the dataset
specified by ``dataset_files``, and averages their scores, return an array of ensembled model
predictions.
Parameters
----------
param_paths: List[str]
A list of parameter files that were used to train models. You must have already trained
the corresponding model, as we'll load it and use it in an ensemble here.
dataset_files: List[str]
A list of dataset files to score, the same as you would have specified as ``test_files`` in
any one of the model parameter files.
model_class: ``DeepQaModel``, optional (default=None)
This option is useful if you have implemented a new model class which is not one of the
ones implemented in this library.
Returns
-------
predictions: numpy.array
Numpy array of model predictions in the format of model.outputs (typically one array, but
could be List[numpy.array] if your model has multiple outputs).
labels: numpy.array
The labels on the dataset, as read by the first model. We return this so you can compute
whatever metrics you want, if the data was labeled. Note that if your models all represent
that data differently, this will only give the first one. Hopefully the representation of
the labels is consistent across the models, though; if not, the whole idea of ensembling
them this way is moot, anyway.
"""
models = [load_model(param_path, model_class) for param_path in param_paths]
predictions = []
labels_to_return = None
for i, model in enumerate(models):
logger.info("Scoring model %d of %d", i + 1, len(models))
dataset = model.load_dataset_from_files(dataset_files)
model_predictions, labels = model.score_dataset(dataset)
predictions.append(model_predictions)
if labels_to_return is None:
labels_to_return = labels
logger.info("Averaging model predictions")
all_predictions = numpy.stack(predictions)
averaged = numpy.mean(all_predictions, axis=0)
return averaged, labels_to_return
def compute_accuracy(predictions: numpy.array, labels: numpy.array):
"""
Computes a simple categorical accuracy metric, useful if you used ``score_dataset`` to get
predictions.
"""
accuracy = numpy.mean(numpy.equal(numpy.argmax(predictions, axis=-1),
numpy.argmax(labels, axis=-1)))
logger.info("Accuracy: %f", accuracy)
return accuracy
| deep_qa-master | deep_qa/run.py |
from .run import run_model, evaluate_model, load_model, score_dataset, score_dataset_with_ensemble
from .run import compute_accuracy, run_model_from_file
| deep_qa-master | deep_qa/__init__.py |
from keras import backend as K
from overrides import overrides
from .masked_layer import MaskedLayer
from ..tensors.backend import switch
class BiGRUIndexSelector(MaskedLayer):
"""
This Layer takes 3 inputs: a tensor of document indices, the seq2seq GRU output
over the document feeding it in forward, the seq2seq GRU output over the document
feeding it in backwards. It also takes one parameter, the word index whose
biGRU outputs we want to extract
Inputs:
- document indices: shape ``(batch_size, document_length)``
- forward GRU output: shape ``(batch_size, document_length, GRU hidden dim)``
- backward GRU output: shape ``(batch_size, document_length, GRU hidden dim)``
Output:
- GRU outputs at index: shape ``(batch_size, GRU hidden dim * 2)``
Parameters
----------
target_index : int
The word index to extract the forward and backward GRU output from.
"""
def __init__(self, target_index, **kwargs):
self.target_index = target_index
super(BiGRUIndexSelector, self).__init__(**kwargs)
@overrides
def compute_output_shape(self, input_shapes):
return (input_shapes[1][0], input_shapes[1][2]*2)
@overrides
def compute_mask(self, inputs, mask=None): # pylint: disable=unused-argument
return None
@overrides
def call(self, inputs, mask=None):
"""
Extract the GRU output for the target document index for the forward
and backwards GRU outputs, and then concatenate them. If the target word index
is at index l, and there are T total document words, the desired output
in the forward pass is at GRU_f[l] (ignoring the batched case) and the
desired output of the backwards pass is at GRU_b[T-l].
We need to get these two vectors and concatenate them. To do so, we'll
reverse the backwards GRU, which allows us to use the same index/mask for both.
"""
# TODO(nelson): deal with case where cloze token appears multiple times
# in a question.
word_indices, gru_f, gru_b = inputs
index_mask = K.cast(K.equal((K.ones_like(word_indices) * self.target_index),
word_indices), "float32")
gru_mask = K.repeat_elements(K.expand_dims(index_mask, -1), K.int_shape(gru_f)[-1], K.ndim(gru_f) - 1)
masked_gru_f = switch(gru_mask, gru_f, K.zeros_like(gru_f))
selected_gru_f = K.sum(masked_gru_f, axis=1)
masked_gru_b = switch(gru_mask, gru_b, K.zeros_like(gru_b))
selected_gru_b = K.sum(masked_gru_b, axis=1)
selected_bigru = K.concatenate([selected_gru_f, selected_gru_b], axis=-1)
return selected_bigru
@overrides
def get_config(self):
config = {'target_index': self.target_index}
base_config = super(BiGRUIndexSelector, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
| deep_qa-master | deep_qa/layers/bigru_index_selector.py |
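A small numpy check of the selection logic in `call` above, with made-up shapes and values: build a mask where the document index equals `target_index`, use it to pull out the forward and backward GRU rows, and concatenate them.

import numpy as np

target_index = 7
word_indices = np.array([[3, 7, 5],
                         [7, 2, 9]])               # (batch_size, document_length)
gru_f = np.random.rand(2, 3, 4)                    # (batch_size, document_length, dim)
gru_b = np.random.rand(2, 3, 4)

# 1.0 exactly where the document index equals the target index
index_mask = (word_indices == target_index).astype("float32")

selected_f = (gru_f * index_mask[:, :, None]).sum(axis=1)    # (batch_size, dim)
selected_b = (gru_b * index_mask[:, :, None]).sum(axis=1)
selected_bigru = np.concatenate([selected_f, selected_b], axis=-1)
print(selected_bigru.shape)   # (2, 8), i.e. (batch_size, 2 * dim)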
from keras.layers import Layer
class MaskedLayer(Layer):
"""
Keras 2.0 allowed for arbitrary differences in arguments to the ``call`` method of ``Layers``.
As part of this, they removed the default ``mask=None`` argument, which means that if you want
to implement ``call`` with a mask, you need to disable a pylint warning. Instead of disabling
it in every single layer in our codebase, which could lead to uncaught errors, we'll have a
single place where we disable it, and have other layers inherit from this class.
"""
def __init__(self, **kwargs):
self.supports_masking = True
super(MaskedLayer, self).__init__(**kwargs)
def call(self, inputs, mask=None): # pylint: disable=arguments-differ
raise NotImplementedError
| deep_qa-master | deep_qa/layers/masked_layer.py |
from keras import backend as K
from overrides import overrides
from .masked_layer import MaskedLayer
class VectorMatrixMerge(MaskedLayer):
"""
This ``Layer`` takes a tensor with ``K`` modes and a collection of other tensors with ``K - 1``
modes, and concatenates the lower-order tensors at the beginning of the higher-order tensor
along a given mode. We call this a vector-matrix merge to evoke the notion of appending
vectors onto a matrix, but this will also work with higher-order tensors.
For example, if you have a memory tensor of shape ``(batch_size, knowledge_length,
encoding_dim)``, containing ``knowledge_length`` encoded sentences, you could use this layer to
concatenate ``N`` individual encoded sentences with it, resulting in a tensor of shape
``(batch_size, N + knowledge_length, encoding_dim)``.
This layer supports masking - we will pass through whatever mask you have on the matrix, and
concatenate ones to it, similar to how to we concatenate the inputs. We need to know what axis
to do that concatenation on, though - we'll default to the input concatenation axis, but you
can specify a different one if you need to. We just ignore masks on the vectors, because doing
the right thing with masked vectors here is complicated. If you want to handle that later,
submit a PR.
This ``Layer`` is essentially the opposite of a
:class:`~deep_qa.layers.vector_matrix_split.VectorMatrixSplit`.
Parameters
----------
concat_axis: int
The axis to concatenate the vectors and matrix on.
mask_concat_axis: int, optional (default=None)
The axis to concatenate the masks on (defaults to ``concat_axis`` if ``None``)
"""
def __init__(self,
concat_axis: int,
mask_concat_axis: int=None,
propagate_mask: bool=True,
**kwargs):
self.concat_axis = concat_axis
self.mask_concat_axis = mask_concat_axis
self.propagate_mask = propagate_mask
super(VectorMatrixMerge, self).__init__(**kwargs)
@overrides
def call(self, inputs, mask=None):
# We need to reverse these here, so that the order is preserved when we roll out the
# concatenations.
vectors = inputs[:-1]
matrix = inputs[-1]
expanded_vectors = [K.expand_dims(vector, axis=self.concat_axis) for vector in vectors]
return K.concatenate(expanded_vectors + [matrix], axis=self.concat_axis)
@overrides
def compute_output_shape(self, input_shapes):
num_vectors = len(input_shapes) - 1
matrix_shape = input_shapes[-1]
new_shape = list(matrix_shape)
new_shape[self.concat_axis] += num_vectors
return tuple(new_shape)
@overrides
def compute_mask(self, inputs, mask=None):
if mask is None or all(m is None for m in mask) or not self.propagate_mask:
return None
mask_concat_axis = self.mask_concat_axis
if mask_concat_axis is None:
mask_concat_axis = self.concat_axis
if mask_concat_axis < 0:
mask_concat_axis %= K.ndim(inputs[-1])
num_vectors = len(mask) - 1
matrix_mask = mask[-1]
if mask_concat_axis >= K.ndim(matrix_mask):
# This means we're concatenating along an axis in the tensor that is greater than the
# number of dimensions in the mask. E.g., we're adding a single pre-computed feature
# to a word embedding (if it was multiple features, you'd already have evenly shaped
# tensors, so you could just use a Concatenate layer). In this case, we take all of
# the masks, assume they have the same shape, and compute K.any() with them.
masks = [matrix_mask] + [m for m in mask[:-1] if m is not None]
shapes = set([K.int_shape(m) for m in masks])
            assert len(shapes) == 1, "Can't compute mask with uneven shapes: " + str(shapes)
expanded_masks = [K.expand_dims(m, axis=-1) for m in masks]
concated_masks = K.concatenate(expanded_masks, axis=-1)
return K.any(concated_masks, axis=-1)
vector_masks = []
for i in range(num_vectors):
vector_mask = mask[i]
if vector_mask is None:
vector_mask_template = K.sum(K.cast(matrix_mask, 'uint8'), axis=mask_concat_axis)
vector_mask = K.cast(K.ones_like(vector_mask_template), 'bool')
vector_masks.append(K.expand_dims(vector_mask, axis=mask_concat_axis))
return K.concatenate(vector_masks + [matrix_mask], axis=mask_concat_axis)
@overrides
def get_config(self):
base_config = super(VectorMatrixMerge, self).get_config()
config = {
'concat_axis': self.concat_axis,
'mask_concat_axis': self.mask_concat_axis,
'propagate_mask': self.propagate_mask,
}
config.update(base_config)
return config
| deep_qa-master | deep_qa/layers/vector_matrix_merge.py |
from keras import backend as K
from overrides import overrides
from deep_qa.layers.masked_layer import MaskedLayer
from deep_qa.tensors.backend import VERY_LARGE_NUMBER
class SubtractMinimum(MaskedLayer):
'''
This layer is used to normalize across a tensor axis. Normalization is done by finding the
minimum value across the specified axis, and then subtracting that value from all values
    (again, across the specified axis). Note that this also works just fine if you want to find the
minimum across more than one axis.
Inputs:
- A tensor with arbitrary dimension, and a mask of the same shape (currently doesn't
support masks with other shapes).
Output:
- The same tensor, with the minimum across one (or more) of the dimensions subtracted.
Parameters
----------
axis: int
The axis (or axes) across which to find the minimum. Can be a single int, a list of ints,
or None. We just call `K.min` with this parameter, so anything that's valid there works
here too.
'''
def __init__(self, axis: int, **kwargs):
self.axis = axis
super(SubtractMinimum, self).__init__(**kwargs)
@overrides
def compute_output_shape(self, input_shape): # pylint: disable=no-self-use
return input_shape
@overrides
def compute_mask(self, inputs, mask=None):
return mask
@overrides
def call(self, inputs, mask=None):
if mask is not None:
mask_value = False if K.dtype(mask) == 'bool' else 0
# Make sure masked values don't affect the input, by adding a very large number.
mask_flipped_and_scaled = K.cast(K.equal(mask, mask_value), "float32") * VERY_LARGE_NUMBER
minimums = K.min(inputs + mask_flipped_and_scaled, axis=self.axis, keepdims=True)
else:
minimums = K.min(inputs, axis=self.axis, keepdims=True)
normalized = inputs - minimums
return normalized
@overrides
def get_config(self):
base_config = super(SubtractMinimum, self).get_config()
config = {'axis': self.axis}
config.update(base_config)
return config
| deep_qa-master | deep_qa/layers/subtract_minimum.py |
from typing import List, Tuple
from keras import backend as K
from overrides import overrides
from .masked_layer import MaskedLayer
from ..common.checks import ConfigurationError
class ComplexConcat(MaskedLayer):
"""
This ``Layer`` does ``K.concatenate()`` on a collection of tensors, but
allows for more complex operations than ``Merge(mode='concat')``.
Specifically, you can perform an arbitrary number of elementwise linear
combinations of the vectors, and concatenate all of the results. If you do
not need to do this, you should use the regular ``Merge`` layer instead of
this ``ComplexConcat``.
Because the inputs all have the same shape, we assume that the masks are
also the same, and just return the first mask.
Input:
- A list of tensors. The tensors that you combine **must** have the
same shape, so that we can do elementwise operations on them, and
all tensors must have the same number of dimensions, and match on
all dimensions except the concatenation axis.
Output:
- A tensor with some combination of the input tensors concatenated
along a specific dimension.
Parameters
----------
axis : int
The axis to use for ``K.concatenate``.
combination: List of str
A comma-separated list of combinations to perform on the input tensors.
These are either tensor indices (1-indexed), or an arithmetic
operation between two tensor indices (valid operations: ``*``, ``+``,
``-``, ``/``). For example, these are all valid combination
parameters: ``"1,2"``, ``"1,2*3"``, ``"1-2,2-1"``, ``"1,1*1"``,
and ``"1,2,1*2"``.
"""
def __init__(self, combination: str, axis: int=-1, **kwargs):
self.axis = axis
self.combination = combination
self.combinations = self.combination.split(",")
self.num_combinations = len(self.combinations)
super(ComplexConcat, self).__init__(**kwargs)
@overrides
def compute_mask(self, inputs, mask=None):
# pylint: disable=unused-argument
return mask[0]
@overrides
def compute_output_shape(self, input_shape):
if not isinstance(input_shape, list):
raise ConfigurationError("ComplexConcat input must be a list")
output_shape = list(input_shape[0])
output_shape[self.axis] = 0
for combination in self.combinations:
output_shape[self.axis] += self._get_combination_length(combination, input_shape)
return tuple(output_shape)
@overrides
def call(self, x, mask=None):
combined_tensor = self._get_combination(self.combinations[0], x)
for combination in self.combinations[1:]:
to_concatenate = self._get_combination(combination, x)
combined_tensor = K.concatenate([combined_tensor, to_concatenate], axis=self.axis)
return combined_tensor
def _get_combination(self, combination: str, tensors: List['Tensor']):
if combination.isdigit():
return tensors[int(combination) - 1] # indices in the combination string are 1-indexed
else:
if len(combination) != 3:
raise ConfigurationError("Invalid combination: " + combination)
first_tensor = self._get_combination(combination[0], tensors)
second_tensor = self._get_combination(combination[2], tensors)
if K.int_shape(first_tensor) != K.int_shape(second_tensor):
shapes_message = "Shapes were: {} and {}".format(K.int_shape(first_tensor),
K.int_shape(second_tensor))
raise ConfigurationError("Cannot combine two tensors with different shapes! " +
shapes_message)
operation = combination[1]
if operation == '*':
return first_tensor * second_tensor
elif operation == '/':
return first_tensor / second_tensor
elif operation == '+':
return first_tensor + second_tensor
elif operation == '-':
return first_tensor - second_tensor
else:
raise ConfigurationError("Invalid operation: " + operation)
def _get_combination_length(self, combination: str, input_shapes: List[Tuple[int]]):
if combination.isdigit():
# indices in the combination string are 1-indexed
return input_shapes[int(combination) - 1][self.axis]
else:
if len(combination) != 3:
raise ConfigurationError("Invalid combination: " + combination)
first_length = self._get_combination_length(combination[0], input_shapes)
second_length = self._get_combination_length(combination[2], input_shapes)
if first_length != second_length:
raise ConfigurationError("Cannot combine two tensors with different shapes!")
return first_length
@overrides
def get_config(self):
config = {"combination": self.combination,
"axis": self.axis,
}
base_config = super(ComplexConcat, self).get_config()
config.update(base_config)
return config
| deep_qa-master | deep_qa/layers/complex_concat.py |
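A numpy illustration (with toy tensors) of what a combination string such as "1,2,1*2" produces: the layer concatenates tensor 1, tensor 2, and their elementwise product along the chosen axis.

import numpy as np

t1 = np.random.rand(2, 3)
t2 = np.random.rand(2, 3)

# combination "1,2,1*2": tensor 1, tensor 2, and their elementwise product
combined = np.concatenate([t1, t2, t1 * t2], axis=-1)
print(combined.shape)   # (2, 9)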
# Individual layers.
from .additive import Additive
from .bigru_index_selector import BiGRUIndexSelector
from .complex_concat import ComplexConcat
from .highway import Highway
from .l1_normalize import L1Normalize
from .masked_layer import MaskedLayer
from .noisy_or import BetweenZeroAndOne, NoisyOr
from .option_attention_sum import OptionAttentionSum
from .overlap import Overlap
from .vector_matrix_merge import VectorMatrixMerge
from .vector_matrix_split import VectorMatrixSplit
| deep_qa-master | deep_qa/layers/__init__.py |
from keras.layers import Highway as KerasHighway
class Highway(KerasHighway):
"""
Keras' `Highway` layer does not support masking, but it easily could, just by returning the
mask. This `Layer` makes this possible.
"""
def __init__(self, **kwargs):
super(Highway, self).__init__(**kwargs)
self.supports_masking = True
| deep_qa-master | deep_qa/layers/highway.py |
from keras import backend as K
from overrides import overrides
from .masked_layer import MaskedLayer
from ..tensors.backend import l1_normalize
class L1Normalize(MaskedLayer):
"""
This Layer normalizes a tensor by its L1 norm. This could just be a
``Lambda`` layer that calls our ``tensors.l1_normalize`` function,
except that ``Lambda`` layers do not properly handle masked input.
The expected input to this layer is a tensor of shape
``(batch_size, x)``, with an optional mask of the same shape.
We also accept as input a tensor of shape ``(batch_size, x, 1)``,
which will be squeezed to shape ``(batch_size, x)``
(though the mask must still be of shape ``(batch_size, x)``).
We give no output mask, as we expect this to only be used at the end of
the model, to get a final probability distribution over class labels. If
you need this to propagate the mask for your model, it would be pretty
easy to change it to optionally do so - submit a PR.
"""
def __init__(self, **kwargs):
super(L1Normalize, self).__init__(**kwargs)
@overrides
def compute_mask(self, inputs, mask=None):
# pylint: disable=unused-argument
# We do not need a mask beyond this layer.
return None
@overrides
def compute_output_shape(self, input_shape):
return (input_shape[0], input_shape[1])
@overrides
def call(self, inputs, mask=None):
if K.ndim(inputs) == 3:
inputs = K.squeeze(inputs, axis=2)
if K.ndim(inputs) != 2:
raise ValueError("L1Normalize layer only supports inputs of shape "
"(batch_size, x) or (batch_size, x, 1)")
return l1_normalize(inputs, mask)
| deep_qa-master | deep_qa/layers/l1_normalize.py |
from keras import backend as K
from overrides import overrides
from ..tensors.backend import switch
from .masked_layer import MaskedLayer
class Overlap(MaskedLayer):
"""
This Layer takes 2 inputs: a ``tensor_a`` (e.g. a document) and a ``tensor_b``
(e.g. a question). It returns a one-hot vector suitable for feature
representation with the same shape as ``tensor_a``,
indicating at each index whether the element in ``tensor_a`` appears in
``tensor_b``. Note that the output is not the same shape as ``tensor_a``.
Inputs:
- tensor_a: shape ``(batch_size, length_a)``
- tensor_b shape ``(batch_size, length_b)``
Output:
- Collection of one-hot vectors indicating
overlap: shape ``(batch_size, length_a, 2)``
Notes
-----
This layer is used to implement the "Question Evidence Common Word Feature"
    discussed in section 3.2.4 of `Dhingra et al., 2016
<https://arxiv.org/pdf/1606.01549.pdf>`_.
"""
@overrides
def __init__(self, **kwargs):
super(Overlap, self).__init__(**kwargs)
@overrides
def compute_output_shape(self, input_shapes):
return (input_shapes[0][0], input_shapes[0][1], 2)
@overrides
def call(self, inputs, mask=None):
# tensor_a, mask_a are of shape (batch size, length_a)
# tensor_b mask_b are of shape (batch size, length_b)
tensor_a, tensor_b = inputs
if mask is None:
mask_b = K.ones_like(tensor_b)
else:
mask_b = mask[1]
length_a = K.int_shape(tensor_a)[1]
length_b = K.int_shape(tensor_b)[1]
# change the indices that are masked in b to -1, since no indices
# in the document will ever be -1.
tensor_b = K.cast(switch(mask_b, tensor_b, -1*K.ones_like(tensor_b)), "int32")
# reshape tensor_a to shape (batch_size, length_a, length_b)
tensor_a_tiled = K.repeat_elements(K.expand_dims(tensor_a, 2),
length_b,
axis=2)
# reshape tensor_b to shape (batch_size, length_a, length_b)
tensor_b_tiled = K.repeat_elements(K.expand_dims(tensor_b, 1),
length_a,
axis=1)
overlap_mask = K.cast(K.equal(tensor_a_tiled, tensor_b_tiled), "float32")
indices_overlap = K.sum(overlap_mask, axis=-1)
binary_indices_overlap = K.cast(K.not_equal(indices_overlap,
K.zeros_like(indices_overlap)),
"int32")
one_hot_overlap = K.cast(K.one_hot(binary_indices_overlap, 2), "float32")
return one_hot_overlap
| deep_qa-master | deep_qa/layers/overlap.py |
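A numpy version of the overlap feature computed above, using toy word ids: each position of `tensor_a` is labeled 1 if its id also occurs in `tensor_b`, and the result is one-hot encoded to shape `(batch_size, length_a, 2)`.

import numpy as np

tensor_a = np.array([[4, 8, 15, 16]])   # e.g. a document, shape (batch_size, length_a)
tensor_b = np.array([[15, 4, 42]])      # e.g. a question, shape (batch_size, length_b)

# 1 where the id at position i of tensor_a also occurs anywhere in tensor_b
overlap = (tensor_a[:, :, None] == tensor_b[:, None, :]).any(axis=-1).astype(int)
one_hot_overlap = np.eye(2)[overlap]    # shape (batch_size, length_a, 2)
print(one_hot_overlap)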
from keras import backend as K
from keras.constraints import Constraint
from keras.regularizers import l1_l2
from overrides import overrides
from .masked_layer import MaskedLayer
class BetweenZeroAndOne(Constraint):
"""
Constrains the weights to be between zero and one
"""
def __call__(self, p):
# Clip values less than or equal to zero to epsilon.
p *= K.cast(p >= 0., K.floatx())
leaky_zeros_mask = K.epsilon() * K.cast(K.equal(p, K.zeros_like(p)), 'float32')
p = p + leaky_zeros_mask
        # Clip values greater than 1 to 1.
p *= K.cast(p <= 1., K.floatx())
leaky_ones_mask = K.cast(K.equal(p, K.zeros_like(p)), K.floatx())
p += leaky_ones_mask
return p
class NoisyOr(MaskedLayer):
r"""
This layer takes as input a tensor of probabilities and calculates the
noisy-or probability across a given axis based on the noisy-or equation:
- :math:`p(x) = 1 - \prod_{i=1:N}(1 - q * p(x|y_n))`
    where :math:`q` is the noise parameter.
Inputs:
- probabilities: shape ``(batch, ..., N, ...)``
Optionally takes a mask of the same shape,
where N is the number of y's in the above equation
(i.e. the number of probabilities being combined in the product),
in the dimension corresponding to the specified axis.
Output:
- X: shape ``(batch, ..., ...)``
The output has one less dimension than the input, and has an
optional mask of the same shape. The lost dimension corresponds
to the specified axis. The output mask is the result of ``K.any()``
on the input mask, along the specified axis.
Parameters
----------
axis : int, default=-1
The axis over which to combine probabilities.
name : string, default='noisy_or'
        Name of the layer, used to debug both the layer and its parameter.
param_init : string, default='uniform'
The initialization of the noise parameter.
noise_param_constraint : Keras Constraint, default=None
Optional, a constraint which would be applied to the noise parameter.
"""
def __init__(self, axis=-1, name="noisy_or", param_init='uniform', noise_param_constraint=None, **kwargs):
self.axis = axis
self.param_init = param_init
self.name = name
# The noisy-or equation includes a noise parameter (q) which is learned during training.
self.noise_parameter = None
self.noise_param_constraint = noise_param_constraint
super(NoisyOr, self).__init__(**kwargs)
def build(self, input_shape):
# Add the trainable weight variable for the noise parameter.
self.noise_parameter = self.add_weight(shape=(),
name=self.name + '_noise_param',
initializer=self.param_init,
regularizer=l1_l2(l2=0.001),
constraint=self.noise_param_constraint,
trainable=True)
super(NoisyOr, self).build(input_shape)
def compute_output_shape(self, input_shape):
if self.axis == -1:
return input_shape[:-1]
        return input_shape[:self.axis] + input_shape[self.axis + 1:]
@overrides
def compute_mask(self, inputs, mask=None):
# pylint: disable=unused-argument
if mask is not None:
return K.any(mask, axis=self.axis)
return None
def call(self, inputs, mask=None):
# shape: (batch size, ..., num_probs, ...)
probabilities = inputs
if mask is not None:
probabilities *= K.cast(mask, "float32")
noisy_probs = self.noise_parameter * probabilities
# shape: (batch size, ..., num_probs, ...)
noisy_probs = 1.0 - noisy_probs
# shape: (batch size, ..., ...)
probability_product = K.prod(noisy_probs, axis=self.axis)
return 1.0 - probability_product
| deep_qa-master | deep_qa/layers/noisy_or.py |
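A quick numpy check of the noisy-or combination implemented above (a sketch: the noise parameter `q` is fixed here, whereas the layer learns it during training).

import numpy as np

q = 0.8   # stand-in for the learned noise parameter
probabilities = np.array([[0.9, 0.2, 0.5],
                          [0.1, 0.1, 0.1]])   # (batch_size, num_probs)

noisy_or = 1.0 - np.prod(1.0 - q * probabilities, axis=-1)
print(noisy_or)   # one combined probability per batch element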
from keras import backend as K
from overrides import overrides
from .masked_layer import MaskedLayer
class VectorMatrixSplit(MaskedLayer):
"""
This Layer takes a tensor with K modes and splits it into a tensor with K - 1 modes and a
tensor with K modes, but one less row in one of the dimensions. We call this a vector-matrix
split to evoke the notion of taking a row- (or column-) vector off of a matrix and returning
both the vector and the remaining matrix, but this will also work with higher-order tensors.
For example, if you have a sentence that has a combined (word + characters) representation of
the tokens in the sentence, you'd have a tensor of shape
(batch_size, sentence_length, word_length + 1). You could split that using this Layer into a
tensor of shape (batch_size, sentence_length) for the word tokens in the sentence, and a tensor
of shape (batch_size, sentence_length, word_length) for the character for each word token.
This layer supports masking - we will split the mask the same way that we split the inputs.
This Layer is essentially the opposite of a VectorMatrixMerge.
"""
def __init__(self,
split_axis: int,
mask_split_axis: int=None,
propagate_mask: bool=True,
**kwargs):
self.split_axis = split_axis
self.mask_split_axis = mask_split_axis if mask_split_axis is not None else split_axis
self.propagate_mask = propagate_mask
super(VectorMatrixSplit, self).__init__(**kwargs)
@overrides
def call(self, inputs, mask=None):
return self._split_tensor(inputs, self.split_axis)
@overrides
def compute_output_shape(self, input_shape):
vector_shape = list(input_shape)
del vector_shape[self.split_axis]
matrix_shape = list(input_shape)
if matrix_shape[self.split_axis] is not None:
matrix_shape[self.split_axis] -= 1
return [tuple(vector_shape), tuple(matrix_shape)]
@overrides
def compute_mask(self, inputs, input_mask=None): # pylint: disable=unused-argument
if input_mask is None or not self.propagate_mask:
return [None, None]
return self._split_tensor(input_mask, self.mask_split_axis)
@staticmethod
def _split_tensor(tensor, split_axis: int):
modes = K.ndim(tensor)
if split_axis < 0:
split_axis = modes + split_axis
vector_slice = []
matrix_slice = []
for mode in range(modes):
if mode == split_axis:
vector_slice.append(0)
matrix_slice.append(slice(1, None, None))
else:
vector_slice.append(slice(None, None, None))
matrix_slice.append(slice(None, None, None))
return [tensor[vector_slice], tensor[matrix_slice]]
@overrides
def get_config(self):
base_config = super(VectorMatrixSplit, self).get_config()
config = {
'split_axis': self.split_axis,
'mask_split_axis': self.mask_split_axis,
'propagate_mask': self.propagate_mask,
}
config.update(base_config)
return config
| deep_qa-master | deep_qa/layers/vector_matrix_split.py |
from keras import backend as K
from overrides import overrides
from .masked_layer import MaskedLayer
from ..common.checks import ConfigurationError
from ..tensors.backend import switch
class OptionAttentionSum(MaskedLayer):
"""
This Layer takes three inputs: a tensor of document indices, a tensor of
document probabilities, and a tensor of answer options. In addition, it takes a
parameter: a string describing how to calculate the probability of options
that consist of multiple words. We compute the probability of each of
the answer options in the fashion described in the paper "Text
    Comprehension with the Attention Sum Reader Network" (Kadlec et al., 2016).
Inputs:
- document indices: shape ``(batch_size, document_length)``
- document probabilities: shape ``(batch_size, document_length)``
- options: shape ``(batch size, num_options, option_length)``
Output:
- option_probabilities ``(batch_size, num_options)``
"""
def __init__(self, multiword_option_mode="mean", **kwargs):
"""
Construct a new OptionAttentionSum layer.
Parameters
----------
multiword_option_mode: str, optional (default="mean")
Describes how to calculate the probability of options
that contain multiple words. If "mean", the probability of
the option is taken to be the mean of the probabilities of
its constituent words. If "sum", the probability of the option
is taken to be the sum of the probabilities of its constituent
words.
"""
if multiword_option_mode != "mean" and multiword_option_mode != "sum":
raise ConfigurationError("multiword_option_mode must be 'mean' or "
"'sum', got {}.".format(multiword_option_mode))
self.multiword_option_mode = multiword_option_mode
super(OptionAttentionSum, self).__init__(**kwargs)
@overrides
def compute_output_shape(self, input_shapes):
return (input_shapes[2][0], input_shapes[2][1])
@overrides
def compute_mask(self, inputs, mask=None): # pylint: disable=unused-argument
options = inputs[2]
padding_mask = K.not_equal(options, K.zeros_like(options))
return K.cast(K.any(padding_mask, axis=2), "float32")
@overrides
def call(self, inputs, mask=None):
"""
Calculate the probability of each answer option.
Parameters
----------
inputs: List of Tensors
The inputs to the layer must be passed in as a list to the
``call`` function. The inputs expected are a Tensor of
document indices, a Tensor of document probabilities, and
a Tensor of options (in that order).
The documents indices tensor is a 2D tensor of shape
(batch size, document_length).
The document probabilities tensor is a 2D Tensor of shape
(batch size, document_length).
The options tensor is of shape (batch size, num_options,
option_length).
mask: Tensor or None, optional (default=None)
Tensor of shape (batch size, max number of options) representing
which options are padding and thus have a 0 in the associated
mask position.
Returns
-------
options_probabilities : Tensor
Tensor with shape (batch size, max number of options) with floats,
where each float is the normalized probability of the option as
calculated based on ``self.multiword_option_mode``.
"""
document_indices, document_probabilities, options = inputs
# This takes `document_indices` from (batch_size, document_length) to
# (batch_size, num_options, option_length, document_length), with the
# original indices repeated, so that we can create a mask indicating
# which options are used in the probability computation. We do the
# same thing for `document_probababilities` to select the probability
# values corresponding to the words in the options.
expanded_indices = K.expand_dims(K.expand_dims(document_indices, 1), 1)
tiled_indices = K.repeat_elements(K.repeat_elements(expanded_indices,
K.int_shape(options)[1], axis=1),
K.int_shape(options)[2], axis=2)
expanded_probabilities = K.expand_dims(K.expand_dims(document_probabilities, 1), 1)
tiled_probabilities = K.repeat_elements(K.repeat_elements(expanded_probabilities,
K.int_shape(options)[1], axis=1),
K.int_shape(options)[2], axis=2)
expanded_options = K.expand_dims(options, 3)
tiled_options = K.repeat_elements(expanded_options,
K.int_shape(document_indices)[-1], axis=3)
# This generates a binary tensor of the same shape as tiled_options /
# tiled_indices that indicates if index is option or padding.
options_words_mask = K.cast(K.equal(tiled_options, tiled_indices),
"float32")
# This applies a mask to the probabilities to select the
# indices for probabilities that correspond with option words.
selected_probabilities = options_words_mask * tiled_probabilities
# This sums up the probabilities to get the aggregate probability for
# each option's constituent words.
options_word_probabilities = K.sum(selected_probabilities, axis=3)
sum_option_words_probabilities = K.sum(options_word_probabilities,
axis=2)
if self.multiword_option_mode == "mean":
# This block figures out how many words (excluding
# padding) are in each option.
# Here we generate the mask on the input option.
option_mask = K.cast(K.not_equal(options, K.zeros_like(options)),
"float32")
# This tensor stores the number words in each option.
divisor = K.sum(option_mask, axis=2)
# If the divisor is zero at a position, we add epsilon to it.
is_zero_divisor = K.equal(divisor, K.zeros_like(divisor))
divisor = switch(is_zero_divisor, K.ones_like(divisor)*K.epsilon(), divisor)
else:
# Since we're taking the sum, we divide all sums by 1.
divisor = K.ones_like(sum_option_words_probabilities)
# Now we divide the sums by the divisor we generated above.
option_probabilities = sum_option_words_probabilities / divisor
return option_probabilities
@overrides
def get_config(self):
config = {'multiword_option_mode': self.multiword_option_mode}
base_config = super(OptionAttentionSum, self).get_config()
config.update(base_config)
return config
| deep_qa-master | deep_qa/layers/option_attention_sum.py |
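A numpy sketch of the attention-sum computation above, with toy values: an option's score is the sum (or mean) of the document probabilities at the positions where its word ids occur, with id 0 treated as padding.

import numpy as np

document_indices = np.array([[1, 2, 3, 2]])               # (batch_size, document_length)
document_probabilities = np.array([[0.1, 0.4, 0.2, 0.3]])
options = np.array([[[2, 0],                              # option "2" plus padding
                     [3, 1]]])                            # option "3 1"

# (batch_size, num_options, option_length, document_length) match indicator
matches = options[:, :, :, None] == document_indices[:, None, None, :]
word_probs = (matches * document_probabilities[:, None, None, :]).sum(axis=-1)

sum_probs = word_probs.sum(axis=-1)                       # "sum" mode -> [[0.7, 0.3]]
num_words = (options != 0).sum(axis=-1)                   # non-padding words per option
mean_probs = sum_probs / np.maximum(num_words, 1)         # "mean" mode -> [[0.7, 0.15]]
print(sum_probs, mean_probs)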
from overrides import overrides
from .masked_layer import MaskedLayer
class Additive(MaskedLayer):
"""
This ``Layer`` `adds` a parameter value to each cell in the input tensor, similar to a bias
vector in a ``Dense`` layer, but this `only` adds, one value per cell. The value to add is
learned.
Parameters
----------
initializer: str, optional (default='glorot_uniform')
Keras initializer for the additive weight.
"""
def __init__(self, initializer='glorot_uniform', **kwargs):
super(Additive, self).__init__(**kwargs)
self.initializer = initializer
self._additive_weight = None
@overrides
def build(self, input_shape):
super(Additive, self).build(input_shape)
self._additive_weight = self.add_weight(shape=input_shape[1:],
name='%s_additive' % self.name,
initializer=self.initializer)
@overrides
def call(self, inputs, mask=None):
return inputs + self._additive_weight
@overrides
def get_config(self):
base_config = super(Additive, self).get_config()
config = {
'initializer': self.initializer,
}
config.update(base_config)
return config
| deep_qa-master | deep_qa/layers/additive.py |
from keras import backend as K
from deep_qa.layers.wrappers.time_distributed import TimeDistributed
class EncoderWrapper(TimeDistributed):
'''
This class TimeDistributes a sentence encoder, applying the encoder to several word sequences.
The only difference between this and the regular TimeDistributed is in how we handle the mask.
Typically, an encoder will handle masked embedded input, and return None as its mask, as it
just returns a vector and no more masking is necessary. However, if the encoder is
TimeDistributed, we might run into a situation where _all_ of the words in a given sequence are
masked (because we padded the number of sentences, for instance). In this case, we just want
to mask the entire sequence. EncoderWrapper returns a mask with the same dimension as the
input sequences, where sequences are masked if _all_ of their words were masked.
Notes
-----
For seq2seq encoders, one should use either ``TimeDistributed`` or
``TimeDistributedWithMask`` since ``EncoderWrapper`` reduces the dimensionality
of the input mask.
'''
def compute_mask(self, x, input_mask=None):
# pylint: disable=unused-argument
# Input mask (coming from Embedding) will be of shape (batch_size, knowledge_length, num_words).
# Output mask should be of shape (batch_size, knowledge_length) with 0s for background sentences that
# are all padding.
if input_mask is None:
return None
else:
# An output bit is 0 only if the bits corresponding to all input words are 0.
return K.any(input_mask, axis=-1)
| deep_qa-master | deep_qa/layers/wrappers/encoder_wrapper.py |
from keras import backend as K
from keras.layers import InputSpec, TimeDistributed as KerasTimeDistributed
from overrides import overrides
class TimeDistributed(KerasTimeDistributed):
"""
This class fixes two bugs in Keras: (1) the input mask is not passed to the wrapped layer, and
(2) Keras' TimeDistributed currently only allows a single input, not a list. We currently
don't handle the case where the _output_ of the wrapped layer is a list, however. (Not that
that's particularly hard, we just haven't needed it yet, so haven't implemented it.)
Notes
-----
    If the output shape for TimeDistributed has a final dimension of 1, we essentially squeeze it,
reshaping to have one fewer dimension. That change takes place in the actual ``call`` method as well as
the ``compute_output_shape`` method.
"""
def __init__(self, layer, keep_dims=False, **kwargs):
self.keep_dims = keep_dims
super(TimeDistributed, self).__init__(layer, **kwargs)
@overrides
def build(self, input_shape):
if isinstance(input_shape, tuple):
input_shape = [input_shape]
assert all(len(shape) >= 3 for shape in input_shape), "Need 3 dims to TimeDistribute"
all_timesteps = [i[1] for i in input_shape]
assert len(set(all_timesteps)) == 1, "Tensors must have same number of timesteps"
self.input_spec = [InputSpec(shape=shape) for shape in input_shape]
if not self.layer.built:
child_input_shape = [(shape[0],) + shape[2:] for shape in input_shape]
if len(input_shape) == 1:
child_input_shape = child_input_shape[0]
self.layer.build(child_input_shape)
self.layer.built = True
self.built = True
# It's important that we call Wrapper.build() here, because it sets some important member
# variables. But we can't call KerasTimeDistributed.build(), because it assumes only one
# input, which we're trying to fix. So we use super(KerasTimeDistributed, self).build()
# here on purpose - this is not a copy-paste bug.
super(KerasTimeDistributed, self).build(input_shape) # pylint: disable=bad-super-call
@overrides
def compute_output_shape(self, input_shape):
if not isinstance(input_shape, list):
input_shape = [input_shape]
child_input_shape = [(shape[0],) + shape[2:] for shape in input_shape]
timesteps = input_shape[0][1]
if len(input_shape) == 1:
child_input_shape = child_input_shape[0]
child_output_shape = self.layer.compute_output_shape(child_input_shape)
reshaped_shape = (child_output_shape[0], timesteps) + child_output_shape[1:]
if reshaped_shape[-1] == 1 and not self.keep_dims:
reshaped_shape = reshaped_shape[:-1]
return reshaped_shape
def get_output_mask_shape_for(self, input_shape):
if not isinstance(input_shape, list):
input_shape = [input_shape]
child_input_shape = [(shape[0],) + shape[2:] for shape in input_shape]
timesteps = input_shape[0][1]
if len(input_shape) == 1:
child_input_shape = child_input_shape[0]
child_output_shape = self.layer.get_output_mask_shape_for(child_input_shape)
return (child_output_shape[0], timesteps) + child_output_shape[1:]
@staticmethod
def reshape_inputs_and_masks(inputs, masks):
reshaped_xs = []
reshaped_masks = []
for x_i, mask_i in zip(inputs, masks):
input_shape = K.int_shape(x_i)
reshaped_x = K.reshape(x_i, (-1,) + input_shape[2:]) # (batch_size * timesteps, ...)
if mask_i is not None:
mask_ndim = K.ndim(mask_i)
input_ndim = K.ndim(x_i)
if mask_ndim == input_ndim:
mask_shape = input_shape
elif mask_ndim == input_ndim - 1:
mask_shape = input_shape[:-1]
else:
raise Exception("Mask is of an unexpected shape. Mask's ndim: %s, input's ndim %s" %
(mask_ndim, input_ndim))
mask_i = K.reshape(mask_i, (-1,) + mask_shape[2:]) # (batch_size * timesteps, ...)
reshaped_xs.append(reshaped_x)
reshaped_masks.append(mask_i)
if len(inputs) == 1:
reshaped_xs = reshaped_xs[0]
reshaped_masks = reshaped_masks[0]
return reshaped_xs, reshaped_masks
@overrides
def call(self, inputs, mask=None):
# Much of this is copied from the Keras 1.0(ish) version of TimeDistributed, though we've
# modified it quite a bit, to fix the problems mentioned in the docstring and to use better
# names.
if not isinstance(inputs, list):
inputs = [inputs]
mask = [mask]
else:
if mask is None:
mask = [None] * len(inputs)
timesteps = K.int_shape(inputs[0])[1]
input_shape = [K.int_shape(x_i) for x_i in inputs]
if len(inputs) == 1:
input_shape = input_shape[0]
if len(inputs) == 1 and input_shape[0]:
# The batch size is passed when defining the layer in some cases (for example if it is
# stateful). We respect the input shape in that case and don't reshape the input. This
# is slower. K.rnn also expects only a single tensor, so we can't do this if we have
# multiple inputs.
inputs = inputs[0]
mask = mask[0]
def step(x_i, _):
output = self.layer.call(x_i)
return output, []
_, outputs, _ = K.rnn(step, inputs, mask=mask, initial_states=[])
else:
reshaped_xs, reshaped_masks = self.reshape_inputs_and_masks(inputs, mask)
outputs = self.layer.call(reshaped_xs, mask=reshaped_masks)
output_shape = self.compute_output_shape(input_shape)
reshaped_shape = (-1, timesteps) + output_shape[2:]
if reshaped_shape[-1] == 1 and not self.keep_dims:
reshaped_shape = reshaped_shape[:-1]
outputs = K.reshape(outputs, reshaped_shape)
return outputs
@overrides
def compute_mask(self, inputs, mask=None): # pylint: disable=unused-argument
if isinstance(mask, list):
if not any(mask):
return None
else:
raise RuntimeError("This version of TimeDistributed doesn't handle multiple masked "
"inputs! Use a subclass of TimeDistributed instead.")
return mask
@overrides
def get_config(self):
base_config = super(TimeDistributed, self).get_config()
config = {'keep_dims': self.keep_dims}
config.update(base_config)
return config
| deep_qa-master | deep_qa/layers/wrappers/time_distributed.py |
from .add_encoder_mask import AddEncoderMask
from .encoder_wrapper import EncoderWrapper
from .output_mask import OutputMask
from .time_distributed import TimeDistributed
| deep_qa-master | deep_qa/layers/wrappers/__init__.py |
from keras import backend as K
from overrides import overrides
from ..masked_layer import MaskedLayer
class AddEncoderMask(MaskedLayer):
"""
This ``Layer`` handles masking for ``TimeDistributed`` encoders, like LSTMs, that condense
sequences of vectors into single vectors (not LSTMs that return sequences; masking is already
handled there correctly). Our :class:`~.encoder_wrapper.EncoderWrapper` class does the correct
masking computation, but it inherits from ``TimeDistributed``, which does not work with unknown
dimensions at run-time. If you want to wrap an encoder using
:class:`~..backend.CollapseToBatch` and :class:`~..backend.ExpandFromBatch`, you need a way to
get the mask back into the right form after running your encoder. This is an issue because
Keras' encoders don't return masks when they output single vectors.
For example, say you have a list of sentences, like [[5, 2, 1, 0], [2, 3, 1, 1], [0, 0, 0, 0]]
(using word indices instead of embeddings for simplicity), which has been padded to be three
sentences, even though only two of them are actually used. After passing it though an encoder,
you'll have something like [[vector], [vector], [vector]], and you want a mask that looks like
[1, 1, 0]. Keras' LSTMs and such won't give this to you. This method adds it back.
Inputs:
- A tensor with shape ``(batch_size, ..., encoding_dim)`` that is the output of some
encoder that you got with
:func:`~deep_qa.training.text_trainer.TextTrainer._get_encoder()` (not a seq2seq encoder
that returns sequences).
The mask for this tensor must be ``None``.
- A tensor with shape ``(batch_size, ..., num_words, embedding_dim)`` that was the `input`
to that encoder. The mask for this tensor must have shape ``(batch_size, ...,
num_words)``.
Output:
- The first input tensor, with a mask computed from the second input tensor. The
computation is just ``K.any()`` on the last dimension.
"""
@overrides
def compute_output_shape(self, input_shape):
return input_shape[0]
@overrides
def compute_mask(self, inputs, mask=None):
encoder_mask, embedding_mask = mask
if encoder_mask is not None:
raise RuntimeError("Refusing to add an encoder mask, because the tensor already has one")
return K.any(embedding_mask, axis=-1)
@overrides
def call(self, inputs, mask=None): # pylint: disable=unused-argument
# It turns out that Keras doesn't like it if you just return inputs, so we need to return a
# different tensor object. Just doing a cast apparently doesn't work, either, so we'll
# add 0.
return inputs[0] + 0.0
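# A small numpy sketch of the mask computation above, for illustration only, using the word-index
# example from the docstring: a sentence stays unmasked if any of its words is unmasked.
if __name__ == '__main__':
    import numpy as np
    word_indices = np.array([[5, 2, 1, 0],
                             [2, 3, 1, 1],
                             [0, 0, 0, 0]])          # three padded sentences
    embedding_mask = word_indices != 0               # roughly what an Embedding mask looks like
    sentence_mask = embedding_mask.any(axis=-1)      # the K.any(embedding_mask, axis=-1) above
    print(sentence_mask)                             # [ True  True False]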
| deep_qa-master | deep_qa/layers/wrappers/add_encoder_mask.py |
from overrides import overrides
from ..masked_layer import MaskedLayer
class OutputMask(MaskedLayer):
"""
This Layer is purely for debugging. You can wrap this on a layer's output to get the mask
output by that layer as a model output, for easier visualization of what the model is actually
doing.
Don't try to use this in an actual model.
"""
@overrides
def compute_mask(self, inputs, mask=None):
return None
@overrides
def call(self, inputs, mask=None): # pylint: disable=unused-argument
return mask
| deep_qa-master | deep_qa/layers/wrappers/output_mask.py |
from copy import deepcopy
from typing import Any, Dict
from keras import backend as K
from overrides import overrides
from ...common.params import pop_choice
from ..masked_layer import MaskedLayer
from ...tensors.masked_operations import masked_softmax
from ...tensors.similarity_functions import similarity_functions
class Attention(MaskedLayer):
"""
This Layer takes two inputs: a vector and a matrix. We compute the similarity between the
vector and each row in the matrix, and then (optionally) perform a softmax over rows using
those computed similarities. We handle masking properly for masked rows in the matrix, though
we ignore any masking on the vector.
By default similarity is computed with a dot product, but you can
alternatively use a parameterized similarity function if you wish.
Inputs:
- vector: shape ``(batch_size, embedding_dim)``, mask is ignored if provided
- matrix: shape ``(batch_size, num_rows, embedding_dim)``, with mask ``(batch_size, num_rows)``
Output:
- attention: shape ``(batch_size, num_rows)``. If ``normalize`` is ``True``, we return no
mask, as we've already applied it (masked input rows have value 0 in the output). If
``normalize`` is ``False``, we return the matrix mask, if there was one.
Parameters
----------
    similarity_function : ``Dict[str, Any]``, optional (default: ``None``, treated as ``{}``)
These parameters get passed to a similarity function (see
:mod:`deep_qa.tensors.similarity_functions` for more info on what's acceptable). The
default similarity function with no parameters is a simple dot product.
normalize : ``bool``, optional (default: ``True``)
If true, we normalize the computed similarities with a softmax, to return a probability
distribution for your attention. If false, this is just computing a similarity score.
"""
def __init__(self, similarity_function: Dict[str, Any]=None, normalize: bool=True, **kwargs):
super(Attention, self).__init__(**kwargs)
self.similarity_function_params = deepcopy(similarity_function)
if similarity_function is None:
similarity_function = {}
sim_function_choice = pop_choice(similarity_function, 'type',
list(similarity_functions.keys()),
default_to_first_choice=True)
similarity_function['name'] = self.name + '_similarity_function'
self.similarity_function = similarity_functions[sim_function_choice](**similarity_function)
self.normalize = normalize
@overrides
def build(self, input_shape):
tensor_1_dim = input_shape[0][-1]
tensor_2_dim = input_shape[1][-1]
self.trainable_weights = self.similarity_function.initialize_weights(tensor_1_dim, tensor_2_dim)
super(Attention, self).build(input_shape)
@overrides
def compute_mask(self, inputs, mask=None):
# pylint: disable=unused-argument
if self.normalize or mask is None:
# If we've normalized the distribution, we've already incorporated the mask, and we do
# not want to return it.
return None
return mask[1]
@overrides
def compute_output_shape(self, input_shapes):
return (input_shapes[1][0], input_shapes[1][1])
@overrides
def call(self, inputs, mask=None):
vector, matrix = inputs
if mask is None:
matrix_mask = None
else:
matrix_mask = mask[1]
num_rows = K.int_shape(matrix)[1]
tiled_vector = K.repeat_elements(K.expand_dims(vector, axis=1), num_rows, axis=1)
similarities = self.similarity_function.compute_similarity(tiled_vector, matrix)
if self.normalize:
return masked_softmax(similarities, matrix_mask)
else:
return similarities
@overrides
def get_config(self):
base_config = super(Attention, self).get_config()
config = {
'similarity_function': self.similarity_function_params,
'normalize': self.normalize
}
config.update(base_config)
return config
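# A rough, self-contained numpy sketch of the default (dot-product, normalized) behaviour, for
# illustration only; the exact numerics of deep_qa's masked_softmax may differ slightly, and the
# values below are made up.
if __name__ == '__main__':
    import numpy as np
    vector = np.random.rand(2, 3)             # (batch_size, embedding_dim)
    matrix = np.random.rand(2, 4, 3)          # (batch_size, num_rows, embedding_dim)
    matrix_mask = np.array([[1, 1, 1, 0],     # last row of the first instance is padding
                            [1, 1, 1, 1]], dtype='float32')
    similarities = np.einsum('bd,brd->br', vector, matrix)    # dot product against each row
    exps = np.exp(similarities) * matrix_mask                 # zero out masked rows
    attention = exps / exps.sum(axis=-1, keepdims=True)       # renormalize
    print(attention.shape)                                    # (2, 4); masked rows get probability 0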
| deep_qa-master | deep_qa/layers/attention/attention.py |
from copy import deepcopy
from typing import Any, Dict
from keras import backend as K
from overrides import overrides
from ..masked_layer import MaskedLayer
from ...common.params import pop_choice
from ...tensors.similarity_functions import similarity_functions
class MatrixAttention(MaskedLayer):
'''
This ``Layer`` takes two matrices as input and returns a matrix of attentions.
We compute the similarity between each row in each matrix and return unnormalized similarity
scores. We don't worry about zeroing out any masked values, because we propagate a correct
mask.
By default similarity is computed with a dot product, but you can alternatively use a
parameterized similarity function if you wish.
This is largely similar to using ``TimeDistributed(Attention)``, except the result is
unnormalized, and we return a mask, so you can do a masked normalization with the result. You
should use this instead of ``TimeDistributed(Attention)`` if you want to compute multiple
normalizations of the attention matrix.
Input:
- matrix_1: ``(batch_size, num_rows_1, embedding_dim)``, with mask
``(batch_size, num_rows_1)``
- matrix_2: ``(batch_size, num_rows_2, embedding_dim)``, with mask
``(batch_size, num_rows_2)``
Output:
- ``(batch_size, num_rows_1, num_rows_2)``, with mask of same shape
Parameters
----------
    similarity_function: Dict[str, Any], optional (default=None, treated as {})
These parameters get passed to a similarity function (see
:mod:`deep_qa.tensors.similarity_functions` for more info on what's acceptable). The
default similarity function with no parameters is a simple dot product.
'''
def __init__(self, similarity_function: Dict[str, Any]=None, **kwargs):
super(MatrixAttention, self).__init__(**kwargs)
self.similarity_function_params = deepcopy(similarity_function)
if similarity_function is None:
similarity_function = {}
sim_function_choice = pop_choice(similarity_function, 'type',
list(similarity_functions.keys()),
default_to_first_choice=True)
similarity_function['name'] = self.name + '_similarity_function'
self.similarity_function = similarity_functions[sim_function_choice](**similarity_function)
@overrides
def build(self, input_shape):
tensor_1_dim = input_shape[0][-1]
tensor_2_dim = input_shape[1][-1]
self.trainable_weights = self.similarity_function.initialize_weights(tensor_1_dim, tensor_2_dim)
super(MatrixAttention, self).build(input_shape)
@overrides
def compute_mask(self, inputs, mask=None):
# pylint: disable=unused-argument
mask_1, mask_2 = mask
if mask_1 is None and mask_2 is None:
return None
if mask_1 is None:
mask_1 = K.ones_like(K.sum(inputs[0], axis=-1))
if mask_2 is None:
mask_2 = K.ones_like(K.sum(inputs[1], axis=-1))
# Theano can't do batch_dot on ints, so we need to cast to float and then back.
mask_1 = K.cast(K.expand_dims(mask_1, axis=2), 'float32')
mask_2 = K.cast(K.expand_dims(mask_2, axis=1), 'float32')
return K.cast(K.batch_dot(mask_1, mask_2), 'uint8')
@overrides
def compute_output_shape(self, input_shape):
return (input_shape[0][0], input_shape[0][1], input_shape[1][1])
@overrides
def call(self, inputs, mask=None):
matrix_1, matrix_2 = inputs
num_rows_1 = K.shape(matrix_1)[1]
num_rows_2 = K.shape(matrix_2)[1]
tile_dims_1 = K.concatenate([[1, 1], [num_rows_2], [1]], 0)
tile_dims_2 = K.concatenate([[1], [num_rows_1], [1, 1]], 0)
tiled_matrix_1 = K.tile(K.expand_dims(matrix_1, axis=2), tile_dims_1)
tiled_matrix_2 = K.tile(K.expand_dims(matrix_2, axis=1), tile_dims_2)
return self.similarity_function.compute_similarity(tiled_matrix_1, tiled_matrix_2)
@overrides
def get_config(self):
base_config = super(MatrixAttention, self).get_config()
config = {'similarity_function': self.similarity_function_params}
config.update(base_config)
return config
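# A small numpy sketch of the default dot-product case, for illustration only: every row of
# matrix_1 is scored against every row of matrix_2, giving an unnormalized
# (num_rows_1, num_rows_2) similarity matrix per instance.
if __name__ == '__main__':
    import numpy as np
    matrix_1 = np.random.rand(2, 3, 5)        # (batch_size, num_rows_1, embedding_dim)
    matrix_2 = np.random.rand(2, 4, 5)        # (batch_size, num_rows_2, embedding_dim)
    similarities = np.einsum('bie,bje->bij', matrix_1, matrix_2)
    print(similarities.shape)                 # (2, 3, 4), still unnormalized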
| deep_qa-master | deep_qa/layers/attention/matrix_attention.py |
from .attention import Attention
from .gated_attention import GatedAttention
from .masked_softmax import MaskedSoftmax
from .matrix_attention import MatrixAttention
from .max_similarity_softmax import MaxSimilaritySoftmax
from .weighted_sum import WeightedSum
| deep_qa-master | deep_qa/layers/attention/__init__.py |
from keras import backend as K
from overrides import overrides
from ..masked_layer import MaskedLayer
from ...tensors.backend import last_dim_flatten
from ...tensors.masked_operations import masked_softmax
class MaskedSoftmax(MaskedLayer):
'''
This Layer performs a masked softmax. This could just be a `Lambda` layer that calls our
`tensors.masked_softmax` function, except that `Lambda` layers do not properly handle masked
input.
The expected input to this layer is a tensor of shape `(batch_size, num_options)`, with a mask
of the same shape. We also accept an input tensor of shape `(batch_size, num_options, 1)`,
which we will squeeze to be `(batch_size, num_options)` (though the mask must still be
`(batch_size, num_options)`).
While we give the expected input as having two modes, we also accept higher-order tensors. In
those cases, we'll first perform a `last_dim_flatten` on both the input and the mask, so that
we always do the softmax over a single dimension (the last one).
We give no output mask, as we expect this to only be used at the end of the model, to get a
final probability distribution over class labels (and it's a softmax, so you'll have zeros in
the tensor itself; do you really still need a mask?). If you need this to propagate the mask
for whatever reason, it would be pretty easy to change it to optionally do so - submit a PR.
'''
def __init__(self, **kwargs):
super(MaskedSoftmax, self).__init__(**kwargs)
@overrides
def compute_mask(self, inputs, mask=None):
# pylint: disable=unused-argument
# We do not need a mask beyond this layer.
return None
@overrides
def compute_output_shape(self, input_shape):
if input_shape[-1] == 1:
return input_shape[:-1]
else:
return input_shape
@overrides
def call(self, inputs, mask=None):
input_shape = K.int_shape(inputs)
if input_shape[-1] == 1:
inputs = K.squeeze(inputs, axis=-1)
input_shape = input_shape[:-1]
if len(input_shape) > 2:
original_inputs = inputs
inputs = last_dim_flatten(inputs)
if mask is not None:
mask = last_dim_flatten(mask)
# Now we have both inputs and mask with shape (?, num_options), and can do a softmax.
softmax_result = masked_softmax(inputs, mask)
if len(input_shape) > 2:
original_shape = K.shape(original_inputs)
input_shape = K.concatenate([[-1], original_shape[1:]], 0)
softmax_result = K.reshape(softmax_result, input_shape)
return softmax_result
| deep_qa-master | deep_qa/layers/attention/masked_softmax.py |
from keras import backend as K
from overrides import overrides
from ..masked_layer import MaskedLayer
from ...tensors.masked_operations import masked_batch_dot, masked_softmax
class MaxSimilaritySoftmax(MaskedLayer):
'''
This layer takes encoded questions and knowledge in a multiple choice
setting and computes the similarity between each of the question embeddings
and the background knowledge, and returns a softmax over the options.
Inputs:
- encoded_questions (batch_size, num_options, encoding_dim)
- encoded_knowledge (batch_size, num_options, knowledge_length, encoding_dim)
Output:
- option_probabilities (batch_size, num_options)
This is a pretty niche layer that does a very specific computation. We only
made it its own class instead of a ``Lambda`` layer so that we could handle
masking correctly, which ``Lambda`` does not.
'''
def __init__(self, knowledge_axis, max_knowledge_length, **kwargs):
self.knowledge_axis = knowledge_axis
self.max_knowledge_length = max_knowledge_length
super(MaxSimilaritySoftmax, self).__init__(**kwargs)
@overrides
def compute_mask(self, inputs, mask=None):
# pylint: disable=unused-argument
return None
@overrides
def compute_output_shape(self, input_shapes):
# (batch_size, num_options)
return (input_shapes[0][0], input_shapes[0][1])
@overrides
def call(self, inputs, mask=None):
questions, knowledge = inputs
question_mask, knowledge_mask = mask
question_knowledge_similarity = masked_batch_dot(questions, knowledge, question_mask, knowledge_mask)
max_knowledge_similarity = K.max(question_knowledge_similarity, axis=-1) # (samples, num_options)
return masked_softmax(max_knowledge_similarity, question_mask)
| deep_qa-master | deep_qa/layers/attention/max_similarity_softmax.py |
from keras import backend as K
from overrides import overrides
from ..masked_layer import MaskedLayer
from ...common.checks import ConfigurationError
from ...tensors.backend import switch
GATING_FUNCTIONS = ["*", "+", "||"]
class GatedAttention(MaskedLayer):
r"""
This layer implements the majority of the Gated Attention module described in
`"Gated-Attention Readers for Text Comprehension" by Dhingra et. al 2016
<https://arxiv.org/pdf/1606.01549.pdf>`_.
The module is described in section 3.2.2. For each token :math:`d_i` in :math:`D`,
the GA module forms a "token-specific representation" of the query :math:`q_i` using
soft attention, and then multiplies the query representation element-wise with the document
token representation.
- 1. :math:`\alpha_i = softmax(Q^T d_i)`
- 2. :math:`q_i = Q \alpha_i`
- 3. :math:`x_i = d_i \odot q_i` (:math:`\odot` is element-wise multiplication)
This layer implements equations 2 and 3 above but in a batched manner to get
:math:`X`, a tensor with all :math:`x_i`. Thus, the input
to the layer is :math:`\alpha` (``normalized_qd_attention``), a tensor with
all :math:`\alpha_i`, as well as :math:`Q` (``question_matrix``), and
:math:`D` (``document_matrix``), a tensor with all :math:`d_i`. Equation 6 uses
element-wise multiplication to model the interactions between :math:`d_i` and :math:`q_i`,
and the paper reports results when using other such gating functions like sum or
concatenation.
Inputs:
        - ``document_matrix``, a matrix of shape ``(batch, document length, biGRU hidden length)``.
Represents the document as encoded by the biGRU.
- ``question_matrix``, a matrix of shape ``(batch, question length, biGRU hidden length)``.
Represents the question as encoded by the biGRU.
- ``normalized_qd_attention``, the soft attention over the document and question.
Matrix of shape ``(batch, document length, question length)``.
Output:
- ``X``, a tensor of shape ``(batch, document length, biGRU hidden length)`` if the
gating function is ``*`` or ``+``, or ``(batch, document length, biGRU hidden length * 2)``
        if the gating function is ``||``. This serves as a representation of each token in
the document.
Parameters
----------
gating_function : string, default="*"
The gating function to use for modeling the interactions between the document and
query token. Supported gating functions are ``"*"`` for elementwise multiplication,
``"+"`` for elementwise addition, and ``"||"`` for concatenation.
Notes
-----
To find out how we calculated equation 1, see the GatedAttentionReader model (roughly,
a ``masked_batch_dot`` and a ``masked_softmax``)
"""
def __init__(self, gating_function="*", **kwargs):
# We need to wait until below to actually handle this, because self.name gets set in
# super.__init__.
# allowed gating functions are "*" (multiply), "+" (sum), and "||" (concatenate)
self.gating_function = gating_function
if self.gating_function not in GATING_FUNCTIONS:
raise ConfigurationError("Invalid gating function "
"{}, expected one of {}".format(self.gating_function,
GATING_FUNCTIONS))
super(GatedAttention, self).__init__(**kwargs)
@overrides
def compute_mask(self, inputs, mask=None):
# pylint: disable=unused-argument
return mask[0]
@overrides
def compute_output_shape(self, input_shapes):
return (input_shapes[0][0], input_shapes[0][1], input_shapes[0][2])
@overrides
def call(self, inputs, mask=None):
# document_matrix is of shape (batch, document length, biGRU hidden length).
# question_matrix is of shape (batch, question length, biGRU hidden length).
# normalized_qd_attention is of shape (batch, document length, question length).
document_matrix, question_matrix, normalized_qd_attention = inputs
if mask is None:
document_mask = K.ones_like(document_matrix)[:, :, 0]
else:
document_mask = mask[0]
# question_update is of shape (batch, document length, bigru hidden).
question_update = K.batch_dot(normalized_qd_attention, question_matrix, axes=[2, 1])
# We use the gating function to calculate the new document representation
# which is of shape (batch, document length, biGRU hidden length).
masked_representation = None
if self.gating_function == "||":
# shape (batch, document length, biGRU hidden length*2)
unmasked_representation = K.concatenate([question_update, document_matrix])
# Apply the mask from the document to zero out things that should be masked.
# The mask is of shape (batch, document length), so we tile it to
# shape (batch, document length, biGRU hidden length*2)
tiled_mask = K.repeat_elements(K.expand_dims(document_mask, axis=2),
(2*K.int_shape(document_matrix)[2]), 2)
masked_representation = switch(tiled_mask, unmasked_representation,
K.zeros_like(unmasked_representation))
return masked_representation
if self.gating_function == "*":
unmasked_representation = question_update * document_matrix
if self.gating_function == "+":
# shape (batch, document length, biGRU hidden length)
unmasked_representation = question_update + document_matrix
# Apply the mask from the document to zero out things that should be masked.
# The mask is of shape (batch, document length), so we tile it to
# shape (batch, document length, biGRU hidden length)
tiled_mask = K.repeat_elements(K.expand_dims(document_mask, axis=2),
K.int_shape(document_matrix)[2], 2)
masked_representation = switch(tiled_mask, unmasked_representation, K.zeros_like(unmasked_representation))
if masked_representation is not None:
return masked_representation
else:
raise ConfigurationError("Invalid gating function "
"{}, expected one of {}".format(self.gating_function,
GATING_FUNCTIONS))
@overrides
def get_config(self):
config = {'gating_function': self.gating_function}
base_config = super(GatedAttention, self).get_config()
config.update(base_config)
return config
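# A small numpy sketch of equations 2 and 3 from the docstring above with the default "*" gating
# function, for illustration only; the shapes and values are made up.
if __name__ == '__main__':
    import numpy as np
    batch, doc_len, q_len, hidden = 2, 5, 3, 4
    document_matrix = np.random.rand(batch, doc_len, hidden)
    question_matrix = np.random.rand(batch, q_len, hidden)
    # alpha is assumed to already be normalized over the question dimension (equation 1).
    alpha = np.random.rand(batch, doc_len, q_len)
    alpha = alpha / alpha.sum(axis=-1, keepdims=True)
    question_update = np.einsum('bdq,bqh->bdh', alpha, question_matrix)   # equation 2, batched
    gated = question_update * document_matrix                             # equation 3, "*" gating
    print(gated.shape)                                                    # (2, 5, 4)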
| deep_qa-master | deep_qa/layers/attention/gated_attention.py |
from keras import backend as K
from overrides import overrides
from ..masked_layer import MaskedLayer
class WeightedSum(MaskedLayer):
"""
This ``Layer`` takes a matrix of vectors and a vector of row weights, and returns a weighted
sum of the vectors. You might use this to get some aggregate sentence representation after
computing an attention over the sentence, for example.
Inputs:
- matrix: ``(batch_size, num_rows, embedding_dim)``, with mask ``(batch_size, num_rows)``
- vector: ``(batch_size, num_rows)``, mask is ignored
Outputs:
- A weighted sum of the rows in the matrix, with shape ``(batch_size, embedding_dim)``, with
mask=``None``.
Parameters
----------
use_masking: bool, default=True
If true, we will apply the input mask to the matrix before doing the weighted sum. If
you've computed your vector weights with masking, so that masked entries are 0, this is
unnecessary, and you can set this parameter to False to avoid an expensive computation.
Notes
-----
You probably should have used a mask when you computed your attention weights, so any row
that's masked in the matrix `should` already be 0 in the attention vector. But just in case
you didn't, we'll handle a mask on the matrix here too. If you know that you did masking right
on the attention, you can optionally remove the mask computation here, which will save you a
bit of time and memory.
While the above spec shows inputs with 3 and 2 modes, we also allow inputs of any order; we
always sum over the second-to-last dimension of the "matrix", weighted by the last dimension of
the "vector". Higher-order tensors get complicated for matching things, though, so there is a
hard constraint: all dimensions in the "matrix" before the final embedding must be matched in
the "vector".
For example, say I have a "matrix" with dimensions (batch_size, num_queries, num_words,
embedding_dim), representing some kind of embedding or encoding of several multi-word queries.
My attention "vector" must then have at least those dimensions, and could have more. So I
could have an attention over words per query, with shape (batch_size, num_queries, num_words),
or I could have an attention over query words for every document in some list, with shape
(batch_size, num_documents, num_queries, num_words). Both of these cases are fine. In the
first case, the returned tensor will have shape (batch_size, num_queries, embedding_dim), and
in the second case, it will have shape (batch_size, num_documents, num_queries, embedding_dim).
But you `can't` have an attention "vector" that does not include all of the queries, so shape
(batch_size, num_words) is not allowed - you haven't specified how to handle that dimension in
the "matrix", so we can't do anything with this input.
"""
def __init__(self, use_masking: bool=True, **kwargs):
self.use_masking = use_masking
super(WeightedSum, self).__init__(**kwargs)
@overrides
def compute_mask(self, inputs, mask=None):
# pylint: disable=unused-argument
# We don't need to worry about a mask after we've summed over the rows of the matrix.
# You might actually still need a mask if you used a higher-order tensor, but probably the
# right place to handle that is with careful use of TimeDistributed. Or submit a PR.
return None
@overrides
def compute_output_shape(self, input_shapes):
matrix_shape, attention_shape = input_shapes
return attention_shape[:-1] + matrix_shape[-1:]
@overrides
def call(self, inputs, mask=None):
# pylint: disable=redefined-variable-type
matrix, attention_vector = inputs
num_attention_dims = K.ndim(attention_vector)
num_matrix_dims = K.ndim(matrix) - 1
for _ in range(num_attention_dims - num_matrix_dims):
matrix = K.expand_dims(matrix, axis=1)
if mask is None:
matrix_mask = None
else:
matrix_mask = mask[0]
if self.use_masking and matrix_mask is not None:
for _ in range(num_attention_dims - num_matrix_dims):
matrix_mask = K.expand_dims(matrix_mask, axis=1)
matrix = K.cast(K.expand_dims(matrix_mask), 'float32') * matrix
return K.sum(K.expand_dims(attention_vector, axis=-1) * matrix, -2)
@overrides
def get_config(self):
base_config = super(WeightedSum, self).get_config()
config = {'use_masking': self.use_masking}
config.update(base_config)
return config
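# A small numpy sketch of the basic case described above, for illustration only: an attention
# distribution over the rows of a matrix is turned into a single weighted-sum vector per instance.
if __name__ == '__main__':
    import numpy as np
    matrix = np.random.rand(2, 4, 5)                      # (batch_size, num_rows, embedding_dim)
    attention = np.array([[0.1, 0.2, 0.3, 0.4],
                          [0.25, 0.25, 0.25, 0.25]])      # (batch_size, num_rows)
    weighted_sum = (attention[:, :, np.newaxis] * matrix).sum(axis=1)
    print(weighted_sum.shape)                             # (2, 5)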
| deep_qa-master | deep_qa/layers/attention/weighted_sum.py |
from keras import backend as K
from overrides import overrides
from ...tensors.backend import switch
from ..masked_layer import MaskedLayer
class ReplaceMaskedValues(MaskedLayer):
"""
This ``Layer`` replaces all masked values in a tensor with some value. You might want to do
this before passing the tensor into a layer that does a max or a min, for example, to replace
all masked values with something very large or very negative. We basically just call
``switch`` on the mask.
Input:
- tensor: a tensor of arbitrary shape
Output:
- the same tensor, with masked values replaced by some input value
Parameters
----------
replace_with: float
We will replace all masked values in the tensor with this value.
"""
def __init__(self, replace_with: float, **kwargs):
self.replace_with = replace_with
super(ReplaceMaskedValues, self).__init__(**kwargs)
@overrides
def compute_mask(self, inputs, mask=None): # pylint: disable=unused-argument
return mask
@overrides
def compute_output_shape(self, input_shape):
return input_shape
@overrides
def call(self, inputs, mask=None):
if mask is None:
# It turns out that Keras doesn't like it if you just return inputs, so we need to
# return a different tensor object. Just doing a cast apparently doesn't work, either,
# so we'll add 0.
return inputs + 0.0
return switch(mask, inputs, K.ones_like(inputs) * self.replace_with)
@overrides
def get_config(self):
config = {'replace_with': self.replace_with}
base_config = super(ReplaceMaskedValues, self).get_config()
config.update(base_config)
return config
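# A small numpy sketch of the effect of this layer, for illustration only: masked positions are
# replaced with `replace_with` (here a very negative number, as you might want before a max).
if __name__ == '__main__':
    import numpy as np
    tensor = np.array([[1.0, 2.0, 3.0],
                       [4.0, 5.0, 6.0]])
    mask = np.array([[1, 1, 0],
                     [1, 0, 0]], dtype=bool)
    print(np.where(mask, tensor, -1e7))    # masked cells become -1e7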
| deep_qa-master | deep_qa/layers/backend/replace_masked_values.py |
from keras import backend as K
from overrides import overrides
from ..masked_layer import MaskedLayer
from ...tensors.backend import switch, very_negative_like
class Max(MaskedLayer):
"""
This ``Layer`` performs a max over some dimension. Keras has a similar layer called
``GlobalMaxPooling1D``, but it is not as configurable as this one, and it does not support
masking.
If the mask is not ``None``, it must be the same shape as the input.
Input:
- A tensor of arbitrary shape (having at least 3 dimensions).
Output:
- A tensor with one less dimension, where we have taken a max over one of the dimensions.
"""
def __init__(self, axis: int=-1, **kwargs):
self.axis = axis
super(Max, self).__init__(**kwargs)
@overrides
def compute_mask(self, inputs, mask=None):
# pylint: disable=unused-argument
if mask is None:
return None
return K.any(mask, axis=self.axis)
@overrides
def compute_output_shape(self, input_shape):
axis = self.axis
if axis < 0:
axis += len(input_shape)
return input_shape[:axis] + input_shape[axis+1:]
@overrides
def call(self, inputs, mask=None):
if mask is not None:
inputs = switch(mask, inputs, very_negative_like(inputs))
return K.max(inputs, axis=self.axis)
@overrides
def get_config(self):
config = {'axis': self.axis}
base_config = super(Max, self).get_config()
config.update(base_config)
return config
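# A small numpy sketch of a masked max, for illustration only: masked positions are pushed to a
# very negative value before taking the max, which is what the layer does with `switch` and
# `very_negative_like`.
if __name__ == '__main__':
    import numpy as np
    tensor = np.array([[1.0, 5.0, 2.0],
                       [3.0, 9.0, 4.0]])
    mask = np.array([[1, 0, 1],
                     [1, 1, 1]], dtype=bool)
    print(np.where(mask, tensor, -1e7).max(axis=-1))    # [2. 9.]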
| deep_qa-master | deep_qa/layers/backend/max.py |
from keras import backend as K
from overrides import overrides
from ..masked_layer import MaskedLayer
class ExpandFromBatch(MaskedLayer):
"""
Reshapes a collapsed tensor, taking the batch size and separating it into ``num_to_expand``
dimensions, following the shape of a second input tensor. This is meant to be used in
conjunction with :class:`~deep_qa.layers.backend.collapse_to_batch.CollapseToBatch`, to achieve
the same effect as Keras' ``TimeDistributed`` layer, but for shapes that are not fully
specified at graph compilation time.
For example, say you had an original tensor of shape ``(None (2), 4, None (5), 3)``, then
collapsed it with ``CollapseToBatch(2)(tensor)`` to get a tensor with shape ``(None (40), 3)``
(here I'm using ``None (x)`` to denote a dimension with unknown length at graph compilation
time, where ``x`` is the actual runtime length). You can then call
``ExpandFromBatch(2)(collapsed, tensor)`` with the result to expand the first two dimensions
out of the batch again (presumably after you've done some computation when it was collapsed).
Inputs:
- a tensor that has been collapsed with ``CollapseToBatch(num_to_expand)``.
- the original tensor that was used as input to ``CollapseToBatch`` (or one with identical
shape in the collapsed dimensions). We will use this input only to get its shape.
Output:
- tensor with ``ndim = input_ndim + num_to_expand``, with the additional dimensions coming
immediately after the first (batch-size) dimension.
Parameters
----------
num_to_expand: int
The number of dimensions to expand from the batch size.
"""
def __init__(self, num_to_expand: int, **kwargs):
self.num_to_expand = num_to_expand
super(ExpandFromBatch, self).__init__(**kwargs)
@overrides
def call(self, inputs, mask=None):
return self.__reshape_tensors(inputs[0], inputs[1])
@overrides
def compute_mask(self, inputs, mask=None):
# pylint: disable=unused-argument
if mask is None:
return None
if mask[0] is None or mask[1] is None:
return None
return self.__reshape_tensors(mask[0], mask[1])
@overrides
def compute_output_shape(self, input_shape):
collapsed_shape, original_shape = input_shape
return (None,) + original_shape[1:1 + self.num_to_expand] + collapsed_shape[1:]
@overrides
def get_config(self):
base_config = super(ExpandFromBatch, self).get_config()
config = {'num_to_expand': self.num_to_expand}
config.update(base_config)
return config
def __reshape_tensors(self, collapsed_tensor, original_tensor):
collapsed_shape = K.shape(original_tensor)[1:1 + self.num_to_expand]
remaining_shape = K.shape(collapsed_tensor)[1:]
new_shape = K.concatenate([[-1], collapsed_shape, remaining_shape], 0)
return K.reshape(collapsed_tensor, new_shape)
| deep_qa-master | deep_qa/layers/backend/expand_from_batch.py |
from typing import Tuple
from keras import backend as K
from overrides import overrides
from ..masked_layer import MaskedLayer
class Permute(MaskedLayer):
"""
This ``Layer`` calls ``K.permute_dimensions`` on both the input and the mask.
If the mask is not ``None``, it must have the same shape as the input.
Input:
- A tensor of arbitrary shape.
Output:
- A tensor with permuted dimensions.
"""
def __init__(self, pattern: Tuple[int], **kwargs):
self.pattern = pattern
super(Permute, self).__init__(**kwargs)
@overrides
def compute_mask(self, inputs, mask=None):
# pylint: disable=unused-argument
if mask is None:
return None
return K.permute_dimensions(mask, self.pattern)
@overrides
def compute_output_shape(self, input_shape):
return tuple([input_shape[i] for i in self.pattern])
@overrides
def call(self, inputs, mask=None):
return K.permute_dimensions(inputs, pattern=self.pattern)
| deep_qa-master | deep_qa/layers/backend/permute.py |
from keras import backend as K
from overrides import overrides
from ..masked_layer import MaskedLayer
class Multiply(MaskedLayer):
"""
This ``Layer`` performs elementwise multiplication between two tensors, supporting masking. We
literally just call ``tensor_1 * tensor_2``; the only reason this is a ``Layer`` is so that we
can support masking (and because it's slightly nicer to read in a model definition than a
lambda layer).
We also try to be a little bit smart if you're wanting to broadcast the multiplication, by
having the tensors differ in the number of dimensions by one.
Input:
- tensor_1: a tensor of arbitrary shape, with an optional mask of the same shape
- tensor_2: a tensor with the same shape as ``tensor_1`` (or one less or one more
dimension), with an optional mask of the same shape
Output:
- ``tensor_1 * tensor_2``.
"""
def __init__(self, **kwargs):
super(Multiply, self).__init__(**kwargs)
@overrides
def compute_mask(self, inputs, mask=None):
# pylint: disable=unused-argument
tensor_1, tensor_2 = inputs
tensor_1_mask, tensor_2_mask = mask
if tensor_1_mask is None:
tensor_1_mask = K.ones_like(tensor_1)
if tensor_2_mask is None:
tensor_2_mask = K.ones_like(tensor_2)
tensor_1_mask, tensor_2_mask = self.expand_dims_if_necessary(tensor_1_mask, tensor_2_mask)
return K.cast(tensor_1_mask, 'uint8') * K.cast(tensor_2_mask, 'uint8')
@overrides
def compute_output_shape(self, input_shape):
return input_shape[0]
@overrides
def call(self, inputs, mask=None):
tensor_1, tensor_2 = inputs
tensor_1, tensor_2 = self.expand_dims_if_necessary(tensor_1, tensor_2)
return tensor_1 * tensor_2
@staticmethod
def expand_dims_if_necessary(tensor_1, tensor_2):
tensor_1_ndim = K.ndim(tensor_1)
tensor_2_ndim = K.ndim(tensor_2)
if tensor_1_ndim == tensor_2_ndim:
return tensor_1, tensor_2
elif tensor_1_ndim == tensor_2_ndim - 1:
return K.expand_dims(tensor_1), tensor_2
elif tensor_2_ndim == tensor_1_ndim - 1:
return tensor_1, K.expand_dims(tensor_2)
else:
raise RuntimeError("Can't multiply two tensors with ndims "
"{} and {}".format(tensor_1_ndim, tensor_2_ndim))
| deep_qa-master | deep_qa/layers/backend/multiply.py |
from keras import backend as K
from overrides import overrides
from ..masked_layer import MaskedLayer
class AddMask(MaskedLayer):
"""
This ``Layer`` adds a mask to a tensor. It is intended solely for testing, though if you have
a use case for this outside of testing, feel free to use it. The ``call()`` method just
returns the inputs, and the ``compute_mask`` method calls ``K.not_equal(inputs, mask_value)``,
and that's it. This is different from Keras' ``Masking`` layer, which assumes higher-order
input and does a ``K.any()`` call in ``compute_mask``.
Input:
- tensor: a tensor of arbitrary shape
Output:
- the same tensor, now with a mask attached of the same shape
Parameters
----------
mask_value: float, optional (default=0.0)
This is the value that we will compare to in ``compute_mask``.
"""
def __init__(self, mask_value: float=0.0, **kwargs):
self.mask_value = mask_value
super(AddMask, self).__init__(**kwargs)
@overrides
def compute_mask(self, inputs, mask=None): # pylint: disable=unused-argument
return K.cast(K.not_equal(inputs, self.mask_value), 'bool')
@overrides
def compute_output_shape(self, input_shape):
return input_shape
@overrides
def call(self, inputs, mask=None):
# It turns out that Keras doesn't like it if you just return inputs, so we need to return a
# different tensor object. Just doing a cast apparently doesn't work, either, so we'll
# add 0.
return inputs + 0.0
@overrides
def get_config(self):
config = {'mask_value': self.mask_value}
base_config = super(AddMask, self).get_config()
config.update(base_config)
return config
| deep_qa-master | deep_qa/layers/backend/add_mask.py |
import keras.backend as K
from overrides import overrides
from ..masked_layer import MaskedLayer
class BatchDot(MaskedLayer):
"""
This ``Layer`` calls ``K.batch_dot()`` on two inputs ``tensor_a`` and ``tensor_b``.
This function will work for tensors of arbitrary size as long as
    ``abs(K.ndim(tensor_a) - K.ndim(tensor_b)) <= 1``, due to limitations in ``K.batch_dot()``.
When the input tensors have more than three dimensions, they must have the same shape, except
for the last two dimensions. See the examples for more explanation of what this means.
    We always assume the dimension to perform the dot is the last one, and that
    the masks have one fewer dimension than the tensors. Note that this layer
    does not return zeroes in places that are masked, but does pass a correct
    mask forward. If this then gets fed into ``masked_softmax``, for instance,
    your tensor will be correctly normalized.
Inputs:
- tensor_a: tensor with ``ndim >= 2``.
- tensor_b: tensor with ``ndim >= 2``.
Output:
- a_dot_b
Examples
--------
The following examples will try to give some insight on how this layer works in relation
to ``K.batch_dot()``. Note that the Keras documentation (as of 2/13/17) on ``K.batch_dot``
is incorrect, and that this layer behaves differently from the documented behavior.
As a first example, let's suppose that ``tensor_a`` and ``tensor_b`` have the same
number of dimensions. Let the shape of ``tensor_a`` be ``(2, 3, 2)``, and let the shape
of ``tensor_b`` be ``(2, 4, 2)``. The mask accompanying these inputs always has one less
dimension, so the ``tensor_a_mask`` has shape ``(2, 3)`` and ``tensor_b_mask`` has
shape ``(2, 4)``. The shape of the ``batch_dot`` output would thus be ``(2, 3, 4)``. This is
because we are taking the batch dot of the last dimension, so the output shape is ``(2, 3)``
(from tensor_a) with ``(4)`` (from tensor_b) appended on (to get ``(2, 3, 4)`` in total). The
output mask has the same shape as the output, and is thus ``(2, 3, 4)`` as well.
>>> import keras.backend as K
>>> tensor_a = K.ones(shape=(2, 3, 2))
>>> tensor_b = K.ones(shape=(2, 4, 2))
>>> K.eval(K.batch_dot(tensor_a, tensor_b, axes=(2,2))).shape
(2, 3, 4)
Next, let's look at an example where ``tensor_a`` and ``tensor_b`` are "uneven" (different number
of dimensions). Let the shape of ``tensor_a`` be ``(2, 4, 2)``, and let the shape of ``tensor_b``
be ``(2, 4, 3, 2)``. The mask accompanying these inputs always has one less dimension, so the
``tensor_a_mask`` has shape ``(2, 4)`` and ``tensor_b_mask`` has shape ``(2, 4, 3)``. The shape
of the ``batch_dot`` output would thus be ``(2, 4, 3)``. In the case of uneven tensors, we always
expand the last dimension of the smaller tensor to make them even. Thus in this case, we expand
``tensor_a`` to get a new shape of ``(2, 4, 2, 1)``. Now we are taking the ``batch_dot`` of a
tensor with shape ``(2, 4, 2, 1)`` and ``(2, 4, 3, 2)``. Note that the first two dimensions of
this tensor are the same ``(2, 4)`` -- this is a requirement imposed by ``K.batch_dot``.
Following the methodology of calculating the output shape above, we get that the output is
``(2, 4, 1, 3)`` since we get ``(2, 4, 1)`` from ``tensor_a`` and ``(3)`` from ``tensor_b``. We
then squeeze the tensor to remove the 1-dimension to get a final shape of ``(2, 4, 3)``. Note
that the mask has the same shape.
>>> import keras.backend as K
>>> tensor_a = K.ones(shape=(2, 4, 2))
>>> tensor_b = K.ones(shape=(2, 4, 3, 2))
>>> tensor_a_expanded = K.expand_dims(tensor_a, axis=-1)
>>> unsqueezed_bd = K.batch_dot(tensor_a_expanded, tensor_b, axes=(2,3))
>>> final_bd = K.squeeze(unsqueezed_bd, axis=K.ndim(tensor_a)-1)
>>> K.eval(final_bd).shape
(2, 4, 3)
Lastly, let's look at the uneven case where ``tensor_a`` has more dimensions than ``tensor_b``.
Let the shape of ``tensor_a`` be ``(2, 3, 4, 2)``, and let the shape of ``tensor_b``
be ``(2, 3, 2)``. Since the mask accompanying these inputs always has one less dimension,
``tensor_a_mask`` has shape ``(2, 3, 4)`` and ``tensor_b_mask`` has shape ``(2, 3)``. The shape
of the ``batch_dot`` output would thus be ``(2, 3, 4)``. Since these tensors are uneven, expand
the smaller tensor, ``tensor_b``, to get a new shape of ``(2, 3, 2, 1)``. Now we are taking
the ``batch_dot`` of a tensor with shape ``(2, 3, 4, 2)`` and ``(2, 3, 2, 1)``. Note again that the
first two dimensions of this tensor are the same ``(2, 3)``. We can see that the output shape is
``(2, 3, 4, 1)`` since we get ``(2, 3, 4)`` from ``tensor_a`` and ``(1)`` from ``tensor_b``. We
then squeeze the tensor to remove the 1-dimension to get a final shape of ``(2, 3, 4)``. Note
that the mask has the same shape.
>>> import keras.backend as K
>>> tensor_a = K.ones(shape=(2, 3, 4, 2))
>>> tensor_b = K.ones(shape=(2, 3, 2))
>>> tensor_b_expanded = K.expand_dims(tensor_b, axis=-1)
>>> unsqueezed_bd = K.batch_dot(tensor_a, tensor_b_expanded, axes=(3, 2))
>>> final_bd = K.squeeze(unsqueezed_bd, axis=K.ndim(tensor_a)-1)
>>> K.eval(final_bd).shape
(2, 3, 4)
"""
def __init__(self, **kwargs):
super(BatchDot, self).__init__(**kwargs)
@overrides
def compute_mask(self, inputs, mask=None):
# pylint: disable=unused-argument
tensor_a, tensor_b = inputs
mask_a, mask_b = mask
a_dot_axis = K.ndim(tensor_a) - 1
b_dot_axis = K.ndim(tensor_b) - 1
if mask_a is None and mask_b is None:
return None
elif mask_a is None:
mask_a = K.sum(K.ones_like(tensor_a), axis=-1)
elif mask_b is None:
# (batch_size, b_length)
mask_b = K.sum(K.ones_like(tensor_b), axis=-1)
float_mask_a = K.cast(mask_a, "float32")
float_mask_b = K.cast(mask_b, "float32")
if b_dot_axis == a_dot_axis:
# tensor_a and tensor_b have the same length.
float_mask_a = K.expand_dims(float_mask_a, axis=-1)
float_mask_b = K.expand_dims(float_mask_b, axis=-1)
final_mask = K.batch_dot(float_mask_a, float_mask_b,
axes=(a_dot_axis, b_dot_axis))
elif a_dot_axis < b_dot_axis:
# tensor_a has less dimensions than tensor_b.
# We would tile tensor_a to have the same shape as tensor_b,
# but we can just expand tensor_a and have TF broadcast
# over the last dimension
float_mask_a = K.expand_dims(float_mask_a, axis=-1)
final_mask = float_mask_a * float_mask_b
else:
# tensor_a has more dimensions than tensor_b.
# We would tile tensor_b to have the same shape as tensor_a,
# but we can just expand tensor_b and have TF broadcast
# over the last dimension
float_mask_b = K.expand_dims(float_mask_b, axis=-1)
final_mask = float_mask_a * float_mask_b
return final_mask
@overrides
def compute_output_shape(self, input_shape):
tensor_a_shape, tensor_b_shape = input_shape
a_dot_axis = len(tensor_a_shape) - 1
b_dot_axis = len(tensor_b_shape) - 1
if b_dot_axis < a_dot_axis:
tensor_b_shape += (1,)
if b_dot_axis > a_dot_axis:
tensor_a_shape += (1,)
# This assumes that we do the dot product over the last dimension
final_out_shape = []
for i in range(0, len(tensor_a_shape)):
if i != a_dot_axis:
final_out_shape.append(tensor_a_shape[i])
for i in range(len(tensor_b_shape)-2, len(tensor_b_shape)):
if i != b_dot_axis and i != 0:
final_out_shape.append(tensor_b_shape[i])
if b_dot_axis != a_dot_axis:
# remove the 1 we inserted
final_out_shape.pop(a_dot_axis)
if len(final_out_shape) == 1:
final_out_shape.append(1)
return tuple(final_out_shape)
@overrides
def call(self, inputs, mask=None):
tensor_a, tensor_b = inputs
a_dot_axis = K.ndim(tensor_a) - 1
b_dot_axis = K.ndim(tensor_b) - 1
if a_dot_axis > b_dot_axis:
tensor_b = K.expand_dims(tensor_b, axis=-1)
if a_dot_axis < b_dot_axis:
tensor_a = K.expand_dims(tensor_a, axis=-1)
a_dot_b = K.batch_dot(tensor_a, tensor_b, axes=(a_dot_axis, b_dot_axis))
if a_dot_axis != b_dot_axis:
a_dot_b = K.squeeze(a_dot_b, axis=a_dot_axis)
return a_dot_b
| deep_qa-master | deep_qa/layers/backend/batch_dot.py |
from .add_mask import AddMask
from .batch_dot import BatchDot
from .collapse_to_batch import CollapseToBatch
from .envelope import Envelope
from .expand_from_batch import ExpandFromBatch
from .max import Max
from .multiply import Multiply
from .permute import Permute
from .replace_masked_values import ReplaceMaskedValues
from .repeat import Repeat
from .repeat_like import RepeatLike
from .squeeze import Squeeze
| deep_qa-master | deep_qa/layers/backend/__init__.py |
from keras import backend as K
from overrides import overrides
from ..masked_layer import MaskedLayer
class Repeat(MaskedLayer):
"""
This ``Layer`` calls ``K.repeat_elements`` on both the input and the mask, after calling
``K.expand_dims``.
If the mask is not ``None``, we must be able to call ``K.expand_dims`` using the same axis
parameter as we do for the input.
Input:
- A tensor of arbitrary shape.
Output:
- The input tensor repeated along one of the dimensions.
Parameters
----------
axis: int
We will add a dimension to the input tensor at this axis.
repetitions: int
The new dimension will have this size to it, with each slice being identical to the
original input tensor.
"""
def __init__(self, axis: int, repetitions: int, **kwargs):
self.axis = axis
self.repetitions = repetitions
super(Repeat, self).__init__(**kwargs)
@overrides
def compute_mask(self, inputs, mask=None):
# pylint: disable=unused-argument
if mask is None:
return None
return self.__repeat_tensor(mask)
@overrides
def compute_output_shape(self, input_shape):
return input_shape[:self.axis] + (self.repetitions,) + input_shape[self.axis:]
@overrides
def call(self, inputs, mask=None):
return self.__repeat_tensor(inputs)
def __repeat_tensor(self, tensor):
return K.repeat_elements(K.expand_dims(tensor, self.axis), self.repetitions, self.axis)
@overrides
def get_config(self):
base_config = super(Repeat, self).get_config()
config = {'axis': self.axis, 'repetitions': self.repetitions}
config.update(base_config)
return config
| deep_qa-master | deep_qa/layers/backend/repeat.py |
from keras import backend as K
from overrides import overrides
from ..masked_layer import MaskedLayer
class CollapseToBatch(MaskedLayer):
"""
Reshapes a higher order tensor, taking the first ``num_to_collapse`` dimensions after the batch
dimension and folding them into the batch dimension. For example, a tensor of shape (2, 4, 5,
3), collapsed with ``num_to_collapse = 2``, would become a tensor of shape (40, 3). We perform
identical computation on the input mask, if there is one.
This is essentially what Keras' ``TimeDistributed`` layer does (and then undoes) to apply a
layer to a higher-order tensor, and that's the intended use for this layer. However,
``TimeDistributed`` cannot handle distributing across dimensions with unknown lengths at graph
compilation time. This layer works even in that case. So, if your actual tensor shape at
graph compilation time looks like (None, None, None, 3), or (None, 4, None, 3), you can still
use this layer (and :class:`~deep_qa.layers.backend.expand_from_batch.ExpandFromBatch`) to get
the same result as ``TimeDistributed``. If your shapes are fully known at graph compilation
time, just use ``TimeDistributed``, as it's a nicer API for the same functionality.
Inputs:
- tensor with ``ndim >= 3``
Output:
- tensor with ``ndim = input_ndim - num_to_collapse``, with the removed dimensions folded
into the first (batch-size) dimension
Parameters
----------
num_to_collapse: int
The number of dimensions to fold into the batch size.
"""
def __init__(self, num_to_collapse: int, **kwargs):
self.num_to_collapse = num_to_collapse
super(CollapseToBatch, self).__init__(**kwargs)
@overrides
def call(self, inputs, mask=None):
return self.__collapse_tensor(inputs)
@overrides
def compute_mask(self, inputs, mask=None):
# pylint: disable=unused-argument
if mask is None:
return None
return self.__collapse_tensor(mask)
@overrides
def compute_output_shape(self, input_shape):
return (None,) + input_shape[1 + self.num_to_collapse:]
@overrides
def get_config(self):
base_config = super(CollapseToBatch, self).get_config()
config = {'num_to_collapse': self.num_to_collapse}
config.update(base_config)
return config
def __collapse_tensor(self, tensor):
# If we were to call K.int_shape(inputs), we would get something back that has None in it
# (other than the batch dimension), because the shape is not fully known at graph
# compilation time. We can't do a reshape with more than one unknown dimension, which is
# why we're doing this whole layer in the first place instead of just using
# TimeDistributed. tf.reshape will let us pass in a tensor that has the shape, instead of
# just some ints. So we can use tf.shape(tensor) to get the actual runtime shape of the
# tensor _as a tensor_, which we then pass to tf.reshape().
new_shape = K.concatenate([[-1], K.shape(tensor)[1 + self.num_to_collapse:]], 0)
return K.reshape(tensor, new_shape)
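# A small numpy sketch of the collapse/expand round trip, for illustration only: fold the first
# two non-batch dimensions into the batch dimension, run some per-element computation, then unfold
# back to the original leading shape. The real layers do the same thing with run-time shapes
# obtained from K.shape instead of static numpy shapes.
if __name__ == '__main__':
    import numpy as np
    tensor = np.random.rand(2, 4, 5, 3)                       # (batch, d1, d2, features)
    collapsed = tensor.reshape((-1,) + tensor.shape[3:])      # (40, 3), like CollapseToBatch(2)
    processed = collapsed * 2.0                               # stand-in for the wrapped layer
    expanded = processed.reshape(tensor.shape[:3] + processed.shape[1:])   # like ExpandFromBatch(2)
    print(collapsed.shape, expanded.shape)                    # (40, 3) (2, 4, 5, 3)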
| deep_qa-master | deep_qa/layers/backend/collapse_to_batch.py |
from keras import backend as K
from overrides import overrides
from ..masked_layer import MaskedLayer
class Squeeze(MaskedLayer):
"""
This ``Layer`` removes a 1-D dimension from the tensor at index ``axis``, acting as simply
a layer version of the backend squeeze function.
If the mask is not ``None``, it must be the same shape as the input.
Input:
- A tensor of arbitrary shape (having at least 3 dimensions).
Output:
- A tensor with the same data as ``inputs`` but reduced dimensions.
Parameters
----------
axis: int, optional (default=-1)
The axis that we should squeeze.
"""
def __init__(self, axis: int=-1, **kwargs):
self.axis = axis
super(Squeeze, self).__init__(**kwargs)
@overrides
def compute_mask(self, inputs, mask=None):
# pylint: disable=unused-argument
if mask is None:
return None
return K.squeeze(mask, axis=self.axis)
@overrides
def compute_output_shape(self, input_shape):
axis = self.axis
if axis < 0:
axis += len(input_shape)
return input_shape[:axis] + input_shape[axis+1:]
@overrides
def call(self, inputs, mask=None):
return K.squeeze(inputs, axis=self.axis)
@overrides
def get_config(self):
base_config = super(Squeeze, self).get_config()
config = {'axis': self.axis}
config.update(base_config)
return config
| deep_qa-master | deep_qa/layers/backend/squeeze.py |
from overrides import overrides
from keras import backend as K
from ..masked_layer import MaskedLayer
class Envelope(MaskedLayer):
"""
Given a probability distribution over a begin index and an end index of some sequence, this
``Layer`` computes an envelope over the sequence, a probability that each element lies within
"begin" and "end".
Specifically, the computation done here is the following::
after_span_begin = K.cumsum(span_begin, axis=-1)
after_span_end = K.cumsum(span_end, axis=-1)
before_span_end = 1 - after_span_end
envelope = after_span_begin * before_span_end
Inputs:
- span_begin: tensor with shape ``(batch_size, sequence_length)``, representing a
probability distribution over a start index in the sequence
- span_end: tensor with shape ``(batch_size, sequence_length)``, representing a probability
distribution over an end index in the sequence
Outputs:
- envelope: tensor with shape ``(batch_size, sequence_length)``, representing a probability
for each index of the sequence belonging in the span
If there is a mask associated with either of the inputs, we ignore it, assuming that you used
the mask correctly when you computed your probability distributions. But we support masking in
this layer, so that you have an output mask if you really need it. We just return the first
mask that is not ``None`` (or ``None``, if both are ``None``).
"""
def __init__(self, **kwargs):
super(Envelope, self).__init__(**kwargs)
@overrides
def compute_mask(self, inputs, mask=None): # pylint: disable=unused-argument
span_begin_mask, span_end_mask = mask
return span_begin_mask if span_begin_mask is not None else span_end_mask
@overrides
def compute_output_shape(self, input_shape):
span_begin_shape, _ = input_shape
return span_begin_shape
@overrides
def call(self, inputs, mask=None):
span_begin, span_end = inputs
after_span_begin = K.cumsum(span_begin, axis=-1)
after_span_end = K.cumsum(span_end, axis=-1)
before_span_end = 1.0 - after_span_end
return after_span_begin * before_span_end
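# A small numpy sketch of the envelope computation above, for illustration only, using sharp
# begin (index 1) and end (index 3) distributions so the result is easy to read.
if __name__ == '__main__':
    import numpy as np
    span_begin = np.array([[0.0, 1.0, 0.0, 0.0, 0.0]])    # (batch_size, sequence_length)
    span_end = np.array([[0.0, 0.0, 0.0, 1.0, 0.0]])
    after_span_begin = np.cumsum(span_begin, axis=-1)      # [[0. 1. 1. 1. 1.]]
    after_span_end = np.cumsum(span_end, axis=-1)          # [[0. 0. 0. 1. 1.]]
    before_span_end = 1.0 - after_span_end                 # [[1. 1. 1. 0. 0.]]
    print(after_span_begin * before_span_end)              # [[0. 1. 1. 0. 0.]]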
| deep_qa-master | deep_qa/layers/backend/envelope.py |
from keras import backend as K
from overrides import overrides
from ..masked_layer import MaskedLayer
class RepeatLike(MaskedLayer):
"""
This ``Layer`` is like :class:`~.repeat.Repeat`, but gets the number of repetitions to use from
a second input tensor. This allows doing a number of repetitions that is unknown at graph
compilation time, and is necessary when the ``repetitions`` argument to ``Repeat`` would be
``None``.
If the mask is not ``None``, we must be able to call ``K.expand_dims`` using the same axis
parameter as we do for the input.
Input:
- A tensor of arbitrary shape, which we will expand and tile.
        - A second tensor whose shape along one dimension we will copy.
Output:
- The input tensor repeated along one of the dimensions.
Parameters
----------
axis: int
We will add a dimension to the input tensor at this axis.
copy_from_axis: int
We will copy the dimension from the second tensor at this axis.
"""
def __init__(self, axis: int, copy_from_axis: int, **kwargs):
self.axis = axis
self.copy_from_axis = copy_from_axis
super(RepeatLike, self).__init__(**kwargs)
@overrides
def compute_mask(self, inputs, mask=None):
# pylint: disable=unused-argument
if mask is None or mask[0] is None:
return None
return self.__repeat_tensor(mask[0], inputs[1])
@overrides
def compute_output_shape(self, input_shape):
return input_shape[0][:self.axis] + (input_shape[1][self.copy_from_axis],) + input_shape[0][self.axis:]
@overrides
def call(self, inputs, mask=None):
return self.__repeat_tensor(inputs[0], inputs[1])
def __repeat_tensor(self, to_repeat, to_copy):
expanded = K.expand_dims(to_repeat, self.axis)
ones = [1] * K.ndim(expanded)
num_repetitions = K.shape(to_copy)[self.copy_from_axis]
tile_shape = K.concatenate([ones[:self.axis], [num_repetitions], ones[self.axis+1:]], 0)
return K.tile(expanded, tile_shape)
@overrides
def get_config(self):
base_config = super(RepeatLike, self).get_config()
config = {'axis': self.axis, 'copy_from_axis': self.copy_from_axis}
config.update(base_config)
return config
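if __name__ == "__main__":
    # Minimal usage sketch (illustrative only): the shapes below, a sentence
    # encoding tiled once per answer option, are assumptions, not taken from
    # the original repository.
    from keras.layers import Input
    from keras.models import Model
    import numpy
    sentence_encoding = Input(shape=(4,))  # (batch_size, encoding_dim)
    options = Input(shape=(None, 4))  # (batch_size, num_options, encoding_dim)
    repeated = RepeatLike(axis=1, copy_from_axis=1)([sentence_encoding, options])
    model = Model(inputs=[sentence_encoding, options], outputs=repeated)
    # num_options is only known at run time; here it happens to be 3.
    output = model.predict([numpy.ones((2, 4)), numpy.ones((2, 3, 4))])
    print(output.shape)  # expected: (2, 3, 4)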
| deep_qa-master | deep_qa/layers/backend/repeat_like.py |
from collections import OrderedDict
from keras.layers import LSTM
from keras.layers.wrappers import Bidirectional
from keras.regularizers import l1_l2
from ...common.params import Params
from .bag_of_words import BOWEncoder
from .convolutional_encoder import CNNEncoder
from .positional_encoder import PositionalEncoder
from .shareable_gru import ShareableGRU as GRU
from .attentive_gru import AttentiveGru
def set_regularization_params(encoder_type: str, params: Params):
"""
This method takes regularization parameters that are specified in `params` and converts them
into Keras regularization objects, modifying `params` to contain the correct keys for the given
encoder_type.
Currently, we only allow specifying a consistent regularization across all the weights of a
layer.
"""
l2_regularization = params.pop("l2_regularization", None)
l1_regularization = params.pop("l1_regularization", None)
regularizer = lambda: l1_l2(l1=l1_regularization, l2=l2_regularization)
if encoder_type == 'cnn':
# Regularization with the CNN encoder is complicated, so we'll just pass in the L1 and L2
# values directly, and let the encoder deal with them.
params["l1_regularization"] = l1_regularization
params["l2_regularization"] = l2_regularization
elif encoder_type == 'lstm':
params["W_regularizer"] = regularizer()
params["U_regularizer"] = regularizer()
params["b_regularizer"] = regularizer()
elif encoder_type == 'tree_lstm':
params["W_regularizer"] = regularizer()
params["U_regularizer"] = regularizer()
params["V_regularizer"] = regularizer()
params["b_regularizer"] = regularizer()
return params
# The first item added here will be used as the default in some cases.
encoders = OrderedDict() # pylint: disable=invalid-name
encoders["bow"] = BOWEncoder
encoders["lstm"] = LSTM
encoders["gru"] = GRU
encoders["cnn"] = CNNEncoder
encoders["positional"] = PositionalEncoder
encoders["bi_gru"] = (lambda **params: Bidirectional(GRU(return_sequences=False,
**params)))
seq2seq_encoders = OrderedDict() # pylint: disable=invalid-name
seq2seq_encoders["bi_gru"] = (lambda **params:
Bidirectional(GRU(return_sequences=True,
**(params["encoder_params"])),
**(params["wrapper_params"])))
seq2seq_encoders["bi_lstm"] = (lambda **params:
Bidirectional(LSTM(return_sequences=True,
**(params["encoder_params"])),
**(params["wrapper_params"])))
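if __name__ == "__main__":
    # Minimal usage sketch (illustrative only): the parameter values are
    # assumptions, not taken from the original repository, and a plain dict
    # stands in for a ``Params`` object, since ``set_regularization_params``
    # only relies on ``pop`` with a default and on item assignment.
    encoder_params = {"units": 8, "l1_regularization": 0.0, "l2_regularization": 1e-4}
    encoder_params = set_regularization_params("lstm", encoder_params)
    print(sorted(encoder_params.keys()))
    # expected: ['U_regularizer', 'W_regularizer', 'b_regularizer', 'units']
    # The Keras 1 style regularizer names set above are handled by Keras 2's
    # legacy interface support (assuming a Keras version that still provides it).
    encoder = encoders["lstm"](**encoder_params)
    print(type(encoder).__name__)  # LSTM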
| deep_qa-master | deep_qa/layers/encoders/__init__.py |
from typing import Tuple
from keras import backend as K
from keras.engine import InputSpec
from keras.layers import Convolution1D, Concatenate, Dense
from keras.regularizers import l1_l2
from overrides import overrides
from ..masked_layer import MaskedLayer
class CNNEncoder(MaskedLayer):
'''
CNNEncoder is a combination of multiple convolution layers and max pooling layers. This is
defined as a single layer to be consistent with the other encoders in terms of input and output
specifications. The input to this "layer" is of shape (batch_size, num_words, embedding_dim)
and the output is of size (batch_size, output_dim).
    The CNN has one convolution layer for each ngram filter size.  Each convolution layer slides
    over the input and produces a vector of size num_filters at each of its
    input_length - ngram_size + 1 window positions.  The corresponding maxpooling layer
    aggregates all of these outputs from the convolution layer and keeps only the max.
    This operation is repeated for every ngram size passed, so the dimensionality of the output
    after maxpooling is len(ngram_filter_sizes) * num_filters.
    We then use a fully connected layer to project it to the desired output_dim. For more
details, refer to "A Sensitivity Analysis of (and Practitioners’ Guide to) Convolutional Neural
Networks for Sentence Classification", Zhang and Wallace 2016, particularly Figure 1.
Parameters
----------
units: int
After doing convolutions, we'll project the collected features into a vector of this size.
This used to be ``output_dim``, but Keras changed it to ``units``. I prefer the name
``output_dim``, so we'll leave the code using ``output_dim``, and just use the name
``units`` in the external API.
num_filters: int
This is the output dim for each convolutional layer, which is the same as the number of
"filters" learned by that layer.
ngram_filter_sizes: Tuple[int], optional (default=(2, 3, 4, 5))
This specifies both the number of convolutional layers we will create and their sizes. The
default of (2, 3, 4, 5) will have four convolutional layers, corresponding to encoding
ngrams of size 2 to 5 with some number of filters.
conv_layer_activation: str, optional (default='relu')
l1_regularization: float, optional (default=None)
l2_regularization: float, optional (default=None)
'''
def __init__(self,
units: int,
num_filters: int,
ngram_filter_sizes: Tuple[int]=(2, 3, 4, 5),
conv_layer_activation: str='relu',
l1_regularization: float=None,
l2_regularization: float=None,
**kwargs):
self.num_filters = num_filters
self.ngram_filter_sizes = ngram_filter_sizes
self.output_dim = units
self.conv_layer_activation = conv_layer_activation
self.l1_regularization = l1_regularization
self.l2_regularization = l2_regularization
self.regularizer = lambda: l1_l2(l1=self.l1_regularization, l2=self.l2_regularization)
# These are member variables that will be defined during self.build().
self.convolution_layers = None
self.max_pooling_layers = None
self.projection_layer = None
self.input_spec = [InputSpec(ndim=3)]
super(CNNEncoder, self).__init__(**kwargs)
@overrides
def build(self, input_shape):
# We define convolution, maxpooling and dense layers first.
self.convolution_layers = [Convolution1D(filters=self.num_filters,
kernel_size=ngram_size,
activation=self.conv_layer_activation,
kernel_regularizer=self.regularizer(),
bias_regularizer=self.regularizer())
for ngram_size in self.ngram_filter_sizes]
self.projection_layer = Dense(self.output_dim)
        # Building all layers because these sub-layers are not explicitly part of the computational graph.
for convolution_layer in self.convolution_layers:
with K.name_scope(convolution_layer.name):
convolution_layer.build(input_shape)
maxpool_output_dim = self.num_filters * len(self.ngram_filter_sizes)
projection_input_shape = (input_shape[0], maxpool_output_dim)
with K.name_scope(self.projection_layer.name):
self.projection_layer.build(projection_input_shape)
# Defining the weights of this "layer" as the set of weights from all convolution
# and maxpooling layers.
self.trainable_weights = []
for layer in self.convolution_layers + [self.projection_layer]:
self.trainable_weights.extend(layer.trainable_weights)
super(CNNEncoder, self).build(input_shape)
@overrides
def call(self, inputs, mask=None): # pylint: disable=unused-argument
# Each convolution layer returns output of size (batch_size, pool_length, num_filters),
# where `pool_length = num_words - ngram_size + 1`. We then do max pooling over each
# filter for the whole input sequence, just using K.max, giving a resultant tensor of shape
# (batch_size, num_filters), which then gets projected using the projection layer.
# TODO(matt): we need to use a convolutional layer here that supports masking.
filter_outputs = [K.max(convolution_layer.call(inputs), axis=1)
for convolution_layer in self.convolution_layers]
maxpool_output = Concatenate()(filter_outputs) if len(filter_outputs) > 1 else filter_outputs[0]
return self.projection_layer.call(maxpool_output)
@overrides
def compute_output_shape(self, input_shape):
return (input_shape[0], self.output_dim)
@overrides
def compute_mask(self, inputs, mask=None): # pylint: disable=unused-argument
        # By default Keras propagates the mask from a layer that supports masking. We don't need it
        # anymore, so we eliminate it from the flow here.
return None
@overrides
def get_config(self):
config = {"units": self.output_dim,
"num_filters": self.num_filters,
"ngram_filter_sizes": self.ngram_filter_sizes,
"conv_layer_activation": self.conv_layer_activation,
"l1_regularization": self.l1_regularization,
"l2_regularization": self.l2_regularization,
}
base_config = super(CNNEncoder, self).get_config()
config.update(base_config)
return config
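if __name__ == "__main__":
    # Minimal usage sketch (illustrative only): the shapes and hyperparameters
    # below are assumptions, not taken from the original repository.
    from keras.layers import Input
    from keras.models import Model
    import numpy
    # Encode a padded sequence of word embeddings into a fixed-size vector:
    # bigram and trigram filters (8 each) are max-pooled over the sequence and
    # the 16 pooled features are projected to the requested 16 output units.
    embeddings = Input(shape=(12, 20))  # (batch_size, num_words, embedding_dim)
    encoder = CNNEncoder(units=16, num_filters=8, ngram_filter_sizes=(2, 3))
    encoding = encoder(embeddings)
    model = Model(inputs=embeddings, outputs=encoding)
    print(model.predict(numpy.random.rand(2, 12, 20)).shape)  # expected: (2, 16)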
| deep_qa-master | deep_qa/layers/encoders/convolutional_encoder.py |