python_code | repo_name | file_path
---|---|---|
import logging
from typing import List
import sqlite3
import multiprocessing
from multiprocessing import Process
from allennlp.common.file_utils import cached_path
logger = logging.getLogger(__name__)
MULTIPROCESSING_LOGGER = multiprocessing.get_logger()
class SqlExecutor:
"""
This class evaluates SQL queries by connecting to a SQLite database. Because SQLite is disk-based,
we only need the path to a single database file. We execute the predicted SQL query and the labeled
queries against the database and check whether they produce the same result table.
"""
def __init__(self, database_file: str) -> None:
# Initialize a cursor to our sqlite database, so we can execute SQL queries for denotation accuracy.
self._database_file = cached_path(database_file)
def evaluate_sql_query(self, predicted_sql_query: str, sql_query_labels: List[str]) -> int:
# We set the logging level for the subprocesses to WARNING; otherwise it logs
# every time a process starts and stops.
MULTIPROCESSING_LOGGER.setLevel(logging.WARNING)
# Since the query might hang, we run in another process and kill it if it
# takes too long.
process = Process(
target=self._evaluate_sql_query_subprocess,
args=(self._database_file, predicted_sql_query, sql_query_labels),
)
process.start()
# If the query has not finished in 10 seconds then we will proceed.
process.join(10)
denotation_correct = process.exitcode # type: ignore
if process.is_alive():
logger.warning("Evaluating query took over 10 seconds, skipping query")
process.terminate()
process.join()
if denotation_correct is None:
denotation_correct = 0
return denotation_correct
@staticmethod
def _evaluate_sql_query_subprocess(
database_file: str, predicted_query: str, sql_query_labels: List[str]
) -> None:
"""
We evaluate here whether the predicted query and the labeled query produce the exact
same result table. This method only runs inside the subprocess, so we just exit with
code 1 if the prediction is correct and 0 otherwise.
"""
connection = sqlite3.connect(database_file)
cursor = connection.cursor()
postprocessed_predicted_query = SqlExecutor.postprocess_query_sqlite(predicted_query)
try:
cursor.execute(postprocessed_predicted_query)
predicted_rows = cursor.fetchall()
except sqlite3.Error as error:
logger.warning(f"Error executing predicted: {error}")
exit(0)
# If predicted table matches any of the reference tables then it is counted as correct.
target_rows = None
for sql_query_label in sql_query_labels:
postprocessed_sql_query_label = SqlExecutor.postprocess_query_sqlite(sql_query_label)
try:
cursor.execute(postprocessed_sql_query_label)
target_rows = cursor.fetchall()
except sqlite3.Error as error:
logger.warning(f"Error executing predicted: {error}")
if predicted_rows == target_rows:
exit(1)
exit(0)
@staticmethod
def postprocess_query_sqlite(query: str):
# The dialect of SQL that SQLite takes is not exactly the same as the labeled data.
# We strip off the parentheses that surround the entire query here.
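# For example (hypothetical query), "(SELECT city_name FROM city)" becomes
# "SELECT city_name FROM city;", while a query without a surrounding "(" is returned unchanged.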
query = query.strip()
if query.startswith("("):
return query[1 : query.rfind(")")] + ";"
return query
| allennlp-semparse-master | allennlp_semparse/parsimonious_languages/executors/sql_executor.py |
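A minimal usage sketch for the ``SqlExecutor`` above. The database path and the queries are hypothetical placeholders; the only assumption is a local SQLite file containing the referenced table.

from allennlp_semparse.parsimonious_languages.executors.sql_executor import SqlExecutor

# Hypothetical database file and queries, for illustration only.
executor = SqlExecutor("/path/to/atis.db")
predicted_query = "SELECT city . city_name FROM city WHERE city . state_code = 'CA' ;"
labeled_queries = ["SELECT city . city_name FROM city WHERE city . state_code = 'CA' ;"]
# Returns 1 if the predicted query yields the same result table as any labeled query,
# and 0 otherwise (including when execution exceeds the 10 second timeout).
print(executor.evaluate_sql_query(predicted_query, labeled_queries))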
"""
Executors are classes that deterministically transform programs in domain specific languages
into denotations. We have one executor defined for each language-domain pair that we handle.
"""
from allennlp_semparse.parsimonious_languages.executors.sql_executor import SqlExecutor
| allennlp-semparse-master | allennlp_semparse/parsimonious_languages/executors/__init__.py |
import torch
from allennlp.common.util import JsonDict
from allennlp.data import Instance
from allennlp.predictors.predictor import Predictor
@Predictor.register("wikitables-parser")
class WikiTablesParserPredictor(Predictor):
"""
Wrapper for the
:class:`~allennlp.models.encoder_decoders.wikitables_semantic_parser.WikiTablesSemanticParser`
model.
"""
def _json_to_instance(self, json_dict: JsonDict) -> Instance:
"""
Expects JSON that looks like ``{"question": "...", "table": "..."}``.
"""
question_text = json_dict["question"]
table_rows = json_dict["table"].split("\n")
# We are directly passing the raw table rows here. The code in ``TableQuestionContext`` will do some
# minimal processing to extract dates and numbers from the cells.
instance = self._dataset_reader.text_to_instance(
question_text, # type: ignore
table_rows,
)
return instance
def predict_json(self, inputs: JsonDict) -> JsonDict:
"""
We override this to support interactive beam search: an optional ``initial_sequence`` of
production rules in the input JSON is forced as the prefix of the decoded action sequence.
"""
instance = self._json_to_instance(inputs)
# Get the rules out of the instance
index_to_rule = [
production_rule_field.rule
for production_rule_field in instance.fields["actions"].field_list
]
rule_to_index = {rule: i for i, rule in enumerate(index_to_rule)}
# A sequence of strings to force, then convert them to ints
initial_tokens = inputs.get("initial_sequence", [])
# Want to get initial_sequence on the same device as the model.
initial_sequence = torch.tensor(
[rule_to_index[token] for token in initial_tokens],
device=next(self._model.parameters()).device,
)
# Replace beam search with one that forces the initial sequence
original_beam_search = self._model._beam_search
interactive_beam_search = original_beam_search.constrained_to(initial_sequence)
self._model._beam_search = interactive_beam_search
# Now get results
results = self.predict_instance(instance)
# And add in the choices. Need to convert from idxs to rules.
results["choices"] = [
[
(probability, action)
for probability, action in zip(pa["action_probabilities"], pa["considered_actions"])
]
for pa in results["predicted_actions"]
]
results["beam_snapshots"] = {
# For each batch_index, we get a list of beam snapshots
batch_index: [
# Each beam_snapshots consists of a list of timesteps,
# each of which is a list of pairs (score, sequence).
# The sequence is the *indices* of the rules, which we
# want to convert to the string representations.
[
(score, [index_to_rule[idx] for idx in sequence])
for score, sequence in timestep_snapshot
]
for timestep_snapshot in beam_snapshots
]
for batch_index, beam_snapshots in interactive_beam_search.beam_snapshots.items()
}
# Restore original beam search
self._model._beam_search = original_beam_search
return results
| allennlp-semparse-master | allennlp_semparse/predictors/wikitables_parser.py |
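A sketch of the JSON payload ``WikiTablesParserPredictor.predict_json`` expects. The question, table rows, and rule strings are illustrative placeholders, and the tab-separated table layout is an assumption about the raw table format consumed by ``TableQuestionContext``.

# Illustrative input for WikiTablesParserPredictor.predict_json (all values are placeholders).
inputs = {
    "question": "Which country won the most medals?",
    # Raw table rows, one per line; TableQuestionContext extracts dates and numbers from the cells.
    "table": "Country\tMedals\nFinland\t5\nNorway\t9",
    # Optional: production rules (matching the instance's "actions" field) that the
    # constrained beam search is forced to decode first.
    "initial_sequence": [],
}
# predictor.predict_json(inputs) returns the usual parser output plus "choices" and
# "beam_snapshots" entries built from the interactive beam search.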
from allennlp_semparse.predictors.atis_parser import AtisParserPredictor
from allennlp_semparse.predictors.nlvr_parser import NlvrParserPredictor
from allennlp_semparse.predictors.wikitables_parser import WikiTablesParserPredictor
| allennlp-semparse-master | allennlp_semparse/predictors/__init__.py |
from allennlp.common.util import JsonDict
from allennlp.data import Instance
from allennlp.predictors.predictor import Predictor
@Predictor.register("atis-parser")
class AtisParserPredictor(Predictor):
"""
Predictor for the :class:`~allennlp_semparse.models.atis.AtisSemanticParser` model.
"""
def _json_to_instance(self, json_dict: JsonDict) -> Instance:
"""
Expects JSON that looks like ``{"utterance": "..."}``.
"""
utterance = json_dict["utterance"]
return self._dataset_reader.text_to_instance([utterance])
| allennlp-semparse-master | allennlp_semparse/predictors/atis_parser.py |
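The corresponding payload for the ATIS predictor is just the utterance; the text below is a placeholder.

# Illustrative input for AtisParserPredictor.predict_json.
inputs = {"utterance": "show me flights from denver to boston"}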
import json
from allennlp.common.util import JsonDict
from allennlp.data import Instance
from allennlp.predictors.predictor import Predictor
@Predictor.register("nlvr-parser")
class NlvrParserPredictor(Predictor):
def _json_to_instance(self, json_dict: JsonDict) -> Instance:
sentence = json_dict["sentence"]
if "worlds" in json_dict:
# This is grouped data
worlds = json_dict["worlds"]
if isinstance(worlds, str):
worlds = json.loads(worlds)
else:
structured_rep = json_dict["structured_rep"]
if isinstance(structured_rep, str):
structured_rep = json.loads(structured_rep)
worlds = [structured_rep]
identifier = json_dict["identifier"] if "identifier" in json_dict else None
instance = self._dataset_reader.text_to_instance(
sentence=sentence, # type: ignore
structured_representations=worlds,
identifier=identifier,
)
return instance
def dump_line(self, outputs: JsonDict) -> str:
if "identifier" in outputs:
# Returning CSV lines for official evaluation
identifier = outputs["identifier"]
denotation = outputs["denotations"][0][0]
return f"{identifier},{denotation}\n"
else:
return json.dumps(outputs) + "\n"
| allennlp-semparse-master | allennlp_semparse/predictors/nlvr_parser.py |
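Illustrative inputs for the two branches handled in ``_json_to_instance`` above: ungrouped data with a single ``structured_rep``, and grouped data with a ``worlds`` list. The object attributes follow the NLVR structured representation, but the specific values and identifiers are placeholders.

# A single NLVR object; a world is a list of boxes, and a box is a list of such objects.
blue_square = {"type": "square", "color": "Blue", "x_loc": 10, "y_loc": 20, "size": 20}
# Ungrouped input: one structured representation (may also be passed as a JSON string).
single_input = {
    "sentence": "There is a box with a blue item.",
    "structured_rep": [[blue_square]],
    "identifier": "hypothetical-0",
}
# Grouped input: several worlds for the same sentence under the "worlds" key.
grouped_input = {
    "sentence": "There is a box with a blue item.",
    "worlds": [[[blue_square]], [[blue_square]]],
    "identifier": "hypothetical-1",
}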
from allennlp_semparse.models.atis.atis_semantic_parser import AtisSemanticParser
from allennlp_semparse.models.nlvr.nlvr_coverage_semantic_parser import NlvrCoverageSemanticParser
from allennlp_semparse.models.nlvr.nlvr_direct_semantic_parser import NlvrDirectSemanticParser
from allennlp_semparse.models.text2sql_parser import Text2SqlParser
from allennlp_semparse.models.wikitables.wikitables_erm_semantic_parser import (
WikiTablesErmSemanticParser,
)
from allennlp_semparse.models.wikitables.wikitables_mml_semantic_parser import (
WikiTablesMmlSemanticParser,
)
| allennlp-semparse-master | allennlp_semparse/models/__init__.py |
import logging
from typing import Any, Dict, List, Tuple, Optional
from collections import defaultdict
import difflib
import sqlparse
import torch
from allennlp.data import Vocabulary
from allennlp.models.model import Model
from allennlp.modules import Attention, Seq2SeqEncoder, TextFieldEmbedder, Embedding
from allennlp.nn import util
from allennlp.nn.initializers import InitializerApplicator
from allennlp.nn.regularizers import RegularizerApplicator
from allennlp.training.metrics import Average
from allennlp_semparse.fields.production_rule_field import ProductionRule
from allennlp_semparse.parsimonious_languages.contexts.sql_context_utils import (
action_sequence_to_sql,
)
from allennlp_semparse.state_machines import BeamSearch
from allennlp_semparse.state_machines.states import GrammarBasedState
from allennlp_semparse.state_machines.states import GrammarStatelet, RnnStatelet
from allennlp_semparse.state_machines.trainers import MaximumMarginalLikelihood
from allennlp_semparse.state_machines.transition_functions import BasicTransitionFunction
logger = logging.getLogger(__name__)
@Model.register("text2sql_parser")
class Text2SqlParser(Model):
"""
Parameters
----------
vocab : ``Vocabulary``
utterance_embedder : ``TextFieldEmbedder``
Embedder for utterances.
action_embedding_dim : ``int``
Dimension to use for action embeddings.
encoder : ``Seq2SeqEncoder``
The encoder to use for the input utterance.
decoder_beam_search : ``BeamSearch``
Beam search used to retrieve best sequences after training.
max_decoding_steps : ``int``
When we're decoding with a beam search, what's the maximum number of steps we should take?
This only applies at evaluation time, not during training.
input_attention: ``Attention``
We compute an attention over the input utterance at each step of the decoder, using the
decoder hidden state as the query. Passed to the transition function.
add_action_bias : ``bool``, optional (default=True)
If ``True``, we will learn a bias weight for each action that gets used when predicting
that action, in addition to its embedding.
dropout : ``float``, optional (default=0)
If greater than 0, we will apply dropout with this probability after all encoders (pytorch
LSTMs do not apply dropout to their last layer).
"""
def __init__(
self,
vocab: Vocabulary,
utterance_embedder: TextFieldEmbedder,
action_embedding_dim: int,
encoder: Seq2SeqEncoder,
decoder_beam_search: BeamSearch,
max_decoding_steps: int,
input_attention: Attention,
add_action_bias: bool = True,
dropout: float = 0.0,
initializer: InitializerApplicator = InitializerApplicator(),
regularizer: Optional[RegularizerApplicator] = None,
) -> None:
super().__init__(vocab, regularizer)
self._utterance_embedder = utterance_embedder
self._encoder = encoder
self._max_decoding_steps = max_decoding_steps
self._add_action_bias = add_action_bias
self._dropout = torch.nn.Dropout(p=dropout)
self._exact_match = Average()
self._valid_sql_query = Average()
self._action_similarity = Average()
self._denotation_accuracy = Average()
# the padding value used by IndexField
self._action_padding_index = -1
num_actions = vocab.get_vocab_size("rule_labels")
input_action_dim = action_embedding_dim
if self._add_action_bias:
input_action_dim += 1
self._action_embedder = Embedding(
num_embeddings=num_actions, embedding_dim=input_action_dim
)
self._output_action_embedder = Embedding(
num_embeddings=num_actions, embedding_dim=action_embedding_dim
)
# This is what we pass as input in the first step of decoding, when we don't have a
# previous action, or a previous utterance attention.
self._first_action_embedding = torch.nn.Parameter(torch.FloatTensor(action_embedding_dim))
self._first_attended_utterance = torch.nn.Parameter(
torch.FloatTensor(encoder.get_output_dim())
)
torch.nn.init.normal_(self._first_action_embedding)
torch.nn.init.normal_(self._first_attended_utterance)
self._beam_search = decoder_beam_search
self._decoder_trainer = MaximumMarginalLikelihood(beam_size=1)
self._transition_function = BasicTransitionFunction(
encoder_output_dim=self._encoder.get_output_dim(),
action_embedding_dim=action_embedding_dim,
input_attention=input_attention,
add_action_bias=self._add_action_bias,
dropout=dropout,
)
initializer(self)
def forward(
self, # type: ignore
tokens: Dict[str, torch.LongTensor],
valid_actions: List[List[ProductionRule]],
action_sequence: torch.LongTensor = None,
) -> Dict[str, torch.Tensor]:
"""
We set up the initial state for the decoder, and pass that state off to either a DecoderTrainer,
if we're training, or a BeamSearch for inference, if we're not.
Parameters
----------
tokens : Dict[str, torch.LongTensor]
The output of ``TextField.as_array()`` applied on the tokens ``TextField``. This will
be passed through a ``TextFieldEmbedder`` and then through an encoder.
valid_actions : ``List[List[ProductionRule]]``
A list of all possible actions for each ``World`` in the batch, indexed into a
``ProductionRule`` using a ``ProductionRuleField``. We will embed all of these
and use the embeddings to determine which action to take at each timestep in the
decoder.
action_sequence : torch.Tensor, optional (default=None)
The action sequence for the correct action sequence, where each action is an index into the list
of possible actions. This tensor has shape ``(batch_size, sequence_length, 1)``. We remove the
trailing dimension.
"""
embedded_utterance = self._utterance_embedder(tokens)
mask = util.get_text_field_mask(tokens)
batch_size = embedded_utterance.size(0)
# (batch_size, num_tokens, encoder_output_dim)
encoder_outputs = self._dropout(self._encoder(embedded_utterance, mask))
initial_state = self._get_initial_state(encoder_outputs, mask, valid_actions)
if action_sequence is not None:
# Remove the trailing dimension (from ListField[ListField[IndexField]]).
action_sequence = action_sequence.squeeze(-1)
target_mask = action_sequence != self._action_padding_index
else:
target_mask = None
outputs: Dict[str, Any] = {}
if action_sequence is not None:
# action_sequence has shape (batch_size, 1, target_sequence_length)
# after we unsqueeze it here for the MML trainer.
loss_output = self._decoder_trainer.decode(
initial_state,
self._transition_function,
(action_sequence.unsqueeze(1), target_mask.unsqueeze(1)),
)
outputs.update(loss_output)
if not self.training:
action_mapping = []
for batch_actions in valid_actions:
batch_action_mapping = {}
for action_index, action in enumerate(batch_actions):
batch_action_mapping[action_index] = action[0]
action_mapping.append(batch_action_mapping)
outputs["action_mapping"] = action_mapping
# This tells the state to start keeping track of debug info, which we'll pass along in
# our output dictionary.
initial_state.debug_info = [[] for _ in range(batch_size)]
best_final_states = self._beam_search.search(
self._max_decoding_steps,
initial_state,
self._transition_function,
keep_final_unfinished_states=True,
)
outputs["best_action_sequence"] = []
outputs["debug_info"] = []
outputs["predicted_sql_query"] = []
outputs["sql_queries"] = []
for i in range(batch_size):
# Decoding may not have terminated with any completed valid SQL queries, if `num_steps`
# isn't long enough (or if the model is not trained enough and gets into an
# infinite action loop).
if i not in best_final_states:
self._exact_match(0)
self._denotation_accuracy(0)
self._valid_sql_query(0)
self._action_similarity(0)
outputs["predicted_sql_query"].append("")
continue
best_action_indices = best_final_states[i][0].action_history[0]
action_strings = [
action_mapping[i][action_index] for action_index in best_action_indices
]
predicted_sql_query = action_sequence_to_sql(action_strings)
if action_sequence is not None:
# Use a Tensor, not a Variable, to avoid a memory leak.
targets = action_sequence[i].data
sequence_in_targets = 0
sequence_in_targets = self._action_history_match(best_action_indices, targets)
self._exact_match(sequence_in_targets)
similarity = difflib.SequenceMatcher(None, best_action_indices, targets)
self._action_similarity(similarity.ratio())
outputs["best_action_sequence"].append(action_strings)
outputs["predicted_sql_query"].append(
sqlparse.format(predicted_sql_query, reindent=True)
)
outputs["debug_info"].append(best_final_states[i][0].debug_info[0]) # type: ignore
return outputs
def _get_initial_state(
self, encoder_outputs: torch.Tensor, mask: torch.Tensor, actions: List[List[ProductionRule]]
) -> GrammarBasedState:
batch_size = encoder_outputs.size(0)
# This will be our initial hidden state and memory cell for the decoder LSTM.
final_encoder_output = util.get_final_encoder_states(
encoder_outputs, mask, self._encoder.is_bidirectional()
)
memory_cell = encoder_outputs.new_zeros(batch_size, self._encoder.get_output_dim())
initial_score = encoder_outputs.data.new_zeros(batch_size)
# To make grouping states together in the decoder easier, we convert the batch dimension in
# all of our tensors into an outer list. For instance, the encoder outputs have shape
# `(batch_size, utterance_length, encoder_output_dim)`. We need to convert this into a list
# of `batch_size` tensors, each of shape `(utterance_length, encoder_output_dim)`. Then we
# won't have to do any index selects, or anything, we'll just do some `torch.cat()`s.
initial_score_list = [initial_score[i] for i in range(batch_size)]
encoder_output_list = [encoder_outputs[i] for i in range(batch_size)]
utterance_mask_list = [mask[i] for i in range(batch_size)]
initial_rnn_state = []
for i in range(batch_size):
initial_rnn_state.append(
RnnStatelet(
final_encoder_output[i],
memory_cell[i],
self._first_action_embedding,
self._first_attended_utterance,
encoder_output_list,
utterance_mask_list,
)
)
initial_grammar_state = [self._create_grammar_state(actions[i]) for i in range(batch_size)]
initial_state = GrammarBasedState(
batch_indices=list(range(batch_size)),
action_history=[[] for _ in range(batch_size)],
score=initial_score_list,
rnn_state=initial_rnn_state,
grammar_state=initial_grammar_state,
possible_actions=actions,
debug_info=None,
)
return initial_state
@staticmethod
def _action_history_match(predicted: List[int], targets: torch.LongTensor) -> int:
# TODO(mattg): this could probably be moved into a FullSequenceMatch metric, or something.
# Check if target is big enough to cover prediction (including start/end symbols)
if len(predicted) > targets.size(0):
return 0
predicted_tensor = targets.new_tensor(predicted)
targets_trimmed = targets[: len(predicted)]
# Return 1 if the predicted sequence is anywhere in the list of targets.
return predicted_tensor.equal(targets_trimmed)
@staticmethod
def is_nonterminal(token: str):
if token[0] == '"' and token[-1] == '"':
return False
return True
def get_metrics(self, reset: bool = False) -> Dict[str, float]:
"""
We track four metrics here:
1. exact_match, which is the percentage of the time that our best output action sequence
matches the SQL query exactly.
2. denotation_acc, which is the percentage of examples where we get the correct
denotation. This is the typical "accuracy" metric, and it is what you should usually
report in an experimental result. You need to be careful, though, that you're
computing this on the full data, and not just the subset that can be parsed. (make sure
you pass "keep_if_unparseable=True" to the dataset reader, which we do for validation data,
but not training data).
3. valid_sql_query, which is the percentage of time that decoding actually produces a
valid SQL query. We might not produce a valid SQL query if the decoder gets
into a repetitive loop, or we're trying to produce a super long SQL query and run
out of time steps, or something.
4. action_similarity, which is how similar the action sequence predicted is to the actual
action sequence. This is basically a soft measure of exact_match.
"""
validation_correct = self._exact_match._total_value
validation_total = self._exact_match._count
return {
"_exact_match_count": validation_correct,
"_example_count": validation_total,
"exact_match": self._exact_match.get_metric(reset),
"denotation_acc": self._denotation_accuracy.get_metric(reset),
"valid_sql_query": self._valid_sql_query.get_metric(reset),
"action_similarity": self._action_similarity.get_metric(reset),
}
def _create_grammar_state(self, possible_actions: List[ProductionRule]) -> GrammarStatelet:
"""
This method creates the GrammarStatelet object that's used for decoding. Part of creating
that is creating the `valid_actions` dictionary, which contains embedded representations of
all of the valid actions. So, we create that here as well.
The inputs to this method are for a `single instance in the batch`; none of the tensors we
create here are batched. We grab the global action ids from the input
``ProductionRules``, and we use those to embed the valid actions for every
non-terminal type. We use the input ``linking_scores`` for non-global actions.
Parameters
----------
possible_actions : ``List[ProductionRule]``
From the input to ``forward`` for a single batch instance.
"""
device = util.get_device_of(self._action_embedder.weight)
# TODO(Mark): This type is pure \(- . ^)/
translated_valid_actions: Dict[
str, Dict[str, Tuple[torch.Tensor, torch.Tensor, List[int]]]
] = {}
actions_grouped_by_nonterminal: Dict[str, List[Tuple[ProductionRule, int]]] = defaultdict(
list
)
for i, action in enumerate(possible_actions):
if action.rule == "":
continue
if action.is_global_rule:
actions_grouped_by_nonterminal[action.nonterminal].append((action, i))
else:
raise ValueError("The sql parser doesn't support non-global actions yet.")
for key, production_rule_arrays in actions_grouped_by_nonterminal.items():
translated_valid_actions[key] = {}
# `key` here is a non-terminal from the grammar, and `action_strings` are all the valid
# productions of that non-terminal. We'll first split those productions by global vs.
# linked action.
global_actions = []
for production_rule_array, action_index in production_rule_arrays:
global_actions.append((production_rule_array.rule_id, action_index))
if global_actions:
global_action_tensors, global_action_ids = zip(*global_actions)
global_action_tensor = torch.cat(global_action_tensors, dim=0).long()
if device >= 0:
global_action_tensor = global_action_tensor.to(device)
global_input_embeddings = self._action_embedder(global_action_tensor)
global_output_embeddings = self._output_action_embedder(global_action_tensor)
translated_valid_actions[key]["global"] = (
global_input_embeddings,
global_output_embeddings,
list(global_action_ids),
)
return GrammarStatelet(
["statement"], translated_valid_actions, self.is_nonterminal, reverse_productions=True
)
def make_output_human_readable(
self, output_dict: Dict[str, torch.Tensor]
) -> Dict[str, torch.Tensor]:
"""
This method overrides ``Model.decode``, which gets called after ``Model.forward``, at test
time, to finalize predictions. This is (confusingly) a separate notion from the "decoder"
in "encoder/decoder", where that decoder logic lives in ``TransitionFunction``.
This method trims the output predictions to the first end symbol, replaces indices with
corresponding tokens, and adds a field called ``predicted_actions`` to the ``output_dict``.
"""
action_mapping = output_dict["action_mapping"]
best_actions = output_dict["best_action_sequence"]
debug_infos = output_dict["debug_info"]
batch_action_info = []
for batch_index, (predicted_actions, debug_info) in enumerate(
zip(best_actions, debug_infos)
):
instance_action_info = []
for predicted_action, action_debug_info in zip(predicted_actions, debug_info):
action_info = {}
action_info["predicted_action"] = predicted_action
considered_actions = action_debug_info["considered_actions"]
probabilities = action_debug_info["probabilities"]
actions = []
for action, probability in zip(considered_actions, probabilities):
if action != -1:
actions.append((action_mapping[batch_index][action], probability))
actions.sort()
considered_actions, probabilities = zip(*actions)
action_info["considered_actions"] = considered_actions
action_info["action_probabilities"] = probabilities
action_info["utterance_attention"] = action_debug_info.get("question_attention", [])
instance_action_info.append(action_info)
batch_action_info.append(instance_action_info)
output_dict["predicted_actions"] = batch_action_info
return output_dict
| allennlp-semparse-master | allennlp_semparse/models/text2sql_parser.py |
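The two static helpers of ``Text2SqlParser`` can be exercised in isolation; a small sketch with made-up action indices and tokens.

import torch
from allennlp_semparse.models.text2sql_parser import Text2SqlParser

# _action_history_match only counts a prediction as correct when it matches the target prefix exactly.
targets = torch.tensor([3, 5, 7, -1, -1])  # -1 is the action padding index
assert Text2SqlParser._action_history_match([3, 5, 7], targets)
assert not Text2SqlParser._action_history_match([3, 5, 8], targets)

# is_nonterminal treats double-quoted tokens as terminals and everything else as non-terminals.
assert Text2SqlParser.is_nonterminal("query")
assert not Text2SqlParser.is_nonterminal('"select"')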
from typing import Any, Dict, List, Mapping, Sequence, Tuple
import torch
from allennlp.common.checks import check_dimensions_match
from allennlp.common.util import pad_sequence_to_length
from allennlp.data import Vocabulary
from allennlp.models.model import Model
from allennlp.modules import (
Embedding,
Seq2SeqEncoder,
Seq2VecEncoder,
TextFieldEmbedder,
TimeDistributed,
)
from allennlp.modules.seq2vec_encoders import BagOfEmbeddingsEncoder
from allennlp.nn import util
from allennlp.training.metrics import Average
from allennlp_semparse.common import ParsingError
from allennlp_semparse.domain_languages import WikiTablesLanguage, START_SYMBOL
from allennlp_semparse.domain_languages.domain_language import ExecutionError
from allennlp_semparse.fields.production_rule_field import ProductionRuleArray
from allennlp_semparse.state_machines.states import GrammarBasedState, GrammarStatelet, RnnStatelet
class WikiTablesSemanticParser(Model):
"""
A ``WikiTablesSemanticParser`` is a :class:`Model` which takes as input a table and a question,
and produces a logical form that answers the question when executed over the table. The
logical form is generated by a `type-constrained`, `transition-based` parser. This is an
abstract class that defines most of the functionality related to the transition-based parser. It
does not contain the implementation for actually training the parser. You may want to train it
using a learning-to-search algorithm, in which case you will want to use
``WikiTablesErmSemanticParser``, or if you have a set of approximate logical forms that give the
correct denotation, you will want to use ``WikiTablesMmlSemanticParser``.
Parameters
----------
vocab : ``Vocabulary``
question_embedder : ``TextFieldEmbedder``
Embedder for questions.
action_embedding_dim : ``int``
Dimension to use for action embeddings.
encoder : ``Seq2SeqEncoder``
The encoder to use for the input question.
entity_encoder : ``Seq2VecEncoder``
The encoder used for averaging the words of an entity.
max_decoding_steps : ``int``
When we're decoding with a beam search, what's the maximum number of steps we should take?
This only applies at evaluation time, not during training.
add_action_bias : ``bool``, optional (default=True)
If ``True``, we will learn a bias weight for each action that gets used when predicting
that action, in addition to its embedding.
use_neighbor_similarity_for_linking : ``bool``, optional (default=False)
If ``True``, we will compute a max similarity between a question token and the `neighbors`
of an entity as a component of the linking scores. This is meant to capture the same kind
of information as the ``related_column`` feature.
dropout : ``float``, optional (default=0)
If greater than 0, we will apply dropout with this probability after all encoders (pytorch
LSTMs do not apply dropout to their last layer).
num_linking_features : ``int``, optional (default=10)
We need to construct a parameter vector for the linking features, so we need to know how
many there are. The default of 10 here matches the default in the ``KnowledgeGraphField``,
which is to use all ten defined features. If this is 0, another term will be added to the
linking score. This term contains the maximum similarity value from the entity's neighbors
and the question.
rule_namespace : ``str``, optional (default=rule_labels)
The vocabulary namespace to use for production rules. The default corresponds to the
default used in the dataset reader, so you likely don't need to modify this.
"""
def __init__(
self,
vocab: Vocabulary,
question_embedder: TextFieldEmbedder,
action_embedding_dim: int,
encoder: Seq2SeqEncoder,
entity_encoder: Seq2VecEncoder,
max_decoding_steps: int,
add_action_bias: bool = True,
use_neighbor_similarity_for_linking: bool = False,
dropout: float = 0.0,
num_linking_features: int = 10,
rule_namespace: str = "rule_labels",
) -> None:
super().__init__(vocab)
self._question_embedder = question_embedder
self._encoder = encoder
self._entity_encoder = TimeDistributed(entity_encoder)
self._max_decoding_steps = max_decoding_steps
self._add_action_bias = add_action_bias
self._use_neighbor_similarity_for_linking = use_neighbor_similarity_for_linking
if dropout > 0:
self._dropout = torch.nn.Dropout(p=dropout)
else:
self._dropout = lambda x: x
self._rule_namespace = rule_namespace
self._denotation_accuracy = Average()
self._action_sequence_accuracy = Average()
self._has_logical_form = Average()
self._action_padding_index = -1 # the padding value used by IndexField
num_actions = vocab.get_vocab_size(self._rule_namespace)
if self._add_action_bias:
self._action_biases = Embedding(num_embeddings=num_actions, embedding_dim=1)
self._action_embedder = Embedding(
num_embeddings=num_actions, embedding_dim=action_embedding_dim
)
self._output_action_embedder = Embedding(
num_embeddings=num_actions, embedding_dim=action_embedding_dim
)
# This is what we pass as input in the first step of decoding, when we don't have a
# previous action, or a previous question attention.
self._first_action_embedding = torch.nn.Parameter(torch.FloatTensor(action_embedding_dim))
self._first_attended_question = torch.nn.Parameter(
torch.FloatTensor(encoder.get_output_dim())
)
torch.nn.init.normal_(self._first_action_embedding)
torch.nn.init.normal_(self._first_attended_question)
check_dimensions_match(
entity_encoder.get_output_dim(),
question_embedder.get_output_dim(),
"entity word average embedding dim",
"question embedding dim",
)
self._num_entity_types = 5 # TODO(mattg): get this in a more principled way somehow?
self._embedding_dim = question_embedder.get_output_dim()
self._entity_type_encoder_embedding = Embedding(
num_embeddings=self._num_entity_types, embedding_dim=self._embedding_dim
)
self._entity_type_decoder_embedding = Embedding(
num_embeddings=self._num_entity_types, embedding_dim=action_embedding_dim
)
self._neighbor_params = torch.nn.Linear(self._embedding_dim, self._embedding_dim)
if num_linking_features > 0:
self._linking_params = torch.nn.Linear(num_linking_features, 1)
else:
self._linking_params = None
if self._use_neighbor_similarity_for_linking:
self._question_entity_params = torch.nn.Linear(1, 1)
self._question_neighbor_params = torch.nn.Linear(1, 1)
else:
self._question_entity_params = None
self._question_neighbor_params = None
def _get_initial_rnn_and_grammar_state(
self,
question: Dict[str, torch.LongTensor],
table: Dict[str, torch.LongTensor],
world: List[WikiTablesLanguage],
actions: List[List[ProductionRuleArray]],
outputs: Dict[str, Any],
) -> Tuple[List[RnnStatelet], List[GrammarStatelet]]:
"""
Encodes the question and table, computes a linking between the two, and constructs an
initial RnnStatelet and GrammarStatelet for each batch instance to pass to the
decoder.
We take ``outputs`` as a parameter here and `modify` it, adding things that we want to
visualize in a demo.
"""
table_text = table["text"]
# (batch_size, question_length, embedding_dim)
embedded_question = self._question_embedder(question)
question_mask = util.get_text_field_mask(question)
# (batch_size, num_entities, num_entity_tokens, embedding_dim)
embedded_table = self._question_embedder(table_text, num_wrapping_dims=1)
table_mask = util.get_text_field_mask(table_text, num_wrapping_dims=1)
batch_size, num_entities, num_entity_tokens, _ = embedded_table.size()
num_question_tokens = embedded_question.size(1)
# (batch_size, num_entities, embedding_dim)
encoded_table = self._entity_encoder(embedded_table, table_mask)
# entity_types: tensor with shape (batch_size, num_entities), where each entry is the
# entity's type id.
# entity_type_dict: Dict[int, int], mapping flattened_entity_index -> type_index
# These encode the same information, but for efficiency reasons later it's nice
# to have one version as a tensor and one that's accessible on the cpu.
entity_types, entity_type_dict = self._get_type_vector(world, num_entities, encoded_table)
entity_type_embeddings = self._entity_type_encoder_embedding(entity_types)
# (batch_size, num_entities, num_neighbors) or None
neighbor_indices = self._get_neighbor_indices(world, num_entities, encoded_table)
if neighbor_indices is not None:
# Neighbor_indices is padded with -1 since 0 is a potential neighbor index.
# Thus, the absolute value needs to be taken in the index_select, and 1 needs to
# be added for the mask since that method expects 0 for padding.
# (batch_size, num_entities, num_neighbors, embedding_dim)
embedded_neighbors = util.batched_index_select(
encoded_table, torch.abs(neighbor_indices)
)
neighbor_mask = util.get_text_field_mask(
{"ignored": {"ignored": neighbor_indices + 1}}, num_wrapping_dims=1
).float()
# Encoder initialized to easily obtain a masked average.
neighbor_encoder = TimeDistributed(
BagOfEmbeddingsEncoder(self._embedding_dim, averaged=True)
)
# (batch_size, num_entities, embedding_dim)
embedded_neighbors = neighbor_encoder(embedded_neighbors, neighbor_mask)
projected_neighbor_embeddings = self._neighbor_params(embedded_neighbors.float())
# (batch_size, num_entities, embedding_dim)
entity_embeddings = torch.tanh(entity_type_embeddings + projected_neighbor_embeddings)
else:
# (batch_size, num_entities, embedding_dim)
entity_embeddings = torch.tanh(entity_type_embeddings)
# Compute entity and question word similarity. We tried using cosine distance here, but
# because this similarity is the main mechanism that the model can use to push apart logit
# scores for certain actions (like "n -> 1" and "n -> -1"), this needs to have a larger
# output range than [-1, 1].
question_entity_similarity = torch.bmm(
embedded_table.view(batch_size, num_entities * num_entity_tokens, self._embedding_dim),
torch.transpose(embedded_question, 1, 2),
)
question_entity_similarity = question_entity_similarity.view(
batch_size, num_entities, num_entity_tokens, num_question_tokens
)
# (batch_size, num_entities, num_question_tokens)
question_entity_similarity_max_score, _ = torch.max(question_entity_similarity, 2)
# (batch_size, num_entities, num_question_tokens, num_features)
linking_features = table["linking"]
linking_scores = question_entity_similarity_max_score
if self._use_neighbor_similarity_for_linking:
# The linking score is computed as a linear projection of two terms. The first is the
# maximum similarity score over the entity's words and the question token. The second
# is the maximum similarity over the words in the entity's neighbors and the question
# token.
#
# The second term, projected_question_neighbor_similarity, is useful when a column
# needs to be selected. For example, the question token might have no similarity with
# the column name, but is similar with the cells in the column.
#
# Note that projected_question_neighbor_similarity is intended to capture the same
# information as the related_column feature.
#
# Also note that this block needs to be _before_ the `linking_params` block, because
# we're overwriting `linking_scores`, not adding to it.
# (batch_size, num_entities, num_neighbors, num_question_tokens)
question_neighbor_similarity = util.batched_index_select(
question_entity_similarity_max_score, torch.abs(neighbor_indices)
)
# (batch_size, num_entities, num_question_tokens)
question_neighbor_similarity_max_score, _ = torch.max(question_neighbor_similarity, 2)
projected_question_entity_similarity = self._question_entity_params(
question_entity_similarity_max_score.unsqueeze(-1)
).squeeze(-1)
projected_question_neighbor_similarity = self._question_neighbor_params(
question_neighbor_similarity_max_score.unsqueeze(-1)
).squeeze(-1)
linking_scores = (
projected_question_entity_similarity + projected_question_neighbor_similarity
)
feature_scores = None
if self._linking_params is not None:
feature_scores = self._linking_params(linking_features).squeeze(3)
linking_scores = linking_scores + feature_scores
# (batch_size, num_question_tokens, num_entities)
linking_probabilities = self._get_linking_probabilities(
world, linking_scores.transpose(1, 2), question_mask, entity_type_dict
)
# (batch_size, num_question_tokens, embedding_dim)
link_embedding = util.weighted_sum(entity_embeddings, linking_probabilities)
encoder_input = torch.cat([link_embedding, embedded_question], 2)
# (batch_size, question_length, encoder_output_dim)
encoder_outputs = self._dropout(self._encoder(encoder_input, question_mask))
# This will be our initial hidden state and memory cell for the decoder LSTM.
final_encoder_output = util.get_final_encoder_states(
encoder_outputs, question_mask, self._encoder.is_bidirectional()
)
memory_cell = encoder_outputs.new_zeros(batch_size, self._encoder.get_output_dim())
# To make grouping states together in the decoder easier, we convert the batch dimension in
# all of our tensors into an outer list. For instance, the encoder outputs have shape
# `(batch_size, question_length, encoder_output_dim)`. We need to convert this into a list
# of `batch_size` tensors, each of shape `(question_length, encoder_output_dim)`. Then we
# won't have to do any index selects, or anything, we'll just do some `torch.cat()`s.
encoder_output_list = [encoder_outputs[i] for i in range(batch_size)]
question_mask_list = [question_mask[i] for i in range(batch_size)]
initial_rnn_state = []
for i in range(batch_size):
initial_rnn_state.append(
RnnStatelet(
final_encoder_output[i],
memory_cell[i],
self._first_action_embedding,
self._first_attended_question,
encoder_output_list,
question_mask_list,
)
)
initial_grammar_state = [
self._create_grammar_state(world[i], actions[i], linking_scores[i], entity_types[i])
for i in range(batch_size)
]
if not self.training:
# We add a few things to the outputs that will be returned from `forward` at evaluation
# time, for visualization in a demo.
outputs["linking_scores"] = linking_scores
if feature_scores is not None:
outputs["feature_scores"] = feature_scores
outputs["similarity_scores"] = question_entity_similarity_max_score
return initial_rnn_state, initial_grammar_state
@staticmethod
def _get_neighbor_indices(
worlds: List[WikiTablesLanguage], num_entities: int, tensor: torch.Tensor
) -> torch.LongTensor:
"""
This method returns the indices of each entity's neighbors. A tensor
is accepted as a parameter for copying purposes.
Parameters
----------
worlds : ``List[WikiTablesLanguage]``
num_entities : ``int``
tensor : ``torch.Tensor``
Used for copying the constructed list onto the right device.
Returns
-------
A ``torch.LongTensor`` with shape ``(batch_size, num_entities, num_neighbors)``. It is padded
with -1 instead of 0, since 0 is a valid neighbor index. If all the entities in the batch
have no neighbors, None will be returned.
"""
num_neighbors = 0
for world in worlds:
for entity in world.table_graph.entities:
if len(world.table_graph.neighbors[entity]) > num_neighbors:
num_neighbors = len(world.table_graph.neighbors[entity])
batch_neighbors = []
no_entities_have_neighbors = True
for world in worlds:
# Each batch instance has its own world, which has a corresponding table.
entities = world.table_graph.entities
entity2index = {entity: i for i, entity in enumerate(entities)}
entity2neighbors = world.table_graph.neighbors
neighbor_indexes = []
for entity in entities:
entity_neighbors = [entity2index[n] for n in entity2neighbors[entity]]
if entity_neighbors:
no_entities_have_neighbors = False
# Pad with -1 instead of 0, since 0 represents a neighbor index.
padded = pad_sequence_to_length(entity_neighbors, num_neighbors, lambda: -1)
neighbor_indexes.append(padded)
neighbor_indexes = pad_sequence_to_length(
neighbor_indexes, num_entities, lambda: [-1] * num_neighbors
)
batch_neighbors.append(neighbor_indexes)
# It is possible that none of the entities has any neighbors, since our definition of the
# knowledge graph allows it when no entities or numbers were extracted from the question.
if no_entities_have_neighbors:
return None
return tensor.new_tensor(batch_neighbors, dtype=torch.long)
@staticmethod
def _get_type_vector(
worlds: List[WikiTablesLanguage], num_entities: int, tensor: torch.Tensor
) -> Tuple[torch.LongTensor, Dict[int, int]]:
"""
Produces a tensor with shape ``(batch_size, num_entities)`` that encodes each entity's
type. In addition, a map from a flattened entity index to type is returned to combine
entity type operations into one method.
Parameters
----------
worlds : ``List[WikiTablesLanguage]``
num_entities : ``int``
tensor : ``torch.Tensor``
Used for copying the constructed list onto the right device.
Returns
-------
A ``torch.LongTensor`` with shape ``(batch_size, num_entities)``.
entity_types : ``Dict[int, int]``
This is a mapping from ((batch_index * num_entities) + entity_index) to entity type id.
"""
entity_types = {}
batch_types = []
for batch_index, world in enumerate(worlds):
types = []
for entity_index, entity in enumerate(world.table_graph.entities):
# We need numbers to be first, then date columns, then number columns, strings, and
# string columns, in that order, because our entities are going to be sorted. We do
# a split by type and then a merge later, and it relies on this sorting.
if entity.startswith("date_column:"):
entity_type = 1
elif entity.startswith("number_column:"):
entity_type = 2
elif entity.startswith("string:"):
entity_type = 3
elif entity.startswith("string_column:"):
entity_type = 4
else:
entity_type = 0
types.append(entity_type)
# For easier lookups later, we're actually using a _flattened_ version
# of (batch_index, entity_index) for the key, because this is how the
# linking scores are stored.
flattened_entity_index = batch_index * num_entities + entity_index
entity_types[flattened_entity_index] = entity_type
padded = pad_sequence_to_length(types, num_entities, lambda: 0)
batch_types.append(padded)
return tensor.new_tensor(batch_types, dtype=torch.long), entity_types
def _get_linking_probabilities(
self,
worlds: List[WikiTablesLanguage],
linking_scores: torch.FloatTensor,
question_mask: torch.LongTensor,
entity_type_dict: Dict[int, int],
) -> torch.FloatTensor:
"""
Produces the probability of an entity given a question word and type. The logic below
separates the entities by type since the softmax normalization term sums over entities
of a single type.
Parameters
----------
worlds : ``List[WikiTablesLanguage]``
linking_scores : ``torch.FloatTensor``
Has shape (batch_size, num_question_tokens, num_entities).
question_mask: ``torch.LongTensor``
Has shape (batch_size, num_question_tokens).
entity_type_dict : ``Dict[int, int]``
This is a mapping from ((batch_index * num_entities) + entity_index) to entity type id.
Returns
-------
batch_probabilities : ``torch.FloatTensor``
Has shape ``(batch_size, num_question_tokens, num_entities)``.
Contains all the probabilities for an entity given a question word.
"""
_, num_question_tokens, num_entities = linking_scores.size()
batch_probabilities = []
for batch_index, world in enumerate(worlds):
all_probabilities = []
num_entities_in_instance = 0
# NOTE: The way that we're doing this here relies on the fact that entities are
# implicitly sorted by their types when we sort them by name, and that numbers come
# before "date_column:", followed by "number_column:", "string:", and "string_column:".
# This is not a great assumption, and could easily break later, but it should work for now.
for type_index in range(self._num_entity_types):
# This index of 0 is for the null entity for each type, representing the case where a
# word doesn't link to any entity.
entity_indices = [0]
entities = world.table_graph.entities
for entity_index, _ in enumerate(entities):
if entity_type_dict[batch_index * num_entities + entity_index] == type_index:
entity_indices.append(entity_index)
if len(entity_indices) == 1:
# No entities of this type; move along...
continue
# We're subtracting one here because of the null entity we added above.
num_entities_in_instance += len(entity_indices) - 1
# We separate the scores by type, since normalization is done per type. There's an
# extra "null" entity per type, also, so we have `num_entities_per_type + 1`. We're
# selecting from a (num_question_tokens, num_entities) linking tensor on _dimension 1_,
# so we get back something of shape (num_question_tokens,) for each index we're
# selecting. All of the selected indices together then make a tensor of shape
# (num_question_tokens, num_entities_per_type + 1).
indices = linking_scores.new_tensor(entity_indices, dtype=torch.long)
entity_scores = linking_scores[batch_index].index_select(1, indices)
# We used index 0 for the null entity, so this will actually have some values in it.
# But we want the null entity's score to be 0, so we set that here.
entity_scores[:, 0] = 0
# No need for a mask here, as this is done per batch instance, with no padding.
type_probabilities = torch.nn.functional.softmax(entity_scores, dim=1)
all_probabilities.append(type_probabilities[:, 1:])
# We need to add padding here if we don't have the right number of entities.
if num_entities_in_instance != num_entities:
zeros = linking_scores.new_zeros(
num_question_tokens, num_entities - num_entities_in_instance
)
all_probabilities.append(zeros)
# (num_question_tokens, num_entities)
probabilities = torch.cat(all_probabilities, dim=1)
batch_probabilities.append(probabilities)
batch_probabilities = torch.stack(batch_probabilities, dim=0)
return batch_probabilities * question_mask.unsqueeze(-1).float()
@staticmethod
def _action_history_match(predicted: List[int], targets: torch.LongTensor) -> int:
# TODO(mattg): this could probably be moved into a FullSequenceMatch metric, or something.
# Check if target is big enough to cover prediction (including start/end symbols)
if len(predicted) > targets.size(1):
return 0
predicted_tensor = targets.new_tensor(predicted)
targets_trimmed = targets[:, : len(predicted)]
# Return 1 if the predicted sequence is anywhere in the list of targets.
return torch.max(torch.min(targets_trimmed.eq(predicted_tensor), dim=1)[0]).item()
def get_metrics(self, reset: bool = False) -> Dict[str, float]:
"""
We track three metrics here:
1. lf_retrieval_acc, which is the percentage of the time that our best output action
sequence is in the set of action sequences provided by offline search. This is an
easy-to-compute lower bound on denotation accuracy for the set of examples where we
actually have offline output. We only score lf_retrieval_acc on that subset.
2. denotation_acc, which is the percentage of examples where we get the correct
denotation. This is the typical "accuracy" metric, and it is what you should usually
report in an experimental result. You need to be careful, though, that you're
computing this on the full data, and not just the subset that has DPD output (make sure
you pass "keep_if_no_dpd=True" to the dataset reader, which we do for validation data,
but not training data).
3. lf_percent, which is the percentage of time that decoding actually produces a
finished logical form. We might not produce a valid logical form if the decoder gets
into a repetitive loop, or we're trying to produce a super long logical form and run
out of time steps, or something.
"""
return {
"lf_retrieval_acc": self._action_sequence_accuracy.get_metric(reset),
"denotation_acc": self._denotation_accuracy.get_metric(reset),
"lf_percent": self._has_logical_form.get_metric(reset),
}
def _create_grammar_state(
self,
world: WikiTablesLanguage,
possible_actions: List[ProductionRuleArray],
linking_scores: torch.Tensor,
entity_types: torch.Tensor,
) -> GrammarStatelet:
"""
This method creates the GrammarStatelet object that's used for decoding. Part of
creating that is creating the `valid_actions` dictionary, which contains embedded
representations of all of the valid actions. So, we create that here as well.
The way we represent the valid expansions is a little complicated: we use a
dictionary of `action types`, where the key is the action type (like "global", "linked", or
whatever your model is expecting), and the value is a tuple representing all actions of
that type. The tuple is (input tensor, output tensor, action id). The input tensor has
the representation that is used when `selecting` actions, for all actions of this type.
The output tensor has the representation that is used when feeding the action to the next
step of the decoder (this could just be the same as the input tensor). The action ids are
a list of indices into the main action list for each batch instance.
The inputs to this method are for a `single instance in the batch`; none of the tensors we
create here are batched. We grab the global action ids from the input
``ProductionRuleArrays``, and we use those to embed the valid actions for every
non-terminal type. We use the input ``linking_scores`` for non-global actions.
Parameters
----------
world : ``WikiTablesLanguage``
From the input to ``forward`` for a single batch instance.
possible_actions : ``List[ProductionRuleArray]``
From the input to ``forward`` for a single batch instance.
linking_scores : ``torch.Tensor``
Assumed to have shape ``(num_entities, num_question_tokens)`` (i.e., there is no batch
dimension).
entity_types : ``torch.Tensor``
Assumed to have shape ``(num_entities,)`` (i.e., there is no batch dimension).
"""
# TODO(mattg): Move the "valid_actions" construction to another method.
action_map = {}
for action_index, action in enumerate(possible_actions):
action_string = action[0]
action_map[action_string] = action_index
entity_map = {}
for entity_index, entity in enumerate(world.table_graph.entities):
entity_map[entity] = entity_index
valid_actions = world.get_nonterminal_productions()
translated_valid_actions: Dict[
str, Dict[str, Tuple[torch.Tensor, torch.Tensor, List[int]]]
] = {}
for key, action_strings in valid_actions.items():
translated_valid_actions[key] = {}
# `key` here is a non-terminal from the grammar, and `action_strings` are all the valid
# productions of that non-terminal. We'll first split those productions by global vs.
# linked action.
action_indices = [action_map[action_string] for action_string in action_strings]
production_rule_arrays = [(possible_actions[index], index) for index in action_indices]
global_actions = []
linked_actions = []
for production_rule_array, action_index in production_rule_arrays:
if production_rule_array[1]:
global_actions.append((production_rule_array[2], action_index))
else:
linked_actions.append((production_rule_array[0], action_index))
# Then we get the embedded representations of the global actions if any.
if global_actions:
global_action_tensors, global_action_ids = zip(*global_actions)
global_action_tensor = torch.cat(global_action_tensors, dim=0)
global_input_embeddings = self._action_embedder(global_action_tensor)
if self._add_action_bias:
global_action_biases = self._action_biases(global_action_tensor)
global_input_embeddings = torch.cat(
[global_input_embeddings, global_action_biases], dim=-1
)
global_output_embeddings = self._output_action_embedder(global_action_tensor)
translated_valid_actions[key]["global"] = (
global_input_embeddings,
global_output_embeddings,
list(global_action_ids),
)
# Then the representations of the linked actions.
if linked_actions:
linked_rules, linked_action_ids = zip(*linked_actions)
entities = [rule.split(" -> ")[1] for rule in linked_rules]
entity_ids = [entity_map[entity] for entity in entities]
# (num_linked_actions, num_question_tokens)
entity_linking_scores = linking_scores[entity_ids]
# (num_linked_actions,)
entity_type_tensor = entity_types[entity_ids]
# (num_linked_actions, entity_type_embedding_dim)
entity_type_embeddings = self._entity_type_decoder_embedding(entity_type_tensor)
translated_valid_actions[key]["linked"] = (
entity_linking_scores,
entity_type_embeddings,
list(linked_action_ids),
)
return GrammarStatelet([START_SYMBOL], translated_valid_actions, world.is_nonterminal)
def _compute_validation_outputs(
self,
actions: List[List[ProductionRuleArray]],
best_final_states: Mapping[int, Sequence[GrammarBasedState]],
world: List[WikiTablesLanguage],
target_list: List[List[str]],
metadata: List[Dict[str, Any]],
outputs: Dict[str, Any],
) -> None:
"""
Does common things for validation time: computing logical form accuracy (which is expensive
and unnecessary during training), adding visualization info to the output dictionary, etc.
This doesn't return anything; instead it `modifies` the given ``outputs`` dictionary, and
calls metrics on ``self``.
"""
batch_size = len(actions)
action_mapping = {}
for batch_index, batch_actions in enumerate(actions):
for action_index, action in enumerate(batch_actions):
action_mapping[(batch_index, action_index)] = action[0]
outputs["action_mapping"] = action_mapping
outputs["best_action_sequence"] = []
outputs["debug_info"] = []
outputs["entities"] = []
outputs["logical_form"] = []
outputs["answer"] = []
for i in range(batch_size):
# Decoding may not have terminated with any completed logical forms, if `num_steps`
# isn't long enough (or if the model is not trained enough and gets into an
# infinite action loop).
outputs["logical_form"].append([])
if i in best_final_states:
all_action_indices = [
best_final_states[i][j].action_history[0]
for j in range(len(best_final_states[i]))
]
found_denotation = False
for action_indices in all_action_indices:
action_strings = [
action_mapping[(i, action_index)] for action_index in action_indices
]
has_logical_form = False
try:
logical_form = world[i].action_sequence_to_logical_form(action_strings)
has_logical_form = True
except ParsingError:
logical_form = "Error producing logical form"
if target_list is not None:
denotation_correct = world[i].evaluate_logical_form(
logical_form, target_list[i]
)
else:
denotation_correct = False
if not found_denotation:
try:
denotation = world[i].execute(logical_form)
if denotation:
outputs["answer"].append(denotation)
found_denotation = True
except ExecutionError:
pass
if found_denotation:
if has_logical_form:
self._has_logical_form(1.0)
else:
self._has_logical_form(0.0)
if target_list:
self._denotation_accuracy(1.0 if denotation_correct else 0.0)
outputs["best_action_sequence"].append(action_strings)
outputs["logical_form"][-1].append(logical_form)
if not found_denotation:
outputs["answer"].append(None)
self._denotation_accuracy(0.0)
outputs["debug_info"].append(best_final_states[i][0].debug_info[0]) # type: ignore
outputs["entities"].append(world[i].table_graph.entities)
else:
self._has_logical_form(0.0)
self._denotation_accuracy(0.0)
if metadata is not None:
outputs["question_tokens"] = [x["question_tokens"] for x in metadata]
def make_output_human_readable(
self, output_dict: Dict[str, torch.Tensor]
) -> Dict[str, torch.Tensor]:
"""
This method overrides ``Model.decode``, which gets called after ``Model.forward``, at test
time, to finalize predictions. This is (confusingly) a separate notion from the "decoder"
in "encoder/decoder", where that decoder logic lives in the ``TransitionFunction``.
This method trims the output predictions to the first end symbol, replaces indices with
corresponding tokens, and adds a field called ``predicted_tokens`` to the ``output_dict``.
"""
action_mapping = output_dict["action_mapping"]
best_actions = output_dict["best_action_sequence"]
debug_infos = output_dict["debug_info"]
batch_action_info = []
for batch_index, (predicted_actions, debug_info) in enumerate(
zip(best_actions, debug_infos)
):
instance_action_info = []
for predicted_action, action_debug_info in zip(predicted_actions, debug_info):
action_info = {}
action_info["predicted_action"] = predicted_action
considered_actions = action_debug_info["considered_actions"]
probabilities = action_debug_info["probabilities"]
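                # Collect a (production rule, probability) pair for every action the model
                # considered at this timestep, skipping the -1 padding entries.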
actions = []
for action, probability in zip(considered_actions, probabilities):
if action != -1:
actions.append((action_mapping[(batch_index, action)], probability))
actions.sort()
considered_actions, probabilities = zip(*actions)
action_info["considered_actions"] = considered_actions
action_info["action_probabilities"] = probabilities
action_info["question_attention"] = action_debug_info.get("question_attention", [])
instance_action_info.append(action_info)
batch_action_info.append(instance_action_info)
output_dict["predicted_actions"] = batch_action_info
return output_dict
| allennlp-semparse-master | allennlp_semparse/models/wikitables/wikitables_semantic_parser.py |
allennlp-semparse-master | allennlp_semparse/models/wikitables/__init__.py |
|
from typing import Any, Dict, List
import torch
from allennlp.data import Vocabulary
from allennlp.models.model import Model
from allennlp.modules import (
Attention,
FeedForward,
Seq2SeqEncoder,
Seq2VecEncoder,
TextFieldEmbedder,
)
from allennlp_semparse.domain_languages import WikiTablesLanguage
from allennlp_semparse.fields.production_rule_field import ProductionRuleArray
from allennlp_semparse.models.wikitables.wikitables_semantic_parser import WikiTablesSemanticParser
from allennlp_semparse.state_machines import BeamSearch
from allennlp_semparse.state_machines.states import GrammarBasedState
from allennlp_semparse.state_machines.trainers import MaximumMarginalLikelihood
from allennlp_semparse.state_machines.transition_functions import LinkingTransitionFunction
@Model.register("wikitables_mml_parser")
class WikiTablesMmlSemanticParser(WikiTablesSemanticParser):
"""
A ``WikiTablesMmlSemanticParser`` is a :class:`WikiTablesSemanticParser` which is trained to
maximize the marginal likelihood of an approximate set of logical forms which give the correct
denotation. This is a re-implementation of the model used for the paper `Neural Semantic Parsing with Type
Constraints for Semi-Structured Tables
<https://www.semanticscholar.org/paper/Neural-Semantic-Parsing-with-Type-Constraints-for-Krishnamurthy-Dasigi/8c6f58ed0ebf379858c0bbe02c53ee51b3eb398a>`_,
by Jayant Krishnamurthy, Pradeep Dasigi, and Matt Gardner (EMNLP 2017). The language used by
    this model is different from LambdaDCS, the language used in the paper above. This model uses
    the variable-free language defined in ``allennlp_semparse.domain_languages.wikitables_language``.
Parameters
----------
vocab : ``Vocabulary``
question_embedder : ``TextFieldEmbedder``
Embedder for questions. Passed to super class.
action_embedding_dim : ``int``
Dimension to use for action embeddings. Passed to super class.
encoder : ``Seq2SeqEncoder``
The encoder to use for the input question. Passed to super class.
entity_encoder : ``Seq2VecEncoder``
        The encoder used for averaging the words of an entity. Passed to super class.
decoder_beam_search : ``BeamSearch``
When we're not training, this is how we will do decoding.
max_decoding_steps : ``int``
When we're decoding with a beam search, what's the maximum number of steps we should take?
This only applies at evaluation time, not during training. Passed to super class.
attention : ``Attention``
We compute an attention over the input question at each step of the decoder, using the
decoder hidden state as the query. Passed to the transition function.
mixture_feedforward : ``FeedForward``, optional (default=None)
If given, we'll use this to compute a mixture probability between global actions and linked
actions given the hidden state at every timestep of decoding, instead of concatenating the
logits for both (where the logits may not be compatible with each other). Passed to
the transition function.
add_action_bias : ``bool``, optional (default=True)
If ``True``, we will learn a bias weight for each action that gets used when predicting
that action, in addition to its embedding. Passed to super class.
training_beam_size : ``int``, optional (default=None)
If given, we will use a constrained beam search of this size during training, so that we
use only the top ``training_beam_size`` action sequences according to the model in the MML
computation. If this is ``None``, we will use all of the provided action sequences in the
MML computation.
use_neighbor_similarity_for_linking : ``bool``, optional (default=False)
If ``True``, we will compute a max similarity between a question token and the `neighbors`
of an entity as a component of the linking scores. This is meant to capture the same kind
of information as the ``related_column`` feature. Passed to super class.
dropout : ``float``, optional (default=0)
If greater than 0, we will apply dropout with this probability after all encoders (pytorch
LSTMs do not apply dropout to their last layer). Passed to super class.
num_linking_features : ``int``, optional (default=10)
We need to construct a parameter vector for the linking features, so we need to know how
many there are. The default of 10 here matches the default in the ``KnowledgeGraphField``,
which is to use all ten defined features. If this is 0, another term will be added to the
linking score. This term contains the maximum similarity value from the entity's neighbors
and the question. Passed to super class.
rule_namespace : ``str``, optional (default=rule_labels)
The vocabulary namespace to use for production rules. The default corresponds to the
default used in the dataset reader, so you likely don't need to modify this. Passed to super
class.
"""
def __init__(
self,
vocab: Vocabulary,
question_embedder: TextFieldEmbedder,
action_embedding_dim: int,
encoder: Seq2SeqEncoder,
entity_encoder: Seq2VecEncoder,
decoder_beam_search: BeamSearch,
max_decoding_steps: int,
attention: Attention,
mixture_feedforward: FeedForward = None,
add_action_bias: bool = True,
training_beam_size: int = None,
use_neighbor_similarity_for_linking: bool = False,
dropout: float = 0.0,
num_linking_features: int = 10,
rule_namespace: str = "rule_labels",
) -> None:
use_similarity = use_neighbor_similarity_for_linking
super().__init__(
vocab=vocab,
question_embedder=question_embedder,
action_embedding_dim=action_embedding_dim,
encoder=encoder,
entity_encoder=entity_encoder,
max_decoding_steps=max_decoding_steps,
add_action_bias=add_action_bias,
use_neighbor_similarity_for_linking=use_similarity,
dropout=dropout,
num_linking_features=num_linking_features,
rule_namespace=rule_namespace,
)
self._beam_search = decoder_beam_search
self._decoder_trainer = MaximumMarginalLikelihood(training_beam_size)
self._decoder_step = LinkingTransitionFunction(
encoder_output_dim=self._encoder.get_output_dim(),
action_embedding_dim=action_embedding_dim,
input_attention=attention,
add_action_bias=self._add_action_bias,
mixture_feedforward=mixture_feedforward,
dropout=dropout,
)
def forward(
self, # type: ignore
question: Dict[str, torch.LongTensor],
table: Dict[str, torch.LongTensor],
world: List[WikiTablesLanguage],
actions: List[List[ProductionRuleArray]],
target_values: List[List[str]] = None,
target_action_sequences: torch.LongTensor = None,
metadata: List[Dict[str, Any]] = None,
) -> Dict[str, torch.Tensor]:
"""
In this method we encode the table entities, link them to words in the question, then
encode the question. Then we set up the initial state for the decoder, and pass that
state off to either a DecoderTrainer, if we're training, or a BeamSearch for inference,
if we're not.
Parameters
----------
question : Dict[str, torch.LongTensor]
The output of ``TextField.as_array()`` applied on the question ``TextField``. This will
be passed through a ``TextFieldEmbedder`` and then through an encoder.
table : ``Dict[str, torch.LongTensor]``
The output of ``KnowledgeGraphField.as_array()`` applied on the table
``KnowledgeGraphField``. This output is similar to a ``TextField`` output, where each
entity in the table is treated as a "token", and we will use a ``TextFieldEmbedder`` to
get embeddings for each entity.
world : ``List[WikiTablesLanguage]``
We use a ``MetadataField`` to get the ``WikiTablesLanguage`` object for each input instance.
            Because of how ``MetadataField`` works, this gets passed to us as a ``List[WikiTablesLanguage]``.
actions : ``List[List[ProductionRuleArray]]``
A list of all possible actions for each ``world`` in the batch, indexed into a
``ProductionRuleArray`` using a ``ProductionRuleField``. We will embed all of these
and use the embeddings to determine which action to take at each timestep in the
decoder.
target_values : ``List[List[str]]``, optional (default = None)
For each instance, a list of target values taken from the example lisp string. We pass
this list to the evaluator along with logical forms to compute denotation accuracy.
target_action_sequences : torch.Tensor, optional (default = None)
A list of possibly valid action sequences, where each action is an index into the list
of possible actions. This tensor has shape ``(batch_size, num_action_sequences,
sequence_length)``.
metadata : ``List[Dict[str, Any]]``, optional (default = None)
Metadata containing the original tokenized question within a 'question_tokens' field.
"""
outputs: Dict[str, Any] = {}
rnn_state, grammar_state = self._get_initial_rnn_and_grammar_state(
question, table, world, actions, outputs
)
batch_size = len(rnn_state)
initial_score = rnn_state[0].hidden_state.new_zeros(batch_size)
initial_score_list = [initial_score[i] for i in range(batch_size)]
initial_state = GrammarBasedState(
batch_indices=list(range(batch_size)), # type: ignore
action_history=[[] for _ in range(batch_size)],
score=initial_score_list,
rnn_state=rnn_state,
grammar_state=grammar_state,
possible_actions=actions,
extras=target_values,
debug_info=None,
)
if target_action_sequences is not None:
# Remove the trailing dimension (from ListField[ListField[IndexField]]).
target_action_sequences = target_action_sequences.squeeze(-1)
target_mask = target_action_sequences != self._action_padding_index
else:
target_mask = None
if self.training:
return self._decoder_trainer.decode(
initial_state, self._decoder_step, (target_action_sequences, target_mask)
)
else:
if target_action_sequences is not None:
outputs["loss"] = self._decoder_trainer.decode(
initial_state, self._decoder_step, (target_action_sequences, target_mask)
)["loss"]
num_steps = self._max_decoding_steps
# This tells the state to start keeping track of debug info, which we'll pass along in
# our output dictionary.
initial_state.debug_info = [[] for _ in range(batch_size)]
best_final_states = self._beam_search.search(
num_steps, initial_state, self._decoder_step, keep_final_unfinished_states=False
)
for i in range(batch_size):
# Decoding may not have terminated with any completed logical forms, if `num_steps`
# isn't long enough (or if the model is not trained enough and gets into an
# infinite action loop).
if i in best_final_states:
best_action_indices = best_final_states[i][0].action_history[0]
if target_action_sequences is not None:
# Use a Tensor, not a Variable, to avoid a memory leak.
targets = target_action_sequences[i].data
sequence_in_targets = 0
sequence_in_targets = self._action_history_match(
best_action_indices, targets
)
self._action_sequence_accuracy(sequence_in_targets)
self._compute_validation_outputs(
actions, best_final_states, world, target_values, metadata, outputs
)
return outputs
default_predictor = "wikitables-parser"
| allennlp-semparse-master | allennlp_semparse/models/wikitables/wikitables_mml_semantic_parser.py |
import logging
import os
from functools import partial
from typing import Dict, List, Tuple, Set, Any
import torch
from allennlp.data import Vocabulary
from allennlp.models.archival import load_archive, Archive
from allennlp.models.model import Model
from allennlp.modules import (
Attention,
FeedForward,
Seq2SeqEncoder,
Seq2VecEncoder,
TextFieldEmbedder,
)
from allennlp.training.metrics import Average
from allennlp_semparse.domain_languages import WikiTablesLanguage
from allennlp_semparse.fields.production_rule_field import ProductionRule
from allennlp_semparse.models.wikitables.wikitables_semantic_parser import WikiTablesSemanticParser
from allennlp_semparse.state_machines import BeamSearch
from allennlp_semparse.state_machines.states import CoverageState, ChecklistStatelet
from allennlp_semparse.state_machines.trainers import ExpectedRiskMinimization
from allennlp_semparse.state_machines.transition_functions import LinkingCoverageTransitionFunction
logger = logging.getLogger(__name__)
@Model.register("wikitables_erm_parser")
class WikiTablesErmSemanticParser(WikiTablesSemanticParser):
"""
A ``WikiTablesErmSemanticParser`` is a :class:`WikiTablesSemanticParser` that learns to search
for logical forms that yield the correct denotations.
Parameters
----------
vocab : ``Vocabulary``
question_embedder : ``TextFieldEmbedder``
Embedder for questions. Passed to super class.
action_embedding_dim : ``int``
Dimension to use for action embeddings. Passed to super class.
encoder : ``Seq2SeqEncoder``
The encoder to use for the input question. Passed to super class.
entity_encoder : ``Seq2VecEncoder``
        The encoder used for averaging the words of an entity. Passed to super class.
attention : ``Attention``
We compute an attention over the input question at each step of the decoder, using the
decoder hidden state as the query. Passed to the transition function.
decoder_beam_size : ``int``
Beam size to be used by the ExpectedRiskMinimization algorithm.
decoder_num_finished_states : ``int``
Number of finished states for which costs will be computed by the ExpectedRiskMinimization
algorithm.
max_decoding_steps : ``int``
Maximum number of steps the decoder should take before giving up. Used both during training
and evaluation. Passed to super class.
add_action_bias : ``bool``, optional (default=True)
If ``True``, we will learn a bias weight for each action that gets used when predicting
that action, in addition to its embedding. Passed to super class.
normalize_beam_score_by_length : ``bool``, optional (default=False)
Should we normalize the log-probabilities by length before renormalizing the beam? This was
        shown to work better for NMT by Edunov et al., but that may not be the case for semantic
parsing.
checklist_cost_weight : ``float``, optional (default=0.6)
Mixture weight (0-1) for combining coverage cost and denotation cost. As this increases, we
weigh the coverage cost higher, with a value of 1.0 meaning that we do not care about
denotation accuracy.
use_neighbor_similarity_for_linking : ``bool``, optional (default=False)
If ``True``, we will compute a max similarity between a question token and the `neighbors`
of an entity as a component of the linking scores. This is meant to capture the same kind
of information as the ``related_column`` feature. Passed to super class.
dropout : ``float``, optional (default=0)
If greater than 0, we will apply dropout with this probability after all encoders (pytorch
LSTMs do not apply dropout to their last layer). Passed to super class.
num_linking_features : ``int``, optional (default=10)
We need to construct a parameter vector for the linking features, so we need to know how
many there are. The default of 10 here matches the default in the ``KnowledgeGraphField``,
which is to use all ten defined features. If this is 0, another term will be added to the
linking score. This term contains the maximum similarity value from the entity's neighbors
and the question. Passed to super class.
rule_namespace : ``str``, optional (default=rule_labels)
The vocabulary namespace to use for production rules. The default corresponds to the
default used in the dataset reader, so you likely don't need to modify this. Passed to super
class.
mml_model_file : ``str``, optional (default=None)
If you want to initialize this model using weights from another model trained using MML,
pass the path to the ``model.tar.gz`` file of that model here.
"""
def __init__(
self,
vocab: Vocabulary,
question_embedder: TextFieldEmbedder,
action_embedding_dim: int,
encoder: Seq2SeqEncoder,
entity_encoder: Seq2VecEncoder,
attention: Attention,
decoder_beam_size: int,
decoder_num_finished_states: int,
max_decoding_steps: int,
mixture_feedforward: FeedForward = None,
add_action_bias: bool = True,
normalize_beam_score_by_length: bool = False,
checklist_cost_weight: float = 0.6,
use_neighbor_similarity_for_linking: bool = False,
dropout: float = 0.0,
num_linking_features: int = 10,
rule_namespace: str = "rule_labels",
mml_model_file: str = None,
) -> None:
use_similarity = use_neighbor_similarity_for_linking
super().__init__(
vocab=vocab,
question_embedder=question_embedder,
action_embedding_dim=action_embedding_dim,
encoder=encoder,
entity_encoder=entity_encoder,
max_decoding_steps=max_decoding_steps,
add_action_bias=add_action_bias,
use_neighbor_similarity_for_linking=use_similarity,
dropout=dropout,
num_linking_features=num_linking_features,
rule_namespace=rule_namespace,
)
# Not sure why mypy needs a type annotation for this!
self._decoder_trainer: ExpectedRiskMinimization = ExpectedRiskMinimization(
beam_size=decoder_beam_size,
normalize_by_length=normalize_beam_score_by_length,
max_decoding_steps=self._max_decoding_steps,
max_num_finished_states=decoder_num_finished_states,
)
self._decoder_step = LinkingCoverageTransitionFunction(
encoder_output_dim=self._encoder.get_output_dim(),
action_embedding_dim=action_embedding_dim,
input_attention=attention,
add_action_bias=self._add_action_bias,
mixture_feedforward=mixture_feedforward,
dropout=dropout,
)
self._checklist_cost_weight = checklist_cost_weight
self._agenda_coverage = Average()
# We don't need a separate beam search since the trainer does that already. But we're defining one just to
# be able to use interactive beam search (a functionality that's only implemented in the ``BeamSearch``
# class) in the demo. We'll use this only at test time.
self._beam_search: BeamSearch = BeamSearch(beam_size=decoder_beam_size)
# TODO (pradeep): Checking whether file exists here to avoid raising an error when we've
# copied a trained ERM model from a different machine and the original MML model that was
# used to initialize it does not exist on the current machine. This may not be the best
# solution for the problem.
if mml_model_file is not None:
if os.path.isfile(mml_model_file):
archive = load_archive(mml_model_file)
self._initialize_weights_from_archive(archive)
else:
# A model file is passed, but it does not exist. This is expected to happen when
# you're using a trained ERM model to decode. But it may also happen if the path to
# the file is really just incorrect. So throwing a warning.
logger.warning(
"MML model file for initializing weights is passed, but does not exist."
" This is fine if you're just decoding."
)
def _initialize_weights_from_archive(self, archive: Archive) -> None:
logger.info("Initializing weights from MML model.")
model_parameters = dict(self.named_parameters())
archived_parameters = dict(archive.model.named_parameters())
question_embedder_weight = "_question_embedder.token_embedder_tokens.weight"
if (
question_embedder_weight not in archived_parameters
or question_embedder_weight not in model_parameters
):
raise RuntimeError(
"When initializing model weights from an MML model, we need "
"the question embedder to be a TokenEmbedder using namespace called "
"tokens."
)
for name, weights in archived_parameters.items():
if name in model_parameters:
if name == question_embedder_weight:
# The shapes of embedding weights will most likely differ between the two models
# because the vocabularies will most likely be different. We will get a mapping
# of indices from this model's token indices to the archived model's and copy
# the tensor accordingly.
vocab_index_mapping = self._get_vocab_index_mapping(archive.model.vocab)
archived_embedding_weights = weights.data
new_weights = model_parameters[name].data.clone()
for index, archived_index in vocab_index_mapping:
new_weights[index] = archived_embedding_weights[archived_index]
logger.info(
"Copied embeddings of %d out of %d tokens",
len(vocab_index_mapping),
new_weights.size()[0],
)
else:
new_weights = weights.data
logger.info("Copying parameter %s", name)
model_parameters[name].data.copy_(new_weights)
def _get_vocab_index_mapping(self, archived_vocab: Vocabulary) -> List[Tuple[int, int]]:
vocab_index_mapping: List[Tuple[int, int]] = []
for index in range(self.vocab.get_vocab_size(namespace="tokens")):
token = self.vocab.get_token_from_index(index=index, namespace="tokens")
archived_token_index = archived_vocab.get_token_index(token, namespace="tokens")
# Checking if we got the UNK token index, because we don't want all new token
# representations initialized to UNK token's representation. We do that by checking if
# the two tokens are the same. They will not be if the token at the archived index is
# UNK.
if (
archived_vocab.get_token_from_index(archived_token_index, namespace="tokens")
== token
):
vocab_index_mapping.append((index, archived_token_index))
return vocab_index_mapping
def forward(
self, # type: ignore
question: Dict[str, torch.LongTensor],
table: Dict[str, torch.LongTensor],
world: List[WikiTablesLanguage],
actions: List[List[ProductionRule]],
agenda: torch.LongTensor,
target_values: List[List[str]] = None,
metadata: List[Dict[str, Any]] = None,
) -> Dict[str, torch.Tensor]:
"""
Parameters
----------
question : Dict[str, torch.LongTensor]
The output of ``TextField.as_array()`` applied on the question ``TextField``. This will
be passed through a ``TextFieldEmbedder`` and then through an encoder.
table : ``Dict[str, torch.LongTensor]``
The output of ``KnowledgeGraphField.as_array()`` applied on the table
``KnowledgeGraphField``. This output is similar to a ``TextField`` output, where each
entity in the table is treated as a "token", and we will use a ``TextFieldEmbedder`` to
get embeddings for each entity.
world : ``List[WikiTablesLanguage]``
We use a ``MetadataField`` to get the ``WikiTablesLanguage`` object for each input instance.
            Because of how ``MetadataField`` works, this gets passed to us as a ``List[WikiTablesLanguage]``.
actions : ``List[List[ProductionRule]]``
A list of all possible actions for each ``world`` in the batch, indexed into a
``ProductionRule`` using a ``ProductionRuleField``. We will embed all of these
and use the embeddings to determine which action to take at each timestep in the
decoder.
agenda : ``torch.LongTensor``
Agenda vectors that the checklist vectors will be compared against to compute the checklist
cost.
target_values : ``List[List[str]]``, optional (default = None)
For each instance, a list of target values taken from the example lisp string. We pass
this list to the evaluator along with logical forms to compute denotation accuracy.
metadata : ``List[Dict[str, Any]]``, optional (default = None)
Metadata containing the original tokenized question within a 'question_tokens' field.
"""
# Each instance's agenda is of size (agenda_size, 1)
agenda_list = [a for a in agenda]
checklist_states = []
all_terminal_productions = [
set(instance_world.terminal_productions.values()) for instance_world in world
]
max_num_terminals = max([len(terminals) for terminals in all_terminal_productions])
for instance_actions, instance_agenda, terminal_productions in zip(
actions, agenda_list, all_terminal_productions
):
checklist_info = self._get_checklist_info(
instance_agenda, instance_actions, terminal_productions, max_num_terminals
)
checklist_target, terminal_actions, checklist_mask = checklist_info
initial_checklist = checklist_target.new_zeros(checklist_target.size())
checklist_states.append(
ChecklistStatelet(
terminal_actions=terminal_actions,
checklist_target=checklist_target,
checklist_mask=checklist_mask,
checklist=initial_checklist,
)
)
outputs: Dict[str, Any] = {}
rnn_state, grammar_state = self._get_initial_rnn_and_grammar_state(
question, table, world, actions, outputs
)
batch_size = len(rnn_state)
initial_score = rnn_state[0].hidden_state.new_zeros(batch_size)
initial_score_list = [initial_score[i] for i in range(batch_size)]
initial_state = CoverageState(
batch_indices=list(range(batch_size)), # type: ignore
action_history=[[] for _ in range(batch_size)],
score=initial_score_list,
rnn_state=rnn_state,
grammar_state=grammar_state,
checklist_state=checklist_states,
possible_actions=actions,
extras=target_values,
debug_info=None,
)
if target_values is not None:
logger.warning(f"TARGET VALUES: {target_values}")
trainer_outputs = self._decoder_trainer.decode( # type: ignore
initial_state, self._decoder_step, partial(self._get_state_cost, world)
)
outputs.update(trainer_outputs)
else:
initial_state.debug_info = [[] for _ in range(batch_size)]
batch_size = len(actions)
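            # The agenda indices are only needed on the CPU, to compute the agenda coverage
            # metric further below.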
agenda_indices = [actions_[:, 0].cpu().data for actions_ in agenda]
action_mapping = {}
for batch_index, batch_actions in enumerate(actions):
for action_index, action in enumerate(batch_actions):
action_mapping[(batch_index, action_index)] = action[0]
best_final_states = self._beam_search.search(
self._max_decoding_steps,
initial_state,
self._decoder_step,
keep_final_unfinished_states=False,
)
for i in range(batch_size):
in_agenda_ratio = 0.0
# Decoding may not have terminated with any completed logical forms, if `num_steps`
# isn't long enough (or if the model is not trained enough and gets into an
# infinite action loop).
if i in best_final_states:
action_sequence = best_final_states[i][0].action_history[0]
action_strings = [
action_mapping[(i, action_index)] for action_index in action_sequence
]
instance_possible_actions = actions[i]
agenda_actions = []
for rule_id in agenda_indices[i]:
rule_id = int(rule_id)
if rule_id == -1:
continue
action_string = instance_possible_actions[rule_id][0]
agenda_actions.append(action_string)
actions_in_agenda = [action in action_strings for action in agenda_actions]
if actions_in_agenda:
# Note: This means that when there are no actions on agenda, agenda coverage
# will be 0, not 1.
in_agenda_ratio = sum(actions_in_agenda) / len(actions_in_agenda)
self._agenda_coverage(in_agenda_ratio)
self._compute_validation_outputs(
actions, best_final_states, world, target_values, metadata, outputs
)
return outputs
@staticmethod
def _get_checklist_info(
agenda: torch.LongTensor,
all_actions: List[ProductionRule],
terminal_productions: Set[str],
max_num_terminals: int,
) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
"""
Takes an agenda, a list of all actions, a set of terminal productions in the corresponding
world, and a length to pad the checklist vectors to, and returns a target checklist against
which the checklist at each state will be compared to compute a loss, indices of
``terminal_actions``, and a ``checklist_mask`` that indicates which of the terminal actions
are relevant for checklist loss computation.
Parameters
----------
``agenda`` : ``torch.LongTensor``
Agenda of one instance of size ``(agenda_size, 1)``.
``all_actions`` : ``List[ProductionRule]``
All actions for one instance.
``terminal_productions`` : ``Set[str]``
String representations of terminal productions in the corresponding world.
``max_num_terminals`` : ``int``
            Length to which the checklist vectors will be padded. This is the maximum number of
terminal productions in all the worlds in the batch.
"""
terminal_indices = []
target_checklist_list = []
agenda_indices_set = {int(x) for x in agenda.squeeze(0).detach().cpu().numpy()}
# We want to return checklist target and terminal actions that are column vectors to make
# computing softmax over the difference between checklist and target easier.
for index, action in enumerate(all_actions):
# Each action is a ProductionRule, a tuple where the first item is the production
# rule string.
if action[0] in terminal_productions:
terminal_indices.append([index])
if index in agenda_indices_set:
target_checklist_list.append([1])
else:
target_checklist_list.append([0])
while len(target_checklist_list) < max_num_terminals:
target_checklist_list.append([0])
terminal_indices.append([-1])
# (max_num_terminals, 1)
terminal_actions = agenda.new_tensor(terminal_indices)
# (max_num_terminals, 1)
target_checklist = agenda.new_tensor(target_checklist_list, dtype=torch.float)
checklist_mask = (target_checklist != 0).float()
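        # As a rough illustration: if actions 1 and 3 (out of four actions) are terminal
        # productions, the agenda contains only action 3, and max_num_terminals is 3, then
        # terminal_actions is [[1], [3], [-1]], target_checklist is [[0], [1], [0]], and
        # checklist_mask is [[0], [1], [0]] (as floats).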
return target_checklist, terminal_actions, checklist_mask
def _get_state_cost(
self, worlds: List[WikiTablesLanguage], state: CoverageState
) -> torch.Tensor:
if not state.is_finished():
raise RuntimeError("_get_state_cost() is not defined for unfinished states!")
world = worlds[state.batch_indices[0]]
# Our checklist cost is a sum of squared error from where we want to be, making sure we
# take into account the mask. We clamp the lower limit of the balance at 0 to avoid
# penalizing agenda actions produced multiple times.
checklist_balance = torch.clamp(state.checklist_state[0].get_balance(), min=0.0)
checklist_cost = torch.sum((checklist_balance) ** 2)
# This is the number of items on the agenda that we want to see in the decoded sequence.
# We use this as the denotation cost if the path is incorrect.
denotation_cost = torch.sum(state.checklist_state[0].checklist_target.float())
checklist_cost = self._checklist_cost_weight * checklist_cost
action_history = state.action_history[0]
batch_index = state.batch_indices[0]
action_strings = [state.possible_actions[batch_index][i][0] for i in action_history]
target_values = state.extras[batch_index]
evaluation = False
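        # Raise the executor's log level so that candidate logical forms that fail to execute do
        # not flood the logs with warnings.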
executor_logger = logging.getLogger(
"allennlp_semparse.domain_languages.wikitables_language"
)
executor_logger.setLevel(logging.ERROR)
evaluation = world.evaluate_action_sequence(action_strings, target_values)
if evaluation:
cost = checklist_cost
else:
cost = checklist_cost + (1 - self._checklist_cost_weight) * denotation_cost
return cost
def get_metrics(self, reset: bool = False) -> Dict[str, float]:
"""
        The base class returns a dict with logical form retrieval accuracy, denotation accuracy,
        and logical form percentage metrics. We add the agenda coverage metric here.
"""
metrics = super().get_metrics(reset)
metrics["agenda_coverage"] = self._agenda_coverage.get_metric(reset)
return metrics
| allennlp-semparse-master | allennlp_semparse/models/wikitables/wikitables_erm_semantic_parser.py |
import logging
import os
from functools import partial
from typing import Any, Callable, Dict, List, Tuple, Union
import torch
from allennlp.data.vocabulary import Vocabulary
from allennlp.models.archival import Archive, load_archive
from allennlp.models.model import Model
from allennlp.modules import Attention, Seq2SeqEncoder, TextFieldEmbedder
from allennlp.nn import Activation
from allennlp.training.metrics import Average
from allennlp_semparse.domain_languages import NlvrLanguage
from allennlp_semparse.fields.production_rule_field import ProductionRule
from allennlp_semparse.models.nlvr.nlvr_semantic_parser import NlvrSemanticParser
from allennlp_semparse.state_machines.states import ChecklistStatelet, CoverageState
from allennlp_semparse.state_machines.trainers import DecoderTrainer, ExpectedRiskMinimization
from allennlp_semparse.state_machines.transition_functions import CoverageTransitionFunction
logger = logging.getLogger(__name__)
@Model.register("nlvr_coverage_parser")
class NlvrCoverageSemanticParser(NlvrSemanticParser):
"""
    ``NlvrCoverageSemanticParser`` is an ``NlvrSemanticParser`` that gets around the problem of lack
of annotated logical forms by maximizing coverage of the output sequences over a prespecified
agenda. In addition to the signal from coverage, we also compute the denotations given by the
logical forms and define a hybrid cost based on coverage and denotation errors. The training
process then minimizes the expected value of this cost over an approximate set of logical forms
produced by the parser, obtained by performing beam search.
Parameters
----------
vocab : ``Vocabulary``
Passed to super-class.
sentence_embedder : ``TextFieldEmbedder``
Passed to super-class.
action_embedding_dim : ``int``
Passed to super-class.
encoder : ``Seq2SeqEncoder``
Passed to super-class.
attention : ``Attention``
We compute an attention over the input question at each step of the decoder, using the
decoder hidden state as the query. Passed to the TransitionFunction.
beam_size : ``int``
Beam size for the beam search used during training.
max_num_finished_states : ``int``, optional (default=None)
Maximum number of finished states the trainer should compute costs for.
normalize_beam_score_by_length : ``bool``, optional (default=False)
Should the log probabilities be normalized by length before renormalizing them? Edunov et
al. do this in their work, but we found that not doing it works better. It's possible they
did this because their task is NMT, and longer decoded sequences are not necessarily worse,
and shouldn't be penalized, while we will mostly want to penalize longer logical forms.
max_decoding_steps : ``int``
Maximum number of steps for the beam search during training.
dropout : ``float``, optional (default=0.0)
Probability of dropout to apply on encoder outputs, decoder outputs and predicted actions.
checklist_cost_weight : ``float``, optional (default=0.6)
Mixture weight (0-1) for combining coverage cost and denotation cost. As this increases, we
weigh the coverage cost higher, with a value of 1.0 meaning that we do not care about
denotation accuracy.
dynamic_cost_weight : ``Dict[str, Union[int, float]]``, optional (default=None)
        A dict containing keys ``wait_num_epochs`` and ``rate`` indicating the number of epochs
after which we should start decreasing the weight on checklist cost in favor of denotation
cost, and the rate at which we should do it. We will decrease the weight in the following
way - ``checklist_cost_weight = checklist_cost_weight - rate * checklist_cost_weight``
starting at the appropriate epoch. The weight will remain constant if this is not provided.
penalize_non_agenda_actions : ``bool``, optional (default=False)
Should we penalize the model for producing terminal actions that are outside the agenda?
initial_mml_model_file : ``str`` , optional (default=None)
If you want to initialize this model using weights from another model trained using MML,
pass the path to the ``model.tar.gz`` file of that model here.
"""
def __init__(
self,
vocab: Vocabulary,
sentence_embedder: TextFieldEmbedder,
action_embedding_dim: int,
encoder: Seq2SeqEncoder,
attention: Attention,
beam_size: int,
max_decoding_steps: int,
max_num_finished_states: int = None,
dropout: float = 0.0,
normalize_beam_score_by_length: bool = False,
checklist_cost_weight: float = 0.6,
dynamic_cost_weight: Dict[str, Union[int, float]] = None,
penalize_non_agenda_actions: bool = False,
initial_mml_model_file: str = None,
) -> None:
super(NlvrCoverageSemanticParser, self).__init__(
vocab=vocab,
sentence_embedder=sentence_embedder,
action_embedding_dim=action_embedding_dim,
encoder=encoder,
dropout=dropout,
)
self._agenda_coverage = Average()
self._decoder_trainer: DecoderTrainer[
Callable[[CoverageState], torch.Tensor]
] = ExpectedRiskMinimization(
beam_size=beam_size,
normalize_by_length=normalize_beam_score_by_length,
max_decoding_steps=max_decoding_steps,
max_num_finished_states=max_num_finished_states,
)
# Instantiating an empty NlvrLanguage just to get the number of terminals.
self._terminal_productions = set(NlvrLanguage(set()).terminal_productions.values())
self._decoder_step = CoverageTransitionFunction(
encoder_output_dim=self._encoder.get_output_dim(),
action_embedding_dim=action_embedding_dim,
input_attention=attention,
activation=Activation.by_name("tanh")(),
add_action_bias=False,
dropout=dropout,
)
self._checklist_cost_weight = checklist_cost_weight
self._dynamic_cost_wait_epochs = None
self._dynamic_cost_rate = None
if dynamic_cost_weight:
self._dynamic_cost_wait_epochs = dynamic_cost_weight["wait_num_epochs"]
self._dynamic_cost_rate = dynamic_cost_weight["rate"]
self._penalize_non_agenda_actions = penalize_non_agenda_actions
self._last_epoch_in_forward: int = None
# TODO (pradeep): Checking whether file exists here to avoid raising an error when we've
# copied a trained ERM model from a different machine and the original MML model that was
# used to initialize it does not exist on the current machine. This may not be the best
# solution for the problem.
if initial_mml_model_file is not None:
if os.path.isfile(initial_mml_model_file):
archive = load_archive(initial_mml_model_file)
self._initialize_weights_from_archive(archive)
else:
# A model file is passed, but it does not exist. This is expected to happen when
# you're using a trained ERM model to decode. But it may also happen if the path to
# the file is really just incorrect. So throwing a warning.
logger.warning(
"MML model file for initializing weights is passed, but does not exist."
" This is fine if you're just decoding."
)
def _initialize_weights_from_archive(self, archive: Archive) -> None:
logger.info("Initializing weights from MML model.")
model_parameters = dict(self.named_parameters())
archived_parameters = dict(archive.model.named_parameters())
sentence_embedder_weight = "_sentence_embedder.token_embedder_tokens.weight"
if (
sentence_embedder_weight not in archived_parameters
or sentence_embedder_weight not in model_parameters
):
raise RuntimeError(
"When initializing model weights from an MML model, we need "
"the sentence embedder to be a TokenEmbedder using namespace called "
"tokens."
)
for name, weights in archived_parameters.items():
if name in model_parameters:
if name == "_sentence_embedder.token_embedder_tokens.weight":
# The shapes of embedding weights will most likely differ between the two models
# because the vocabularies will most likely be different. We will get a mapping
# of indices from this model's token indices to the archived model's and copy
# the tensor accordingly.
vocab_index_mapping = self._get_vocab_index_mapping(archive.model.vocab)
archived_embedding_weights = weights.data
new_weights = model_parameters[name].data.clone()
for index, archived_index in vocab_index_mapping:
new_weights[index] = archived_embedding_weights[archived_index]
logger.info(
"Copied embeddings of %d out of %d tokens",
len(vocab_index_mapping),
new_weights.size()[0],
)
else:
new_weights = weights.data
logger.info("Copying parameter %s", name)
model_parameters[name].data.copy_(new_weights)
def _get_vocab_index_mapping(self, archived_vocab: Vocabulary) -> List[Tuple[int, int]]:
vocab_index_mapping: List[Tuple[int, int]] = []
for index in range(self.vocab.get_vocab_size(namespace="tokens")):
token = self.vocab.get_token_from_index(index=index, namespace="tokens")
archived_token_index = archived_vocab.get_token_index(token, namespace="tokens")
# Checking if we got the UNK token index, because we don't want all new token
# representations initialized to UNK token's representation. We do that by checking if
# the two tokens are the same. They will not be if the token at the archived index is
# UNK.
if (
archived_vocab.get_token_from_index(archived_token_index, namespace="tokens")
== token
):
vocab_index_mapping.append((index, archived_token_index))
return vocab_index_mapping
def forward( # type: ignore
self,
sentence: Dict[str, torch.LongTensor],
worlds: List[List[NlvrLanguage]],
actions: List[List[ProductionRule]],
agenda: torch.LongTensor,
identifier: List[str] = None,
labels: torch.LongTensor = None,
metadata: List[Dict[str, Any]] = None,
) -> Dict[str, torch.Tensor]:
"""
        Decoder logic for producing type-constrained target sequences that maximize coverage of
        their respective agendas, and minimize a denotation-based loss.
"""
if self._dynamic_cost_rate is not None:
# This could be added back pretty easily with an EpochCallback passed to the Trainer (it
# just has to set the epoch number on the model, which could then be queried in here).
logger.warning(
"Dynamic cost rate functionality was removed in AllenNLP 1.0. If you want this, "
"use version 0.9. We will just use the static checklist cost weight."
)
batch_size = len(worlds)
initial_rnn_state = self._get_initial_rnn_state(sentence)
initial_score_list = [agenda.new_zeros(1, dtype=torch.float) for i in range(batch_size)]
# TODO (pradeep): Assuming all worlds give the same set of valid actions.
initial_grammar_state = [
self._create_grammar_state(worlds[i][0], actions[i]) for i in range(batch_size)
]
label_strings = self._get_label_strings(labels) if labels is not None else None
# Each instance's agenda is of size (agenda_size, 1)
# TODO(mattg): It looks like the agenda is only ever used on the CPU. In that case, it's a
# waste to copy it to the GPU and then back, and this should probably be a MetadataField.
agenda_list = [agenda[i] for i in range(batch_size)]
initial_checklist_states = []
for instance_actions, instance_agenda in zip(actions, agenda_list):
checklist_info = self._get_checklist_info(instance_agenda, instance_actions)
checklist_target, terminal_actions, checklist_mask = checklist_info
initial_checklist = checklist_target.new_zeros(checklist_target.size())
initial_checklist_states.append(
ChecklistStatelet(
terminal_actions=terminal_actions,
checklist_target=checklist_target,
checklist_mask=checklist_mask,
checklist=initial_checklist,
)
)
initial_state = CoverageState(
batch_indices=list(range(batch_size)),
action_history=[[] for _ in range(batch_size)],
score=initial_score_list,
rnn_state=initial_rnn_state,
grammar_state=initial_grammar_state,
possible_actions=actions,
extras=label_strings,
checklist_state=initial_checklist_states,
)
if not self.training:
initial_state.debug_info = [[] for _ in range(batch_size)]
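        # Pull the agenda action indices onto the CPU; they are only consumed by
        # ``_update_metrics`` when computing agenda coverage.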
agenda_data = [agenda_[:, 0].cpu().data for agenda_ in agenda_list]
outputs = self._decoder_trainer.decode( # type: ignore
initial_state, self._decoder_step, partial(self._get_state_cost, worlds)
)
if identifier is not None:
outputs["identifier"] = identifier
best_final_states = outputs["best_final_states"]
best_action_sequences = {}
for batch_index, states in best_final_states.items():
best_action_sequences[batch_index] = [state.action_history[0] for state in states]
batch_action_strings = self._get_action_strings(actions, best_action_sequences)
batch_denotations = self._get_denotations(batch_action_strings, worlds)
if labels is not None:
# We're either training or validating.
self._update_metrics(
action_strings=batch_action_strings,
worlds=worlds,
label_strings=label_strings,
possible_actions=actions,
agenda_data=agenda_data,
)
else:
# We're testing.
if metadata is not None:
outputs["sentence_tokens"] = [x["sentence_tokens"] for x in metadata]
outputs["debug_info"] = []
for i in range(batch_size):
outputs["debug_info"].append(best_final_states[i][0].debug_info[0]) # type: ignore
outputs["best_action_strings"] = batch_action_strings
outputs["denotations"] = batch_denotations
action_mapping = {}
for batch_index, batch_actions in enumerate(actions):
for action_index, action in enumerate(batch_actions):
action_mapping[(batch_index, action_index)] = action[0]
outputs["action_mapping"] = action_mapping
return outputs
def _get_checklist_info(
self, agenda: torch.LongTensor, all_actions: List[ProductionRule]
) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
"""
Takes an agenda and a list of all actions and returns a target checklist against which the
checklist at each state will be compared to compute a loss, indices of ``terminal_actions``,
and a ``checklist_mask`` that indicates which of the terminal actions are relevant for
        checklist loss computation. If ``self.penalize_non_agenda_actions`` is set to ``True``,
``checklist_mask`` will be all 1s (i.e., all terminal actions are relevant). If it is set to
``False``, indices of all terminals that are not in the agenda will be masked.
Parameters
----------
``agenda`` : ``torch.LongTensor``
Agenda of one instance of size ``(agenda_size, 1)``.
``all_actions`` : ``List[ProductionRule]``
All actions for one instance.
"""
terminal_indices = []
target_checklist_list = []
agenda_indices_set = {int(x) for x in agenda.squeeze(0).detach().cpu().numpy()}
for index, action in enumerate(all_actions):
# Each action is a ProductionRule, a tuple where the first item is the production
# rule string.
if action[0] in self._terminal_productions:
terminal_indices.append([index])
if index in agenda_indices_set:
target_checklist_list.append([1])
else:
target_checklist_list.append([0])
# We want to return checklist target and terminal actions that are column vectors to make
# computing softmax over the difference between checklist and target easier.
# (num_terminals, 1)
terminal_actions = agenda.new_tensor(terminal_indices)
# (num_terminals, 1)
target_checklist = agenda.new_tensor(target_checklist_list, dtype=torch.float)
if self._penalize_non_agenda_actions:
# All terminal actions are relevant
checklist_mask = torch.ones_like(target_checklist)
else:
checklist_mask = (target_checklist != 0).float()
return target_checklist, terminal_actions, checklist_mask
def _update_metrics(
self,
action_strings: List[List[List[str]]],
worlds: List[List[NlvrLanguage]],
label_strings: List[List[str]],
possible_actions: List[List[ProductionRule]],
agenda_data: List[List[int]],
) -> None:
# TODO(pradeep): Move this to the base class.
# TODO(pradeep): action_strings contains k-best lists. This method only uses the top decoded
# sequence currently. Maybe define top-k metrics?
batch_size = len(worlds)
for i in range(batch_size):
# Using only the top decoded sequence per instance.
instance_action_strings = action_strings[i][0] if action_strings[i] else []
sequence_is_correct = [False]
in_agenda_ratio = 0.0
instance_possible_actions = possible_actions[i]
if instance_action_strings:
terminal_agenda_actions = []
for rule_id in agenda_data[i]:
if rule_id == -1:
continue
action_string = instance_possible_actions[rule_id][0]
right_side = action_string.split(" -> ")[1]
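                    # Heuristically keep only terminal agenda actions: productions whose
                    # right-hand side is a number or a single token, rather than a bracketed
                    # sequence of non-terminals.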
if right_side.isdigit() or ("[" not in right_side and len(right_side) > 1):
terminal_agenda_actions.append(action_string)
actions_in_agenda = [
action in instance_action_strings for action in terminal_agenda_actions
]
in_agenda_ratio = sum(actions_in_agenda) / len(actions_in_agenda)
instance_label_strings = label_strings[i]
instance_worlds = worlds[i]
sequence_is_correct = self._check_denotation(
instance_action_strings, instance_label_strings, instance_worlds
)
for correct_in_world in sequence_is_correct:
self._denotation_accuracy(1 if correct_in_world else 0)
self._consistency(1 if all(sequence_is_correct) else 0)
self._agenda_coverage(in_agenda_ratio)
def get_metrics(self, reset: bool = False) -> Dict[str, float]:
return {
"denotation_accuracy": self._denotation_accuracy.get_metric(reset),
"consistency": self._consistency.get_metric(reset),
"agenda_coverage": self._agenda_coverage.get_metric(reset),
}
def _get_state_cost(
self, batch_worlds: List[List[NlvrLanguage]], state: CoverageState
) -> torch.Tensor:
"""
Return the cost of a finished state. Since it is a finished state, the group size will be
1, and hence we'll return just one cost.
The ``batch_worlds`` parameter here is because we need the world to check the denotation
accuracy of the action sequence in the finished state. Instead of adding a field to the
``State`` object just for this method, we take the ``World`` as a parameter here.
"""
if not state.is_finished():
raise RuntimeError("_get_state_cost() is not defined for unfinished states!")
instance_worlds = batch_worlds[state.batch_indices[0]]
# Our checklist cost is a sum of squared error from where we want to be, making sure we
# take into account the mask.
checklist_balance = state.checklist_state[0].get_balance()
checklist_cost = torch.sum((checklist_balance) ** 2)
# This is the number of items on the agenda that we want to see in the decoded sequence.
# We use this as the denotation cost if the path is incorrect.
# Note: If we are penalizing the model for producing non-agenda actions, this is not the
# upper limit on the checklist cost. That would be the number of terminal actions.
denotation_cost = torch.sum(state.checklist_state[0].checklist_target.float())
checklist_cost = self._checklist_cost_weight * checklist_cost
# TODO (pradeep): The denotation based cost below is strict. May be define a cost based on
# how many worlds the logical form is correct in?
# extras being None happens when we are testing. We do not care about the cost
# then. TODO (pradeep): Make this cleaner.
if state.extras is None or all(self._check_state_denotations(state, instance_worlds)):
cost = checklist_cost
else:
cost = checklist_cost + (1 - self._checklist_cost_weight) * denotation_cost
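        # For example, with checklist_cost_weight = 0.6, a raw checklist cost of 2.0 and a
        # denotation cost of 5.0, a parse that is correct in all worlds costs 0.6 * 2.0 = 1.2,
        # while an incorrect one costs 1.2 + 0.4 * 5.0 = 3.2.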
return cost
def _get_state_info(
self, state: CoverageState, batch_worlds: List[List[NlvrLanguage]]
) -> Dict[str, List]:
"""
        This method is here for debugging purposes, in case you want to look at what the model
is learning. It may be inefficient to call it while training the model on real data.
"""
if len(state.batch_indices) == 1 and state.is_finished():
costs = [float(self._get_state_cost(batch_worlds, state).detach().cpu().numpy())]
else:
costs = []
model_scores = [float(score.detach().cpu().numpy()) for score in state.score]
all_actions = state.possible_actions[0]
action_sequences = [
[self._get_action_string(all_actions[action]) for action in history]
for history in state.action_history
]
agenda_sequences = []
all_agenda_indices = []
for checklist_state in state.checklist_state:
agenda_indices = []
for action, is_wanted in zip(
checklist_state.terminal_actions, checklist_state.checklist_target
):
action_int = int(action.detach().cpu().numpy())
is_wanted_int = int(is_wanted.detach().cpu().numpy())
if is_wanted_int != 0:
agenda_indices.append(action_int)
agenda_sequences.append(
[self._get_action_string(all_actions[action]) for action in agenda_indices]
)
all_agenda_indices.append(agenda_indices)
return {
"agenda": agenda_sequences,
"agenda_indices": all_agenda_indices,
"history": action_sequences,
"history_indices": state.action_history,
"costs": costs,
"scores": model_scores,
}
| allennlp-semparse-master | allennlp_semparse/models/nlvr/nlvr_coverage_semantic_parser.py |
allennlp-semparse-master | allennlp_semparse/models/nlvr/__init__.py |
|
import logging
from typing import Any, Dict, List
import torch
from allennlp.data.vocabulary import Vocabulary
from allennlp.models.model import Model
from allennlp.modules import Attention, Seq2SeqEncoder, TextFieldEmbedder
from allennlp.nn import Activation, util
from allennlp_semparse.domain_languages import NlvrLanguage
from allennlp_semparse.fields.production_rule_field import ProductionRule
from allennlp_semparse.models.nlvr.nlvr_semantic_parser import NlvrSemanticParser
from allennlp_semparse.state_machines import BeamSearch
from allennlp_semparse.state_machines.states import GrammarBasedState
from allennlp_semparse.state_machines.trainers import MaximumMarginalLikelihood
from allennlp_semparse.state_machines.transition_functions import BasicTransitionFunction
logger = logging.getLogger(__name__)
@Model.register("nlvr_direct_parser")
class NlvrDirectSemanticParser(NlvrSemanticParser):
"""
``NlvrDirectSemanticParser`` is an ``NlvrSemanticParser`` that gets around the problem of lack
of logical form annotations by maximizing the marginal likelihood of an approximate set of target
sequences that yield the correct denotation. The main difference between this parser and
``NlvrCoverageSemanticParser`` is that while this parser takes the output of an offline search
process as the set of target sequences for training, the latter performs search during training.
Parameters
----------
vocab : ``Vocabulary``
Passed to super-class.
sentence_embedder : ``TextFieldEmbedder``
Passed to super-class.
action_embedding_dim : ``int``
Passed to super-class.
encoder : ``Seq2SeqEncoder``
Passed to super-class.
attention : ``Attention``
We compute an attention over the input question at each step of the decoder, using the
decoder hidden state as the query. Passed to the TransitionFunction.
decoder_beam_search : ``BeamSearch``
Beam search used to retrieve best sequences after training.
max_decoding_steps : ``int``
Maximum number of steps for beam search after training.
dropout : ``float``, optional (default=0.0)
Probability of dropout to apply on encoder outputs, decoder outputs and predicted actions.
"""
def __init__(
self,
vocab: Vocabulary,
sentence_embedder: TextFieldEmbedder,
action_embedding_dim: int,
encoder: Seq2SeqEncoder,
attention: Attention,
decoder_beam_search: BeamSearch,
max_decoding_steps: int,
dropout: float = 0.0,
) -> None:
super(NlvrDirectSemanticParser, self).__init__(
vocab=vocab,
sentence_embedder=sentence_embedder,
action_embedding_dim=action_embedding_dim,
encoder=encoder,
dropout=dropout,
)
self._decoder_trainer = MaximumMarginalLikelihood()
self._decoder_step = BasicTransitionFunction(
encoder_output_dim=self._encoder.get_output_dim(),
action_embedding_dim=action_embedding_dim,
input_attention=attention,
activation=Activation.by_name("tanh")(),
add_action_bias=False,
dropout=dropout,
)
self._decoder_beam_search = decoder_beam_search
self._max_decoding_steps = max_decoding_steps
self._action_padding_index = -1
def forward( # type: ignore
self,
sentence: Dict[str, torch.LongTensor],
worlds: List[List[NlvrLanguage]],
actions: List[List[ProductionRule]],
identifier: List[str] = None,
target_action_sequences: torch.LongTensor = None,
labels: torch.LongTensor = None,
metadata: List[Dict[str, Any]] = None,
) -> Dict[str, torch.Tensor]:
"""
        Decoder logic for producing type-constrained target sequences, trained to maximize the
        marginal likelihood over a set of approximate logical forms.
"""
batch_size = len(worlds)
initial_rnn_state = self._get_initial_rnn_state(sentence)
token_ids = util.get_token_ids_from_text_field_tensors(sentence)
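        # Every instance starts decoding with a score (log probability) of zero, created on the
        # same device as the input tokens.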
initial_score_list = [token_ids.new_zeros(1, dtype=torch.float) for i in range(batch_size)]
label_strings = self._get_label_strings(labels) if labels is not None else None
# TODO (pradeep): Assuming all worlds give the same set of valid actions.
initial_grammar_state = [
self._create_grammar_state(worlds[i][0], actions[i]) for i in range(batch_size)
]
initial_state = GrammarBasedState(
batch_indices=list(range(batch_size)),
action_history=[[] for _ in range(batch_size)],
score=initial_score_list,
rnn_state=initial_rnn_state,
grammar_state=initial_grammar_state,
possible_actions=actions,
extras=label_strings,
)
if target_action_sequences is not None:
# Remove the trailing dimension (from ListField[ListField[IndexField]]).
target_action_sequences = target_action_sequences.squeeze(-1)
target_mask = target_action_sequences != self._action_padding_index
else:
target_mask = None
outputs: Dict[str, torch.Tensor] = {}
if identifier is not None:
outputs["identifier"] = identifier
if target_action_sequences is not None:
outputs = self._decoder_trainer.decode(
initial_state, self._decoder_step, (target_action_sequences, target_mask)
)
if not self.training:
initial_state.debug_info = [[] for _ in range(batch_size)]
best_final_states = self._decoder_beam_search.search(
self._max_decoding_steps,
initial_state,
self._decoder_step,
keep_final_unfinished_states=False,
)
best_action_sequences: Dict[int, List[List[int]]] = {}
for i in range(batch_size):
# Decoding may not have terminated with any completed logical forms, if `num_steps`
# isn't long enough (or if the model is not trained enough and gets into an
# infinite action loop).
if i in best_final_states:
best_action_indices = [best_final_states[i][0].action_history[0]]
best_action_sequences[i] = best_action_indices
batch_action_strings = self._get_action_strings(actions, best_action_sequences)
batch_denotations = self._get_denotations(batch_action_strings, worlds)
if target_action_sequences is not None:
self._update_metrics(
action_strings=batch_action_strings, worlds=worlds, label_strings=label_strings
)
else:
if metadata is not None:
outputs["sentence_tokens"] = [x["sentence_tokens"] for x in metadata]
outputs["debug_info"] = []
for i in range(batch_size):
outputs["debug_info"].append(
best_final_states[i][0].debug_info[0]
) # type: ignore
outputs["best_action_strings"] = batch_action_strings
outputs["denotations"] = batch_denotations
action_mapping = {}
for batch_index, batch_actions in enumerate(actions):
for action_index, action in enumerate(batch_actions):
action_mapping[(batch_index, action_index)] = action[0]
outputs["action_mapping"] = action_mapping
return outputs
def _update_metrics(
self,
action_strings: List[List[List[str]]],
worlds: List[List[NlvrLanguage]],
label_strings: List[List[str]],
) -> None:
# TODO(pradeep): Move this to the base class.
# TODO(pradeep): Using only the best decoded sequence. Define metrics for top-k sequences?
batch_size = len(worlds)
for i in range(batch_size):
instance_action_strings = action_strings[i]
sequence_is_correct = [False]
if instance_action_strings:
instance_label_strings = label_strings[i]
instance_worlds = worlds[i]
# Taking only the best sequence.
sequence_is_correct = self._check_denotation(
instance_action_strings[0], instance_label_strings, instance_worlds
)
for correct_in_world in sequence_is_correct:
self._denotation_accuracy(1 if correct_in_world else 0)
self._consistency(1 if all(sequence_is_correct) else 0)
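    # A hedged, illustrative example of the two metrics above (counts are made up): if the
    # best decoded logical form for an instance is correct in 3 of its 4 worlds, the
    # denotation-accuracy metric receives three 1s and one 0, while consistency receives a
    # single 0 because the form was not correct in every world.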
def get_metrics(self, reset: bool = False) -> Dict[str, float]:
return {
"denotation_accuracy": self._denotation_accuracy.get_metric(reset),
"consistency": self._consistency.get_metric(reset),
}
| allennlp-semparse-master | allennlp_semparse/models/nlvr/nlvr_direct_semantic_parser.py |
import logging
from typing import Dict, List, Tuple
import torch
from allennlp.data.vocabulary import Vocabulary
from allennlp.models.model import Model
from allennlp.modules import TextFieldEmbedder, Seq2SeqEncoder, Embedding
from allennlp.nn import util
from allennlp.training.metrics import Average
from allennlp_semparse.domain_languages import NlvrLanguage, START_SYMBOL
from allennlp_semparse.fields.production_rule_field import ProductionRule
from allennlp_semparse.state_machines.states import GrammarBasedState, GrammarStatelet, RnnStatelet
logger = logging.getLogger(__name__)
class NlvrSemanticParser(Model):
"""
``NlvrSemanticParser`` is a semantic parsing model built for the NLVR domain. This is an
abstract class and does not have a ``forward`` method implemented. Classes that inherit from
this class are expected to define their own logic depending on the kind of supervision they
use. Accordingly, they should use the appropriate ``DecoderTrainer``. This class provides some
common functionality for things like defining an initial ``RnnStatelet``, embedding actions,
evaluating the denotations of completed logical forms, etc. There is a lot of overlap with
``WikiTablesSemanticParser`` here. We may want to eventually move the common functionality into
a more general transition-based parsing class.
Parameters
----------
vocab : ``Vocabulary``
sentence_embedder : ``TextFieldEmbedder``
Embedder for sentences.
action_embedding_dim : ``int``
Dimension to use for action embeddings.
encoder : ``Seq2SeqEncoder``
The encoder to use for the input question.
dropout : ``float``, optional (default=0.0)
Dropout on the encoder outputs.
rule_namespace : ``str``, optional (default=rule_labels)
The vocabulary namespace to use for production rules. The default corresponds to the
default used in the dataset reader, so you likely don't need to modify this.
"""
def __init__(
self,
vocab: Vocabulary,
sentence_embedder: TextFieldEmbedder,
action_embedding_dim: int,
encoder: Seq2SeqEncoder,
dropout: float = 0.0,
rule_namespace: str = "rule_labels",
) -> None:
super(NlvrSemanticParser, self).__init__(vocab=vocab)
self._sentence_embedder = sentence_embedder
self._denotation_accuracy = Average()
self._consistency = Average()
self._encoder = encoder
if dropout > 0:
self._dropout = torch.nn.Dropout(p=dropout)
else:
self._dropout = lambda x: x
self._rule_namespace = rule_namespace
self._action_embedder = Embedding(
num_embeddings=vocab.get_vocab_size(self._rule_namespace),
embedding_dim=action_embedding_dim,
)
# This is what we pass as input in the first step of decoding, when we don't have a
# previous action.
self._first_action_embedding = torch.nn.Parameter(torch.FloatTensor(action_embedding_dim))
torch.nn.init.normal_(self._first_action_embedding)
def forward(self): # type: ignore
# Sub-classes should define their own logic here.
raise NotImplementedError
def _get_initial_rnn_state(self, sentence: Dict[str, torch.LongTensor]):
embedded_input = self._sentence_embedder(sentence)
# (batch_size, sentence_length)
sentence_mask = util.get_text_field_mask(sentence)
batch_size = embedded_input.size(0)
# (batch_size, sentence_length, encoder_output_dim)
encoder_outputs = self._dropout(self._encoder(embedded_input, sentence_mask))
final_encoder_output = util.get_final_encoder_states(
encoder_outputs, sentence_mask, self._encoder.is_bidirectional()
)
memory_cell = encoder_outputs.new_zeros(batch_size, self._encoder.get_output_dim())
attended_sentence, _ = self._decoder_step.attend_on_question(
final_encoder_output, encoder_outputs, sentence_mask
)
encoder_outputs_list = [encoder_outputs[i] for i in range(batch_size)]
sentence_mask_list = [sentence_mask[i] for i in range(batch_size)]
initial_rnn_state = []
for i in range(batch_size):
initial_rnn_state.append(
RnnStatelet(
final_encoder_output[i],
memory_cell[i],
self._first_action_embedding,
attended_sentence[i],
encoder_outputs_list,
sentence_mask_list,
)
)
return initial_rnn_state
def _get_label_strings(self, labels):
# TODO (pradeep): Use an unindexed field for labels?
labels_data = labels.detach().cpu()
label_strings: List[List[str]] = []
for instance_labels_data in labels_data:
label_strings.append([])
for label in instance_labels_data:
label_int = int(label)
if label_int == -1:
# Padding, because not all instances have the same number of labels.
continue
label_strings[-1].append(self.vocab.get_token_from_index(label_int, "denotations"))
return label_strings
@classmethod
def _get_action_strings(
cls,
possible_actions: List[List[ProductionRule]],
action_indices: Dict[int, List[List[int]]],
) -> List[List[List[str]]]:
"""
Takes a list of possible actions and indices of decoded actions into those possible actions
for a batch and returns sequences of action strings. We assume ``action_indices`` is a dict
mapping batch indices to k-best decoded sequence lists.
"""
all_action_strings: List[List[List[str]]] = []
batch_size = len(possible_actions)
for i in range(batch_size):
batch_actions = possible_actions[i]
batch_best_sequences = action_indices[i] if i in action_indices else []
# This will append an empty list to ``all_action_strings`` if ``batch_best_sequences``
# is empty.
action_strings = [
[batch_actions[rule_id][0] for rule_id in sequence]
for sequence in batch_best_sequences
]
all_action_strings.append(action_strings)
return all_action_strings
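    # A hedged, illustrative example of the mapping above (rule strings and indices are made
    # up): if ``possible_actions`` for one instance is a list whose entries have first
    # elements ["@start@ -> bool", "bool -> object_exists"], and ``action_indices`` is
    # {0: [[0, 1]]}, the result is [[["@start@ -> bool", "bool -> object_exists"]]]:
    # one instance, one decoded sequence, two action strings.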
@staticmethod
def _get_denotations(
action_strings: List[List[List[str]]], worlds: List[List[NlvrLanguage]]
) -> List[List[List[str]]]:
all_denotations: List[List[List[str]]] = []
for instance_worlds, instance_action_sequences in zip(worlds, action_strings):
denotations: List[List[str]] = []
for instance_action_strings in instance_action_sequences:
if not instance_action_strings:
continue
logical_form = instance_worlds[0].action_sequence_to_logical_form(
instance_action_strings
)
instance_denotations: List[str] = []
for world in instance_worlds:
# Some of the worlds can be None for instances that come with less than 4 worlds
# because of padding.
if world is not None:
instance_denotations.append(str(world.execute(logical_form)))
denotations.append(instance_denotations)
all_denotations.append(denotations)
return all_denotations
@staticmethod
def _check_denotation(
action_sequence: List[str], labels: List[str], worlds: List[NlvrLanguage]
) -> List[bool]:
is_correct = []
for world, label in zip(worlds, labels):
logical_form = world.action_sequence_to_logical_form(action_sequence)
denotation = world.execute(logical_form)
is_correct.append(str(denotation).lower() == label)
return is_correct
def _create_grammar_state(
self, world: NlvrLanguage, possible_actions: List[ProductionRule]
) -> GrammarStatelet:
valid_actions = world.get_nonterminal_productions()
action_mapping = {}
for i, action in enumerate(possible_actions):
action_mapping[action[0]] = i
translated_valid_actions: Dict[
str, Dict[str, Tuple[torch.Tensor, torch.Tensor, List[int]]]
] = {}
for key, action_strings in valid_actions.items():
translated_valid_actions[key] = {}
# `key` here is a non-terminal from the grammar, and `action_strings` are all the valid
# productions of that non-terminal.
action_indices = [action_mapping[action_string] for action_string in action_strings]
# All actions in NLVR are global actions.
global_actions = [(possible_actions[index][2], index) for index in action_indices]
# Then we get the embedded representations of the global actions.
global_action_tensors, global_action_ids = zip(*global_actions)
global_action_tensor = torch.cat(global_action_tensors, dim=0)
global_input_embeddings = self._action_embedder(global_action_tensor)
translated_valid_actions[key]["global"] = (
global_input_embeddings,
global_input_embeddings,
list(global_action_ids),
)
return GrammarStatelet([START_SYMBOL], translated_valid_actions, world.is_nonterminal)
def make_output_human_readable(
self, output_dict: Dict[str, torch.Tensor]
) -> Dict[str, torch.Tensor]:
"""
        This method overrides ``Model.make_output_human_readable``, which gets called after
        ``Model.forward``, at test time, to finalize predictions. We only transform the action
        string sequences into logical forms here.
"""
best_action_strings = output_dict["best_action_strings"]
# Instantiating an empty world for getting logical forms.
world = NlvrLanguage(set())
logical_forms = []
for instance_action_sequences in best_action_strings:
instance_logical_forms = []
for action_strings in instance_action_sequences:
if action_strings:
instance_logical_forms.append(
world.action_sequence_to_logical_form(action_strings)
)
else:
instance_logical_forms.append("")
logical_forms.append(instance_logical_forms)
action_mapping = output_dict["action_mapping"]
best_actions = output_dict["best_action_strings"]
debug_infos = output_dict["debug_info"]
batch_action_info = []
for batch_index, (predicted_actions, debug_info) in enumerate(
zip(best_actions, debug_infos)
):
instance_action_info = []
for predicted_action, action_debug_info in zip(predicted_actions[0], debug_info):
action_info = {}
action_info["predicted_action"] = predicted_action
considered_actions = action_debug_info["considered_actions"]
probabilities = action_debug_info["probabilities"]
actions = []
for action, probability in zip(considered_actions, probabilities):
if action != -1:
actions.append((action_mapping[(batch_index, action)], probability))
actions.sort()
considered_actions, probabilities = zip(*actions)
action_info["considered_actions"] = considered_actions
action_info["action_probabilities"] = probabilities
action_info["question_attention"] = action_debug_info.get("question_attention", [])
instance_action_info.append(action_info)
batch_action_info.append(instance_action_info)
output_dict["predicted_actions"] = batch_action_info
output_dict["logical_form"] = logical_forms
return output_dict
def _check_state_denotations(
self, state: GrammarBasedState, worlds: List[NlvrLanguage]
) -> List[bool]:
"""
Returns whether action history in the state evaluates to the correct denotations over all
worlds. Only defined when the state is finished.
"""
assert state.is_finished(), "Cannot compute denotations for unfinished states!"
# Since this is a finished state, its group size must be 1.
batch_index = state.batch_indices[0]
instance_label_strings = state.extras[batch_index]
history = state.action_history[0]
all_actions = state.possible_actions[0]
action_sequence = [all_actions[action][0] for action in history]
return self._check_denotation(action_sequence, instance_label_strings, worlds)
| allennlp-semparse-master | allennlp_semparse/models/nlvr/nlvr_semantic_parser.py |
allennlp-semparse-master | allennlp_semparse/models/atis/__init__.py |
|
import logging
from typing import Any, Dict, Iterable, List, Tuple
import difflib
import sqlparse
import torch
from allennlp.common.util import pad_sequence_to_length
from allennlp.data import Vocabulary
from allennlp.models.model import Model
from allennlp.modules import Attention, Seq2SeqEncoder, TextFieldEmbedder, Embedding
from allennlp.nn import util
from allennlp.training.metrics import Average
from allennlp_semparse.fields.production_rule_field import ProductionRule
from allennlp_semparse.parsimonious_languages.worlds import AtisWorld
from allennlp_semparse.parsimonious_languages.contexts.atis_sql_table_context import (
NUMERIC_NONTERMINALS,
)
from allennlp_semparse.parsimonious_languages.contexts.sql_context_utils import (
action_sequence_to_sql,
)
from allennlp_semparse.parsimonious_languages.executors import SqlExecutor
from allennlp_semparse.state_machines import BeamSearch
from allennlp_semparse.state_machines.states import GrammarBasedState
from allennlp_semparse.state_machines.states import GrammarStatelet, RnnStatelet
from allennlp_semparse.state_machines.trainers import MaximumMarginalLikelihood
from allennlp_semparse.state_machines.transition_functions import LinkingTransitionFunction
logger = logging.getLogger(__name__)
@Model.register("atis_parser")
class AtisSemanticParser(Model):
"""
Parameters
----------
vocab : ``Vocabulary``
utterance_embedder : ``TextFieldEmbedder``
Embedder for utterances.
action_embedding_dim : ``int``
Dimension to use for action embeddings.
encoder : ``Seq2SeqEncoder``
The encoder to use for the input utterance.
decoder_beam_search : ``BeamSearch``
Beam search used to retrieve best sequences after training.
max_decoding_steps : ``int``
When we're decoding with a beam search, what's the maximum number of steps we should take?
This only applies at evaluation time, not during training.
input_attention: ``Attention``
We compute an attention over the input utterance at each step of the decoder, using the
decoder hidden state as the query. Passed to the transition function.
add_action_bias : ``bool``, optional (default=True)
If ``True``, we will learn a bias weight for each action that gets used when predicting
that action, in addition to its embedding.
dropout : ``float``, optional (default=0)
If greater than 0, we will apply dropout with this probability after all encoders (pytorch
LSTMs do not apply dropout to their last layer).
rule_namespace : ``str``, optional (default=rule_labels)
The vocabulary namespace to use for production rules. The default corresponds to the
default used in the dataset reader, so you likely don't need to modify this.
database_file: ``str``, optional (default=/atis/atis.db)
The path of the SQLite database when evaluating SQL queries. SQLite is disk based, so we need
the file location to connect to it.
"""
def __init__(
self,
vocab: Vocabulary,
utterance_embedder: TextFieldEmbedder,
action_embedding_dim: int,
encoder: Seq2SeqEncoder,
decoder_beam_search: BeamSearch,
max_decoding_steps: int,
input_attention: Attention,
add_action_bias: bool = True,
training_beam_size: int = None,
decoder_num_layers: int = 1,
dropout: float = 0.0,
rule_namespace: str = "rule_labels",
database_file="/atis/atis.db",
) -> None:
# Atis semantic parser init
super().__init__(vocab)
self._utterance_embedder = utterance_embedder
self._encoder = encoder
self._max_decoding_steps = max_decoding_steps
self._add_action_bias = add_action_bias
if dropout > 0:
self._dropout = torch.nn.Dropout(p=dropout)
else:
self._dropout = lambda x: x
self._rule_namespace = rule_namespace
self._exact_match = Average()
self._valid_sql_query = Average()
self._action_similarity = Average()
self._denotation_accuracy = Average()
self._executor = SqlExecutor(database_file)
self._action_padding_index = -1 # the padding value used by IndexField
num_actions = vocab.get_vocab_size(self._rule_namespace)
if self._add_action_bias:
input_action_dim = action_embedding_dim + 1
else:
input_action_dim = action_embedding_dim
self._action_embedder = Embedding(
num_embeddings=num_actions, embedding_dim=input_action_dim
)
self._output_action_embedder = Embedding(
num_embeddings=num_actions, embedding_dim=action_embedding_dim
)
# This is what we pass as input in the first step of decoding, when we don't have a
# previous action, or a previous utterance attention.
self._first_action_embedding = torch.nn.Parameter(torch.FloatTensor(action_embedding_dim))
self._first_attended_utterance = torch.nn.Parameter(
torch.FloatTensor(encoder.get_output_dim())
)
torch.nn.init.normal_(self._first_action_embedding)
torch.nn.init.normal_(self._first_attended_utterance)
self._num_entity_types = 2 # TODO(kevin): get this in a more principled way somehow?
self._entity_type_decoder_embedding = Embedding(
num_embeddings=self._num_entity_types, embedding_dim=action_embedding_dim
)
self._decoder_num_layers = decoder_num_layers
self._beam_search = decoder_beam_search
self._decoder_trainer = MaximumMarginalLikelihood(training_beam_size)
self._transition_function = LinkingTransitionFunction(
encoder_output_dim=self._encoder.get_output_dim(),
action_embedding_dim=action_embedding_dim,
input_attention=input_attention,
add_action_bias=self._add_action_bias,
dropout=dropout,
num_layers=self._decoder_num_layers,
)
def forward(
self, # type: ignore
utterance: Dict[str, torch.LongTensor],
world: List[AtisWorld],
actions: List[List[ProductionRule]],
linking_scores: torch.Tensor,
target_action_sequence: torch.LongTensor = None,
sql_queries: List[List[str]] = None,
) -> Dict[str, torch.Tensor]:
"""
We set up the initial state for the decoder, and pass that state off to either a DecoderTrainer,
if we're training, or a BeamSearch for inference, if we're not.
Parameters
----------
utterance : Dict[str, torch.LongTensor]
The output of ``TextField.as_array()`` applied on the utterance ``TextField``. This will
be passed through a ``TextFieldEmbedder`` and then through an encoder.
world : ``List[AtisWorld]``
We use a ``MetadataField`` to get the ``World`` for each input instance. Because of
how ``MetadataField`` works, this gets passed to us as a ``List[AtisWorld]``,
actions : ``List[List[ProductionRule]]``
A list of all possible actions for each ``World`` in the batch, indexed into a
``ProductionRule`` using a ``ProductionRuleField``. We will embed all of these
and use the embeddings to determine which action to take at each timestep in the
decoder.
linking_scores: ``torch.Tensor``
            A matrix linking the utterance tokens and the entities. This is a binary matrix that
            is deterministically generated, where each entry indicates whether a token generated an entity.
This tensor has shape ``(batch_size, num_entities, num_utterance_tokens)``.
target_action_sequence : torch.Tensor, optional (default=None)
            The gold action sequence, where each action is an index into the list
of possible actions. This tensor has shape ``(batch_size, sequence_length, 1)``. We remove the
trailing dimension.
sql_queries : List[List[str]], optional (default=None)
A list of the SQL queries that are given during training or validation.
"""
initial_state = self._get_initial_state(utterance, world, actions, linking_scores)
batch_size = linking_scores.shape[0]
if target_action_sequence is not None:
# Remove the trailing dimension (from ListField[ListField[IndexField]]).
target_action_sequence = target_action_sequence.squeeze(-1)
target_mask = target_action_sequence != self._action_padding_index
else:
target_mask = None
if self.training:
# target_action_sequence is of shape (batch_size, 1, sequence_length) here after we unsqueeze it for
# the MML trainer.
return self._decoder_trainer.decode(
initial_state,
self._transition_function,
(target_action_sequence.unsqueeze(1), target_mask.unsqueeze(1)),
)
else:
# TODO(kevin) Move some of this functionality to a separate method for computing validation outputs.
action_mapping = {}
for batch_index, batch_actions in enumerate(actions):
for action_index, action in enumerate(batch_actions):
action_mapping[(batch_index, action_index)] = action[0]
outputs: Dict[str, Any] = {"action_mapping": action_mapping}
outputs["linking_scores"] = linking_scores
if target_action_sequence is not None:
outputs["loss"] = self._decoder_trainer.decode(
initial_state,
self._transition_function,
(target_action_sequence.unsqueeze(1), target_mask.unsqueeze(1)),
)["loss"]
num_steps = self._max_decoding_steps
# This tells the state to start keeping track of debug info, which we'll pass along in
# our output dictionary.
initial_state.debug_info = [[] for _ in range(batch_size)]
best_final_states = self._beam_search.search(
num_steps,
initial_state,
self._transition_function,
keep_final_unfinished_states=False,
)
outputs["best_action_sequence"] = []
outputs["debug_info"] = []
outputs["entities"] = []
outputs["predicted_sql_query"] = []
outputs["sql_queries"] = []
outputs["utterance"] = []
outputs["tokenized_utterance"] = []
for i in range(batch_size):
# Decoding may not have terminated with any completed valid SQL queries, if `num_steps`
# isn't long enough (or if the model is not trained enough and gets into an
# infinite action loop).
if i not in best_final_states:
self._exact_match(0)
self._denotation_accuracy(0)
self._valid_sql_query(0)
self._action_similarity(0)
outputs["predicted_sql_query"].append("")
continue
best_action_indices = best_final_states[i][0].action_history[0]
action_strings = [
action_mapping[(i, action_index)] for action_index in best_action_indices
]
predicted_sql_query = action_sequence_to_sql(action_strings)
if target_action_sequence is not None:
# Use a Tensor, not a Variable, to avoid a memory leak.
targets = target_action_sequence[i].data
                    sequence_in_targets = self._action_history_match(best_action_indices, targets)
self._exact_match(sequence_in_targets)
similarity = difflib.SequenceMatcher(None, best_action_indices, targets)
self._action_similarity(similarity.ratio())
if sql_queries and sql_queries[i]:
denotation_correct = self._executor.evaluate_sql_query(
predicted_sql_query, sql_queries[i]
)
self._denotation_accuracy(denotation_correct)
outputs["sql_queries"].append(sql_queries[i])
outputs["utterance"].append(world[i].utterances[-1])
outputs["tokenized_utterance"].append(
[token.text for token in world[i].tokenized_utterances[-1]]
)
outputs["entities"].append(world[i].entities)
outputs["best_action_sequence"].append(action_strings)
outputs["predicted_sql_query"].append(
sqlparse.format(predicted_sql_query, reindent=True)
)
outputs["debug_info"].append(best_final_states[i][0].debug_info[0]) # type: ignore
return outputs
def _get_initial_state(
self,
utterance: Dict[str, torch.LongTensor],
worlds: List[AtisWorld],
actions: List[List[ProductionRule]],
linking_scores: torch.Tensor,
) -> GrammarBasedState:
embedded_utterance = self._utterance_embedder(utterance)
utterance_mask = util.get_text_field_mask(utterance)
batch_size = embedded_utterance.size(0)
num_entities = max([len(world.entities) for world in worlds])
# entity_types: tensor with shape (batch_size, num_entities)
entity_types, _ = self._get_type_vector(worlds, num_entities, embedded_utterance)
# (batch_size, num_utterance_tokens, embedding_dim)
encoder_input = embedded_utterance
# (batch_size, utterance_length, encoder_output_dim)
encoder_outputs = self._dropout(self._encoder(encoder_input, utterance_mask))
# This will be our initial hidden state and memory cell for the decoder LSTM.
final_encoder_output = util.get_final_encoder_states(
encoder_outputs, utterance_mask, self._encoder.is_bidirectional()
)
memory_cell = encoder_outputs.new_zeros(batch_size, self._encoder.get_output_dim())
initial_score = embedded_utterance.data.new_zeros(batch_size)
# To make grouping states together in the decoder easier, we convert the batch dimension in
# all of our tensors into an outer list. For instance, the encoder outputs have shape
# `(batch_size, utterance_length, encoder_output_dim)`. We need to convert this into a list
# of `batch_size` tensors, each of shape `(utterance_length, encoder_output_dim)`. Then we
# won't have to do any index selects, or anything, we'll just do some `torch.cat()`s.
initial_score_list = [initial_score[i] for i in range(batch_size)]
encoder_output_list = [encoder_outputs[i] for i in range(batch_size)]
utterance_mask_list = [utterance_mask[i] for i in range(batch_size)]
initial_rnn_state = []
for i in range(batch_size):
if self._decoder_num_layers > 1:
initial_rnn_state.append(
RnnStatelet(
final_encoder_output[i].repeat(self._decoder_num_layers, 1),
memory_cell[i].repeat(self._decoder_num_layers, 1),
self._first_action_embedding,
self._first_attended_utterance,
encoder_output_list,
utterance_mask_list,
)
)
else:
initial_rnn_state.append(
RnnStatelet(
final_encoder_output[i],
memory_cell[i],
self._first_action_embedding,
self._first_attended_utterance,
encoder_output_list,
utterance_mask_list,
)
)
initial_grammar_state = [
self._create_grammar_state(worlds[i], actions[i], linking_scores[i], entity_types[i])
for i in range(batch_size)
]
initial_state = GrammarBasedState(
batch_indices=list(range(batch_size)),
action_history=[[] for _ in range(batch_size)],
score=initial_score_list,
rnn_state=initial_rnn_state,
grammar_state=initial_grammar_state,
possible_actions=actions,
debug_info=None,
)
return initial_state
@staticmethod
def _get_type_vector(
worlds: List[AtisWorld], num_entities: int, tensor: torch.Tensor = None
) -> Tuple[torch.LongTensor, Dict[int, int]]:
"""
Produces the encoding for each entity's type. In addition, a map from a flattened entity
index to type is returned to combine entity type operations into one method.
Parameters
----------
worlds : ``List[AtisWorld]``
num_entities : ``int``
tensor : ``torch.Tensor``
Used for copying the constructed list onto the right device.
Returns
-------
        A ``torch.LongTensor`` with shape ``(batch_size, num_entities)`` containing each entity's type id.
entity_types : ``Dict[int, int]``
This is a mapping from ((batch_index * num_entities) + entity_index) to entity type id.
"""
entity_types = {}
batch_types = []
for batch_index, world in enumerate(worlds):
types = []
entities = [
("number", entity)
if any(
[
entity.startswith(numeric_nonterminal)
for numeric_nonterminal in NUMERIC_NONTERMINALS
]
)
else ("string", entity)
for entity in world.entities
]
for entity_index, entity in enumerate(entities):
# We need numbers to be first, then strings, since our entities are going to be
# sorted. We do a split by type and then a merge later, and it relies on this sorting.
if entity[0] == "number":
entity_type = 1
else:
entity_type = 0
types.append(entity_type)
# For easier lookups later, we're actually using a _flattened_ version
# of (batch_index, entity_index) for the key, because this is how the
# linking scores are stored.
flattened_entity_index = batch_index * num_entities + entity_index
entity_types[flattened_entity_index] = entity_type
padded = pad_sequence_to_length(types, num_entities, lambda: 0)
batch_types.append(padded)
return tensor.new_tensor(batch_types, dtype=torch.long), entity_types
@staticmethod
def _action_history_match(predicted: List[int], targets: torch.LongTensor) -> int:
# TODO(mattg): this could probably be moved into a FullSequenceMatch metric, or something.
# Check if target is big enough to cover prediction (including start/end symbols)
if len(predicted) > targets.size(0):
return 0
predicted_tensor = targets.new_tensor(predicted)
targets_trimmed = targets[: len(predicted)]
        # Return 1 if the predicted action indices exactly match the beginning of the target sequence.
return predicted_tensor.equal(targets_trimmed)
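    # A hedged, illustrative example (action indices are made up): with
    # ``predicted = [3, 7, 2]`` and a target tensor beginning [3, 7, 2, ...], this returns 1;
    # it returns 0 if any of those first three entries differ or if the prediction is longer
    # than the target tensor.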
@staticmethod
def is_nonterminal(token: str):
if token[0] == '"' and token[-1] == '"':
return False
return True
def get_metrics(self, reset: bool = False) -> Dict[str, float]:
"""
We track four metrics here:
1. exact_match, which is the percentage of the time that our best output action sequence
matches the SQL query exactly.
2. denotation_acc, which is the percentage of examples where we get the correct
denotation. This is the typical "accuracy" metric, and it is what you should usually
report in an experimental result. You need to be careful, though, that you're
computing this on the full data, and not just the subset that can be parsed. (make sure
you pass "keep_if_unparseable=True" to the dataset reader, which we do for validation data,
but not training data).
3. valid_sql_query, which is the percentage of time that decoding actually produces a
valid SQL query. We might not produce a valid SQL query if the decoder gets
into a repetitive loop, or we're trying to produce a super long SQL query and run
out of time steps, or something.
4. action_similarity, which is how similar the action sequence predicted is to the actual
action sequence. This is basically a soft measure of exact_match.
"""
return {
"exact_match": self._exact_match.get_metric(reset),
"denotation_acc": self._denotation_accuracy.get_metric(reset),
"valid_sql_query": self._valid_sql_query.get_metric(reset),
"action_similarity": self._action_similarity.get_metric(reset),
}
def _create_grammar_state(
self,
world: AtisWorld,
possible_actions: List[ProductionRule],
linking_scores: torch.Tensor,
entity_types: torch.Tensor,
) -> GrammarStatelet:
"""
This method creates the GrammarStatelet object that's used for decoding. Part of creating
that is creating the `valid_actions` dictionary, which contains embedded representations of
all of the valid actions. So, we create that here as well.
The inputs to this method are for a `single instance in the batch`; none of the tensors we
create here are batched. We grab the global action ids from the input
``ProductionRules``, and we use those to embed the valid actions for every
non-terminal type. We use the input ``linking_scores`` for non-global actions.
Parameters
----------
world : ``AtisWorld``
From the input to ``forward`` for a single batch instance.
possible_actions : ``List[ProductionRule]``
From the input to ``forward`` for a single batch instance.
linking_scores : ``torch.Tensor``
Assumed to have shape ``(num_entities, num_utterance_tokens)`` (i.e., there is no batch
dimension).
entity_types : ``torch.Tensor``
Assumed to have shape ``(num_entities,)`` (i.e., there is no batch dimension).
"""
action_map = {}
for action_index, action in enumerate(possible_actions):
action_string = action[0]
action_map[action_string] = action_index
valid_actions = world.valid_actions
entity_map = {}
entities: Iterable[str] = world.entities
for entity_index, entity in enumerate(entities):
entity_map[entity] = entity_index
translated_valid_actions: Dict[
str, Dict[str, Tuple[torch.Tensor, torch.Tensor, List[int]]]
] = {}
for key, action_strings in valid_actions.items():
translated_valid_actions[key] = {}
# `key` here is a non-terminal from the grammar, and `action_strings` are all the valid
# productions of that non-terminal. We'll first split those productions by global vs.
# linked action.
action_indices = [action_map[action_string] for action_string in action_strings]
production_rule_arrays = [(possible_actions[index], index) for index in action_indices]
global_actions = []
linked_actions = []
for production_rule_array, action_index in production_rule_arrays:
if production_rule_array[1]:
global_actions.append((production_rule_array[2], action_index))
else:
linked_actions.append((production_rule_array[0], action_index))
if global_actions:
global_action_tensors, global_action_ids = zip(*global_actions)
global_action_tensor = (
torch.cat(global_action_tensors, dim=0).to(entity_types.device).long()
)
global_input_embeddings = self._action_embedder(global_action_tensor)
global_output_embeddings = self._output_action_embedder(global_action_tensor)
translated_valid_actions[key]["global"] = (
global_input_embeddings,
global_output_embeddings,
list(global_action_ids),
)
if linked_actions:
linked_rules, linked_action_ids = zip(*linked_actions)
entities = linked_rules
entity_ids = [entity_map[entity] for entity in entities]
entity_linking_scores = linking_scores[entity_ids]
entity_type_tensor = entity_types[entity_ids]
entity_type_embeddings = (
self._entity_type_decoder_embedding(entity_type_tensor)
.to(entity_types.device)
.float()
)
translated_valid_actions[key]["linked"] = (
entity_linking_scores,
entity_type_embeddings,
list(linked_action_ids),
)
return GrammarStatelet(["statement"], translated_valid_actions, self.is_nonterminal)
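    # A hedged sketch of the dictionary built above (non-terminal names are illustrative):
    # for each non-terminal key, ``translated_valid_actions`` holds
    #
    #     {"global": (rule_input_embeddings, rule_output_embeddings, [action_id, ...]),
    #      "linked": (entity_linking_scores, entity_type_embeddings, [action_id, ...])}
    #
    # where "global" entries embed grammar rules from the vocabulary and "linked" entries
    # score utterance tokens against database entities.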
def make_output_human_readable(
self, output_dict: Dict[str, torch.Tensor]
) -> Dict[str, torch.Tensor]:
"""
        This method overrides ``Model.make_output_human_readable``, which gets called after
        ``Model.forward``, at test time, to finalize predictions. This is (confusingly) a separate
        notion from the "decoder" in "encoder/decoder", where that decoder logic lives in
        ``TransitionFunction``.
This method trims the output predictions to the first end symbol, replaces indices with
corresponding tokens, and adds a field called ``predicted_actions`` to the ``output_dict``.
"""
action_mapping = output_dict["action_mapping"]
best_actions = output_dict["best_action_sequence"]
debug_infos = output_dict["debug_info"]
batch_action_info = []
for batch_index, (predicted_actions, debug_info) in enumerate(
zip(best_actions, debug_infos)
):
instance_action_info = []
for predicted_action, action_debug_info in zip(predicted_actions, debug_info):
action_info = {}
action_info["predicted_action"] = predicted_action
considered_actions = action_debug_info["considered_actions"]
probabilities = action_debug_info["probabilities"]
actions = []
for action, probability in zip(considered_actions, probabilities):
if action != -1:
actions.append((action_mapping[(batch_index, action)], probability))
actions.sort()
considered_actions, probabilities = zip(*actions)
action_info["considered_actions"] = considered_actions
action_info["action_probabilities"] = probabilities
action_info["utterance_attention"] = action_debug_info.get("question_attention", [])
instance_action_info.append(action_info)
batch_action_info.append(instance_action_info)
output_dict["predicted_actions"] = batch_action_info
return output_dict
default_predictor = "atis-parser"
| allennlp-semparse-master | allennlp_semparse/models/atis/atis_semantic_parser.py |
from collections import defaultdict
from typing import Dict, List, Optional, Set, Tuple, Union
import torch
def construct_prefix_tree(
targets: Union[torch.Tensor, List[List[List[int]]]],
target_mask: Optional[Union[torch.Tensor, List[List[List[int]]]]] = None,
) -> List[Dict[Tuple[int, ...], Set[int]]]:
"""
Takes a list of valid target action sequences and creates a mapping from all possible
(valid) action prefixes to allowed actions given that prefix. While the method is called
``construct_prefix_tree``, we're actually returning a map that has as keys the paths to
`all internal nodes of the trie`, and as values all of the outgoing edges from that node.
``targets`` is assumed to be a tensor of shape ``(batch_size, num_valid_sequences,
sequence_length)``. If the mask is not ``None``, it is assumed to have the same shape, and
we will ignore any value in ``targets`` that has a value of ``0`` in the corresponding
position in the mask. We assume that the mask has the format 1*0* for each item in
``targets`` - that is, once we see our first zero, we stop processing that target.
For example, if ``targets`` is the following tensor: ``[[1, 2, 3], [1, 4, 5]]``, the return
value will be: ``{(): set([1]), (1,): set([2, 4]), (1, 2): set([3]), (1, 4): set([5])}``.
This could be used, e.g., to do an efficient constrained beam search, or to efficiently
evaluate the probability of all of the target sequences.
"""
batched_allowed_transitions: List[Dict[Tuple[int, ...], Set[int]]] = []
if not isinstance(targets, list):
assert targets.dim() == 3, "targets tensor needs to be batched!"
targets = targets.detach().cpu().numpy().tolist()
if target_mask is not None:
if not isinstance(target_mask, list):
target_mask = target_mask.detach().cpu().numpy().tolist()
else:
target_mask = [None for _ in targets]
for instance_targets, instance_mask in zip(targets, target_mask):
allowed_transitions: Dict[Tuple[int, ...], Set[int]] = defaultdict(set)
for i, target_sequence in enumerate(instance_targets):
history: Tuple[int, ...] = ()
for j, action in enumerate(target_sequence):
if instance_mask and instance_mask[i][j] == 0:
break
allowed_transitions[history].add(action)
history = history + (action,)
batched_allowed_transitions.append(allowed_transitions)
return batched_allowed_transitions
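if __name__ == "__main__":
    # A minimal, runnable sketch of the docstring example above. The action ids are made up;
    # real callers pass batched index tensors produced by a dataset reader.
    example_targets = [[[1, 2, 3], [1, 4, 5]]]  # (batch_size=1, num_sequences=2, length=3)
    allowed_transitions = construct_prefix_tree(example_targets)
    # Expected output: {(): {1}, (1,): {2, 4}, (1, 2): {3}, (1, 4): {5}}
    print(dict(allowed_transitions[0]))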
| allennlp-semparse-master | allennlp_semparse/state_machines/util.py |
"""
This module contains code for using state machines in a model to do transition-based decoding.
"Transition-based decoding" is where you start in some state, iteratively transition between
states, and have some kind of supervision signal that tells you which end states, or which
transition sequences, are "good".
Typical seq2seq decoding, where you have a fixed vocabulary and no constraints on your output, can
be done much more efficiently than we do in this code. This is intended for structured models that
have constraints on their outputs.
The key abstractions in this code are the following:
- ``State`` represents the current state of decoding, containing a list of all of the actions
taken so far, and a current score for the state. It also has methods around determining
whether the state is "finished" and for combining states for batched computation.
- ``TransitionFunction`` is a ``torch.nn.Module`` that models the transition function between
states. Its main method is ``take_step``, which generates a ranked list of next states given
a current state.
- ``DecoderTrainer`` is an algorithm for training the transition function with some kind of
supervision signal. There are many options for training algorithms and supervision signals;
this is an abstract class that is generic over the type of the supervision signal.
There is also a generic ``BeamSearch`` class for finding the ``k`` highest-scoring transition
sequences given a trained ``TransitionFunction`` and an initial ``State``.
"""
from allennlp_semparse.state_machines.beam_search import BeamSearch
from allennlp_semparse.state_machines.constrained_beam_search import ConstrainedBeamSearch
from allennlp_semparse.state_machines.states import State
from allennlp_semparse.state_machines.trainers import DecoderTrainer
from allennlp_semparse.state_machines.transition_functions import TransitionFunction
| allennlp-semparse-master | allennlp_semparse/state_machines/__init__.py |
from collections import defaultdict
from typing import Dict, Generic, List, TypeVar, Tuple
import torch
from allennlp.common.registrable import FromParams
from allennlp_semparse.state_machines import util
from allennlp_semparse.state_machines.states import State
from allennlp_semparse.state_machines.transition_functions import TransitionFunction
StateType = TypeVar("StateType", bound=State)
class BeamSearch(FromParams, Generic[StateType]):
"""
This class implements beam search over transition sequences given an initial ``State`` and a
``TransitionFunction``, returning the highest scoring final states found by the beam (the
states will keep track of the transition sequence themselves).
The initial ``State`` is assumed to be `batched`. The value we return from the search is a
dictionary from batch indices to ranked finished states.
IMPORTANT: We assume that the ``TransitionFunction`` that you are using returns possible next
states in sorted order, so we do not do an additional sort inside of ``BeamSearch.search()``.
If you're implementing your own ``TransitionFunction``, you must ensure that you've sorted the
states that you return.
Parameters
----------
beam_size : ``int``
The beam size to use.
per_node_beam_size : ``int``, optional (default = beam_size)
The maximum number of candidates to consider per node, at each step in the search.
If not given, this just defaults to `beam_size`. Setting this parameter
to a number smaller than `beam_size` may give better results, as it can introduce
more diversity into the search. See Freitag and Al-Onaizan 2017,
"Beam Search Strategies for Neural Machine Translation".
initial_sequence : ``torch.Tensor``, optional (default = None)
If you provide a (sequence_length,) tensor here, the beam search will be constrained
to only sequences that begin with the provided initial_sequence.
keep_beam_details : ``bool``, optional (default = False)
If True, we store snapshots of each beam in an instance variable ``beam_snapshots``,
which is a dict: { batch_index -> [timestep0_histories, ..., timestepk_histories] },
where a "timestep history" is just a pair (score, action_history) that was considered
at that timestep.
"""
def __init__(
self,
beam_size: int,
per_node_beam_size: int = None,
initial_sequence: torch.Tensor = None,
keep_beam_details: bool = False,
) -> None:
self._beam_size = beam_size
self._per_node_beam_size = per_node_beam_size or beam_size
if initial_sequence is not None:
# construct_prefix_tree wants a tensor of shape (batch_size, num_sequences, sequence_length)
# so we need to add the first two dimensions in. This returns a list, but we're assuming
# batch size 1, so we extract the first element.
self._allowed_transitions = util.construct_prefix_tree(initial_sequence.view(1, 1, -1))[
0
]
else:
self._allowed_transitions = None
if keep_beam_details:
# mapping from batch_index to a list (timesteps) of lists (beam elements)
# of pairs (score, action_history)
self.beam_snapshots: Dict[int, List[List[Tuple[float, List[int]]]]] = {}
else:
self.beam_snapshots = None
def constrained_to(
self, initial_sequence: torch.Tensor, keep_beam_details: bool = True
) -> "BeamSearch":
"""
Return a new BeamSearch instance that's like this one but with the specified constraint.
"""
return BeamSearch(
self._beam_size, self._per_node_beam_size, initial_sequence, keep_beam_details
)
def search(
self,
num_steps: int,
initial_state: StateType,
transition_function: TransitionFunction,
keep_final_unfinished_states: bool = True,
) -> Dict[int, List[StateType]]:
"""
Parameters
----------
num_steps : ``int``
How many steps should we take in our search? This is an upper bound, as it's possible
for the search to run out of valid actions before hitting this number, or for all
states on the beam to finish.
initial_state : ``StateType``
The starting state of our search. This is assumed to be `batched`, and our beam search
is batch-aware - we'll keep ``beam_size`` states around for each instance in the batch.
transition_function : ``TransitionFunction``
The ``TransitionFunction`` object that defines and scores transitions from one state to the
next.
keep_final_unfinished_states : ``bool``, optional (default=True)
If we run out of steps before a state is "finished", should we return that state in our
search results?
Returns
-------
best_states : ``Dict[int, List[StateType]]``
This is a mapping from batch index to the top states for that instance.
"""
finished_states: Dict[int, List[StateType]] = defaultdict(list)
states = [initial_state]
step_num = 1
# Erase stored beams, if we're tracking them.
if self.beam_snapshots is not None:
self.beam_snapshots = defaultdict(list)
while states and step_num <= num_steps:
next_states: Dict[int, List[StateType]] = defaultdict(list)
grouped_state = states[0].combine_states(states)
if self._allowed_transitions:
# We were provided an initial sequence, so we need to check
# if the current sequence is still constrained.
key = tuple(grouped_state.action_history[0])
if key in self._allowed_transitions:
# We're still in the initial_sequence, so our hand is forced.
allowed_actions = [self._allowed_transitions[key]]
else:
# We've gone past the end of the initial sequence, so no constraint.
allowed_actions = None
else:
# No initial sequence was provided, so all actions are allowed.
allowed_actions = None
for next_state in transition_function.take_step(
grouped_state, max_actions=self._per_node_beam_size, allowed_actions=allowed_actions
):
# NOTE: we're doing state.batch_indices[0] here (and similar things below),
# hard-coding a group size of 1. But, our use of `next_state.is_finished()`
# already checks for that, as it crashes if the group size is not 1.
batch_index = next_state.batch_indices[0]
if next_state.is_finished():
finished_states[batch_index].append(next_state)
else:
if step_num == num_steps and keep_final_unfinished_states:
finished_states[batch_index].append(next_state)
next_states[batch_index].append(next_state)
states = []
for batch_index, batch_states in next_states.items():
# The states from the generator are already sorted, so we can just take the first
# ones here, without an additional sort.
states.extend(batch_states[: self._beam_size])
if self.beam_snapshots is not None:
# Add to beams
self.beam_snapshots[batch_index].append(
[(state.score[0].item(), state.action_history[0]) for state in batch_states]
)
step_num += 1
# Add finished states to the stored beams as well.
if self.beam_snapshots is not None:
for batch_index, states in finished_states.items():
for state in states:
score = state.score[0].item()
action_history = state.action_history[0]
while len(self.beam_snapshots[batch_index]) < len(action_history):
self.beam_snapshots[batch_index].append([])
self.beam_snapshots[batch_index][len(action_history) - 1].append(
(score, action_history)
)
best_states: Dict[int, List[StateType]] = {}
for batch_index, batch_states in finished_states.items():
# The time this sort takes is pretty negligible, no particular need to optimize this
# yet. Maybe with a larger beam size...
finished_to_sort = [(-state.score[0].item(), state) for state in batch_states]
finished_to_sort.sort(key=lambda x: x[0])
best_states[batch_index] = [state[1] for state in finished_to_sort[: self._beam_size]]
return best_states
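    # A hedged usage sketch (names are illustrative): a parser typically builds a batched
    # initial state and a trained transition function, then asks for the top-scoring finished
    # states per batch instance.
    #
    #     beam_search = BeamSearch(beam_size=5)
    #     best_final_states = beam_search.search(
    #         num_steps=max_decoding_steps,
    #         initial_state=initial_state,
    #         transition_function=transition_function,
    #         keep_final_unfinished_states=False,
    #     )
    #     if 0 in best_final_states:
    #         best_action_indices = best_final_states[0][0].action_history[0]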
| allennlp-semparse-master | allennlp_semparse/state_machines/beam_search.py |
from collections import defaultdict
from typing import Dict, List, Optional
import torch
from allennlp_semparse.state_machines import util
from allennlp_semparse.state_machines.states import State
from allennlp_semparse.state_machines.transition_functions import TransitionFunction
class ConstrainedBeamSearch:
"""
This class implements beam search over transition sequences given an initial ``State``, a
``TransitionFunction``, and a list of allowed transition sequences. We will do a beam search
`over the list of allowed sequences` and return the highest scoring states found by the beam.
This is only actually a `beam search` if your beam size is smaller than the list of allowed
transition sequences; otherwise, we are just scoring and sorting the sequences using a prefix
tree.
The initial ``State`` is assumed to be `batched`. The value we return from the search is a
dictionary from batch indices to ranked finished states.
IMPORTANT: We assume that the ``TransitionFunction`` that you are using returns possible next
states in sorted order, so we do not do an additional sort inside of
``ConstrainedBeamSearch.search()``. If you're implementing your own ``TransitionFunction``,
you must ensure that you've sorted the states that you return.
Parameters
----------
beam_size : ``Optional[int]``
The beam size to use. Because this is a `constrained` beam search, we allow for the case
where you just want to evaluate all options in the constrained set. In that case, you
don't need a beam, and you can pass a beam size of ``None``, and we will just evaluate
everything. This lets us be more efficient in :func:`TransitionFunction.take_step` and
skip the sorting that is typically done there.
allowed_sequences : ``torch.Tensor``
A ``(batch_size, num_sequences, sequence_length)`` tensor containing the transition
sequences that we will search in. The values in this tensor must match whatever the
``State`` keeps in its ``action_history`` variable (typically this is action indices).
allowed_sequence_mask : ``torch.Tensor``
A ``(batch_size, num_sequences, sequence_length)`` tensor indicating whether each entry in
the ``allowed_sequences`` tensor is padding. The allowed sequences could be padded both on
the ``num_sequences`` dimension and the ``sequence_length`` dimension.
per_node_beam_size : ``int``, optional (default = beam_size)
The maximum number of candidates to consider per node, at each step in the search.
If not given, this just defaults to `beam_size`. Setting this parameter
to a number smaller than `beam_size` may give better results, as it can introduce
more diversity into the search. See Freitag and Al-Onaizan 2017,
"Beam Search Strategies for Neural Machine Translation".
"""
def __init__(
self,
beam_size: Optional[int],
allowed_sequences: torch.Tensor,
allowed_sequence_mask: torch.Tensor,
per_node_beam_size: int = None,
) -> None:
self._beam_size = beam_size
self._per_node_beam_size = per_node_beam_size or beam_size
self._allowed_transitions = util.construct_prefix_tree(
allowed_sequences, allowed_sequence_mask
)
def search(
self, initial_state: State, transition_function: TransitionFunction
) -> Dict[int, List[State]]:
"""
Parameters
----------
initial_state : ``State``
The starting state of our search. This is assumed to be `batched`, and our beam search
is batch-aware - we'll keep ``beam_size`` states around for each instance in the batch.
transition_function : ``TransitionFunction``
The ``TransitionFunction`` object that defines and scores transitions from one state to the
next.
Returns
-------
best_states : ``Dict[int, List[State]]``
This is a mapping from batch index to the top states for that instance.
"""
finished_states: Dict[int, List[State]] = defaultdict(list)
states = [initial_state]
step_num = 0
while states:
step_num += 1
next_states: Dict[int, List[State]] = defaultdict(list)
grouped_state = states[0].combine_states(states)
allowed_actions = []
for batch_index, action_history in zip(
grouped_state.batch_indices, grouped_state.action_history
):
allowed_actions.append(
self._allowed_transitions[batch_index][tuple(action_history)]
)
for next_state in transition_function.take_step(
grouped_state, max_actions=self._per_node_beam_size, allowed_actions=allowed_actions
):
# NOTE: we're doing state.batch_indices[0] here (and similar things below),
# hard-coding a group size of 1. But, our use of `next_state.is_finished()`
# already checks for that, as it crashes if the group size is not 1.
batch_index = next_state.batch_indices[0]
if next_state.is_finished():
finished_states[batch_index].append(next_state)
else:
next_states[batch_index].append(next_state)
states = []
for batch_index, batch_states in next_states.items():
# The states from the generator are already sorted, so we can just take the first
# ones here, without an additional sort.
if self._beam_size:
batch_states = batch_states[: self._beam_size]
states.extend(batch_states)
best_states: Dict[int, List[State]] = {}
for batch_index, batch_states in finished_states.items():
# The time this sort takes is pretty negligible, no particular need to optimize this
# yet. Maybe with a larger beam size...
finished_to_sort = [(-state.score[0].item(), state) for state in batch_states]
finished_to_sort.sort(key=lambda x: x[0])
best_states[batch_index] = [state[1] for state in finished_to_sort[: self._beam_size]]
return best_states
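    # A hedged usage sketch (tensor names are illustrative): both tensors have shape
    # (batch_size, num_sequences, sequence_length), and passing ``beam_size=None`` simply
    # scores every allowed sequence.
    #
    #     searcher = ConstrainedBeamSearch(
    #         beam_size=None,
    #         allowed_sequences=target_action_sequences,
    #         allowed_sequence_mask=target_mask,
    #     )
    #     finished_states = searcher.search(initial_state, transition_function)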
| allennlp-semparse-master | allennlp_semparse/state_machines/constrained_beam_search.py |
from typing import Callable, Dict, List, TypeVar
from collections import defaultdict
import torch
from allennlp.nn import util as nn_util
from allennlp_semparse.state_machines.states import State
from allennlp_semparse.state_machines.trainers.decoder_trainer import DecoderTrainer
from allennlp_semparse.state_machines.transition_functions import TransitionFunction
StateType = TypeVar("StateType", bound=State)
class ExpectedRiskMinimization(DecoderTrainer[Callable[[StateType], torch.Tensor]]):
"""
This class implements a trainer that minimizes the expected value of a cost function over the
space of some candidate sequences produced by a decoder. We generate the candidate sequences by
performing beam search (which is one of the two popular ways of getting these sequences, the
other one being sampling; see "Classical Structured Prediction Losses for Sequence to Sequence
Learning" by Edunov et al., 2017 for more details).
Parameters
----------
beam_size : ``int``
    normalize_by_length : ``bool``
Should the log probabilities be normalized by length before renormalizing them? Edunov et
al. do this in their work.
max_decoding_steps : ``int``
The maximum number of steps we should take during decoding.
max_num_decoded_sequences : ``int``, optional (default=1)
Maximum number of sorted decoded sequences to return. Defaults to 1.
max_num_finished_states : ``int``, optional (default = None)
Maximum number of finished states to keep after search. This is to finished states as
``beam_size`` is to unfinished ones. Costs are computed for only these number of states per
instance. If not set, we will keep all the finished states.
"""
def __init__(
self,
beam_size: int,
normalize_by_length: bool,
max_decoding_steps: int,
max_num_decoded_sequences: int = 1,
max_num_finished_states: int = None,
) -> None:
self._beam_size = beam_size
self._normalize_by_length = normalize_by_length
self._max_decoding_steps = max_decoding_steps
self._max_num_decoded_sequences = max_num_decoded_sequences
self._max_num_finished_states = max_num_finished_states
def decode(
self,
initial_state: State,
transition_function: TransitionFunction,
supervision: Callable[[StateType], torch.Tensor],
) -> Dict[str, torch.Tensor]:
cost_function = supervision
finished_states = self._get_finished_states(initial_state, transition_function)
loss = initial_state.score[0].new_zeros(1)
finished_model_scores = self._get_model_scores_by_batch(finished_states)
finished_costs = self._get_costs_by_batch(finished_states, cost_function)
for batch_index in finished_model_scores:
# Finished model scores are log-probabilities of the predicted sequences. We convert
# log probabilities into probabilities and re-normalize them to compute expected cost under
# the distribution approximated by the beam search.
costs = torch.cat([tensor.view(-1) for tensor in finished_costs[batch_index]])
logprobs = torch.cat([tensor.view(-1) for tensor in finished_model_scores[batch_index]])
# Unmasked softmax of log probabilities will convert them into probabilities and
# renormalize them.
renormalized_probs = nn_util.masked_softmax(logprobs, None)
loss += renormalized_probs.dot(costs)
mean_loss = loss / len(finished_model_scores)
return {
"loss": mean_loss,
"best_final_states": self._get_best_final_states(finished_states),
}
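    # A small worked example of the expected-cost loss above (numbers are made up): if one
    # instance has two finished sequences with log-probabilities [-1.0, -2.0] and costs
    # [0.0, 1.0], the masked softmax renormalizes the log-probabilities to roughly
    # [0.73, 0.27], so the instance adds about 0.73 * 0.0 + 0.27 * 1.0 ≈ 0.27 to the summed
    # loss before it is averaged over instances.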
def _get_finished_states(
self, initial_state: State, transition_function: TransitionFunction
) -> List[StateType]:
finished_states = []
states = [initial_state]
num_steps = 0
while states and num_steps < self._max_decoding_steps:
next_states = []
grouped_state = states[0].combine_states(states)
# These states already come sorted.
for next_state in transition_function.take_step(grouped_state):
if next_state.is_finished():
finished_states.append(next_state)
else:
next_states.append(next_state)
states = self._prune_beam(
states=next_states, beam_size=self._beam_size, sort_states=False
)
num_steps += 1
if self._max_num_finished_states is not None:
finished_states = self._prune_beam(
states=finished_states, beam_size=self._max_num_finished_states, sort_states=True
)
return finished_states
# TODO(pradeep): Move this method to state_machines.util
@staticmethod
def _prune_beam(states: List[State], beam_size: int, sort_states: bool = False) -> List[State]:
"""
This method can be used to prune the set of unfinished states on a beam or finished states
        at the end of search. In the former case, the states need not be sorted because they all come
from the same decoding step, which does the sorting. However, if the states are finished and
this method is called at the end of the search, they need to be sorted because they come
from different decoding steps.
"""
states_by_batch_index: Dict[int, List[State]] = defaultdict(list)
for state in states:
assert len(state.batch_indices) == 1
batch_index = state.batch_indices[0]
states_by_batch_index[batch_index].append(state)
pruned_states = []
for _, instance_states in states_by_batch_index.items():
if sort_states:
scores = torch.cat([state.score[0].view(-1) for state in instance_states])
_, sorted_indices = scores.sort(-1, descending=True)
sorted_states = [instance_states[i] for i in sorted_indices.detach().cpu().numpy()]
instance_states = sorted_states
for state in instance_states[:beam_size]:
pruned_states.append(state)
return pruned_states
def _get_model_scores_by_batch(self, states: List[StateType]) -> Dict[int, List[torch.Tensor]]:
batch_scores: Dict[int, List[torch.Tensor]] = defaultdict(list)
for state in states:
for batch_index, model_score, history in zip(
state.batch_indices, state.score, state.action_history
):
if self._normalize_by_length:
path_length = model_score.new_tensor([len(history)])
model_score = model_score / path_length
batch_scores[batch_index].append(model_score)
return batch_scores
@staticmethod
def _get_costs_by_batch(
states: List[StateType], cost_function: Callable[[StateType], torch.Tensor]
) -> Dict[int, List[torch.Tensor]]:
batch_costs: Dict[int, List[torch.Tensor]] = defaultdict(list)
for state in states:
cost = cost_function(state)
# Since this is a finished state, its group size is 1, and we just take the only batch
# index.
batch_index = state.batch_indices[0]
batch_costs[batch_index].append(cost)
return batch_costs
def _get_best_final_states(
self, finished_states: List[StateType]
) -> Dict[int, List[StateType]]:
"""
Returns the best finished states for each batch instance based on model scores. We return
at most ``self._max_num_decoded_sequences`` number of sequences per instance.
"""
batch_states: Dict[int, List[StateType]] = defaultdict(list)
for state in finished_states:
batch_states[state.batch_indices[0]].append(state)
best_states: Dict[int, List[StateType]] = {}
for batch_index, states in batch_states.items():
# The time this sort takes is pretty negligible, no particular need to optimize this
# yet. Maybe with a larger beam size...
finished_to_sort = [(-state.score[0].item(), state) for state in states]
finished_to_sort.sort(key=lambda x: x[0])
best_states[batch_index] = [state[1] for state in finished_to_sort[: self._beam_size]]
return best_states
| allennlp-semparse-master | allennlp_semparse/state_machines/trainers/expected_risk_minimization.py |
from typing import Dict, Generic, TypeVar
import torch
from allennlp_semparse.state_machines.states import State
from allennlp_semparse.state_machines.transition_functions import TransitionFunction
SupervisionType = TypeVar("SupervisionType")
class DecoderTrainer(Generic[SupervisionType]):
"""
``DecoderTrainers`` define a training regime for transition-based decoders. A
``DecoderTrainer`` assumes an initial ``State``, a ``TransitionFunction`` function that can
traverse the state space, and some supervision signal. Given these things, the
``DecoderTrainer`` trains the ``TransitionFunction`` function to traverse the state space to
end up at good end states.
Concrete implementations of this abstract base class could do things like maximum marginal
likelihood, SEARN, LaSO, or other structured learning algorithms. If you're just trying to
maximize the probability of a single target sequence where the possible outputs are the same
for each timestep (as in, e.g., typical machine translation training regimes), there are way
more efficient ways to do that than using this API.
"""
def decode(
self,
initial_state: State,
transition_function: TransitionFunction,
supervision: SupervisionType,
) -> Dict[str, torch.Tensor]:
"""
Takes an initial state object, a means of transitioning from state to state, and a
supervision signal, and uses the supervision to train the transition function to pick
"good" states.
This function should typically return a ``loss`` key during training, which the ``Model``
will use as its loss.
Parameters
----------
initial_state : ``State``
This is the initial state for decoding, typically initialized after running some kind
of encoder on some inputs.
transition_function : ``TransitionFunction``
This is the transition function that scores all possible actions that can be taken in a
given state, and returns a ranked list of next states at each step of decoding.
supervision : ``SupervisionType``
This is the supervision that is used to train the ``transition_function`` function to
pick "good" states. You can use whatever kind of supervision you want (e.g., a single
"gold" action sequence, a set of possible "gold" action sequences, a reward function,
etc.). We use ``typing.Generics`` to make sure that our static type checker is happy
with how you've matched the supervision that you provide in the model to the
``DecoderTrainer`` that you want to use.
"""
raise NotImplementedError
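# In a typical model's ``forward`` method, a concrete trainer is used roughly like this
# (illustrative sketch; ``initial_state``, ``transition_function``, ``targets`` and
# ``target_mask`` are assumed to have been built by the model and are not defined here):
#
#     outputs = self._decoder_trainer.decode(
#         initial_state, transition_function, (targets, target_mask)
#     )
#     loss = outputs["loss"]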
| allennlp-semparse-master | allennlp_semparse/state_machines/trainers/decoder_trainer.py |
from allennlp_semparse.state_machines.trainers.decoder_trainer import DecoderTrainer
from allennlp_semparse.state_machines.trainers.expected_risk_minimization import (
ExpectedRiskMinimization,
)
from allennlp_semparse.state_machines.trainers.maximum_marginal_likelihood import (
MaximumMarginalLikelihood,
)
| allennlp-semparse-master | allennlp_semparse/state_machines/trainers/__init__.py |
import logging
from typing import Dict, List, Tuple
import torch
from allennlp.nn import util
from allennlp_semparse.state_machines.constrained_beam_search import ConstrainedBeamSearch
from allennlp_semparse.state_machines.states import State
from allennlp_semparse.state_machines.trainers.decoder_trainer import DecoderTrainer
from allennlp_semparse.state_machines.transition_functions import TransitionFunction
logger = logging.getLogger(__name__)
class MaximumMarginalLikelihood(DecoderTrainer[Tuple[torch.Tensor, torch.Tensor]]):
"""
This class trains a decoder by maximizing the marginal likelihood of the targets. That is,
during training, we are given a `set` of acceptable or possible target sequences, and we
optimize the `sum` of the probability the model assigns to each item in the set. This allows
the model to distribute its probability mass over the set however it chooses, without forcing
`all` of the given target sequences to have high probability. This is helpful, for example, if
you have good reason to expect that the correct target sequence is in the set, but aren't sure
`which` of the sequences is actually correct.
This implementation of maximum marginal likelihood requires the model you use to be `locally
normalized`; that is, at each decoding timestep, we assume that the model creates a normalized
probability distribution over actions. This assumption is necessary, because we do no explicit
normalization in our loss function, we just sum the probabilities assigned to all correct
target sequences, relying on the local normalization at each time step to push probability mass
from bad actions to good ones.
Parameters
----------
beam_size : ``int``, optional (default=None)
We can optionally run a constrained beam search over the provided targets during decoding.
This narrows the set of transition sequences that are marginalized over in the loss
function, keeping only the top ``beam_size`` sequences according to the model. If this is
``None``, we will keep all of the provided sequences in the loss computation.
"""
def __init__(self, beam_size: int = None) -> None:
self._beam_size = beam_size
def decode(
self,
initial_state: State,
transition_function: TransitionFunction,
supervision: Tuple[torch.Tensor, torch.Tensor],
) -> Dict[str, torch.Tensor]:
targets, target_mask = supervision
beam_search = ConstrainedBeamSearch(self._beam_size, targets, target_mask)
finished_states: Dict[int, List[State]] = beam_search.search(
initial_state, transition_function
)
loss = 0
for instance_states in finished_states.values():
scores = [state.score[0].view(-1) for state in instance_states]
loss += -util.logsumexp(torch.cat(scores))
return {"loss": loss / len(finished_states)}
| allennlp-semparse-master | allennlp_semparse/state_machines/trainers/maximum_marginal_likelihood.py |
from collections import defaultdict
from typing import Any, Dict, List, Tuple
import torch
from allennlp.common.checks import check_dimensions_match
from allennlp.modules import Attention, FeedForward
from allennlp.nn import Activation
from allennlp_semparse.state_machines.states import GrammarBasedState
from allennlp_semparse.state_machines.transition_functions.basic_transition_function import (
BasicTransitionFunction,
)
class LinkingTransitionFunction(BasicTransitionFunction):
"""
This transition function adds the ability to consider `linked` actions to the
``BasicTransitionFunction`` (which is just an LSTM decoder with attention). These actions are
potentially unseen at training time, so we need to handle them without requiring the action to
have an embedding. Instead, we rely on a `linking score` between each action and the words in
the question/utterance, and use these scores, along with the attention, to do something similar
to a copy mechanism when producing these actions.
When both linked and global (embedded) actions are available, we need some way to compare the
scores for these two sets of actions. The original WikiTableQuestion semantic parser just
concatenated the logits together before doing a joint softmax, but this is quite brittle,
because the logits might have quite different scales. So we have the option here of predicting
a mixture probability between two independently normalized distributions.
Parameters
----------
encoder_output_dim : ``int``
action_embedding_dim : ``int``
input_attention : ``Attention``
activation : ``Activation``, optional (default=relu)
The activation that gets applied to the decoder LSTM input and to the action query.
add_action_bias : ``bool``, optional (default=True)
If ``True``, there has been a bias dimension added to the embedding of each action, which
gets used when predicting the next action. We add a dimension of ones to our predicted
action vector in this case to account for that.
mixture_feedforward : ``FeedForward`` optional (default=None)
If given, we'll use this to compute a mixture probability between global actions and linked
actions given the hidden state at every timestep of decoding, instead of concatenating the
logits for both (where the logits may not be compatible with each other).
dropout : ``float`` (optional, default=0.0)
num_layers: ``int`` (optional, default=1)
The number of layers in the decoder LSTM.
"""
def __init__(
self,
encoder_output_dim: int,
action_embedding_dim: int,
input_attention: Attention,
activation: Activation = Activation.by_name("relu")(),
add_action_bias: bool = True,
mixture_feedforward: FeedForward = None,
dropout: float = 0.0,
num_layers: int = 1,
) -> None:
super().__init__(
encoder_output_dim=encoder_output_dim,
action_embedding_dim=action_embedding_dim,
input_attention=input_attention,
activation=activation,
add_action_bias=add_action_bias,
dropout=dropout,
num_layers=num_layers,
)
self._mixture_feedforward = mixture_feedforward
if mixture_feedforward is not None:
check_dimensions_match(
encoder_output_dim,
mixture_feedforward.get_input_dim(),
"hidden state embedding dim",
"mixture feedforward input dim",
)
check_dimensions_match(
mixture_feedforward.get_output_dim(),
1,
"mixture feedforward output dim",
"dimension for scalar value",
)
def _compute_action_probabilities(
self,
state: GrammarBasedState,
hidden_state: torch.Tensor,
attention_weights: torch.Tensor,
predicted_action_embeddings: torch.Tensor,
) -> Dict[int, List[Tuple[int, Any, Any, Any, List[int]]]]:
# In this section we take our predicted action embedding and compare it to the available
# actions in our current state (which might be different for each group element). For
# computing action scores, we'll forget about doing batched / grouped computation, as it
# adds too much complexity and doesn't speed things up, anyway, with the operations we're
# doing here. This means we don't need any action masks, as we'll only get the right
# lengths for what we're computing.
group_size = len(state.batch_indices)
actions = state.get_valid_actions()
batch_results: Dict[int, List[Tuple[int, Any, Any, Any, List[int]]]] = defaultdict(list)
for group_index in range(group_size):
instance_actions = actions[group_index]
predicted_action_embedding = predicted_action_embeddings[group_index]
embedded_actions: List[int] = []
output_action_embeddings = None
embedded_action_logits = None
current_log_probs = None
if "global" in instance_actions:
action_embeddings, output_action_embeddings, embedded_actions = instance_actions[
"global"
]
# This is just a matrix product between a (num_actions, embedding_dim) matrix and an
# (embedding_dim, 1) matrix.
embedded_action_logits = action_embeddings.mm(
predicted_action_embedding.unsqueeze(-1)
).squeeze(-1)
action_ids = embedded_actions
if "linked" in instance_actions:
linking_scores, type_embeddings, linked_actions = instance_actions["linked"]
action_ids = embedded_actions + linked_actions
# linking_scores: (num_entities, num_question_tokens)
# linked_action_logits: (num_entities, 1)
linked_action_logits = (
linking_scores.float()
.mm(attention_weights[group_index].unsqueeze(-1))
.squeeze(-1)
)
# The `output_action_embeddings` tensor gets used later as the input to the next
# decoder step. For linked actions, we don't have any action embedding, so we use
# the entity type instead.
if output_action_embeddings is not None:
output_action_embeddings = torch.cat(
[output_action_embeddings, type_embeddings], dim=0
)
else:
output_action_embeddings = type_embeddings
if self._mixture_feedforward is not None:
# The linked and global logits are combined with a mixture weight to prevent the
# linked_action_logits from dominating the embedded_action_logits if a softmax
# was applied on both together.
mixture_weight = self._mixture_feedforward(hidden_state[group_index])
mix1 = torch.log(mixture_weight)
mix2 = torch.log(1 - mixture_weight)
entity_action_probs = (
torch.nn.functional.log_softmax(linked_action_logits, dim=-1) + mix1
)
if embedded_action_logits is not None:
embedded_action_probs = (
torch.nn.functional.log_softmax(embedded_action_logits, dim=-1) + mix2
)
current_log_probs = torch.cat(
[embedded_action_probs, entity_action_probs], dim=-1
)
else:
current_log_probs = entity_action_probs
else:
if embedded_action_logits is not None:
action_logits = torch.cat(
[embedded_action_logits, linked_action_logits], dim=-1
)
else:
action_logits = linked_action_logits
current_log_probs = torch.nn.functional.log_softmax(action_logits, dim=-1)
else:
action_logits = embedded_action_logits
current_log_probs = torch.nn.functional.log_softmax(action_logits, dim=-1)
# This is now the total score for each state after taking each action. We're going to
# sort by this later, so it's important that this is the total score, not just the
# score for the current action.
log_probs = state.score[group_index] + current_log_probs
batch_results[state.batch_indices[group_index]].append(
(group_index, log_probs, current_log_probs, output_action_embeddings, action_ids)
)
return batch_results
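# A small self-contained check of the mixture used above (toy numbers, not from a real model):
# with mixture weight m on the linked actions, the combined distribution assigns
# m * softmax(linked_logits) to linked actions and (1 - m) * softmax(global_logits) to global
# actions, which is exactly what the log-space concatenation computes:
#
#     import torch
#
#     m = torch.tensor([0.3])
#     global_logits = torch.tensor([2.0, 0.0, 1.0])
#     linked_logits = torch.tensor([0.5, -1.0])
#     log_probs = torch.cat(
#         [
#             torch.log(1 - m) + torch.nn.functional.log_softmax(global_logits, dim=-1),
#             torch.log(m) + torch.nn.functional.log_softmax(linked_logits, dim=-1),
#         ]
#     )
#     assert torch.allclose(log_probs.exp().sum(), torch.tensor(1.0))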
| allennlp-semparse-master | allennlp_semparse/state_machines/transition_functions/linking_transition_function.py |
"""
This module contains ``TransitionFunctions`` for state-machine-based decoders. The
``TransitionFunction`` parameterizes transitions between ``States``. These ``TransitionFunctions``
are all pytorch `Modules`` that have trainable parameters. The :class:`BasicTransitionFunction` is
simply an LSTM decoder with attention over an input utterance, and the other classes typically
subclass this and add functionality to it.
"""
from allennlp_semparse.state_machines.transition_functions.basic_transition_function import (
BasicTransitionFunction,
)
from allennlp_semparse.state_machines.transition_functions.coverage_transition_function import (
CoverageTransitionFunction,
)
from allennlp_semparse.state_machines.transition_functions.linking_coverage_transition_function import (
LinkingCoverageTransitionFunction,
)
from allennlp_semparse.state_machines.transition_functions.linking_transition_function import (
LinkingTransitionFunction,
)
from allennlp_semparse.state_machines.transition_functions.transition_function import (
TransitionFunction,
)
| allennlp-semparse-master | allennlp_semparse/state_machines/transition_functions/__init__.py |
from typing import Generic, List, Set, TypeVar
import torch
from allennlp_semparse.state_machines.states import State
StateType = TypeVar("StateType", bound=State)
class TransitionFunction(torch.nn.Module, Generic[StateType]):
"""
A ``TransitionFunction`` is a module that assigns scores to state transitions in a
transition-based decoder.
The ``TransitionFunction`` takes a ``State`` and outputs a ranked list of next states, ordered
by the state's score.
The intention with this class is that a model will implement a subclass of
``TransitionFunction`` that defines how exactly you want to handle the input and what
computations get done at each step of decoding, and how states are scored. This subclass then
gets passed to a ``DecoderTrainer`` to have its parameters trained.
"""
def forward(self, *inputs):
raise RuntimeError("call .take_step() instead of .forward()")
def take_step(
self, state: StateType, max_actions: int = None, allowed_actions: List[Set] = None
) -> List[StateType]:
"""
The main method in the ``TransitionFunction`` API. This function defines the computation
done at each step of decoding and returns a ranked list of next states.
The input state is `grouped`, to allow for efficient computation, but the output states
should all have a ``group_size`` of 1, to make things easier on the decoding algorithm.
They will get regrouped later as needed.
Because of the way we handle grouping in the decoder states, constructing a new state is
actually a relatively expensive operation. If you know a priori that only some of the
states will be needed (either because you have a set of gold action sequences, or you have
a fixed beam size), passing that information into this function will keep us from
constructing more states than we need, which will greatly speed up your computation.
        IMPORTANT: This method `must` return states already sorted by their score, otherwise
``BeamSearch`` and other methods will break. For efficiency, we do not perform an
additional sort in those methods.
ALSO IMPORTANT: When ``allowed_actions`` is given and ``max_actions`` is not, we assume you
        want to evaluate all of the allowed states and do not need any sorting (e.g., this is true for
maximum marginal likelihood training that does not use a beam search). In this case, we
may skip the sorting step for efficiency reasons.
Parameters
----------
state : ``State``
The current state of the decoder, which we will take a step `from`. We may be grouping
together computation for several states here. Because we can have several states for
each instance in the original batch being evaluated at the same time, we use
``group_size`` for this kind of batching, and ``batch_size`` for the `original` batch
in ``model.forward.``
max_actions : ``int``, optional
If you know that you will only need a certain number of states out of this (e.g., in a
beam search), you can pass in the max number of actions that you need, and we will only
construct that many states (for each `batch` instance - `not` for each `group`
instance!). This can save a whole lot of computation if you have an action space
that's much larger than your beam size.
allowed_actions : ``List[Set]``, optional
If the ``DecoderTrainer`` has constraints on which actions need to be evaluated (e.g.,
maximum marginal likelihood only needs to evaluate action sequences in a given set),
you can pass those constraints here, to avoid constructing state objects unnecessarily.
If there are no constraints from the trainer, passing a value of ``None`` here will
allow all actions to be considered.
This is a list because it is `batched` - every instance in the batch has a set of
allowed actions. Note that the size of this list is the ``group_size`` in the
``State``, `not` the ``batch_size`` of ``model.forward``. The training algorithm needs
to convert from the `batched` allowed action sequences that it has to a `grouped`
allowed action sequence list.
Returns
-------
next_states : ``List[State]``
A list of next states, ordered by score.
"""
raise NotImplementedError
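# From the caller's side, the two decoding regimes described above look roughly like this
# (illustrative sketch; ``state``, ``transition_function``, ``beam_size`` and ``allowed_actions``
# are assumed to exist):
#
#     # Beam search: only construct the top ``beam_size`` next states per batch instance.
#     next_states = transition_function.take_step(state, max_actions=beam_size)
#
#     # Maximum marginal likelihood: only construct states for the given gold action sequences,
#     # one allowed-action set per group element; no sorting is needed in this case.
#     next_states = transition_function.take_step(state, allowed_actions=allowed_actions)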
| allennlp-semparse-master | allennlp_semparse/state_machines/transition_functions/transition_function.py |
from collections import defaultdict
from typing import Any, Dict, List, Set, Tuple
import torch
from torch.nn.modules.rnn import LSTM, LSTMCell
from torch.nn.modules.linear import Linear
from allennlp.modules import Attention
from allennlp.nn import util, Activation
from allennlp_semparse.state_machines.states import RnnStatelet, GrammarBasedState
from allennlp_semparse.state_machines.transition_functions.transition_function import (
TransitionFunction,
)
class BasicTransitionFunction(TransitionFunction[GrammarBasedState]):
"""
This is a typical transition function for a state-based decoder. We use an LSTM to track
decoder state, and at every timestep we compute an attention over the input question/utterance
to help in selecting the action. All actions have an embedding, and we use a dot product
between a predicted action embedding and the allowed actions to compute a distribution over
actions at each timestep.
We allow the first action to be predicted separately from everything else. This is optional,
and is because that's how the original WikiTableQuestions semantic parser was written. The
intuition is that maybe you want to predict the type of your output program outside of the
typical LSTM decoder (or maybe Jayant just didn't realize this could be treated as another
action...).
Parameters
----------
encoder_output_dim : ``int``
action_embedding_dim : ``int``
input_attention : ``Attention``
activation : ``Activation``, optional (default=relu)
The activation that gets applied to the decoder LSTM input and to the action query.
add_action_bias : ``bool``, optional (default=True)
If ``True``, there has been a bias dimension added to the embedding of each action, which
gets used when predicting the next action. We add a dimension of ones to our predicted
action vector in this case to account for that.
dropout : ``float`` (optional, default=0.0)
num_layers: ``int``, (optional, default=1)
The number of layers in the decoder LSTM.
"""
def __init__(
self,
encoder_output_dim: int,
action_embedding_dim: int,
input_attention: Attention,
activation: Activation = Activation.by_name("relu")(),
add_action_bias: bool = True,
dropout: float = 0.0,
num_layers: int = 1,
) -> None:
super().__init__()
self._input_attention = input_attention
self._add_action_bias = add_action_bias
self._activation = activation
self._num_layers = num_layers
# Decoder output dim needs to be the same as the encoder output dim since we initialize the
# hidden state of the decoder with the final hidden state of the encoder.
output_dim = encoder_output_dim
input_dim = output_dim
# Our decoder input will be the concatenation of the attended encoder hidden state (i.e.,
# the attended question encoding) and the previous action embedding, and we'll project that
# down to the decoder's `input_dim`, which we arbitrarily set to be the same as
# `output_dim`.
self._input_projection_layer = Linear(encoder_output_dim + action_embedding_dim, input_dim)
# Before making a prediction, we'll compute an attention over the input given our updated
# hidden state. Then we concatenate those with the decoder state and project to
# `action_embedding_dim` to make a prediction.
self._output_projection_layer = Linear(
output_dim + encoder_output_dim, action_embedding_dim
)
if self._num_layers > 1:
self._decoder_cell = LSTM(input_dim, output_dim, self._num_layers)
else:
# We use a ``LSTMCell`` if we just have one layer because it is slightly faster since we are
# just running the LSTM for one step each time.
self._decoder_cell = LSTMCell(input_dim, output_dim)
if dropout > 0:
self._dropout = torch.nn.Dropout(p=dropout)
else:
self._dropout = lambda x: x
def take_step(
self,
state: GrammarBasedState,
max_actions: int = None,
allowed_actions: List[Set[int]] = None,
) -> List[GrammarBasedState]:
# Taking a step in the decoder consists of three main parts. First, we'll construct the
# input to the decoder and update the decoder's hidden state. Second, we'll use this new
# hidden state (and maybe other information) to predict an action. Finally, we will
# construct new states for the next step. Each new state corresponds to one valid action
# that can be taken from the current state, and they are ordered by their probability of
# being selected.
updated_state = self._update_decoder_state(state)
batch_results = self._compute_action_probabilities(
state,
updated_state["hidden_state"],
updated_state["attention_weights"],
updated_state["predicted_action_embeddings"],
)
new_states = self._construct_next_states(
state, updated_state, batch_results, max_actions, allowed_actions
)
return new_states
def _update_decoder_state(self, state: GrammarBasedState) -> Dict[str, torch.Tensor]:
# For updating the decoder, we're doing a bunch of tensor operations that can be batched
# without much difficulty. So, we take all group elements and batch their tensors together
# before doing these decoder operations.
group_size = len(state.batch_indices)
attended_question = torch.stack([rnn_state.attended_input for rnn_state in state.rnn_state])
if self._num_layers > 1:
hidden_state = torch.stack([rnn_state.hidden_state for rnn_state in state.rnn_state], 1)
memory_cell = torch.stack([rnn_state.memory_cell for rnn_state in state.rnn_state], 1)
else:
hidden_state = torch.stack([rnn_state.hidden_state for rnn_state in state.rnn_state])
memory_cell = torch.stack([rnn_state.memory_cell for rnn_state in state.rnn_state])
previous_action_embedding = torch.stack(
[rnn_state.previous_action_embedding for rnn_state in state.rnn_state]
)
# (group_size, decoder_input_dim)
projected_input = self._input_projection_layer(
torch.cat([attended_question, previous_action_embedding], -1)
)
decoder_input = self._activation(projected_input)
if self._num_layers > 1:
_, (hidden_state, memory_cell) = self._decoder_cell(
decoder_input.unsqueeze(0), (hidden_state, memory_cell)
)
else:
hidden_state, memory_cell = self._decoder_cell(
decoder_input, (hidden_state, memory_cell)
)
hidden_state = self._dropout(hidden_state)
# (group_size, encoder_output_dim)
encoder_outputs = torch.stack(
[state.rnn_state[0].encoder_outputs[i] for i in state.batch_indices]
)
encoder_output_mask = torch.stack(
[state.rnn_state[0].encoder_output_mask[i] for i in state.batch_indices]
)
if self._num_layers > 1:
attended_question, attention_weights = self.attend_on_question(
hidden_state[-1], encoder_outputs, encoder_output_mask
)
action_query = torch.cat([hidden_state[-1], attended_question], dim=-1)
else:
attended_question, attention_weights = self.attend_on_question(
hidden_state, encoder_outputs, encoder_output_mask
)
action_query = torch.cat([hidden_state, attended_question], dim=-1)
# (group_size, action_embedding_dim)
projected_query = self._activation(self._output_projection_layer(action_query))
predicted_action_embeddings = self._dropout(projected_query)
if self._add_action_bias:
# NOTE: It's important that this happens right before the dot product with the action
# embeddings. Otherwise this isn't a proper bias. We do it here instead of right next
# to the `.mm` below just so we only do it once for the whole group.
ones = predicted_action_embeddings.new([[1] for _ in range(group_size)])
predicted_action_embeddings = torch.cat([predicted_action_embeddings, ones], dim=-1)
return {
"hidden_state": hidden_state,
"memory_cell": memory_cell,
"attended_question": attended_question,
"attention_weights": attention_weights,
"predicted_action_embeddings": predicted_action_embeddings,
}
def _compute_action_probabilities(
self,
state: GrammarBasedState,
hidden_state: torch.Tensor,
attention_weights: torch.Tensor,
predicted_action_embeddings: torch.Tensor,
) -> Dict[int, List[Tuple[int, Any, Any, Any, List[int]]]]:
# We take a couple of extra arguments here because subclasses might use them.
# In this section we take our predicted action embedding and compare it to the available
# actions in our current state (which might be different for each group element). For
# computing action scores, we'll forget about doing batched / grouped computation, as it
# adds too much complexity and doesn't speed things up, anyway, with the operations we're
# doing here. This means we don't need any action masks, as we'll only get the right
# lengths for what we're computing.
group_size = len(state.batch_indices)
actions = state.get_valid_actions()
batch_results: Dict[int, List[Tuple[int, Any, Any, Any, List[int]]]] = defaultdict(list)
for group_index in range(group_size):
instance_actions = actions[group_index]
predicted_action_embedding = predicted_action_embeddings[group_index]
action_embeddings, output_action_embeddings, action_ids = instance_actions["global"]
# This is just a matrix product between a (num_actions, embedding_dim) matrix and an
# (embedding_dim, 1) matrix.
action_logits = action_embeddings.mm(predicted_action_embedding.unsqueeze(-1)).squeeze(
-1
)
current_log_probs = torch.nn.functional.log_softmax(action_logits, dim=-1)
# This is now the total score for each state after taking each action. We're going to
# sort by this later, so it's important that this is the total score, not just the
# score for the current action.
log_probs = state.score[group_index] + current_log_probs
batch_results[state.batch_indices[group_index]].append(
(group_index, log_probs, current_log_probs, output_action_embeddings, action_ids)
)
return batch_results
def _construct_next_states(
self,
state: GrammarBasedState,
updated_rnn_state: Dict[str, torch.Tensor],
batch_action_probs: Dict[int, List[Tuple[int, Any, Any, Any, List[int]]]],
max_actions: int,
allowed_actions: List[Set[int]],
):
# We'll yield a bunch of states here that all have a `group_size` of 1, so that the
# learning algorithm can decide how many of these it wants to keep, and it can just regroup
# them later, as that's a really easy operation.
#
# We first define a `make_state` method, as in the logic that follows we want to create
# states in a couple of different branches, and we don't want to duplicate the
# state-creation logic. This method creates a closure using variables from the method, so
# it doesn't make sense to pull it out of here.
# Each group index here might get accessed multiple times, and doing the slicing operation
# each time is more expensive than doing it once upfront. These three lines give about a
# 10% speedup in training time.
group_size = len(state.batch_indices)
chunk_index = 1 if self._num_layers > 1 else 0
hidden_state = [
x.squeeze(chunk_index)
for x in updated_rnn_state["hidden_state"].chunk(group_size, chunk_index)
]
memory_cell = [
x.squeeze(chunk_index)
for x in updated_rnn_state["memory_cell"].chunk(group_size, chunk_index)
]
attended_question = [
x.squeeze(0) for x in updated_rnn_state["attended_question"].chunk(group_size, 0)
]
def make_state(
group_index: int, action: int, new_score: torch.Tensor, action_embedding: torch.Tensor
) -> GrammarBasedState:
new_rnn_state = RnnStatelet(
hidden_state[group_index],
memory_cell[group_index],
action_embedding,
attended_question[group_index],
state.rnn_state[group_index].encoder_outputs,
state.rnn_state[group_index].encoder_output_mask,
)
batch_index = state.batch_indices[group_index]
for i, _, current_log_probs, _, actions in batch_action_probs[batch_index]:
if i == group_index:
considered_actions = actions
probabilities = current_log_probs.exp().cpu()
break
return state.new_state_from_group_index(
group_index,
action,
new_score,
new_rnn_state,
considered_actions,
probabilities,
updated_rnn_state["attention_weights"],
)
new_states = []
for _, results in batch_action_probs.items():
if allowed_actions and not max_actions:
# If we're given a set of allowed actions, and we're not just keeping the top k of
# them, we don't need to do any sorting, so we can speed things up quite a bit.
for group_index, log_probs, _, action_embeddings, actions in results:
for log_prob, action_embedding, action in zip(
log_probs, action_embeddings, actions
):
if action in allowed_actions[group_index]:
new_states.append(
make_state(group_index, action, log_prob, action_embedding)
)
else:
# In this case, we need to sort the actions. We'll do that on CPU, as it's easier,
# and our action list is on the CPU, anyway.
group_indices = []
group_log_probs: List[torch.Tensor] = []
group_action_embeddings = []
group_actions = []
for group_index, log_probs, _, action_embeddings, actions in results:
group_indices.extend([group_index] * len(actions))
group_log_probs.append(log_probs)
group_action_embeddings.append(action_embeddings)
group_actions.extend(actions)
log_probs = torch.cat(group_log_probs, dim=0)
action_embeddings = torch.cat(group_action_embeddings, dim=0)
log_probs_cpu = log_probs.data.cpu().numpy().tolist()
batch_states = [
(
log_probs_cpu[i],
group_indices[i],
log_probs[i],
action_embeddings[i],
group_actions[i],
)
for i in range(len(group_actions))
if (
not allowed_actions or group_actions[i] in allowed_actions[group_indices[i]]
)
]
# We use a key here to make sure we're not trying to compare anything on the GPU.
batch_states.sort(key=lambda x: x[0], reverse=True)
if max_actions:
batch_states = batch_states[:max_actions]
for _, group_index, log_prob, action_embedding, action in batch_states:
new_states.append(make_state(group_index, action, log_prob, action_embedding))
return new_states
def attend_on_question(
self, query: torch.Tensor, encoder_outputs: torch.Tensor, encoder_output_mask: torch.Tensor
) -> Tuple[torch.Tensor, torch.Tensor]:
"""
Given a query (which is typically the decoder hidden state), compute an attention over the
output of the question encoder, and return a weighted sum of the question representations
given this attention. We also return the attention weights themselves.
This is a simple computation, but we have it as a separate method so that the ``forward``
method on the main parser module can call it on the initial hidden state, to simplify the
logic in ``take_step``.
"""
# (group_size, question_length)
question_attention_weights = self._input_attention(
query, encoder_outputs, encoder_output_mask
)
# (group_size, encoder_output_dim)
attended_question = util.weighted_sum(encoder_outputs, question_attention_weights)
return attended_question, question_attention_weights
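# A minimal construction sketch (illustrative only; the dimensions and the attention module are
# arbitrary choices for the example, not defaults of this class):
#
#     from allennlp.modules.attention import DotProductAttention
#
#     transition_function = BasicTransitionFunction(
#         encoder_output_dim=200,
#         action_embedding_dim=100,
#         input_attention=DotProductAttention(),
#         num_layers=1,
#     )
#
# Trainers then call ``take_step`` on a (possibly grouped) ``GrammarBasedState`` and get back a
# list of single-element next states sorted by score.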
| allennlp-semparse-master | allennlp_semparse/state_machines/transition_functions/basic_transition_function.py |
from collections import defaultdict
from typing import Any, Dict, List, Tuple
import torch
from torch.nn import Parameter
from allennlp.common.checks import check_dimensions_match
from allennlp.modules import Attention, FeedForward
from allennlp.nn import Activation
from allennlp_semparse.state_machines.states import CoverageState, ChecklistStatelet
from allennlp_semparse.state_machines.transition_functions.coverage_transition_function import (
CoverageTransitionFunction,
)
class LinkingCoverageTransitionFunction(CoverageTransitionFunction):
"""
Combines both linking and coverage on top of the ``BasicTransitionFunction`` (which is just an
LSTM decoder with attention). This adds the ability to consider `linked` actions in addition
to global (embedded) actions, and it adds a coverage penalty over the `output action sequence`,
combining the :class:`LinkingTransitionFunction` with the :class:`CoverageTransitionFunction`.
The one thing that's unique to this class is how the coverage penalty interacts with linked
actions. Instead of boosting the action's embedding, as we do in the
``CoverageTransitionFunction``, we boost the action's logit directly (as there is no action
embedding for linked actions).
Parameters
----------
encoder_output_dim : ``int``
action_embedding_dim : ``int``
input_attention : ``Attention``
activation : ``Activation``, optional (default=relu)
The activation that gets applied to the decoder LSTM input and to the action query.
add_action_bias : ``bool``, optional (default=True)
If ``True``, there has been a bias dimension added to the embedding of each action, which
gets used when predicting the next action. We add a dimension of ones to our predicted
action vector in this case to account for that.
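    mixture_feedforward : ``FeedForward`` optional (default=None)
        If given, we'll use this to compute a mixture probability between global actions and linked
        actions given the hidden state at every timestep of decoding, instead of concatenating the
        logits for both (where the logits may not be compatible with each other).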
dropout : ``float`` (optional, default=0.0)
"""
def __init__(
self,
encoder_output_dim: int,
action_embedding_dim: int,
input_attention: Attention,
activation: Activation = Activation.by_name("relu")(),
add_action_bias: bool = True,
mixture_feedforward: FeedForward = None,
dropout: float = 0.0,
) -> None:
super().__init__(
encoder_output_dim=encoder_output_dim,
action_embedding_dim=action_embedding_dim,
input_attention=input_attention,
activation=activation,
add_action_bias=add_action_bias,
dropout=dropout,
)
self._linked_checklist_multiplier = Parameter(torch.FloatTensor([1.0]))
self._mixture_feedforward = mixture_feedforward
if mixture_feedforward is not None:
check_dimensions_match(
encoder_output_dim,
mixture_feedforward.get_input_dim(),
"hidden state embedding dim",
"mixture feedforward input dim",
)
check_dimensions_match(
mixture_feedforward.get_output_dim(),
1,
"mixture feedforward output dim",
"dimension for scalar value",
)
def _compute_action_probabilities( # type: ignore
self,
state: CoverageState,
hidden_state: torch.Tensor,
attention_weights: torch.Tensor,
predicted_action_embeddings: torch.Tensor,
) -> Dict[int, List[Tuple[int, Any, Any, Any, List[int]]]]:
# In this section we take our predicted action embedding and compare it to the available
# actions in our current state (which might be different for each group element). For
# computing action scores, we'll forget about doing batched / grouped computation, as it
# adds too much complexity and doesn't speed things up, anyway, with the operations we're
# doing here. This means we don't need any action masks, as we'll only get the right
# lengths for what we're computing.
group_size = len(state.batch_indices)
actions = state.get_valid_actions()
batch_results: Dict[int, List[Tuple[int, Any, Any, Any, List[int]]]] = defaultdict(list)
for group_index in range(group_size):
instance_actions = actions[group_index]
predicted_action_embedding = predicted_action_embeddings[group_index]
action_ids: List[int] = []
if "global" in instance_actions:
action_embeddings, output_action_embeddings, embedded_actions = instance_actions[
"global"
]
                # This embedding addition is the only difference between the logic here and the
                # corresponding logic in the ``LinkingTransitionFunction``.
embedding_addition = self._get_predicted_embedding_addition(
state.checklist_state[group_index], embedded_actions, action_embeddings
)
addition = embedding_addition * self._checklist_multiplier
predicted_action_embedding = predicted_action_embedding + addition
# This is just a matrix product between a (num_actions, embedding_dim) matrix and an
# (embedding_dim, 1) matrix.
embedded_action_logits = action_embeddings.mm(
predicted_action_embedding.unsqueeze(-1)
).squeeze(-1)
action_ids += embedded_actions
else:
embedded_action_logits = None
output_action_embeddings = None
if "linked" in instance_actions:
linking_scores, type_embeddings, linked_actions = instance_actions["linked"]
action_ids += linked_actions
                # linking_scores: (num_entities, num_question_tokens)
                # linked_action_logits: (num_entities,)
linked_action_logits = linking_scores.mm(
attention_weights[group_index].unsqueeze(-1)
).squeeze(-1)
linked_logits_addition = self._get_linked_logits_addition(
state.checklist_state[group_index], linked_actions, linked_action_logits
)
addition = linked_logits_addition * self._linked_checklist_multiplier
linked_action_logits = linked_action_logits + addition
# The `output_action_embeddings` tensor gets used later as the input to the next
# decoder step. For linked actions, we don't have any action embedding, so we use
# the entity type instead.
if output_action_embeddings is None:
output_action_embeddings = type_embeddings
else:
output_action_embeddings = torch.cat(
[output_action_embeddings, type_embeddings], dim=0
)
if self._mixture_feedforward is not None:
# The linked and global logits are combined with a mixture weight to prevent the
# linked_action_logits from dominating the embedded_action_logits if a softmax
# was applied on both together.
mixture_weight = self._mixture_feedforward(hidden_state[group_index])
mix1 = torch.log(mixture_weight)
mix2 = torch.log(1 - mixture_weight)
entity_action_probs = (
torch.nn.functional.log_softmax(linked_action_logits, dim=-1) + mix1
)
if embedded_action_logits is None:
current_log_probs = entity_action_probs
else:
embedded_action_probs = (
torch.nn.functional.log_softmax(embedded_action_logits, dim=-1) + mix2
)
current_log_probs = torch.cat(
[embedded_action_probs, entity_action_probs], dim=-1
)
else:
if embedded_action_logits is None:
current_log_probs = torch.nn.functional.log_softmax(
linked_action_logits, dim=-1
)
else:
action_logits = torch.cat(
[embedded_action_logits, linked_action_logits], dim=-1
)
current_log_probs = torch.nn.functional.log_softmax(action_logits, dim=-1)
else:
current_log_probs = torch.nn.functional.log_softmax(embedded_action_logits, dim=-1)
# This is now the total score for each state after taking each action. We're going to
# sort by this later, so it's important that this is the total score, not just the
# score for the current action.
log_probs = state.score[group_index] + current_log_probs
batch_results[state.batch_indices[group_index]].append(
(group_index, log_probs, current_log_probs, output_action_embeddings, action_ids)
)
return batch_results
@staticmethod
def _get_linked_logits_addition(
checklist_state: ChecklistStatelet, action_ids: List[int], action_logits: torch.Tensor
) -> torch.Tensor:
"""
Gets the logits of desired terminal actions yet to be produced by the decoder, and
returns them for the decoder to add to the prior action logits, biasing the model towards
predicting missing linked actions.
"""
# Our basic approach here will be to figure out which actions we want to bias, by doing
# some fancy indexing work, then multiply the action embeddings by a mask for those
# actions, and return the sum of the result.
# Shape: (num_terminal_actions, 1). This is 1 if we still want to predict something on the
# checklist, and 0 otherwise.
checklist_balance = checklist_state.get_balance().clamp(min=0)
# (num_terminal_actions, 1)
actions_in_agenda = checklist_state.terminal_actions
# (1, num_current_actions)
action_id_tensor = checklist_balance.new(action_ids).long().unsqueeze(0)
# Shape: (num_terminal_actions, num_current_actions). Will have a value of 1 if the
# terminal action i is our current action j, and a value of 0 otherwise. Because both sets
# of actions are free of duplicates, there will be at most one non-zero value per current
# action, and per terminal action.
current_agenda_actions = (actions_in_agenda == action_id_tensor).float()
# Shape: (num_current_actions,). With the inner multiplication, we remove any current
# agenda actions that are not in our checklist balance, then we sum over the terminal
# action dimension, which will have a sum of at most one. So this will be a 0/1 tensor,
# where a 1 means to encourage the current action in that position.
actions_to_encourage = torch.sum(current_agenda_actions * checklist_balance, dim=0)
        # Shape: (num_current_actions,). These are the logits of the actions that we want the
        # model to prefer, with zeros for all other actions.
logit_addition = action_logits * actions_to_encourage
return logit_addition
| allennlp-semparse-master | allennlp_semparse/state_machines/transition_functions/linking_coverage_transition_function.py |
from collections import defaultdict
from typing import Any, Dict, List, Tuple
import torch
from torch.nn import Parameter
from allennlp.modules import Attention
from allennlp.nn import Activation
from allennlp_semparse.state_machines.states import CoverageState, ChecklistStatelet
from allennlp_semparse.state_machines.transition_functions.basic_transition_function import (
BasicTransitionFunction,
)
class CoverageTransitionFunction(BasicTransitionFunction):
"""
Adds a coverage penalty to the ``BasicTransitionFunction`` (which is just an LSTM decoder with
attention). This coverage penalty is on the `output action sequence`, and requires an
externally-computed `agenda` of actions that are expected to be produced during decoding, and
encourages the model to select actions on that agenda.
The way that we encourage the model to select actions on the agenda is that we add the
embeddings for actions on the agenda (that are available at this decoding step and haven't yet
been taken) to the predicted action embedding. We weight that addition by a learned multiplier
that gets initialized to 1.
Parameters
----------
encoder_output_dim : ``int``
action_embedding_dim : ``int``
input_attention : ``Attention``
activation : ``Activation``, optional (default=relu)
The activation that gets applied to the decoder LSTM input and to the action query.
add_action_bias : ``bool``, optional (default=True)
If ``True``, there has been a bias dimension added to the embedding of each action, which
gets used when predicting the next action. We add a dimension of ones to our predicted
action vector in this case to account for that.
dropout : ``float`` (optional, default=0.0)
"""
def __init__(
self,
encoder_output_dim: int,
action_embedding_dim: int,
input_attention: Attention,
activation: Activation = Activation.by_name("relu")(),
add_action_bias: bool = True,
dropout: float = 0.0,
) -> None:
super().__init__(
encoder_output_dim=encoder_output_dim,
action_embedding_dim=action_embedding_dim,
input_attention=input_attention,
activation=activation,
add_action_bias=add_action_bias,
dropout=dropout,
)
# See the class docstring for a description of what this does.
self._checklist_multiplier = Parameter(torch.FloatTensor([1.0]))
def _compute_action_probabilities( # type: ignore
self,
state: CoverageState,
hidden_state: torch.Tensor,
attention_weights: torch.Tensor,
predicted_action_embeddings: torch.Tensor,
) -> Dict[int, List[Tuple[int, Any, Any, Any, List[int]]]]:
# In this section we take our predicted action embedding and compare it to the available
# actions in our current state (which might be different for each group element). For
# computing action scores, we'll forget about doing batched / grouped computation, as it
# adds too much complexity and doesn't speed things up, anyway, with the operations we're
# doing here. This means we don't need any action masks, as we'll only get the right
# lengths for what we're computing.
group_size = len(state.batch_indices)
actions = state.get_valid_actions()
batch_results: Dict[int, List[Tuple[int, Any, Any, Any, List[int]]]] = defaultdict(list)
for group_index in range(group_size):
instance_actions = actions[group_index]
predicted_action_embedding = predicted_action_embeddings[group_index]
action_embeddings, output_action_embeddings, action_ids = instance_actions["global"]
            # This embedding addition is the only difference between the logic here and the
# corresponding logic in the super class.
embedding_addition = self._get_predicted_embedding_addition(
state.checklist_state[group_index], action_ids, action_embeddings
)
addition = embedding_addition * self._checklist_multiplier
predicted_action_embedding = predicted_action_embedding + addition
# This is just a matrix product between a (num_actions, embedding_dim) matrix and an
# (embedding_dim, 1) matrix.
action_logits = action_embeddings.mm(predicted_action_embedding.unsqueeze(-1)).squeeze(
-1
)
current_log_probs = torch.nn.functional.log_softmax(action_logits, dim=-1)
# This is now the total score for each state after taking each action. We're going to
# sort by this later, so it's important that this is the total score, not just the
# score for the current action.
log_probs = state.score[group_index] + current_log_probs
batch_results[state.batch_indices[group_index]].append(
(group_index, log_probs, current_log_probs, output_action_embeddings, action_ids)
)
return batch_results
def _get_predicted_embedding_addition(
self,
checklist_state: ChecklistStatelet,
action_ids: List[int],
action_embeddings: torch.Tensor,
) -> torch.Tensor:
"""
Gets the embeddings of desired terminal actions yet to be produced by the decoder, and
returns their sum for the decoder to add it to the predicted embedding to bias the
prediction towards missing actions.
"""
# Our basic approach here will be to figure out which actions we want to bias, by doing
# some fancy indexing work, then multiply the action embeddings by a mask for those
# actions, and return the sum of the result.
# Shape: (num_terminal_actions, 1). This is 1 if we still want to predict something on the
# checklist, and 0 otherwise.
checklist_balance = checklist_state.get_balance().clamp(min=0)
# (num_terminal_actions, 1)
actions_in_agenda = checklist_state.terminal_actions
# (1, num_current_actions)
action_id_tensor = checklist_balance.new(action_ids).long().unsqueeze(0)
# Shape: (num_terminal_actions, num_current_actions). Will have a value of 1 if the
# terminal action i is our current action j, and a value of 0 otherwise. Because both sets
# of actions are free of duplicates, there will be at most one non-zero value per current
# action, and per terminal action.
current_agenda_actions = (actions_in_agenda == action_id_tensor).float()
# Shape: (num_current_actions,). With the inner multiplication, we remove any current
# agenda actions that are not in our checklist balance, then we sum over the terminal
# action dimension, which will have a sum of at most one. So this will be a 0/1 tensor,
# where a 1 means to encourage the current action in that position.
actions_to_encourage = torch.sum(current_agenda_actions * checklist_balance, dim=0)
# Shape: (action_embedding_dim,). This is the sum of the action embeddings that we want
# the model to prefer.
embedding_addition = torch.sum(
action_embeddings * actions_to_encourage.unsqueeze(1), dim=0, keepdim=False
)
if self._add_action_bias:
# If we're adding an action bias, the last dimension of the action embedding is a bias
# weight. We don't want this addition to affect the bias (TODO(mattg): or do we?), so
# we zero out that dimension here.
embedding_addition[-1] = 0
return embedding_addition
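# A toy walk-through of the indexing in ``_get_predicted_embedding_addition`` (numbers are made
# up): suppose the agenda contains terminal actions [7, 12], action 7 has already been produced
# (checklist balance [[0], [1]]), and the currently valid action ids are [3, 12, 7]. Then
#
#     current_agenda_actions = [[0, 0, 1],   # terminal action 7 against current actions [3, 12, 7]
#                               [0, 1, 0]]   # terminal action 12 against current actions
#     actions_to_encourage   =  [0, 1, 0]    # only action 12 still needs encouragement
#
# so the returned addition is just the embedding of action 12, which later gets scaled by the
# learned checklist multiplier and added to the predicted action embedding.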
| allennlp-semparse-master | allennlp_semparse/state_machines/transition_functions/coverage_transition_function.py |
from typing import Any, List, Sequence
import torch
from allennlp.nn import util
from allennlp_semparse.fields.production_rule_field import ProductionRule
from allennlp_semparse.state_machines.states.checklist_statelet import ChecklistStatelet
from allennlp_semparse.state_machines.states.grammar_based_state import GrammarBasedState
from allennlp_semparse.state_machines.states.grammar_statelet import GrammarStatelet
from allennlp_semparse.state_machines.states.rnn_statelet import RnnStatelet
class CoverageState(GrammarBasedState):
"""
This ``State`` adds one field to a ``GrammarBasedState``: a ``ChecklistStatelet``
    that is used to specify a set of actions that should be taken during decoding, and keep track of
which of those actions have already been selected.
We only provide documentation for the ``ChecklistStatelet`` here; for the rest, see
:class:`GrammarBasedState`.
Parameters
----------
batch_indices : ``List[int]``
action_history : ``List[List[int]]``
score : ``List[torch.Tensor]``
rnn_state : ``List[RnnStatelet]``
grammar_state : ``List[GrammarStatelet]``
checklist_state : ``List[ChecklistStatelet]``
This holds the current checklist state for each element of the group. The
``ChecklistStatelet`` keeps track of which actions are preferred by some agenda, and which
of those have already been selected during decoding.
possible_actions : ``List[List[ProductionRule]]``
extras : ``List[Any]``, optional (default=None)
debug_info : ``List[Any]``, optional (default=None).
"""
def __init__(
self,
batch_indices: List[int],
action_history: List[List[int]],
score: List[torch.Tensor],
rnn_state: List[RnnStatelet],
grammar_state: List[GrammarStatelet],
checklist_state: List[ChecklistStatelet],
possible_actions: List[List[ProductionRule]],
extras: List[Any] = None,
debug_info: List = None,
) -> None:
super().__init__(
batch_indices=batch_indices,
action_history=action_history,
score=score,
rnn_state=rnn_state,
grammar_state=grammar_state,
possible_actions=possible_actions,
extras=extras,
debug_info=debug_info,
)
self.checklist_state = checklist_state
def new_state_from_group_index(
self,
group_index: int,
action: int,
new_score: torch.Tensor,
new_rnn_state: RnnStatelet,
considered_actions: List[int] = None,
action_probabilities: List[float] = None,
attention_weights: torch.Tensor = None,
) -> "CoverageState":
super_class_state = super().new_state_from_group_index(
group_index=group_index,
action=action,
new_score=new_score,
new_rnn_state=new_rnn_state,
considered_actions=considered_actions,
action_probabilities=action_probabilities,
attention_weights=attention_weights,
)
new_checklist = self.checklist_state[group_index].update(action)
return CoverageState(
batch_indices=super_class_state.batch_indices,
action_history=super_class_state.action_history,
score=super_class_state.score,
rnn_state=super_class_state.rnn_state,
grammar_state=super_class_state.grammar_state,
checklist_state=[new_checklist],
possible_actions=super_class_state.possible_actions,
extras=super_class_state.extras,
debug_info=super_class_state.debug_info,
)
@classmethod
def combine_states(cls, states: Sequence["CoverageState"]) -> "CoverageState": # type: ignore
super_class_state = super().combine_states(states)
checklist_states = [
checklist_state for state in states for checklist_state in state.checklist_state
]
return CoverageState(
batch_indices=super_class_state.batch_indices,
action_history=super_class_state.action_history,
score=super_class_state.score,
rnn_state=super_class_state.rnn_state,
grammar_state=super_class_state.grammar_state,
checklist_state=checklist_states,
possible_actions=super_class_state.possible_actions,
extras=super_class_state.extras,
debug_info=super_class_state.debug_info,
)
def __eq__(self, other):
if isinstance(self, other.__class__):
return all(
[
self.batch_indices == other.batch_indices,
self.action_history == other.action_history,
util.tensors_equal(self.score, other.score, tolerance=1e-3),
util.tensors_equal(self.rnn_state, other.rnn_state, tolerance=1e-4),
self.grammar_state == other.grammar_state,
self.checklist_state == other.checklist_state,
self.possible_actions == other.possible_actions,
self.extras == other.extras,
util.tensors_equal(self.debug_info, other.debug_info, tolerance=1e-6),
]
)
return NotImplemented
| allennlp-semparse-master | allennlp_semparse/state_machines/states/coverage_state.py |
from typing import Callable, Dict, Generic, List, TypeVar
from allennlp.nn import util
ActionRepresentation = TypeVar("ActionRepresentation")
class GrammarStatelet(Generic[ActionRepresentation]):
"""
A ``GrammarStatelet`` keeps track of the currently valid actions at every step of decoding.
This class is relatively simple: we have a non-terminal stack which tracks which non-terminals
we still need to expand. At every timestep of decoding, we take an action that pops something
off of the non-terminal stack, and possibly pushes more things on. The grammar state is
"finished" when the non-terminal stack is empty.
At any point during decoding, you can query this object to get a representation of all of the
valid actions in the current state. The representation is something that you provide when
constructing the initial state, in whatever form you want, and we just hold on to it for you
and return it when you ask. Putting this in here is purely for convenience, to group together
pieces of state that are related to taking actions - if you want to handle the action
representations outside of this class, that would work just fine too.
Parameters
----------
nonterminal_stack : ``List[str]``
Holds the list of non-terminals that still need to be expanded. This starts out as
[START_SYMBOL], and decoding ends when this is empty. Every time we take an action, we
update the non-terminal stack and the context-dependent valid actions, and we use what's on
the stack to decide which actions are valid in the current state.
valid_actions : ``Dict[str, ActionRepresentation]``
A mapping from non-terminals (represented as strings) to all valid expansions of that
non-terminal. The class that constructs this object can pick how it wants the actions to
be represented.
is_nonterminal : ``Callable[[str], bool]``
A function that is used to determine whether each piece of the RHS of the action string is
a non-terminal that needs to be added to the non-terminal stack. You can use
        ``type_declaration.is_nonterminal`` here, or write your own function if that one doesn't
work for your domain.
reverse_productions: ``bool``, optional (default=True)
A flag that reverses the production rules when ``True``. If the production rules are
reversed, then the first non-terminal in the production will be popped off the stack first,
giving us left-to-right production. If this is ``False``, you will get right-to-left
production.
"""
def __init__(
self,
nonterminal_stack: List[str],
valid_actions: Dict[str, ActionRepresentation],
is_nonterminal: Callable[[str], bool],
reverse_productions: bool = True,
) -> None:
self._nonterminal_stack = nonterminal_stack
self._valid_actions = valid_actions
self._is_nonterminal = is_nonterminal
self._reverse_productions = reverse_productions
def is_finished(self) -> bool:
"""
Have we finished producing our logical form? We have finished producing the logical form
if and only if there are no more non-terminals on the stack.
"""
return not self._nonterminal_stack
def get_valid_actions(self) -> ActionRepresentation:
"""
Returns the valid actions in the current grammar state. The `Model` determines what
exactly this looks like when it constructs the `valid_actions` dictionary.
"""
return self._valid_actions[self._nonterminal_stack[-1]]
def take_action(self, production_rule: str) -> "GrammarStatelet":
"""
Takes an action in the current grammar state, returning a new grammar state with whatever
updates are necessary. The production rule is assumed to be formatted as "LHS -> RHS".
This will update the non-terminal stack. Updating the non-terminal stack involves popping
the non-terminal that was expanded off of the stack, then pushing on any non-terminals in
the production rule back on the stack.
For example, if our current ``nonterminal_stack`` is ``["r", "<e,r>", "d"]``, and
``action`` is ``d -> [<e,d>, e]``, the resulting stack will be ``["r", "<e,r>", "e",
"<e,d>"]``.
If ``self._reverse_productions`` is set to ``False`` then we push the non-terminals on in
        their given order, which means that the first non-terminal in the production rule gets
popped off the stack `last`.
"""
left_side, right_side = production_rule.split(" -> ")
assert self._nonterminal_stack[-1] == left_side, (
f"Tried to expand {self._nonterminal_stack[-1]}"
f"but got rule {left_side} -> {right_side}"
)
new_stack = self._nonterminal_stack[:-1]
productions = self._get_productions_from_string(right_side)
if self._reverse_productions:
productions = list(reversed(productions))
for production in productions:
if self._is_nonterminal(production):
new_stack.append(production)
return GrammarStatelet(
nonterminal_stack=new_stack,
valid_actions=self._valid_actions,
is_nonterminal=self._is_nonterminal,
reverse_productions=self._reverse_productions,
)
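    # A minimal usage sketch (illustrative only; the toy grammar and action strings below are
    # made up and are not part of this module). It mirrors the docstring example above:
    # expanding "d" with "d -> [<e,d>, e]" pops "d" off the stack and, because
    # ``reverse_productions`` defaults to True, pushes the right-hand-side non-terminals in
    # reverse order, so "<e,d>" ends up on top and is expanded first.
    #
    #     state = GrammarStatelet(
    #         nonterminal_stack=["r", "<e,r>", "d"],
    #         valid_actions={"d": ["d -> [<e,d>, e]"]},
    #         is_nonterminal=lambda symbol: symbol in {"r", "<e,r>", "d", "<e,d>", "e"},
    #     )
    #     state = state.take_action("d -> [<e,d>, e]")
    #     assert state._nonterminal_stack == ["r", "<e,r>", "e", "<e,d>"]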
@staticmethod
def _get_productions_from_string(production_string: str) -> List[str]:
"""
Takes a string like '[<d,d>, d]' and parses it into a list like ['<d,d>', 'd']. For
production strings that are not lists, like '<e,d>', we return a single-element list:
['<e,d>'].
"""
if production_string[0] == "[":
return production_string[1:-1].split(", ")
else:
return [production_string]
def __eq__(self, other):
if isinstance(self, other.__class__):
return all(
[
self._nonterminal_stack == other._nonterminal_stack,
util.tensors_equal(self._valid_actions, other._valid_actions),
self._is_nonterminal.__code__ == other._is_nonterminal.__code__,
self._reverse_productions == other._reverse_productions,
]
)
return NotImplemented
| allennlp-semparse-master | allennlp_semparse/state_machines/states/grammar_statelet.py |
"""
This module contains the ``State`` abstraction for defining state-machine-based decoders, and some
pre-built concrete ``State`` classes for various kinds of decoding (e.g., a ``GrammarBasedState``
for doing grammar-based decoding, where the output is a sequence of production rules from a
grammar).
The module also has some ``Statelet`` classes to help represent the ``State`` by grouping together
related pieces, including ``RnnStatelet``, which you can use to keep track of a decoder RNN's
internal state, ``GrammarStatelet``, which keeps track of what actions are allowed at each timestep
of decoding (if your outputs are production rules from a grammar), and ``ChecklistStatelet`` that
keeps track of coverage information if you are training a coverage-based parser.
"""
from allennlp_semparse.state_machines.states.checklist_statelet import ChecklistStatelet
from allennlp_semparse.state_machines.states.coverage_state import CoverageState
from allennlp_semparse.state_machines.states.grammar_based_state import GrammarBasedState
from allennlp_semparse.state_machines.states.grammar_statelet import GrammarStatelet
from allennlp_semparse.state_machines.states.lambda_grammar_statelet import LambdaGrammarStatelet
from allennlp_semparse.state_machines.states.rnn_statelet import RnnStatelet
from allennlp_semparse.state_machines.states.state import State
| allennlp-semparse-master | allennlp_semparse/state_machines/states/__init__.py |
from typing import List
import torch
from allennlp.nn import util
class RnnStatelet:
"""
This class keeps track of all of decoder-RNN-related variables that you need during decoding.
This includes things like the current decoder hidden state, the memory cell (for LSTM
decoders), the encoder output that you need for computing attentions, and so on.
This is intended to be used `inside` a ``State``, which likely has other things it has to keep
track of for doing constrained decoding.
Parameters
----------
hidden_state : ``torch.Tensor``
This holds the LSTM hidden state, with shape ``(decoder_output_dim,)`` if the decoder
has 1 layer and ``(num_layers, decoder_output_dim)`` otherwise.
memory_cell : ``torch.Tensor``
This holds the LSTM memory cell, with shape ``(decoder_output_dim,)`` if the decoder has
1 layer and ``(num_layers, decoder_output_dim)`` otherwise.
previous_action_embedding : ``torch.Tensor``
This holds the embedding for the action we took at the last timestep (which gets input to
the decoder). Has shape ``(action_embedding_dim,)``.
attended_input : ``torch.Tensor``
This holds the attention-weighted sum over the input representations that we computed in
the previous timestep. We keep this as part of the state because we use the previous
attention as part of our decoder cell update. Has shape ``(encoder_output_dim,)``.
encoder_outputs : ``List[torch.Tensor]``
A list of variables, each of shape ``(input_sequence_length, encoder_output_dim)``,
containing the encoder outputs at each timestep. The list is over batch elements, and we
do the input this way so we can easily do a ``torch.cat`` on a list of indices into this
batched list.
Note that all of the above parameters are single tensors, while the encoder outputs and
mask are lists of length ``batch_size``. We always pass around the encoder outputs and
mask unmodified, regardless of what's in the grouping for this state. We'll use the
``batch_indices`` for the group to pull pieces out of these lists when we're ready to
actually do some computation.
encoder_output_mask : ``List[torch.Tensor]``
A list of variables, each of shape ``(input_sequence_length,)``, containing a mask over
question tokens for each batch instance. This is a list over batch elements, for the same
reasons as above.
"""
def __init__(
self,
hidden_state: torch.Tensor,
memory_cell: torch.Tensor,
previous_action_embedding: torch.Tensor,
attended_input: torch.Tensor,
encoder_outputs: List[torch.Tensor],
encoder_output_mask: List[torch.Tensor],
) -> None:
self.hidden_state = hidden_state
self.memory_cell = memory_cell
self.previous_action_embedding = previous_action_embedding
self.attended_input = attended_input
self.encoder_outputs = encoder_outputs
self.encoder_output_mask = encoder_output_mask
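    # A minimal construction sketch (illustrative only), with hypothetical dimensions for a
    # single-layer decoder: decoder_output_dim = 4, action_embedding_dim = 3,
    # encoder_output_dim = 4 and input_sequence_length = 5, for a batch of one instance.
    #
    #     import torch
    #     rnn_state = RnnStatelet(
    #         hidden_state=torch.zeros(4),
    #         memory_cell=torch.zeros(4),
    #         previous_action_embedding=torch.zeros(3),
    #         attended_input=torch.zeros(4),
    #         encoder_outputs=[torch.zeros(5, 4)],
    #         encoder_output_mask=[torch.ones(5)],
    #     )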
def __eq__(self, other):
if isinstance(self, other.__class__):
return all(
[
util.tensors_equal(self.hidden_state, other.hidden_state, tolerance=1e-5),
util.tensors_equal(self.memory_cell, other.memory_cell, tolerance=1e-5),
util.tensors_equal(
self.previous_action_embedding,
other.previous_action_embedding,
tolerance=1e-5,
),
util.tensors_equal(self.attended_input, other.attended_input, tolerance=1e-5),
util.tensors_equal(self.encoder_outputs, other.encoder_outputs, tolerance=1e-5),
util.tensors_equal(
self.encoder_output_mask, other.encoder_output_mask, tolerance=1e-5
),
]
)
return NotImplemented
| allennlp-semparse-master | allennlp_semparse/state_machines/states/rnn_statelet.py |
from typing import Any, Dict, List, Sequence, Tuple
import torch
from allennlp_semparse.fields.production_rule_field import ProductionRule
from allennlp_semparse.state_machines.states.grammar_statelet import GrammarStatelet
from allennlp_semparse.state_machines.states.rnn_statelet import RnnStatelet
from allennlp_semparse.state_machines.states.state import State
# This syntax is pretty weird and ugly, but it's necessary to make mypy happy with the API that
# we've defined. We're using generics to make the type of `combine_states` come out right. See
# the note in `state_machines.state.py` for a little more detail.
class GrammarBasedState(State["GrammarBasedState"]):
"""
A generic State that's suitable for most models that do grammar-based decoding. We keep around
a `group` of states, and each element in the group has a few things: a batch index, an action
history, a score, an ``RnnStatelet``, and a ``GrammarStatelet``. We additionally have some
information that's independent of any particular group element: a list of all possible actions
    for all batch instances passed to ``model.forward()``, and an ``extras`` field that you can use
if you really need some extra information about each batch instance (like a string description,
or other metadata).
Finally, we also have a specially-treated, optional ``debug_info`` field. If this is given, it
should be an empty list for each group instance when the initial state is created. In that
case, we will keep around information about the actions considered at each timestep of decoding
and other things that you might want to visualize in a demo. This probably isn't necessary for
training, and to get it right we need to copy a bunch of data structures for each new state, so
it's best used only at evaluation / demo time.
Parameters
----------
batch_indices : ``List[int]``
Passed to super class; see docs there.
action_history : ``List[List[int]]``
Passed to super class; see docs there.
score : ``List[torch.Tensor]``
Passed to super class; see docs there.
rnn_state : ``List[RnnStatelet]``
An ``RnnStatelet`` for every group element. This keeps track of the current decoder hidden
state, the previous decoder output, the output from the encoder (for computing attentions),
and other things that are typical seq2seq decoder state things.
grammar_state : ``List[GrammarStatelet]``
        This holds the current grammar state for each element of the group. The ``GrammarStatelet``
keeps track of which actions are currently valid.
possible_actions : ``List[List[ProductionRule]]``
The list of all possible actions that was passed to ``model.forward()``. We need this so
we can recover production strings, which we need to update grammar states.
extras : ``List[Any]``, optional (default=None)
If you need to keep around some extra data for each instance in the batch, you can put that
in here, without adding another field. This should be used `very sparingly`, as there is
no type checking or anything done on the contents of this field, and it will just be passed
around between ``States`` as-is, without copying.
debug_info : ``List[Any]``, optional (default=None).
"""
def __init__(
self,
batch_indices: List[int],
action_history: List[List[int]],
score: List[torch.Tensor],
rnn_state: List[RnnStatelet],
grammar_state: List[GrammarStatelet],
possible_actions: List[List[ProductionRule]],
extras: List[Any] = None,
debug_info: List = None,
) -> None:
super().__init__(batch_indices, action_history, score)
self.rnn_state = rnn_state
self.grammar_state = grammar_state
self.possible_actions = possible_actions
self.extras = extras
self.debug_info = debug_info
def new_state_from_group_index(
self,
group_index: int,
action: int,
new_score: torch.Tensor,
new_rnn_state: RnnStatelet,
considered_actions: List[int] = None,
action_probabilities: List[float] = None,
attention_weights: torch.Tensor = None,
) -> "GrammarBasedState":
batch_index = self.batch_indices[group_index]
new_action_history = self.action_history[group_index] + [action]
production_rule = self.possible_actions[batch_index][action][0]
new_grammar_state = self.grammar_state[group_index].take_action(production_rule)
if self.debug_info is not None:
attention = attention_weights[group_index] if attention_weights is not None else None
debug_info = {
"considered_actions": considered_actions,
"question_attention": attention,
"probabilities": action_probabilities,
}
new_debug_info = [self.debug_info[group_index] + [debug_info]]
else:
new_debug_info = None
return GrammarBasedState(
batch_indices=[batch_index],
action_history=[new_action_history],
score=[new_score],
rnn_state=[new_rnn_state],
grammar_state=[new_grammar_state],
possible_actions=self.possible_actions,
extras=self.extras,
debug_info=new_debug_info,
)
def print_action_history(self, group_index: int = None) -> None:
scores = self.score if group_index is None else [self.score[group_index]]
batch_indices = (
self.batch_indices if group_index is None else [self.batch_indices[group_index]]
)
histories = (
self.action_history if group_index is None else [self.action_history[group_index]]
)
for score, batch_index, action_history in zip(scores, batch_indices, histories):
print(
" ",
score.detach().cpu().numpy()[0],
[self.possible_actions[batch_index][action][0] for action in action_history],
)
def get_valid_actions(self) -> List[Dict[str, Tuple[torch.Tensor, torch.Tensor, List[int]]]]:
"""
Returns a list of valid actions for each element of the group.
"""
return [state.get_valid_actions() for state in self.grammar_state]
def is_finished(self) -> bool:
if len(self.batch_indices) != 1:
raise RuntimeError("is_finished() is only defined with a group_size of 1")
return self.grammar_state[0].is_finished()
@classmethod
def combine_states(cls, states: Sequence["GrammarBasedState"]) -> "GrammarBasedState":
batch_indices = [batch_index for state in states for batch_index in state.batch_indices]
action_histories = [
action_history for state in states for action_history in state.action_history
]
scores = [score for state in states for score in state.score]
rnn_states = [rnn_state for state in states for rnn_state in state.rnn_state]
grammar_states = [
grammar_state for state in states for grammar_state in state.grammar_state
]
if states[0].debug_info is not None:
debug_info = [debug_info for state in states for debug_info in state.debug_info]
else:
debug_info = None
return GrammarBasedState(
batch_indices=batch_indices,
action_history=action_histories,
score=scores,
rnn_state=rnn_states,
grammar_state=grammar_states,
possible_actions=states[0].possible_actions,
extras=states[0].extras,
debug_info=debug_info,
)
| allennlp-semparse-master | allennlp_semparse/state_machines/states/grammar_based_state.py |
from typing import Callable, Dict, List, Tuple
import torch
from allennlp.nn import util
# We're not actually inheriting from `GrammarStatelet` here because there's very little logic that
# would actually be shared. Doing that doesn't solve our type problems, anyway, because List isn't
# covariant...
class LambdaGrammarStatelet:
"""
A ``LambdaGrammarStatelet`` is a ``GrammarStatelet`` that adds lambda productions. These
productions change the valid actions depending on the current state (you can produce lambda
variables inside the scope of a lambda expression), so we need some extra bookkeeping to keep
track of them.
We only use this for the ``WikiTablesSemanticParser``, and so we just hard-code the action
representation type here, because the way we handle the context / global / linked action
representations is a little convoluted. It would be hard to make this generic in the way that
we use it. So we'll not worry about that until there are other use cases of this class that
need it.
Parameters
----------
nonterminal_stack : ``List[str]``
Holds the list of non-terminals that still need to be expanded. This starts out as
[START_SYMBOL], and decoding ends when this is empty. Every time we take an action, we
update the non-terminal stack and the context-dependent valid actions, and we use what's on
the stack to decide which actions are valid in the current state.
lambda_stacks : ``Dict[Tuple[str, str], List[str]]``
The lambda stack keeps track of when we're in the scope of a lambda function. The
dictionary is keyed by the production rule we are adding (like "r -> x", separated into
left hand side and right hand side, where the LHS is the type of the lambda variable and
the RHS is the variable itself), and the value is a nonterminal stack much like
``nonterminal_stack``. When the stack becomes empty, we remove the lambda entry.
valid_actions : ``Dict[str, Dict[str, Tuple[torch.Tensor, torch.Tensor, List[int]]]]``
A mapping from non-terminals (represented as strings) to all valid expansions of that
non-terminal. The way we represent the valid expansions is a little complicated: we use a
dictionary of `action types`, where the key is the action type (like "global", "linked", or
whatever your model is expecting), and the value is a tuple representing all actions of
that type. The tuple is (input tensor, output tensor, action id). The input tensor has
the representation that is used when `selecting` actions, for all actions of this type.
The output tensor has the representation that is used when feeding the action to the next
step of the decoder (this could just be the same as the input tensor). The action ids are
a list of indices into the main action list for each batch instance.
context_actions : ``Dict[str, Tuple[torch.Tensor, torch.Tensor, int]]``
Variable actions are never included in the ``valid_actions`` dictionary, because they are
only valid depending on the current grammar state. This dictionary maps from the string
representation of all such actions to the tensor representations of the actions. These
will get added onto the "global" key in the ``valid_actions`` when they are allowed.
is_nonterminal : ``Callable[[str], bool]``
A function that is used to determine whether each piece of the RHS of the action string is
a non-terminal that needs to be added to the non-terminal stack. You can use
        ``type_declaration.is_nonterminal`` here, or write your own function if that one doesn't
work for your domain.
"""
def __init__(
self,
nonterminal_stack: List[str],
lambda_stacks: Dict[Tuple[str, str], List[str]],
valid_actions: Dict[str, Dict[str, Tuple[torch.Tensor, torch.Tensor, List[int]]]],
context_actions: Dict[str, Tuple[torch.Tensor, torch.Tensor, int]],
is_nonterminal: Callable[[str], bool],
) -> None:
self._nonterminal_stack = nonterminal_stack
self._lambda_stacks = lambda_stacks
self._valid_actions = valid_actions
self._context_actions = context_actions
self._is_nonterminal = is_nonterminal
def is_finished(self) -> bool:
"""
Have we finished producing our logical form? We have finished producing the logical form
if and only if there are no more non-terminals on the stack.
"""
return not self._nonterminal_stack
def get_valid_actions(self) -> Dict[str, Tuple[torch.Tensor, torch.Tensor, List[int]]]:
"""
Returns the valid actions in the current grammar state. See the class docstring for a
description of what we're returning here.
"""
actions = self._valid_actions[self._nonterminal_stack[-1]]
context_actions = []
for type_, variable in self._lambda_stacks:
if self._nonterminal_stack[-1] == type_:
production_string = f"{type_} -> {variable}"
context_actions.append(self._context_actions[production_string])
if context_actions:
input_tensor, output_tensor, action_ids = actions["global"]
new_inputs = [input_tensor] + [x[0] for x in context_actions]
input_tensor = torch.cat(new_inputs, dim=0)
new_outputs = [output_tensor] + [x[1] for x in context_actions]
output_tensor = torch.cat(new_outputs, dim=0)
new_action_ids = action_ids + [x[2] for x in context_actions]
# We can't just reassign to actions['global'], because that would modify the state of
# self._valid_actions. Instead, we need to construct a new actions dictionary.
new_actions = {**actions}
new_actions["global"] = (input_tensor, output_tensor, new_action_ids)
actions = new_actions
return actions
def take_action(self, production_rule: str) -> "LambdaGrammarStatelet":
"""
Takes an action in the current grammar state, returning a new grammar state with whatever
updates are necessary. The production rule is assumed to be formatted as "LHS -> RHS".
This will update the non-terminal stack and the context-dependent actions. Updating the
non-terminal stack involves popping the non-terminal that was expanded off of the stack,
then pushing on any non-terminals in the production rule back on the stack. We push the
non-terminals on in `reverse` order, so that the first non-terminal in the production rule
gets popped off the stack first.
For example, if our current ``nonterminal_stack`` is ``["r", "<e,r>", "d"]``, and
``action`` is ``d -> [<e,d>, e]``, the resulting stack will be ``["r", "<e,r>", "e",
"<e,d>"]``.
"""
left_side, right_side = production_rule.split(" -> ")
assert self._nonterminal_stack[-1] == left_side, (
f"Tried to expand {self._nonterminal_stack[-1]}"
f"but got rule {left_side} -> {right_side}"
)
assert all(self._lambda_stacks[key][-1] == left_side for key in self._lambda_stacks)
new_stack = self._nonterminal_stack[:-1]
new_lambda_stacks = {key: self._lambda_stacks[key][:-1] for key in self._lambda_stacks}
productions = self._get_productions_from_string(right_side)
# Looking for lambda productions, but not for cells or columns with the word "lambda" in
# them.
if "lambda" in productions[0] and "fb:" not in productions[0]:
production = productions[0]
if production[0] == "'" and production[-1] == "'":
# The production rule with a lambda is typically "<t,d> -> ['lambda x', d]". We
# need to strip the quotes.
production = production[1:-1]
lambda_variable = production.split(" ")[1]
# The left side must be formatted as "<t,d>", where "t" is the type of the lambda
# variable, and "d" is the return type of the lambda function. We need to pull out the
# "t" here. TODO(mattg): this is pretty limiting, but I'm not sure how general we
# should make this.
if len(left_side) != 5:
raise NotImplementedError("Can't handle this type yet:", left_side)
lambda_type = left_side[1]
new_lambda_stacks[(lambda_type, lambda_variable)] = []
for production in reversed(productions):
if self._is_nonterminal(production):
new_stack.append(production)
for lambda_stack in new_lambda_stacks.values():
lambda_stack.append(production)
# If any of the lambda stacks have now become empty, we remove them from our dictionary.
new_lambda_stacks = {
key: new_lambda_stacks[key] for key in new_lambda_stacks if new_lambda_stacks[key]
}
return LambdaGrammarStatelet(
nonterminal_stack=new_stack,
lambda_stacks=new_lambda_stacks,
valid_actions=self._valid_actions,
context_actions=self._context_actions,
is_nonterminal=self._is_nonterminal,
)
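    # An illustrative walk-through (not part of the original file) of the lambda handling in
    # ``take_action`` above, for the rule "<t,d> -> ['lambda x', d]":
    #
    #     productions = ["'lambda x'", "d"]   # from _get_productions_from_string
    #     production = "lambda x"             # after stripping the surrounding quotes
    #     lambda_variable = "x"               # second token of "lambda x"
    #     lambda_type = "t"                   # left_side[1], the argument type in "<t,d>"
    #
    # A new lambda stack is opened under the key ("t", "x"), and while that stack is non-empty
    # the context action "t -> x" is appended to the "global" actions whenever "t" is the
    # non-terminal being expanded (see ``get_valid_actions``).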
@staticmethod
def _get_productions_from_string(production_string: str) -> List[str]:
"""
Takes a string like '[<d,d>, d]' and parses it into a list like ['<d,d>', 'd']. For
production strings that are not lists, like '<e,d>', we return a single-element list:
['<e,d>'].
"""
if production_string[0] == "[":
return production_string[1:-1].split(", ")
else:
return [production_string]
def __eq__(self, other):
if isinstance(self, other.__class__):
return all(
[
self._nonterminal_stack == other._nonterminal_stack,
self._lambda_stacks == other._lambda_stacks,
util.tensors_equal(self._valid_actions, other._valid_actions),
util.tensors_equal(self._context_actions, other._context_actions),
self._is_nonterminal == other._is_nonterminal,
]
)
return NotImplemented
| allennlp-semparse-master | allennlp_semparse/state_machines/states/lambda_grammar_statelet.py |
from typing import Dict
import torch
from allennlp.nn import util
class ChecklistStatelet:
"""
    This class keeps track of checklist-related variables that are used while training a
    coverage-based semantic parser (or any other kind of transition-based constrained decoder).
    This is
intended to be used within a ``State``.
Parameters
----------
terminal_actions : ``torch.Tensor``
A vector containing the indices of terminal actions, required for computing checklists for
next states based on current actions. The idea is that we will build checklists
corresponding to the presence or absence of just the terminal actions. But in principle,
they can be all actions that are relevant to checklist computation.
checklist_target : ``torch.Tensor``
        Targets for the checklist, indicating the state we ideally want the checklist to reach by
        the end of decoding. It is the same size as ``terminal_actions``, and contains 1 for each
        corresponding action that we want to see in the final logical form, and 0 for each
        corresponding action that we do not.
checklist_mask : ``torch.Tensor``
Mask corresponding to ``terminal_actions``, indicating which of those actions are relevant
for checklist computation. For example, if the parser is penalizing non-agenda terminal
actions, all the terminal actions are relevant.
checklist : ``torch.Tensor``
A checklist indicating how many times each action in its agenda has been chosen previously.
It contains the actual counts of the agenda actions.
terminal_indices_dict: ``Dict[int, int]``, optional
Mapping from batch action indices to indices in any of the four vectors above. If not
provided, this mapping will be computed here.
"""
def __init__(
self,
terminal_actions: torch.Tensor,
checklist_target: torch.Tensor,
checklist_mask: torch.Tensor,
checklist: torch.Tensor,
terminal_indices_dict: Dict[int, int] = None,
) -> None:
self.terminal_actions = terminal_actions
self.checklist_target = checklist_target
self.checklist_mask = checklist_mask
self.checklist = checklist
if terminal_indices_dict is not None:
self.terminal_indices_dict = terminal_indices_dict
else:
self.terminal_indices_dict = {}
for checklist_index, batch_action_index in enumerate(terminal_actions.detach().cpu()):
action_index = int(batch_action_index[0])
if action_index == -1:
continue
self.terminal_indices_dict[action_index] = checklist_index
def update(self, action: torch.Tensor) -> "ChecklistStatelet":
"""
Takes an action index, updates checklist and returns an updated state.
"""
checklist_addition = (self.terminal_actions == action).float()
new_checklist = self.checklist + checklist_addition
new_checklist_state = ChecklistStatelet(
terminal_actions=self.terminal_actions,
checklist_target=self.checklist_target,
checklist_mask=self.checklist_mask,
checklist=new_checklist,
terminal_indices_dict=self.terminal_indices_dict,
)
return new_checklist_state
def get_balance(self) -> torch.Tensor:
return self.checklist_mask * (self.checklist_target - self.checklist)
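    # A small worked example (illustrative only). Suppose the agenda contains batch action
    # indices 3 and 7, of which only action 3 should appear in the final logical form:
    #
    #     import torch
    #     state = ChecklistStatelet(
    #         terminal_actions=torch.tensor([[3], [7]]),
    #         checklist_target=torch.tensor([[1.0], [0.0]]),
    #         checklist_mask=torch.tensor([[1.0], [1.0]]),
    #         checklist=torch.zeros(2, 1),
    #     )
    #     state = state.update(torch.tensor(3))
    #     # state.checklist is now [[1.], [0.]] and state.get_balance() is [[0.], [0.]];
    #     # choosing action 3 a second time would push that row's balance to -1.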
def __eq__(self, other):
if isinstance(self, other.__class__):
return all(
[
util.tensors_equal(self.terminal_actions, other.terminal_actions),
util.tensors_equal(self.checklist_target, other.checklist_target),
util.tensors_equal(self.checklist_mask, other.checklist_mask),
util.tensors_equal(self.checklist, other.checklist),
self.terminal_indices_dict == other.terminal_indices_dict,
]
)
return NotImplemented
| allennlp-semparse-master | allennlp_semparse/state_machines/states/checklist_statelet.py |
from typing import Generic, List, TypeVar
import torch
# Note that the bound here is `State` itself. This is what lets us have methods that take
# lists of a `State` subclass and output structures with the subclass. Really ugly that we
# have to do this generic typing _for our own class_, but it makes mypy happy and gives us good
# type checking in a few important methods.
T = TypeVar("T", bound="State")
class State(Generic[T]):
"""
Represents the (batched) state of a transition-based decoder.
There are two different kinds of batching we need to distinguish here. First, there's the
batch of training instances passed to ``model.forward()``. We'll use "batch" and
``batch_size`` to refer to this through the docs and code. We additionally batch together
computation for several states at the same time, where each state could be from the same
training instance in the original batch, or different instances. We use "group" and
``group_size`` in the docs and code to refer to this kind of batching, to distinguish it from
the batch of training instances.
So, using this terminology, a single ``State`` object represents a `grouped` collection of
states. Because different states in this group might finish at different timesteps, we have
methods and member variables to handle some bookkeeping around this, to split and regroup
things.
Parameters
----------
batch_indices : ``List[int]``
A ``group_size``-length list, where each element specifies which ``batch_index`` that group
element came from.
Our internal variables (like scores, action histories, hidden states, whatever) are
`grouped`, and our ``group_size`` is likely different from the original ``batch_size``.
This variable keeps track of which batch instance each group element came from (e.g., to
know what the correct action sequences are, or which encoder outputs to use).
action_history : ``List[List[int]]``
The list of actions taken so far in this state. This is also grouped, so each state in the
group has a list of actions.
score : ``List[torch.Tensor]``
This state's score. It's a variable, because typically we'll be computing a loss based on
this score, and using it for backprop during training. Like the other variables here, this
is a ``group_size``-length list.
"""
def __init__(
self, batch_indices: List[int], action_history: List[List[int]], score: List[torch.Tensor]
) -> None:
self.batch_indices = batch_indices
self.action_history = action_history
self.score = score
def is_finished(self) -> bool:
"""
If this state has a ``group_size`` of 1, this returns whether the single action sequence in
this state is finished or not. If this state has a ``group_size`` other than 1, this
method raises an error.
"""
raise NotImplementedError
@classmethod
def combine_states(cls, states: List[T]) -> T:
"""
Combines a list of states, each with their own group size, into a single state.
"""
raise NotImplementedError
def __eq__(self, other):
if isinstance(self, other.__class__):
return self.__dict__ == other.__dict__
return NotImplemented
| allennlp-semparse-master | allennlp_semparse/state_machines/states/state.py |
from typing import List
NUMBER_CHARACTERS = {"0", "1", "2", "3", "4", "5", "6", "7", "8", "9", ".", "-"}
MONTH_NUMBERS = {
"january": 1,
"jan": 1,
"february": 2,
"feb": 2,
"march": 3,
"mar": 3,
"april": 4,
"apr": 4,
"may": 5,
"june": 6,
"jun": 6,
"july": 7,
"jul": 7,
"august": 8,
"aug": 8,
"september": 9,
"sep": 9,
"october": 10,
"oct": 10,
"november": 11,
"nov": 11,
"december": 12,
"dec": 12,
}
ORDER_OF_MAGNITUDE_WORDS = {"hundred": 100, "thousand": 1000, "million": 1000000}
NUMBER_WORDS = {
"zero": 0,
"one": 1,
"two": 2,
"three": 3,
"four": 4,
"five": 5,
"six": 6,
"seven": 7,
"eight": 8,
"nine": 9,
"ten": 10,
"first": 1,
"second": 2,
"third": 3,
"fourth": 4,
"fifth": 5,
"sixth": 6,
"seventh": 7,
"eighth": 8,
"ninth": 9,
"tenth": 10,
**MONTH_NUMBERS,
}
def lisp_to_nested_expression(lisp_string: str) -> List:
"""
Takes a logical form as a lisp string and returns a nested list representation of the lisp.
For example, "(count (division first))" would get mapped to ['count', ['division', 'first']].
"""
stack: List = []
current_expression: List = []
tokens = lisp_string.split()
for token in tokens:
while token[0] == "(":
nested_expression: List = []
current_expression.append(nested_expression)
stack.append(current_expression)
current_expression = nested_expression
token = token[1:]
current_expression.append(token.replace(")", ""))
while token[-1] == ")":
current_expression = stack.pop()
token = token[:-1]
return current_expression[0]
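# A quick usage check mirroring the docstring example above (illustrative only):
#
#     assert lisp_to_nested_expression("(count (division first))") == [
#         "count",
#         ["division", "first"],
#     ]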
| allennlp-semparse-master | allennlp_semparse/common/util.py |
"""
A ``KnowledgeGraph`` is a graphical representation of some structured knowledge source: say a
table, figure or an explicit knowledge base.
"""
from typing import Dict, List, Set
class KnowledgeGraph:
"""
A ``KnowledgeGraph`` represents a collection of entities and their relationships.
The ``KnowledgeGraph`` currently stores (untyped) neighborhood information and text
representations of each entity (if there is any).
The knowledge base itself can be a table (like in WikitableQuestions), a figure (like in NLVR)
or some other structured knowledge source. This abstract class needs to be inherited for
implementing the functionality appropriate for a given KB.
All of the parameters listed below are stored as public attributes.
Parameters
----------
entities : ``Set[str]``
The string identifiers of the entities in this knowledge graph. We sort this set and store
it as a list. The sorting is so that we get a guaranteed consistent ordering across
separate runs of the code.
neighbors : ``Dict[str, List[str]]``
A mapping from string identifiers to other string identifiers, denoting which entities are
neighbors in the graph.
entity_text : ``Dict[str, str]``
If you have additional text associated with each entity (other than its string identifier),
you can store that here. This might be, e.g., the text in a table cell, or the description
of a wikipedia entity.
"""
def __init__(
self,
entities: Set[str],
neighbors: Dict[str, List[str]],
entity_text: Dict[str, str] = None,
) -> None:
self.entities = sorted(entities)
self.neighbors = neighbors
self.entity_text = entity_text
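    # A minimal construction sketch (the entities below are made up for illustration):
    #
    #     graph = KnowledgeGraph(
    #         entities={"string_column:name", "string:usain_bolt"},
    #         neighbors={
    #             "string_column:name": ["string:usain_bolt"],
    #             "string:usain_bolt": ["string_column:name"],
    #         },
    #         entity_text={"string_column:name": "name", "string:usain_bolt": "usain bolt"},
    #     )
    #     assert graph.entities == ["string:usain_bolt", "string_column:name"]  # sorted list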
def __eq__(self, other):
if isinstance(self, other.__class__):
return self.__dict__ == other.__dict__
return NotImplemented
| allennlp-semparse-master | allennlp_semparse/common/knowledge_graph.py |
from allennlp_semparse.common.date import Date
from allennlp_semparse.common.errors import ParsingError, ExecutionError
from allennlp_semparse.common.util import (
NUMBER_CHARACTERS,
MONTH_NUMBERS,
ORDER_OF_MAGNITUDE_WORDS,
NUMBER_WORDS,
)
| allennlp-semparse-master | allennlp_semparse/common/__init__.py |
from collections import defaultdict
from typing import List, Dict, Set
import logging
from allennlp.common.util import START_SYMBOL
from allennlp_semparse.domain_languages.domain_language import DomainLanguage
logger = logging.getLogger(__name__)
class ActionSpaceWalker:
"""
    ``ActionSpaceWalker`` takes a world and traverses all the paths allowed by its valid action
    specification to generate all possible logical forms (under some constraints). This
class also has some utilities for indexing logical forms to efficiently retrieve required
subsets.
Parameters
----------
world : ``DomainLanguage``
The world (domain language instantiation) from which valid actions will be taken.
max_path_length : ``int``
        The maximum path length up to which the action space will be explored. Paths longer than this
length will be discarded.
"""
def __init__(self, world: DomainLanguage, max_path_length: int) -> None:
self._world = world
self._max_path_length = max_path_length
self._completed_paths: List[List[str]] = None
self._terminal_path_index: Dict[str, Set[int]] = defaultdict(set)
self._length_sorted_paths: List[List[str]] = None
def _walk(self) -> None:
"""
Walk over action space to collect completed paths of at most ``self._max_path_length`` steps.
"""
actions = self._world.get_nonterminal_productions()
start_productions = actions[START_SYMBOL]
# Buffer of NTs to expand, previous actions
incomplete_paths = [
([start_production.split(" -> ")[-1]], [start_production])
for start_production in start_productions
]
self._completed_paths = []
# Overview: We keep track of the buffer of non-terminals to expand, and the action history
# for each incomplete path. At every iteration in the while loop below, we iterate over all
# incomplete paths, expand one non-terminal from the buffer in a depth-first fashion, get
# all possible next actions triggered by that non-terminal and add to the paths. Then, we
# check the expanded paths, to see if they are 1) complete, in which case they are
# added to completed_paths, 2) longer than max_path_length, in which case they are
# discarded, or 3) neither, in which case they are used to form the incomplete_paths for the
# next iteration of this while loop.
# While the non-terminal expansion is done in a depth-first fashion, note that the search over
# the action space itself is breadth-first.
while incomplete_paths:
next_paths = []
for nonterminal_buffer, history in incomplete_paths:
# Taking the last non-terminal added to the buffer. We're going depth-first.
nonterminal = nonterminal_buffer.pop()
next_actions = []
if nonterminal not in actions:
# This happens when the nonterminal corresponds to a type that does not exist in
                    # the context. For example, in the variable-free variant of the WikiTables
                    # world, there are nonterminals for specific column types (like date). Say we
                    # produced a path containing "filter_date_greater" already, and we do not have
                    # any columns of type "date", then this condition would be triggered. We should
# just discard those paths.
continue
else:
next_actions.extend(actions[nonterminal])
# Iterating over all possible next actions.
for action in next_actions:
new_history = history + [action]
new_nonterminal_buffer = nonterminal_buffer[:]
# Since we expand the last action added to the buffer, the left child should be
# added after the right child.
for right_side_part in reversed(self._get_right_side_parts(action)):
if self._world.is_nonterminal(right_side_part):
new_nonterminal_buffer.append(right_side_part)
next_paths.append((new_nonterminal_buffer, new_history))
incomplete_paths = []
for nonterminal_buffer, path in next_paths:
# An empty buffer means that we've completed this path.
if not nonterminal_buffer:
# Indexing completed paths by the nonterminals they contain.
next_path_index = len(self._completed_paths)
for action in path:
for value in self._get_right_side_parts(action):
if not self._world.is_nonterminal(value):
self._terminal_path_index[action].add(next_path_index)
self._completed_paths.append(path)
# We're adding to incomplete_paths for the next iteration, only those paths that are
# shorter than the max_path_length. The remaining paths will be discarded.
elif len(path) <= self._max_path_length:
incomplete_paths.append((nonterminal_buffer, path))
@staticmethod
def _get_right_side_parts(action: str) -> List[str]:
_, right_side = action.split(" -> ")
if right_side.startswith("["):
right_side_parts = right_side[1:-1].split(", ")
else:
right_side_parts = [right_side]
return right_side_parts
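    # A quick illustration of the helper above (not part of the original file):
    #
    #     assert ActionSpaceWalker._get_right_side_parts("d -> [<e,d>, e]") == ["<e,d>", "e"]
    #     assert ActionSpaceWalker._get_right_side_parts("e -> first") == ["first"]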
def get_logical_forms_with_agenda(
self,
agenda: List[str],
max_num_logical_forms: int = None,
allow_partial_match: bool = False,
) -> List[str]:
"""
Parameters
----------
agenda : ``List[str]``
max_num_logical_forms : ``int`` (optional)
        allow_partial_match : ``bool`` (optional, default=False)
If set, this method will return logical forms which contain not necessarily all the
items on the agenda. The returned list will be sorted by how many items the logical
forms match.
"""
if not agenda:
if allow_partial_match:
logger.warning("Agenda is empty! Returning all paths instead.")
return self.get_all_logical_forms(max_num_logical_forms)
return []
if self._completed_paths is None:
self._walk()
agenda_path_indices = [self._terminal_path_index[action] for action in agenda]
if all([not path_indices for path_indices in agenda_path_indices]):
if allow_partial_match:
logger.warning(
"""Agenda items not in any of the paths found. Returning all paths."""
)
return self.get_all_logical_forms(max_num_logical_forms)
return []
# TODO (pradeep): Sort the indices and do intersections in order, so that we can return the
# set with maximal coverage if the full intersection is null.
# This list contains for each agenda item the list of indices of paths that contain that agenda item. Note
# that we omit agenda items that are not in any paths to avoid the final intersection being null. So there
# will not be any empty sub-lists in the list below.
filtered_path_indices: List[Set[int]] = []
for agenda_item, path_indices in zip(agenda, agenda_path_indices):
if not path_indices:
logger.warning(f"{agenda_item} is not in any of the paths found! Ignoring it.")
continue
filtered_path_indices.append(path_indices)
# This mapping is from a path index to the number of items in the agenda that the path contains.
index_to_num_items: Dict[int, int] = defaultdict(int)
for indices in filtered_path_indices:
for index in indices:
index_to_num_items[index] += 1
if allow_partial_match:
# We group the paths based on how many agenda items they contain, and output them in a sorted order.
num_items_grouped_paths: Dict[int, List[List[str]]] = defaultdict(list)
for index, num_items in index_to_num_items.items():
num_items_grouped_paths[num_items].append(self._completed_paths[index])
paths = []
# Sort by number of agenda items present in the paths.
for num_items, corresponding_paths in sorted(
num_items_grouped_paths.items(), reverse=True
):
# Given those paths, sort them by length, so that the first path in ``paths`` will
# be the shortest path with the most agenda items.
paths.extend(sorted(corresponding_paths, key=len))
else:
indices_to_return = []
for index, num_items in index_to_num_items.items():
if num_items == len(filtered_path_indices):
indices_to_return.append(index)
# Sort all the paths by length
paths = sorted([self._completed_paths[index] for index in indices_to_return], key=len)
if max_num_logical_forms is not None:
paths = paths[:max_num_logical_forms]
logical_forms = [self._world.action_sequence_to_logical_form(path) for path in paths]
return logical_forms
def get_all_logical_forms(self, max_num_logical_forms: int = None) -> List[str]:
if self._completed_paths is None:
self._walk()
paths = self._completed_paths
if max_num_logical_forms is not None:
if self._length_sorted_paths is None:
self._length_sorted_paths = sorted(self._completed_paths, key=len)
paths = self._length_sorted_paths[:max_num_logical_forms]
logical_forms = [self._world.action_sequence_to_logical_form(path) for path in paths]
return logical_forms
| allennlp-semparse-master | allennlp_semparse/common/action_space_walker.py |
class ParsingError(Exception):
"""
This exception gets raised when there is a parsing error during logical form processing. This
might happen because you're not handling the full set of possible logical forms, for instance,
and having this error provides a consistent way to catch those errors and log how frequently
this occurs.
"""
def __init__(self, message):
super().__init__()
self.message = message
def __str__(self):
return repr(self.message)
class ExecutionError(Exception):
"""
This exception gets raised when you're trying to execute a logical form that your executor does
not understand. This may be because your logical form contains a function with an invalid name
or a set of arguments whose types do not match those that the function expects.
"""
def __init__(self, message):
super().__init__()
self.message = message
def __str__(self):
return repr(self.message)
| allennlp-semparse-master | allennlp_semparse/common/errors.py |
from allennlp_semparse.common.errors import ExecutionError
class Date:
def __init__(self, year: int, month: int, day: int) -> None:
self.year = year
self.month = month
self.day = day
def __eq__(self, other) -> bool:
# Note that the logic below renders equality to be non-transitive. That is,
# Date(2018, -1, -1) == Date(2018, 2, 3) and Date(2018, -1, -1) == Date(2018, 4, 5)
# but Date(2018, 2, 3) != Date(2018, 4, 5).
if not isinstance(other, Date):
raise ExecutionError("only compare Dates with Dates")
year_is_same = self.year == -1 or other.year == -1 or self.year == other.year
month_is_same = self.month == -1 or other.month == -1 or self.month == other.month
day_is_same = self.day == -1 or other.day == -1 or self.day == other.day
return year_is_same and month_is_same and day_is_same
def __gt__(self, other) -> bool:
# The logic below is tricky, and is based on some assumptions we make about date comparison.
# Year, month or day being -1 means that we do not know its value. In those cases, the
# we consider the comparison to be undefined, and return False if all the fields that are
# more significant than the field being compared are equal. However, when year is -1 for both
# dates being compared, it is safe to assume that the year is not specified because it is
# the same. So we make an exception just in that case. That is, we deem the comparison
# undefined only when one of the year values is -1, but not both.
if not isinstance(other, Date):
raise ExecutionError("only compare Dates with Dates")
# We're doing an exclusive or below.
if (self.year == -1) != (other.year == -1):
return False # comparison undefined
# If both years are -1, we proceed.
if self.year != other.year:
return self.year > other.year
# The years are equal and not -1, or both are -1.
if self.month == -1 or other.month == -1:
return False
if self.month != other.month:
return self.month > other.month
# The months and years are equal and not -1
if self.day == -1 or other.day == -1:
return False
return self.day > other.day
def __ge__(self, other) -> bool:
if not isinstance(other, Date):
raise ExecutionError("only compare Dates with Dates")
return self > other or self == other
def __str__(self):
if (self.month, self.day) == (-1, -1):
# If we have only the year, return just that so that the official evaluator does the
# comparison against the target as if both are numbers.
return str(self.year)
return f"{self.year}-{self.month}-{self.day}"
def __hash__(self):
return hash(str(self))
def to_json(self):
return str(self)
@classmethod
def make_date(cls, string: str) -> "Date":
year_string, month_string, day_string = string.split("-")
year = -1
month = -1
day = -1
try:
year = int(year_string)
except ValueError:
pass
try:
month = int(month_string)
except ValueError:
pass
try:
day = int(day_string)
except ValueError:
pass
return Date(year, month, day)
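# A few illustrative comparisons (not part of the original file), showing how -1 acts as an
# "unknown" wild-card and why equality is non-transitive:
#
#     assert Date.make_date("2018-2-28") == Date(2018, 2, 28)
#     assert Date(2018, -1, -1) == Date(2018, 2, 3)
#     assert Date(2018, -1, -1) == Date(2018, 4, 5)
#     assert Date(2018, 2, 3) != Date(2018, 4, 5)
#     assert Date(2018, 4, 5) > Date(2018, 2, 3)
#     assert not Date(-1, 2, 3) > Date(2017, 2, 3)  # year known on only one side: undefined
#     assert str(Date(2018, -1, -1)) == "2018"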
| allennlp-semparse-master | allennlp_semparse/common/date.py |
import re
import csv
from typing import Union, Dict, List, Tuple, Set
from collections import defaultdict
from unidecode import unidecode
from allennlp.data.tokenizers import Token
from allennlp_semparse.common import Date, NUMBER_CHARACTERS, NUMBER_WORDS, ORDER_OF_MAGNITUDE_WORDS
from allennlp_semparse.common.knowledge_graph import KnowledgeGraph
# == stop words that will be omitted by ContextGenerator
STOP_WORDS = {
"",
"",
"all",
"being",
"-",
"over",
"through",
"yourselves",
"its",
"before",
"hadn",
"with",
"had",
",",
"should",
"to",
"only",
"under",
"ours",
"has",
"ought",
"do",
"them",
"his",
"than",
"very",
"cannot",
"they",
"not",
"during",
"yourself",
"him",
"nor",
"did",
"didn",
"'ve",
"this",
"she",
"each",
"where",
"because",
"doing",
"some",
"we",
"are",
"further",
"ourselves",
"out",
"what",
"for",
"weren",
"does",
"above",
"between",
"mustn",
"?",
"be",
"hasn",
"who",
"were",
"here",
"shouldn",
"let",
"hers",
"by",
"both",
"about",
"couldn",
"of",
"could",
"against",
"isn",
"or",
"own",
"into",
"while",
"whom",
"down",
"wasn",
"your",
"from",
"her",
"their",
"aren",
"there",
"been",
".",
"few",
"too",
"wouldn",
"themselves",
":",
"was",
"until",
"more",
"himself",
"on",
"but",
"don",
"herself",
"haven",
"those",
"he",
"me",
"myself",
"these",
"up",
";",
"below",
"'re",
"can",
"theirs",
"my",
"and",
"would",
"then",
"is",
"am",
"it",
"doesn",
"an",
"as",
"itself",
"at",
"have",
"in",
"any",
"if",
"!",
"again",
"'ll",
"no",
"that",
"when",
"same",
"how",
"other",
"which",
"you",
"many",
"shan",
"'t",
"'s",
"our",
"after",
"most",
"'d",
"such",
"'m",
"why",
"a",
"off",
"i",
"yours",
"so",
"the",
"having",
"once",
}
CellValueType = Union[str, float, Date]
class TableQuestionContext:
"""
Representation of table context similar to the one used by Memory Augmented Policy Optimization (MAPO, Liang et
al., 2018). Most of the functionality is a reimplementation of
https://github.com/crazydonkey200/neural-symbolic-machines/blob/master/table/wtq/preprocess.py
for extracting entities from a question given a table and type its columns with <string> | <date> | <number>
"""
def __init__(
self,
table_data: List[Dict[str, CellValueType]],
column_name_type_mapping: Dict[str, Set[str]],
column_names: Set[str],
question_tokens: List[Token],
) -> None:
self.table_data = table_data
self.column_types: Set[str] = set()
self.column_names = column_names
for types in column_name_type_mapping.values():
self.column_types.update(types)
self.question_tokens = question_tokens
# Mapping from strings to the columns they are under.
string_column_mapping: Dict[str, List[str]] = defaultdict(list)
for table_row in table_data:
for column_name, cell_value in table_row.items():
if "string_column:" in column_name and cell_value is not None:
string_column_mapping[str(cell_value)].append(column_name)
# We want the object to raise KeyError when checking if a specific string is a cell in the
# table.
self._string_column_mapping = dict(string_column_mapping)
self._table_knowledge_graph: KnowledgeGraph = None
def __eq__(self, other):
if not isinstance(other, TableQuestionContext):
return False
return self.table_data == other.table_data
def get_table_knowledge_graph(self) -> KnowledgeGraph:
if self._table_knowledge_graph is None:
entities: Set[str] = set()
neighbors: Dict[str, List[str]] = defaultdict(list)
entity_text: Dict[str, str] = {}
# Add all column names to entities. We'll define their neighbors to be empty lists for
# now, and later add number and string entities as needed.
number_columns = []
date_columns = []
for typed_column_name in self.column_names:
if "number_column:" in typed_column_name or "num2_column" in typed_column_name:
number_columns.append(typed_column_name)
if "date_column:" in typed_column_name:
date_columns.append(typed_column_name)
# Add column names to entities, with no neighbors yet.
entities.add(typed_column_name)
neighbors[typed_column_name] = []
entity_text[typed_column_name] = typed_column_name.split(":", 1)[-1].replace(
"_", " "
)
string_entities, numbers = self.get_entities_from_question()
for entity, column_names in string_entities:
entities.add(entity)
for column_name in column_names:
neighbors[entity].append(column_name)
neighbors[column_name].append(entity)
entity_text[entity] = entity.replace("string:", "").replace("_", " ")
# For all numbers (except -1), we add all number and date columns as their neighbors.
for number, _ in numbers:
entities.add(number)
neighbors[number].extend(number_columns + date_columns)
for column_name in number_columns + date_columns:
neighbors[column_name].append(number)
entity_text[number] = number
for entity, entity_neighbors in neighbors.items():
neighbors[entity] = list(set(entity_neighbors))
# Add "-1" as an entity only if we have date columns in the table because we will need
# it as a wild-card in dates. The neighbors are the date columns.
if "-1" not in neighbors and date_columns:
entities.add("-1")
neighbors["-1"] = date_columns
entity_text["-1"] = "-1"
for date_column in date_columns:
neighbors[date_column].append("-1")
self._table_knowledge_graph = KnowledgeGraph(entities, dict(neighbors), entity_text)
return self._table_knowledge_graph
@classmethod
def get_table_data_from_tagged_lines(
cls, lines: List[List[str]]
) -> Tuple[List[Dict[str, Dict[str, str]]], Dict[str, Set[str]]]:
column_index_to_name = {}
header = lines[0] # the first line is the header ("row\tcol\t...")
index = 1
table_data: List[Dict[str, Dict[str, str]]] = []
while lines[index][0] == "-1":
# column names start with fb:row.row.
current_line = lines[index]
column_name_sempre = current_line[2]
column_index = int(current_line[1])
column_name = column_name_sempre.replace("fb:row.row.", "")
column_index_to_name[column_index] = column_name
index += 1
column_name_type_mapping: Dict[str, Set[str]] = defaultdict(set)
last_row_index = -1
for current_line in lines[1:]:
row_index = int(current_line[0])
if row_index == -1:
continue # header row
column_index = int(current_line[1])
if row_index != last_row_index:
table_data.append({})
node_info = dict(zip(header, current_line))
cell_data: Dict[str, str] = {}
column_name = column_index_to_name[column_index]
if node_info["date"]:
column_name_type_mapping[column_name].add("date")
cell_data["date"] = node_info["date"]
if node_info["number"]:
column_name_type_mapping[column_name].add("number")
cell_data["number"] = node_info["number"]
if node_info["num2"]:
column_name_type_mapping[column_name].add("num2")
cell_data["num2"] = node_info["num2"]
if node_info["content"] != "—":
column_name_type_mapping[column_name].add("string")
cell_data["string"] = node_info["content"]
table_data[-1][column_name] = cell_data
last_row_index = row_index
return table_data, column_name_type_mapping
@classmethod
def get_table_data_from_untagged_lines(
cls, lines: List[List[str]]
) -> Tuple[List[Dict[str, Dict[str, str]]], Dict[str, Set[str]]]:
"""
This method will be called only when we do not have tagged information from CoreNLP. That is, when we are
running the parser on data outside the WikiTableQuestions dataset. We try to do the same processing that
CoreNLP does for WTQ, but what we do here may not be as effective.
"""
table_data: List[Dict[str, Dict[str, str]]] = []
column_index_to_name = {}
column_names = lines[0]
for column_index, column_name in enumerate(column_names):
normalized_name = cls.normalize_string(column_name)
column_index_to_name[column_index] = normalized_name
column_name_type_mapping: Dict[str, Set[str]] = defaultdict(set)
for row in lines[1:]:
table_data.append({})
for column_index, cell_value in enumerate(row):
column_name = column_index_to_name[column_index]
cell_data: Dict[str, str] = {}
# Interpret the content as a date.
try:
potential_date_string = str(Date.make_date(cell_value))
if potential_date_string != "-1":
# This means the string is a really a date.
cell_data["date"] = cell_value
column_name_type_mapping[column_name].add("date")
except ValueError:
pass
# Interpret the content as a number.
try:
float(cell_value)
cell_data["number"] = cell_value
column_name_type_mapping[column_name].add("number")
except ValueError:
pass
# Interpret the content as a range or a score to get number and num2 out.
if "-" in cell_value and len(cell_value.split("-")) == 2:
# This could be a number range or a score
cell_parts = cell_value.split("-")
try:
float(cell_parts[0])
float(cell_parts[1])
cell_data["number"] = cell_parts[0]
cell_data["num2"] = cell_parts[1]
column_name_type_mapping[column_name].add("number")
column_name_type_mapping[column_name].add("num2")
except ValueError:
pass
# Interpret the content as a string.
cell_data["string"] = cell_value
column_name_type_mapping[column_name].add("string")
table_data[-1][column_name] = cell_data
return table_data, column_name_type_mapping
@classmethod
def read_from_lines(cls, lines: List, question_tokens: List[Token]) -> "TableQuestionContext":
header = lines[0]
if isinstance(header, list) and header[:6] == [
"row",
"col",
"id",
"content",
"tokens",
"lemmaTokens",
]:
# These lines are from the tagged table file from the official dataset.
table_data, column_name_type_mapping = cls.get_table_data_from_tagged_lines(lines)
else:
# We assume that the lines are just the table data, with rows being newline separated, and columns
# being tab-separated.
rows = [line.split("\t") for line in lines] # type: ignore
table_data, column_name_type_mapping = cls.get_table_data_from_untagged_lines(rows)
# Each row is a mapping from column names to cell data. Cell data is a dict, where keys are
# "string", "number", "num2" and "date", and the values are the corresponding values
# extracted by CoreNLP.
# Table data with each column split into different ones, depending on the types they have.
table_data_with_column_types: List[Dict[str, CellValueType]] = []
all_column_names: Set[str] = set()
for table_row in table_data:
table_data_with_column_types.append({})
for column_name, cell_data in table_row.items():
for column_type in column_name_type_mapping[column_name]:
typed_column_name = f"{column_type}_column:{column_name}"
all_column_names.add(typed_column_name)
cell_value_string = cell_data.get(column_type, None)
if column_type in ["number", "num2"]:
try:
cell_number = float(cell_value_string)
except (ValueError, TypeError):
cell_number = None
table_data_with_column_types[-1][typed_column_name] = cell_number
elif column_type == "date":
cell_date = None
if cell_value_string is not None:
cell_date = Date.make_date(cell_value_string)
table_data_with_column_types[-1][typed_column_name] = cell_date
else:
if cell_value_string is None:
normalized_string = None
else:
normalized_string = cls.normalize_string(cell_value_string)
table_data_with_column_types[-1][typed_column_name] = normalized_string
return cls(
table_data_with_column_types,
column_name_type_mapping,
all_column_names,
question_tokens,
)
@classmethod
def read_from_file(cls, filename: str, question_tokens: List[Token]) -> "TableQuestionContext":
with open(filename, "r") as file_pointer:
reader = csv.reader(file_pointer, delimiter="\t", quoting=csv.QUOTE_NONE)
lines = [line for line in reader]
return cls.read_from_lines(lines, question_tokens)
def get_entities_from_question(self) -> Tuple[List[Tuple[str, str]], List[Tuple[str, int]]]:
entity_data = []
for i, token in enumerate(self.question_tokens):
token_text = token.text
if token_text in STOP_WORDS:
continue
normalized_token_text = self.normalize_string(token_text)
if not normalized_token_text:
continue
token_columns = self._string_in_table(normalized_token_text)
if token_columns:
# We need to keep track of the type of column this string occurs in. It is unlikely it occurs in
# columns of multiple types. So we just keep track of the first column type. Hence, the
# ``token_columns[0]``.
token_type = token_columns[0].split(":")[0].replace("_column", "")
entity_data.append(
{
"value": normalized_token_text,
"token_start": i,
"token_end": i + 1,
"token_type": token_type,
"token_in_columns": token_columns,
}
)
extracted_numbers = self._get_numbers_from_tokens(self.question_tokens)
# filter out number entities to avoid repetition
expanded_entities = []
for entity in self._expand_entities(self.question_tokens, entity_data):
if entity["token_type"] == "string":
expanded_entities.append((f"string:{entity['value']}", entity["token_in_columns"]))
return expanded_entities, extracted_numbers # TODO(shikhar) Handle conjunctions
@staticmethod
def _get_numbers_from_tokens(tokens: List[Token]) -> List[Tuple[str, int]]:
"""
Finds numbers in the input tokens and returns them as strings. We do some simple heuristic
number recognition, finding ordinals and cardinals expressed as text ("one", "first",
etc.), as well as numerals ("7th", "3rd"), months (mapping "july" to 7), and units
("1ghz").
        We also handle year ranges expressed as decades or centuries ("1800s" or "1950s"), adding
the endpoints of the range as possible numbers to generate.
We return a list of tuples, where each tuple is the (number_string, token_index) for a
number found in the input tokens.
"""
numbers = []
for i, token in enumerate(tokens):
token_text = token.text
text = token.text.replace(",", "").lower()
number = float(NUMBER_WORDS[text]) if text in NUMBER_WORDS else None
magnitude = 1
if i < len(tokens) - 1:
next_token = tokens[i + 1].text.lower()
if next_token in ORDER_OF_MAGNITUDE_WORDS:
magnitude = ORDER_OF_MAGNITUDE_WORDS[next_token]
token_text += " " + tokens[i + 1].text
is_range = False
if len(text) > 1 and text[-1] == "s" and text[-2] == "0":
is_range = True
text = text[:-1]
# We strip out any non-digit characters, to capture things like '7th', or '1ghz'. The
# way we're doing this could lead to false positives for something like '1e2', but
# we'll take that risk. It shouldn't be a big deal.
text = "".join(text[i] for i, char in enumerate(text) if char in NUMBER_CHARACTERS)
try:
# We'll use a check for float(text) to find numbers, because text.isdigit() doesn't
# catch things like "-3" or "0.07".
number = float(text)
except ValueError:
pass
if number is not None:
number = number * magnitude
if "." in text:
number_string = "%.3f" % number
else:
number_string = "%d" % number
numbers.append((number_string, i))
if is_range:
# TODO(mattg): both numbers in the range will have the same text, and so the
# linking score won't have any way to differentiate them... We should figure
# out a better way to handle this.
num_zeros = 1
while text[-(num_zeros + 1)] == "0":
num_zeros += 1
numbers.append((str(int(number + 10**num_zeros)), i))
return numbers
def _string_in_table(self, candidate: str) -> List[str]:
"""
Checks if the string occurs in the table, and if it does, returns the names of the columns
under which it occurs. If it does not, returns an empty list.
"""
candidate_column_names: List[str] = []
# First check if the entire candidate occurs as a cell.
if candidate in self._string_column_mapping:
candidate_column_names = self._string_column_mapping[candidate]
        # If not, check if it is a substring of any cell value.
if not candidate_column_names:
for cell_value, column_names in self._string_column_mapping.items():
if candidate in cell_value:
candidate_column_names.extend(column_names)
candidate_column_names = list(set(candidate_column_names))
return candidate_column_names
def _process_conjunction(self, entity_data):
raise NotImplementedError
def _expand_entities(self, question, entity_data):
new_entities = []
for entity in entity_data:
# to ensure the same strings are not used over and over
if new_entities and entity["token_end"] <= new_entities[-1]["token_end"]:
continue
current_start = entity["token_start"]
current_end = entity["token_end"]
current_token = entity["value"]
current_token_type = entity["token_type"]
current_token_columns = entity["token_in_columns"]
while current_end < len(question):
next_token = question[current_end].text
next_token_normalized = self.normalize_string(next_token)
if next_token_normalized == "":
current_end += 1
continue
candidate = "%s_%s" % (current_token, next_token_normalized)
candidate_columns = self._string_in_table(candidate)
candidate_columns = list(set(candidate_columns).intersection(current_token_columns))
if not candidate_columns:
break
candidate_type = candidate_columns[0].split(":")[0].replace("_column", "")
if candidate_type != current_token_type:
break
current_end += 1
current_token = candidate
current_token_columns = candidate_columns
new_entities.append(
{
"token_start": current_start,
"token_end": current_end,
"value": current_token,
"token_type": current_token_type,
"token_in_columns": current_token_columns,
}
)
return new_entities
@staticmethod
def normalize_string(string: str) -> str:
"""
        These are the transformation rules used to normalize cell and column names in Sempre. See
``edu.stanford.nlp.sempre.tables.StringNormalizationUtils.characterNormalize`` and
``edu.stanford.nlp.sempre.tables.TableTypeSystem.canonicalizeName``. We reproduce those
rules here to normalize and canonicalize cells and columns in the same way so that we can
match them against constants in logical forms appropriately.
"""
# Normalization rules from Sempre
# \u201A -> ,
string = re.sub("‚", ",", string)
string = re.sub("„", ",,", string)
string = re.sub("[·・]", ".", string)
string = re.sub("…", "...", string)
string = re.sub("ˆ", "^", string)
string = re.sub("˜", "~", string)
string = re.sub("‹", "<", string)
string = re.sub("›", ">", string)
string = re.sub("[‘’´`]", "'", string)
string = re.sub("[“”«»]", '"', string)
string = re.sub("[•†‡²³]", "", string)
string = re.sub("[‐‑–—−]", "-", string)
# Oddly, some unicode characters get converted to _ instead of being stripped. Not really
# sure how sempre decides what to do with these... TODO(mattg): can we just get rid of the
# need for this function somehow? It's causing a whole lot of headaches.
string = re.sub("[ðø′″€⁄ªΣ]", "_", string)
# This is such a mess. There isn't just a block of unicode that we can strip out, because
# sometimes sempre just strips diacritics... We'll try stripping out a few separate
# blocks, skipping the ones that sempre skips...
string = re.sub("[\\u0180-\\u0210]", "", string).strip()
string = re.sub("[\\u0220-\\uFFFF]", "", string).strip()
string = string.replace("\\n", "_")
string = re.sub("\\s+", " ", string)
# Canonicalization rules from Sempre.
string = re.sub("[^\\w]", "_", string)
string = re.sub("_+", "_", string)
string = re.sub("_$", "", string)
return unidecode(string.lower())
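    # Illustrative example (not part of the original file): with the rules above,
    #   TableQuestionContext.normalize_string("Total Pts.") -> "total_pts"
    # since non-word characters become underscores, the trailing underscore is stripped,
    # and the result is lower-cased and passed through unidecode.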
| allennlp-semparse-master | allennlp_semparse/common/wikitables/table_question_context.py |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
This is the official evaluator taken from the original dataset. I made minimal changes to make it
Python 3 compatible, and conform to our style guidelines.
"""
# Official Evaluator for WikiTableQuestions Dataset
#
# There are 3 value types
# 1. String (unicode)
# 2. Number (float)
# 3. Date (a struct with 3 fields: year, month, and date)
# Some fields (but not all) can be left unspecified. However, if only the year
# is specified, the date is automatically converted into a number.
#
# Target denotation is a set of items
# - Each item T is a raw unicode string from Mechanical Turk
# - If T can be converted to a number or date (via Stanford CoreNLP), the converted value is precomputed
#
# Predicted denotation is a set of items
# - Each item P is a string, a number, or a date
# - If P is read from a text file, assume the following
# - A string that can be converted into a number (float) is converted into a number
# - A string of the form "yyyy-mm-dd" is converted into a date. Unspecified fields can be marked as
# "xx". For example, "xx-01-02" represents the date January 2nd of an unknown year.
# - Otherwise, it is kept as a string
#
# The predicted denotation is correct if
# 1. The sizes of the target denotation and the predicted denotation are equal
# 2. Each item in the target denotation matches an item in the predicted denotation
#
# A target item T matches a predicted item P if one of the following is true:
# 1. normalize(raw string of T) and normalize(string form of P) are identical.
# The normalize method performs the following normalizations on strings:
# - Remove diacritics (é → e)
# - Convert smart quotes (‘’´`“”) and dashes (‐‑‒–—−) into ASCII ones
# - Remove citations (trailing •♦†‡*#+ or [...])
# - Remove details in parenthesis (trailing (...))
# - Remove outermost quotation marks
# - Remove trailing period (.)
# - Convert to lowercase
# - Collapse multiple whitespaces and strip outermost whitespaces
# 2. T can be interpreted as a number T_N, P is a number, and P = T_N
# 3. T can be interpreted as a date T_D, P is a date, and P = T_D
# (exact match on all fields; e.g., xx-01-12 and 1990-01-12 do not match)
__version__ = "1.0.2"
import sys
import os
import re
import argparse
import unicodedata
from codecs import open as codecs_open
from math import isnan, isinf
from abc import ABCMeta, abstractmethod
################ String Normalization ################
def normalize(x):
# Remove diacritics
x = "".join(c for c in unicodedata.normalize("NFKD", x) if unicodedata.category(c) != "Mn")
# Normalize quotes and dashes
x = re.sub(r"[‘’´`]", "'", x)
x = re.sub(r"[“”]", '"', x)
x = re.sub(r"[‐‑‒–—−]", "-", x)
while True:
old_x = x
# Remove citations
x = re.sub(r"((?<!^)\[[^\]]*\]|\[\d+\]|[•♦†‡*#+])*$", "", x.strip())
# Remove details in parenthesis
x = re.sub(r"(?<!^)( \([^)]*\))*$", "", x.strip())
# Remove outermost quotation mark
x = re.sub(r'^"([^"]*)"$', r"\1", x.strip())
if x == old_x:
break
# Remove final '.'
if x and x[-1] == ".":
x = x[:-1]
# Collapse whitespaces and convert to lower case
x = re.sub(r"\s+", " ", x, flags=re.U).lower().strip()
return x
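# Two illustrative inputs (hypothetical, not from the original evaluator) showing the rules above:
#   normalize('"Champs-Élysées" [1]') -> 'champs-elysees'   (diacritics, quotes, and citation removed)
#   normalize("Tokyo (Japan)")        -> 'tokyo'            (trailing parenthetical removed)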
################ Value Types ################
class Value:
__metaclass__ = ABCMeta
# Should be populated with the normalized string
_normalized = None
@abstractmethod
def match(self, other):
"""Return True if the value matches the other value.
Args:
other (Value)
Returns:
a boolean
"""
pass
@property
def normalized(self):
return self._normalized
class StringValue(Value):
def __init__(self, content):
self._normalized = normalize(content)
self._hash = hash(self._normalized)
def __eq__(self, other):
return isinstance(other, StringValue) and self.normalized == other.normalized
def __hash__(self):
return self._hash
def __str__(self):
return "S" + str([self.normalized])
__repr__ = __str__
def match(self, other):
assert isinstance(other, Value)
return self.normalized == other.normalized
class NumberValue(Value):
def __init__(self, amount, original_string=None):
assert isinstance(amount, (int, float))
if abs(amount - round(amount)) < 1e-6:
self._amount = int(amount)
else:
self._amount = float(amount)
if not original_string:
self._normalized = str(self._amount)
else:
self._normalized = normalize(original_string)
self._hash = hash(self._amount)
@property
def amount(self):
return self._amount
def __eq__(self, other):
return isinstance(other, NumberValue) and self.amount == other.amount
def __hash__(self):
return self._hash
def __str__(self):
return ("N(%f)" % self.amount) + str([self.normalized])
__repr__ = __str__
def match(self, other):
assert isinstance(other, Value)
if self.normalized == other.normalized:
return True
if isinstance(other, NumberValue):
return abs(self.amount - other.amount) < 1e-6
return False
@staticmethod
def parse(text):
"""Try to parse into a number.
Return:
the number (int or float) if successful; otherwise None.
"""
try:
return int(text)
except ValueError:
try:
amount = float(text)
assert not isnan(amount) and not isinf(amount)
return amount
except (ValueError, AssertionError):
return None
class DateValue(Value):
def __init__(self, year, month, day, original_string=None):
"""Create a new DateValue. Placeholders are marked as -1."""
assert isinstance(year, int)
assert isinstance(month, int) and (month == -1 or 1 <= month <= 12)
assert isinstance(day, int) and (day == -1 or 1 <= day <= 31)
assert not year == month == day == -1
self._year = year
self._month = month
self._day = day
if not original_string:
self._normalized = "{}-{}-{}".format(
year if year != -1 else "xx",
month if month != -1 else "xx",
                day if day != -1 else "xx",
)
else:
self._normalized = normalize(original_string)
self._hash = hash((self._year, self._month, self._day))
@property
def ymd(self):
return (self._year, self._month, self._day)
def __eq__(self, other):
return isinstance(other, DateValue) and self.ymd == other.ymd
def __hash__(self):
return self._hash
def __str__(self):
return ("D(%d,%d,%d)" % (self._year, self._month, self._day)) + str([self._normalized])
__repr__ = __str__
def match(self, other):
assert isinstance(other, Value)
if self.normalized == other.normalized:
return True
if isinstance(other, DateValue):
return self.ymd == other.ymd
return False
@staticmethod
def parse(text):
"""Try to parse into a date.
Return:
tuple (year, month, date) if successful; otherwise None.
"""
try:
ymd = text.lower().split("-")
assert len(ymd) == 3
year = -1 if ymd[0] in ("xx", "xxxx") else int(ymd[0])
month = -1 if ymd[1] == "xx" else int(ymd[1])
day = -1 if ymd[2] == "xx" else int(ymd[2])
assert not year == month == day == -1
assert month == -1 or 1 <= month <= 12
assert day == -1 or 1 <= day <= 31
return (year, month, day)
except (ValueError, AssertionError):
return None
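    # For illustration (hypothetical input): DateValue.parse("xx-01-02") returns (-1, 1, 2),
    # i.e. January 2nd of an unknown year, following the "xx" placeholder convention above.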
################ Value Instantiation ################
def to_value(original_string, corenlp_value=None):
"""Convert the string to Value object.
Args:
original_string (basestring): Original string
corenlp_value (basestring): Optional value returned from CoreNLP
Returns:
Value
"""
if isinstance(original_string, Value):
# Already a Value
return original_string
if not corenlp_value:
corenlp_value = original_string
# Number?
amount = NumberValue.parse(corenlp_value)
if amount is not None:
return NumberValue(amount, original_string)
# Date?
ymd = DateValue.parse(corenlp_value)
if ymd is not None:
if ymd[1] == ymd[2] == -1:
return NumberValue(ymd[0], original_string)
else:
return DateValue(ymd[0], ymd[1], ymd[2], original_string)
# String.
return StringValue(original_string)
def to_value_list(original_strings, corenlp_values=None):
"""Convert a list of strings to a list of Values
Args:
original_strings (list[basestring])
corenlp_values (list[basestring or None])
Returns:
list[Value]
"""
assert isinstance(original_strings, (list, tuple, set))
if corenlp_values is not None:
assert isinstance(corenlp_values, (list, tuple, set))
assert len(original_strings) == len(corenlp_values)
return list(set(to_value(x, y) for (x, y) in zip(original_strings, corenlp_values)))
else:
return list(set(to_value(x) for x in original_strings))
################ Check the Predicted Denotations ################
def check_denotation(target_values, predicted_values):
"""Return True if the predicted denotation is correct.
Args:
target_values (list[Value])
predicted_values (list[Value])
Returns:
bool
"""
# Check size
if len(target_values) != len(predicted_values):
return False
# Check items
for target in target_values:
if not any(target.match(pred) for pred in predicted_values):
return False
return True
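# Sketch of the intended usage (values are hypothetical):
#   target = to_value_list(["2", "3"])
#   predicted = to_value_list(["3.0", "2"])
#   check_denotation(target, predicted)  # -> True: sizes match and every target matches a prediction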
################ Batch Mode ################
def tsv_unescape(x):
"""
Unescape strings in the TSV file.
Escaped characters include:
    - newline (0x0A) -> backslash + n
- vertical bar (0x7C) -> backslash + p
- backslash (0x5C) -> backslash + backslash
Parameters
----------
x : ``str``
Returns
-------
``str``
"""
return x.replace(r"\n", "\n").replace(r"\p", "|").replace("\\\\", "\\")
def tsv_unescape_list(x):
"""Unescape a list in the TSV file.
    List items are joined with vertical bars (0x7C)
Args:
x (str or unicode)
Returns:
a list of unicodes
"""
return [tsv_unescape(y) for y in x.split("|")]
def main():
parser = argparse.ArgumentParser()
parser.add_argument(
"-t",
"--tagged-dataset-path",
default=os.path.join(".", "tagged", "data"),
help="Directory containing CoreNLP-tagged dataset TSV file",
)
parser.add_argument(
"prediction_path",
help="Path to the prediction file. Each line contains "
"ex_id <tab> item1 <tab> item2 <tab> ...",
)
args = parser.parse_args()
# ID string --> list[Value]
target_values_map = {}
for filename in os.listdir(args.tagged_dataset_path):
filename = os.path.join(args.tagged_dataset_path, filename)
print("Reading dataset from", filename, file=sys.stderr)
with codecs_open(filename, "r", "utf8") as fin:
header = fin.readline().rstrip("\n").split("\t")
for line in fin:
stuff = dict(zip(header, line.rstrip("\n").split("\t")))
ex_id = stuff["id"]
original_strings = tsv_unescape_list(stuff["targetValue"])
canon_strings = tsv_unescape_list(stuff["targetCanon"])
target_values_map[ex_id] = to_value_list(original_strings, canon_strings)
print("Read", len(target_values_map), "examples", file=sys.stderr)
print("Reading predictions from", args.prediction_path, file=sys.stderr)
num_examples, num_correct = 0, 0
with codecs_open(args.prediction_path, "r", "utf8") as fin:
for line in fin:
line = line.rstrip("\n").split("\t")
ex_id = line[0]
if ex_id not in target_values_map:
print('WARNING: Example ID "%s" not found' % ex_id)
else:
target_values = target_values_map[ex_id]
predicted_values = to_value_list(line[1:])
correct = check_denotation(target_values, predicted_values)
print("%s\t%s\t%s\t%s" % (ex_id, correct, target_values, predicted_values))
num_examples += 1
if correct:
num_correct += 1
print("Examples:", num_examples, file=sys.stderr)
print("Correct:", num_correct, file=sys.stderr)
print("Accuracy:", round((num_correct + 1e-9) / (num_examples + 1e-9), 4), file=sys.stderr)
if __name__ == "__main__":
main()
| allennlp-semparse-master | allennlp_semparse/common/wikitables/wikitables_evaluator.py |
from allennlp_semparse.common.wikitables.table_question_context import (
TableQuestionContext,
CellValueType,
)
| allennlp-semparse-master | allennlp_semparse/common/wikitables/__init__.py |
"""
Utility functions for reading the standardised text2sql datasets presented in
`"Improving Text to SQL Evaluation Methodology" <https://arxiv.org/abs/1806.09029>`_
"""
from typing import List, Dict, NamedTuple, Iterable, Tuple, Set
from collections import defaultdict
from allennlp.common import JsonDict
class SqlData(NamedTuple):
"""
A utility class for reading in text2sql data.
Parameters
----------
text : ``List[str]``
The tokens in the text of the query.
text_with_variables : ``List[str]``
The tokens in the text of the query with variables
mapped to table names/abstract variables.
variable_tags : ``List[str]``
Labels for each word in ``text`` which correspond to
which variable in the sql the token is linked to. "O"
is used to denote no tag.
sql : ``List[str]``
The tokens in the SQL query which corresponds to the text.
text_variables : ``Dict[str, str]``
        A dictionary of variables associated with the text, e.g. {"city_name0": "san francisco"}
sql_variables : ``Dict[str, Dict[str, str]]``
A dictionary of variables and column references associated with the sql query.
"""
text: List[str]
text_with_variables: List[str]
variable_tags: List[str]
sql: List[str]
text_variables: Dict[str, str]
sql_variables: Dict[str, Dict[str, str]]
class TableColumn(NamedTuple):
name: str
column_type: str
is_primary_key: bool
def column_has_string_type(column: TableColumn) -> bool:
if "varchar" in column.column_type:
return True
elif column.column_type == "text":
return True
elif column.column_type == "longtext":
return True
return False
def column_has_numeric_type(column: TableColumn) -> bool:
if "int" in column.column_type:
return True
elif "float" in column.column_type:
return True
elif "double" in column.column_type:
return True
return False
def replace_variables(
sentence: List[str], sentence_variables: Dict[str, str]
) -> Tuple[List[str], List[str]]:
"""
Replaces abstract variables in text with their concrete counterparts.
"""
tokens = []
tags = []
for token in sentence:
if token not in sentence_variables:
tokens.append(token)
tags.append("O")
else:
for word in sentence_variables[token].split():
tokens.append(word)
tags.append(token)
return tokens, tags
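# Illustrative example (hypothetical inputs):
#   replace_variables(["show", "city_name0"], {"city_name0": "san francisco"})
#   -> (["show", "san", "francisco"], ["O", "city_name0", "city_name0"])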
def split_table_and_column_names(table: str) -> Iterable[str]:
partitioned = [x for x in table.partition(".") if x != ""]
# Avoid splitting decimal strings.
if partitioned[0].isnumeric() and partitioned[-1].isnumeric():
return [table]
return partitioned
def clean_and_split_sql(sql: str) -> List[str]:
"""
Cleans up and unifies a SQL query. This involves unifying quoted strings
and splitting brackets which aren't formatted consistently in the data.
"""
sql_tokens: List[str] = []
for token in sql.strip().split():
token = token.replace('"', "'").replace("%", "")
if token.endswith("(") and len(token) > 1:
sql_tokens.extend(split_table_and_column_names(token[:-1]))
sql_tokens.extend(split_table_and_column_names(token[-1]))
else:
sql_tokens.extend(split_table_and_column_names(token))
return sql_tokens
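# Illustrative example (hypothetical query):
#   clean_and_split_sql('SELECT CITY.POPULATION FROM CITY WHERE CITY.NAME = "name0" ;')
#   -> ['SELECT', 'CITY', '.', 'POPULATION', 'FROM', 'CITY', 'WHERE', 'CITY', '.', 'NAME', '=', "'name0'", ';']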
def resolve_primary_keys_in_schema(
sql_tokens: List[str], schema: Dict[str, List[TableColumn]]
) -> List[str]:
"""
Some examples in the text2sql datasets use ID as a column reference to the
column of a table which has a primary key. This causes problems if you are trying
to constrain a grammar to only produce the column names directly, because you don't
know what ID refers to. So instead of dealing with that, we just replace it.
"""
primary_keys_for_tables = {
name: max(columns, key=lambda x: x.is_primary_key).name for name, columns in schema.items()
}
resolved_tokens = []
for i, token in enumerate(sql_tokens):
if i > 2:
table_name = sql_tokens[i - 2]
if token == "ID" and table_name in primary_keys_for_tables.keys():
token = primary_keys_for_tables[table_name]
resolved_tokens.append(token)
return resolved_tokens
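# Illustrative example (hypothetical schema):
#   schema = {"CITY": [TableColumn("CITY_ID", "int", True), TableColumn("NAME", "varchar(255)", False)]}
#   resolve_primary_keys_in_schema(["SELECT", "CITY", ".", "ID"], schema)
#   -> ["SELECT", "CITY", ".", "CITY_ID"]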
def clean_unneeded_aliases(sql_tokens: List[str]) -> List[str]:
unneeded_aliases = {}
previous_token = sql_tokens[0]
for (token, next_token) in zip(sql_tokens[1:-1], sql_tokens[2:]):
if token == "AS" and previous_token is not None:
# Check to see if the table name without the alias
# is the same.
table_name = next_token[:-6]
if table_name == previous_token:
# If so, store the mapping as a replacement.
unneeded_aliases[next_token] = previous_token
previous_token = token
dealiased_tokens: List[str] = []
for token in sql_tokens:
new_token = unneeded_aliases.get(token, None)
if new_token is not None and dealiased_tokens[-1] == "AS":
dealiased_tokens.pop()
continue
elif new_token is None:
new_token = token
dealiased_tokens.append(new_token)
return dealiased_tokens
def read_dataset_schema(schema_path: str) -> Dict[str, List[TableColumn]]:
"""
Reads a schema from the text2sql data, returning a dictionary
mapping table names to their columns and respective types.
This handles columns in an arbitrary order and also allows
    either ``Table``/``Field`` or ``Table Name``/``Field Name`` as headers,
because both appear in the data. It also uppercases table and
column names if they are not already uppercase.
Parameters
----------
schema_path : ``str``, required.
The path to the csv schema.
Returns
-------
A dictionary mapping table names to typed columns.
"""
schema: Dict[str, List[TableColumn]] = defaultdict(list)
for i, line in enumerate(open(schema_path, "r")):
if i == 0:
header = [x.strip() for x in line.split(",")]
elif line[0] == "-":
continue
else:
data = {key: value for key, value in zip(header, [x.strip() for x in line.split(",")])}
table = data.get("Table Name", None) or data.get("Table")
column = data.get("Field Name", None) or data.get("Field")
is_primary_key = data.get("Primary Key") == "y"
schema[table.upper()].append(TableColumn(column.upper(), data["Type"], is_primary_key))
return {**schema}
def process_sql_data(
data: List[JsonDict],
use_all_sql: bool = False,
use_all_queries: bool = False,
remove_unneeded_aliases: bool = False,
schema: Dict[str, List[TableColumn]] = None,
) -> Iterable[SqlData]:
"""
A utility function for reading in text2sql data. The blob is
the result of loading the json from a file produced by the script
``scripts/reformat_text2sql_data.py``.
Parameters
----------
data : ``JsonDict``
use_all_sql : ``bool``, optional (default = False)
Whether to use all of the sql queries which have identical semantics,
or whether to just use the first one.
use_all_queries : ``bool``, (default = False)
Whether or not to enforce query sentence uniqueness. If false,
duplicated queries will occur in the dataset as separate instances,
as for a given SQL query, not only are there multiple queries with
the same template, but there are also duplicate queries.
remove_unneeded_aliases : ``bool``, (default = False)
The text2sql data by default creates alias names for `all` tables,
regardless of whether the table is derived or if it is identical to
the original (e.g SELECT TABLEalias0.COLUMN FROM TABLE AS TABLEalias0).
This is not necessary and makes the action sequence and grammar manipulation
much harder in a grammar based decoder. Note that this does not
remove aliases which are legitimately required, such as when a new
table is formed by performing operations on the original table.
schema : ``Dict[str, List[TableColumn]]``, optional, (default = None)
A schema to resolve primary keys against. Converts 'ID' column names
to their actual name with respect to the Primary Key for the table
in the schema.
"""
for example in data:
seen_sentences: Set[str] = set()
for sent_info in example["sentences"]:
# Loop over the different sql statements with "equivalent" semantics
for sql in example["sql"]:
text_with_variables = sent_info["text"].strip().split()
text_vars = sent_info["variables"]
query_tokens, tags = replace_variables(text_with_variables, text_vars)
if not use_all_queries:
key = " ".join(query_tokens)
if key in seen_sentences:
continue
else:
seen_sentences.add(key)
sql_tokens = clean_and_split_sql(sql)
if remove_unneeded_aliases:
sql_tokens = clean_unneeded_aliases(sql_tokens)
if schema is not None:
sql_tokens = resolve_primary_keys_in_schema(sql_tokens, schema)
sql_variables = {}
for variable in example["variables"]:
sql_variables[variable["name"]] = {
"text": variable["example"],
"type": variable["type"],
}
sql_data = SqlData(
text=query_tokens,
text_with_variables=text_with_variables,
variable_tags=tags,
sql=sql_tokens,
text_variables=text_vars,
sql_variables=sql_variables,
)
yield sql_data
# Some questions might have multiple equivalent SQL statements.
# By default, we just use the first one. TODO(Mark): Use the shortest?
if not use_all_sql:
break
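# Rough usage sketch (hypothetical paths; ``json_blob`` is the list loaded from a file produced by
# scripts/reformat_text2sql_data.py):
#   schema = read_dataset_schema("data/restaurants-schema.csv")
#   for sql_data in process_sql_data(json_blob, schema=schema):
#       print(sql_data.text, sql_data.sql)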
| allennlp-semparse-master | allennlp_semparse/common/sql/text2sql_utils.py |
| allennlp-semparse-master | allennlp_semparse/common/sql/__init__.py |
from collections import defaultdict
from typing import Any, Callable, Dict, List, Optional, Sequence, Set, Tuple, Type, Union
import inspect
import logging
import sys
import traceback
import types
from nltk import Tree
from allennlp.common.util import START_SYMBOL
from allennlp_semparse.common import util, ParsingError, ExecutionError
logger = logging.getLogger(__name__)
# We rely heavily on the typing module and its type annotations for our grammar induction code.
# Unfortunately, the behavior of the typing module changed somewhat substantially between python
# 3.6 and 3.7, so we need to do some gymnastics to get some of our checks to work with both.
# That's what these three methods are about.
def is_callable(type_: Type) -> bool:
if sys.version_info < (3, 7):
from typing import CallableMeta # type: ignore
return isinstance(type_, CallableMeta) # type: ignore
else:
return getattr(type_, "_name", None) == "Callable"
def is_generic(type_: Type) -> bool:
if sys.version_info < (3, 7):
from typing import GenericMeta # type: ignore
return isinstance(type_, GenericMeta) # type: ignore
else:
from typing import _GenericAlias # type: ignore
return isinstance(type_, _GenericAlias) # type: ignore
def get_generic_name(type_: Type) -> str:
if sys.version_info < (3, 7):
origin = type_.__origin__.__name__
else:
# In python 3.7, type_.__origin__ switched to the built-in class, instead of the typing
# class.
origin = type_._name
args = type_.__args__
return f'{origin}[{",".join(arg.__name__ for arg in args)}]'
def infer_collection_type(collection: Any) -> Type:
instance_types = set([type(instance) for instance in collection])
if len(instance_types) != 1:
raise ValueError(f"Inconsistent types in collection: {instance_types}, {collection}")
subtype = list(instance_types)[0]
if isinstance(collection, list):
return List[subtype] # type: ignore
elif isinstance(collection, set):
return Set[subtype] # type: ignore
else:
raise ValueError(f"Unsupported top-level generic type: {collection}")
class PredicateType:
"""
A base class for `types` in a domain language. This serves much the same purpose as
``typing.Type``, but we add a few conveniences to these types, so we construct separate classes
for them and group them together under ``PredicateType`` to have a good type annotation for
these types.
"""
@staticmethod
def get_type(type_: Type) -> "PredicateType":
"""
Converts a python ``Type`` (as you might get from a type annotation) into a
``PredicateType``. If the ``Type`` is callable, this will return a ``FunctionType``;
otherwise, it will return a ``BasicType``.
``BasicTypes`` have a single ``name`` parameter - we typically get this from
``type_.__name__``. This doesn't work for generic types (like ``List[str]``), so we handle
those specially, so that the ``name`` for the ``BasicType`` remains ``List[str]``, as you
would expect.
"""
if is_callable(type_):
callable_args = type_.__args__
argument_types = [PredicateType.get_type(t) for t in callable_args[:-1]]
return_type = PredicateType.get_type(callable_args[-1])
return FunctionType(argument_types, return_type)
elif is_generic(type_):
# This is something like List[int]. type_.__name__ doesn't do the right thing (and
# crashes in python 3.7), so we need to do some magic here.
name = get_generic_name(type_)
else:
name = type_.__name__
return BasicType(name)
@staticmethod
def get_function_type(
arg_types: Sequence["PredicateType"], return_type: "PredicateType"
) -> "PredicateType":
"""
        Constructs a ``FunctionType`` representing a function with the given argument and
return types.
"""
# TODO(mattg): We might need to generalize this to just `get_type`, so we can handle
# functions as arguments correctly in the logic below.
if not arg_types:
# Functions with no arguments are basically constants whose type match their return
# type.
return return_type
return FunctionType(arg_types, return_type)
class BasicType(PredicateType):
"""
A ``PredicateType`` representing a zero-argument predicate (which could technically be a
function with no arguments or a constant; both are treated the same here).
"""
def __init__(self, name: str) -> None:
self.name = name
def __repr__(self):
return self.name
def __hash__(self):
return hash(self.name)
def __eq__(self, other):
if isinstance(self, other.__class__):
return self.name == other.name
return NotImplemented
class FunctionType(PredicateType):
"""
A ``PredicateType`` representing a function with arguments. When seeing this as a string, it
will be in angle brackets, with argument types separated by commas, and the return type
separated from argument types with a colon. For example, ``def f(a: str) -> int:`` would look
like ``<str:int>``, and ``def g(a: int, b: int) -> int`` would look like ``<int,int:int>``.
"""
def __init__(self, argument_types: Sequence[PredicateType], return_type: PredicateType) -> None:
self.argument_types = argument_types
self.return_type = return_type
self.name = f'<{",".join(str(arg) for arg in argument_types)}:{return_type}>'
def __repr__(self):
return self.name
def __hash__(self):
return hash(self.name)
def __eq__(self, other):
if isinstance(self, other.__class__):
return self.name == other.name
return NotImplemented
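# For illustration (assumed example, not in the original source): a predicate declared as
#   def add(self, num1: int, num2: int) -> int: ...
# gets the type name "<int,int:int>", i.e.
#   FunctionType([BasicType("int"), BasicType("int")], BasicType("int"))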
def predicate(function: Callable) -> Callable:
"""
This is intended to be used as a decorator when you are implementing your ``DomainLanguage``.
This marks a function on a ``DomainLanguage`` subclass as a predicate that can be used in the
language. See the :class:`DomainLanguage` docstring for an example usage, and for what using
this does.
"""
setattr(function, "_is_predicate", True)
return function
def predicate_with_side_args(side_arguments: List[str]) -> Callable:
"""
Like :func:`predicate`, but used when some of the arguments to the function are meant to be
provided by the decoder or other state, instead of from the language. For example, you might
want to have a function use the decoder's attention over some input text when a terminal was
predicted. That attention won't show up in the language productions. Use this decorator, and
pass in the required state to :func:`DomainLanguage.execute_action_sequence`, if you need to
ignore some arguments when doing grammar induction.
In order for this to work out, the side arguments `must` be after any non-side arguments. This
is because we use ``*args`` to pass the non-side arguments, and ``**kwargs`` to pass the side
arguments, and python requires that ``*args`` be before ``**kwargs``.
"""
def decorator(function: Callable) -> Callable:
setattr(function, "_side_arguments", side_arguments)
return predicate(function)
return decorator
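# Minimal sketch of the intended usage (hypothetical language and argument names):
#   class MyLanguage(DomainLanguage):
#       @predicate_with_side_args(["question_attention"])
#       def select_string(self, column: str, question_attention: List[float]) -> str:
#           ...
# Here "question_attention" is supplied via execute_action_sequence's side_arguments rather than
# appearing in the grammar.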
def nltk_tree_to_logical_form(tree: Tree) -> str:
"""
Given an ``nltk.Tree`` representing the syntax tree that generates a logical form, this method
produces the actual (lisp-like) logical form, with all of the non-terminal symbols converted
into the correct number of parentheses.
This is used in the logic that converts action sequences back into logical forms. It's very
unlikely that you will need this anywhere else.
"""
# nltk.Tree actually inherits from `list`, so you use `len()` to get the number of children.
# We're going to be explicit about checking length, instead of using `if tree:`, just to avoid
# any funny business nltk might have done (e.g., it's really odd if `if tree:` evaluates to
# `False` if there's a single leaf node with no children).
if len(tree) == 0:
return tree.label()
if len(tree) == 1:
return tree[0].label()
return "(" + " ".join(nltk_tree_to_logical_form(child) for child in tree) + ")"
class DomainLanguage:
"""
A ``DomainLanguage`` specifies the functions available to use for a semantic parsing task. You
write execution code for these functions, and we will automatically induce a grammar from those
functions and give you a lisp interpreter that can use those functions. For example:
.. code-block:: python
class Arithmetic(DomainLanguage):
@predicate
def add(self, num1: int, num2: int) -> int:
return num1 + num2
@predicate
def halve(self, num: int) -> int:
return num / 2
...
Instantiating this class now gives you a language object that can parse and execute logical
forms, can convert logical forms to action sequences (linearized abstract syntax trees) and
back again, and can list all valid production rules in a grammar induced from the specified
functions.
.. code-block:: python
>>> l = Arithmetic()
>>> l.execute("(add 2 3)")
5
>>> l.execute("(halve (add 12 4))")
8
>>> l.logical_form_to_action_sequence("(add 2 3)")
        # See the docstring for this function for a description of what these strings mean.
['@start@ -> int', 'int -> [<int,int:int>, int, int]', '<int,int:int> -> add',
'int -> 2', 'int -> 3']
>>> l.action_sequence_to_logical_form(l.logical_form_to_action_sequence('(add 2 3)'))
'(add 2 3)'
>>> l.get_nonterminal_productions()
{'<int,int:int>': ['add', 'divide', 'multiply', 'subtract'], '<int:int>': ['halve'], ...}
This is done with some reflection magic, with the help of the ``@predicate`` decorator and type
annotations. For a method you define on a ``DomainLanguage`` subclass to be included in the
language, it *must* be decorated with ``@predicate``, and it *must* have type annotations on
all arguments and on its return type. You can also add predicates and constants to the
language using the :func:`add_predicate` and :func:`add_constant` functions, if you choose
(minor point: constants with generic types (like ``Set[int]``) must currently be specified as
predicates, as the ``allowed_constants`` dictionary doesn't pass along the generic type
information).
By default, the language we construct is purely functional - no defining variables or using
lambda functions, or anything like that. There are options to allow two extensions to the
default language behavior, which together allow for behavior that is essentially equivalent to
lambda functions: (1) function currying, and (2) function composition. Currying is still
functional, but allows only giving some of the arguments to a function, with a functional return
type. For example, if you allow currying, you can convert a two-argument function like
``(multiply 4 5)`` into a one-argument function like ``(multiply 4)`` (which would then multiply
its single argument by 4). Without being able to save variables, currying isn't `that` useful,
so it is not enabled by default, but it can be used in conjunction with function composition to
get the behavior of lambda functions without needing to explicitly deal with lambdas. Function
composition calls two functions in succession, passing the output of one as the input to
another. The difference between this and regular nested function calls is that it happens
`outside` the nesting, so the input type of the outer function is the input type of the first
function, not the second, as would be the case with nesting. As a simple example, with function
composition you could change the nested expression ``(sum (list1 8))`` into the equivalent
expression ``((* sum list1) 8)``. As a more useful example, consider taking an argmax over a
list: ``(argmax (list3 5 9 2) sin)``, where this will call the ``sin`` function on each element
of the list and return the element with the highest value. If you want a more complex function
when computing a value, say ``sin(3x)``, this would typically be done with lambda functions. We
can accomplish this with currying and function composition: ``(argmax (list3 5 9 2) (* sin
    (multiply 3)))``. In this way we do not need to introduce variables into the language, which are
tricky from a modeling perspective. All of the actual terminal productions in this version
should have a reasonably strong correspondence with the words in the input utterance.
Two important notes on currying and composition: first, in order to perform type inference on
    curried functions (to know which argument is being omitted), we currently rely on `executing`
    the subexpressions. This should be ok for simple, deterministic languages, but this is very much
not recommended for things like NMNs at this point. We'd need to implement smarter type
inference for that to work. Second, the grammar induction that we do for currying and
composition is very permissive and quite likely overgenerates productions. If you use this, you
probably should double check all productions that were induced and make sure you really want
them in your grammar, manually removing any that you don't want in your subclass after the
grammar induction step (i.e., in your constructor, after calling `super().__init__()` and
`self.get_nonterminal_productions()`, modify `self._nonterminal_productions` directly).
We have rudimentary support for class hierarchies in the types that you provide. This is done
through adding constants multiple times with different types. For example, say you have a
``Column`` class with ``NumberColumn`` and ``StringColumn`` subclasses. You can have functions
that take the base class ``Column`` as an argument, and other functions that take the
subclasses. These will get types like ``<List[Row],Column:List[str]>`` (for a "select"
function that returns whatever cell text is in that column for the given rows), and
``<List[Row],NumberColumn,Number:List[Row]>`` (for a "greater_than" function that returns rows
with a value in the column greater than the given number). These will generate argument types
of ``Column`` and ``NumberColumn``, respectively. ``NumberColumn`` is a subclass of
``Column``, so we want the ``Column`` production to include all ``NumberColumns`` as options.
This is done by calling ``add_constant()`` with each ``NumberColumn`` twice: once without a
``type_`` argument (which infers the type as ``NumberColumn``), and once with ``type_=Column``.
You can see a concrete example of how this works in the
:class:`~allennlp_semparse.domain_languages.wikitables_language.WikiTablesLanguage`.
Parameters
----------
allowed_constants : ``Dict[str, Any]``, optional (default=None)
If given, we add all items in this dictionary as constants (instances of non-functional
types) in the language. You can also add them manually by calling ``add_constant`` in the
constructor of your ``DomainLanguage``.
start_types : ``Set[Type]``, optional (default=None)
If given, we will constrain the set of start types in the grammar to be this set.
Otherwise, we allow any type that we can get as a return type in the functions in the
language.
allow_function_currying : ``bool``, optional (default=False)
If ``True``, we will add production rules to the grammar (and support in function execution,
etc.) to curry all two-or-more-argument functions into one-argument functions. See the
above for a discussion of what this means and when you might want to do it. If you set this
to ``True``, you likely also want to set ``allow_function_composition`` to ``True``.
allow_function_composition : ``bool``, optional (default=False)
If ``True``, function composition as described above will be enabled in the language,
including support for parsing expressions with function composition, for executing these
expressions, and for converting them to and from action sequences. If you set this to
        ``True``, you likely also want to set ``allow_function_currying`` to ``True``.
"""
def __init__(
self,
allowed_constants: Dict[str, Any] = None,
start_types: Set[Type] = None,
allow_function_currying: bool = False,
allow_function_composition: bool = False,
) -> None:
self._allow_currying = allow_function_currying
self._allow_composition = allow_function_composition
self._functions: Dict[str, Callable] = {}
self._function_types: Dict[str, List[PredicateType]] = defaultdict(list)
self._start_types: Set[PredicateType] = {
            PredicateType.get_type(type_) for type_ in start_types or []
}
for name in dir(self):
if isinstance(getattr(self, name), types.MethodType):
function = getattr(self, name)
if getattr(function, "_is_predicate", False):
side_arguments = getattr(function, "_side_arguments", None)
self.add_predicate(name, function, side_arguments)
if allowed_constants:
for name, value in allowed_constants.items():
self.add_constant(name, value)
# Caching this to avoid recomputing it every time `get_nonterminal_productions` is called.
self._nonterminal_productions: Dict[str, List[str]] = None
def execute(self, logical_form: str):
"""Executes a logical form, using whatever predicates you have defined."""
if not hasattr(self, "_functions"):
raise RuntimeError("You must call super().__init__() in your Language constructor")
logical_form = logical_form.replace(",", " ")
expression = util.lisp_to_nested_expression(logical_form)
return self._execute_expression(expression)
def execute_action_sequence(
self, action_sequence: List[str], side_arguments: List[Dict] = None
):
"""
Executes the program defined by an action sequence directly, without needing the overhead
of translating to a logical form first. For any given program, :func:`execute` and this
function are equivalent, they just take different representations of the program, so you
can use whichever is more efficient.
Also, if you have state or side arguments associated with particular production rules
(e.g., the decoder's attention on an input utterance when a predicate was predicted), you
`must` use this function to execute the logical form, instead of :func:`execute`, so that
we can match the side arguments with the right functions.
"""
# We'll strip off the first action, because it doesn't matter for execution.
first_action = action_sequence[0]
left_side = first_action.split(" -> ")[0]
if left_side != "@start@":
raise ExecutionError("invalid action sequence")
remaining_actions = action_sequence[1:]
remaining_side_args = side_arguments[1:] if side_arguments else None
return self._execute_sequence(remaining_actions, remaining_side_args)[0]
def get_nonterminal_productions(self) -> Dict[str, List[str]]:
"""
Induces a grammar from the defined collection of predicates in this language and returns
all productions in that grammar, keyed by the non-terminal they are expanding.
This includes terminal productions implied by each predicate as well as productions for the
`return type` of each defined predicate. For example, defining a "multiply" predicate adds
a "<int,int:int> -> multiply" terminal production to the grammar, and `also` a "int ->
[<int,int:int>, int, int]" non-terminal production, because I can use the "multiply"
predicate to produce an int.
"""
if not self._nonterminal_productions:
actions: Dict[Union[str, PredicateType], Set[str]] = defaultdict(set)
# If you didn't give us a set of valid start types, we'll assume all types we know
# about (including functional types) are valid start types.
if self._start_types:
start_types = self._start_types
else:
start_types = set()
for type_list in self._function_types.values():
start_types.update(type_list)
for start_type in start_types:
actions[START_SYMBOL].add(f"{START_SYMBOL} -> {start_type}")
for name, function_type_list in self._function_types.items():
for function_type in function_type_list:
actions[function_type].add(f"{function_type} -> {name}")
if isinstance(function_type, FunctionType):
return_type = function_type.return_type
arg_types = function_type.argument_types
right_side = f"[{function_type}, {', '.join(str(arg_type) for arg_type in arg_types)}]"
actions[return_type].add(f"{return_type} -> {right_side}")
if self._allow_currying:
function_types = [t for t in actions if isinstance(t, FunctionType)]
for function_type in function_types:
if len(function_type.argument_types) > 1:
argument_types = list(set(function_type.argument_types))
for uncurried_arg_type in argument_types:
curried_arg_types = list(
reversed([t for t in function_type.argument_types])
)
curried_arg_types.remove(uncurried_arg_type)
curried_arg_types.reverse()
curried_function_type = PredicateType.get_function_type(
[uncurried_arg_type], function_type.return_type
)
right_side = f'[{function_type}, {", ".join(str(arg) for arg in curried_arg_types)}]'
actions[curried_function_type].add(
f"{curried_function_type} -> {right_side}"
)
if self._allow_composition:
function_types = [t for t in actions if isinstance(t, FunctionType)]
for type1 in function_types:
for type2 in function_types:
if len(type1.argument_types) != 1:
continue
if type1.argument_types[0] != type2.return_type:
continue
composed_type = PredicateType.get_function_type(
type2.argument_types, type1.return_type
)
right_side = f"[*, {type1}, {type2}]"
actions[composed_type].add(f"{composed_type} -> {right_side}")
self._nonterminal_productions = {
str(key): sorted(value) for key, value in actions.items()
}
return self._nonterminal_productions
def all_possible_productions(self) -> List[str]:
"""
Returns a sorted list of all production rules in the grammar induced by
:func:`get_nonterminal_productions`.
"""
all_actions = set()
for action_set in self.get_nonterminal_productions().values():
all_actions.update(action_set)
return sorted(all_actions)
def logical_form_to_action_sequence(self, logical_form: str) -> List[str]:
"""
Converts a logical form into a linearization of the production rules from its abstract
syntax tree. The linearization is top-down, depth-first.
Each production rule is formatted as "LHS -> RHS", where "LHS" is a single non-terminal
type, and RHS is either a terminal or a list of non-terminals (other possible values for
RHS in a more general context-free grammar are not produced by our grammar induction
logic).
Non-terminals are `types` in the grammar, either basic types (like ``int``, ``str``, or
some class that you define), or functional types, represented with angle brackets with a
colon separating arguments from the return type. Multi-argument functions have commas
separating their argument types. For example, ``<int:int>`` is a function that takes an
integer and returns an integer, and ``<int,int:int>`` is a function that takes two integer
arguments and returns an integer.
As an example translation from logical form to complete action sequence, the logical form
``(add 2 3)`` would be translated to ``['@start@ -> int', 'int -> [<int,int:int>, int, int]',
'<int,int:int> -> add', 'int -> 2', 'int -> 3']``.
"""
expression = util.lisp_to_nested_expression(logical_form)
try:
transitions, start_type = self._get_transitions(expression, expected_type=None)
if self._start_types and start_type not in self._start_types:
raise ParsingError(
f"Expression had unallowed start type of {start_type}: {expression}"
)
except ParsingError as error:
logger.error(f"Error parsing logical form: {logical_form}: {error}")
raise
transitions.insert(0, f"@start@ -> {start_type}")
return transitions
def action_sequence_to_logical_form(self, action_sequence: List[str]) -> str:
"""
Takes an action sequence as produced by :func:`logical_form_to_action_sequence`, which is a
linearization of an abstract syntax tree, and reconstructs the logical form defined by that
abstract syntax tree.
"""
# Basic outline: we assume that the bracketing that we get in the RHS of each action is the
# correct bracketing for reconstructing the logical form. This is true when there is no
# currying in the action sequence. Given this assumption, we just need to construct a tree
# from the action sequence, then output all of the leaves in the tree, with brackets around
# the children of all non-terminal nodes.
remaining_actions = [action.split(" -> ") for action in action_sequence]
tree = Tree(remaining_actions[0][1], [])
try:
remaining_actions = self._construct_node_from_actions(tree, remaining_actions[1:])
except ParsingError:
logger.error("Error parsing action sequence: %s", action_sequence)
raise
if remaining_actions:
logger.error("Error parsing action sequence: %s", action_sequence)
logger.error("Remaining actions were: %s", remaining_actions)
raise ParsingError("Extra actions in action sequence")
return nltk_tree_to_logical_form(tree)
def add_predicate(self, name: str, function: Callable, side_arguments: List[str] = None):
"""
Adds a predicate to this domain language. Typically you do this with the ``@predicate``
decorator on the methods in your class. But, if you need to for whatever reason, you can
also call this function yourself with a (type-annotated) function to add it to your
language.
Parameters
----------
name : ``str``
The name that we will use in the induced language for this function.
function : ``Callable``
The function that gets called when executing a predicate with the given name.
side_arguments : ``List[str]``, optional
If given, we will ignore these arguments for the purposes of grammar induction. This
is to allow passing extra arguments from the decoder state that are not explicitly part
of the language the decoder produces, such as the decoder's attention over the question
when a terminal was predicted. If you use this functionality, you also `must` use
``language.execute_action_sequence()`` instead of ``language.execute()``, and you must
pass the additional side arguments needed to that function. See
:func:`execute_action_sequence` for more information.
"""
side_arguments = side_arguments or []
signature = inspect.signature(function)
argument_types = [
param.annotation
for name, param in signature.parameters.items()
if name not in side_arguments
]
return_type = signature.return_annotation
argument_nltk_types: List[PredicateType] = [
PredicateType.get_type(arg_type) for arg_type in argument_types
]
return_nltk_type = PredicateType.get_type(return_type)
function_nltk_type = PredicateType.get_function_type(argument_nltk_types, return_nltk_type)
self._functions[name] = function
self._function_types[name].append(function_nltk_type)
def add_constant(self, name: str, value: Any, type_: Type = None):
"""
Adds a constant to this domain language. You would typically just pass in a list of
constants to the ``super().__init__()`` call in your constructor, but you can also call
this method to add constants if it is more convenient.
Because we construct a grammar over this language for you, in order for the grammar to be
finite we cannot allow arbitrary constants. Having a finite grammar is important when
you're doing semantic parsing - we need to be able to search over this space, and compute
normalized probability distributions.
"""
value_type = type_ if type_ else type(value)
constant_type = PredicateType.get_type(value_type)
self._functions[name] = lambda: value
self._function_types[name].append(constant_type)
def is_nonterminal(self, symbol: str) -> bool:
"""
Determines whether an input symbol is a valid non-terminal in the grammar.
"""
nonterminal_productions = self.get_nonterminal_productions()
return symbol in nonterminal_productions
def _execute_expression(self, expression: Any):
"""
This does the bulk of the work of executing a logical form, recursively executing a single
expression. Basically, if the expression is a function we know about, we evaluate its
arguments then call the function. If it's a list, we evaluate all elements of the list.
If it's a constant (or a zero-argument function), we evaluate the constant.
"""
if isinstance(expression, list):
if isinstance(expression[0], list):
function = self._execute_expression(expression[0])
elif expression[0] in self._functions:
function = self._functions[expression[0]]
elif self._allow_composition and expression[0] == "*":
function = "*"
else:
if isinstance(expression[0], str):
raise ExecutionError(f"Unrecognized function: {expression[0]}")
else:
raise ExecutionError(f"Unsupported expression type: {expression}")
arguments = [self._execute_expression(arg) for arg in expression[1:]]
try:
if self._allow_composition and function == "*":
return self._create_composed_function(arguments[0], arguments[1])
return function(*arguments)
except (TypeError, ValueError):
if self._allow_currying:
# If we got here, then maybe the error is because this should be a curried
# function. We'll check for that and return the curried function if possible.
curried_function = self._get_curried_function(function, arguments)
if curried_function:
return curried_function
traceback.print_exc()
raise ExecutionError(
f"Error executing expression {expression} (see stderr for stack trace)"
)
elif isinstance(expression, str):
if expression not in self._functions:
raise ExecutionError(f"Unrecognized constant: {expression}")
# This is a bit of a quirk in how we represent constants and zero-argument functions.
# For consistency, constants are wrapped in a zero-argument lambda. So both constants
# and zero-argument functions are callable in `self._functions`, and are `BasicTypes`
# in `self._function_types`. For these, we want to return
# `self._functions[expression]()` _calling_ the zero-argument function. If we get a
# `FunctionType` in here, that means we're referring to the function as a first-class
# object, instead of calling it (maybe as an argument to a higher-order function). In
# that case, we return the function _without_ calling it.
# Also, we just check the first function type here, because we assume you haven't
# registered the same function with both a constant type and a `FunctionType`.
if isinstance(self._function_types[expression][0], FunctionType):
return self._functions[expression]
else:
return self._functions[expression]()
else:
raise ExecutionError(
"Not sure how you got here. Please open a github issue with details."
)
def _execute_sequence(
self, action_sequence: List[str], side_arguments: List[Dict]
) -> Tuple[Any, List[str], List[Dict]]:
"""
This does the bulk of the work of :func:`execute_action_sequence`, recursively executing
the functions it finds and trimming actions off of the action sequence. The return value
is a tuple of (execution, remaining_actions), where the second value is necessary to handle
the recursion.
"""
if not action_sequence:
raise ExecutionError("invalid action sequence")
first_action = action_sequence[0]
remaining_actions = action_sequence[1:]
remaining_side_args = side_arguments[1:] if side_arguments else None
right_side = first_action.split(" -> ")[1]
if right_side in self._functions:
function = self._functions[right_side]
# mypy doesn't like this check, saying that Callable isn't a reasonable thing to pass
# here. But it works just fine; I'm not sure why mypy complains about it.
if isinstance(function, Callable): # type: ignore
function_arguments = inspect.signature(function).parameters
if not function_arguments:
# This was a zero-argument function / constant that was registered as a lambda
# function, for consistency of execution in `execute()`.
execution_value = function()
elif side_arguments:
kwargs = {}
non_kwargs = []
for argument_name in function_arguments:
if argument_name in side_arguments[0]:
kwargs[argument_name] = side_arguments[0][argument_name]
else:
non_kwargs.append(argument_name)
if kwargs and non_kwargs:
# This is a function that has both side arguments and logical form
# arguments - we curry the function so only the logical form arguments are
# left.
def curried_function(*args):
return function(*args, **kwargs)
execution_value = curried_function
elif kwargs:
# This is a function that _only_ has side arguments - we just call the
# function and return a value.
execution_value = function(**kwargs)
else:
# This is a function that has logical form arguments, but no side arguments
# that match what we were given - just return the function itself.
execution_value = function
else:
execution_value = function
return execution_value, remaining_actions, remaining_side_args
else:
# This is a non-terminal expansion, like 'int -> [<int,int:int>, int, int]'. We need to
# get the function and its arguments, then call the function with its arguments.
# Because we linearize the abstract syntax tree depth first, left-to-right, we can just
# recursively call `_execute_sequence` for the function and all of its arguments, and
# things will just work.
right_side_parts = right_side.split(", ")
if right_side_parts[0] == "[*" and self._allow_composition:
# This one we need to handle differently, because the "function" is a function
# composition which doesn't show up in the action sequence.
function = "*" # type: ignore
else:
# Otherwise, we grab the function itself by executing the next self-contained action
# sequence (if this is a simple function call, that will be exactly one action; if
# it's a higher-order function, it could be many actions).
function, remaining_actions, remaining_side_args = self._execute_sequence(
remaining_actions, remaining_side_args
)
# We don't really need to know what the types of the arguments are, just how many of them
# there are, so we recurse the right number of times.
arguments = []
for _ in right_side_parts[1:]:
argument, remaining_actions, remaining_side_args = self._execute_sequence(
remaining_actions, remaining_side_args
)
arguments.append(argument)
if self._allow_composition and function == "*":
# In this case, both arguments should be functions, and we want to compose them, by
# calling the second argument first, and passing the result to the first argument.
def composed_function(*args):
function_argument, is_curried = self._execute_function(arguments[1], list(args))
if is_curried:
# If the inner function ended up curried, we have to curry the outer
# function too.
return_type = inspect.signature(arguments[0]).return_annotation
inner_signature = inspect.signature(function_argument)
arg_type = list(inner_signature.parameters.values())[0].annotation
# Pretty cool that you can give runtime types to a function defined at
# runtime, but mypy has no idea what to do with this.
def curried_function(x: arg_type) -> return_type: # type: ignore
return arguments[0](function_argument(x))
function_value = curried_function
else:
function_value, _ = self._execute_function(
arguments[0], [function_argument]
)
return function_value
return composed_function, remaining_actions, remaining_side_args
function_value, _ = self._execute_function(function, arguments)
return function_value, remaining_actions, remaining_side_args
def _execute_function(self, function: Callable, arguments: List[Any]) -> Tuple[Any, bool]:
"""
Calls `function` with the given `arguments`, allowing for the possibility of currying the
`function`. Returns a tuple of (value, is_curried), where `is_curried` says whether we had
to curry the function instead of calling it directly.
"""
is_curried = False
try:
function_value = function(*arguments)
except TypeError:
if not self._allow_currying:
raise
# If we got here, then maybe the error is because this should be a curried
# function. We'll check for that and return the curried function if possible.
curried_function = self._get_curried_function(function, arguments)
if curried_function:
function_value = curried_function
is_curried = True
else:
raise
return function_value, is_curried
def _get_transitions(
self, expression: Any, expected_type: PredicateType
) -> Tuple[List[str], PredicateType]:
"""
This is used when converting a logical form into an action sequence. This piece
recursively translates a lisp expression into an action sequence, making sure we match the
expected type (or using the expected type to get the right type for constant expressions).
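For example (a sketch, assuming an ``add`` predicate with type ``<int,int:int>`` and integer
constants), the logical form ``(add 2 3)`` translates to the actions
``int -> [<int,int:int>, int, int]``, ``<int,int:int> -> add``, ``int -> 2``, and ``int -> 3``;
the exact type strings depend on how the predicate was registered.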
"""
if isinstance(expression, (list, tuple)):
function_transitions, return_type, argument_types = self._get_function_transitions(
expression, expected_type
)
if len(argument_types) != len(expression[1:]):
raise ParsingError(f"Wrong number of arguments for function in {expression}")
argument_transitions = []
for argument_type, subexpression in zip(argument_types, expression[1:]):
argument_transitions.extend(self._get_transitions(subexpression, argument_type)[0])
return function_transitions + argument_transitions, return_type
elif isinstance(expression, str):
if expression not in self._functions:
raise ParsingError(f"Unrecognized constant: {expression}")
constant_types = self._function_types[expression]
if len(constant_types) == 1:
constant_type = constant_types[0]
# This constant had only one type; that's the easy case.
if expected_type and expected_type != constant_type:
raise ParsingError(
f"{expression} did not have expected type {expected_type} "
f"(found {constant_type})"
)
return [f"{constant_type} -> {expression}"], constant_type
else:
if not expected_type:
raise ParsingError(
"With no expected type and multiple types to pick from "
f"I don't know what type to use (constant was {expression})"
)
if expected_type not in constant_types:
raise ParsingError(
f"{expression} did not have expected type {expected_type} "
f"(found these options: {constant_types}; none matched)"
)
return [f"{expected_type} -> {expression}"], expected_type
else:
raise ParsingError(
"Not sure how you got here. Please open an issue on github with details."
)
def _get_function_transitions(
self, expression: Sequence, expected_type: PredicateType
) -> Tuple[List[str], PredicateType, Sequence[PredicateType]]:
"""
A helper method for ``_get_transitions``. This gets the transitions for the predicate
itself in a function call. If we only had simple functions (e.g., "(add 2 3)"), this would
be pretty straightforward and we wouldn't need a separate method to handle it. We split it
out into its own method because handling higher-order functions and currying is complicated
(e.g., something like "((negate add) 2 3)" or "((multiply 3) 2)").
"""
function_expression = expression[0]
# This first block handles getting the transitions and function type (and some error
# checking) _just for the function itself_. If this is a simple function, this is easy; if
# it's a higher-order function, it involves some recursion.
if isinstance(function_expression, list):
# This is a higher-order function. TODO(mattg): we'll just ignore type checking on
# higher-order functions, for now.
# Some annoying redirection here to make mypy happy; need to specify the type of
# function_type.
result = self._get_transitions(function_expression, None)
transitions = result[0]
function_type: FunctionType = result[1] # type: ignore
# This is a bit of an unfortunate hack. In order to handle currying, we currently rely
# on executing the function, for which we need actual function code (see the class
# docstring). I want to avoid executing the function prematurely, though, so this still
# works in cases where you don't need to handle currying higher-order functions. So,
# we'll leave this as `None` and handle it below, if indeed you are currying this
# function.
function = None
elif function_expression in self._functions:
name = function_expression
function_types = self._function_types[function_expression]
if len(function_types) != 1:
raise ParsingError(
f"{function_expression} had multiple types; this is not yet supported for functions"
)
function_type = function_types[0] # type: ignore
transitions = [f"{function_type} -> {name}"]
function = self._functions[function_expression]
elif self._allow_composition and function_expression == "*":
outer_function_expression = expression[1]
if not isinstance(outer_function_expression, list):
outer_function_expression = [outer_function_expression]
inner_function_expression = expression[2]
if not isinstance(inner_function_expression, list):
inner_function_expression = [inner_function_expression]
# This is unfortunately a bit complex. What we really want is the _type_ of these
# expressions. We don't have a function that will give us that. Instead, we have a
# function that will give us return types and argument types. If we have a bare
# function name, like "sum", this works fine. But if it's a higher-order function
# (including a curried function), then the return types and argument types from
# _get_function_transitions aren't what we're looking for here, because that function is
# designed for something else. We need to hack our way around that a bit, by grabbing
# the return type from the inner return type (confusing, I know).
_, outer_return_type, outer_arg_types = self._get_function_transitions(
outer_function_expression, None
)
if isinstance(expression[1], list):
outer_function_type: FunctionType = outer_return_type # type: ignore
else:
outer_function_type = PredicateType.get_function_type( # type: ignore
outer_arg_types, outer_return_type
)
_, inner_return_type, inner_arg_types = self._get_function_transitions(
inner_function_expression, None
)
if isinstance(expression[2], list):
inner_function_type: FunctionType = inner_return_type # type: ignore
else:
inner_function_type = PredicateType.get_function_type( # type: ignore
inner_arg_types, inner_return_type
)
composition_argument_types = [outer_function_type, inner_function_type]
composition_type = PredicateType.get_function_type(
inner_function_type.argument_types, outer_function_type.return_type
)
right_side = f'[*, {", ".join(str(arg) for arg in composition_argument_types)}]'
composition_transition = f"{composition_type} -> {right_side}"
return [composition_transition], composition_type, composition_argument_types
else:
if isinstance(function_expression, str):
raise ParsingError(f"Unrecognized function: {function_expression}")
else:
raise ParsingError(f"Unsupported function_expression type: {function_expression}")
if not isinstance(function_type, FunctionType):
raise ParsingError(f"Zero-arg function or constant called with arguments: {name}")
# Now that we have the transitions for the function itself, and the function's type, we can
# get argument types and do the rest of the transitions. The first thing we need to do is
# check if we need to curry this function, because we're missing an argument.
if (
self._allow_currying
# This check means we're missing an argument to the function.
and len(expression) > 1
and len(function_type.argument_types) - 1 == len(expression) - 1
):
# If we're currying this function, we need to add a transition that encodes the
# currying, and change the function_type accordingly.
arguments = [self._execute_expression(e) for e in expression[1:]]
if function is None:
function = self._execute_expression(function_expression)
curried_function = self._get_curried_function(function, arguments)
# Here we get the FunctionType corresponding to the new, curried function.
signature = inspect.signature(curried_function)
return_type = PredicateType.get_type(signature.return_annotation)
uncurried_arg_type = PredicateType.get_type(
list(signature.parameters.values())[0].annotation
)
curried_function_type = PredicateType.get_function_type(
[uncurried_arg_type], return_type
)
# To fit in with the logic below, we need to basically make a fake `curry`
# FunctionType, with the arguments being the function we're currying and all of the
# curried arguments, and the return type being the one-argument function. Then we
# can re-use all of the existing logic without modification.
curried_arg_types = list(reversed([t for t in function_type.argument_types]))
curried_arg_types.remove(uncurried_arg_type)
curried_arg_types.reverse()
right_side = f'[{function_type}, {", ".join(str(arg) for arg in curried_arg_types)}]'
curry_transition = f"{curried_function_type} -> {right_side}"
transitions.insert(0, curry_transition)
return transitions, curried_function_type, curried_arg_types
argument_types = function_type.argument_types
return_type = function_type.return_type
right_side = f'[{function_type}, {", ".join(str(arg) for arg in argument_types)}]'
first_transition = f"{return_type} -> {right_side}"
transitions.insert(0, first_transition)
if expected_type and expected_type != return_type:
raise ParsingError(
f"{function_expression} did not have expected type {expected_type} "
f"(found {return_type})"
)
return transitions, return_type, argument_types
def _construct_node_from_actions(
self, current_node: Tree, remaining_actions: List[List[str]]
) -> List[List[str]]:
"""
Given a current node in the logical form tree, and a list of actions in an action sequence,
this method fills in the children of the current node from the action sequence, then
returns whatever actions are left.
For example, we could get a node with type ``c``, and an action sequence that begins with
``c -> [<r,c>, r]``. This method will add two children to the input node, consuming
actions from the action sequence for nodes of type ``<r,c>`` (and all of its children,
recursively) and ``r`` (and all of its children, recursively). This method assumes that
action sequences are produced `depth-first`, so all actions for the subtree under ``<r,c>``
appear before actions for the subtree under ``r``. If there are any actions in the action
sequence after the ``<r,c>`` and ``r`` subtrees have terminated in leaf nodes, they will be
returned.
"""
if not remaining_actions:
logger.error("No actions left to construct current node: %s", current_node)
raise ParsingError("Incomplete action sequence")
left_side, right_side = remaining_actions.pop(0)
if left_side != current_node.label():
logger.error("Current node: %s", current_node)
logger.error("Next action: %s -> %s", left_side, right_side)
logger.error("Remaining actions were: %s", remaining_actions)
raise ParsingError("Current node does not match next action")
if right_side[0] == "[":
# This is a non-terminal expansion, with more than one child node.
for child_type in right_side[1:-1].split(", "):
child_node = Tree(child_type, [])
current_node.append(child_node) # you add a child to an nltk.Tree with `append`
# For now, we assume that all children in a list like this are non-terminals, so we
# recurse on them. I'm pretty sure that will always be true for the way our
# grammar induction works. We can revisit this later if we need to.
if self._allow_composition and child_type == "*":
# One exception to the comment above is when we are doing function composition.
# The function composition operator * does not have a corresponding action, so
# the recursion on constructing that node doesn't work.
continue
remaining_actions = self._construct_node_from_actions(child_node, remaining_actions)
else:
# The current node is a pre-terminal; we'll add a single terminal child. By
# construction, the right-hand side of our production rules are only ever terminal
# productions or lists of non-terminals.
current_node.append(
Tree(right_side, [])
) # you add a child to an nltk.Tree with `append`
return remaining_actions
def _get_curried_function(self, function: Callable, arguments: List[Any]) -> Optional[Callable]:
signature = inspect.signature(function)
parameters = signature.parameters
if len(parameters) != len(arguments) + 1:
# We only allow currying that makes a function into a one-argument function. This is to
# simplify both the complexity of the `DomainLanguage` code and the complexity of
# whatever model might use the resulting grammar. For all currently-envisioned uses of
# currying, we only need to make one-argument functions. These are predominantly for
# replacing lambda functions in argmaxes and the like.
return None
# Now we have to decide where the missing argument goes in the list of arguments. We will
# look at types to figure that out, and arbitrarily say that if there are multiple matching
# types, the missing one comes last.
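# For example (a sketch): for a two-argument function f(a: int, b: str) called with just an int,
# the int matches the first parameter, so the missing argument is the str, and we return a
# one-argument function over str.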
missing_arg_index = 0
parameter_types = list(parameters.values())
for parameter in parameter_types:
argument = arguments[missing_arg_index]
if isinstance(argument, (list, set)):
arg_type = infer_collection_type(argument)
else:
arg_type = type(argument)
if parameter.annotation == arg_type:
missing_arg_index += 1
if missing_arg_index == len(parameters) - 1:
break
else:
break
arg_type = parameter_types[missing_arg_index].annotation
# Pretty cool that you can give runtime types to a function defined at runtime, but mypy has
# no idea what to do with this.
def curried_function(x: arg_type) -> signature.return_annotation: # type: ignore
new_arguments = arguments[:missing_arg_index] + [x] + arguments[missing_arg_index:]
return function(*new_arguments)
return curried_function
def _create_composed_function(
self, outer_function: Callable, inner_function: Callable
) -> Callable:
"""
Creating a composed function is easy; just do a `def` and call the functions in order. This
function exists because we need the composed function to have _correct type annotations_,
which is harder. We can't use `*args` for the lambda function that we construct, so we need
to switch on how many arguments the inner function takes, and create functions with the
right argument type annotations.
And, as in other places where we assign types at runtime, mypy has no idea what's going on,
so we tell it to ignore this code. `inspect` will do the right thing, even if mypy can't
analyze it.
"""
inner_signature = inspect.signature(inner_function)
outer_signature = inspect.signature(outer_function)
argument_types = [arg.annotation for arg in inner_signature.parameters.values()]
return_type = outer_signature.return_annotation
if len(argument_types) == 1:
def composed_function(arg1: argument_types[0]) -> return_type: # type: ignore
return outer_function(inner_function(arg1))
elif len(argument_types) == 2:
def composed_function( # type: ignore
arg1: argument_types[0], arg2: argument_types[1] # type:ignore
) -> return_type: # type: ignore
return outer_function(inner_function(arg1, arg2))
elif len(argument_types) == 3:
def composed_function( # type:ignore
arg1: argument_types[0], # type: ignore
arg2: argument_types[1], # type: ignore
arg3: argument_types[2], # type:ignore
) -> return_type: # type: ignore
return outer_function(inner_function(arg1, arg2, arg3))
elif len(argument_types) == 4:
def composed_function( # type:ignore
arg1: argument_types[0], # type:ignore
arg2: argument_types[1], # type:ignore
arg3: argument_types[2], # type:ignore
arg4: argument_types[3], # type:ignore
) -> return_type: # type: ignore
return outer_function(inner_function(arg1, arg2, arg3, arg4))
else:
raise ValueError(
f"Inner function has a type signature that's not currently handled: {inner_function}"
)
return composed_function
def __len__(self):
# This method exists just to make it easier to use this in a MetadataField. Kind of
# annoying, but oh well.
return 0
| allennlp-semparse-master | allennlp_semparse/domain_languages/domain_language.py |
from collections import defaultdict
# We use "Number" in a bunch of places throughout to try to generalize ints and floats.
# Unfortunately, mypy doesn't like this very much, so we have to "type: ignore" a bunch of things.
# But it makes for a nicer induced grammar, so it's worth it.
from numbers import Number
from typing import Dict, List, NamedTuple, Set, Type, Tuple, Any
import logging
import re
from allennlp_semparse.common import Date, ExecutionError, MONTH_NUMBERS
from allennlp_semparse.common.wikitables import wikitables_evaluator as evaluator
from allennlp_semparse.common.wikitables import TableQuestionContext, CellValueType
from allennlp_semparse.domain_languages.domain_language import (
DomainLanguage,
PredicateType,
predicate,
)
logger = logging.getLogger(__name__)
class Row(NamedTuple):
# Maps column names to cell values.
values: Dict[str, CellValueType]
class Column(NamedTuple):
name: str
class StringColumn(Column):
pass
class ComparableColumn(Column):
pass
class DateColumn(ComparableColumn):
pass
class NumberColumn(ComparableColumn):
pass
class WikiTablesLanguage(DomainLanguage):
"""
Implements the functions in the variable-free language we use, inspired by the one in
"Memory Augmented Policy Optimization for Program Synthesis with Generalization" by Liang et al.
Because some of the functions are only allowed if some conditions hold on the table, we don't
use the ``@predicate`` decorator for all of the language functions. Instead, we add them to
the language using ``add_predicate`` if, e.g., there is a column with dates in it.
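As a rough example of what logical forms in this language look like (a sketch, assuming a table
with a string column named ``league`` and a date column named ``year``),
``(select_date (filter_in all_rows string_column:league string:usl_a_league) date_column:year)``
selects the date under ``year`` for the rows whose ``league`` cell contains ``usl_a_league``.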
"""
def __init__(self, table_context: TableQuestionContext) -> None:
super().__init__(start_types=self._get_start_types_in_context(table_context))
self.table_context = table_context
self.table_data = [Row(row) for row in table_context.table_data]
column_types = table_context.column_types
self._table_has_string_columns = False
self._table_has_date_columns = False
self._table_has_number_columns = False
if "string" in column_types:
self.add_predicate("filter_in", self.filter_in)
self.add_predicate("filter_not_in", self.filter_not_in)
self._table_has_string_columns = True
if "date" in column_types:
self.add_predicate("filter_date_greater", self.filter_date_greater)
self.add_predicate("filter_date_greater_equals", self.filter_date_greater_equals)
self.add_predicate("filter_date_lesser", self.filter_date_lesser)
self.add_predicate("filter_date_lesser_equals", self.filter_date_lesser_equals)
self.add_predicate("filter_date_equals", self.filter_date_equals)
self.add_predicate("filter_date_not_equals", self.filter_date_not_equals)
self.add_predicate("max_date", self.max_date)
self.add_predicate("min_date", self.min_date)
# Adding -1 to mapping because we need it for dates where not all three fields are
# specified. We want to do this only when the table has a date column. This is because
# the knowledge graph is also constructed in such a way that -1 is an entity with date
# columns as the neighbors only if any date columns exist in the table.
self.add_constant("-1", -1, type_=Number)
self._table_has_date_columns = True
if "number" in column_types or "num2" in column_types:
self.add_predicate("filter_number_greater", self.filter_number_greater)
self.add_predicate("filter_number_greater_equals", self.filter_number_greater_equals)
self.add_predicate("filter_number_lesser", self.filter_number_lesser)
self.add_predicate("filter_number_lesser_equals", self.filter_number_lesser_equals)
self.add_predicate("filter_number_equals", self.filter_number_equals)
self.add_predicate("filter_number_not_equals", self.filter_number_not_equals)
self.add_predicate("max_number", self.max_number)
self.add_predicate("min_number", self.min_number)
self.add_predicate("average", self.average)
self.add_predicate("sum", self.sum)
self.add_predicate("diff", self.diff)
self._table_has_number_columns = True
if "date" in column_types or "number" in column_types or "num2" in column_types:
self.add_predicate("argmax", self.argmax)
self.add_predicate("argmin", self.argmin)
self.table_graph = table_context.get_table_knowledge_graph()
# Adding entities and numbers seen in questions as constants.
question_entities, question_numbers = table_context.get_entities_from_question()
self._question_entities = [entity for entity, _ in question_entities]
self._question_numbers = [number for number, _ in question_numbers]
for entity in self._question_entities:
# Forcing the type of entities to be List[str] here to ensure that the language deals with the outputs
# of select-like statements and constants similarly.
self.add_constant(entity, entity, type_=List[str])
for number in self._question_numbers:
self.add_constant(str(number), float(number), type_=Number)
# Keeps track of column name productions so that we can add them to the agenda.
self._column_productions_for_agenda: Dict[str, str] = {}
# Adding column names as constants.
for column_name in table_context.column_names:
column_type = column_name.split(":")[0].replace("_column", "")
column: Column = None
if column_type == "string":
column = StringColumn(column_name)
elif column_type == "date":
column = DateColumn(column_name)
self.add_constant(column_name, column, type_=ComparableColumn)
elif column_type in {"number", "num2"}:
column = NumberColumn(column_name)
self.add_constant(column_name, column, type_=ComparableColumn)
self.add_constant(column_name, column, type_=Column)
self.add_constant(column_name, column)
column_type_name = str(PredicateType.get_type(type(column)))
self._column_productions_for_agenda[
column_name
] = f"{column_type_name} -> {column_name}"
# Mapping from terminal strings to productions that produce them. We use this in the
# agenda-related methods, and some models that use this language look at this field to know
# how many terminals to plan for.
self.terminal_productions: Dict[str, str] = {}
for name, types in self._function_types.items():
self.terminal_productions[name] = "%s -> %s" % (types[0], name)
def _get_start_types_in_context(self, table_context: TableQuestionContext) -> Set[Type]:
start_types: Set[Type] = set()
if "string" in table_context.column_types:
start_types.add(List[str])
if "number" in table_context.column_types or "num2" in table_context.column_types:
start_types.add(Number)
if "date" in table_context.column_types:
start_types.add(Date)
return start_types
def get_agenda(self, conservative: bool = False):
"""
Returns an agenda that can be used to guide search.
Parameters
----------
conservative : ``bool``
Setting this flag will return a subset of the agenda items that correspond to high
confidence lexical matches. You'll need this if you are going to use this agenda to
penalize a model for producing logical forms that do not contain some items in it. In
that case, you'll want this agenda to have close to perfect precision, at the cost of a
lower recall. You may not want to set this flag if you are sorting the output from a
search procedure based on how much of this agenda is satisfied.
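The returned agenda is a list of production strings. For example (a sketch), a question that
contains the phrase "at least" and the number 2005, over a table with number columns, could
yield entries like the production for ``filter_number_greater_equals`` and ``Number -> 2005``.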
"""
agenda_items = []
question_tokens = [token.text for token in self.table_context.question_tokens]
question = " ".join(question_tokens)
added_number_filters = False
if self._table_has_number_columns:
if "at least" in question:
agenda_items.append("filter_number_greater_equals")
if "at most" in question:
agenda_items.append("filter_number_lesser_equals")
comparison_triggers = ["greater", "larger", "more"]
if any(f"no {word} than" in question for word in comparison_triggers):
agenda_items.append("filter_number_lesser_equals")
elif any(f"{word} than" in question for word in comparison_triggers):
agenda_items.append("filter_number_greater")
# We want to keep track of this because we do not want to add both number and date
# filters to the agenda if we want to be conservative.
if agenda_items:
added_number_filters = True
for token in question_tokens:
if token in ["next", "below"] or (token == "after" and not conservative):
agenda_items.append("next")
if token in ["previous", "above"] or (token == "before" and not conservative):
agenda_items.append("previous")
if token in ["first", "top"]:
agenda_items.append("first")
if token in ["last", "bottom"]:
agenda_items.append("last")
if token == "same":
agenda_items.append("same_as")
if self._table_has_number_columns:
# "total" does not always map to an actual summing operation.
if token == "total" and not conservative:
agenda_items.append("sum")
if (
token == "difference"
or "how many more" in question
or "how much more" in question
):
agenda_items.append("diff")
if token == "average":
agenda_items.append("average")
if (
token in ["least", "smallest", "shortest", "lowest"]
and "at least" not in question
):
# This condition is too brittle. But for most logical forms with "min", there are
# semantically equivalent ones with "argmin". The exceptions are rare.
if "what is the least" not in question:
agenda_items.append("argmin")
if (
token in ["most", "largest", "highest", "longest", "greatest"]
and "at most" not in question
):
# This condition is too brittle. But for most logical forms with "max", there are
# semantically equivalent ones with "argmax". The exceptions are rare.
if "what is the most" not in question:
agenda_items.append("argmax")
if self._table_has_date_columns:
if token in MONTH_NUMBERS or (
token.isdigit() and len(token) == 4 and 1100 < int(token) < 2100
):
# Token is either a month or a year. We'll add date functions.
if not added_number_filters or not conservative:
if "after" in question_tokens:
agenda_items.append("filter_date_greater")
elif "before" in question_tokens:
agenda_items.append("filter_date_lesser")
elif "not" in question_tokens:
agenda_items.append("filter_date_not_equals")
else:
agenda_items.append("filter_date_equals")
if "what is the least" in question and self._table_has_number_columns:
agenda_items.append("min_number")
if "what is the most" in question and self._table_has_number_columns:
agenda_items.append("max_number")
if "when" in question_tokens and self._table_has_date_columns:
if "last" in question_tokens:
agenda_items.append("max_date")
elif "first" in question_tokens:
agenda_items.append("min_date")
else:
agenda_items.append("select_date")
if "how many" in question:
if "sum" not in agenda_items and "average" not in agenda_items:
# The question probably just requires counting the rows. But this is not very
# accurate. The question could also be asking for a value that is in the table.
agenda_items.append("count")
agenda = []
# Adding productions from the global set.
for agenda_item in set(agenda_items):
# Some agenda items may not be present in the terminal productions because some of these
# terminals are table-content specific. For example, if the question triggered "sum",
# and the table does not have number columns, we should not add "<r,<f,n>> -> sum" to
# the agenda.
if agenda_item in self.terminal_productions:
agenda.append(self.terminal_productions[agenda_item])
if conservative:
# Some of the columns in the table have multiple types, and thus occur in the KG as
# different columns. We do not want to add all of them to the agenda just because their
# names occur in the question, because it is unlikely that logical forms use them all. In
# fact, to be conservative,
# we won't add any of them. So we'll first identify such column names.
refined_column_productions: Dict[str, str] = {}
for column_name, signature in self._column_productions_for_agenda.items():
column_type, name = column_name.split(":")
if column_type == "string_column":
if (
f"number_column:{name}" not in self._column_productions_for_agenda
and f"date_column:{name}" not in self._column_productions_for_agenda
):
refined_column_productions[column_name] = signature
elif column_type == "number_column":
if (
f"string_column:{name}" not in self._column_productions_for_agenda
and f"date_column:{name}" not in self._column_productions_for_agenda
):
refined_column_productions[column_name] = signature
else:
if (
f"string_column:{name}" not in self._column_productions_for_agenda
and f"number_column:{name}" not in self._column_productions_for_agenda
):
refined_column_productions[column_name] = signature
# Similarly, we do not want the same spans in the question to be added to the agenda as
# both string and number productions.
refined_entities: List[str] = []
refined_numbers: List[str] = []
for entity in self._question_entities:
if entity.replace("string:", "") not in self._question_numbers:
refined_entities.append(entity)
for number in self._question_numbers:
if f"string:{number}" not in self._question_entities:
refined_numbers.append(number)
else:
refined_column_productions = dict(self._column_productions_for_agenda)
refined_entities = list(self._question_entities)
refined_numbers = list(self._question_numbers)
# Adding column names that occur in question.
question_with_underscores = "_".join(question_tokens)
normalized_question = re.sub("[^a-z0-9_]", "", question_with_underscores)
# We keep track of tokens that are in column names being added to the agenda. We will not
# add string productions to the agenda if those tokens were already captured as column
# names.
# Note: If the same string occurs multiple times, this may cause string productions to be
# omitted from the agenda unnecessarily. That is fine, as we want to err on the side of
# adding fewer rules to the agenda.
tokens_in_column_names: Set[str] = set()
for column_name_with_type, signature in refined_column_productions.items():
column_name = column_name_with_type.split(":")[1]
# Underscores ensure that the match is of whole words.
if f"_{column_name}_" in normalized_question:
agenda.append(signature)
for token in column_name.split("_"):
tokens_in_column_names.add(token)
# Adding all productions that lead to entities and numbers extracted from the question.
for entity in refined_entities:
if entity.replace("string:", "") not in tokens_in_column_names:
agenda.append(f"List[str] -> {entity}")
for number in refined_numbers:
# The reason we check for the presence of the number in the question again is because
# some of these numbers are extracted from number words like month names and ordinals
# like "first". On looking at some agenda outputs, I found that they hurt more than help
# in the agenda.
if f"_{number}_" in normalized_question:
agenda.append(f"Number -> {number}")
return agenda
@staticmethod
def is_instance_specific_entity(entity_name: str) -> bool:
"""
Instance specific entities are column names, strings and numbers. Returns True if the entity
is one of those.
"""
entity_is_number = False
try:
float(entity_name)
entity_is_number = True
except ValueError:
pass
# Column names start with "*_column:", strings start with "string:"
return "_column:" in entity_name or entity_name.startswith("string:") or entity_is_number
def evaluate_logical_form(self, logical_form: str, target_list: List[str]) -> bool:
"""
Takes a logical form, and the list of target values as strings from the original lisp
string, and returns True iff the logical form executes to the target list, using the
official WikiTableQuestions evaluation script.
"""
try:
denotation = self.execute(logical_form)
except ExecutionError as error:
logger.warning(f"Failed to execute: {logical_form}. Error: {error}")
return False
return self.evaluate_denotation(denotation, target_list)
def evaluate_action_sequence(self, action_sequence: List[str], target_list: List[str]) -> bool:
"""
Similar to ``evaluate_logical_form`` except that it takes an action sequence instead. The reason this is
separate is because there is a separate method ``DomainLanguage.execute_action_sequence`` that executes the
action sequence directly.
"""
try:
denotation = self.execute_action_sequence(action_sequence)
except ExecutionError:
logger.warning(f"Failed to execute action sequence: {action_sequence}")
return False
return self.evaluate_denotation(denotation, target_list)
def evaluate_denotation(self, denotation: Any, target_list: List[str]) -> bool:
"""
Compares denotation with a target list and returns whether they are both the same according to the official
evaluator.
"""
normalized_target_list = [
TableQuestionContext.normalize_string(value) for value in target_list
]
target_value_list = evaluator.to_value_list(normalized_target_list)
if isinstance(denotation, list):
denotation_list = [str(denotation_item) for denotation_item in denotation]
else:
denotation_list = [str(denotation)]
denotation_value_list = evaluator.to_value_list(denotation_list)
return evaluator.check_denotation(target_value_list, denotation_value_list)
# Things below here are language predicates, until you get to private methods. We start with
# general predicates that are always included in the language, then move to
# column-type-specific predicates, which only get added if we see columns of particular types
# in the table.
@predicate
def all_rows(self) -> List[Row]:
return self.table_data
@predicate
def select_string(self, rows: List[Row], column: StringColumn) -> List[str]:
"""
Select function takes a list of rows and a column name and returns the list of strings that
are in the cells under that column.
"""
return [str(row.values[column.name]) for row in rows if row.values[column.name] is not None]
@predicate
def select_number(self, rows: List[Row], column: NumberColumn) -> Number:
"""
Select function takes a row (as a list) and a column name and returns the number in that
column. If multiple rows are given, will return the first number that is not None.
"""
numbers: List[float] = []
for row in rows:
cell_value = row.values[column.name]
if isinstance(cell_value, float):
numbers.append(cell_value)
return numbers[0] if numbers else -1 # type: ignore
@predicate
def select_date(self, rows: List[Row], column: DateColumn) -> Date:
"""
Select function takes a row as a list and a column name and returns the date in that column.
"""
dates: List[Date] = []
for row in rows:
cell_value = row.values[column.name]
if isinstance(cell_value, Date):
dates.append(cell_value)
return dates[0] if dates else Date(-1, -1, -1) # type: ignore
@predicate
def same_as(self, rows: List[Row], column: Column) -> List[Row]:
"""
Takes a row and a column and returns a list of rows from the full set of rows that contain
the same value under the given column as the given row.
"""
return_list: List[Row] = []
if not rows:
return return_list
cell_value = rows[0].values[column.name]
for table_row in self.table_data:
new_cell_value = table_row.values[column.name]
if new_cell_value is None or not isinstance(new_cell_value, type(cell_value)):
continue
if new_cell_value == cell_value:
return_list.append(table_row)
return return_list
@predicate
def date(self, year: Number, month: Number, day: Number) -> Date:
"""
Takes three numbers and returns a ``Date`` object whose year, month, and day are the three
numbers in that order.
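For example, ``(date 2005 1 -1)`` represents January 2005 with the day unspecified; -1 is the
placeholder used when a date field is missing.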
"""
return Date(year, month, day) # type: ignore
@predicate
def first(self, rows: List[Row]) -> List[Row]:
"""
Takes an expression that evaluates to a list of rows, and returns the first one in that
list.
"""
if not rows:
logger.warning("Trying to get first row from an empty list")
return []
return [rows[0]]
@predicate
def last(self, rows: List[Row]) -> List[Row]:
"""
Takes an expression that evaluates to a list of rows, and returns the last one in that
list.
"""
if not rows:
logger.warning("Trying to get last row from an empty list")
return []
return [rows[-1]]
@predicate
def previous(self, rows: List[Row]) -> List[Row]:
"""
Takes an expression that evaluates to a single row, and returns the row that occurs before
the input row in the original set of rows. If the input row happens to be the top row, we
will return an empty list.
"""
if not rows:
return []
input_row_index = self._get_row_index(rows[0])
if input_row_index > 0:
return [self.table_data[input_row_index - 1]]
return []
@predicate
def next(self, rows: List[Row]) -> List[Row]:
"""
Takes an expression that evaluates to a single row, and returns the row that occurs after
the input row in the original set of rows. If the input row happens to be the last row, we
will return an empty list.
"""
if not rows:
return []
input_row_index = self._get_row_index(rows[0])
if input_row_index < len(self.table_data) - 1 and input_row_index != -1:
return [self.table_data[input_row_index + 1]]
return []
@predicate
def count(self, rows: List[Row]) -> Number:
return len(rows) # type: ignore
@predicate
def mode_string(self, rows: List[Row], column: StringColumn) -> List[str]:
"""
Takes a list of rows and a column and returns the most frequent values (one or more) under
that column in those rows.
"""
most_frequent_list = self._get_most_frequent_values(rows, column)
if not most_frequent_list:
return []
if not all([isinstance(value, str) for value in most_frequent_list]):
raise ExecutionError(f"Invalid values for mode_string: {most_frequent_list}")
return most_frequent_list
@predicate
def mode_number(self, rows: List[Row], column: NumberColumn) -> Number:
"""
Takes a list of rows and a column and returns the most frequent value under
that column in those rows.
"""
most_frequent_list = self._get_most_frequent_values(rows, column)
if not most_frequent_list:
return 0.0 # type: ignore
most_frequent_value = most_frequent_list[0]
if not isinstance(most_frequent_value, Number):
raise ExecutionError(f"Invalid values for mode_number: {most_frequent_value}")
return most_frequent_value
@predicate
def mode_date(self, rows: List[Row], column: DateColumn) -> Date:
"""
Takes a list of rows and a column and returns the most frequent value under
that column in those rows.
"""
most_frequent_list = self._get_most_frequent_values(rows, column)
if not most_frequent_list:
return Date(-1, -1, -1)
most_frequent_value = most_frequent_list[0]
if not isinstance(most_frequent_value, Date):
raise ExecutionError(f"Invalid values for mode_date: {most_frequent_value}")
return most_frequent_value
# These get added to the language (using `add_predicate()`) if we see a date or number column
# in the table.
def argmax(self, rows: List[Row], column: ComparableColumn) -> List[Row]:
"""
Takes a list of rows and a column name and returns a list containing a single row (dict from
columns to cells) that has the maximum numerical value in the given column. We return a list
instead of a single dict to be consistent with the return type of ``select`` and
``all_rows``.
"""
if not rows:
return []
value_row_pairs = [
(row.values[column.name], row) for row in rows if row.values[column.name] is not None
]
if not value_row_pairs:
return []
# Returns a list containing the row with the max cell value.
return [sorted(value_row_pairs, key=lambda x: x[0], reverse=True)[0][1]] # type: ignore
def argmin(self, rows: List[Row], column: ComparableColumn) -> List[Row]:
"""
Takes a list of rows and a column and returns a list containing a single row (dict from
columns to cells) that has the minimum numerical value in the given column. We return a list
instead of a single dict to be consistent with the return type of ``select`` and
``all_rows``.
"""
if not rows:
return []
value_row_pairs = [
(row.values[column.name], row) for row in rows if row.values[column.name] is not None
]
if not value_row_pairs:
return []
# Returns a list containing the row with the min cell value.
return [sorted(value_row_pairs, key=lambda x: x[0])[0][1]] # type: ignore
# These six methods take a list of rows, a column, and a numerical value and return all the
# rows where the value in that column is [comparator] than the given value. They only get
# added to the language if we see a number column in the table.
def filter_number_greater(
self, rows: List[Row], column: NumberColumn, filter_value: Number
) -> List[Row]:
cell_row_pairs = [
(row.values[column.name], row) for row in rows if row.values[column.name] is not None
]
return [
row for cell_value, row in cell_row_pairs if cell_value > filter_value # type: ignore
]
def filter_number_greater_equals(
self, rows: List[Row], column: NumberColumn, filter_value: Number
) -> List[Row]:
cell_row_pairs = [
(row.values[column.name], row) for row in rows if row.values[column.name] is not None
]
return [
row for cell_value, row in cell_row_pairs if cell_value >= filter_value # type: ignore
]
def filter_number_lesser(
self, rows: List[Row], column: NumberColumn, filter_value: Number
) -> List[Row]:
cell_row_pairs = [
(row.values[column.name], row) for row in rows if row.values[column.name] is not None
]
return [
row for cell_value, row in cell_row_pairs if cell_value < filter_value # type: ignore
]
def filter_number_lesser_equals(
self, rows: List[Row], column: NumberColumn, filter_value: Number
) -> List[Row]:
cell_row_pairs = [
(row.values[column.name], row) for row in rows if row.values[column.name] is not None
]
return [
row for cell_value, row in cell_row_pairs if cell_value <= filter_value # type: ignore
]
def filter_number_equals(
self, rows: List[Row], column: NumberColumn, filter_value: Number
) -> List[Row]:
cell_row_pairs = [
(row.values[column.name], row) for row in rows if row.values[column.name] is not None
]
return [
row for cell_value, row in cell_row_pairs if cell_value == filter_value # type: ignore
]
def filter_number_not_equals(
self, rows: List[Row], column: NumberColumn, filter_value: Number
) -> List[Row]:
cell_row_pairs = [
(row.values[column.name], row) for row in rows if row.values[column.name] is not None
]
return [
row for cell_value, row in cell_row_pairs if cell_value != filter_value # type: ignore
]
# These six methods are the same as the six above, but for dates. They only get added to the
# language if we see a date column in the table.
def filter_date_greater(
self, rows: List[Row], column: DateColumn, filter_value: Date
) -> List[Row]:
cell_row_pairs: List[Tuple[Date, Row]] = []
for row in rows:
cell_value = row.values[column.name]
if isinstance(cell_value, Date):
cell_row_pairs.append((cell_value, row))
return [row for cell_value, row in cell_row_pairs if cell_value > filter_value]
def filter_date_greater_equals(
self, rows: List[Row], column: DateColumn, filter_value: Date
) -> List[Row]:
cell_row_pairs: List[Tuple[Date, Row]] = []
for row in rows:
cell_value = row.values[column.name]
if isinstance(cell_value, Date):
cell_row_pairs.append((cell_value, row))
return [row for cell_value, row in cell_row_pairs if cell_value >= filter_value]
def filter_date_lesser(
self, rows: List[Row], column: DateColumn, filter_value: Date
) -> List[Row]:
cell_row_pairs: List[Tuple[Date, Row]] = []
for row in rows:
cell_value = row.values[column.name]
if isinstance(cell_value, Date):
cell_row_pairs.append((cell_value, row))
return [row for cell_value, row in cell_row_pairs if cell_value < filter_value]
def filter_date_lesser_equals(
self, rows: List[Row], column: DateColumn, filter_value: Date
) -> List[Row]:
cell_row_pairs: List[Tuple[Date, Row]] = []
for row in rows:
cell_value = row.values[column.name]
if isinstance(cell_value, Date):
cell_row_pairs.append((cell_value, row))
return [row for cell_value, row in cell_row_pairs if cell_value <= filter_value]
def filter_date_equals(
self, rows: List[Row], column: DateColumn, filter_value: Date
) -> List[Row]:
cell_row_pairs: List[Tuple[Date, Row]] = []
for row in rows:
cell_value = row.values[column.name]
if isinstance(cell_value, Date):
cell_row_pairs.append((cell_value, row))
return [row for cell_value, row in cell_row_pairs if cell_value == filter_value]
def filter_date_not_equals(
self, rows: List[Row], column: DateColumn, filter_value: Date
) -> List[Row]:
cell_row_pairs: List[Tuple[Date, Row]] = []
for row in rows:
cell_value = row.values[column.name]
if isinstance(cell_value, Date):
cell_row_pairs.append((cell_value, row))
return [row for cell_value, row in cell_row_pairs if cell_value != filter_value]
# These two are similar to the filter methods above, but operate on strings obtained from the
# question, instead of dates or numbers. So they check for whether the string value is present
# in the cell or not, instead of using a numerical / date comparator. We only add them to the
# language if we see a string column in the table (which is basically always).
def filter_in(
self, rows: List[Row], column: StringColumn, filter_values: List[str]
) -> List[Row]:
# We accept a list of filter values instead of a single string to allow the outputs of select like
# operations to be passed in as filter values.
# Assuming filter value has underscores for spaces. The cell values also have underscores
# for spaces, so we do not need to replace them here.
# Note that if a list of filter values is passed, we only use the first one.
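# For example (a sketch): given the column ``league`` and filter values ["string:usl_a_league"],
# we keep the rows whose ``league`` cell contains "usl_a_league".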
if not filter_values:
raise ExecutionError(f"Unexpected filter value: {filter_values}")
if isinstance(filter_values, str):
filter_value = filter_values
elif isinstance(filter_values, list):
filter_value = filter_values[0]
else:
raise ExecutionError(f"Unexpected filter value: {filter_values}")
# Also, we need to remove the "string:" that was prepended to the entity name in the language.
if filter_value.startswith("string:"):
    filter_value = filter_value[len("string:") :]
filtered_rows: List[Row] = []
for row in rows:
cell_value = row.values[column.name]
if isinstance(cell_value, str) and filter_value in cell_value:
filtered_rows.append(row)
return filtered_rows
def filter_not_in(
self, rows: List[Row], column: StringColumn, filter_values: List[str]
) -> List[Row]:
# We accept a list of filter values instead of a single string to allow the outputs of select like
# operations to be passed in as filter values.
# Assuming filter value has underscores for spaces. The cell values also have underscores
# for spaces, so we do not need to replace them here.
# Note that if a list of filter values is passed, we only use the first one.
if not filter_values:
raise ExecutionError(f"Unexpected filter value: {filter_values}")
if isinstance(filter_values, str):
filter_value = filter_values
elif isinstance(filter_values, list):
filter_value = filter_values[0]
else:
raise ExecutionError(f"Unexpected filter value: {filter_values}")
# Also, we need to remove the "string:" that was prepended to the entity name in the language.
if filter_value.startswith("string:"):
    filter_value = filter_value[len("string:") :]
filtered_rows: List[Row] = []
for row in rows:
cell_value = row.values[column.name]
if isinstance(cell_value, str) and filter_value not in cell_value:
filtered_rows.append(row)
return filtered_rows
# These are some more date-column-specific functions, which only get added if we see a number
# column.
def max_date(self, rows: List[Row], column: DateColumn) -> Date:
"""
Takes a list of rows and a column and returns the max of the values under that column in
those rows.
"""
cell_values = [
row.values[column.name] for row in rows if row.values[column.name] is not None
]
if not cell_values:
return Date(-1, -1, -1)
if not all([isinstance(value, Date) for value in cell_values]):
raise ExecutionError(f"Invalid values for date selection function: {cell_values}")
return max(cell_values) # type: ignore
def min_date(self, rows: List[Row], column: DateColumn) -> Date:
"""
Takes a list of rows and a column and returns the min of the values under that column in
those rows.
"""
cell_values = [
row.values[column.name] for row in rows if row.values[column.name] is not None
]
if not cell_values:
return Date(-1, -1, -1)
if not all([isinstance(value, Date) for value in cell_values]):
raise ExecutionError(f"Invalid values for date selection function: {cell_values}")
return min(cell_values) # type: ignore
# These are some more number-column-specific functions, which only get added if we see a number
# column.
def max_number(self, rows: List[Row], column: NumberColumn) -> Number:
"""
Takes a list of rows and a column and returns the max of the values under that column in
those rows.
"""
cell_values = [
row.values[column.name] for row in rows if row.values[column.name] is not None
]
if not cell_values:
return 0.0 # type: ignore
if not all([isinstance(value, Number) for value in cell_values]):
raise ExecutionError(f"Invalid values for number selection function: {cell_values}")
return max(cell_values) # type: ignore
def min_number(self, rows: List[Row], column: NumberColumn) -> Number:
"""
Takes a list of rows and a column and returns the min of the values under that column in
those rows.
"""
cell_values = [
row.values[column.name] for row in rows if row.values[column.name] is not None
]
if not cell_values:
return 0.0 # type: ignore
if not all([isinstance(value, Number) for value in cell_values]):
raise ExecutionError(f"Invalid values for number selection function: {cell_values}")
return min(cell_values) # type: ignore
def sum(self, rows: List[Row], column: NumberColumn) -> Number:
"""
Takes a list of rows and a column and returns the sum of the values under that column in
those rows.
"""
cell_values = [
row.values[column.name] for row in rows if row.values[column.name] is not None
]
if not cell_values:
return 0.0 # type: ignore
return sum(cell_values) # type: ignore
def average(self, rows: List[Row], column: NumberColumn) -> Number:
"""
Takes a list of rows and a column and returns the mean of the values under that column in
those rows.
"""
cell_values = [
row.values[column.name] for row in rows if row.values[column.name] is not None
]
if not cell_values:
return 0.0 # type: ignore
return sum(cell_values) / len(cell_values) # type: ignore
def diff(self, first_row: List[Row], second_row: List[Row], column: NumberColumn) -> Number:
"""
Takes two rows and a number column and returns the difference between the values under
that column in those two rows.
"""
if not first_row or not second_row:
return 0.0 # type: ignore
first_value = first_row[0].values[column.name]
second_value = second_row[0].values[column.name]
if isinstance(first_value, float) and isinstance(second_value, float):
return first_value - second_value # type: ignore
elif first_value is None or second_value is None:
return 0.0 # type: ignore
else:
raise ExecutionError(f"Invalid column for diff: {column.name}")
# End of language predicates. Stuff below here is for private use, helping to implement the
# functions above.
def __eq__(self, other):
if not isinstance(other, WikiTablesLanguage):
return False
return self.table_data == other.table_data
@staticmethod
def _make_date(cell_string: str) -> Date:
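# For example, "january_2005" parses to Date(2005, 1, -1), and "2_january_2005" to Date(2005, 1, 2).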
string_parts = cell_string.split("_")
year = -1
month = -1
day = -1
for part in string_parts:
if part.isdigit():
if len(part) == 4:
year = int(part)
else:
day = int(part)
elif part in MONTH_NUMBERS:
month = MONTH_NUMBERS[part]
return Date(year, month, day)
def _get_row_index(self, row: Row) -> int:
"""
Takes a row and returns its index in the full list of rows. If the row does not occur in the
table (which should never happen because this function will only be called with a row that
is the result of applying one or more functions on the table rows), the method returns -1.
"""
row_index = -1
for index, table_row in enumerate(self.table_data):
if table_row.values == row.values:
row_index = index
break
return row_index
def _get_most_frequent_values(self, rows: List[Row], column: Column) -> List[Any]:
value_frequencies: Dict[CellValueType, int] = defaultdict(int)
max_frequency = 0
most_frequent_list: List[CellValueType] = []
for row in rows:
cell_value = row.values[column.name]
if cell_value is not None:
value_frequencies[cell_value] += 1
frequency = value_frequencies[cell_value]
if frequency > max_frequency:
max_frequency = frequency
most_frequent_list = [cell_value]
elif frequency == max_frequency:
most_frequent_list.append(cell_value)
return most_frequent_list
| allennlp-semparse-master | allennlp_semparse/domain_languages/wikitables_language.py |
from allennlp_semparse.domain_languages.domain_language import (
DomainLanguage,
START_SYMBOL,
predicate,
predicate_with_side_args,
)
from allennlp_semparse.domain_languages.nlvr_language import NlvrLanguage
from allennlp_semparse.domain_languages.wikitables_language import WikiTablesLanguage
| allennlp-semparse-master | allennlp_semparse/domain_languages/__init__.py |
from collections import defaultdict
from typing import Callable, Dict, List, NamedTuple, Set
from allennlp.common.util import JsonDict
from allennlp_semparse.domain_languages.domain_language import DomainLanguage, predicate
class Object:
"""
``Objects`` are the geometric shapes in the NLVR domain. They have values for attributes shape,
color, x_loc, y_loc and size. We take a dict read from the JSON file and store it here, and
define a get method for getting the attribute values. We need this to be hashable because need
to make sets of ``Objects`` during execution, which get passed around between functions.
Parameters
----------
attributes : ``JsonDict``
The dict for each object from the json file.
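A sketch of the expected format: ``{"color": "Yellow", "type": "square", "x_loc": 21,
"y_loc": 40, "size": 10}``.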
"""
def __init__(self, attributes: JsonDict, box_id: str) -> None:
object_color = attributes["color"].lower()
# The dataset has a hex code only for blue for some reason.
if object_color.startswith("#"):
self.color = "blue"
else:
self.color = object_color
object_shape = attributes["type"].lower()
self.shape = object_shape
self.x_loc = attributes["x_loc"]
self.y_loc = attributes["y_loc"]
self.size = attributes["size"]
self._box_id = box_id
def __str__(self):
if self.size == 10:
size = "small"
elif self.size == 20:
size = "medium"
else:
size = "big"
return f"{size} {self.color} {self.shape} at ({self.x_loc}, {self.y_loc}) in {self._box_id}"
def __hash__(self):
return hash(str(self))
def __eq__(self, other):
return str(self) == str(other)
class Box:
"""
This class represents each box containing objects in NLVR.
Parameters
----------
objects_list : ``List[JsonDict]``
List of objects in the box, as given by the json file.
box_id : ``int``
An integer identifying the box index (0, 1 or 2).
"""
def __init__(self, objects_list: List[JsonDict], box_id: int) -> None:
self._name = f"box {box_id + 1}"
self._objects_string = str([str(_object) for _object in objects_list])
self.objects = {Object(object_dict, self._name) for object_dict in objects_list}
self.colors = {obj.color for obj in self.objects}
self.shapes = {obj.shape for obj in self.objects}
def __str__(self):
return self._objects_string
def __hash__(self):
return hash(str(self))
def __eq__(self, other):
return str(self) == str(other)
class Color(NamedTuple):
color: str
class Shape(NamedTuple):
shape: str
class NlvrLanguage(DomainLanguage):
def __init__(self, boxes: Set[Box]) -> None:
self.boxes = boxes
self.objects: Set[Object] = set()
for box in self.boxes:
self.objects.update(box.objects)
allowed_constants = {
"color_blue": Color("blue"),
"color_black": Color("black"),
"color_yellow": Color("yellow"),
"shape_triangle": Shape("triangle"),
"shape_square": Shape("square"),
"shape_circle": Shape("circle"),
"1": 1,
"2": 2,
"3": 3,
"4": 4,
"5": 5,
"6": 6,
"7": 7,
"8": 8,
"9": 9,
}
super().__init__(start_types={bool}, allowed_constants=allowed_constants)
# Mapping from terminal strings to productions that produce them.
# Eg.: "yellow" -> "<Set[Object]:Set[Object]> -> yellow"
# We use this in the agenda-related methods, and some models that use this language look at
# this field to know how many terminals to plan for.
self.terminal_productions: Dict[str, str] = {}
for name, types in self._function_types.items():
self.terminal_productions[name] = f"{types[0]} -> {name}"
# These first two methods are about getting an "agenda", which, given an input utterance,
# tries to guess what production rules should be needed in the logical form.
def get_agenda_for_sentence(self, sentence: str) -> List[str]:
"""
Given a ``sentence``, returns a list of actions the sentence triggers as an ``agenda``. The
``agenda`` can be used by a parser to guide the decoder toward producing those actions. This
is a simplistic mapping at this point, and can be expanded.
Parameters
----------
sentence : ``str``
The sentence for which an agenda will be produced.
"""
agenda = []
sentence = sentence.lower()
if sentence.startswith("there is a box") or sentence.startswith("there is a tower "):
agenda.append(self.terminal_productions["box_exists"])
elif sentence.startswith("there is a "):
agenda.append(self.terminal_productions["object_exists"])
if "<Set[Box]:bool> -> box_exists" not in agenda:
# These are object filters and do not apply if we have a box_exists at the top.
if "touch" in sentence:
if "top" in sentence:
agenda.append(self.terminal_productions["touch_top"])
elif "bottom" in sentence or "base" in sentence:
agenda.append(self.terminal_productions["touch_bottom"])
elif "corner" in sentence:
agenda.append(self.terminal_productions["touch_corner"])
elif "right" in sentence:
agenda.append(self.terminal_productions["touch_right"])
elif "left" in sentence:
agenda.append(self.terminal_productions["touch_left"])
elif "wall" in sentence or "edge" in sentence:
agenda.append(self.terminal_productions["touch_wall"])
else:
agenda.append(self.terminal_productions["touch_object"])
else:
# The words "top" and "bottom" may be referring to top and bottom blocks in a tower.
if "top" in sentence:
agenda.append(self.terminal_productions["top"])
elif "bottom" in sentence or "base" in sentence:
agenda.append(self.terminal_productions["bottom"])
if " not " in sentence:
agenda.append(self.terminal_productions["negate_filter"])
if " contains " in sentence or " has " in sentence:
agenda.append(self.terminal_productions["all_boxes"])
# This takes care of shapes, colors, top, bottom, big, small etc.
for constant, production in self.terminal_productions.items():
# TODO(pradeep): Deal with constant names with underscores.
if "top" in constant or "bottom" in constant:
# We already dealt with top, bottom, touch_top and touch_bottom above.
continue
if constant in sentence:
if (
"<Set[Object]:Set[Object]> ->" in production
and "<Set[Box]:bool> -> box_exists" in agenda
):
if constant in ["square", "circle", "triangle"]:
agenda.append(self.terminal_productions[f"shape_{constant}"])
elif constant in ["yellow", "blue", "black"]:
agenda.append(self.terminal_productions[f"color_{constant}"])
else:
continue
else:
agenda.append(production)
# TODO (pradeep): Rules for "member_*" productions ("tower" or "box" followed by a color,
# shape or number...)
number_productions = self._get_number_productions(sentence)
for production in number_productions:
agenda.append(production)
if not agenda:
# None of the rules above was triggered!
if "box" in sentence:
agenda.append(self.terminal_productions["all_boxes"])
else:
agenda.append(self.terminal_productions["all_objects"])
return agenda
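# A rough usage sketch (the sentence is hypothetical, and the exact productions depend on
# the grammar induced for the given boxes, so the output shown is indicative, not exact):
#
#     language = NlvrLanguage(boxes)
#     language.get_agenda_for_sentence("there is a box with two yellow squares")
#     # -> something like ['<Set[Box]:bool> -> box_exists', 'Color -> color_yellow',
#     #                    'Shape -> shape_square', 'int -> 2']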
@staticmethod
def _get_number_productions(sentence: str) -> List[str]:
"""
Gathers all the numbers in the sentence, and returns productions that lead to them.
"""
# The mapping here is very simple and limited, which also shouldn't be a problem
# because numbers seem to be represented fairly regularly.
number_strings = {
"one": "1",
"two": "2",
"three": "3",
"four": "4",
"five": "5",
"six": "6",
"seven": "7",
"eight": "8",
"nine": "9",
"ten": "10",
}
number_productions = []
tokens = sentence.split()
numbers = number_strings.values()
for token in tokens:
if token in numbers:
number_productions.append(f"int -> {token}")
elif token in number_strings:
number_productions.append(f"int -> {number_strings[token]}")
return number_productions
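# For example, the (hypothetical) sentence "there are two boxes touching 3 walls" would
# yield ["int -> 2", "int -> 3"], since both spelled-out and digit tokens are handled.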
def __eq__(self, other):
if isinstance(self, other.__class__):
return self.boxes == other.boxes and self.objects == other.objects
return NotImplemented
# All methods below here are predicates in the NLVR language, or helper methods for them.
@predicate
def all_boxes(self) -> Set[Box]:
return self.boxes
@predicate
def all_objects(self) -> Set[Object]:
return self.objects
@predicate
def box_exists(self, boxes: Set[Box]) -> bool:
return len(boxes) > 0
@predicate
def object_exists(self, objects: Set[Object]) -> bool:
return len(objects) > 0
@predicate
def object_in_box(self, box: Set[Box]) -> Set[Object]:
return_set: Set[Object] = set()
for box_ in box:
return_set.update(box_.objects)
return return_set
@predicate
def black(self, objects: Set[Object]) -> Set[Object]:
return {obj for obj in objects if obj.color == "black"}
@predicate
def blue(self, objects: Set[Object]) -> Set[Object]:
return {obj for obj in objects if obj.color == "blue"}
@predicate
def yellow(self, objects: Set[Object]) -> Set[Object]:
return {obj for obj in objects if obj.color == "yellow"}
@predicate
def circle(self, objects: Set[Object]) -> Set[Object]:
return {obj for obj in objects if obj.shape == "circle"}
@predicate
def square(self, objects: Set[Object]) -> Set[Object]:
return {obj for obj in objects if obj.shape == "square"}
@predicate
def triangle(self, objects: Set[Object]) -> Set[Object]:
return {obj for obj in objects if obj.shape == "triangle"}
@predicate
def same_color(self, objects: Set[Object]) -> Set[Object]:
"""
Filters the set of objects, and returns those objects whose color is the most frequent
color in the initial set of objects, if the highest frequency is greater than 1, or an
empty set otherwise.
This is an unusual name for what the method does, but just as ``blue`` filters objects to
those that are blue, this filters objects to those that are of the same color.
"""
return self._get_objects_with_same_attribute(objects, lambda x: x.color)
@predicate
def same_shape(self, objects: Set[Object]) -> Set[Object]:
"""
Filters the set of objects, and returns those objects whose shape is the most frequent
shape in the initial set of objects, if the highest frequency is greater than 1, or an
empty set otherwise.
This is an unusual name for what the method does, but just as ``triangle`` filters objects
to those that are triangles, this filters objects to those that are of the same shape.
"""
return self._get_objects_with_same_attribute(objects, lambda x: x.shape)
@predicate
def touch_bottom(self, objects: Set[Object]) -> Set[Object]:
return {obj for obj in objects if obj.y_loc + obj.size == 100}
@predicate
def touch_left(self, objects: Set[Object]) -> Set[Object]:
return {obj for obj in objects if obj.x_loc == 0}
@predicate
def touch_top(self, objects: Set[Object]) -> Set[Object]:
return {obj for obj in objects if obj.y_loc == 0}
@predicate
def touch_right(self, objects: Set[Object]) -> Set[Object]:
return {obj for obj in objects if obj.x_loc + obj.size == 100}
@predicate
def touch_wall(self, objects: Set[Object]) -> Set[Object]:
return_set: Set[Object] = set()
return return_set.union(
self.touch_top(objects),
self.touch_left(objects),
self.touch_right(objects),
self.touch_bottom(objects),
)
@predicate
def touch_corner(self, objects: Set[Object]) -> Set[Object]:
return_set: Set[Object] = set()
return return_set.union(
self.touch_top(objects).intersection(self.touch_right(objects)),
self.touch_top(objects).intersection(self.touch_left(objects)),
self.touch_bottom(objects).intersection(self.touch_right(objects)),
self.touch_bottom(objects).intersection(self.touch_left(objects)),
)
@predicate
def touch_object(self, objects: Set[Object]) -> Set[Object]:
"""
Returns all objects that touch the given set of objects.
"""
objects_per_box = self._separate_objects_by_boxes(objects)
return_set = set()
for box, box_objects in objects_per_box.items():
candidate_objects = box.objects
for object_ in box_objects:
for candidate_object in candidate_objects:
if self._objects_touch_each_other(object_, candidate_object):
return_set.add(candidate_object)
return return_set
@predicate
def top(self, objects: Set[Object]) -> Set[Object]:
"""
Return the topmost objects (i.e. minimum y_loc). The comparison is done separately for each
box.
"""
objects_per_box = self._separate_objects_by_boxes(objects)
return_set: Set[Object] = set()
for _, box_objects in objects_per_box.items():
min_y_loc = min([obj.y_loc for obj in box_objects])
return_set.update({obj for obj in box_objects if obj.y_loc == min_y_loc})
return return_set
@predicate
def bottom(self, objects: Set[Object]) -> Set[Object]:
"""
Return the bottom-most objects (i.e. maximum y_loc). The comparison is done separately for
each box.
"""
objects_per_box = self._separate_objects_by_boxes(objects)
return_set: Set[Object] = set()
for _, box_objects in objects_per_box.items():
max_y_loc = max([obj.y_loc for obj in box_objects])
return_set.update({obj for obj in box_objects if obj.y_loc == max_y_loc})
return return_set
@predicate
def above(self, objects: Set[Object]) -> Set[Object]:
"""
Returns the set of objects in the same boxes that are above the given objects. That is, if
the input is a set of two objects, one in each box, we will return a union of the objects
above the first object in the first box, and those above the second object in the second box.
"""
objects_per_box = self._separate_objects_by_boxes(objects)
return_set = set()
for box in objects_per_box:
# min_y_loc corresponds to the top-most object.
min_y_loc = min([obj.y_loc for obj in objects_per_box[box]])
for candidate_obj in box.objects:
if candidate_obj.y_loc < min_y_loc:
return_set.add(candidate_obj)
return return_set
@predicate
def below(self, objects: Set[Object]) -> Set[Object]:
"""
Returns the set of objects in the same boxes that are below the given objects. That is, if
the input is a set of two objects, one in each box, we will return a union of the objects
below the first object in the first box, and those below the second object in the second box.
"""
objects_per_box = self._separate_objects_by_boxes(objects)
return_set = set()
for box in objects_per_box:
# max_y_loc corresponds to the bottom-most object.
max_y_loc = max([obj.y_loc for obj in objects_per_box[box]])
for candidate_obj in box.objects:
if candidate_obj.y_loc > max_y_loc:
return_set.add(candidate_obj)
return return_set
@predicate
def small(self, objects: Set[Object]) -> Set[Object]:
return {obj for obj in objects if obj.size == 10}
@predicate
def medium(self, objects: Set[Object]) -> Set[Object]:
return {obj for obj in objects if obj.size == 20}
@predicate
def big(self, objects: Set[Object]) -> Set[Object]:
return {obj for obj in objects if obj.size == 30}
@predicate
def box_count_equals(self, boxes: Set[Box], count: int) -> bool:
return len(boxes) == count
@predicate
def box_count_not_equals(self, boxes: Set[Box], count: int) -> bool:
return len(boxes) != count
@predicate
def box_count_greater(self, boxes: Set[Box], count: int) -> bool:
return len(boxes) > count
@predicate
def box_count_greater_equals(self, boxes: Set[Box], count: int) -> bool:
return len(boxes) >= count
@predicate
def box_count_lesser(self, boxes: Set[Box], count: int) -> bool:
return len(boxes) < count
@predicate
def box_count_lesser_equals(self, boxes: Set[Box], count: int) -> bool:
return len(boxes) <= count
@predicate
def object_color_all_equals(self, objects: Set[Object], color: Color) -> bool:
return all([obj.color == color.color for obj in objects])
@predicate
def object_color_any_equals(self, objects: Set[Object], color: Color) -> bool:
return any([obj.color == color.color for obj in objects])
@predicate
def object_color_none_equals(self, objects: Set[Object], color: Color) -> bool:
return all([obj.color != color.color for obj in objects])
@predicate
def object_shape_all_equals(self, objects: Set[Object], shape: Shape) -> bool:
return all([obj.shape == shape.shape for obj in objects])
@predicate
def object_shape_any_equals(self, objects: Set[Object], shape: Shape) -> bool:
return any([obj.shape == shape.shape for obj in objects])
@predicate
def object_shape_none_equals(self, objects: Set[Object], shape: Shape) -> bool:
return all([obj.shape != shape.shape for obj in objects])
@predicate
def object_count_equals(self, objects: Set[Object], count: int) -> bool:
return len(objects) == count
@predicate
def object_count_not_equals(self, objects: Set[Object], count: int) -> bool:
return len(objects) != count
@predicate
def object_count_greater(self, objects: Set[Object], count: int) -> bool:
return len(objects) > count
@predicate
def object_count_greater_equals(self, objects: Set[Object], count: int) -> bool:
return len(objects) >= count
@predicate
def object_count_lesser(self, objects: Set[Object], count: int) -> bool:
return len(objects) < count
@predicate
def object_count_lesser_equals(self, objects: Set[Object], count: int) -> bool:
return len(objects) <= count
@predicate
def object_color_count_equals(self, objects: Set[Object], count: int) -> bool:
return len({obj.color for obj in objects}) == count
@predicate
def object_color_count_not_equals(self, objects: Set[Object], count: int) -> bool:
return len({obj.color for obj in objects}) != count
@predicate
def object_color_count_greater(self, objects: Set[Object], count: int) -> bool:
return len({obj.color for obj in objects}) > count
@predicate
def object_color_count_greater_equals(self, objects: Set[Object], count: int) -> bool:
return len({obj.color for obj in objects}) >= count
@predicate
def object_color_count_lesser(self, objects: Set[Object], count: int) -> bool:
return len({obj.color for obj in objects}) < count
@predicate
def object_color_count_lesser_equals(self, objects: Set[Object], count: int) -> bool:
return len({obj.color for obj in objects}) <= count
@predicate
def object_shape_count_equals(self, objects: Set[Object], count: int) -> bool:
return len({obj.shape for obj in objects}) == count
@predicate
def object_shape_count_not_equals(self, objects: Set[Object], count: int) -> bool:
return len({obj.shape for obj in objects}) != count
@predicate
def object_shape_count_greater(self, objects: Set[Object], count: int) -> bool:
return len({obj.shape for obj in objects}) > count
@predicate
def object_shape_count_greater_equals(self, objects: Set[Object], count: int) -> bool:
return len({obj.shape for obj in objects}) >= count
@predicate
def object_shape_count_lesser(self, objects: Set[Object], count: int) -> bool:
return len({obj.shape for obj in objects}) < count
@predicate
def object_shape_count_lesser_equals(self, objects: Set[Object], count: int) -> bool:
return len({obj.shape for obj in objects}) <= count
@predicate
def member_count_equals(self, boxes: Set[Box], count: int) -> Set[Box]:
return {box for box in boxes if len(box.objects) == count}
@predicate
def member_count_not_equals(self, boxes: Set[Box], count: int) -> Set[Box]:
return {box for box in boxes if len(box.objects) != count}
@predicate
def member_count_greater(self, boxes: Set[Box], count: int) -> Set[Box]:
return {box for box in boxes if len(box.objects) > count}
@predicate
def member_count_greater_equals(self, boxes: Set[Box], count: int) -> Set[Box]:
return {box for box in boxes if len(box.objects) >= count}
@predicate
def member_count_lesser(self, boxes: Set[Box], count: int) -> Set[Box]:
return {box for box in boxes if len(box.objects) < count}
@predicate
def member_count_lesser_equals(self, boxes: Set[Box], count: int) -> Set[Box]:
return {box for box in boxes if len(box.objects) <= count}
@predicate
def member_color_count_equals(self, boxes: Set[Box], count: int) -> Set[Box]:
return {box for box in boxes if len(box.colors) == count}
@predicate
def member_color_count_not_equals(self, boxes: Set[Box], count: int) -> Set[Box]:
return {box for box in boxes if len(box.colors) != count}
@predicate
def member_color_count_greater(self, boxes: Set[Box], count: int) -> Set[Box]:
return {box for box in boxes if len(box.colors) > count}
@predicate
def member_color_count_greater_equals(self, boxes: Set[Box], count: int) -> Set[Box]:
return {box for box in boxes if len(box.colors) >= count}
@predicate
def member_color_count_lesser(self, boxes: Set[Box], count: int) -> Set[Box]:
return {box for box in boxes if len(box.colors) < count}
@predicate
def member_color_count_lesser_equals(self, boxes: Set[Box], count: int) -> Set[Box]:
return {box for box in boxes if len(box.colors) <= count}
@predicate
def member_shape_count_equals(self, boxes: Set[Box], count: int) -> Set[Box]:
return {box for box in boxes if len(box.shapes) == count}
@predicate
def member_shape_count_not_equals(self, boxes: Set[Box], count: int) -> Set[Box]:
return {box for box in boxes if len(box.shapes) != count}
@predicate
def member_shape_count_greater(self, boxes: Set[Box], count: int) -> Set[Box]:
return {box for box in boxes if len(box.shapes) > count}
@predicate
def member_shape_count_greater_equals(self, boxes: Set[Box], count: int) -> Set[Box]:
return {box for box in boxes if len(box.shapes) >= count}
@predicate
def member_shape_count_lesser(self, boxes: Set[Box], count: int) -> Set[Box]:
return {box for box in boxes if len(box.shapes) < count}
@predicate
def member_shape_count_lesser_equals(self, boxes: Set[Box], count: int) -> Set[Box]:
return {box for box in boxes if len(box.shapes) <= count}
@predicate
def member_color_all_equals(self, boxes: Set[Box], color: Color) -> Set[Box]:
return {box for box in boxes if self.object_color_all_equals(box.objects, color)}
@predicate
def member_color_any_equals(self, boxes: Set[Box], color: Color) -> Set[Box]:
return {box for box in boxes if self.object_color_any_equals(box.objects, color)}
@predicate
def member_color_none_equals(self, boxes: Set[Box], color: Color) -> Set[Box]:
return {box for box in boxes if self.object_color_none_equals(box.objects, color)}
@predicate
def member_shape_all_equals(self, boxes: Set[Box], shape: Shape) -> Set[Box]:
return {box for box in boxes if self.object_shape_all_equals(box.objects, shape)}
@predicate
def member_shape_any_equals(self, boxes: Set[Box], shape: Shape) -> Set[Box]:
return {box for box in boxes if self.object_shape_any_equals(box.objects, shape)}
@predicate
def member_shape_none_equals(self, boxes: Set[Box], shape: Shape) -> Set[Box]:
return {box for box in boxes if self.object_shape_none_equals(box.objects, shape)}
@predicate
def member_shape_same(self, boxes: Set[Box]) -> Set[Box]:
return {box for box in boxes if self.object_shape_count_equals(box.objects, 1)}
@predicate
def member_color_same(self, boxes: Set[Box]) -> Set[Box]:
return {box for box in boxes if self.object_color_count_equals(box.objects, 1)}
@predicate
def member_shape_different(self, boxes: Set[Box]) -> Set[Box]:
return {box for box in boxes if self.object_shape_count_not_equals(box.objects, 1)}
@predicate
def member_color_different(self, boxes: Set[Box]) -> Set[Box]:
return {box for box in boxes if self.object_color_count_not_equals(box.objects, 1)}
@predicate
def negate_filter(
self, filter_function: Callable[[Set[Object]], Set[Object]]
) -> Callable[[Set[Object]], Set[Object]]:
def negated_filter(objects: Set[Object]) -> Set[Object]:
return objects.difference(filter_function(objects))
return negated_filter
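# A usage sketch (assuming `language` is an NlvrLanguage instance): negate_filter wraps an
# object filter so that, for example,
#
#     negated = language.negate_filter(language.blue)
#     negated(language.all_objects())  # -> all objects that are not blue
#
# which is how logical forms express phrases like "objects that are not blue".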
def _objects_touch_each_other(self, object1: Object, object2: Object) -> bool:
"""
Returns true iff the objects touch each other.
"""
in_vertical_range = (
object1.y_loc <= object2.y_loc + object2.size
and object1.y_loc + object1.size >= object2.y_loc
)
in_horizontal_range = (
object1.x_loc <= object2.x_loc + object2.size
and object1.x_loc + object1.size >= object2.x_loc
)
touch_side = (
object1.x_loc + object1.size == object2.x_loc
or object2.x_loc + object2.size == object1.x_loc
)
touch_top_or_bottom = (
object1.y_loc + object1.size == object2.y_loc
or object2.y_loc + object2.size == object1.y_loc
)
return (in_vertical_range and touch_side) or (in_horizontal_range and touch_top_or_bottom)
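# Worked example: an object at (x_loc=20, y_loc=30) with size 10 and another at
# (x_loc=30, y_loc=30) with size 10 touch, because 20 + 10 == 30 (touch_side is true) and
# their vertical ranges overlap.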
def _separate_objects_by_boxes(self, objects: Set[Object]) -> Dict[Box, List[Object]]:
"""
Given a set of objects, separate them by the boxes they belong to and return a dict.
"""
objects_per_box: Dict[Box, List[Object]] = defaultdict(list)
for box in self.boxes:
for object_ in objects:
if object_ in box.objects:
objects_per_box[box].append(object_)
return objects_per_box
def _get_objects_with_same_attribute(
self, objects: Set[Object], attribute_function: Callable[[Object], str]
) -> Set[Object]:
"""
Returns the set of objects for which the attribute function returns an attribute value that
is most frequent in the initial set, if the frequency is greater than 1. If not, all
objects have different attribute values, and this method returns an empty set.
"""
objects_of_attribute: Dict[str, Set[Object]] = defaultdict(set)
for entity in objects:
objects_of_attribute[attribute_function(entity)].add(entity)
if not objects_of_attribute:
return set()
most_frequent_attribute = max(
objects_of_attribute, key=lambda x: len(objects_of_attribute[x])
)
if len(objects_of_attribute[most_frequent_attribute]) <= 1:
return set()
return objects_of_attribute[most_frequent_attribute]
| allennlp-semparse-master | allennlp_semparse/domain_languages/nlvr_language.py |
from typing import Dict, List, Optional, NamedTuple
import torch
from allennlp.data.fields.field import Field
from allennlp.data.vocabulary import Vocabulary
class ProductionRule(NamedTuple):
rule: str
is_global_rule: bool
rule_id: Optional[torch.LongTensor] = None
nonterminal: Optional[str] = None
# This is just here for backward compatibility.
ProductionRuleArray = ProductionRule
# mypy doesn't like that we're using a crazy data type - the data type we use here is _supposed_ to
# be in the bounds of DataArray, but ProductionRule definitely isn't. TODO(mattg): maybe we
# should find a better way to loosen those bounds, or let people extend them. E.g., we could have
# DataArray be a class, and let people subclass it, or something.
class ProductionRuleField(Field[ProductionRule]): # type: ignore
"""
This ``Field`` represents a production rule from a grammar, like "S -> [NP, VP]", "N -> John",
or "<b,c> -> [<a,<b,c>>, a]".
We assume a few things about how these rules are formatted:
- There is a left-hand side (LHS) and a right-hand side (RHS), where the LHS is always a
non-terminal, and the RHS is either a terminal, a non-terminal, or a sequence of
terminals and/or non-terminals.
- The LHS and the RHS are joined by " -> ", and this sequence of characters appears nowhere
else in the rule.
- Non-terminal sequences in the RHS are formatted as "[NT1, NT2, ...]".
- Some rules come from a global grammar used for a whole dataset, while other rules are
specific to a particular ``Instance``.
We don't make use of most of these assumptions in this class, but the code that consumes this
``Field`` relies heavily on them in some places.
If the given rule is in the global grammar, we treat the rule as a vocabulary item that will
get an index and (in the model) an embedding. If the rule is not in the global grammar, we do
not create a vocabulary item from the rule, and don't produce a tensor for the rule - we assume
the model will handle representing this rule in some other way.
Because we represent global grammar rules and instance-specific rules differently, this
``Field`` does not lend itself well to batching its arrays, even in a sequence for a single
training instance. A model using this field will have to manually batch together rule
representations after splitting apart the global rules from the ``Instance`` rules.
In a model, this will get represented as a ``ProductionRule``, which is defined above.
This is a namedtuple of ``(rule_string, is_global_rule, [rule_id], nonterminal)``, where the
``rule_id`` ``Tensor``, if present, will have shape ``(1,)``. We don't do any batching of the
``Tensors``, so this gets passed to ``Model.forward()`` as a ``List[ProductionRule]``. We
pass along the rule string because there isn't another way to recover it for instance-specific
rules that do not make it into the vocabulary.
Parameters
----------
rule : ``str``
The production rule, formatted as described above. If this field is just padding, ``rule``
will be the empty string.
is_global_rule : ``bool``
Whether this rule comes from the global grammar or is an instance-specific production rule.
vocab_namespace : ``str``, optional (default="rule_labels")
The vocabulary namespace to use for the global production rules. We use "rule_labels" by
default, because we typically do not want padding and OOV tokens for these, and ending the
namespace with "labels" means we don't get padding and OOV tokens.
nonterminal : ``str``, optional, default = None
The left-hand side of the rule. Sometimes having this as a separate part of the ``ProductionRule``
can deduplicate work.
"""
def __init__(
self,
rule: str,
is_global_rule: bool,
vocab_namespace: str = "rule_labels",
nonterminal: str = None,
) -> None:
self.rule = rule
self.nonterminal = nonterminal
self.is_global_rule = is_global_rule
self._vocab_namespace = vocab_namespace
self._rule_id: int = None
def count_vocab_items(self, counter: Dict[str, Dict[str, int]]):
if self.is_global_rule:
counter[self._vocab_namespace][self.rule] += 1
def index(self, vocab: Vocabulary):
if self.is_global_rule and self._rule_id is None:
self._rule_id = vocab.get_token_index(self.rule, self._vocab_namespace)
def get_padding_lengths(self) -> Dict[str, int]:
return {}
def as_tensor(self, padding_lengths: Dict[str, int]) -> ProductionRule:
if self.is_global_rule:
tensor = torch.LongTensor([self._rule_id])
else:
tensor = None
return ProductionRule(self.rule, self.is_global_rule, tensor, self.nonterminal)
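# Sketch of the resulting value once `index()` has been called against a vocabulary: a
# global rule such as "S -> [NP, VP]" becomes
#     ProductionRule("S -> [NP, VP]", True, LongTensor([rule_id]), nonterminal)
# while an instance-specific rule gets None in place of the tensor, and the model is
# expected to represent it in some other way.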
def empty_field(self):
# This _does_ get called, because we don't want to bother with modifying the ListField to
# ignore padding for these. We just make sure the rule is the empty string, which the
# model will use to know that this rule is just padding.
return ProductionRuleField(rule="", is_global_rule=False)
def batch_tensors(
self, tensor_list: List[ProductionRule]
) -> List[ProductionRule]: # type: ignore
return tensor_list
def __str__(self) -> str:
return (
f"ProductionRuleField with rule: {self.rule} (is_global_rule: "
f"{self.is_global_rule}) in namespace: '{self._vocab_namespace}'.'"
)
| allennlp-semparse-master | allennlp_semparse/fields/production_rule_field.py |
from allennlp_semparse.fields.knowledge_graph_field import KnowledgeGraphField
from allennlp_semparse.fields.production_rule_field import ProductionRuleField
| allennlp-semparse-master | allennlp_semparse/fields/__init__.py |
"""
``KnowledgeGraphField`` is a ``Field`` which stores a knowledge graph representation.
"""
from typing import Callable, Dict, List, Set
import editdistance
import torch
from allennlp.common import util
from allennlp.common.checks import ConfigurationError
from allennlp.data.fields import Field, ListField, TextField
from allennlp.data.token_indexers.token_indexer import TokenIndexer
from allennlp.data.tokenizers import Token, Tokenizer, SpacyTokenizer
from allennlp.data.vocabulary import Vocabulary
from allennlp_semparse.common.knowledge_graph import KnowledgeGraph
class KnowledgeGraphField(Field[Dict[str, torch.Tensor]]):
"""
A ``KnowledgeGraphField`` represents a ``KnowledgeGraph`` as a ``Field`` that can be used in a
``Model``. For each entity in the graph, we output two things: a text representation of the
entity, handled identically to a ``TextField``, and a list of linking features for each token
in some input utterance.
The output of this field is a dictionary::
{
"text": Dict[str, torch.Tensor], # each tensor has shape (batch_size, num_entities, num_entity_tokens)
"linking": torch.Tensor # shape (batch_size, num_entities, num_utterance_tokens, num_features)
}
The ``text`` component of this dictionary is suitable to be passed into a
``TextFieldEmbedder`` (which handles the additional ``num_entities`` dimension without any
issues). The ``linking`` component of the dictionary can be used however you want to decide
which tokens in the utterance correspond to which entities in the knowledge graph.
In order to create the ``text`` component, we use the same dictionary of ``TokenIndexers``
that's used in a ``TextField`` (as we're just representing the text corresponding to each
entity). For the ``linking`` component, we use a set of hard-coded feature extractors that
operate between the text corresponding to each entity and each token in the utterance.
Parameters
----------
knowledge_graph : ``KnowledgeGraph``
The knowledge graph that this field stores.
utterance_tokens : ``List[Token]``
The tokens in some utterance that is paired with the ``KnowledgeGraph``. We compute a set
of features for linking tokens in the utterance to entities in the graph.
tokenizer : ``Tokenizer``, optional (default=``SpacyTokenizer()``)
We'll use this ``Tokenizer`` to tokenize the text representation of each entity.
token_indexers : ``Dict[str, TokenIndexer]``
Token indexers that convert entities into arrays, similar to how text tokens are treated in
a ``TextField``. These might operate on the name of the entity itself, its type, its
neighbors in the graph, etc.
feature_extractors : ``List[str]``, optional
Names of feature extractors to use for computing linking features. These must be
attributes of this object, without the first underscore. The feature extraction functions
are listed as the last methods in this class. For example, to use
:func:`_exact_token_match`, you would pass the string ``exact_token_match``. We will add
an underscore and look for a function matching that name. If this list is omitted, we will
use all available feature functions.
entity_tokens : ``List[List[Token]]``, optional
If you have pre-computed the tokenization of the table text, you can pass it in here. This
must be a list of the tokens in the entity text, for each entity in the knowledge graph, in
the same order in which the knowledge graph returns entities.
linking_features : ``List[List[List[float]]]``, optional
If you have pre-computed the linking features between the utterance and the table text, you
can pass it in here.
include_in_vocab : ``bool``, optional (default=True)
If this is ``False``, we will skip the ``count_vocab_items`` logic, leaving out all table
entity text from the vocabulary computation. You might want to do this if you have a lot
of rare entities in your tables, and you see the same table in multiple training instances,
so your vocabulary counts get skewed and include too many rare entities.
max_table_tokens : ``int``, optional
If given, we will only keep this number of total table tokens. This bounds the memory
usage of the table representations, truncating cells with really long text. We specify a
total number of tokens, not a max cell text length, because the number of table entities
varies.
"""
def __init__(
self,
knowledge_graph: KnowledgeGraph,
utterance_tokens: List[Token],
token_indexers: Dict[str, TokenIndexer],
tokenizer: Tokenizer = None,
feature_extractors: List[str] = None,
entity_tokens: List[List[Token]] = None,
linking_features: List[List[List[float]]] = None,
include_in_vocab: bool = True,
max_table_tokens: int = None,
) -> None:
self.knowledge_graph = knowledge_graph
self._tokenizer = tokenizer or SpacyTokenizer(pos_tags=True)
self._token_indexers = token_indexers
if not entity_tokens:
entity_texts = [
knowledge_graph.entity_text[entity].lower() for entity in knowledge_graph.entities
]
# TODO(mattg): Because we do tagging on each of these entities in addition to just
# tokenizations, this is quite slow, and about half of our data processing time just
# goes to this (~15 minutes when there are 7k instances). The reason we do tagging is
# so that we can add lemma features. If we can remove the need for lemma / other
# hand-written features, like with a CNN, we can cut down our data processing time by a
# factor of 2.
self.entity_texts = self._tokenizer.batch_tokenize(entity_texts)
else:
self.entity_texts = entity_tokens
entity_text_fields = []
max_entity_tokens = None
if max_table_tokens:
num_entities = len(self.entity_texts)
num_entity_tokens = max(len(entity_text) for entity_text in self.entity_texts)
# This truncates the number of entity tokens used, enabling larger tables (either in
# the number of entities in the table, or the number of tokens per entity) to fit in
# memory, particularly when using ELMo.
if num_entities * num_entity_tokens > max_table_tokens:
max_entity_tokens = int(max_table_tokens / num_entities)
for entity_text in self.entity_texts:
if max_entity_tokens:
entity_text = entity_text[:max_entity_tokens]
entity_text_fields.append(TextField(entity_text, token_indexers))
if self.entity_texts:
self._entity_text_field = ListField(entity_text_fields)
else:
empty_text_field = TextField([], self._token_indexers).empty_field()
self._entity_text_field = ListField([empty_text_field]).empty_field()
self.utterance_tokens = utterance_tokens
self._include_in_vocab = include_in_vocab
feature_extractors = (
feature_extractors
if feature_extractors is not None
else [
"number_token_match",
"exact_token_match",
"contains_exact_token_match",
"lemma_match",
"contains_lemma_match",
"edit_distance",
"related_column",
"related_column_lemma",
"span_overlap_fraction",
"span_lemma_overlap_fraction",
]
)
self._feature_extractors: List[
Callable[[str, List[Token], Token, int, List[Token]], float]
] = []
for feature_extractor_name in feature_extractors:
extractor = getattr(self, "_" + feature_extractor_name, None)
if not extractor:
raise ConfigurationError(
f"Invalid feature extractor name: {feature_extractor_name}"
)
self._feature_extractors.append(extractor)
if not linking_features:
# For quicker lookups in our feature functions, we'll additionally store some
# dictionaries that map entity strings to useful information about the entity.
self._entity_text_map: Dict[str, List[Token]] = {}
for entity, entity_text in zip(knowledge_graph.entities, self.entity_texts):
self._entity_text_map[entity] = entity_text
self._entity_text_exact_text: Dict[str, Set[str]] = {}
for entity, entity_text in zip(knowledge_graph.entities, self.entity_texts):
self._entity_text_exact_text[entity] = set(e.text for e in entity_text)
self._entity_text_lemmas: Dict[str, Set[str]] = {}
for entity, entity_text in zip(knowledge_graph.entities, self.entity_texts):
self._entity_text_lemmas[entity] = set(e.lemma_ for e in entity_text)
self.linking_features = self._compute_linking_features()
else:
self.linking_features = linking_features
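# A rough construction sketch (names here are hypothetical; the knowledge graph, tokenizer
# and token indexers normally come from a dataset reader):
#
#     field = KnowledgeGraphField(
#         knowledge_graph=table_knowledge_graph,
#         utterance_tokens=tokenizer.tokenize("how many players scored?"),
#         token_indexers={"tokens": SingleIdTokenIndexer()},
#     )
#     # After indexing against a vocabulary, field.as_tensor(field.get_padding_lengths())
#     # produces the {"text": ..., "linking": ...} dictionary described in the class docstring.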
def count_vocab_items(self, counter: Dict[str, Dict[str, int]]):
if self._include_in_vocab:
self._entity_text_field.count_vocab_items(counter)
def index(self, vocab: Vocabulary):
self._entity_text_field.index(vocab)
def __len__(self) -> int:
return len(self.utterance_tokens)
def get_padding_lengths(self) -> Dict[str, int]:
padding_lengths = {
"num_entities": len(self.entity_texts),
"num_utterance_tokens": len(self.utterance_tokens),
}
padding_lengths.update(self._entity_text_field.get_padding_lengths())
return padding_lengths
def as_tensor(self, padding_lengths: Dict[str, int]) -> Dict[str, torch.Tensor]:
text_tensors = self._entity_text_field.as_tensor(padding_lengths)
padded_linking_features = util.pad_sequence_to_length(
self.linking_features, padding_lengths["num_entities"], default_value=lambda: []
)
padded_linking_arrays = []
def default_feature_value():
return [0.0] * len(self._feature_extractors)
for linking_features in padded_linking_features:
padded_features = util.pad_sequence_to_length(
linking_features,
padding_lengths["num_utterance_tokens"],
default_value=default_feature_value,
)
padded_linking_arrays.append(padded_features)
linking_features_tensor = torch.FloatTensor(padded_linking_arrays)
return {"text": text_tensors, "linking": linking_features_tensor}
def _compute_linking_features(self) -> List[List[List[float]]]:
linking_features = []
for entity, entity_text in zip(self.knowledge_graph.entities, self.entity_texts):
entity_features = []
for token_index, token in enumerate(self.utterance_tokens):
token_features = []
for feature_extractor in self._feature_extractors:
token_features.append(
feature_extractor(
entity, entity_text, token, token_index, self.utterance_tokens
)
)
entity_features.append(token_features)
linking_features.append(entity_features)
return linking_features
def empty_field(self) -> "KnowledgeGraphField":
return KnowledgeGraphField(KnowledgeGraph(set(), {}), [], self._token_indexers)
def batch_tensors(self, tensor_list: List[Dict[str, torch.Tensor]]) -> Dict[str, torch.Tensor]:
text_tensors = [tensor["text"] for tensor in tensor_list]
batched_text = self._entity_text_field.batch_tensors(text_tensors)
batched_linking = torch.stack([tensor["linking"] for tensor in tensor_list])
return {"text": batched_text, "linking": batched_linking}
# Below here we have feature extractor functions. To keep a consistent API for easy logic
# above, some of these functions have unused arguments.
# These feature extractors are generally pretty specific to the logical form language and
# problem setting in WikiTableQuestions. This whole notion of feature extraction should
# eventually be made more general (or just removed, if we can replace it with CNN features...).
# For the feature functions used in the original parser written in PNP, see here:
# https://github.com/allenai/pnp/blob/wikitables2/src/main/scala/org/allenai/wikitables/SemanticParserFeatureGenerator.scala
# One notable difference between how the features work here and how they worked in PNP is that
# we're using the table text when computing string matches, while PNP used the _entity name_.
# It turns out that the entity name is derived from the table text, so this should be roughly
# equivalent, except in the case of some numbers. If there are cells with different text that
# normalize to the same name, you could get `_2` or similar appended to the name, so the way we
# do it here should just be better. But it's a possible minor source of variation from the
# original parser.
# Another difference between these features and the PNP features is that the span overlap used
# a weighting scheme to downweight matches on frequent words (like "the"), and the lemma
# overlap feature value was calculated a little differently. I'm guessing that doesn't make a
# huge difference...
def _number_token_match(
self,
entity: str,
entity_text: List[Token],
token: Token,
token_index: int,
tokens: List[Token],
) -> float:
# PNP had a "spanFeatures" function that said whether an entity was a-priori known to link
# to a token or set of tokens in the question. This was only used for numbers, and it's
# not totally clear to me how this number feature overlapped with the token match features
# in the original implementation (I think in most cases it was the same, except for things
# like "four million", because the token match is derived from the entity name, which would
# be 4000000, and wouldn't match "four million").
#
# Our implementation basically just adds a duplicate token match feature that's specific to
# numbers. It'll break in some rare cases (e.g., "Which four had four million ..."), but
# those shouldn't be a big deal.
if ":" in entity:
# This check works because numbers are the only entities that don't contain ":". All
# others in both WikiTables languages do (e.g.: fb:row.row.column_name,
# date_column:year, string:usl_a_league etc.).
return 0.0
return self._contains_exact_token_match(entity, entity_text, token, token_index, tokens)
def _exact_token_match(
self,
entity: str,
entity_text: List[Token],
token: Token,
token_index: int,
tokens: List[Token],
) -> float:
if len(entity_text) != 1:
return 0.0
return self._contains_exact_token_match(entity, entity_text, token, token_index, tokens)
def _contains_exact_token_match(
self,
entity: str,
entity_text: List[Token],
token: Token,
token_index: int,
tokens: List[Token],
) -> float:
if token.text in self._entity_text_exact_text[entity]:
return 1.0
return 0.0
def _lemma_match(
self,
entity: str,
entity_text: List[Token],
token: Token,
token_index: int,
tokens: List[Token],
) -> float:
if len(entity_text) != 1:
return 0.0
return self._contains_lemma_match(entity, entity_text, token, token_index, tokens)
def _contains_lemma_match(
self,
entity: str,
entity_text: List[Token],
token: Token,
token_index: int,
tokens: List[Token],
) -> float:
if token.text in self._entity_text_exact_text[entity]:
return 1.0
if token.lemma_ in self._entity_text_lemmas[entity]:
return 1.0
return 0.0
def _edit_distance(
self,
entity: str,
entity_text: List[Token],
token: Token,
token_index: int,
tokens: List[Token],
) -> float:
edit_distance = float(editdistance.eval(" ".join(e.text for e in entity_text), token.text))
return 1.0 - edit_distance / len(token.text)
def _related_column(
self,
entity: str,
entity_text: List[Token],
token: Token,
token_index: int,
tokens: List[Token],
) -> float:
# Check if the entity is a column name in one of the two WikiTables languages.
if not entity.startswith("fb:row.row") and "_column:" not in entity:
return 0.0
for neighbor in self.knowledge_graph.neighbors[entity]:
if token.text in self._entity_text_exact_text[neighbor]:
return 1.0
return 0.0
def _related_column_lemma(
self,
entity: str,
entity_text: List[Token],
token: Token,
token_index: int,
tokens: List[Token],
) -> float:
# Check if the entity is a column name in one of the two WikiTables languages.
if not entity.startswith("fb:row.row") and "_column:" not in entity:
return 0.0
for neighbor in self.knowledge_graph.neighbors[entity]:
if token.text in self._entity_text_exact_text[neighbor]:
return 1.0
if token.lemma_ in self._entity_text_lemmas[neighbor]:
return 1.0
return 0.0
def _span_overlap_fraction(
self,
entity: str,
entity_text: List[Token],
token: Token,
token_index: int,
tokens: List[Token],
) -> float:
entity_words = set(entity_token.text for entity_token in entity_text)
if not entity_words:
# Some tables have empty cells.
return 0
seen_entity_words = set()
token_index_left = token_index
while token_index < len(tokens) and tokens[token_index].text in entity_words:
seen_entity_words.add(tokens[token_index].text)
token_index += 1
while token_index_left >= 0 and tokens[token_index_left].text in entity_words:
seen_entity_words.add(tokens[token_index_left].text)
token_index_left -= 1
return len(seen_entity_words) / len(entity_words)
def _span_lemma_overlap_fraction(
self,
entity: str,
entity_text: List[Token],
token: Token,
token_index: int,
tokens: List[Token],
) -> float:
entity_lemmas = set(entity_token.lemma_ for entity_token in entity_text)
if not entity_lemmas:
# Some tables have empty cells.
return 0
seen_entity_lemmas = set()
token_index_left = token_index
while token_index < len(tokens) and tokens[token_index].lemma_ in entity_lemmas:
seen_entity_lemmas.add(tokens[token_index].lemma_)
token_index += 1
while token_index_left >= 0 and tokens[token_index_left].lemma_ in entity_lemmas:
seen_entity_lemmas.add(tokens[token_index_left].lemma_)
token_index_left -= 1
return len(seen_entity_lemmas) / len(entity_lemmas)
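# Worked example for the span-overlap features above: if an entity's text is
# ["new", "york", "city"] and the utterance contains "... in new york ...", the contiguous
# matched span covers 2 of the 3 entity words, so _span_overlap_fraction returns 2/3 for
# the tokens inside that span.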
| allennlp-semparse-master | allennlp_semparse/fields/knowledge_graph_field.py |
| allennlp-semparse-master | allennlp_semparse/nltk_languages/__init__.py
| allennlp-semparse-master | allennlp_semparse/nltk_languages/contexts/__init__.py
from typing import List, Dict, Set, Tuple
from collections import defaultdict
import logging
import re
from nltk import Tree
from nltk.sem.logic import ApplicationExpression, Expression, LambdaExpression, BasicType, Type
from allennlp_semparse.common import util
from allennlp_semparse.common.errors import ParsingError
from allennlp_semparse.domain_languages.domain_language import nltk_tree_to_logical_form
from allennlp_semparse.nltk_languages.type_declarations import type_declaration as types
logger = logging.getLogger(__name__)
class World:
"""
Base class for defining a world in a new domain. This class defines a method to translate a
logical form as per a naming convention that works with NLTK's ``LogicParser``. The sub-classes
can decide on the convention by overriding the ``_map_name`` method that does token level
mapping. This class also defines methods for transforming logical form strings into parsed
``Expressions``, and ``Expressions`` into action sequences.
Parameters
----------
constant_type_prefixes : ``Dict[str, BasicType]`` (optional)
If you have an unbounded number of constants in your domain, you are required to add
prefixes to their names to denote their types. This is the mapping from prefixes to types.
global_type_signatures : ``Dict[str, Type]`` (optional)
A mapping from translated names to their types.
global_name_mapping : ``Dict[str, str]`` (optional)
A name mapping from the original names in the domain to the translated names.
num_nested_lambdas : ``int`` (optional)
Does the language used in this ``World`` permit lambda expressions? And if so, how many
nested lambdas do we need to worry about? This is important when considering the space of
all possible actions, which we need to enumerate a priori for the parser.
"""
def __init__(
self,
constant_type_prefixes: Dict[str, BasicType] = None,
global_type_signatures: Dict[str, Type] = None,
global_name_mapping: Dict[str, str] = None,
num_nested_lambdas: int = 0,
) -> None:
# NLTK has a naming convention for variable types. If the world has predicate or entity names beyond
# what's defined in the COMMON_NAME_MAPPING, they need to be added to this dict.
# We initialize this dict as empty here and update it as we process logical forms.
self.local_name_mapping: Dict[str, str] = {}
# Similarly, these are the type signatures not in the COMMON_TYPE_SIGNATURE.
self.local_type_signatures: Dict[str, Type] = {}
self.global_name_mapping = global_name_mapping or {}
self.global_type_signatures = global_type_signatures or {}
# We keep a reverse map as well to put the terminals back in action sequences.
self.reverse_name_mapping = {
mapped_name: name for name, mapped_name in self.global_name_mapping.items()
}
type_prefixes = constant_type_prefixes or {}
self._num_nested_lambdas = num_nested_lambdas
if num_nested_lambdas > 3:
raise NotImplementedError(
"For ease of implementation, we currently only handle at "
"most three nested lambda expressions"
)
self._lambda_variables = set(["x", "y", "z"][:num_nested_lambdas])
self._logic_parser = types.DynamicTypeLogicParser(
constant_type_prefixes=type_prefixes, type_signatures=self.global_type_signatures
)
self._right_side_indexed_actions: Dict[str, List[Tuple[str, str]]] = None
# Caching this to avoid recomputing it every time `get_valid_actions` is called.
self._valid_actions: Dict[str, List[str]] = None
# Caching this to avoid recomputing it every time `get_multi_match_mapping` is called.
self._multi_match_mapping: Dict[Type, List[Type]] = None
def get_name_mapping(self) -> Dict[str, str]:
# Python 3.5 syntax for merging two dictionaries.
return {**self.global_name_mapping, **self.local_name_mapping}
def get_type_signatures(self) -> Dict[str, str]:
# Python 3.5 syntax for merging two dictionaries.
return {**self.global_type_signatures, **self.local_type_signatures}
def is_terminal(self, symbol: str) -> bool:
"""
This function will be called on nodes of a logical form tree, which are either non-terminal
symbols that can be expanded or terminal symbols that must be leaf nodes. Returns ``True``
if the given symbol is a terminal symbol.
"""
# We special-case 'lambda' here because it behaves weirdly in action sequences.
return (
symbol in self.global_name_mapping
or symbol in self.local_name_mapping
or "lambda" in symbol
)
def get_valid_actions(self) -> Dict[str, List[str]]:
if not self._valid_actions:
multi_match_mapping = self.get_multi_match_mapping()
self._valid_actions = types.get_valid_actions(
self.get_name_mapping(),
self.get_type_signatures(),
self.get_basic_types(),
valid_starting_types=self.get_valid_starting_types(),
num_nested_lambdas=self._num_nested_lambdas,
multi_match_mapping=multi_match_mapping,
)
return self._valid_actions
def get_paths_to_root(
self, action: str, max_path_length: int = 20, beam_size: int = 30, max_num_paths: int = 10
) -> List[List[str]]:
"""
For a given action, returns at most ``max_num_paths`` paths to the root (production with
``START_SYMBOL``) that are not longer than ``max_path_length``.
"""
action_left_side, _ = action.split(" -> ")
right_side_indexed_actions = self._get_right_side_indexed_actions()
lists_to_expand: List[Tuple[str, List[str]]] = [(action_left_side, [action])]
completed_paths = []
while lists_to_expand:
need_to_expand = False
for left_side, path in lists_to_expand:
if left_side == types.START_SYMBOL:
completed_paths.append(path)
else:
need_to_expand = True
if not need_to_expand or len(completed_paths) >= max_num_paths:
break
# We keep track of finished and unfinished lists separately because we truncate the beam
# later, and we want the finished lists to be at the top of the beam.
finished_new_lists = []
unfinished_new_lists = []
for left_side, actions in lists_to_expand:
for next_left_side, next_action in right_side_indexed_actions[left_side]:
if next_action in actions:
# Ignoring paths with loops (of size 1)
continue
new_actions = list(actions)
new_actions.append(next_action)
# Ignoring paths that are too long (unless they just reached the start symbol).
path_length = len(new_actions)
if path_length <= max_path_length or next_left_side == types.START_SYMBOL:
if next_left_side == types.START_SYMBOL:
finished_new_lists.append((next_left_side, new_actions))
else:
unfinished_new_lists.append((next_left_side, new_actions))
new_lists = finished_new_lists + unfinished_new_lists
lists_to_expand = new_lists[:beam_size]
return completed_paths[:max_num_paths]
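# Sketch (with hypothetical productions, mirroring the docstring notation used below): for
# an action such as "<e,r> -> fb:row.row.league", a returned path might look like
#     ["<e,r> -> fb:row.row.league", "r -> [<e,r>, e]", "{START_SYMBOL} -> r"]
# i.e. each path starts at the given action and ends at a START_SYMBOL production.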
def all_possible_actions(self) -> List[str]:
all_actions = set()
for action_set in self.get_valid_actions().values():
all_actions.update(action_set)
for i in range(self._num_nested_lambdas):
lambda_var = chr(ord("x") + i)
for basic_type in self.get_basic_types():
production = f"{basic_type} -> {lambda_var}"
all_actions.add(production)
return sorted(all_actions)
def _get_curried_functions(self) -> Dict[str, int]:
raise NotImplementedError()
def _get_right_side_indexed_actions(self):
if not self._right_side_indexed_actions:
self._right_side_indexed_actions = defaultdict(list)
all_actions = self.all_possible_actions()
for possible_action in all_actions:
left_side, right_side = possible_action.split(" -> ")
if "[" not in right_side:
self._right_side_indexed_actions[right_side].append(
(left_side, possible_action)
)
else:
right_side_parts = right_side[1:-1].split(", ")
for right_side_part in right_side_parts:
self._right_side_indexed_actions[right_side_part].append(
(left_side, possible_action)
)
return self._right_side_indexed_actions
def get_basic_types(self) -> Set[Type]:
"""
Returns the set of basic types (types of entities) in the world.
"""
raise NotImplementedError
def get_valid_starting_types(self) -> Set[Type]:
"""
Returns the set of all types t, such that actions ``{START_SYMBOL} -> t`` are valid. In other
words, these are all the possible types of complete logical forms in this world.
"""
raise NotImplementedError
def get_multi_match_mapping(self) -> Dict[Type, List[Type]]:
"""
Returns a mapping from each `MultiMatchNamedBasicType` to all the `NamedBasicTypes` that it
matches.
"""
if self._multi_match_mapping is None:
self._multi_match_mapping = {}
basic_types = self.get_basic_types()
for basic_type in basic_types:
if isinstance(basic_type, types.MultiMatchNamedBasicType):
matched_types: List[Type] = []
# We need to check if each type in the `types_to_match` field for the given
# ``MultiMatchNamedBasicType`` is itself in the set of basic types allowed in this
# world, and add it to the mapping only if it is. Some basic types that the
# multi-match type can match with may be disallowed in the world due to the
# instance-specific context.
for type_ in basic_type.types_to_match:
if type_ in basic_types:
matched_types.append(type_)
self._multi_match_mapping[basic_type] = matched_types
return self._multi_match_mapping
def parse_logical_form(self, logical_form: str, remove_var_function: bool = True) -> Expression:
"""
Takes a logical form as a string, maps its tokens using the mapping and returns a parsed expression.
Parameters
----------
logical_form : ``str``
Logical form to parse
remove_var_function : ``bool`` (optional)
``var`` is a special function that some languages use within lambda functions to
indicate the usage of a variable. If your language uses it, and you do not want to
include it in the parsed expression, set this flag. You may want to do this if you are
generating an action sequence from this parsed expression, because it is easier to let
the decoder not produce this function due to the way constrained decoding is currently
implemented.
"""
if not logical_form.startswith("("):
logical_form = f"({logical_form})"
if remove_var_function:
# Replace "(x)" with "x"
logical_form = re.sub(r"\(([x-z])\)", r"\1", logical_form)
# Replace "(var x)" with "(x)"
logical_form = re.sub(r"\(var ([x-z])\)", r"(\1)", logical_form)
parsed_lisp = util.lisp_to_nested_expression(logical_form)
translated_string = self._process_nested_expression(parsed_lisp)
type_signature = self.local_type_signatures.copy()
type_signature.update(self.global_type_signatures)
return self._logic_parser.parse(translated_string, signature=type_signature)
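# Usage sketch (the logical form is hypothetical; a concrete subclass of World supplies the
# actual grammar and name mapping):
#
#     expression = world.parse_logical_form("(count (fb:type.object.type fb:type.row))")
#     action_sequence = world.get_action_sequence(expression)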
def get_action_sequence(self, expression: Expression) -> List[str]:
"""
Returns the sequence of actions (as strings) that resulted in the given expression.
"""
# Starting with the type of the whole expression
return self._get_transitions(expression, [f"{types.START_TYPE} -> {expression.type}"])
def get_logical_form(self, action_sequence: List[str], add_var_function: bool = True) -> str:
"""
Takes an action sequence and constructs a logical form from it. This is useful if you want
to get a logical form from a decoded sequence of actions generated by a transition based
semantic parser.
Parameters
----------
action_sequence : ``List[str]``
The sequence of actions as strings (eg.: ``['{START_SYMBOL} -> t', 't -> <e,t>', ...]``).
add_var_function : ``bool`` (optional)
``var`` is a special function that some languages use within lambda functions to
indicate the use of a variable (eg.: ``(lambda x (fb:row.row.year (var x)))``). Due to
the way constrained decoding is currently implemented, it is easier for the decoder to
not produce these functions. In that case, setting this flag adds the function in the
logical form even though it is not present in the action sequence.
"""
# Basic outline: we assume that the bracketing that we get in the RHS of each action is the
# correct bracketing for reconstructing the logical form. This is true when there is no
# currying in the action sequence. Given this assumption, we just need to construct a tree
# from the action sequence, then output all of the leaves in the tree, with brackets around
# the children of all non-terminal nodes.
remaining_actions = [action.split(" -> ") for action in action_sequence]
tree = Tree(remaining_actions[0][1], [])
try:
remaining_actions = self._construct_node_from_actions(
tree, remaining_actions[1:], add_var_function
)
except ParsingError:
logger.error("Error parsing action sequence: %s", action_sequence)
raise
if remaining_actions:
logger.error("Error parsing action sequence: %s", action_sequence)
logger.error("Remaining actions were: %s", remaining_actions)
raise ParsingError("Extra actions in action sequence")
return nltk_tree_to_logical_form(tree)
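    # Sketch of the round trip between logical forms and action sequences (assuming
    # ``world`` is an instance of a concrete subclass of this class):
    #
    #     actions = world.get_action_sequence(world.parse_logical_form(logical_form))
    #     reconstructed = world.get_logical_form(actions, add_var_function=True)
    #     # ``reconstructed`` should match ``logical_form`` up to ``var`` handling.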
def _construct_node_from_actions(
self, current_node: Tree, remaining_actions: List[List[str]], add_var_function: bool
) -> List[List[str]]:
"""
Given a current node in the logical form tree, and a list of actions in an action sequence,
this method fills in the children of the current node from the action sequence, then
returns whatever actions are left.
For example, we could get a node with type ``c``, and an action sequence that begins with
``c -> [<r,c>, r]``. This method will add two children to the input node, consuming
actions from the action sequence for nodes of type ``<r,c>`` (and all of its children,
recursively) and ``r`` (and all of its children, recursively). This method assumes that
action sequences are produced `depth-first`, so all actions for the subtree under ``<r,c>``
appear before actions for the subtree under ``r``. If there are any actions in the action
sequence after the ``<r,c>`` and ``r`` subtrees have terminated in leaf nodes, they will be
returned.
"""
if not remaining_actions:
logger.error("No actions left to construct current node: %s", current_node)
raise ParsingError("Incomplete action sequence")
left_side, right_side = remaining_actions.pop(0)
if left_side != current_node.label():
mismatch = True
multi_match_mapping = {
str(key): [str(value) for value in values]
for key, values in self.get_multi_match_mapping().items()
}
current_label = current_node.label()
if (
current_label in multi_match_mapping
and left_side in multi_match_mapping[current_label]
):
mismatch = False
if mismatch:
logger.error("Current node: %s", current_node)
logger.error("Next action: %s -> %s", left_side, right_side)
logger.error("Remaining actions were: %s", remaining_actions)
raise ParsingError("Current node does not match next action")
if right_side[0] == "[":
# This is a non-terminal expansion, with more than one child node.
for child_type in right_side[1:-1].split(", "):
if child_type.startswith("'lambda"):
# We need to special-case the handling of lambda here, because it's handled a
# bit weirdly in the action sequence. This is stripping off the single quotes
# around something like `'lambda x'`.
child_type = child_type[1:-1]
child_node = Tree(child_type, [])
current_node.append(child_node) # you add a child to an nltk.Tree with `append`
if not self.is_terminal(child_type):
remaining_actions = self._construct_node_from_actions(
child_node, remaining_actions, add_var_function
)
elif self.is_terminal(right_side):
# The current node is a pre-terminal; we'll add a single terminal child. We need to
# check first for whether we need to add a (var _) around the terminal node, though.
if add_var_function and right_side in self._lambda_variables:
right_side = f"(var {right_side})"
if add_var_function and right_side == "var":
raise ParsingError("add_var_function was true, but action sequence already had var")
current_node.append(
Tree(right_side, [])
) # you add a child to an nltk.Tree with `append`
else:
# The only way this can happen is if you have a unary non-terminal production rule.
# That is almost certainly not what you want with this kind of grammar, so we'll crash.
# If you really do want this, open a PR with a valid use case.
raise ParsingError(
f"Found a unary production rule: {left_side} -> {right_side}. "
"Are you sure you want a unary production rule in your grammar?"
)
return remaining_actions
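    # Worked example (with made-up rule names): the action sequence
    #     ['c -> [<r,c>, r]', '<r,c> -> column_function', 'r -> some_row']
    # first expands the root into two children, then fills each child depth-first, giving
    #     Tree('c', [Tree('<r,c>', [Tree('column_function', [])]), Tree('r', [Tree('some_row', [])])])
    # which ``nltk_tree_to_logical_form`` renders as ``(column_function some_row)``.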
@classmethod
def _infer_num_arguments(cls, type_signature: str) -> int:
"""
Takes a type signature and infers the number of arguments the corresponding function takes.
Examples:
e -> 0
<r,e> -> 1
<e,<e,t>> -> 2
<b,<<b,#1>,<#1,b>>> -> 3
"""
if "<" not in type_signature:
return 0
# We need to find the return type from the signature. We do that by removing the outer most
# angular brackets and traversing the remaining substring till the angular brackets (if any)
# balance. Once we hit a comma after the angular brackets are balanced, whatever is left
# after it is the return type.
type_signature = type_signature[1:-1]
num_brackets = 0
char_index = 0
for char in type_signature:
if char == "<":
num_brackets += 1
elif char == ">":
num_brackets -= 1
elif char == ",":
if num_brackets == 0:
break
char_index += 1
return_type = type_signature[char_index + 1 :]
return 1 + cls._infer_num_arguments(return_type)
def _process_nested_expression(self, nested_expression) -> str:
"""
``nested_expression`` is the result of parsing a logical form in Lisp format.
We process it recursively and return a string in the format that NLTK's ``LogicParser``
would understand.
"""
expression_is_list = isinstance(nested_expression, list)
expression_size = len(nested_expression)
if expression_is_list and expression_size == 1 and isinstance(nested_expression[0], list):
return self._process_nested_expression(nested_expression[0])
elements_are_leaves = [isinstance(element, str) for element in nested_expression]
if all(elements_are_leaves):
mapped_names = [self._map_name(name) for name in nested_expression]
else:
mapped_names = []
for element, is_leaf in zip(nested_expression, elements_are_leaves):
if is_leaf:
mapped_names.append(self._map_name(element))
else:
mapped_names.append(self._process_nested_expression(element))
if mapped_names[0] == "\\":
            # This means the predicate is lambda. NLTK wants the variable name to not be within parentheses.
# Adding parentheses after the variable.
arguments = [mapped_names[1]] + [f"({name})" for name in mapped_names[2:]]
else:
arguments = [f"({name})" for name in mapped_names[1:]]
return f'({mapped_names[0]} {" ".join(arguments)})'
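    # Hypothetical illustration: if ``_map_name`` maps "count" to the alias "C" and
    # "fb:cell.2010" to "cell:2010", then the nested expression
    #     ['count', 'fb:cell.2010']
    # is turned into the string "(C (cell:2010))", which NLTK's ``LogicParser`` accepts.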
def _map_name(self, name: str, keep_mapping: bool = False) -> str:
"""
Takes the name of a predicate or a constant as used by Sempre, maps it to a unique string
such that NLTK processes it appropriately. This is needed because NLTK has a naming
convention for variables:
- Function variables: Single upper case letter optionally followed by digits
- Individual variables: Single lower case letter (except e for events) optionally
followed by digits
- Constants: Everything else
Parameters
----------
name : ``str``
Token from Sempre's logical form.
keep_mapping : ``bool``, optional (default=False)
If this is ``True``, we will add the name and its mapping to our local state, so that
:func:`get_name_mapping` and :func:`get_valid_actions` know about it. You typically
want to do this when you're `initializing` the object, but you very likely don't want
to when you're parsing logical forms - getting an ill-formed logical form can then
change your state in bad ways, for instance.
"""
raise NotImplementedError
def _add_name_mapping(self, name: str, translated_name: str, name_type: Type = None):
"""
Utility method to add a name and its translation to the local name mapping, and the corresponding
signature, if available to the local type signatures. This method also updates the reverse name
mapping.
"""
self.local_name_mapping[name] = translated_name
self.reverse_name_mapping[translated_name] = name
if name_type:
self.local_type_signatures[translated_name] = name_type
def _get_transitions(self, expression: Expression, current_transitions: List[str]) -> List[str]:
# The way we handle curried functions in here is a bit of a mess, but it works. For any
# function that takes more than one argument, the NLTK Expression object will be curried,
# and so the standard "visitor" pattern used by NLTK will result in action sequences that
# are also curried. We need to detect these curried functions and uncurry them in the
# action sequence. We do that by keeping around a dictionary mapping multi-argument
# functions to the number of arguments they take. When we see a multi-argument function,
# we check to see if we're at the top-level, first instance of that function by checking
# its number of arguments with NLTK's `uncurry()` function. If it is, we output an action
# using those arguments. Otherwise, we're at an intermediate node of a curried function,
# and we squelch the action that would normally be generated.
# TODO(mattg): There might be some way of removing the need for `curried_functions` here,
# using instead the `argument_types()` function I added to `ComplexType`, but my guess is
# that it would involve needing to modify nltk, and I don't want to bother with figuring
# that out right now.
curried_functions = self._get_curried_functions()
expression_type = expression.type
try:
# ``Expression.visit()`` takes two arguments: the first one is a function applied on
# each sub-expression and the second is a combinator that is applied to the list of
# values returned from the function applications. We just want the list of all
# sub-expressions here.
sub_expressions = expression.visit(lambda x: x, lambda x: x)
transformed_types = [sub_exp.type for sub_exp in sub_expressions]
if isinstance(expression, LambdaExpression):
# If the expression is a lambda expression, the list of sub expressions does not
# include the "lambda x" term. We're adding it here so that we will see transitions
# like
# <e,d> -> [\x, d] instead of
# <e,d> -> [d]
transformed_types = ["lambda x"] + transformed_types
elif isinstance(expression, ApplicationExpression):
function, arguments = expression.uncurry()
function_type = function.type
if function_type in curried_functions:
expected_num_arguments = curried_functions[function_type]
if len(arguments) == expected_num_arguments:
# This is the initial application of a curried function. We'll use this
# node in the expression to generate the action for this function, using
# all of its arguments.
transformed_types = [function.type] + [
argument.type for argument in arguments
]
else:
# We're at an intermediate node. We'll set `transformed_types` to `None`
# to indicate that we need to squelch this action.
transformed_types = None
if transformed_types:
transition = f"{expression_type} -> {transformed_types}"
current_transitions.append(transition)
for sub_expression in sub_expressions:
self._get_transitions(sub_expression, current_transitions)
except NotImplementedError:
# This means that the expression is a leaf. We simply make a transition from its type to itself.
original_name = str(expression)
if original_name in self.reverse_name_mapping:
original_name = self.reverse_name_mapping[original_name]
transition = f"{expression_type} -> {original_name}"
current_transitions.append(transition)
return current_transitions
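    # For instance (hypothetical types): applying a two-argument function F of type
    # <e,<r,d>> to arguments a:e and b:r is represented by NLTK as the curried application
    # (F(a))(b). The check against ``curried_functions`` makes us emit a single action
    # "d -> [<e,<r,d>>, e, r]" at the outer application and squelch the intermediate one.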
def __eq__(self, other):
if isinstance(self, other.__class__):
return self.__dict__ == other.__dict__
return NotImplemented
| allennlp-semparse-master | allennlp_semparse/nltk_languages/worlds/world.py |
allennlp-semparse-master | allennlp_semparse/nltk_languages/worlds/__init__.py |
|
allennlp-semparse-master | allennlp_semparse/nltk_languages/type_declarations/__init__.py |
|
"""
This module defines some classes that are generally useful for defining a type system for a new
domain. We inherit the type logic in ``nltk.sem.logic`` and add some functionality on top of it
here. There are two main improvements:
1) Firstly, we allow defining multiple basic types with their own names (see ``NamedBasicType``).
2) Secondly, we allow defining function types that have placeholders in them (see
``PlaceholderType``).
We also extend NLTK's ``LogicParser`` to define a ``DynamicTypeLogicParser`` that knows how to deal
with the two improvements above.
"""
from typing import Dict, List, Optional, Set, Tuple, Union
from collections import defaultdict
import itertools
from nltk.sem.logic import (
Expression,
ApplicationExpression,
ConstantExpression,
LogicParser,
Variable,
)
from nltk.sem.logic import Type, BasicType, ComplexType as NltkComplexType, ANY_TYPE
from allennlp.common.util import START_SYMBOL
class ComplexType(NltkComplexType):
"""
In NLTK, a ``ComplexType`` is a function. These functions are curried, so if you need multiple
arguments for your function you nest ``ComplexTypes``. That currying makes things difficult
for us, and we mitigate the problems by adding ``return_type`` and ``argument_type`` functions
to ``ComplexType``.
"""
def return_type(self) -> Type:
"""
Gives the final return type for this function. If the function takes a single argument,
this is just ``self.second``. If the function takes multiple arguments and returns a basic
type, this should be the final ``.second`` after following all complex types. That is the
implementation here in the base class. If you have a higher-order function that returns a
function itself, you need to override this method.
"""
return_type = self.second
while isinstance(return_type, ComplexType):
return_type = return_type.second
return return_type
def argument_types(self) -> List[Type]:
"""
Gives the types of all arguments to this function. For functions returning a basic type,
we grab all ``.first`` types until ``.second`` is no longer a ``ComplexType``. That logic
is implemented here in the base class. If you have a higher-order function that returns a
function itself, you need to override this method.
"""
arguments = [self.first]
remaining_type = self.second
while isinstance(remaining_type, ComplexType):
arguments.append(remaining_type.first)
remaining_type = remaining_type.second
return arguments
def substitute_any_type(self, basic_types: Set[BasicType]) -> List[Type]:
"""
Takes a set of ``BasicTypes`` and replaces any instances of ``ANY_TYPE`` inside this
complex type with each of those basic types.
"""
substitutions = []
for first_type in substitute_any_type(self.first, basic_types):
for second_type in substitute_any_type(self.second, basic_types):
substitutions.append(self.__class__(first_type, second_type))
return substitutions
class HigherOrderType(ComplexType):
"""
A higher-order function is a ``ComplexType`` that returns functions. We just override
``return_type`` and ``argument_types`` to make sure that these types are correct.
Parameters
----------
num_arguments : ``int``
How many arguments this function takes before returning a function. We'll go through this
many levels of nested ``ComplexTypes`` before returning the final ``.second`` as our return
type.
first : ``Type``
Passed to NLTK's ComplexType.
second : ``Type``
Passed to NLTK's ComplexType.
"""
def __init__(self, num_arguments: int, first: Type, second: Type) -> None:
super().__init__(first, second)
self.num_arguments = num_arguments
def return_type(self) -> Type:
return_type = self.second
for _ in range(self.num_arguments - 1):
return_type = return_type.second
return return_type
def argument_types(self) -> List[Type]:
arguments = [self.first]
remaining_type = self.second
for _ in range(self.num_arguments - 1):
arguments.append(remaining_type.first)
remaining_type = remaining_type.second
return arguments
class NamedBasicType(BasicType):
"""
A ``BasicType`` that also takes the name of the type as an argument to its constructor. Type
resolution uses the output of ``__str__`` as well, so basic types with different
representations do not resolve against each other.
Parameters
----------
string_rep : ``str``
String representation of the type.
"""
def __init__(self, string_rep) -> None:
self._string_rep = string_rep
def __str__(self):
# TODO (pradeep): This limits the number of basic types we can have to 26. We may want to
# change this in the future if we extend to domains where we have more than 26 basic types.
if self._string_rep == START_SYMBOL:
return START_SYMBOL
else:
return self._string_rep.lower()[0]
def str(self):
return self._string_rep
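    # For example, ``NamedBasicType("ROW")`` renders as "r" in type signatures and action
    # sequences (via ``__str__``), while ``.str()`` still returns the full name "ROW".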
class MultiMatchNamedBasicType(NamedBasicType):
"""
A ``NamedBasicType`` that matches with any type within a list of ``BasicTypes`` that it takes
as an additional argument during instantiation. We just override the ``matches`` method in
``BasicType`` to match against any of the types given by the list.
Parameters
----------
string_rep : ``str``
String representation of the type, passed to super class.
types_to_match : ``List[BasicType]``
List of types that this type should match with.
"""
def __init__(self, string_rep, types_to_match: List[BasicType]) -> None:
super().__init__(string_rep)
self.types_to_match = set(types_to_match)
def matches(self, other):
return super().matches(other) or other in self.types_to_match
class PlaceholderType(ComplexType):
"""
``PlaceholderType`` is a ``ComplexType`` that involves placeholders, and thus its type
resolution is context sensitive. This is an abstract class for all placeholder types like
reverse, and, or, argmax, etc.
Note that ANY_TYPE in NLTK's type system doesn't work like a wild card. Once the type of a
variable gets resolved to a specific type, NLTK changes the type of that variable to that
specific type. Hence, what NLTK calls "ANY_TYPE", is essentially a "yet-to-be-decided" type.
This is a problem because we may want the same variable to bind to different types within a
logical form, and using ANY_TYPE for this purpose will cause a resolution failure. For example
the count function may apply to both rows and cells in the same logical form, and making count
of type ``ComplexType(ANY_TYPE, DATE_NUM_TYPE)`` will cause a resolution error. This class lets
you define ``ComplexType`` s with placeholders that are actually wild cards.
The subclasses of this abstract class need to do three things
1) Override the property ``_signature`` to define the type signature (this is just the
signature's string representation and will not affect type inference or checking). You will see
this signature in action sequences.
2) Override ``resolve`` to resolve the type appropriately (see the docstring in ``resolve`` for
more information).
3) Override ``get_application_type`` which returns the return type when this type is applied as
a function to an argument of a specified type. For example, if you defined a reverse type by
inheriting from this class, ``get_application_type`` gets an argument of type ``<a,b>``, it
should return ``<b,a>`` .
"""
_signature: str = None
def resolve(self, other: Type) -> Optional[Type]:
"""
This method is central to type inference and checking. When a variable's type is being
checked, we compare what we know of its type against what is expected of its type by its
context. The expectation is provided as ``other``. We make sure that there are no
contradictions between this type and other, and return an updated type which may be more
specific than the original type.
For example, say this type is of the function variable F in F(cell), and we start out with
``<?, d>`` (that is, it takes any type and returns ``d`` ). Now we have already resolved
cell to be of type ``e`` . Then ``resolve`` gets called with ``other = <e, ?>`` , because
we know F is a function that took a constant of type ``e`` . When we resolve ``<e, ?>``
against ``<?, d>`` , there will not be a contradiction, because any type can be
successfully resolved against ``?`` . Finally we return ``<e, d>`` as the resolved type.
As a counter example, if we are trying to resolve ``<?, d>`` against ``<?, e>`` , the
resolution fails, and in that case, this method returns ``None`` .
Note that a successful resolution does not imply equality of types because of one of them
may be ANY_TYPE, and so in the subclasses of this type, we explicitly resolve in both
directions.
"""
raise NotImplementedError
def get_application_type(self, argument_type: Type) -> Type:
"""
This method returns the resulting type when this type is applied as a function to an argument of
the given type.
"""
raise NotImplementedError
def substitute_any_type(self, basic_types: Set[BasicType]) -> List[Type]:
"""
Placeholders mess with substitutions, so even though this method is implemented in the
superclass, we override it here with a ``NotImplementedError`` to be sure that subclasses
think about what the right thing to do here is, and do it correctly.
"""
raise NotImplementedError
def __eq__(self, other) -> bool:
return self.__class__ == other.__class__
def matches(self, other) -> bool:
# self == ANY_TYPE = True iff self.first == ANY_TYPE and self.second == ANY_TYPE.
return self == other or self == ANY_TYPE or other == ANY_TYPE
def __str__(self):
if self == ANY_TYPE:
# If the type remains unresolved, we return ? instead of its signature.
return str(ANY_TYPE)
else:
return self._signature
def str(self):
if self == ANY_TYPE:
return ANY_TYPE.str()
else:
return self._signature
__hash__ = ComplexType.__hash__
class UnaryOpType(PlaceholderType):
"""
``UnaryOpType`` is a kind of ``PlaceholderType`` that takes an argument of any type and returns
an expression of the same type. ``identity`` is an example of this kind of function. The type
signature of ``UnaryOpType`` is <#1, #1>.
Parameters
----------
allowed_substitutions : ``Set[BasicType]``, optional (default=None)
If given, this sets restrictions on the types that can be substituted. That is, say you
have a unary operation that is only permitted for numbers and dates, you can pass those in
here, and we will only consider those types when calling :func:`substitute_any_type`. If
this is ``None``, all basic types are allowed.
signature : ``str``, optional (default='<#1,#1>')
The signature of the operation is what will appear in action sequences that include this
type. The default value is suitable for functions that apply to any type. If you have a
restricted set of allowed substitutions, you likely want to change the type signature to
reflect that.
"""
def __init__(
self,
type_: BasicType = ANY_TYPE,
allowed_substitutions: Set[BasicType] = None,
signature: str = "<#1,#1>",
) -> None:
super().__init__(type_, type_)
self._allowed_substitutions = allowed_substitutions
self._signature = signature
def resolve(self, other) -> Optional[Type]:
"""See ``PlaceholderType.resolve``"""
if not isinstance(other, NltkComplexType):
return None
other_first = other.first.resolve(other.second)
if not other_first:
return None
other_second = other.second.resolve(other_first)
if not other_second:
return None
return UnaryOpType(other_first, self._allowed_substitutions, self._signature)
def get_application_type(self, argument_type: Type) -> Type:
return argument_type
def substitute_any_type(self, basic_types: Set[BasicType]) -> List[Type]:
if self.first != ANY_TYPE:
return [self]
allowed_basic_types = (
self._allowed_substitutions if self._allowed_substitutions else basic_types
)
return [
UnaryOpType(basic_type, self._allowed_substitutions, self._signature)
for basic_type in allowed_basic_types
]
class BinaryOpType(PlaceholderType):
"""
``BinaryOpType`` is a function that takes two arguments of the same type and returns an
argument of that type. ``+``, ``-``, ``and`` and ``or`` are examples of this kind of function.
The type signature of ``BinaryOpType`` is ``<#1,<#1,#1>>``.
Parameters
----------
allowed_substitutions : ``Set[BasicType]``, optional (default=None)
If given, this sets restrictions on the types that can be substituted. That is, say you
have a unary operation that is only permitted for numbers and dates, you can pass those in
here, and we will only consider those types when calling :func:`substitute_any_type`. If
this is ``None``, all basic types are allowed.
signature : ``str``, optional (default='<#1,<#1,#1>>')
The signature of the operation is what will appear in action sequences that include this
type. The default value is suitable for functions that apply to any type. If you have a
restricted set of allowed substitutions, you likely want to change the type signature to
reflect that.
"""
def __init__(
self,
type_: BasicType = ANY_TYPE,
allowed_substitutions: Set[BasicType] = None,
signature: str = "<#1,<#1,#1>>",
) -> None:
super().__init__(type_, ComplexType(type_, type_))
self._allowed_substitutions = allowed_substitutions
self._signature = signature
def resolve(self, other: Type) -> Optional[Type]:
"""See ``PlaceholderType.resolve``"""
if not isinstance(other, NltkComplexType):
return None
if not isinstance(other.second, NltkComplexType):
return None
other_first = other.first.resolve(other.second.first)
if other_first is None:
return None
other_first = other_first.resolve(other.second.second)
if not other_first:
return None
other_second = other.second.resolve(ComplexType(other_first, other_first))
if not other_second:
return None
return BinaryOpType(other_first, self._allowed_substitutions, self._signature)
def get_application_type(self, argument_type: Type) -> Type:
return ComplexType(argument_type, argument_type)
def substitute_any_type(self, basic_types: Set[BasicType]) -> List[Type]:
if self.first != ANY_TYPE:
return [self]
allowed_basic_types = (
self._allowed_substitutions if self._allowed_substitutions else basic_types
)
return [
BinaryOpType(basic_type, self._allowed_substitutions, self._signature)
for basic_type in allowed_basic_types
]
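# Sketch (assuming basic types ``e`` and ``r`` are defined): a fresh ``BinaryOpType()``
# still has its placeholder unresolved, so ``substitute_any_type({e, r})`` returns two
# ``BinaryOpType`` instances, one with the placeholder bound to ``e`` (behaving like
# <e,<e,e>>) and one bound to ``r`` (behaving like <r,<r,r>>).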
class TypedConstantExpression(ConstantExpression):
"""
NLTK assumes all constants are of type ``EntityType`` (e) by default. We define this new class
where we can pass a default type to the constructor and use that in the ``_set_type`` method.
"""
def __init__(self, variable, default_type: Type) -> None:
super(TypedConstantExpression, self).__init__(variable)
self._default_type = default_type
def _set_type(self, other_type=ANY_TYPE, signature=None) -> None:
if other_type == ANY_TYPE:
super(TypedConstantExpression, self)._set_type(self._default_type, signature)
else:
super(TypedConstantExpression, self)._set_type(other_type, signature)
class DynamicTypeApplicationExpression(ApplicationExpression):
"""
NLTK's ``ApplicationExpression`` (which represents function applications like P(x)) has two
limitations, which we overcome by inheriting from ``ApplicationExpression`` and overriding two
methods.
Firstly, ``ApplicationExpression`` does not handle the case where P's type involves
placeholders (R, V, !=, etc.), which are special cases because their return types depend on the
type of their arguments (x). We override the property ``type`` to redefine the type of the
application.
Secondly, NLTK's variables only bind to entities, and thus the variable types are 'e' by
default. We get around this issue by replacing x with X, whose initial type is ANY_TYPE, and
later gets resolved based on the type signature of the function whose scope the variable
appears in. This variable binding operation is implemented by overriding ``_set_type`` below.
"""
def __init__(
self, function: Expression, argument: Expression, variables_with_placeholders: Set[str]
) -> None:
super(DynamicTypeApplicationExpression, self).__init__(function, argument)
self._variables_with_placeholders = variables_with_placeholders
@property
def type(self):
# This gets called when the tree is being built by ``LogicParser.parse``. So, we do not
# have access to the type signatures yet. Thus, we need to look at the name of the function
# to return the type.
        if str(self.function) not in self._variables_with_placeholders:
return super(DynamicTypeApplicationExpression, self).type
if self.function.type == ANY_TYPE:
return ANY_TYPE
argument_type = self.argument.type
return self.function.type.get_application_type(argument_type)
def _set_type(self, other_type: Type = ANY_TYPE, signature=None) -> None:
"""
We override this method to do just one thing on top of ``ApplicationExpression._set_type``.
In lambda expressions of the form /x F(x), where the function is F and the argument is x,
we can use the type of F to infer the type of x. That is, if F is of type <a, b>, we can
resolve the type of x against a. We do this as the additional step after setting the type
of F(x).
So why does NLTK not already do this? NLTK assumes all variables (x) are of type entity
(e). So it does not have to resolve the type of x anymore. However, this would cause type
inference failures in our case since x can bind to rows, numbers or cells, each of which
has a different type. To deal with this issue, we made X of type ANY_TYPE. Also, LambdaDCS
(and some other languages) contain a var function that indicate the usage of variables
within lambda functions. We map var to V, and made it of type <#1, #1>. We cannot leave X
as ANY_TYPE because that would propagate up the tree. We need to set its type when we have
the information about F. Hence this method. Note that the language may or may not contain
the var function. We deal with both cases below.
"""
super(DynamicTypeApplicationExpression, self)._set_type(other_type, signature)
# TODO(pradeep): Assuming the mapping of "var" function is "V". Do something better.
if isinstance(self.argument, ApplicationExpression) and str(self.argument.function) == "V":
self.argument.argument._set_type(self.function.type.first)
if str(self.argument) == "X" and str(self.function) != "V":
self.argument._set_type(self.function.type.first)
class DynamicTypeLogicParser(LogicParser):
"""
``DynamicTypeLogicParser`` is a ``LogicParser`` that can deal with ``NamedBasicType`` and
``PlaceholderType`` appropriately. Our extension here does two things differently.
Firstly, we should handle constants of different types. We do this by passing a dict of format
``{name_prefix: type}`` to the constructor. For example, your domain has entities of types
unicorns and elves, and you have an entity "Phil" of type unicorn, and "Bob" of type "elf". The
names of the two entities should then be "unicorn:phil" and "elf:bob" respectively.
Secondly, since we defined a new kind of ``ApplicationExpression`` above, the ``LogicParser``
should be able to create this new kind of expression.
"""
def __init__(
self,
type_check: bool = True,
constant_type_prefixes: Dict[str, BasicType] = None,
type_signatures: Dict[str, Type] = None,
) -> None:
super(DynamicTypeLogicParser, self).__init__(type_check)
self._constant_type_prefixes = constant_type_prefixes or {}
self._variables_with_placeholders = {
name for name, type_ in type_signatures.items() if isinstance(type_, PlaceholderType)
}
def make_ApplicationExpression(self, function, argument):
return DynamicTypeApplicationExpression(
function, argument, self._variables_with_placeholders
)
def make_VariableExpression(self, name):
if ":" in name:
prefix = name.split(":")[0]
if prefix in self._constant_type_prefixes:
return TypedConstantExpression(Variable(name), self._constant_type_prefixes[prefix])
else:
raise RuntimeError(
f"Unknown prefix: {prefix}. Did you forget to pass it to the constructor?"
)
return super(DynamicTypeLogicParser, self).make_VariableExpression(name)
def __eq__(self, other):
if isinstance(self, other.__class__):
return self.__dict__ == other.__dict__
return NotImplemented
class NameMapper:
"""
The ``LogicParser`` we use has some naming conventions for functions (i.e. they should start
with an upper case letter, and the remaining characters can only be digits). This means that we
have to internally represent functions with unintuitive names. This class will automatically
give unique names following the convention, and populate central mappings with these names. If
for some reason you need to manually define the alias, you can do so by passing an alias to
`map_name_with_signature`.
Parameters
----------
language_has_lambda : ``bool`` (optional, default=False)
If your language has lambda functions, the word "lambda" needs to be in the name mapping,
mapped to the alias "\". NLTK understands this symbol, and it doesn't need a type signature
for it. Setting this flag to True adds the mapping to `name_mapping`.
alias_prefix : ``str`` (optional, default="F")
        The one letter prefix used for all aliases. You do not need to specify it if you have only one
        instance of this class for your language. If not, you can specify a different prefix for each
name mapping you use for your language.
"""
def __init__(self, language_has_lambda: bool = False, alias_prefix: str = "F") -> None:
self.name_mapping: Dict[str, str] = {}
if language_has_lambda:
self.name_mapping["lambda"] = "\\"
self.type_signatures: Dict[str, Type] = {}
assert len(alias_prefix) == 1 and alias_prefix.isalpha(), (
f"Invalid alias prefix: {alias_prefix}" "Needs to be a single upper case character."
)
self._alias_prefix = alias_prefix.upper()
self._name_counter = 0
def map_name_with_signature(self, name: str, signature: Type, alias: str = None) -> None:
if name in self.name_mapping:
alias = self.name_mapping[name]
old_signature = self.type_signatures[alias]
if old_signature != signature:
raise RuntimeError(
f"{name} already added with signature {old_signature}. "
f"Cannot add it again with {signature}!"
)
else:
alias = alias or f"{self._alias_prefix}{self._name_counter}"
self._name_counter += 1
self.name_mapping[name] = alias
self.type_signatures[alias] = signature
def get_alias(self, name: str) -> str:
if name not in self.name_mapping:
raise RuntimeError(f"Unmapped name: {name}")
return self.name_mapping[name]
def get_signature(self, name: str) -> Type:
alias = self.get_alias(name)
return self.type_signatures[alias]
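    # Minimal usage sketch (``REVERSE_TYPE`` and ``COUNT_TYPE`` are assumed to be Type
    # objects defined by a particular language, not by this module):
    #
    #     mapper = NameMapper(language_has_lambda=True)
    #     mapper.map_name_with_signature("reverse", REVERSE_TYPE)   # aliased to "F0"
    #     mapper.map_name_with_signature("count", COUNT_TYPE)       # aliased to "F1"
    #     mapper.get_alias("count")       # -> "F1"
    #     mapper.get_signature("count")   # -> COUNT_TYPE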
def substitute_any_type(type_: Type, basic_types: Set[BasicType]) -> List[Type]:
"""
Takes a type and a set of basic types, and substitutes all instances of ANY_TYPE with all
possible basic types and returns a list with all possible combinations. Note that this
substitution is unconstrained. That is, If you have a type with placeholders, <#1,#1> for
example, this may substitute the placeholders with different basic types. In that case, you'd
want to use ``_substitute_placeholder_type`` instead.
"""
if type_ == ANY_TYPE:
return list(basic_types)
if isinstance(type_, BasicType):
return [type_]
# If we've made it this far, we have a ComplexType, and we can just call
# `type_.substitute_any_type()`.
return type_.substitute_any_type(basic_types)
def _make_production_string(source: Type, target: Union[List[Type], Type]) -> str:
return f"{source} -> {target}"
def _get_complex_type_production(
complex_type: ComplexType, multi_match_mapping: Dict[Type, List[Type]]
) -> List[Tuple[Type, str]]:
"""
Takes a complex type (without any placeholders), gets its return values, and returns productions
(perhaps each with multiple arguments) that produce the return values. This method also takes
care of ``MultiMatchNamedBasicTypes``. If one of the arguments or the return types is a multi
match type, it gets all the substitutions of those types from ``multi_match_mapping`` and forms
a list with all possible combinations of substitutions. If the complex type passed to this method
has no ``MultiMatchNamedBasicTypes``, the returned list will contain a single tuple. For
    example, if the complex type is ``<a,<<b,c>,d>>``, and ``a`` is a multi match type that matches
    ``e`` and ``f``, this gives the following list of tuples: ``[('d', 'd -> [<a,<<b,c>,d>, e,
    <b,c>]'), ('d', 'd -> [<a,<<b,c>,d>, f, <b,c>]')]``. Note that we assume there will be no
    productions from the multi match type, and the list above does not contain ``('d', 'd ->
    [<a,<<b,c>,d>, a, <b,c>]')``.
"""
return_type = complex_type.return_type()
if isinstance(return_type, MultiMatchNamedBasicType):
return_types_matched = list(
multi_match_mapping[return_type]
if return_type in multi_match_mapping
else return_type.types_to_match
)
else:
return_types_matched = [return_type]
arguments = complex_type.argument_types()
argument_types_matched = []
for argument_type in arguments:
if isinstance(argument_type, MultiMatchNamedBasicType):
matched_types = list(
multi_match_mapping[argument_type]
if argument_type in multi_match_mapping
else argument_type.types_to_match
)
argument_types_matched.append(matched_types)
else:
argument_types_matched.append([argument_type])
complex_type_productions: List[Tuple[Type, str]] = []
for matched_return_type in return_types_matched:
for matched_arguments in itertools.product(*argument_types_matched):
complex_type_productions.append(
(
matched_return_type,
_make_production_string(return_type, [complex_type] + list(matched_arguments)),
)
)
return complex_type_productions
def get_valid_actions(
name_mapping: Dict[str, str],
type_signatures: Dict[str, Type],
basic_types: Set[Type],
multi_match_mapping: Dict[Type, List[Type]] = None,
valid_starting_types: Set[Type] = None,
num_nested_lambdas: int = 0,
) -> Dict[str, List[str]]:
"""
Generates all the valid actions starting from each non-terminal. For terminals of a specific
type, we simply add a production from the type to the terminal. For all terminal `functions`,
we additionally add a rule that allows their return type to be generated from an application of
the function. For example, the function ``<e,<r,<d,r>>>``, which takes three arguments and
    returns an ``r`` would generate the production rule ``r -> [<e,<r,<d,r>>>, e, r, d]``.
For functions that do not contain ANY_TYPE or placeholder types, this is straight-forward.
When there are ANY_TYPES or placeholders, we substitute the ANY_TYPE with all possible basic
types, and then produce a similar rule. For example, the identity function, with type
``<#1,#1>`` and basic types ``e`` and ``r``, would produce the rules ``e -> [<#1,#1>, e]`` and
``r -> [<#1,#1>, r]``.
We additionally add a valid action from the start symbol to all ``valid_starting_types``.
Parameters
----------
name_mapping : ``Dict[str, str]``
The mapping of names that appear in your logical form languages to their aliases for NLTK.
If you are getting all valid actions for a type declaration, this can be the
``COMMON_NAME_MAPPING``.
type_signatures : ``Dict[str, Type]``
The mapping from name aliases to their types. If you are getting all valid actions for a
type declaration, this can be the ``COMMON_TYPE_SIGNATURE``.
basic_types : ``Set[Type]``
Set of all basic types in the type declaration.
multi_match_mapping : ``Dict[Type, List[Type]]`` (optional)
A mapping from `MultiMatchNamedBasicTypes` to the types they can match. This may be
different from the type's ``types_to_match`` field based on the context. While building action
sequences that lead to complex types with ``MultiMatchNamedBasicTypes``, if a type does not
occur in this mapping, the default set of ``types_to_match`` for that type will be used.
valid_starting_types : ``Set[Type]``, optional
These are the valid starting types for your grammar; e.g., what types are we allowed to
parse expressions into? We will add a "START -> TYPE" rule for each of these types. If
this is ``None``, we default to using ``basic_types``.
num_nested_lambdas : ``int`` (optional)
Does the language used permit lambda expressions? And if so, how many nested lambdas do we
need to worry about? We'll add rules like "<r,d> -> ['lambda x', d]" for all complex
types, where the variable is determined by the number of nestings. We currently only
permit up to three levels of nesting, just for ease of implementation.
"""
valid_actions: Dict[str, Set[str]] = defaultdict(set)
valid_starting_types = valid_starting_types or basic_types
for type_ in valid_starting_types:
valid_actions[str(START_TYPE)].add(_make_production_string(START_TYPE, type_))
complex_types = set()
for name, alias in name_mapping.items():
# Lambda functions and variables associated with them get produced in specific contexts. So
# we do not add them to ``valid_actions`` here, and let ``GrammarState`` deal with it.
# ``var`` is a special function that some languages (like LambdaDCS) use within lambda
# functions to indicate the use of a variable (eg.: ``(lambda x (fb:row.row.year (var x)))``)
# We do not have to produce this function outside the scope of lambda. Even within lambdas,
# it is a lot easier to not do it, and let the action sequence to logical form transformation
# logic add it to the output logical forms instead.
if name in ["lambda", "var", "x", "y", "z"]:
continue
name_type = type_signatures[alias]
# Type to terminal productions.
for substituted_type in substitute_any_type(name_type, basic_types):
valid_actions[str(substituted_type)].add(
_make_production_string(substituted_type, name)
)
# Keeping track of complex types.
if isinstance(name_type, ComplexType) and name_type != ANY_TYPE:
complex_types.add(name_type)
for complex_type in complex_types:
for substituted_type in substitute_any_type(complex_type, basic_types):
for head, production in _get_complex_type_production(
substituted_type, multi_match_mapping or {}
):
valid_actions[str(head)].add(production)
# We can produce complex types with a lambda expression, though we'll leave out
# placeholder types for now.
for i in range(num_nested_lambdas):
lambda_var = chr(ord("x") + i)
# We'll only allow lambdas to be functions that take and return basic types as their
# arguments, for now. Also, we're doing this for all possible complex types where
# the first and second types are basic types. So we may be overgenerating a bit.
for first_type in basic_types:
for second_type in basic_types:
key = ComplexType(first_type, second_type)
production_string = _make_production_string(
key, ["lambda " + lambda_var, second_type]
)
valid_actions[str(key)].add(production_string)
valid_action_strings = {key: sorted(value) for key, value in valid_actions.items()}
return valid_action_strings
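# Hedged sketch of how the helpers above are typically combined (the ``mapper`` and the
# basic types are placeholders for whatever a concrete language defines):
#
#     valid_actions = get_valid_actions(
#         name_mapping=mapper.name_mapping,
#         type_signatures=mapper.type_signatures,
#         basic_types={ROW_TYPE, CELL_TYPE},
#         num_nested_lambdas=1,
#     )
#     # e.g. valid_actions["r"] is the sorted list of production strings with "r" on the left.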
START_TYPE = NamedBasicType(START_SYMBOL)
# TODO(mattg): We're hard-coding three lambda variables here. This isn't a great way to do
# this; it's just something that works for now, that we can fix later if / when it's needed.
# If you allow for more than three nested lambdas, or if you want to use different lambda
# variable names, you'll have to change this somehow.
LAMBDA_VARIABLES = set(["x", "y", "z"])
def is_nonterminal(production: str) -> bool:
# TODO(pradeep): This is pretty specific to the assumptions made in converting types to
# strings (e.g., that we're only using the first letter for types, lowercased).
# TODO(pradeep): Also we simply check the surface forms here, and this works for
# wikitables and nlvr. We should ideally let the individual type declarations define their own
# variants of this method.
if production in ["<=", "<"]:
# Some grammars (including the wikitables grammar) have "less than" and "less than or
# equal to" functions that are terminals. We don't want to treat those like our
# "<t,d>" types.
return False
if production[0] == "<":
return True
if production.startswith("fb:"):
return False
if len(production) > 1 or production in LAMBDA_VARIABLES:
return False
return production[0].islower()
| allennlp-semparse-master | allennlp_semparse/nltk_languages/type_declarations/type_declaration.py |
import json
import os
import sys
from collections import defaultdict
from typing import Dict, Any, Iterable, Tuple
import glob
import argparse
sys.path.insert(0, os.path.dirname(os.path.abspath(os.path.join(__file__, os.pardir))))
JsonDict = Dict[str, Any]
def process_dataset(data: JsonDict, split_type: str) -> Iterable[Tuple[str, JsonDict]]:
splits = defaultdict(list)
for example in data:
if split_type == "query_split":
example_split = example["query-split"]
splits[example_split].append(example)
else:
sentences = example.pop("sentences")
for sentence in sentences:
new_example = example.copy()
new_example["sentences"] = [sentence]
split = sentence["question-split"]
splits[split].append(new_example)
for split, examples in splits.items():
if split.isdigit():
yield ("split_" + split + ".json", examples)
else:
yield (split + ".json", examples)
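# Illustrative sketch of the yielded pairs (miniature, made-up example): with
# split_type="question_split", an input example whose two sentences are tagged
# "train" and "dev" is copied into both outputs, each copy keeping exactly one sentence:
#
#     [("train.json", [{..., "sentences": [blob1]}]), ("dev.json", [{..., "sentences": [blob2]}])]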
def main(output_directory: str, data: str) -> None:
"""
Processes the text2sql data into the following directory structure:
``dataset/{query_split, question_split}/{train,dev,test}.json``
for datasets which have train, dev and test splits, or:
``dataset/{query_split, question_split}/{split_{split_id}}.json``
for datasets which use cross validation.
    The JSON format is identical to the original datasets, except that the examples
    are split into separate files with respect to the split_type. This means that
for the question split, all of the sql data is duplicated for each sentence
which is bucketed together as having the same semantics.
As an example, the following blob would be put "as-is" into the query split
dataset, and split into two datasets with identical blobs for the question split,
differing only in the "sentence" key, where blob1 would end up in the train split
and blob2 would be in the dev split, with the rest of the json duplicated in each.
{
"comments": [],
"old-name": "",
"query-split": "train",
"sentences": [{blob1, "question-split": "train"}, {blob2, "question-split": "dev"}],
"sql": [],
"variables": []
},
Parameters
----------
output_directory : str, required.
The output directory.
data: str, default = None
        The path to the data directory of https://github.com/jkkummerfeld/text2sql-data.
"""
json_files = glob.glob(os.path.join(data, "*.json"))
for dataset in json_files:
dataset_name = os.path.basename(dataset)[:-5]
print(
f"Processing dataset: {dataset} into query and question "
f"splits at output path: {output_directory + '/' + dataset_name}"
)
full_dataset = json.load(open(dataset))
if not isinstance(full_dataset, list):
full_dataset = [full_dataset]
for split_type in ["query_split", "question_split"]:
dataset_out = os.path.join(output_directory, dataset_name, split_type)
for split, split_dataset in process_dataset(full_dataset, split_type):
dataset_out = os.path.join(output_directory, dataset_name, split_type)
os.makedirs(dataset_out, exist_ok=True)
json.dump(split_dataset, open(os.path.join(dataset_out, split), "w"), indent=4)
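# Example invocation (paths are placeholders):
#
#     python scripts/reformat_text2sql_data.py \
#         --data /path/to/text2sql-data/data --out /path/to/processed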
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="process text2sql data into a more readable format."
)
parser.add_argument("--out", type=str, help="The serialization directory.")
parser.add_argument("--data", type=str, help="The path to the text2sql data directory.")
args = parser.parse_args()
main(args.out, args.data)
| allennlp-semparse-master | scripts/reformat_text2sql_data.py |
import json
import os
import sys
import argparse
sys.path.insert(0, os.path.dirname(os.path.abspath(os.path.join(__file__, os.pardir))))
from allennlp.data.dataset_readers.dataset_utils.text2sql_utils import process_sql_data
from allennlp.semparse.contexts.sql_context_utils import SqlVisitor, format_grammar_string
from allennlp.semparse.contexts.text2sql_table_context import GRAMMAR_DICTIONARY
from parsimonious.grammar import Grammar
# still TODO:
# JOIN, seems hard.
# Added query to pos_value - check this, very unclear if it is the correct way to handle this.
# not all functions can take * as an argument.
# Check whether LIKE can take non string arguments (example in scholar dataset)
def parse_dataset(filename: str, filter_by: str = None, verbose: bool = False):
grammar_string = format_grammar_string(GRAMMAR_DICTIONARY)
grammar = Grammar(grammar_string)
filter_by = filter_by or "13754332dvmklfdsaf-3543543"
data = json.load(open(filename))
num_queries = 0
num_parsed = 0
filtered_errors = 0
non_basic_as_aliases = 0
as_count = 0
queries_with_weird_as = 0
for i, sql_data in enumerate(process_sql_data(data)):
sql_visitor = SqlVisitor(grammar)
        if any(x[:7] == "DERIVED" for x in sql_data.sql):
# NOTE: DATA hack alert - the geography dataset doesn't alias derived tables consistently,
# so we fix the data a bit here instead of completely re-working the grammar.
sql_to_use = []
for j, token in enumerate(sql_data.sql):
if token[:7] == "DERIVED" and sql_data.sql[j - 1] == ")":
sql_to_use.append("AS")
sql_to_use.append(token)
previous_token = None
query_has_weird_as = False
for j, token in enumerate(sql_to_use[:-1]):
if token == "AS" and previous_token is not None:
table_name = sql_to_use[j + 1][:-6]
if table_name != previous_token:
non_basic_as_aliases += 1
query_has_weird_as = True
as_count += 1
previous_token = token
if query_has_weird_as:
queries_with_weird_as += 1
sql_string = " ".join(sql_to_use)
else:
sql_string = " ".join(sql_data.sql)
num_queries += 1
try:
sql_visitor.parse(sql_string)
num_parsed += 1
except Exception as e:
if filter_by in sql_string:
filtered_errors += 1
if verbose and filter_by not in sql_string:
print()
print(e)
print(" ".join(sql_data.text))
print(sql_data.sql)
try:
import sqlparse
print(sqlparse.format(sql_string, reindent=True))
except Exception:
print(sql_string)
if (i + 1) % 500 == 0:
print(f"\tProcessed {i + 1} queries.")
return (
num_parsed,
num_queries,
filtered_errors,
non_basic_as_aliases,
as_count,
queries_with_weird_as,
)
def main(
    data_directory: str, dataset: str = None, filter_by: str = None, verbose: bool = False
) -> None:
"""
Parameters
----------
data_directory : str, required.
The path to the data directory of https://github.com/jkkummerfeld/text2sql-data
which has been preprocessed using scripts/reformat_text2sql_data.py.
dataset : str, optional.
The dataset to parse. By default all are parsed.
filter_by : str, optional
Compute statistics about a particular error and only print errors which don't contain this string.
verbose : bool, optional.
Whether to print information about incorrectly parsed SQL.
"""
directory_dict = {path: files for path, names, files in os.walk(data_directory) if files}
for directory, data_files in directory_dict.items():
if "query_split" in directory or (dataset is not None and dataset not in directory):
continue
print(f"Parsing dataset at {directory}")
parsed = 0
total_non_aliases = 0
total_as_count = 0
total_queries_with_weird_as = 0
total_filtered_errors = 0
total = 0
for json_file in data_files:
print(f"\tParsing split at {json_file}")
file_path = os.path.join(directory, json_file)
stats = parse_dataset(file_path, filter_by, verbose)
parsed += stats[0]
total += stats[1]
total_filtered_errors += stats[2]
total_non_aliases += stats[3]
total_as_count += stats[4]
total_queries_with_weird_as += stats[5]
print(f"\tParsed {parsed} out of {total} queries, coverage {parsed/total}")
print(
f"\tFound {total_non_aliases} out of {total_as_count} non simple AS aliases. "
f"percentage: {total_non_aliases/total_as_count}"
)
print(
f"\tFound {total_queries_with_weird_as} out of {total} queries with > 1 weird AS. "
f"percentage: {total_queries_with_weird_as/total}"
)
if filter_by is not None:
print(
f"\tOf {total - parsed} errors, {total_filtered_errors / (total - parsed + 1e-13)} "
f"contain {filter_by}"
)
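# Example invocation (paths and the filter string are placeholders):
#
#     python scripts/examine_sql_coverage.py --data /path/to/processed_text2sql_data \
#         --dataset geography --filter "LIKE" --verbose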
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="Check the coverage of a SQL Grammar on the text2sql datasets."
)
parser.add_argument("--data", type=str, help="The path to the text2sql data directory.")
parser.add_argument(
"--dataset",
type=str,
default=None,
help="The dataset to check coverage for. Defaults to all datasets.",
)
parser.add_argument("--filter", type=str, default=None, help="A string to filter by.")
parser.add_argument("--verbose", help="Verbose output.", action="store_true")
args = parser.parse_args()
main(args.data, args.dataset, args.filter, args.verbose)
| allennlp-semparse-master | scripts/examine_sql_coverage.py |
#!/usr/bin/env python3
import argparse
from typing import Dict
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument("version_type", choices=["stable", "latest", "current"])
parser.add_argument("--minimal", action="store_true", default=False)
return parser.parse_args()
def post_process(version: str, minimal: bool = False):
if version.startswith("v"):
return version if not minimal else version[1:]
return version if minimal else f"v{version}"
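# Worked examples: post_process("1.2.3") returns "v1.2.3", while
# post_process("v1.2.3", minimal=True) strips the prefix and returns "1.2.3".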
def get_current_version() -> str:
VERSION: Dict[str, str] = {}
with open("allennlp_semparse/version.py", "r") as version_file:
exec(version_file.read(), VERSION)
return VERSION["VERSION"]
def get_latest_version() -> str:
# Import this here so this requirements isn't mandatory when we just want to
# call `get_current_version`.
import requests
resp = requests.get("https://api.github.com/repos/allenai/allennlp-semparse/tags")
return resp.json()[0]["name"]
def get_stable_version() -> str:
import requests
resp = requests.get("https://api.github.com/repos/allenai/allennlp-semparse/releases/latest")
return resp.json()["tag_name"]
def main() -> None:
opts = parse_args()
if opts.version_type == "stable":
print(post_process(get_stable_version(), opts.minimal))
elif opts.version_type == "latest":
print(post_process(get_latest_version(), opts.minimal))
elif opts.version_type == "current":
print(post_process(get_current_version(), opts.minimal))
else:
raise NotImplementedError
if __name__ == "__main__":
main()
| allennlp-semparse-master | scripts/get_version.py |
import json
import logging
import os
import sys
sys.path.insert(0, os.path.dirname(os.path.abspath(os.path.join(__file__, os.pardir))))
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", level=logging.DEBUG
)
from allennlp.commands.train import datasets_from_params
from allennlp.common import Params
from allennlp.data import Instance
def main(params: Params, outdir: str):
os.makedirs(outdir, exist_ok=True)
params["dataset_reader"]["include_table_metadata"] = True
if "validation_dataset_reader" in params:
params["validation_dataset_reader"]["include_table_metadata"] = True
all_datasets = datasets_from_params(params)
for name, dataset in all_datasets.items():
with open(outdir + name + ".jsonl", "w") as outfile:
for instance in iter(dataset):
outfile.write(to_json_line(instance) + "\n")
def to_json_line(instance: Instance):
json_obj = {}
question_tokens = instance.fields["question"].tokens
json_obj["question_tokens"] = [
{"text": token.text, "lemma": token.lemma_} for token in question_tokens
]
json_obj["table_lines"] = instance.fields["table_metadata"].metadata
action_map = {i: action.rule for i, action in enumerate(instance.fields["actions"].field_list)}
if "target_action_sequences" in instance.fields:
targets = []
for target_sequence in instance.fields["target_action_sequences"].field_list:
targets.append([])
for target_index_field in target_sequence.field_list:
targets[-1].append(action_map[target_index_field.sequence_index])
json_obj["target_action_sequences"] = targets
json_obj["example_lisp_string"] = instance.fields["example_lisp_string"].metadata
entity_texts = []
for entity_text in instance.fields["table"].entity_texts:
tokens = [{"text": token.text, "lemma": token.lemma_} for token in entity_text]
entity_texts.append(tokens)
json_obj["entity_texts"] = entity_texts
json_obj["linking_features"] = instance.fields["table"].linking_features
return json.dumps(json_obj)
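# Sketch of the shape of one output line (field values are invented for illustration):
#
#     {"question_tokens": [{"text": "Who", "lemma": "who"}, ...],
#      "table_lines": [...],
#      "target_action_sequences": [["@start@ -> r", ...], ...],
#      "example_lisp_string": "...",
#      "entity_texts": [[{"text": "Name", "lemma": "name"}, ...], ...],
#      "linking_features": [...]}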
if __name__ == "__main__":
param_file = sys.argv[1]
outdir = "wikitables_preprocessed_data/"
params = Params.from_file(param_file)
main(params, outdir)
| allennlp-semparse-master | scripts/wikitables/preprocess_data.py |
#! /usr/bin/env python
import sys
import os
import argparse
import gzip
import logging
import math
from multiprocessing import Process
sys.path.insert(
0, os.path.dirname(os.path.dirname(os.path.abspath(os.path.join(__file__, os.pardir))))
)
from allennlp.common.util import JsonDict
from allennlp.data.tokenizers import WordTokenizer
from allennlp.data.dataset_readers.semantic_parsing.wikitables import util as wikitables_util
from allennlp.semparse.contexts import TableQuestionContext
from allennlp.semparse.domain_languages import WikiTablesLanguage
from allennlp.semparse import ActionSpaceWalker
def search(
tables_directory: str,
data: JsonDict,
output_path: str,
max_path_length: int,
max_num_logical_forms: int,
use_agenda: bool,
output_separate_files: bool,
conservative_agenda: bool,
) -> None:
print(f"Starting search with {len(data)} instances", file=sys.stderr)
language_logger = logging.getLogger("allennlp.semparse.domain_languages.wikitables_language")
language_logger.setLevel(logging.ERROR)
tokenizer = WordTokenizer()
if output_separate_files and not os.path.exists(output_path):
os.makedirs(output_path)
if not output_separate_files:
output_file_pointer = open(output_path, "w")
for instance_data in data:
utterance = instance_data["question"]
question_id = instance_data["id"]
if utterance.startswith('"') and utterance.endswith('"'):
utterance = utterance[1:-1]
# For example: csv/200-csv/47.csv -> tagged/200-tagged/47.tagged
table_file = instance_data["table_filename"].replace("csv", "tagged")
target_list = instance_data["target_values"]
tokenized_question = tokenizer.tokenize(utterance)
table_file = f"{tables_directory}/{table_file}"
context = TableQuestionContext.read_from_file(table_file, tokenized_question)
world = WikiTablesLanguage(context)
walker = ActionSpaceWalker(world, max_path_length=max_path_length)
correct_logical_forms = []
if use_agenda:
agenda = world.get_agenda(conservative=conservative_agenda)
allow_partial_match = not conservative_agenda
all_logical_forms = walker.get_logical_forms_with_agenda(
agenda=agenda, max_num_logical_forms=10000, allow_partial_match=allow_partial_match
)
else:
all_logical_forms = walker.get_all_logical_forms(max_num_logical_forms=10000)
for logical_form in all_logical_forms:
if world.evaluate_logical_form(logical_form, target_list):
correct_logical_forms.append(logical_form)
if output_separate_files and correct_logical_forms:
with gzip.open(f"{output_path}/{question_id}.gz", "wt") as output_file_pointer:
for logical_form in correct_logical_forms:
print(logical_form, file=output_file_pointer)
elif not output_separate_files:
print(f"{question_id} {utterance}", file=output_file_pointer)
if use_agenda:
print(f"Agenda: {agenda}", file=output_file_pointer)
if not correct_logical_forms:
print("NO LOGICAL FORMS FOUND!", file=output_file_pointer)
for logical_form in correct_logical_forms[:max_num_logical_forms]:
print(logical_form, file=output_file_pointer)
print(file=output_file_pointer)
if not output_separate_files:
output_file_pointer.close()
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"table_directory",
type=str,
help="Location of the 'tagged' directory in the" "WikiTableQuestions dataset",
)
parser.add_argument("data_file", type=str, help="Path to the *.examples file")
parser.add_argument(
"output_path",
type=str,
help="""Path to the output directory if
'output_separate_files' is set, or to the output file if not.""",
)
parser.add_argument(
"--max-path-length",
type=int,
dest="max_path_length",
default=10,
help="Max length to which we will search exhaustively",
)
parser.add_argument(
"--max-num-logical-forms",
type=int,
dest="max_num_logical_forms",
default=100,
help="Maximum number of logical forms returned",
)
parser.add_argument(
"--use-agenda",
dest="use_agenda",
action="store_true",
help="Use agenda while searching for logical forms",
)
parser.add_argument(
"--conservative",
action="store_true",
help="Get conservative agenda, and select logical forms with complete match.",
)
parser.add_argument(
"--output-separate-files",
dest="output_separate_files",
action="store_true",
help="""If set, the script will output gzipped
        files, one per example. You may want to do this if you're making data to
train a parser.""",
)
parser.add_argument(
"--num-splits",
dest="num_splits",
type=int,
default=0,
help="Number of splits to make of the data, to run as many processes (default 0)",
)
args = parser.parse_args()
input_data = [
wikitables_util.parse_example_line(example_line) for example_line in open(args.data_file)
]
if args.num_splits == 0 or len(input_data) <= args.num_splits or not args.output_separate_files:
search(
args.table_directory,
input_data,
args.output_path,
args.max_path_length,
args.max_num_logical_forms,
args.use_agenda,
args.output_separate_files,
args.conservative,
)
else:
chunk_size = math.ceil(len(input_data) / args.num_splits)
start_index = 0
for i in range(args.num_splits):
if i == args.num_splits - 1:
data_split = input_data[start_index:]
else:
data_split = input_data[start_index : start_index + chunk_size]
start_index += chunk_size
process = Process(
target=search,
args=(
args.table_directory,
data_split,
args.output_path,
args.max_path_length,
args.max_num_logical_forms,
args.use_agenda,
args.output_separate_files,
args.conservative,
),
)
print(f"Starting process {i}", file=sys.stderr)
process.start()
| allennlp-semparse-master | scripts/wikitables/search_for_logical_forms.py |
#! /usr/bin/env python
import sys
import os
import gzip
import argparse
sys.path.insert(
0, os.path.dirname(os.path.dirname(os.path.abspath(os.path.join(__file__, os.pardir))))
)
from allennlp.data.dataset_readers import WikiTablesDatasetReader
from allennlp.data.dataset_readers.semantic_parsing.wikitables import util
from allennlp.models.archival import load_archive
def make_data(
input_examples_file: str,
tables_directory: str,
archived_model_file: str,
output_dir: str,
num_logical_forms: int,
) -> None:
reader = WikiTablesDatasetReader(
tables_directory=tables_directory, keep_if_no_logical_forms=True, output_agendas=True
)
dataset = reader.read(input_examples_file)
input_lines = []
with open(input_examples_file) as input_file:
input_lines = input_file.readlines()
archive = load_archive(archived_model_file)
model = archive.model
    model.eval()
model._decoder_trainer._max_num_decoded_sequences = 100
for instance, example_line in zip(dataset, input_lines):
outputs = model.forward_on_instance(instance)
world = instance.fields["world"].metadata
parsed_info = util.parse_example_line(example_line)
example_id = parsed_info["id"]
target_list = parsed_info["target_values"]
logical_forms = outputs["logical_form"]
correct_logical_forms = []
for logical_form in logical_forms:
if world.evaluate_logical_form(logical_form, target_list):
correct_logical_forms.append(logical_form)
if len(correct_logical_forms) >= num_logical_forms:
break
num_found = len(correct_logical_forms)
print(f"{num_found} found for {example_id}")
if num_found == 0:
continue
if not os.path.exists(output_dir):
os.makedirs(output_dir)
output_file = gzip.open(os.path.join(output_dir, f"{example_id}.gz"), "wb")
for logical_form in correct_logical_forms:
logical_form_line = (logical_form + "\n").encode("utf-8")
output_file.write(logical_form_line)
output_file.close()
if __name__ == "__main__":
argparser = argparse.ArgumentParser()
argparser.add_argument("input", type=str, help="Input file")
argparser.add_argument("tables_directory", type=str, help="Tables directory")
argparser.add_argument("archived_model", type=str, help="Archived model.tar.gz")
argparser.add_argument(
"--output-dir", type=str, dest="output_dir", help="Output directory", default="erm_output"
)
argparser.add_argument(
"--num-logical-forms",
type=int,
dest="num_logical_forms",
help="Number of logical forms to output",
default=10,
)
args = argparser.parse_args()
make_data(
args.input,
args.tables_directory,
args.archived_model,
args.output_dir,
args.num_logical_forms,
)
| allennlp-semparse-master | scripts/wikitables/generate_data_from_erm_model.py |
#! /usr/bin/env python
"""
NLVR dataset has at most four worlds corresponding to each sentence (with 93% of the sentences
appearing with four worlds), identified by the prefixes in identifiers. This script groups the
worlds and corresponding labels together to enable training a parser with this information.
"""
import json
import argparse
from collections import defaultdict
def group_dataset(input_file: str, output_file: str) -> None:
instance_groups = defaultdict(lambda: {"worlds": [], "labels": []})
for line in open(input_file):
data = json.loads(line)
# "identifier" in the original dataset looks something like 4055-3, where 4055 is common
# across all four instances with the same sentence, but different worlds, and the suffix
# differentiates among the four instances.
identifier = data["identifier"].split("-")[0]
instance_groups[identifier]["identifier"] = identifier
instance_groups[identifier]["sentence"] = data["sentence"]
instance_groups[identifier]["worlds"].append(data["structured_rep"])
instance_groups[identifier]["labels"].append(data["label"])
with open(output_file, "w") as output:
for instance_group in instance_groups.values():
json.dump(instance_group, output)
output.write("\n")
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("input_file", type=str, help="NLVR data file in json format")
parser.add_argument("output_file", type=str, help="Grouped output file in json format")
args = parser.parse_args()
group_dataset(args.input_file, args.output_file)
| allennlp-semparse-master | scripts/nlvr/group_nlvr_worlds.py |
#! /usr/bin/env python
import json
import argparse
from typing import Tuple, List
import os
import sys
sys.path.insert(
0, os.path.dirname(os.path.dirname(os.path.abspath(os.path.join(__file__, os.pardir))))
)
from allennlp.common.util import JsonDict
from allennlp.semparse.domain_languages import NlvrLanguage
from allennlp.semparse.domain_languages.nlvr_language import Box
from allennlp.semparse import ActionSpaceWalker
def read_json_line(line: str) -> Tuple[str, str, List[JsonDict], List[str]]:
data = json.loads(line)
instance_id = data["identifier"]
sentence = data["sentence"]
if "worlds" in data:
structured_reps = data["worlds"]
label_strings = [label_str.lower() for label_str in data["labels"]]
else:
# We're reading ungrouped data.
structured_reps = [data["structured_rep"]]
label_strings = [data["label"].lower()]
return instance_id, sentence, structured_reps, label_strings
def process_data(
input_file: str,
output_file: str,
max_path_length: int,
max_num_logical_forms: int,
ignore_agenda: bool,
write_sequences: bool,
) -> None:
"""
Reads an NLVR dataset and returns a JSON representation containing sentences, labels, correct and
incorrect logical forms. The output will contain at most `max_num_logical_forms` logical forms
each in both correct and incorrect lists. The output format is:
``[{"id": str, "label": str, "sentence": str, "correct": List[str], "incorrect": List[str]}]``
"""
processed_data: JsonDict = []
# We can instantiate the ``ActionSpaceWalker`` with any world because the action space is the
# same for all the ``NlvrLanguage`` objects. It is just the execution that differs.
walker = ActionSpaceWalker(NlvrLanguage({}), max_path_length=max_path_length)
for line in open(input_file):
instance_id, sentence, structured_reps, label_strings = read_json_line(line)
worlds = []
for structured_representation in structured_reps:
boxes = {
Box(object_list, box_id)
for box_id, object_list in enumerate(structured_representation)
}
worlds.append(NlvrLanguage(boxes))
labels = [label_string == "true" for label_string in label_strings]
correct_logical_forms = []
incorrect_logical_forms = []
if ignore_agenda:
# Get 1000 shortest logical forms.
logical_forms = walker.get_all_logical_forms(max_num_logical_forms=1000)
else:
# TODO (pradeep): Assuming all worlds give the same agenda.
sentence_agenda = worlds[0].get_agenda_for_sentence(sentence)
logical_forms = walker.get_logical_forms_with_agenda(
sentence_agenda, max_num_logical_forms * 10
)
for logical_form in logical_forms:
if all([world.execute(logical_form) == label for world, label in zip(worlds, labels)]):
if len(correct_logical_forms) <= max_num_logical_forms:
correct_logical_forms.append(logical_form)
else:
if len(incorrect_logical_forms) <= max_num_logical_forms:
incorrect_logical_forms.append(logical_form)
if (
len(correct_logical_forms) >= max_num_logical_forms
and len(incorrect_logical_forms) >= max_num_logical_forms
):
break
if write_sequences:
correct_sequences = [
worlds[0].logical_form_to_action_sequence(logical_form)
for logical_form in correct_logical_forms
]
incorrect_sequences = [
worlds[0].logical_form_to_action_sequence(logical_form)
for logical_form in incorrect_logical_forms
]
processed_data.append(
{
"id": instance_id,
"sentence": sentence,
"correct_sequences": correct_sequences,
"incorrect_sequences": incorrect_sequences,
"worlds": structured_reps,
"labels": label_strings,
}
)
else:
processed_data.append(
{
"id": instance_id,
"sentence": sentence,
"correct_logical_forms": correct_logical_forms,
"incorrect_logical_forms": incorrect_logical_forms,
"worlds": structured_reps,
"labels": label_strings,
}
)
with open(output_file, "w") as outfile:
for instance_processed_data in processed_data:
json.dump(instance_processed_data, outfile)
outfile.write("\n")
outfile.close()
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("input", type=str, help="NLVR data file")
parser.add_argument("output", type=str, help="Processed output")
parser.add_argument(
"--max-path-length",
type=int,
dest="max_path_length",
help="Maximum path length for logical forms",
default=12,
)
parser.add_argument(
"--max-num-logical-forms",
type=int,
dest="max_num_logical_forms",
help="Maximum number of logical forms per denotation, per question",
default=20,
)
parser.add_argument(
"--ignore-agenda",
dest="ignore_agenda",
help="Should we ignore the "
"agenda and use consistency as the only signal to get logical forms?",
action="store_true",
)
parser.add_argument(
"--write-action-sequences",
dest="write_sequences",
help="If this "
"flag is set, action sequences instead of logical forms will be written "
"to the json file. This will avoid having to parse the logical forms again "
"in the NlvrDatasetReader.",
action="store_true",
)
args = parser.parse_args()
process_data(
args.input,
args.output,
args.max_path_length,
args.max_num_logical_forms,
args.ignore_agenda,
args.write_sequences,
)
| allennlp-semparse-master | scripts/nlvr/get_nlvr_logical_forms.py |
#! /usr/bin/env python
import sys
import os
import json
import argparse
sys.path.insert(
0, os.path.dirname(os.path.dirname(os.path.abspath(os.path.join(__file__, os.pardir))))
)
from allennlp.data.dataset_readers import NlvrDatasetReader
from allennlp.models import NlvrCoverageSemanticParser
from allennlp.models.archival import load_archive
from allennlp.semparse.worlds import NlvrWorld
def make_data(
input_file: str, output_file: str, archived_model_file: str, max_num_decoded_sequences: int
) -> None:
reader = NlvrDatasetReader(output_agendas=True)
model = load_archive(archived_model_file).model
if not isinstance(model, NlvrCoverageSemanticParser):
model_type = type(model)
raise RuntimeError(
f"Expected an archived NlvrCoverageSemanticParser, but found {model_type} instead"
)
    # Tweaking the decoder trainer to coerce it to generate a k-best list. Setting k to 100
# here, so that we can filter out the inconsistent ones later.
model._decoder_trainer._max_num_decoded_sequences = 100
num_outputs = 0
num_sentences = 0
with open(output_file, "w") as outfile:
for line in open(input_file):
num_sentences += 1
input_data = json.loads(line)
sentence = input_data["sentence"]
structured_representations = input_data["worlds"]
labels = input_data["labels"]
instance = reader.text_to_instance(sentence, structured_representations)
outputs = model.forward_on_instance(instance)
action_strings = outputs["best_action_strings"]
logical_forms = outputs["logical_form"]
correct_sequences = []
# Checking for consistency
worlds = [NlvrWorld(structure) for structure in structured_representations]
for sequence, logical_form in zip(action_strings, logical_forms):
denotations = [world.execute(logical_form) for world in worlds]
denotations_are_correct = [
label.lower() == str(denotation).lower()
for label, denotation in zip(labels, denotations)
]
if all(denotations_are_correct):
correct_sequences.append(sequence)
correct_sequences = correct_sequences[:max_num_decoded_sequences]
if not correct_sequences:
continue
output_data = {
"id": input_data["identifier"],
"sentence": sentence,
"correct_sequences": correct_sequences,
"worlds": structured_representations,
"labels": input_data["labels"],
}
json.dump(output_data, outfile)
outfile.write("\n")
num_outputs += 1
outfile.close()
sys.stderr.write(f"{num_outputs} out of {num_sentences} sentences have outputs.")
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("input", type=str, help="Input data file")
parser.add_argument("output", type=str, help="Output data file")
parser.add_argument(
"archived_model", type=str, help="Path to archived model.tar.gz to use for decoding"
)
parser.add_argument(
"--max-num-sequences",
type=int,
dest="max_num_sequences",
help="Maximum number of sequences per instance to output",
default=20,
)
args = parser.parse_args()
make_data(args.input, args.output, args.archived_model, args.max_num_sequences)
| allennlp-semparse-master | scripts/nlvr/generate_data_from_erm_model.py |
import torch
from transformers import BertTokenizer
from torch.utils.data import TensorDataset, DataLoader, RandomSampler, SequentialSampler
from transformers import BertForSequenceClassification, AdamW, BertConfig, BertModel, AutoTokenizer, AutoModel
tokenizer = AutoTokenizer.from_pretrained('allenai/scibert_scivocab_uncased')
model = AutoModel.from_pretrained('allenai/scibert_scivocab_uncased')
| covid-sim-master | demo-env/download_bert_model.py |
# -*- coding: utf-8 -*-
import requests
import time
import math
import signal
def is_ok(url: str) -> bool:
"""
Returns True if the provided URL responds with a 2XX when fetched via
a HTTP GET request.
"""
    try:
        resp = requests.get(url)
    except requests.exceptions.RequestException:
        return False
    return math.floor(resp.status_code / 100) == 2
def scan():
"""
Broadcasts the availability of the proxy's HTTP server once both the
API and UI are ready for traffic.
This script exists solely to ease confusion locally, as both Flask and
the HTTP server bundled with `create-react-app` output logs telling the
user about the ports they're bound to (even though they're inaccessible).
"""
print("")
print("⚓️ Ahoy!")
print("")
print(
"Your application is starting and will be available at " +
"http://localhost:8080 when it's ready."
)
print("")
# If someone tries to cancel the `docker-compose up` invocation, docker
# will send a SIGTERM to the program. We need to handle this and set a
# value that allows the loop to be broken.
    term = False
    def handle_interrupt(signal_number, stack_frame):
        # `term` is local to scan(), so it has to be rebound with `nonlocal`;
        # a `global` declaration here would modify a different (module-level)
        # name and the loop below would never see the change.
        nonlocal term
        term = True
signal.signal(signal.SIGTERM, handle_interrupt)
last_check = time.perf_counter()
is_api_live = False
while (not is_api_live):
if term is True:
break
# We don't use `time.sleep()`, as that'd prevent us from being able
# to break the loop quickly in the event of a SIGTERM.
now = time.perf_counter()
if (now - last_check >= 5):
last_check = now
if not is_api_live:
is_api_live = is_ok("http://api:8000")
if is_api_live:
print("")
print("✨ Your local environment is ready:")
print("")
print(" http://localhost:8080")
print("")
print("⛵️ Smooth sailing!")
print("")
if __name__ == "__main__":
scan()
| covid-sim-master | sonar/ping.py |
import torch
from transformers import BertTokenizer
from torch.utils.data import TensorDataset, DataLoader, RandomSampler, SequentialSampler
from transformers import BertForSequenceClassification, AdamW, BertConfig, BertModel, AutoTokenizer, AutoModel
tokenizer = AutoTokenizer.from_pretrained('allenai/scibert_scivocab_uncased')
model = AutoModel.from_pretrained('allenai/scibert_scivocab_uncased')
| covid-sim-master | api/download_bert_model.py |
import argparse
import faiss
import numpy as np
from sklearn.decomposition import PCA
import pandas as pd
import subprocess
import tqdm
import pickle
def file_len(fname):
p = subprocess.Popen(['wc', '-l', fname], stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
result, err = p.communicate()
if p.returncode != 0:
raise IOError(err)
return int(result.strip().split()[0])
def do_pca(num_vecs = 1e6, fname = "output.jsonl", variance = 0.98):
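    # Reads up to `num_vecs` embedding lines from `fname` (one JSON-like object per line, with the
    # vector stored as a space-separated string) and fits a PCA that keeps the requested fraction
    # of explained variance.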
vecs = []
with open(fname, "r", encoding = "utf-8") as f:
for i,line in enumerate(f):
data_dict = eval(line)
data_dict["vec"] = np.array([float(x) for x in data_dict["vec"].split(" ")]).astype("float32")
vecs.append(data_dict["vec"])
if i > num_vecs: break
vecs = np.array(vecs)
print("Fitting PCA to variance {}...".format(variance))
    pca = PCA(n_components=variance)
vecs_pca = pca.fit_transform(vecs)
print("Done. Number of dimensions: {}".format(vecs_pca.shape[1]))
return pca
def add_to_index(index, vecs, pca, cosine = False):
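    # Projects a batch of raw vectors through the fitted PCA and adds them to the FAISS index.
    # For cosine similarity the rows are L2-normalized first, so the inner product computed by an
    # IndexFlatIP is equivalent to cosine similarity.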
vecs = np.array(vecs).astype("float32")
vecs = pca.transform(vecs)
if cosine:
vecs /= np.linalg.norm(vecs, axis = 1, keepdims = True)
index.add(np.ascontiguousarray(vecs))
def index_vectors(similarity_type, fitted_pca, fname):
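    # Builds a flat FAISS index (inner-product for cosine/dot-product, L2 otherwise) over the
    # PCA-reduced vectors, streaming the embedding file and flushing to the index in batches of
    # ~2048 vectors (only the first ~150k lines are indexed here); the index is written to disk
    # next to the input file.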
dims = fitted_pca.components_.shape[0]
if similarity_type == "cosine" or similarity_type == "dot_product":
index = faiss.IndexFlatIP(dims)
elif similarity_type == "l2":
index = faiss.IndexFlatL2(dims)
else:
raise Exception("Unsupported metric.")
vecs = []
print("Loading vecs & indexing...")
with open(fname, "r", encoding = "utf-8") as f:
for i,line in tqdm.tqdm(enumerate(f), total = 2.4*1e6):
vec = eval(line)["vec"]
vec = [float(x) for x in vec.split(" ")]
vecs.append(vec)
if i > 150000: break
if (len(vecs) > 2048):
                add_to_index(index, vecs, fitted_pca, similarity_type == "cosine")
vecs = []
if len(vecs) > 0:
        add_to_index(index, vecs, fitted_pca, similarity_type == "cosine")
print("Done indexing, Saving to file")
index_fname = fname.rsplit(".", 1)[0]+".index"
faiss.write_index(index, index_fname)
return index
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='balanced brackets generation',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--fname', dest='fname', type=str,
default="output.mean-cls.jsonl")
parser.add_argument('--num_vecs_pca', dest='num_vecs_pca', type=int,
default=150 * 1e3)
parser.add_argument('--pca_variance', dest='pca_variance', type=float,
default=0.985)
parser.add_argument('--similarity_type', dest='similarity_type', type=str,
default="cosine")
args = parser.parse_args()
pca = do_pca(args.num_vecs_pca, args.fname, args.pca_variance)
pca_filename = args.fname.rsplit(".", 1)[0]+".pca.pickle"
with open(pca_filename, "wb") as f:
pickle.dump(pca, f)
index = index_vectors(args.similarity_type, pca, args.fname)
| covid-sim-master | api/covid-ai2/build_index.py |
import torch
from transformers import BertTokenizer
import csv
from torch.utils.data import TensorDataset, DataLoader, RandomSampler, SequentialSampler
from transformers import BertForSequenceClassification, AdamW, BertConfig, BertModel, AutoTokenizer, AutoModel
import time
import datetime
import random
import numpy as np
from transformers import get_linear_schedule_with_warmup
import sys
import argparse
import os
from collections import defaultdict
from torch import nn
import pytorch_lightning as pl
from pytorch_lightning import Trainer
from typing import List, Dict
from torch.utils.data import Dataset
from collections import Counter, defaultdict
import pandas as pd
from sklearn.decomposition import PCA
import tqdm
import json
import faiss
class BertEncoder(object):
def __init__(self, device = 'cpu'):
#self.tokenizer = BertTokenizer.from_pretrained('scibert_scivocab_uncased/vocab.txt')
#self.model = BertModel.from_pretrained('scibert_scivocab_uncased/')
self.tokenizer = AutoTokenizer.from_pretrained('allenai/scibert_scivocab_uncased')
self.model = AutoModel.from_pretrained('allenai/scibert_scivocab_uncased')
self.model.eval()
self.model.to(device)
self.device = device
self.pad_token = self.tokenizer.convert_tokens_to_ids([self.tokenizer.pad_token])[0]
def tokenize_and_pad(self, texts: List[str]):
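        # Wordpiece-encode each text (truncated to 512 tokens), right-pad every sequence to the
        # longest one in the batch, and return the id tensor together with a boolean attention
        # mask that is False on padding positions.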
indexed_texts = [self.tokenizer.encode(text, add_special_tokens=True, max_length = 512) for text in texts] #
max_len = max(len(text) for text in indexed_texts)
indexed_texts = [text + [self.pad_token] * (max_len - len(text)) for text in indexed_texts]
idx_tensor = torch.LongTensor(indexed_texts).to(self.device)
att_tensor = idx_tensor != self.pad_token
return idx_tensor, att_tensor
def encode(self, sentences: List[str], sentence_ids: List[str], batch_size: int, strategy: str = "cls", fname=""):
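        # Encodes the sentences in batches and writes one JSON line per sentence:
        # {"text": ..., "vec": "<space-separated floats>", "id": ...}. The `strategy` argument
        # selects how the token states are pooled into a single vector (CLS, mean, max, min,
        # median, or concatenations such as "mean-cls").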
assert len(sentences) == len(sentence_ids)
with open(fname, "w", encoding = "utf-8") as f:
for batch_idx in tqdm.tqdm(range(0, len(sentences), batch_size), total = len(sentences)//batch_size):
batch_sents = sentences[batch_idx: batch_idx + batch_size]
batch_ids = sentence_ids[batch_idx: batch_idx + batch_size]
assert len(batch_sents) == len(batch_ids)
idx, att_mask = self.tokenize_and_pad(batch_sents)
with torch.no_grad():
outputs = self.model(idx, attention_mask = att_mask)
last_hidden = outputs[0]
if strategy == "cls":
h = last_hidden[:, 0, ...]
elif strategy == "mean-cls":
h = torch.cat([last_hidden[:, 0, ...], torch.mean(last_hidden, axis = 1)], axis = 1)
elif strategy == "mean-cls-max":
h_max = torch.max(last_hidden, axis = 1).values
h = torch.cat([last_hidden[:, 0, ...], torch.mean(last_hidden, axis = 1), h_max], axis = 1)
elif strategy == "mean":
h = torch.mean(last_hidden, axis = 1)
elif strategy == "median":
h = torch.median(last_hidden, axis = 1).values
elif strategy == "max":
h = torch.max(last_hidden, axis = 1).values
elif strategy == "min":
h = torch.min(last_hidden, axis = 1).values
batch_np = h.detach().cpu().numpy()
assert len(batch_np) == len(batch_sents)
sents_states_ids = zip(batch_sents, batch_np, batch_ids)
for sent, vec, sent_id in sents_states_ids:
vec_str = " ".join(["%.4f" % x for x in vec])
sent_dict = {"text": sent, "vec": vec_str, "id": sent_id}
f.write(json.dumps(sent_dict) + "\n")
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='collect bert states over sentences',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--input-filename', dest='input_filename', type=str,
default="results.tsv")
parser.add_argument('--pooling', dest='pooling', type=str,
default="cls")
parser.add_argument('--output_fname', dest='output_fname', type=str,
default="output-cls.jsonl")
parser.add_argument('--device', dest='device', type=str,
default="cpu")
args = parser.parse_args()
df = pd.read_csv(args.input_filename, sep = "\t")
ids, sents = df["sentence_id"].tolist(), df["sentence_text"].tolist()
encoder = BertEncoder(args.device)
encoder.encode(sents, ids, batch_size = 32, strategy = args.pooling, fname=args.output_fname)
| covid-sim-master | api/covid-ai2/run_bert.py |
import pandas as pd
import tqdm
import pickle
import random
import itertools
import torch
from transformers import BertTokenizer
from torch.utils.data import TensorDataset, DataLoader, RandomSampler, SequentialSampler
from transformers import AutoTokenizer, AutoModel, AutoConfig
from transformers import BertForSequenceClassification, AdamW, BertConfig, BertModel, AutoTokenizer, AutoModel
import numpy as np
from typing import List
from torch import nn
import pytorch_lightning as pl
from pytorch_lightning import Trainer
from torch.utils.data import DataLoader
from torch.utils.data import Dataset
from typing import Dict, Tuple
from scipy.spatial.distance import cosine as cosine_distance
from collections import defaultdict, Counter
import nltk
from nltk import ngrams as get_ngrams
from termcolor import colored
import streamlit as st
class BertModel(torch.nn.Module):
def __init__(self, device: str, mode: str = "eval", load_existing = True):
super().__init__()
config = AutoConfig.from_pretrained('Shauli/RE-metric-model-siamese-spike', output_hidden_states=True)
self.tokenizer = AutoTokenizer.from_pretrained('Shauli/RE-metric-model-siamese-spike')
self.model = AutoModel.from_pretrained('Shauli/RE-metric-model-siamese-spike', config=config)
        # The constructor takes no dataset arguments; store only the device, which
        # tokenize() and the inference pass reference as self.device_to_use.
        self.device_to_use = device
self.linear_arg1_1 = torch.nn.Linear(768, 64) #torch.load("finetuned_model/metric_model/linear.pt") #torch.nn.Linear(768, 64)
self.linear_arg1_1.load_state_dict(torch.load("linear1.pt"))
self.linear_arg2_1 = torch.nn.Linear(768, 64)
self.linear_arg1_2 = torch.nn.Linear(768, 64)
self.linear_arg1_2.load_state_dict(torch.load("linear2.pt"))
self.linear_arg2_2 = torch.nn.Linear(768, 64)
self.linear_is_same_relation = torch.nn.Linear(768, 1)
#self.linear_is_same_relation.load_state_dict(torch.load("finetuned_model/metric_model3/linear_rel_clf.pt"))
self.bce_loss = torch.nn.BCEWithLogitsLoss()
self.model.eval()
    def tokenize(self, original_sentence: List[str], add_cls = True, add_sep = True) -> Tuple[List[str], Dict[int, int], Dict[int, int], torch.Tensor]:
        """
        Parameters
        ----------
        original_sentence: The whitespace-tokenized input sentence.
        add_cls / add_sep: Whether to prepend a [CLS] token / append a [SEP] token.

        Returns
        -------
        bert_tokens: The sentence, tokenized by BERT tokenizer.
        orig_to_tok_map: An output dictionary consisting of a mapping (alignment) between indices in the original tokenized sentence, and indices in the sentence tokenized by the BERT tokenizer. See https://github.com/google-research/bert
        tok_to_orig_map: The inverse mapping, from BERT token indices back to original word indices.
        tokens_tensor: A LongTensor of wordpiece ids on the model's device.
"""
if add_cls:
bert_tokens = ["[CLS]"]
else:
bert_tokens = []
orig_to_tok_map = {}
tok_to_orig_map = {}
has_subwords = False
is_subword = []
for i, w in enumerate(original_sentence):
tokenized_w = self.tokenizer.tokenize(w)
has_subwords = len(tokenized_w) > 1
is_subword.append(has_subwords)
bert_tokens.extend(tokenized_w)
orig_to_tok_map[i] = len(bert_tokens) - 1
tok_to_orig_map = {}
if add_sep:
bert_tokens.append("[SEP]")
tok_to_orig_map = get_tok_to_orig_map(orig_to_tok_map, len(original_sentence), len(bert_tokens))
indexed_tokens = self.tokenizer.convert_tokens_to_ids(bert_tokens)
tokens_tensor = torch.tensor([indexed_tokens]).to(self.device_to_use)
return (bert_tokens, orig_to_tok_map, tok_to_orig_map, tokens_tensor)
def forward(self, x):
outputs = self.model(x)
states = outputs[0][0] #[seq_len, 768]
return states
def forward_pass(self, x, is_query: bool):
outputs = self.model(x)
states = outputs[0][0] #[seq_len, 768]
if is_query:
states = self.linear_arg1_1(states)
else:
states = self.linear_arg1_2(states)
return states
def forward_with_loss_calculation_inference(self, x_2, sent1_arg1_vec, sent1_arg2_vec, n_max = 8, normalize = False):
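        # Inference-time alignment: encode the candidate sentence, enumerate all n-gram spans of up
        # to n_max tokens, average the token states inside each span, and rank the spans by
        # Euclidean distance to the query's ARG1 / ARG2 vectors (closest span first).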
idx_arg1_all, idx_arg2_all, all_ngrams = None, None, None
states_2 = self.forward_pass(x_2, is_query = False)
is_neg_pred = torch.zeros(1)# self.linear_is_same_relation(states_2[0])
all_ngrams = get_all_ngrams_spans(len(x_2[0]), [], start_ind = 0,
n_max = n_max)
ngrams = [states_2[ngram[0]:ngram[1]].mean(dim=0) for ngram in all_ngrams]
ngrams = torch.stack(ngrams).to(self.device_to_use)
dist_arg1_all = torch.sqrt(((ngrams-sent1_arg1_vec)**2).sum(dim = 1))
dist_arg2_all = torch.sqrt(((ngrams-sent1_arg2_vec)**2).sum(dim = 1))
idx_arg1_all = torch.argsort(dist_arg1_all).detach().cpu().numpy()
idx_arg2_all = torch.argsort(dist_arg2_all).detach().cpu().numpy()
return idx_arg1_all, idx_arg2_all, all_ngrams, is_neg_pred
def get_entity_range(index_orig, orig_to_tok_map):
m = min(orig_to_tok_map.keys())
if orig_to_tok_map[index_orig] == 1: return (1,2)
if index_orig == 0: return (1, orig_to_tok_map[index_orig] + 1)
before = index_orig - 1
tok_range = (orig_to_tok_map[before] + 1, orig_to_tok_map[index_orig] + 1)
return tok_range
def get_entity_range_multiword_expression(start_and_end, orig_to_tok_map):
start, end = start_and_end
start_range = get_entity_range(start, orig_to_tok_map)
end_range = get_entity_range(end, orig_to_tok_map)
return [start_range[0], end_range[1]]
def get_tok_to_orig_map(orig_to_tok_map, num_words, num_tokens):
ranges = [get_entity_range(i, orig_to_tok_map) for i in range(num_words)]
tok_to_orig_map = {}
for i in range(num_words):
min,max = ranges[i]
for tok in range(min,max):
tok_to_orig_map[tok] = i
for tok in range(num_tokens):
if tok not in tok_to_orig_map:
tok_to_orig_map[tok] = num_words -1
return tok_to_orig_map
def get_all_ngrams_spans(seq_len, forbidden_ranges: List[tuple], start_ind = 0, n_max = 15):
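    # Enumerates every span of 2..n_max consecutive positions in [start_ind, seq_len) as a
    # (first_index, last_index) pair, dropping spans that overlap any forbidden range.
    # E.g. get_all_ngrams_spans(5, [], start_ind=0, n_max=3)
    #   -> [(0, 1), (1, 2), (2, 3), (3, 4), (0, 2), (1, 3), (2, 4)]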
def is_intersecting(ngram, forbidden_ranges):
return [(r[1] > ngram[0] >= r[0]) or(r[1] > ngram[1] >= r[0]) for r in forbidden_ranges]
all_ngrams = []
for n in range(2,n_max+1):
ngrams = list(get_ngrams(range(start_ind, seq_len), n))
all_ngrams.extend(ngrams)
all_ngrams = [(ngram[0], ngram[-1]) for ngram in all_ngrams]
all_ngrams = [ngram for ngram in all_ngrams if not any(is_intersecting(ngram, forbidden_ranges))]
return all_ngrams
def add_arguments(sent:str, arg1_start, arg1_end, arg2_start, arg2_end):
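    # Inserts inline argument markers into the sentence. E.g. (illustrative input) "bats carry viruses"
    # with ARG1 span (0, 0) and ARG2 span (2, 2) becomes "<<ARG1:bats>> carry {{ARG2:viruses}}";
    # if ARG1 appears after ARG2 the two spans are swapped first so the markers are inserted left to right.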
s_lst = sent.split(" ")
if arg1_start > arg2_start:
arg1_start, arg2_start = arg2_start, arg1_start
arg1_end, arg2_end = arg2_end, arg1_end
arg1_str, arg2_str = "{{ARG2:", "<<ARG1:"
else:
arg1_str, arg2_str = "<<ARG1:", "{{ARG2:"
s_with_args = s_lst[:arg1_start] + [arg1_str] + s_lst[arg1_start:arg1_end+1] + [">>"] + s_lst[arg1_end+1:arg2_start] + [arg2_str] + s_lst[arg2_start:arg2_end+1] + ["}}"] +s_lst[arg2_end+1:]
#s_with_args = s_lst[:arg1_start] + [arg1_str+s_lst[arg1_ind]] + s_lst[arg1_ind+1:arg2_ind] + [arg2_str+s_lst[arg2_ind]] + s_lst[arg2_ind+1:]
s_with_args = " ".join(s_with_args).replace("ARG1: ", "ARG1:").replace("ARG2: ", "ARG2:")
s_with_args = s_with_args.replace(" >>", ">>").replace(" }}", "}}")
return s_with_args
def prepare_example(sent1, arg1_sent1, arg2_sent1):
sent1 = add_arguments(sent1, arg1_sent1[0], arg1_sent1[1], arg2_sent1[0], arg2_sent1[1])
idx = [[[arg1_sent1[0], arg1_sent1[1]], [arg2_sent1[0], arg2_sent1[1]]], [[0, 1], [0, 1]]]
return sent1, np.array(idx)
def evaluate_model(spike_df, sents, model, k, max_ngrams = 5, num_examples = 200):
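    # For every result sentence, ranks all candidate n-gram spans by distance to the query's
    # averaged ARG1 / ARG2 representations (from get_query_rep) and records the ranked span
    # indices together with the token/word alignment maps needed to map them back to words.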
arg1_mean, arg2_mean = get_query_rep(spike_df, model, k = k)
preds = []
count = 0
for i in range(len(sents)):
with torch.no_grad():
bert_tokens, orig_to_tok_map, tok_to_orig_map, tokens_tensor = model.tokenize(sents[i].split(" "), add_sep = True, add_cls = False)
x = tokens_tensor
            idx_arg1_all, idx_arg2_all, all_ngrams, is_neg_pred = model.forward_with_loss_calculation_inference(x, arg1_mean, arg2_mean, n_max=max_ngrams)
            preds.append({"sent": sents[i], "tokens": bert_tokens, "tok2orig": tok_to_orig_map, "orig2tok": orig_to_tok_map,
"preds_arg1_tokens": idx_arg1_all, "preds_arg2_tokens": idx_arg2_all,
"all_ngrams": all_ngrams})
return preds
def get_query_rep(spike_df, model, k = 5):
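    # Builds the query-side argument representations: for the first k SPIKE rows, add the inline
    # argument markers, encode the sentence, average the token states inside the ARG1 / ARG2
    # spans, and finally average those vectors across the k sentences.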
query_sents = spike_df["sentence_text"].tolist()[:k]
query_arg1_starts = spike_df["arg1_first_index"][:k]
query_arg1_ends = spike_df["arg1_last_index"][:k]
query_arg2_starts = spike_df["arg2_first_index"][:k]
query_arg2_ends = spike_df["arg2_last_index"][:k]
arg1_vecs, arg2_vecs = [], []
for i in range(min(len(spike_df), k)):
sent1 = query_sents[i] # use first query in all examples.
arg1_sent1 = [query_arg1_starts[i], query_arg1_ends[i]]
arg2_sent1 = [query_arg2_starts[i], query_arg2_ends[i]]
sent1, idx = prepare_example(sent1, arg1_sent1, arg2_sent1)
bert_tokens, orig_to_tok_map, tok_to_orig_map, tokens_tensor = model.tokenize(sent1.split(" "), add_sep = False, add_cls = True)
with torch.no_grad():
x = torch.unsqueeze(tokens_tensor,0)
states = model.forward_pass(tokens_tensor, is_query = True)
sent1_range_arg1 = get_entity_range_multiword_expression(idx[0][0], orig_to_tok_map)
sent1_range_arg2 = get_entity_range_multiword_expression(idx[0][1], orig_to_tok_map)
sent1_arg1_vec, sent1_arg2_vec = states[sent1_range_arg1[0]:sent1_range_arg1[1]].mean(dim=0), states[sent1_range_arg2[0]:sent1_range_arg2[1]].mean(dim=0)
arg1_vecs.append(sent1_arg1_vec)
arg2_vecs.append(sent1_arg2_vec)
arg1_mean = torch.stack(arg1_vecs, dim = 0).mean(dim = 0)
arg2_mean = torch.stack(arg2_vecs, dim = 0).mean(dim = 0)
return arg1_mean, arg2_mean
def main(model, results_sents, spike_df, num_results, max_ngrams):
captures = []
captures_tuples = []
def pretty_print(sent, idx_arg1, idx_arg2):
sent_lst = sent.split(" ")
sent = " ".join(sent_lst[:idx_arg1[0]]) + " " + colored(" ".join(sent_lst[idx_arg1[0]:idx_arg1[1]]), "red") + " " + " ".join(sent_lst[idx_arg1[1]:])
sent_lst = sent.split(" ")
sent = " ".join(sent_lst[:idx_arg2[0]]) + " " + colored(" ".join(sent_lst[idx_arg2[0]:idx_arg2[1]]), "blue") + " " + " ".join(sent_lst[idx_arg2[1]:])
return sent
def perform_annotation(sent, arg_borders):
        def is_between(k, borders):
            return len([(s, e) for (s, e) in borders if s <= k < e]) != 0
sent_lst = sent.split(" ")
sent_new = []
arg_colors = ["#8ef", "#fea", "#faa", "#fea", "#8ef", "#afa", "#d8ff35", "#8c443b", "#452963"]
is_inside=False
for i, w in enumerate(sent_lst):
for arg in range(len(arg_borders)):
if is_between(i, [arg_borders[arg]]):
if not is_inside:
sent_new.append((w, "ARG{}".format(arg+1), arg_colors[arg]))
else:
sent_new.append((w, arg_colors[arg]))
is_inside = True
break
else:
is_inside = False
sent_new.append(" " + w + " ")
return sent_new
results_sents = results_sents[:num_results]
results = evaluate_model(spike_df, results_sents, model, k=5, max_ngrams = max_ngrams, num_examples = len(results_sents))
annotated = []
for p in results:
pred_arg1, pred_arg2 = p["preds_arg1_tokens"], p["preds_arg2_tokens"]
ngram_pred_arg1_idx, ngram_pred_arg2_idx = p["all_ngrams"][pred_arg1[0]], p["all_ngrams"][pred_arg2[0]]
arg1_start = p["tok2orig"][ngram_pred_arg1_idx[0]]
arg1_end = p["tok2orig"][ngram_pred_arg1_idx[1]]
arg2_start = p["tok2orig"][ngram_pred_arg2_idx[0]]
arg2_end = p["tok2orig"][ngram_pred_arg2_idx[1]]
sent = p["sent"]
sent_lst = sent.split(" ")
arg1_str = " ".join(sent_lst[arg1_start:arg1_end])
arg2_str = " ".join(sent_lst[arg2_start:arg2_end])
captures.append((arg1_str, arg2_str))
captures_tuples.append("{}; {}".format(arg1_str, arg2_str))
annotated_sent = perform_annotation(sent, [[arg1_start, arg1_end], [arg2_start, arg2_end]])
#annotated_sent = annotated_sent[p["l"]:]
annotated.append(annotated_sent)
# aggregate arguments
args1, args2 = list(zip(*captures))
arg1_counter, arg2_counter, tuples_counter = Counter(args1), Counter(args2), Counter(captures_tuples)
return annotated, arg1_counter.most_common(500), arg2_counter.most_common(500), tuples_counter.most_common(500)
| covid-sim-master | api/covid-ai2/alignment_supervised2.py |
#import bert
import numpy as np
from sklearn.metrics.pairwise import cosine_similarity
#import matplotlib.pyplot as plt
#import spike_queries
#from termcolor import colored
#import random
from collections import Counter, defaultdict
#from viterbi_trellis import ViterbiTrellis
import streamlit as st
from annot import annotation
import re
class color:
PURPLE = '\033[95m'
CYAN = '\033[96m'
DARKCYAN = '\033[36m'
BLUE = '\033[94m'
GREEN = '\033[92m'
YELLOW = '\033[93m'
RED = '\033[91m'
BOLD = '\033[1m'
UNDERLINE = '\033[4m'
END = '\033[0m'
def get_spike_results_arguments_representations(model, spike_results, layers, num_args):
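    # For each SPIKE result sentence, encode it with BERT and average the token vectors inside
    # every captured argument span; the per-argument vectors are then averaged over all sentences
    # to produce one prototype vector per argument.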
sents = spike_results["sentence_text"].tolist()
#arg1_idx_start = spike_results["arg1_first_index"].to_numpy().astype(int)
#arg2_idx_start = spike_results["arg2_first_index"].to_numpy().astype(int)
#arg1_idx_end = spike_results["arg1_last_index"].to_numpy().astype(int)
#arg2_idx_end = spike_results["arg2_last_index"].to_numpy().astype(int)
arguments_borders = []
for i in range(num_args):
start = spike_results["arg{}_first_index".format(i + 1)].to_numpy().astype(int)
end = spike_results["arg{}_last_index".format(i + 1)].to_numpy().astype(int)
arguments_borders.append((start, end))
args_rep = defaultdict(list)
for i,s in enumerate(sents):
if not type(s) == str: continue
H, _, _, orig2tok = model.encode(s, layers=layers)
for arg_ind in range(num_args):
start, end = arguments_borders[arg_ind][0][i], arguments_borders[arg_ind][1][i]
arg_vecs = H[orig2tok[start]:orig2tok[end] + 1]
arg_mean = np.mean(arg_vecs, axis = 0)
args_rep[arg_ind].append(arg_mean)
return [np.mean(args_rep[arg], axis = 0) for arg in range(num_args)]
def get_similarity_to_arguments(padded_representations, args_reps):
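    # Flattens the padded [num_sents, seq_len, dim] token states into one matrix, computes cosine
    # similarity of every token against every argument prototype, and reshapes the result back to
    # [num_args, num_sents, seq_len].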
num_sents, seq_len, bert_dim = padded_representations.shape
padded_representations = padded_representations.reshape((num_sents*seq_len, bert_dim))
#print(padded_representations.shape)
sims = cosine_similarity(args_reps, padded_representations)
sims = sims.reshape((len(args_reps), num_sents, seq_len))
return sims
def pad(representations):
    for i in range(len(representations)):  # overwrite the [CLS], '.' and [SEP] positions with a constant so they are never picked as argument alignments
representations[i][0][:] = np.random.rand()
representations[i][-1][:] = np.random.rand()
representations[i][-2][:] = np.random.rand()
pad_width = max([len(s) for s in representations])
padded_representations = np.array(
[np.concatenate([r, -np.ones((pad_width - len(r), 768))]) for r in representations])
return padded_representations
def get_probable_alignments(sims_args, mappings_to_orig):
    """
    :param sims_args: similarity to arguments per query, shape: [num_args, num_sents, padded_sent_len]
    :param mappings_to_orig: per-sentence mapping from BERT-token indices back to original word indices
    :return: dict mapping argument index -> sentence index -> list of (original word index, similarity) pairs, sorted by decreasing similarity
    """
    argument2sent2alignments = dict()
for arg in range(sims_args.shape[0]):
sent2alignments = dict()
for sent_ind in range(sims_args.shape[1]):
sorted_sims_idx = np.argsort(-sims_args[arg][sent_ind])
sorted_sims_vals = sims_args[arg][sent_ind][sorted_sims_idx]
sorted_sims_idx_mapped = [mappings_to_orig[sent_ind][j] if j in mappings_to_orig[sent_ind] else -1 for j in
sorted_sims_idx]
sent2alignments[sent_ind] = list(zip(sorted_sims_idx_mapped, sorted_sims_vals))
argument2sent2alignments[arg] = sent2alignments
return argument2sent2alignments
def print_nicely(sent, arg1_borders, arg2_borders):
def is_start(k, borders):
return len([(s, e) for (s, e) in borders if s == k]) != 0
def is_end(k, borders):
return len([(s, e) for (s, e) in borders if e == k]) != 0
sent_lst = sent.split(" ")
sent_new = []
for i, w in enumerate(sent_lst):
if is_start(i, arg1_borders) or is_start(i, arg2_borders):
type_arg = color.BLUE + "ARG1" if is_start(i, arg1_borders) else color.BLUE + "ARG2"
sent_new.append(color.BOLD + "[" + type_arg)
sent_new.append(w)
if is_end(i, arg1_borders) or is_end(i, arg2_borders):
# type_arg = color.BLUE + "ARG1" if is_end(i,arg1_borders) else "ARG2" + color.END
sent_new.append("]" + color.END)
return " ".join(sent_new)
def perform_annotation(sent, arg_borders):
def is_between(k, borders):
return len([(s, e) for (s, e) in borders if s <= k < e]) != 0
sent_lst = sent.split(" ")
sent_new = []
arg_colors = ["#8ef", "#fea", "#faa", "#fea", "#8ef", "#afa", "#d8ff35", "#8c443b", "#452963"]
for i, w in enumerate(sent_lst):
for arg in range(len(arg_borders)):
if is_between(i, [arg_borders[arg]]):
sent_new.append((w, "ARG{}".format(arg+1), arg_colors[arg]))
break
else:
sent_new.append(" " + w + " ")
return sent_new
def main(model, results_sents, spike_results, spike_query, layers, num_results):
arg2preds = {}
# count args
regex = re.compile("arg.:")
num_args = len(re.findall(regex, spike_query))
# represent args
args_reps = get_spike_results_arguments_representations(model, spike_results.head(num_results), layers, num_args)
representations = []
mappings_to_orig = []
mappings_to_tok = []
tokenized_txts = []
orig_sents = []
for i, s in enumerate(results_sents):
if not type(s) == str: continue
H, tokenized_text, tok_to_orig_map, orig2tok = model.encode(s, layers=layers)
orig_sents.append(s)
representations.append(H)
mappings_to_orig.append(tok_to_orig_map)
mappings_to_tok.append(orig2tok)
tokenized_txts.append(tokenized_text)
if i > num_results: break
#return (arg1_rep, arg2_rep), (representations, mappings_to_orig, mappings_to_tok, tokenized_txts, orig_sents)
padded_representations = pad(representations)
num_sents, seq_len, bert_dim = padded_representations.shape
num_tokens = num_sents * seq_len
sims_args = get_similarity_to_arguments(padded_representations, args_reps)
arguments2sent2alignments = get_probable_alignments(sims_args, mappings_to_orig)
for arg in range(num_args):
dicts = [{"sent": orig_sents[i], "pred_idx": list(zip(*arguments2sent2alignments[arg][i]))[0],
"preds_sims": list(zip(*arguments2sent2alignments[arg][i]))[1]} for i in range(num_sents)]
arg2preds[arg] = dicts
colored_sents = []
annotated_sents = []
for i in range(num_sents):
#arg1_dict, arg2_dict = arg2preds[0][i], arg2preds[1][i]
arg_dicts = [arg2preds[j][i] for j in range(num_args)]
sent = arg_dicts[0]["sent"]
#arg1_idx, arg2_idx = arg1_dict["pred_idx"][0], arg2_dict["pred_idx"][0]
arg_idx = [arg_dict["pred_idx"][0] for arg_dict in arg_dicts]
#colored_sent = print_nicely(sent, [(arg1_idx, arg1_idx+1)], [(arg2_idx, arg2_idx+1)])
#annotated_sents.append(perform_annotation(sent, [(arg1_idx, arg1_idx+1)], [(arg2_idx, arg2_idx+1)]))
borders = [(k,k+1) for k in arg_idx]
colored_sent = None#print_nicely(sent, borders)
annotated_sents.append(perform_annotation(sent, borders))
colored_sents.append(colored_sent)
return colored_sents, annotated_sents | covid-sim-master | api/covid-ai2/alignment.py |
import streamlit as st
import pandas as pd
import numpy as np
import pandas as pd
import faiss
import bert
from bert import BertEncoder
import pickle
import spike_queries
import sklearn
import time
from sklearn.cluster import KMeans as Kmeans
@st.cache(allow_output_mutation=True)
def load_sents_and_ids():
with st.spinner('Loading sentences and IDs...'):
df = pd.read_csv("covid-all-sents-2.4.csv", sep = "\t")
sents = df["sentence_text"].tolist()
ids = [hash(s) for s in sents]
id2ind = {ids[i]:i for i,s in enumerate(sents)}
ind2id = {i:ids[i] for i,s in enumerate(sents)}
return df, sents, ids, id2ind, ind2id
@st.cache(allow_output_mutation=True)
def load_index(similarity, pooling):
with st.spinner('Loading FAISS index...'):
fname = "output.new." + pooling + ".index"
index = faiss.read_index(fname)
return index
@st.cache(allow_output_mutation=True)
def load_bert():
with st.spinner('Loading BERT...'):
model = bert.BertEncoder("cpu")
return model
@st.cache(allow_output_mutation=True)
def load_pca(pooling):
fname = "output.new." + pooling + ".pca.pickle"
with open(fname, "rb") as f:
return pickle.load(f)
@st.cache(allow_output_mutation=True)
def perform_clustering(vecs, num_clusts):
kmeans = Kmeans(n_clusters = num_clusts, random_state = 0)
kmeans.fit(vecs)
return kmeans.labels_, kmeans.cluster_centers_
def build_html(first, results):
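    # Renders a cluster as a collapsible HTML <details> block: `first` (the sentence closest to
    # the centroid) is the summary line, and the remaining cluster members are listed underneath.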
s = "<details><summary>" + first + "</summary>"
s += "<ul>"
for result in results[1:]:
s += "<li>" + result + "</li>"
s += "</ul>"
s += "</details>"
return s
st.title('COVID-19 Clustering')
mode = "Sentence" #st.sidebar.radio("Mode", ("Sentence", "SPIKE-covid19"))
similarity = "dot product" #st.sidebar.selectbox('Similarity', ('dot product', "l2"))
pooling = "cls" #st.sidebar.selectbox('Pooling', ('cls', 'mean-cls'))
pd.set_option('display.max_columns', None)
pd.options.display.max_colwidth = 150
df, sents, ids, id2ind, ind2id = load_sents_and_ids()
print("len sents", len(sents))
index = load_index(similarity, pooling)
bert = load_bert()
pca = load_pca(pooling)
st.write("Uses {}-dimensional vectors".format(pca.components_.shape[0]))
if mode == "Sentence":
input_sentence = st.text_input('Enter a sentence for similarity search', 'The virus can spread rapidly via different transimission vectors.')
filter_by = "None" #st.selectbox('Filter results based on:', ('None', 'Boolean query', 'Token query', 'Syntactic query'))
query_type = "syntactic" if "syntactic" in filter_by.lower() else "boolean" if "boolean" in filter_by.lower() else "token" if "token" in filter_by.lower() else None
filter_by_spike = query_type is not None
if query_type == "syntactic":
filter_query = st.text_input('SPIKE query', 'The [subj:l coronavirus] [copula:w is] prevalent among [w:e bats].')
elif query_type == "boolean":
filter_query = st.text_input('SPIKE query', 'virus lemma=persist on')
elif query_type == "token":
filter_query = st.text_input('SPIKE query', 'novel coronavirus')
if query_type is not None:
filter_size = st.slider('Max number of results', 1, 10000, 3000)
results_df = spike_queries.perform_query(filter_query, dataset_name = "covid19", num_results = filter_size, query_type = query_type)
results_sents = np.array(results_df["sentence_text"].tolist())
results_ids = [hash(s) for s in results_sents]
st.write("Found {} matches".format(len(results_ids)))
else:
number_of_sentence_results = st.slider('Number of results', 1, 10000, 5000) #int(st.text_input('Number of results', 100))
#number_of_clusters = st.slider("Number of clusters", 2, 512, 50)
number_of_clusters = st.slider("Number of clusters", 2, 512 if query_type is None else len(results_ids), 50 if query_type is None else len(results_ids)//3)
show_results = True
start = st.button('Run')
if start:
if mode == "Sentence":
#input_sentence = st.text_input('Input sentence', 'The virus can spread rapidly via different transimission vectors.')
encoding = pca.transform(bert.encode([input_sentence], [1], batch_size = 1, strategy = pooling, fname = "dummy.txt", write = False))#.squeeze()
#st.write("filter by spike: ", filter_by_spike)
#st.write(encoding.shape)
#st.write(index.d)
show_all_for_each_cluster = []
if not filter_by_spike:
D,I = index.search(np.ascontiguousarray(encoding), number_of_sentence_results)
results_encodings = np.array([index.reconstruct(int(i)) for i in I.squeeze()])
results_sents = np.array([sents[i] for i in I.squeeze()])
clust_ids, clust_centroids = perform_clustering(results_encodings, number_of_clusters)
for clust_id in sorted(set(clust_ids.flatten())):
idx = clust_ids == clust_id
relevant = results_sents[idx]
relevant_vecs = results_encodings[idx]
dists_to_centroid = sklearn.metrics.pairwise_distances(relevant_vecs, [clust_centroids[clust_id]])[:,0]
idx_sorted = dists_to_centroid.argsort()
closest_sent = relevant[idx_sorted[0]]
st.subheader("Cluster {}".format(clust_id))
df_results = pd.DataFrame(relevant)
html = build_html(closest_sent, relevant[idx_sorted])
st.markdown(html, unsafe_allow_html = True)
#st.write(relevant[:3])
else:
encoding_of_spike_results = np.array([index.reconstruct(id2ind[i]) for i in results_ids if i in id2ind])
sims = sklearn.metrics.pairwise.cosine_similarity(encoding, encoding_of_spike_results)
idx_sorted = sims.argsort()[0]
spike_sents_sorted = results_sents[idx_sorted][::-1]
I = np.array([[id2ind[hash(s)] for s in spike_sents_sorted if hash(s) in id2ind]])
if show_results:
pass
#results = [sents[i] for i in I.squeeze()]
#st.write("Performed query of type '{}'. Similarity search results:".format(mode))
#st.write(st.table(results))
| covid-sim-master | api/covid-ai2/clustering-demo.py |
# FROM THE PACKAGE st-annotated-text https://github.com/tvst/st-annotated-text
import streamlit.components.v1
from htbuilder import HtmlElement, div, span, styles
from htbuilder.units import px, rem, em
def annotation(body, label="", background="#ddd", color="#333", **style):
"""Build an HtmlElement span object with the given body and annotation label.
The end result will look something like this:
[body | label]
Parameters
----------
body : string
The string to put in the "body" part of the annotation.
label : string
The string to put in the "label" part of the annotation.
background : string
The color to use for the background "chip" containing this annotation.
color : string
The color to use for the body and label text.
**style : dict
Any CSS you want to use to customize the containing "chip".
Examples
--------
Produce a simple annotation with default colors:
>>> annotation("apple", "fruit")
Produce an annotation with custom colors:
>>> annotation("apple", "fruit", background="#FF0", color="black")
Produce an annotation with crazy CSS:
>>> annotation("apple", "fruit", background="#FF0", border="1px dashed red")
"""
if "font_family" not in style:
style["font_family"] = "sans-serif"
return span(
style=styles(
background=background,
border_radius=rem(0.33),
color=color,
padding=(rem(0.17), rem(0.67)),
display="inline-flex",
justify_content="center",
align_items="center",
**style,
)
)(
body,
span(
style=styles(
color=color,
font_size=em(0.67),
opacity=0.5,
padding_left=rem(0.5),
text_transform="uppercase",
margin_bottom=px(-2),
)
)(label)
)
def annotated_text(*args):
"""Writes test with annotations into your Streamlit app.
Parameters
----------
*args : str, tuple or htbuilder.HtmlElement
Arguments can be:
- strings, to draw the string as-is on the screen.
- tuples of the form (main_text, annotation_text, background, color) where
background and foreground colors are optional and should be an CSS-valid string such as
"#aabbcc" or "rgb(10, 20, 30)"
- HtmlElement objects in case you want to customize the annotations further. In particular,
you can import the `annotation()` function from this module to easily produce annotations
whose CSS you can customize via keyword arguments.
Examples
--------
>>> annotated_text(
... "This ",
... ("is", "verb", "#8ef"),
... " some ",
... ("annotated", "adj", "#faa"),
... ("text", "noun", "#afa"),
... " for those of ",
... ("you", "pronoun", "#fea"),
... " who ",
... ("like", "verb", "#8ef"),
... " this sort of ",
... ("thing", "noun", "#afa"),
... )
>>> annotated_text(
... "Hello ",
... annotation("world!", "noun", color="#8ef", border="1px dashed red"),
... )
"""
out = div(style=styles(
font_family="sans-serif",
line_height="1.5",
font_size=px(16),
))
for arg in args:
if isinstance(arg, str):
out(arg)
elif isinstance(arg, HtmlElement):
out(arg)
elif isinstance(arg, tuple):
out(annotation(*arg))
else:
raise Exception("Oh noes!")
streamlit.components.v1.html(str(out))
| covid-sim-master | api/covid-ai2/annot.py |
import torch
from transformers import BertTokenizer, BertModel, BertForMaskedLM, BertConfig, RobertaModel, RobertaForMaskedLM, \
RobertaTokenizer, RobertaConfig
from transformers import AutoTokenizer, AutoModel, AutoConfig
#from transformers import AlbertTokenizer, AlbertModel, AlbertConfig
#from transformers import XLNetTokenizer, XLNetModel, XLNetConfig
#import numpy as np
from typing import List, Tuple, Dict
#import tqdm
class BertEncoder(object):
def __init__(self, device='cpu', model="bert"):
if model == "bert":
config = BertConfig.from_pretrained("bert-base-uncased", output_hidden_states=True)
self.tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
self.model = BertForMaskedLM.from_pretrained('bert-base-uncased', config=config)
elif model == "scibert":
config = AutoConfig.from_pretrained('allenai/scibert_scivocab_uncased', output_hidden_states=True)
self.tokenizer = AutoTokenizer.from_pretrained('allenai/scibert_scivocab_uncased')
self.model = AutoModel.from_pretrained('allenai/scibert_scivocab_uncased', config=config)
self.model.eval()
self.model.to(device)
self.device = device
    def tokenize(self, original_sentence: List[str]) -> Tuple[List[str], Dict[int, int], Dict[int, int]]:
        """
        Parameters
        ----------
        original_sentence: The whitespace-tokenized input sentence.

        Returns
        -------
        bert_tokens: The sentence, tokenized by BERT tokenizer.
        orig_to_tok_map: An output dictionary consisting of a mapping (alignment) between indices in the original tokenized sentence, and indices in the sentence tokenized by the BERT tokenizer. See https://github.com/google-research/bert
        tok_to_orig_map: The inverse mapping, from BERT token indices back to original word indices.
"""
bert_tokens = ["[CLS]"]
orig_to_tok_map = {}
tok_to_orig_map = {}
has_subwords = False
is_subword = []
for i, w in enumerate(original_sentence):
tokenized_w = self.tokenizer.tokenize(w)
has_subwords = len(tokenized_w) > 1
is_subword.append(has_subwords)
bert_tokens.extend(tokenized_w)
orig_to_tok_map[i] = len(bert_tokens) - 1
tok_to_orig_map = {}
keys = list(sorted(orig_to_tok_map.keys()))
for k, k2 in zip(keys, keys[1:]):
for k3 in range(orig_to_tok_map[k], orig_to_tok_map[k2]):
tok_to_orig_map[k3] = k
bert_tokens.append("[SEP]")
return (bert_tokens, orig_to_tok_map, tok_to_orig_map)
def encode(self, sentence: str, layers: List[int]):
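        # Runs the model with output_hidden_states=True, concatenates the requested hidden layers
        # along the feature dimension, and returns per-token vectors together with the token/word
        # alignment maps. Illustrative call (layer indices are assumptions):
        #     H, toks, tok2orig, orig2tok = encoder.encode("the virus spreads", layers=[-1])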
tokenized_text, orig2tok, tok_to_orig_map = self.tokenize(sentence.split(" "))
# pos_ind_bert = orig2tok[pos_ind]
# if np.random.random() < mask_prob:
# tokenized_text[pos_ind_bert] = self.mask
indexed_tokens = self.tokenizer.convert_tokens_to_ids(tokenized_text)
tokens_tensor = torch.tensor([indexed_tokens]).to(self.device)
with torch.no_grad():
outputs = self.model(tokens_tensor)
all_layers = outputs[-1]
layers_concat = torch.cat([all_layers[l] for l in layers], dim=-1)
return layers_concat[0].detach().cpu().numpy(), tokenized_text, tok_to_orig_map, orig2tok | covid-sim-master | api/covid-ai2/bert_all_seq.py |
import streamlit as st
import pandas as pd
import numpy as np
import pandas as pd
import faiss
import bert
from bert import BertEncoder
import pickle
import spike_queries
import sklearn
import random
import time
import alignment
import bert_all_seq
#import alignment_supervised2 as alignment_supervised
import alignment_supervised
from annot import annotation, annotated_text
import SessionState
import scipy.linalg
NUM_RESULTS_TO_ALIGN_DEFAULT = 50
DEFAULT_MAX_NGRAM = 5
BOOLEAN_QUERY_DEFAULT = "virus lemma=originate"
TOKEN_QUERY_DEFAULT = "novel coronavirus"
SYNTACTIC_QUERY_DEFAULT = "a1:[w]COVID-19 $causes a2:something" #"arg1:[e]paracetamol is the recommended $treatment for arg2:asthma."
SPIKE_RESULTS_DEFAULT = 75
must_include = ""
import base64
import plotly.graph_objects as go
from collections import Counter
st.set_page_config(layout="wide")
st.markdown(
f'''
<style>
.sidebar .sidebar-content {{
width: 375px;
}}
</style>
''',
unsafe_allow_html=True
)
st.markdown(
"""<style>
.dataframe {text-align: left !important}
</style>
""", unsafe_allow_html=True)
def plotly_table(results, title):
style=True
filter_table = results# _filter_results(results, number_of_rows, number_of_columns)
header_values = list(filter_table.columns)
cell_values = []
for index in range(0, len(filter_table.columns)):
cell_values.append(filter_table.iloc[:, index : index + 1])
if not style:
fig = go.Figure(
data=[
go.Table(
header=dict(values=header_values), cells=dict(values=cell_values)
)
]
)
else:
fig = go.Figure(
data=[
go.Table(
header=dict(
values=header_values, fill_color="paleturquoise", align="left"
),
cells=dict(values=cell_values, fill_color="lavender", align="left"),
)
]
)
with st.beta_expander(title):
st.plotly_chart(fig)
def print_spike_results(results, arg1_lst, arg2_lst, title):
st.markdown("<h3>{}</h3>".format(title), unsafe_allow_html = True)
html = """<ul>"""
for s,arg1,arg2 in zip(results,arg1_lst,arg2_lst):
arg1_first_idx,arg1_last_index = arg1
arg2_first_idx,arg2_last_index = arg2
arg1_str = s[arg1_first_idx:arg1_last_index]
arg2_str = s[arg2_first_idx:arg2_last_index]
arg1 = "<font color=‘orange’>{}</font>".format(arg1_str)
arg2 = "<font color=‘cyan’>{}</font>".format(arg2_str)
if arg1_first_idx > arg2_first_idx:
arg1,arg2 = arg2.replace("cyan","orange"), arg1.replace("cyan","orange")
arg1_first_idx,arg1_last_index, arg2_first_idx,arg2_last_index = arg2_first_idx,arg2_last_index, arg1_first_idx,arg1_last_index
#s = s[:arg1_first_idx] + " " + arg1 + s[arg1_last_index:arg2_first_idx] + " " + arg2 + s[arg2_last_index:]
html+= "<li>{}</li>".format(s)
html+="</ul>"
st.markdown(html, unsafe_allow_html = True)
@st.cache(allow_output_mutation=True)
def load_sents_and_ids():
with st.spinner('Loading sentences and IDs...'):
#df = pd.read_csv("data/results.tsv", sep = "\t")
#sents = df["sentence_text"].tolist()
with open("data/sents.txt", "r", encoding = "utf-8") as f:
sents = f.readlines()
sents = [s.strip() for s in sents]
ids = [hash(s) for s in sents]
id2ind = {ids[i]:i for i,s in enumerate(sents)}
ind2id = {i:ids[i] for i,s in enumerate(sents)}
return sents, ids, id2ind, ind2id
@st.cache(allow_output_mutation=True)
def load_index(similarity, pooling):
with st.spinner('Loading FAISS index...'):
fname = "data/output-" + pooling + ".index"
index = faiss.read_index(fname)
return index
@st.cache(allow_output_mutation=True)
def load_bert():
with st.spinner('Loading BERT...'):
model = bert.BertEncoder("cpu")
return model
@st.cache(allow_output_mutation=True)
def load_bert_all_seq():
with st.spinner('Loading BERT...'):
model = bert_all_seq.BertEncoder("cpu")
return model
@st.cache(allow_output_mutation=True)
def load_bert_alignment_supervised():
with st.spinner('Loading BERT...'):
model = alignment_supervised.BertModel("cpu")
return model
@st.cache(allow_output_mutation=True)
def load_pca(pooling):
fname = "data/output-" + pooling + ".pca.pickle"
with open(fname, "rb") as f:
return pickle.load(f)
@st.cache(allow_output_mutation=True)
def encode(input_sentence, pca, bert, pooling):
return pca.transform(bert.encode([input_sentence], [1], batch_size = 1, strategy = pooling, fname = "dummy.txt", write = False))
def zero_input():
input_sentence = placeholder.text_input('Enter a sentence for similarity search', value="", key = random.randint(0,int(1e16)))
def write_results_menu(results, session_state, keys="random"):
cols = st.beta_columns((8,1,1))
cols[0].markdown("<b>Sentence</b>", unsafe_allow_html = True)
cols[1].markdown("<b>Enhance?</b>", unsafe_allow_html = True)
cols[2].markdown("<b>Decrease?</b>", unsafe_allow_html = True)
for i in range(min(len(results), 50)):
if len(results[i]) < 3: continue
cols[0].write(results[i])
enhance = cols[1].checkbox('✓', key = "en"+str(i) if keys=="normal" else random.randint(0,int(1e16)),value=False)
decrease = cols[2].checkbox('✗', key = "de"+str(i) if keys == "normal" else random.randint(0,int(1e16)),value=False)
hash_val = hash(results[i])
if enhance:
#st.write("added sentence {}".format(results[i]))
session_state.enhance.add(hash_val)
else:
#st.write("removed sentence {}".format(results[i]))
if hash_val in session_state.enhance: session_state.enhance.remove(hash_val)
if decrease:
session_state.decrease.add(hash(results[i]))
else:
if hash_val in session_state.decrease: session_state.decrease.remove(hash_val)
def project_out(positive, negative):
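    # Experimental helper (not called in the current flow): P projects onto the span of the positive
    # vectors, and the component of the negatives orthogonal to that span is subtracted from the
    # positives. Uses scipy.linalg.orth.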
positive,negative = np.array(positive), np.array(negative)
pos_basis = scipy.linalg.orth(positive.T)
P = pos_basis.dot(pos_basis.T)
st.write(P.shape, negative.shape, positive.shape)
negative_different = negative - negative@P
return positive - negative_different
def get_table_download_link(df):
"""Generates a link allowing the data in a given panda dataframe to be downloaded
in: dataframe
out: href string
"""
csv = df.to_csv(index=False, sep = "\t")
b64 = base64.b64encode(csv.encode()).decode() # some strings <-> bytes conversions necessary here
href = f'<a href="data:file/csv;base64,{b64}">Download csv file</a>'
return href
st.title('COVID-19 Similarity Search')
RESULT_FILTREATION = False
#a = st.empty()
mode = "Start with Query" #st.sidebar.radio("Mode", ("Start with Sentence", "Start with Query"))
similarity = "dot product" #st.sidebar.selectbox('Similarity', ('dot product', "l2"))
pooling = "cls"# st.sidebar.selectbox('Pooling', ('cls', 'mean-cls'))
to_decrease, to_enhance = [], []
session_state = SessionState.get(start=False, enhance=set(), decrease=set(), interactive = False, started = False, vec=None, current_query="")
sents, ids, id2ind, ind2id = load_sents_and_ids()
print("len sents", len(sents))
index = load_index(similarity, pooling)
bert = load_bert()
bert_all_seq = load_bert_all_seq()
bert_alignment_supervised = load_bert_alignment_supervised()
pca = load_pca(pooling)
my_expander = st.beta_expander("How to query?")
my_expander.markdown("""Start by writing a query that aims to capture a relation between two entities.
<ul>
<li>Use <b><font color='blue'>$</font></b> or <b><font color='blue'>:[w]</font></b> to mark words that <b>must appear</b>. </li>
<li>Mark the <b>arguments</b> with <b><font color='orange'>a1:</font></b> and <b><font color='orange'>a2:</font></b> </li>
<li>Mark with <b><font color='brown'>:</font></b> additional captures that fix the required syntactic structure. </li>
</ul>
For instance, in the query '<b><font color='orange'>a1:[w]</font></b>COVID-19 <b><font color='blue'>$</font></b>causes <b><font color='orange'>a2:</font></b>pain', we search for sentences where the syntactic relation between the first and second argument is the same as the relation between `COVID-19` and `pain` in this sentence (subject-object relation). We further request an exact match for the word `causes` and the argument `COVID-19`. <br> For more details on the query language, check out
<a href="https://spike.covid-19.apps.allenai.org/datasets/covid19/search/help">this</a> tutorial.""", unsafe_allow_html=True)
#st.write("Uses {}-dimensional vectors".format(pca.components_.shape[0]))
#st.write("Number of indexed sentences: {}".format(len(sents)))
print("Try accessing the demo under localhost:8080 (or the default port).")
if mode == "Start with Query":
query_type = "Syntactic" #st.radio("Query type", ("Boolean", "Token", "Syntactic"))
query_type = query_type.lower()
if query_type == "syntactic":
input_query = st.text_input('Query to augment', SYNTACTIC_QUERY_DEFAULT)
input_query = input_query.replace("a1:", "arg1:").replace("a2:", "arg2:")
max_results = 100 #st.slider('Max number of SPIKE results', 1, 1000, SPIKE_RESULTS_DEFAULT)
max_number_of_augmented_results = 1000 #st.slider('Number of Augmented results', 1, 250000, 1000)
if query_type == "syntactic":
perform_alignment = True #st.checkbox("Perform argument alignment", value=True, key=None)
if perform_alignment:
number_of_sentences_to_align = 50 #st.select_slider('Number of sentences to align.', options=[1, 10, 25, 50, 100, 200, 250, 500], value = NUM_RESULTS_TO_ALIGN_DEFAULT)
alignment_method = "Metric model" #st.radio("Alignment model", ('Metric model', 'Naive'))
if alignment_method != "Naive":
max_ngrams = 5 #st.select_slider('Maximum span size to align', options=[1, 2, 3, 4, 5, 6, 7, 8,9,10,11,12,13,14,15], value = DEFAULT_MAX_NGRAM)
must_include = st.text_input('Get only results containing the following words', '')
# #filter_by = st.selectbox('Filter results based on:', ('None', 'Boolean query', 'Token query', 'Syntactic query'))
# query_type_filtration = "syntactic" if "syntactic" in filter_by.lower() else "boolean" if "boolean" in filter_by.lower() else "token" if "token" in filter_by.lower() else None
# filter_by_spike = query_type_filtration is not None
# if filter_by_spike:
# message = "Get only results NOT captured by this query"
# if query_type_filtration == "syntactic":
# filter_query = st.text_input(message, SYNTACTIC_QUERY_DEFAULT)
# elif query_type_filtration == "boolean":
# filter_query = st.text_input(message, BOOLEAN_QUERY_DEFAULT)
# elif query_type_filtration == "token":
# filter_query = st.text_input(message, TOKEN_QUERY_DEFAULT)
# filtration_batch_size = st.slider('Filtration batch size', 1, 250, 50)
# RESULT_FILTREATION = True
show_results = True
start = st.button('Run')
#st.write("Current query: {}".format(session_state.current_query))
if start:
session_state.started = True
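# Main flow: run the SPIKE query, average the indexed vectors of the returned sentences, use that
# mean vector for FAISS similarity search, and (for syntactic queries) align the query arguments
# inside the retrieved sentences.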
if (start or session_state.start) and session_state.started:
if mode == "Start with Query":
with st.spinner('Performing SPIKE query...'):
#st.write("Performing query '{}'".format(input_query))
results_df = spike_queries.perform_query(input_query, dataset_name = "covid19", num_results = max_results, query_type = query_type)
results_sents = results_df["sentence_text"].tolist()
results_sents = list(set(results_sents))
results_ids = [hash(s) for s in results_sents] #results_df["sentence_id"].tolist()
#st.write("Found {} matches".format(len(results_ids)))
if len(results_sents) > 0:
#st.write("First sentences retrieved:")
#st.table(results_sents[:10])
            print_spike_results(results_sents[:10], list(zip(results_df["arg1_first_index"], results_df["arg1_last_index"])), list(zip(results_df["arg2_first_index"], results_df["arg2_last_index"])), title = "First Sentences Retrieved:")
st.markdown("<h3>Neural Similarity Search Results:</h3>", unsafe_allow_html = True)
encoding = np.array([index.reconstruct(id2ind[i]) for i in results_ids if i in id2ind])
if encoding.shape[0] > 0:
with st.spinner('Retrieving similar sentences...'):
encoding = np.mean(encoding, axis = 0)
D,I = index.search(np.ascontiguousarray([encoding]).astype("float32"), max_number_of_augmented_results)
result_sents = [sents[i].replace("/","-") for i in I.squeeze()]
results_set = set()
result_sents_clean = []
for s in result_sents:
if s not in results_set:
results_set.add(s)
result_sents_clean.append(s)
result_sents = result_sents_clean
if must_include != "":
result_sents = [sents[i].replace("/","-") for i in I.squeeze() if must_include in sents[i]]
if query_type == "syntactic" and perform_alignment:
with st.spinner('Performing argument alignment...'):
#colored_sents, annotated_sents= alignment.main(bert_all_seq, result_sents, results_df, input_query, [-1], NUM_RESULTS_TO_ALIGN)
if alignment_method == "Naive":
colored_sents, annotated_sents = alignment.main(bert_all_seq, result_sents, results_df, input_query, [-1], number_of_sentences_to_align)
else:
annotated_sents, arg1_items, arg2_items, tuples_items, captures_tuples = alignment_supervised.main(bert_alignment_supervised, result_sents, results_df, number_of_sentences_to_align, max_ngrams+1)
tuples_items = [(t[0], t[1], count) for t, count in tuples_items]
arg1_counts_df = pd.DataFrame(arg1_items, columns =['ARG1', 'count'])
arg2_counts_df = pd.DataFrame(arg2_items, columns =['ARG2', 'count'])
tuples_counts_df = pd.DataFrame(tuples_items, columns =['ARG1', 'ARG2', 'count'])
captures_df = pd.DataFrame.from_records(captures_tuples, columns =['ARG1', 'ARG2'])
captures_df["sentence"] = result_sents[:len(captures_tuples)]
plotly_table(arg1_counts_df.head(50), "Argument 1 Aggregation")
plotly_table(arg2_counts_df.head(50), "Argument 2 Aggregation")
plotly_table(tuples_counts_df.head(50), "Tuples Aggregation")
#st.sidebar.write('ARG1 Aggregation:')
#st.sidebar.write(arg1_counts_df.head(30))
#st.sidebar.write('ARG2 Aggregation:')
#st.sidebar.write(arg2_counts_df.head(30))
#st.sidebar.write('Tuples Aggregation:')
#st.sidebar.write(tuples_counts_df.head(30))
st.markdown(get_table_download_link(captures_df), unsafe_allow_html=True) # download augmented results
for s in annotated_sents:
annotated_text(*s)
else:
show_results = False
st.write("SPIKE search results are not indexed.")
else:
show_results = False
            st.write("No results found.")
# if show_results:
# pass
# results = [sents[i] for i in I.squeeze() if must_include in sents[i]]
# if RESULT_FILTREATION:
# results = result_sents
# st.write("Performed query of type '{}'. Similarity search results:".format(mode))
# st.write(st.table(results))
| covid-sim-master | api/covid-ai2/demo2.py |
import pandas as pd
import tqdm
import pickle
import random
import itertools
import torch
from transformers import BertTokenizer
from torch.utils.data import TensorDataset, DataLoader, RandomSampler, SequentialSampler
from transformers import AutoTokenizer, AutoModel, AutoConfig
from transformers import BertForSequenceClassification, AdamW, BertConfig, BertModel, AutoTokenizer, AutoModel
import numpy as np
from typing import List
from torch import nn
import pytorch_lightning as pl
from pytorch_lightning import Trainer
from torch.utils.data import DataLoader
from torch.utils.data import Dataset
from typing import Dict, Tuple
from scipy.spatial.distance import cosine as cosine_distance
from collections import defaultdict
from nltk import ngrams as get_ngrams
from termcolor import colored
from torch.optim.lr_scheduler import ReduceLROnPlateau
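# Lightning module wrapping SciBERT (or the pretrained 'Shauli/RE-metric-model-spike' checkpoint)
# with a 64-d projection head. It is trained with a triplet-style metric loss that pulls matching
# argument spans of two same-relation sentences together, plus a binary head ("same_rel_mlp") that
# predicts whether a sentence pair expresses the same relation.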
class BertModel(pl.LightningModule):
def __init__(self, train_dataset: Dataset, dev_dataset: Dataset, batch_size, device: str, mode: str = "eval", alpha=0.1, lr = 1e-4, momentum=0.5, l2_loss = False, same_rel_weight = 1, pretrained = True, train_only_linear=True):
super().__init__()
self.device_to_use = device
if not pretrained:
config = AutoConfig.from_pretrained('allenai/scibert_scivocab_uncased', output_hidden_states=True)
self.tokenizer = AutoTokenizer.from_pretrained('allenai/scibert_scivocab_uncased')
self.model = AutoModel.from_pretrained('allenai/scibert_scivocab_uncased', config=config)
else:
print("loading pretrained model")
config = AutoConfig.from_pretrained('Shauli/RE-metric-model-spike', output_hidden_states=True)
self.model = AutoModel.from_pretrained('Shauli/RE-metric-model-spike', config=config)
self.tokenizer = AutoTokenizer.from_pretrained('Shauli/RE-metric-model-spike')
self.train_dataset = train_dataset
self.dev_dataset = dev_dataset
self.linear_arg1_1 = torch.nn.Linear(768, 64) #torch.load("finetuned_model/metric_model/linear.pt") #torch.nn.Linear(768, 64)
if pretrained:
self.linear_arg1_1.load_state_dict(torch.load("linear.pt", map_location = torch.device('cpu')))
#if pretrained:
# self.linear_arg1_1.load_state_dict(torch.load("finetuned_model/metric_model/linear.pt"))
self.same_rel_mlp = torch.nn.Sequential(*[torch.nn.Linear(768, 1)])#, torch.nn.ReLU(), torch.nn.Linear(128, 1)])
#if pretrained:
# self.same_rel_mlp.load_state_dict(torch.load("finetuned_model/metric_model/same_rel_mlp.pt"))
self.bce_loss = torch.nn.BCEWithLogitsLoss()
self.alpha = alpha
self.lr = lr
self.train_only_linear = train_only_linear
self.l2_loss = l2_loss
self.momentum = momentum
self.same_rel_weight = same_rel_weight
if mode == "eval":
self.model.eval()
else:
self.model.train()
for p in self.model.parameters():
p.requires_grad = True
#if len(p.shape) == 1:
# p.requires_grad = True
for p in self.model.embeddings.parameters():
p.requires_grad = True
#for p in self.model.encoder.layer[-1].parameters():
# p.requires_grad = True
#for p in self.model.encoder.layer[-2].parameters():
# p.requires_grad = True
#for p in self.model.encoder.layer[-3].parameters():
# p.requires_grad = True
self.train_gen = torch.utils.data.DataLoader(self.train_dataset, batch_size=batch_size, drop_last=False, shuffle=True,
num_workers = 4)
self.dev_gen = torch.utils.data.DataLoader(self.dev_dataset, batch_size=batch_size, drop_last=False, shuffle=False,
num_workers = 4)
self.acc = None
self.total = 0
self.total_same_rel = 0
self.count_same_rel = 0
self.count = 0
def tokenize(self, original_sentence: List[str]) -> Tuple[List[str], Dict[int, int]]:
"""
Parameters
----------
Returns
-------
bert_tokens: The sentence, tokenized by BERT tokenizer.
orig_to_tok_map: An output dictionary consisting of a mapping (alignment) between indices in the original tokenized sentence, and indices in the sentence tokenized by the BERT tokenizer. See https://github.com/google-research/bert
"""
bert_tokens = ["[CLS]"]
orig_to_tok_map = {}
tok_to_orig_map = {}
has_subwords = False
is_subword = []
for i, w in enumerate(original_sentence):
tokenized_w = self.tokenizer.tokenize(w)
has_subwords = len(tokenized_w) > 1
is_subword.append(has_subwords)
bert_tokens.extend(tokenized_w)
orig_to_tok_map[i] = len(bert_tokens) - 1
tok_to_orig_map = {}
bert_tokens.append("[SEP]")
tok_to_orig_map = get_tok_to_orig_map(orig_to_tok_map, len(original_sentence), len(bert_tokens))
indexed_tokens = self.tokenizer.convert_tokens_to_ids(bert_tokens)
tokens_tensor = torch.tensor([indexed_tokens]).to(self.device_to_use)
return (bert_tokens, orig_to_tok_map, tok_to_orig_map, tokens_tensor)
def forward(self, x):
outputs = self.model(x)
states = outputs[0][0] #[seq_len, 768]
return states
def forward_with_loss_calculation(self, bert_tokens, x, range_sent1, range_sent2, orig_to_tok_map, l, l_tokens,
metric = "l2", n_max = 9, mode = "train", normalize=False, nb=0, h = None):
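        # Encode the concatenated sentence pair, project token states to 64-d, and mean-pool the gold
        # argument spans as well as negative n-gram spans. The loss is a margin loss (l2) or a softmax
        # triplet over sampled hard negatives; in "eval" mode all n-grams of the second sentence are
        # additionally ranked against the first sentence's argument vectors.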
idx_arg1_all, idx_arg2_all, all_ngrams = None, None, None
if h is None:
if self.train_only_linear:
with torch.no_grad():
outputs = self.model(x)
else:
outputs = self.model(x)
states = outputs[0][0] #[seq_len, 768]
else:
states = h
if not self.l2_loss or normalize:
states = states / (torch.norm(states, dim = 1, keepdim = True)+1e-8)
is_neg_pred = self.same_rel_mlp(states[0])
states = self.linear_arg1_1(states)
arg1_sent1, arg2_sent1 = range_sent1
arg1_sent2, arg2_sent2 = range_sent2
sent1_arg1_vec, sent1_arg2_vec = states[arg1_sent1[0]:arg1_sent1[1]].mean(dim=0), states[arg2_sent1[0]:arg2_sent1[1]].mean(dim=0)
sent2_arg1_vec, sent2_arg2_vec = states[arg1_sent2[0]:arg1_sent2[1]].mean(dim=0), states[arg2_sent2[0]:arg2_sent2[1]].mean(dim=0)
all_false_ngrams_ranges = get_all_ngrams_spans(len(states), [arg1_sent1, arg1_sent2, arg2_sent1, arg2_sent2], start_ind = 0,
n_max = n_max)
negatives = [states[ngram[0]:ngram[1]].mean(dim=0) for ngram in all_false_ngrams_ranges]
negatives_arg1 = negatives + [sent1_arg2_vec, sent2_arg2_vec]
negatives_arg2 = negatives + [sent1_arg1_vec, sent2_arg1_vec]
negatives_arg1 = torch.stack(negatives_arg1).to(self.device_to_use)
negatives_arg2 = torch.stack(negatives_arg2).to(self.device_to_use)
if mode == "eval":
all_ngrams = get_all_ngrams_spans(len(states), [], start_ind = l_tokens,
n_max = n_max)
ngrams = [states[ngram[0]:ngram[1]].mean(dim=0) for ngram in all_ngrams]
ngrams = torch.stack(ngrams).to(self.device_to_use)
if self.l2_loss:
dists_arg1 = torch.sqrt(((negatives_arg1-sent1_arg1_vec)**2).sum(dim = 1))
dists_arg2 = torch.sqrt(((negatives_arg2-sent1_arg2_vec)**2).sum(dim = 1))
dist_arg1_gold = (sent1_arg1_vec - sent2_arg1_vec).norm()
dist_arg2_gold = (sent1_arg2_vec - sent2_arg2_vec).norm()
if mode == "eval":
dist_arg1_all = torch.sqrt(((ngrams-sent1_arg1_vec)**2).sum(dim = 1))
dist_arg2_all = torch.sqrt(((ngrams-sent1_arg2_vec)**2).sum(dim = 1))
idx_arg1_all = torch.argsort(dist_arg1_all).detach().cpu().numpy()
idx_arg2_all = torch.argsort(dist_arg2_all).detach().cpu().numpy()
else:
dists_arg1 = 1 - negatives_arg1@sent1_arg1_vec.T
dists_arg2 = 1 - negatives_arg2@sent1_arg2_vec.T
dist_arg1_gold = 1 - sent1_arg1_vec@sent2_arg1_vec.T
dist_arg2_gold = 1 - sent1_arg2_vec@sent2_arg2_vec.T
idx_arg1 = torch.argsort(dists_arg1).detach().cpu().numpy()
idx_arg2 = torch.argsort(dists_arg2).detach().cpu().numpy()
l = max(int(len(negatives)*0.3),1)
k = random.choice(range(min(len(negatives), 2))) if np.random.random() < 0.5 else random.choice(range(l))
dist_arg1_argmax = dists_arg1[idx_arg1[k]]
dist_arg2_argmax = dists_arg2[idx_arg2[k]]
if self.l2_loss:
loss_arg1 = torch.max(torch.zeros(1).to(self.device_to_use), dist_arg1_gold - dist_arg1_argmax + self.alpha)
loss_arg2 = torch.max(torch.zeros(1).to(self.device_to_use), dist_arg2_gold - dist_arg2_argmax + self.alpha)
# softmax triplet
else:
z = torch.max(dist_arg1_argmax, dist_arg1_gold)
temp = 1
pos_arg1 = torch.exp((dist_arg1_gold - z)/temp)
neg_arg1 = torch.exp((dist_arg1_argmax - z)/temp)
loss_arg1 = (pos_arg1 / (pos_arg1 + neg_arg1))#**2
z = torch.max(dist_arg2_argmax, dist_arg2_gold)
pos_arg2 = torch.exp((dist_arg2_gold - z)/temp)
neg_arg2 = torch.exp((dist_arg2_argmax - z)/temp)
loss_arg2 = (pos_arg2 / (pos_arg2 + neg_arg2))#**2
loss = states[0,0:1]**2 #torch.zeros(1).to(self.device_to_usedevice)
loss2_isnan = np.isnan(loss_arg2.detach().cpu().numpy().item())
loss1_isnan = np.isnan(loss_arg1.detach().cpu().numpy().item())
if not loss2_isnan:
loss += loss_arg2
if not loss1_isnan:
loss += loss_arg1
if loss1_isnan or loss2_isnan:
print("ERROR: nan loss", loss1_isnan, loss2_isnan, nb)
return
self.total += 1
#if loss.detach().cpu().numpy().item() < 1e-5:
if (dist_arg1_gold - dist_arg1_argmax).detach().cpu().numpy().item() < 0 and (dist_arg2_gold - dist_arg2_argmax).detach().cpu().numpy().item() < 0:
self.count += 1
return loss, idx_arg1, idx_arg2, idx_arg1_all, idx_arg2_all, all_false_ngrams_ranges, all_ngrams, is_neg_pred
#return loss, np.argsort(dists_arg1+mask_gold_arg1)
def forward_batch(self, batch):
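        # Tokenize the raw concatenated sentences of the batch and run BERT once (without gradients),
        # returning the model outputs together with the padded input ids and the attention mask.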
tokenizer = self.tokenizer
sents = [s.split(" ") for s in batch[0]]
batch_encoded = tokenizer(list(sents), is_split_into_words = True, padding = True)
input_ids = torch.tensor(batch_encoded["input_ids"])
att_mask = torch.tensor(batch_encoded["attention_mask"])
with torch.no_grad():
outputs = self.model(input_ids = input_ids, attention_mask = att_mask)
return outputs, batch_encoded["input_ids"], att_mask
def training_step(self, batch, batch_nb):
outputs, input_ids, att_mask = self.forward_batch(batch)
H = outputs[0]
batch_loss = torch.zeros(1)
sents_concat, idx_all, l, sent2_with_args, is_negative, id1, id2 = batch
for batch_idx in range(len(batch[0])):
length = input_ids[batch_idx].index(0) if 0 in input_ids[batch_idx] else len(input_ids[batch_idx])
#for sents_concat, idx, l, sent2_with_args, is_negative, id1, id2 in batch:
print(f"batch index {batch_idx}/{len(batch[0])}")
idx = idx_all.detach().cpu().numpy()[batch_idx]
bert_tokens, orig_to_tok_map, tok_to_orig_map, tokens_tensor = self.tokenize(sents_concat[batch_idx].split(" "))
self.total_same_rel += 1
if not is_negative[batch_idx]:
l_tokens = len(bert_tokens[:orig_to_tok_map[l[batch_idx].detach().cpu().numpy().item()-1]])
sent1_range_arg1 = get_entity_range_multiword_expression(idx[0][0], orig_to_tok_map)
sent1_range_arg2 = get_entity_range_multiword_expression(idx[0][1], orig_to_tok_map)
sent2_range_arg1 = get_entity_range_multiword_expression(idx[1][0], orig_to_tok_map)
sent2_range_arg2 = get_entity_range_multiword_expression(idx[1][1], orig_to_tok_map)
range_sent1 = [sent1_range_arg1, sent1_range_arg2]
range_sent2 = [sent2_range_arg1, sent2_range_arg2]
loss, _, _, _, _, _, _, is_neg_pred = self.forward_with_loss_calculation(bert_tokens, tokens_tensor, range_sent1, range_sent2, orig_to_tok_map, l[batch_idx], l_tokens, nb = batch_nb, h = H[batch_idx, :length])
else:
loss = torch.zeros(1).to(self.device_to_use)
outputs = self.model(tokens_tensor)
states = outputs[0][0]
is_neg_pred = self.same_rel_mlp(states[0])
if (is_negative[batch_idx] and is_neg_pred.detach().cpu().numpy().item() > 0) or ((not is_negative[batch_idx]) and (is_neg_pred.detach().cpu().numpy().item() < 0)):
self.count_same_rel += 1
y = torch.ones(1).to(self.device_to_use) if is_negative[batch_idx] else torch.zeros(1).to(self.device_to_use)
loss += self.same_rel_weight * self.bce_loss(is_neg_pred, y)
#loss = self.same_rel_weight * self.bce_loss(is_neg_pred, y)
# if np.isnan(loss.detach().cpu().numpy().item()) or loss.detach().cpu().numpy().item() > 1e4:
# print("ERRROR!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!")
# print(sents_concat, range_sent1, range_sent2, sent1_idx, sent2_idx)
# return {"loss": loss*0}
if self.total%500 == 0 and self.total > 1:
self.log('train_loss_1k', self.count/self.total)
self.log("train_loss_1k_same_rel", self.count_same_rel/self.total_same_rel)
print("argument identification accuracy", self.count/self.total)
print("same-relation identification accuracy", self.count_same_rel/self.total_same_rel)
self.count = 0
self.count_same_rel = 0
self.total = 0
self.total_same_rel = 0
batch_loss += loss
        return {'loss': batch_loss / len(batch[0])}
"""
def validation_step(self, batch, batch_nb):
sents_concat, idx, l, sent2_with_args, is_negative, id1, id2 = batch
print(is_negative)
if is_negative:
return {'val_loss': torch.zeros(1).to(self.device)}
idx = idx.detach().cpu().numpy()[0]
bert_tokens, orig_to_tok_map, tok_to_orig_map, tokens_tensor = self.tokenize(sents_concat[0].split(" "))
l_tokens = len(bert_tokens[:orig_to_tok_map[l.detach().cpu().numpy().item()-1]])
sent1_range_arg1 = get_entity_range_multiword_expression(idx[0][0], orig_to_tok_map)
sent1_range_arg2 = get_entity_range_multiword_expression(idx[0][1], orig_to_tok_map)
sent2_range_arg1 = get_entity_range_multiword_expression(idx[1][0], orig_to_tok_map)
sent2_range_arg2 = get_entity_range_multiword_expression(idx[1][1], orig_to_tok_map)
range_sent1 = [sent1_range_arg1,sent1_range_arg2]
range_sent2 = [sent2_range_arg1,sent2_range_arg2]
loss, _, _, _, _, _, _, is_neg_pred = self.forward_with_loss_calculation(bert_tokens, tokens_tensor, range_sent1, range_sent2, orig_to_tok_map, l, l_tokens)
return {'val_loss': loss}
def validation_end(self, outputs):
avg_loss = torch.stack([x['val_loss'] for x in outputs]).mean()
print("Loss is {}".format(avg_loss))
return {'avg_val_loss': avg_loss}
"""
def configure_optimizers(self):
#return torch.optim.RMSprop(self.parameters())
#return torch.optim.ASGD(self.parameters())
optimizer = torch.optim.SGD(self.parameters(), lr=self.lr, momentum=self.momentum)
return {"optimizer": optimizer, 'scheduler': ReduceLROnPlateau(optimizer, patience = 1, factor = 0.5, verbose = True), 'monitor': 'train_loss_1k'}
#return torch.optim.Adam(self.parameters())
def evaluate_model(dev_dataset, model, max_ngrams = 2, num_examples = 200):
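    # Run the model over up to num_examples positive dev pairs and collect, for each pair, the
    # candidate n-grams ranked by their distance to the arguments of the first sentence.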
preds = []
count = 0
for batch in tqdm.tqdm(dev_dataset):
if count > num_examples: break
count += 1
sents_concat, idx, l, sent2_with_args, is_negative, id1, id2 = batch
if is_negative: continue
idx = idx.detach().cpu().numpy()
bert_tokens, orig_to_tok_map, tok_to_orig_map, tokens_tensor = model.tokenize(sents_concat.split(" "))
l_tokens = len(bert_tokens[:orig_to_tok_map[l-1]])
sent1_range_arg1 = get_entity_range_multiword_expression(idx[0][0], orig_to_tok_map)
sent1_range_arg2 = get_entity_range_multiword_expression(idx[0][1], orig_to_tok_map)
sent2_range_arg1 = get_entity_range_multiword_expression(idx[1][0], orig_to_tok_map)
sent2_range_arg2 = get_entity_range_multiword_expression(idx[1][1], orig_to_tok_map)
range_sent1 = (sent1_range_arg1,sent1_range_arg2)
range_sent2 = (sent2_range_arg1,sent2_range_arg2)
loss, idx_arg1, idx_arg2, idx_arg1_all, idx_arg2_all, all_false_ngrams_ranges, all_ngrams, is_neg_pred = model.forward_with_loss_calculation(bert_tokens, tokens_tensor, range_sent1, range_sent2, orig_to_tok_map, l, l_tokens, mode = "eval", n_max=max_ngrams)
is_neg_pred = torch.sigmoid(is_neg_pred).detach().cpu().numpy().item()
same_relation_score = 1 - is_neg_pred
preds.append({"sent": sents_concat, "tokens": bert_tokens, "tok2orig": tok_to_orig_map, "orig2tok": orig_to_tok_map,
"preds_arg1_tokens": idx_arg1_all, "preds_arg2_tokens": idx_arg2_all, "false_ngrams": all_false_ngrams_ranges,
"all_ngrams": all_ngrams, "gold_arg1_range_tokens": sent2_range_arg1, "gold_arg2_range_tokens": sent2_range_arg2, "same_rel_pred": same_relation_score,
"is_negative": is_negative, "id1": id1, "id2": id2})
return preds
def get_entity_range(index_orig, orig_to_tok_map):
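    # Map a word index in the original whitespace tokenization to the half-open range of word-piece
    # positions it occupies (orig_to_tok_map stores the index of the last word piece of each word).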
m = min(orig_to_tok_map.keys())
if orig_to_tok_map[index_orig] == 1: return (1,2)
if index_orig == 0: return (1, orig_to_tok_map[index_orig] + 1)
before = index_orig - 1
tok_range = (orig_to_tok_map[before] + 1, orig_to_tok_map[index_orig] + 1)
return tok_range
def get_entity_range_multiword_expression(start_and_end, orig_to_tok_map):
start, end = start_and_end
start_range = get_entity_range(start, orig_to_tok_map)
end_range = get_entity_range(end, orig_to_tok_map)
return [start_range[0], end_range[1]]
def get_tok_to_orig_map(orig_to_tok_map, num_words, num_tokens):
ranges = [get_entity_range(i, orig_to_tok_map) for i in range(num_words)]
tok_to_orig_map = {}
for i in range(num_words):
min,max = ranges[i]
for tok in range(min,max):
tok_to_orig_map[tok] = i
for tok in range(num_tokens):
if tok not in tok_to_orig_map:
tok_to_orig_map[tok] = num_words -1
return tok_to_orig_map
def get_all_ngrams_spans(seq_len, forbidden_ranges: List[tuple], start_ind = 0, n_max = 15):
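    # Enumerate all spans of 2..n_max tokens starting at start_ind and drop any span that overlaps
    # one of the forbidden ranges (typically the gold argument spans).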
def is_intersecting(ngram, forbidden_ranges):
return [(r[1] > ngram[0] >= r[0]) or(r[1] > ngram[1] >= r[0]) for r in forbidden_ranges]
all_ngrams = []
for n in range(2,n_max+1):
ngrams = list(get_ngrams(range(start_ind, seq_len), n))
all_ngrams.extend(ngrams)
all_ngrams = [(ngram[0], ngram[-1]) for ngram in all_ngrams]
all_ngrams = [ngram for ngram in all_ngrams if not any(is_intersecting(ngram, forbidden_ranges))]
return all_ngrams
| covid-sim-master | api/covid-ai2/alignment_model.py |
import torch
from typing import List, Dict
import random
import numpy as np
class Dataset(torch.utils.data.Dataset):
"""Simple torch dataset class"""
def __init__(self, data: List[Dict], device = "cpu", negative_prob = 0.0):
self.data = data
self.device = device
self.negative_prob = negative_prob
def __len__(self):
return len(self.data)
def __getitem__(self, index):
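        # Return one training example: the two sentences joined by a " ***** " separator, the argument
        # spans of both sentences (second-sentence spans shifted past the first sentence and the
        # separator), and an is_negative flag for pairs sampled from different queries.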
with torch.no_grad():
d = self.data[index]
other = random.choice(range(len(self)))
if random.random() < (1-self.negative_prob) or (self.data[other]["query_id"] == d["query_id"]):
sent1, sent2 = d["first"], d["second"]
id1, id2 = 1,1
is_negative = False
else:
sent1 = d["first"]
sent2 = self.data[other]["second"]
id1, id2 = 1,1
is_negative = True
sent1_arg1, sent1_arg2 = list(d["first_arg1"]), list(d["first_arg2"])
sent2_arg1, sent2_arg2 = list(d["second_arg1"]), list(d["second_arg2"])
l = len(sent1.split(" ")) + 1
sent2_arg1[0] += l
sent2_arg1[1] += l
sent2_arg2[0] += l
sent2_arg2[1] += l
sent2 = sent2.replace("ARG1:", "").replace("ARG2:", "").replace("<<","").replace(">>","")
sents_concat = sent1 + " ***** " + sent2 #sents_concat.split(" ")[l] is the first token in the 2nd sent
#create idx tensor. # 1stdim: sents, 2st dim: arg, 3st dim: start and end
idx = [[[sent1_arg1[0], sent1_arg1[1]], [sent1_arg2[0], sent1_arg2[1]]], [[sent2_arg1[0], sent2_arg1[1]], [sent2_arg2[0], sent2_arg2[1]]] ]
sent2_with_args = sent2
return sents_concat, torch.tensor(idx).int(), l, sent2_with_args, is_negative, id1, id2
| covid-sim-master | api/covid-ai2/dataset.py |
import pandas as pd
import numpy as np
import spike_queries
import argparse
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='download covid dataset',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--num-results', dest='num_results', type=int,
default=5000,
help='how many results to download')
parser.add_argument('--output-filename', dest='output_filename', type=str,
default="results.tsv",
help='results filename')
parser.add_argument('--query', dest='query', type=str,
default="the [subj virus] [verb does] [obj something]",
help='query to perform')
parser.add_argument('--query-type', dest='query_type', type=str,
default="syntactic",
help='query type')
args = parser.parse_args()
df = spike_queries.perform_query(args.query, num_results = args.num_results, query_type = args.query_type)
df.to_csv(args.output_filename, sep = "\t")
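    # Example usage (flags defined above):
    #   python download_data.py --query "the [subj virus] [verb does] [obj something]" --query-type syntactic --num-results 5000 --output-filename results.tsv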
| covid-sim-master | api/covid-ai2/download_data.py |
"""Hack to add per-session state to Streamlit.
Usage
-----
>>> import SessionState
>>>
>>> session_state = SessionState.get(user_name='', favorite_color='black')
>>> session_state.user_name
''
>>> session_state.user_name = 'Mary'
>>> session_state.favorite_color
'black'
Since you set user_name above, next time your script runs this will be the
result:
>>> session_state = get(user_name='', favorite_color='black')
>>> session_state.user_name
'Mary'
"""
try:
import streamlit.ReportThread as ReportThread
from streamlit.server.Server import Server
except Exception:
# Streamlit >= 0.65.0
import streamlit.report_thread as ReportThread
from streamlit.server.server import Server
class SessionState(object):
def __init__(self, **kwargs):
"""A new SessionState object.
Parameters
----------
**kwargs : any
Default values for the session state.
Example
-------
        >>> session_state = SessionState(user_name='', favorite_color='black')
        >>> session_state.user_name
        ''
        >>> session_state.user_name = 'Mary'
        >>> session_state.favorite_color
        'black'
"""
for key, val in kwargs.items():
setattr(self, key, val)
def get(**kwargs):
"""Gets a SessionState object for the current session.
Creates a new object if necessary.
Parameters
----------
**kwargs : any
Default values you want to add to the session state, if we're creating a
new one.
Example
-------
>>> session_state = get(user_name='', favorite_color='black')
>>> session_state.user_name
''
>>> session_state.user_name = 'Mary'
>>> session_state.favorite_color
'black'
Since you set user_name above, next time your script runs this will be the
result:
>>> session_state = get(user_name='', favorite_color='black')
>>> session_state.user_name
'Mary'
"""
# Hack to get the session object from Streamlit.
ctx = ReportThread.get_report_ctx()
this_session = None
current_server = Server.get_current()
if hasattr(current_server, '_session_infos'):
# Streamlit < 0.56
session_infos = Server.get_current()._session_infos.values()
else:
session_infos = Server.get_current()._session_info_by_id.values()
for session_info in session_infos:
s = session_info.session
if (
# Streamlit < 0.54.0
(hasattr(s, '_main_dg') and s._main_dg == ctx.main_dg)
or
# Streamlit >= 0.54.0
(not hasattr(s, '_main_dg') and s.enqueue == ctx.enqueue)
or
# Streamlit >= 0.65.2
(not hasattr(s, '_main_dg') and s._uploaded_file_mgr == ctx.uploaded_file_mgr)
):
this_session = s
if this_session is None:
raise RuntimeError(
"Oh noes. Couldn't get your Streamlit Session object. "
'Are you doing something fancy with threads?')
# Got the session object! Now let's attach some state into it.
if not hasattr(this_session, '_custom_session_state'):
this_session._custom_session_state = SessionState(**kwargs)
return this_session._custom_session_state
| covid-sim-master | api/covid-ai2/SessionState.py |
import torch
from transformers import BertTokenizer
from torch.utils.data import TensorDataset, DataLoader, RandomSampler, SequentialSampler
from transformers import BertForSequenceClassification, AdamW, BertConfig, BertModel, AutoTokenizer, AutoModel
import numpy as np
from typing import List
import tqdm
import json
class BertEncoder(object):
def __init__(self, device = 'cpu'):
#self.tokenizer = BertTokenizer.from_pretrained('scibert_scivocab_uncased/vocab.txt')
#self.model = BertModel.from_pretrained('scibert_scivocab_uncased/')
self.tokenizer = AutoTokenizer.from_pretrained('allenai/scibert_scivocab_uncased')
self.model = AutoModel.from_pretrained('allenai/scibert_scivocab_uncased')
self.model.eval()
self.model.to(device)
self.device = device
self.pad_token = self.tokenizer.convert_tokens_to_ids([self.tokenizer.pad_token])[0]
def tokenize_and_pad(self, texts: List[str]):
indexed_texts = [self.tokenizer.encode(text, add_special_tokens=True, max_length = 512) for text in texts] #
max_len = max(len(text) for text in indexed_texts)
indexed_texts = [text + [self.pad_token] * (max_len - len(text)) for text in indexed_texts]
idx_tensor = torch.LongTensor(indexed_texts).to(self.device)
att_tensor = idx_tensor != self.pad_token
return idx_tensor, att_tensor
def encode(self, sentences: List[str], sentence_ids: List[str], batch_size: int, strategy: str = "cls", fname="", write = False):
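        # Encode sentences in batches with SciBERT, pool the last hidden layer according to `strategy`
        # (cls / mean-cls / mean-cls-max / mean / median / max / min), and either write
        # {"text", "vec", "id"} JSON lines to `fname` or return the vectors as a numpy array.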
assert len(sentences) == len(sentence_ids)
vecs = []
with open(fname, "w", encoding = "utf-8") as f:
for batch_idx in tqdm.tqdm_notebook(range(0, len(sentences), batch_size), total = len(sentences)//batch_size):
batch_sents = sentences[batch_idx: batch_idx + batch_size]
batch_ids = sentence_ids[batch_idx: batch_idx + batch_size]
assert len(batch_sents) == len(batch_ids)
idx, att_mask = self.tokenize_and_pad(batch_sents)
with torch.no_grad():
outputs = self.model(idx, attention_mask = att_mask)
last_hidden = outputs[0]
if strategy == "cls":
h = last_hidden[:, 0, ...]
elif strategy == "mean-cls":
h = torch.cat([last_hidden[:, 0, ...], torch.mean(last_hidden, axis = 1)], axis = 1)
elif strategy == "mean-cls-max":
h_max = torch.max(last_hidden, axis = 1).values
h = torch.cat([last_hidden[:, 0, ...], torch.mean(last_hidden, axis = 1), h_max], axis = 1)
elif strategy == "mean":
h = torch.mean(last_hidden, axis = 1)
elif strategy == "median":
h = torch.median(last_hidden, axis = 1).values
elif strategy == "max":
h = torch.max(last_hidden, axis = 1).values
elif strategy == "min":
h = torch.min(last_hidden, axis = 1).values
batch_np = h.detach().cpu().numpy()
assert len(batch_np) == len(batch_sents)
sents_states_ids = zip(batch_sents, batch_np, batch_ids)
for sent, vec, sent_id in sents_states_ids:
vec_str = " ".join(["%.4f" % x for x in vec])
sent_dict = {"text": sent, "vec": vec_str, "id": sent_id}
if write:
f.write(json.dumps(sent_dict) + "\n")
else:
vecs.append(vec)
return np.array(vecs)
| covid-sim-master | api/covid-ai2/bert.py |
import requests
import pandas as pd
import streamlit as st
COVID_URL = "https://spike.staging.apps.allenai.org/api/3/search/query" #"http://35.242.203.108:5000/api/3/search/query"
COVID_BASE_URL = "https://spike.staging.apps.allenai.org" #"http://35.242.203.108:5000"
PUBMED_URL = "http://34.89.172.235:5000/api/3/search/query"
PUBMED_BASE_URL = "http://34.89.172.235:5000"
WIKIPEDIA_URL = "https://spike.staging.apps.allenai.org/api/3/search/query"
WIKIPEDIA_BASE_URL = "https://spike.staging.apps.allenai.org"
def get_tsv_url(response: requests.models.Response, results_limit: int, base_url) -> str:
print("\n\n{}\n\n".format(response))
tsv_location = response.headers["csv-location"]
tsv_url = base_url + tsv_location + "?sentence_text=True&capture_indices=True&sentence_id=True&limit={}".format(
results_limit)
print(tsv_url)
return tsv_url
def perform_query(query_str: str, dataset_name: str = "pubmed", num_results: int = 10, query_type: str = "syntactic",
remove_duplicates: bool = True, lucene_query="") -> pd.DataFrame:
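    # Build the SPIKE JSON payload, POST it to the endpoint selected by `dataset_name`, and read the
    # results TSV referenced in the response headers into a DataFrame. Note that the active template
    # below hard-codes the covid19 data set; `dataset_name` only chooses the base URL.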
template = """{{
"queries": {{"{query_type}": "{query_content}", "lucene": "{lucene_query}"}},
"data_set_name": "{dataset_name}"
}}"""
#template = """{{
# "queries": {{"{query_type}": "{query_content}", "lucene": "{lucene_query}"}},
# "data_set_name": "{dataset_name}"
#}}"""
template = """{{"queries": {{"{query_type}": "{query_content}", "parent":"","expansion":""}},"data_set_name":"covid19","context":{{"lists":{{}},"tables":{{}},"case_strategy":"ignore",
"attempt_fuzzy": false}}}}"""
query = template.format(query_content=query_str, query_type=query_type)
print("\n\n{}\n\n".format(query))
#query = template.format(query_content=query_str, dataset_name=dataset_name, query_type=query_type, lucene_query=lucene_query)
#st.write("******************")
#st.write(query)
#st.write("******************")
headers = {'content-type': 'application/json'}
if dataset_name == "pubmed":
url, base_url = PUBMED_URL, PUBMED_BASE_URL
elif dataset_name == "covid19":
url, base_url = COVID_URL, COVID_BASE_URL
elif dataset_name == "wiki":
url, base_url = WIKIPEDIA_URL, WIKIPEDIA_BASE_URL
response = requests.post(url, data=query.encode('utf-8'), headers=headers)
try:
tsv_url = get_tsv_url(response, results_limit=num_results, base_url=base_url)
except Exception as e:
st.write("Invalid SPIKE query. Please check query content and/or its type.")
raise e
df = pd.read_csv(tsv_url)
# if remove_duplicates:
# df = df.drop_duplicates("sentence_text")
return df
| covid-sim-master | api/covid-ai2/spike_queries.py |
import streamlit as st
import pandas as pd
import numpy as np
import faiss
import bert
from bert import BertEncoder
import pickle
import spike_queries
import sklearn
import sklearn.metrics
import scipy.linalg
import random
import time
import alignment
import bert_all_seq
#import alignment_supervised2 as alignment_supervised
import alignment_supervised
from annot import annotation, annotated_text
import SessionState
NUM_RESULTS_TO_ALIGN_DEFAULT = 200
DEFAULT_MAX_NGRAM = 5
BOOLEAN_QUERY_DEFAULT = "virus lemma=originate"
TOKEN_QUERY_DEFAULT = "novel coronavirus"
SYNTACTIC_QUERY_DEFAULT = "<>arg1:[e=CHEMICAL|SIMPLE_CHEMICAL]paracetamol $[lemma=word|act]works $by <>arg2:activation of something" #"<>arg1:[e]paracetamol is the recommended $treatment for <>arg2:[e]asthma."
SPIKE_RESULTS_DEFAULT = 75
must_include = ""
import base64
st.set_page_config(layout="wide")
@st.cache(allow_output_mutation=True)
def load_sents_and_ids():
with st.spinner('Loading sentences and IDs...'):
#df = pd.read_csv("data/results.tsv", sep = "\t")
#sents = df["sentence_text"].tolist()
with open("data/sents.txt", "r", encoding = "utf-8") as f:
sents = f.readlines()
sents = [s.strip() for s in sents]
ids = [hash(s) for s in sents]
id2ind = {ids[i]:i for i,s in enumerate(sents)}
ind2id = {i:ids[i] for i,s in enumerate(sents)}
return sents, ids, id2ind, ind2id
@st.cache(allow_output_mutation=True)
def load_index(similarity, pooling):
with st.spinner('Loading FAISS index...'):
fname = "data/output-" + pooling + ".index"
index = faiss.read_index(fname)
return index
@st.cache(allow_output_mutation=True)
def load_bert():
with st.spinner('Loading BERT...'):
model = bert.BertEncoder("cpu")
return model
@st.cache(allow_output_mutation=True)
def load_bert_all_seq():
with st.spinner('Loading BERT...'):
model = bert_all_seq.BertEncoder("cpu")
return model
@st.cache(allow_output_mutation=True)
def load_bert_alignment_supervised():
with st.spinner('Loading BERT...'):
model = alignment_supervised.BertModel("cpu")
return model
@st.cache(allow_output_mutation=True)
def load_pca(pooling):
fname = "data/output-" + pooling + ".pca.pickle"
with open(fname, "rb") as f:
return pickle.load(f)
@st.cache(allow_output_mutation=True)
def encode(input_sentence, pca, bert, pooling):
return pca.transform(bert.encode([input_sentence], [1], batch_size = 1, strategy = pooling, fname = "dummy.txt", write = False))
def zero_input():
input_sentence = placeholder.text_input('Enter a sentence for similarity search', value="", key = random.randint(0,int(1e16)))
def write_results_menu(results, session_state, keys="random"):
cols = st.beta_columns((8,1,1))
cols[0].markdown("<b>Sentence</b>", unsafe_allow_html = True)
cols[1].markdown("<b>Enhance?</b>", unsafe_allow_html = True)
cols[2].markdown("<b>Decrease?</b>", unsafe_allow_html = True)
for i in range(min(len(results), 50)):
if len(results[i]) < 3: continue
cols[0].write(results[i])
enhance = cols[1].checkbox('✓', key = "en"+str(i) if keys=="normal" else random.randint(0,int(1e16)),value=False)
decrease = cols[2].checkbox('✗', key = "de"+str(i) if keys == "normal" else random.randint(0,int(1e16)),value=False)
hash_val = hash(results[i])
if enhance:
#st.write("added sentence {}".format(results[i]))
session_state.enhance.add(hash_val)
else:
#st.write("removed sentence {}".format(results[i]))
if hash_val in session_state.enhance: session_state.enhance.remove(hash_val)
if decrease:
session_state.decrease.add(hash(results[i]))
else:
if hash_val in session_state.decrease: session_state.decrease.remove(hash_val)
def project_out(positive, negative):
positive,negative = np.array(positive), np.array(negative)
pos_basis = scipy.linalg.orth(positive.T)
P = pos_basis.dot(pos_basis.T)
st.write(P.shape, negative.shape, positive.shape)
negative_different = negative - negative@P
return positive - negative_different
def get_table_download_link(df):
"""Generates a link allowing the data in a given panda dataframe to be downloaded
in: dataframe
out: href string
"""
csv = df.to_csv(index=False, sep = "\t")
b64 = base64.b64encode(csv.encode()).decode() # some strings <-> bytes conversions necessary here
href = f'<a href="data:file/csv;base64,{b64}">Download csv file</a>'
return href
st.title('COVID-19 Similarity Search')
RESULT_FILTREATION = False
#a = st.empty()
mode = st.sidebar.radio("Mode", ("Start with Sentence", "Start with Query"))
similarity = "dot product" #st.sidebar.selectbox('Similarity', ('dot product', "l2"))
pooling = st.sidebar.selectbox('Pooling', ('cls', 'mean-cls'))
to_decrease, to_enhance = [], []
session_state = SessionState.get(start=False, enhance=set(), decrease=set(), interactive = False, started = False, vec=None, current_query="")
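# `enhance` / `decrease` store hashes of sentences the user marked with ✓ / ✗; on the next run the
# query vector is recomputed as the mean of the enhanced vectors minus the mean of the decreased ones.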
#if mode == "Sentencve":
# filter_by_spike = True if st.sidebar.selectbox('Filter by SPIKE query?', ('False', 'True'))=="True" else False
sents, ids, id2ind, ind2id = load_sents_and_ids()
#sents = df["sentence_text"].tolist()
#ids = [hash(s) for s in sents]
print("len sents", len(sents))
#print("Creating dicts...")
#id2ind = {ids[i]:i for i,s in enumerate(sents)}
#ind2id = {i:ids[i] for i,s in enumerate(sents)}
#print("Done.")
index = load_index(similarity, pooling)
bert = load_bert()
bert_all_seq = load_bert_all_seq()
bert_alignment_supervised = load_bert_alignment_supervised()
pca = load_pca(pooling)
st.write("Uses {}-dimensional vectors".format(pca.components_.shape[0]))
st.write("Number of indexed sentences: {}".format(len(sents)))
print("Try accessing the demo under localhost:8080 (or the default port).")
#"""
#if mode == "Sentence" and filter_by_spike:
#
# #filter_query = st.text_input('Enter a SPIKE query to filter by', 'This [nsubj drug] treats [obj:l coronavirus].')
#
# query_type = st.radio("Query type", ("syntactic", "boolean", "token"))
# if query_type == "syntactic":
# filter_query = st.text_input('Query', 'The [subj:l coronavirus] [copula:w is] prevalent among [w:e bats].')
# elif query_type == "boolean":
# filter_query = st.text_input('Query', 'virus lemma=persist on')
# elif query_type == "token":
# filter_query = st.text_input('Query', 'novel coronavirus')
#
# filter_size = int(st.text_input('How many SPIKE search results?', 3000))
# results_df = spike_queries.perform_query(filter_query, dataset_name = "covid19", num_results = filter_size, query_type = query_type)
# results_sents = np.array(results_df["sentence_text"].tolist())
# results_ids = [hash(s) for s in results_sents]
#"""
if mode == "Start with Sentence":
placeholder = st.empty()
input_sentence = placeholder.text_input('Enter a sentence for similarity search', '')
#input_sentence = st.text_input('Enter a sentence for similarity search', 'The virus can spread rapidly via different transimission vectors.')
#st.write("try", session_state.enhance, session_state.decrease)
filter_by = st.selectbox('Filter results based on:', ('None', 'Boolean query', 'Token query', 'Syntactic query'))
query_type = "syntactic" if "syntactic" in filter_by.lower() else "boolean" if "boolean" in filter_by.lower() else "token" if "token" in filter_by.lower() else None
filter_by_spike = query_type is not None
if query_type == "syntactic":
filter_query = st.text_input('SPIKE query', SYNTACTIC_QUERY_DEFAULT)
elif query_type == "boolean":
filter_query = st.text_input('SPIKE query', BOOLEAN_QUERY_DEFAULT)
elif query_type == "token":
filter_query = st.text_input('SPIKE query', TOKEN_QUERY_DEFAULT)
if query_type is not None:
filter_size = st.slider('Max number of results', 1, 10000, 3000)
results_df = spike_queries.perform_query(filter_query, dataset_name = "covid19", num_results = filter_size, query_type = query_type)
results_sents = np.array(results_df["sentence_text"].tolist())
results_ids = [hash(s) for s in results_sents]
st.write("Found {} matches".format(len(results_ids)))
else:
number_of_sentence_results = st.slider('Number of results', 1, 1000, 50) #int(st.text_input('Number of results', 100))
elif mode == "Start with Query":
query_type = st.radio("Query type", ("Boolean", "Token", "Syntactic"))
query_type = query_type.lower()
if query_type == "syntactic":
input_query = st.text_input('Query to augment', SYNTACTIC_QUERY_DEFAULT)
elif query_type == "boolean":
input_query = st.text_input('Query to augment', BOOLEAN_QUERY_DEFAULT)
elif query_type == "token":
input_query = st.text_input('Query to augment', TOKEN_QUERY_DEFAULT)
max_results = st.slider('Max number of SPIKE results', 1, 1000, SPIKE_RESULTS_DEFAULT) #int(st.text_input("Max number of results", 25))
max_number_of_augmented_results = st.slider('Number of Augmented results', 1, 250000, 1000)
if query_type == "syntactic":
perform_alignment = st.checkbox("Perform argument alignment", value=False, key=None)
else:
perform_alignment = False
if perform_alignment:
number_of_sentences_to_align = st.select_slider('Number of sentences to align.', options=[1, 10, 25, 50, 100, 200, 250, 500], value = NUM_RESULTS_TO_ALIGN_DEFAULT)
alignment_method = st.radio("Alignment model", ('Metric model', 'Naive'))
if alignment_method != "Naive":
max_ngrams = st.select_slider('Maximum span size to align', options=[1, 2, 3, 4, 5, 6, 7, 8,9,10,11,12,13,14,15], value = DEFAULT_MAX_NGRAM)
must_include = st.text_input('Get only results containing the following words', '')
filter_by = st.selectbox('Filter results based on:', ('None', 'Boolean query', 'Token query', 'Syntactic query'))
query_type_filtration = "syntactic" if "syntactic" in filter_by.lower() else "boolean" if "boolean" in filter_by.lower() else "token" if "token" in filter_by.lower() else None
filter_by_spike = query_type_filtration is not None
if filter_by_spike:
message = "Get only results NOT captured by this query"
if query_type_filtration == "syntactic":
filter_query = st.text_input(message, SYNTACTIC_QUERY_DEFAULT)
elif query_type_filtration == "boolean":
filter_query = st.text_input(message, BOOLEAN_QUERY_DEFAULT)
elif query_type_filtration == "token":
filter_query = st.text_input(message, TOKEN_QUERY_DEFAULT)
filtration_batch_size = st.slider('Filtration batch size', 1, 250, 50)
RESULT_FILTREATION = True
show_results = True
#is_interactive_button = st.radio("Interactive?", ('✓', '✗'), index=0 if session_state.interactive else 1)
#if is_interactive_button == "✓":
# session_state.interactive = True
#else:
# session_state.interactive = False
start = st.button('Run')
st.write("Current query: {}".format(session_state.current_query))
if start:
session_state.started = True
if (start or session_state.start) and session_state.started:
if mode == "Start with Sentence": session_state.start = True
if mode == "Start with Sentence":
if len(session_state.enhance) == 0 and input_sentence != "": #not is_interactive_button=="✓":
session_state.current_query = input_sentence
zero_input()
st.write("USING A USER-PROVIDED SENTENCE")
encoding = encode(input_sentence, pca, bert, pooling) #pca.transform(bert.encode([input_sentence], [1], batch_size = 1, strategy = pooling, fname = "dummy.txt", write = False))
session_state.vec = encoding
if start and len(session_state.enhance) != 0:
zero_input()
session_state.interactive = True
#st.write("USING THE {} VECTORS THE USER MARKED".format(len(session_state.enhance) + len(session_state.decrease)))
encoding_pos = np.array([index.reconstruct(id2ind[i]) for i in session_state.enhance if i in id2ind]) #np.array([index.reconstruct(i) for i in session_state.enhance])
encoding = np.mean(encoding_pos, axis = 0)
encoding_neg = np.zeros_like(encoding_pos)
if len(session_state.decrease) != 0:
encoding_neg += np.mean(np.array([index.reconstruct(id2ind[i]) for i in session_state.decrease if i in id2ind]), axis = 0)
encoding = encoding - encoding_neg
st.write(encoding.shape)
session_state.enhance = set()
session_state.decrease = set()
session_state.vec = encoding
#write_results_menu(results, session_state)
if ((not start) and len(session_state.enhance) != 0) or (input_sentence==""):
encoding = session_state.vec
if not filter_by_spike:
#st.write(encoding.shape, pca.components_.shape, index.d)
#st.write(help(index))
D,I = index.search(np.ascontiguousarray(encoding).astype("float32"), number_of_sentence_results)
else:
encoding_of_spike_results = np.array([index.reconstruct(id2ind[i]) for i in results_ids if i in id2ind])
if encoding_of_spike_results.shape[0] > 0:
show_results = False
                with st.spinner('Retrieving similar sentences...'):
sims = sklearn.metrics.pairwise.cosine_similarity(encoding, encoding_of_spike_results)
idx_sorted = sims.argsort()[0]
spike_sents_sorted = results_sents[idx_sorted][::-1]
I = np.array([[id2ind[hash(s)] for s in spike_sents_sorted if hash(s) in id2ind]])
else:
show_results = False
st.write("SPIKE search results are not indexed.")
# TODO: CHECK WHY ROWS RE ADDED TO I AFTER MARKING
#st.write("TRY", I.squeeze())
I = I.squeeze()
if len(I.shape) != 1:
I = I[0]
results = [sents[i] for i in I if must_include in sents[i]]
if RESULT_FILTREATION:
results = result_sents
cols = st.beta_columns((10,1,1))
cols[0].markdown("<b>Sentence</b>", unsafe_allow_html = True)
#cols[1].markdown("<b>Enhance?</b>", unsafe_allow_html = True)
#cols[2].markdown("<b>Decrease?</b>", unsafe_allow_html = True)
for i in range(min(len(results), 50)):
cols[0].write(results[i])
enhance = cols[0].checkbox('✓', key = "en"+str(i) ,value=False)
decrease = cols[0].checkbox('✗', key = "de"+str(i),value=False)
#cols[0].write("")
#cols[1].write("")
#cols[2].write("")
hash_val = hash(results[i])
if enhance:
#st.write("added sentence {}".format(results[i]))
session_state.enhance.add(hash_val)
else:
#st.write("removed sentence {}".format(results[i]))
if hash_val in session_state.enhance: session_state.enhance.remove(hash_val)
if decrease:
session_state.decrease.add(hash(results[i]))
else:
if hash_val in session_state.decrease: session_state.decrease.remove(hash_val)
elif mode == "IDs":
input_ids = st.text_input('Input ids', '39, 41, 49, 50, 112, 116, 119, 229, 286, 747')
input_ids = [int(x) for x in input_ids.replace(" ", "").split(",")]
        st.write("First sentences corresponding to those IDs:")
l = range(min(10, len(input_ids) ) )
query_sents = [sents[id2ind[input_ids[i]]] for i in l]
st.table(query_sents)
encoding = np.array([index.reconstruct(id2ind[i]) for i in input_ids])
encoding = np.mean(encoding, axis = 0)
D,I = index.search(np.ascontiguousarray([encoding]).astype("float32"), 150)
elif mode == "Start with Query":
with st.spinner('Performing SPIKE query...'):
results_df = spike_queries.perform_query(input_query, dataset_name = "covid19", num_results = max_results, query_type = query_type)
results_sents = results_df["sentence_text"].tolist()
results_ids = [hash(s) for s in results_sents] #results_df["sentence_id"].tolist()
st.write("Found {} matches".format(len(results_ids)))
if len(results_sents) > 0:
st.write("First sentences retrieved:")
st.table(results_sents[:10])
encoding = np.array([index.reconstruct(id2ind[i]) for i in results_ids if i in id2ind])
if encoding.shape[0] > 0:
with st.spinner('Retrieving similar sentences...'):
encoding = np.mean(encoding, axis = 0)
D,I = index.search(np.ascontiguousarray([encoding]).astype("float32"), max_number_of_augmented_results)
result_sents = [sents[i].replace("/","-") for i in I.squeeze()]
if must_include != "":
result_sents = [sents[i].replace("/","-") for i in I.squeeze() if must_include in sents[i]]
if filter_by_spike:
with st.spinner('Filtering...'):
start = time.time()
# filter by lucene queries
results_sents_filtered = []
def remove_all_words(s):
words_to_remove = [" is ", " are ", " the ", " a ", " an ", " to ", " as ", " from ",
" and ", " or ", " of ", " in ", " be ", " this ", " that ", " , ", " these ", " those ",
" with ", " within ", " can ", " / "]
s = s.replace("The ", "").replace("In ", "").replace("Although ", "").replace("It ", "").replace(" (", "").replace(" )", "").replace("A ", "").replace("An ", "").replace(" [", "").replace(" ]", "")
s = s.replace(' " ', ' ').replace(" ' ", " ")
s = s.replace(" 's "," ").replace("(","").replace(")", "").replace("[","").replace("]","")
for w in words_to_remove:
s = s.replace(w, " ")
#s = s.replace("/", "-")
while " " in s:
s = s.replace(" ", " ")
words = s.split(" ")
s = " ".join([w for w in words if "-" not in w and "/" not in w and "'" not in w and ")" not in w and "(" not in w and "]" not in w
and "[" not in w and "," not in w and not w=="has" and not w=="have" and not w=="been" and not w=="on"])
return s
filtration_sents = []
start_time = time.time()
for b in range(0, len(result_sents), filtration_batch_size):
start, end = b, b+filtration_batch_size
all_words = " OR ".join(["("+ " AND ".join(remove_all_words(s).split(" ")[:8])+")" for s in result_sents[start:end]][:])
#all_words = all_words.replace("AND AND", "AND")
results_df_filtration = spike_queries.perform_query(filter_query, dataset_name="covid19",
num_results=100000,
query_type=query_type_filtration,
lucene_query=all_words)
filtration_sents.extend(results_df_filtration["sentence_text"].tolist())
#filtration_sents = results_df_filtration["sentence_text"].tolist()
st.write("Num filtration results: {}".format(len(filtration_sents)))
#st.write("Filtration sentences:")
#st.write(st.table(filtration_sents))
st.write("==============================")
#st.write(all_words)
#st.write(len(results_df_filtration))
#st.write("------------")
#st.write(st.table(filtration_sents[:5]))
#st.write("=====================")
result_sents = [s for s in result_sents if s not in set(filtration_sents)] # take only sents not captured by the query
st.write("Filtration took {} seconds".format(time.time() - start_time))
# start = time.time()
# # filter by lucene queries
# results_sents_filtered = []
# for s in result_sents:
# words = " AND ".join(s.split(" ")[:12])
# results_df = spike_queries.perform_query(filter_query, dataset_name="covid19",
# num_results=100000,
# query_type=query_type_filtration,
# lucene_query=words)
# if len(results_df) == 0: # if not captured by the query
# results_sents_filtered.append(s)
# result_sents = results_sents_filtered
# st.write("filteration took {} seconds".format(time.time() - start))
# st.write(len(result_sents))
if query_type == "syntactic" and perform_alignment:
with st.spinner('Performing argument alignment...'):
#colored_sents, annotated_sents= alignment.main(bert_all_seq, result_sents, results_df, input_query, [-1], NUM_RESULTS_TO_ALIGN)
if alignment_method == "Naive":
colored_sents, annotated_sents = alignment.main(bert_all_seq, result_sents, results_df, input_query, [-1], number_of_sentences_to_align)
else:
annotated_sents, arg1_items, arg2_items, tuples_items, captures_tuples = alignment_supervised.main(bert_alignment_supervised, result_sents, results_df, number_of_sentences_to_align, max_ngrams+1)
arg1_counts_df = pd.DataFrame(arg1_items, columns =['entity', 'count'])
arg2_counts_df = pd.DataFrame(arg2_items, columns =['entity', 'count'])
tuples_counts_df = pd.DataFrame(tuples_items, columns =['entity', 'count'])
captures_df = pd.DataFrame.from_records(captures_tuples, columns =['ARG1', 'ARG2'])
captures_df["sentence"] = result_sents[:len(captures_tuples)]
st.sidebar.write('ARG1 Aggregation:')
st.sidebar.write(arg1_counts_df.head(30))
st.sidebar.write('ARG2 Aggregation:')
st.sidebar.write(arg2_counts_df.head(30))
st.sidebar.write('Tuples Aggregation:')
st.sidebar.write(tuples_counts_df.head(30))
st.markdown(get_table_download_link(captures_df), unsafe_allow_html=True) # download augmented results
for s in annotated_sents:
annotated_text(*s)
else:
show_results = False
st.write("SPIKE search results are not indexed.")
#encoding = pca.transform(bert.encode(results_sents, [1]*len(results_sents), batch_size = 8, strategy = pooling, fname = "dummy.txt", write = False))#.squeeze()
#encoding = np.mean(encoding, axis = 0)
#D,I = index.search(np.ascontiguousarray([encoding]), 100)
else:
show_results = False
st.write("No resutls found.")
if show_results:
results = [sents[i] for i in I.squeeze() if must_include in sents[i]]
if RESULT_FILTREATION:
results = result_sents
st.write("Performed query of type '{}'. Similarity search results:".format(mode))
st.write(st.table(results))
| covid-sim-master | api/covid-ai2/demo.py |
import pandas as pd
import tqdm
import pickle
import random
import itertools
import torch
import numpy as np
from typing import Dict, List, Tuple
from torch import nn
from torch.utils.data import TensorDataset, DataLoader, RandomSampler, SequentialSampler
from transformers import AutoConfig, AutoModel, AutoTokenizer, BertTokenizer
from transformers import BertForSequenceClassification, AdamW, BertConfig, BertModel
import pytorch_lightning as pl
from pytorch_lightning import Trainer
from scipy.spatial.distance import cosine as cosine_distance
from collections import defaultdict, Counter
import nltk
from nltk import ngrams as get_ngrams
from termcolor import colored
import streamlit as st
from dataset import Dataset  # project-local Dataset wrapper used by `finetune`
import alignment_model
def get_result_dict(df):
result_dict = df.to_dict("records")
return result_dict
def generate_pairs_data(result_dict, k = 1000):
pairs_data = []
pairs = list(itertools.combinations(result_dict[:150], 2))
pairs = [p for p in pairs if p[0] != p[1]]
random.seed(0)
random.shuffle(pairs)
for j in range(min(k, len(pairs))):
first, second = pairs[j]
new_dict = {"first": first["sentence_text"], "second": second["sentence_text"],
"first_arg1": [first["arg1_first_index"], first["arg1_last_index"]],
"first_arg2": [first["arg2_first_index"], first["arg2_last_index"]],
"second_arg1": [second["arg1_first_index"], second["arg1_last_index"]],
"second_arg2": [second["arg2_first_index"], second["arg2_last_index"]]}
pairs_data.append(new_dict)
return pairs_data
def add_annotation(pairs_data):
def add(sent, idx, arg1=True):
string = "<<ARG1:" if arg1 else "<<ARG2:"
sent_new = sent.split(" ")
sent_new = sent_new[:idx[0]] + [string] + sent_new[idx[0]:idx[1]+1] + [">>"] + sent_new[idx[1]+1:]
new = " ".join(sent_new).replace("<<ARG1: ", "<<ARG1:").replace("<<ARG2: ", "<<ARG2:").replace(" >>", ">>")
return new
for i,pair in enumerate(pairs_data):
first_arg1, first_arg2 = pair["first_arg1"], pair["first_arg2"]
second_arg1, second_arg2 = pair["second_arg1"], pair["second_arg2"]
first_new = add(pair["first"], first_arg1, arg1=True)
first_new = add(first_new, first_arg2, arg1=False)
second_new = add(pair["second"], second_arg1, arg1=True)
second_new = add(second_new, second_arg2, arg1=False)
pair["first"] = first_new
pair["second"] = second_new
pairs_data[i] = pair
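# Added illustration (not part of the original module): the inner `add` helper above wraps an argument
# span in "<<ARG1:...>>" / "<<ARG2:...>>" markers, using word-level start/end indices. On a toy sentence:
#   add("aspirin reduces fever", [0, 0], arg1=True)   -> "<<ARG1:aspirin>> reduces fever"
#   add("aspirin reduces fever", [2, 2], arg1=False)  -> "aspirin reduces <<ARG2:fever>>"
# `add_annotation` applies this to both arguments of both sentences in every pair, mutating `pairs_data`
# in place.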
def finetune(model, df):
result_dict = get_result_dict(df)
pairs_data = generate_pairs_data(result_dict, k = 250)
add_annotation(pairs_data)
dataset = Dataset(pairs_data)
model_to_ft = alignment_model.BertModel(dataset, dataset, 16, "cpu", "train", alpha = 0.05, lr = 1e-3, momentum=0.5, l2_loss=True, same_rel_weight = 0.2, pretrained = True)
trainer = Trainer(max_epochs=1,min_epochs=1)
train_gen = torch.utils.data.DataLoader(dataset, batch_size=32, drop_last=False, shuffle=True,
num_workers = 8)
st.write("Fitting...")
trainer.fit(model_to_ft, train_gen)
model.linear_arg1_1 = model_to_ft.linear_arg1_1
class BertModel(torch.nn.Module):
def __init__(self, device: str, mode: str = "eval", load_existing = True):
super().__init__()
self.device = device
if load_existing:
config = AutoConfig.from_pretrained('Shauli/RE-metric-model-spike', output_hidden_states=True)
self.model = AutoModel.from_pretrained('Shauli/RE-metric-model-spike', config=config)
self.tokenizer = AutoTokenizer.from_pretrained('Shauli/RE-metric-model-spike')
else:
config = AutoConfig.from_pretrained('allenai/scibert_scivocab_uncased', output_hidden_states=True)
self.model = AutoModel.from_pretrained('allenai/scibert_scivocab_uncased', config=config)
self.tokenizer = AutoTokenizer.from_pretrained('allenai/scibert_scivocab_uncased')
if load_existing:
self.linear_arg1_1 = torch.nn.Linear(768, 64)
self.linear_arg1_1.load_state_dict(torch.load("linear.pt", map_location = torch.device('cpu')))
else:
self.linear_arg1_1 = torch.nn.Linear(768, 64)
self.linear_arg2_1 = torch.nn.Linear(768, 64)
self.linear_arg1_2 = torch.nn.Linear(768, 64)
self.linear_arg2_2 = torch.nn.Linear(768, 64)
if mode == "eval":
self.model.eval()
else:
self.model.train()
for p in self.model.parameters():
p.requires_grad = False
    def tokenize(self, original_sentence: List[str]) -> Tuple[List[str], Dict[int, int], Dict[int, int], torch.Tensor]:
        """
        Parameters
        ----------
        original_sentence: The sentence to tokenize, given as a list of whitespace-separated words.

        Returns
        -------
        bert_tokens: The sentence, tokenized by the BERT tokenizer (including [CLS] and [SEP]).
        orig_to_tok_map: A mapping (alignment) between indices in the original tokenized sentence and indices in the sentence tokenized by the BERT tokenizer. See https://github.com/google-research/bert
        tok_to_orig_map: The inverse mapping, from BERT wordpiece indices back to original word indices.
        tokens_tensor: The wordpiece ids as a tensor of shape [1, num_tokens] on self.device.
        """
bert_tokens = ["[CLS]"]
orig_to_tok_map = {}
tok_to_orig_map = {}
has_subwords = False
is_subword = []
for i, w in enumerate(original_sentence):
tokenized_w = self.tokenizer.tokenize(w)
has_subwords = len(tokenized_w) > 1
is_subword.append(has_subwords)
bert_tokens.extend(tokenized_w)
orig_to_tok_map[i] = len(bert_tokens) - 1
tok_to_orig_map = {}
bert_tokens.append("[SEP]")
tok_to_orig_map = get_tok_to_orig_map(orig_to_tok_map, len(original_sentence), len(bert_tokens))
indexed_tokens = self.tokenizer.convert_tokens_to_ids(bert_tokens)
tokens_tensor = torch.tensor([indexed_tokens]).to(self.device)
return (bert_tokens, orig_to_tok_map, tok_to_orig_map, tokens_tensor)
def forward(self, x):
outputs = self.model(x)
states = outputs[0][0] #[seq_len, 768]
return states
def forward_with_loss_calculation(self, bert_tokens, x, range_sent1, range_sent2, orig_to_tok_map, l, l_tokens,
metric = "l2", n_max = 5, alpha = 0.075, mode = "train", normalize=False, nb=0):
idx_arg1_all, idx_arg2_all, all_ngrams = None, None, None
outputs = self.model(x)
states = outputs[0][0] #[seq_len, 768]
if metric == "cosine" or normalize:
states = states / (torch.norm(states, dim = 1, keepdim = True)+1e-8)
states = self.linear_arg1_1(states)
arg1_sent1, arg2_sent1 = range_sent1
arg1_sent2, arg2_sent2 = range_sent2
sent1_arg1_vec, sent1_arg2_vec = states[arg1_sent1[0]:arg1_sent1[1]].mean(dim=0), states[arg2_sent1[0]:arg2_sent1[1]].mean(dim=0)
sent2_arg1_vec, sent2_arg2_vec = states[arg1_sent2[0]:arg1_sent2[1]].mean(dim=0), states[arg2_sent2[0]:arg2_sent2[1]].mean(dim=0)
all_false_ngrams_ranges = get_all_ngrams_spans(len(states), [arg1_sent1, arg1_sent2, arg2_sent1, arg2_sent2], start_ind = l_tokens,
n_max = n_max)
negatives = [states[ngram[0]:ngram[1]].mean(dim=0) for ngram in all_false_ngrams_ranges]
negatives_arg1 = negatives + [sent1_arg2_vec, sent2_arg2_vec]
negatives_arg2 = negatives + [sent1_arg1_vec, sent2_arg1_vec]
negatives_arg1 = torch.stack(negatives_arg1).to(self.device)
negatives_arg2 = torch.stack(negatives_arg2).to(self.device)
if mode == "eval":
all_ngrams = get_all_ngrams_spans(len(states), [], start_ind = l_tokens,
n_max = n_max)
ngrams = [states[ngram[0]:ngram[1]].mean(dim=0) for ngram in all_ngrams]
ngrams = torch.stack(ngrams).to(self.device)
if metric == "l2":
dists_arg1 = torch.sqrt(((negatives_arg1-sent1_arg1_vec)**2).sum(dim = 1))
dists_arg2 = torch.sqrt(((negatives_arg2-sent1_arg2_vec)**2).sum(dim = 1))
dist_arg1_gold = (sent1_arg1_vec - sent2_arg1_vec).norm()
dist_arg2_gold = (sent1_arg2_vec - sent2_arg2_vec).norm()
if mode == "eval":
dist_arg1_all = torch.sqrt(((ngrams-sent1_arg1_vec)**2).sum(dim = 1))
dist_arg2_all = torch.sqrt(((ngrams-sent1_arg2_vec)**2).sum(dim = 1))
idx_arg1_all = torch.argsort(dist_arg1_all).detach().cpu().numpy()
idx_arg2_all = torch.argsort(dist_arg2_all).detach().cpu().numpy()
elif metric == "cosine":
dists_arg1 = 1 - negatives_arg1@sent1_arg1_vec.T
dists_arg2 = 1 - negatives_arg2@sent1_arg2_vec.T
dist_arg1_gold = 1 - sent1_arg1_vec@sent2_arg1_vec.T
dist_arg2_gold = 1 - sent1_arg2_vec@sent2_arg2_vec.T
idx_arg1 = torch.argsort(dists_arg1).detach().cpu().numpy()
idx_arg2 = torch.argsort(dists_arg2).detach().cpu().numpy()
l = max(int(len(negatives)*0.3),1)
k = random.choice(range(min(len(negatives), 2))) if np.random.random() < 0.5 else random.choice(range(l))
dist_arg1_argmax = dists_arg1[idx_arg1[k]]
dist_arg2_argmax = dists_arg2[idx_arg2[k]]
loss_arg1 = torch.max(torch.zeros(1).to(self.device), dist_arg1_gold - dist_arg1_argmax + alpha)
loss_arg2 = torch.max(torch.zeros(1).to(self.device), dist_arg2_gold - dist_arg2_argmax + alpha)
# softmax triplet
z = torch.max(dist_arg1_argmax, dist_arg1_gold)
temp = 1
pos_arg1 = torch.exp((dist_arg1_gold - z)/temp)
neg_arg1 = torch.exp((dist_arg1_argmax - z)/temp)
loss_arg1 = (pos_arg1 / (pos_arg1 + neg_arg1))**2
z = torch.max(dist_arg2_argmax, dist_arg2_gold)
pos_arg2 = torch.exp((dist_arg2_gold - z)/temp)
neg_arg2 = torch.exp((dist_arg2_argmax - z)/temp)
loss_arg2 = (pos_arg2 / (pos_arg2 + neg_arg2))**2
loss = states[0,0:1]**2 #torch.zeros(1).to(self.device)
loss2_isnan = np.isnan(loss_arg2.detach().cpu().numpy().item())
loss1_isnan = np.isnan(loss_arg1.detach().cpu().numpy().item())
if not loss2_isnan:
loss += loss_arg2
if not loss1_isnan:
loss += loss_arg1
if loss1_isnan or loss2_isnan:
print("ERROR: nan loss", loss1_isnan, loss2_isnan, nb)
return
return loss, idx_arg1, idx_arg2, idx_arg1_all, idx_arg2_all, all_false_ngrams_ranges, all_ngrams
#return loss, np.argsort(dists_arg1+mask_gold_arg1)
def get_entity_range(index_orig, orig_to_tok_map):
m = min(orig_to_tok_map.keys())
if orig_to_tok_map[index_orig] == 1: return (1,2)
if index_orig == 0: return (1, orig_to_tok_map[index_orig] + 1)
before = index_orig - 1
tok_range = (orig_to_tok_map[before] + 1, orig_to_tok_map[index_orig] + 1)
return tok_range
def get_entity_range_multiword_expression(start_and_end, orig_to_tok_map):
start, end = start_and_end
start_range = get_entity_range(start, orig_to_tok_map)
end_range = get_entity_range(end, orig_to_tok_map)
return [start_range[0], end_range[1]]
def get_tok_to_orig_map(orig_to_tok_map, num_words, num_tokens):
ranges = [get_entity_range(i, orig_to_tok_map) for i in range(num_words)]
tok_to_orig_map = {}
for i in range(num_words):
min,max = ranges[i]
for tok in range(min,max):
tok_to_orig_map[tok] = i
for tok in range(num_tokens):
if tok not in tok_to_orig_map:
tok_to_orig_map[tok] = num_words -1
return tok_to_orig_map
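# Minimal sketch (added for illustration; not in the original file): `get_tok_to_orig_map` inverts the
# word -> last-wordpiece mapping produced by `BertModel.tokenize` so that every wordpiece position
# (including [CLS]/[SEP]) points back at a word index. The toy mapping below assumes a 3-word sentence
# whose middle word was split into two wordpieces ([CLS] is position 0, [SEP] is position 5).
_EXAMPLE_ORIG_TO_TOK = {0: 1, 1: 3, 2: 4}
_EXAMPLE_TOK_TO_ORIG = get_tok_to_orig_map(_EXAMPLE_ORIG_TO_TOK, 3, 6)
# _EXAMPLE_TOK_TO_ORIG == {0: 2, 1: 0, 2: 1, 3: 1, 4: 2, 5: 2}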
def get_all_ngrams_spans(seq_len, forbidden_ranges: List[tuple], start_ind = 0, n_max = 15):
def is_intersecting(ngram, forbidden_ranges):
return [(r[1] > ngram[0] >= r[0]) or(r[1] > ngram[1] >= r[0]) for r in forbidden_ranges]
all_ngrams = []
for n in range(2,n_max+1):
ngrams = list(get_ngrams(range(start_ind, seq_len), n))
all_ngrams.extend(ngrams)
all_ngrams = [(ngram[0], ngram[-1]) for ngram in all_ngrams]
all_ngrams = [ngram for ngram in all_ngrams if not any(is_intersecting(ngram, forbidden_ranges))]
return all_ngrams
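# Minimal sketch (added for illustration; not in the original file): enumerate every candidate
# (start, end) span of 2..n_max tokens over a toy 6-token sequence, skipping spans that overlap the
# forbidden range (1, 3); only spans lying entirely to the right of the forbidden tokens survive.
_EXAMPLE_NGRAM_SPANS = get_all_ngrams_spans(6, [(1, 3)], start_ind=0, n_max=3)
# _EXAMPLE_NGRAM_SPANS == [(3, 4), (4, 5), (3, 5)]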
def add_arguments(sent:str, arg1_start, arg1_end, arg2_start, arg2_end):
s_lst = sent.split(" ")
if arg1_start > arg2_start:
arg1_start, arg2_start = arg2_start, arg1_start
arg1_end, arg2_end = arg2_end, arg1_end
arg1_str, arg2_str = "<<ARG2:", "<<ARG1:"
else:
arg1_str, arg2_str = "<<ARG1:", "<<ARG2:"
s_with_args = s_lst[:arg1_start] + [arg1_str] + s_lst[arg1_start:arg1_end+1] + [">>"] + s_lst[arg1_end+1:arg2_start] + [arg2_str] + s_lst[arg2_start:arg2_end+1] + [">>"] +s_lst[arg2_end+1:]
#s_with_args = s_lst[:arg1_start] + [arg1_str+s_lst[arg1_ind]] + s_lst[arg1_ind+1:arg2_ind] + [arg2_str+s_lst[arg2_ind]] + s_lst[arg2_ind+1:]
s_with_args = " ".join(s_with_args).replace("ARG1: ", "ARG1:").replace("ARG2: ", "ARG2:")
s_with_args = s_with_args.replace(" >>", ">>")
return s_with_args
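# Minimal sketch (added for illustration; not in the original file): mark the two argument spans of a
# plain sentence with the same "<<ARG1:...>>" / "<<ARG2:...>>" convention used for the SPIKE query.
_EXAMPLE_MARKED = add_arguments("aspirin reduces fever in adults", 0, 0, 2, 2)
# _EXAMPLE_MARKED == "<<ARG1:aspirin>> reduces <<ARG2:fever>> in adults"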
def prepare_example(sent1, sent2, arg1_sent1, arg2_sent1):
sent1 = add_arguments(sent1, arg1_sent1[0], arg1_sent1[1], arg2_sent1[0], arg2_sent1[1])
l = len(sent1.split(" ")) + 1
#arg1_sent1[0] += l
#arg1_sent1[1] += l
#arg2_sent1[0] += l
#arg2_sent1[1] += l
sents_concat = sent1 + " ***** " + sent2 #sents_concat.split(" ")[l] is the first token in the 2nd sent
#create idx tensor. # 1stdim: sents, 2st dim: arg, 3st dim: start and end
idx = [[[arg1_sent1[0], arg1_sent1[1]], [arg2_sent1[0], arg2_sent1[1]]], [[0, 1], [0, 1]] ]
sent2_with_args = sent2
return sents_concat, torch.tensor(idx).int(), l, sent2_with_args
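# Minimal sketch (added for illustration; not in the original file): build a query/candidate pair. The
# returned `l` is the whitespace-token offset of the first word of the candidate sentence inside the
# " ***** "-joined concatenation, which the alignment code later uses to separate the two sentences.
_ex_concat, _ex_idx, _ex_l, _ = prepare_example(
    "aspirin reduces fever", "ibuprofen lowers temperature", [0, 0], [2, 2]
)
# _ex_concat == "<<ARG1:aspirin>> reduces <<ARG2:fever>> ***** ibuprofen lowers temperature"
# _ex_l == 4 and _ex_concat.split(" ")[_ex_l] == "ibuprofen"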
def evaluate_model(sents1, sents2, arg1_sent1, arg2_sent1, model, max_ngrams = 5, num_examples = 200):
preds = []
count = 0
for i in range(len(sents1)):
sents_concat, idx, l, sent2_with_args = prepare_example(sents1[i], sents2[i], arg1_sent1[i], arg2_sent1[i])
idx = idx.detach().cpu().numpy()
bert_tokens, orig_to_tok_map, tok_to_orig_map, tokens_tensor = model.tokenize(sents_concat.split(" "))
l_tokens = len(bert_tokens[:orig_to_tok_map[l-1]])
sent1_range_arg1 = get_entity_range_multiword_expression(idx[0][0], orig_to_tok_map)
sent1_range_arg2 = get_entity_range_multiword_expression(idx[0][1], orig_to_tok_map)
sent2_range_arg1 = get_entity_range_multiword_expression(idx[1][0], orig_to_tok_map)
sent2_range_arg2 = get_entity_range_multiword_expression(idx[1][1], orig_to_tok_map)
range_sent1 = (sent1_range_arg1,sent1_range_arg2)
range_sent2 = (sent2_range_arg1,sent2_range_arg2)
with torch.no_grad():
loss, idx_arg1, idx_arg2, idx_arg1_all, idx_arg2_all, all_false_ngrams_ranges, all_ngrams = model.forward_with_loss_calculation(bert_tokens, tokens_tensor, range_sent1, range_sent2, orig_to_tok_map, l, l_tokens, mode = "eval", n_max=max_ngrams)
preds.append({"sent": sents_concat, "tokens": bert_tokens, "tok2orig": tok_to_orig_map, "orig2tok": orig_to_tok_map,
"preds_arg1_tokens": idx_arg1_all, "preds_arg2_tokens": idx_arg2_all, "false_ngrams": all_false_ngrams_ranges,
"all_ngrams": all_ngrams, "gold_arg1_range_tokens": sent2_range_arg1, "gold_arg2_range_tokens": sent2_range_arg2, "l": l})
return preds
def main(model, results_sents, spike_df, num_results, max_ngrams):
if len(spike_df) > 100:
finetune(model, spike_df)
captures = []
captures_tuples = []
def pretty_print(sent, idx_arg1, idx_arg2):
sent_lst = sent.split(" ")
sent = " ".join(sent_lst[:idx_arg1[0]]) + " " + colored(" ".join(sent_lst[idx_arg1[0]:idx_arg1[1]]), "red") + " " + " ".join(sent_lst[idx_arg1[1]:])
sent_lst = sent.split(" ")
sent = " ".join(sent_lst[:idx_arg2[0]]) + " " + colored(" ".join(sent_lst[idx_arg2[0]:idx_arg2[1]]), "blue") + " " + " ".join(sent_lst[idx_arg2[1]:])
return sent
def perform_annotation(sent, arg_borders):
def is_between(k, borders):
return len([(s, e) for (s, e) in borders if s <= k < e]) != 0
sent_lst = sent.split(" ")
sent_new = []
arg_colors = ["#8ef", "#fea", "#faa", "#fea", "#8ef", "#afa", "#d8ff35", "#8c443b", "#452963"]
for i, w in enumerate(sent_lst):
for arg in range(len(arg_borders)):
if is_between(i, [arg_borders[arg]]):
sent_new.append((w, "ARG{}".format(arg+1), arg_colors[arg]))
break
else:
sent_new.append(" " + w + " ")
return sent_new
results_sents = results_sents[:num_results]
query_sents = spike_df["sentence_text"].tolist()
query_arg1_starts = spike_df["arg1_first_index"]
query_arg1_ends = spike_df["arg1_last_index"]
query_arg2_starts = spike_df["arg2_first_index"]
query_arg2_ends = spike_df["arg2_last_index"]
query_used = query_sents[0] # use first query in all examples.
query_used_arg1 = [query_arg1_starts[0], query_arg1_ends[0]]
query_used_arg2 = [query_arg2_starts[0], query_arg2_ends[0]]
sents2 = results_sents
sents1 = [query_used] * len(sents2)
query_used_arg1 = [query_used_arg1] * len(sents2)
query_used_arg2 = [query_used_arg2] * len(sents2)
results = evaluate_model(sents1, sents2, query_used_arg1, query_used_arg2, model, max_ngrams = max_ngrams, num_examples = len(sents1))
annotated = []
for p in results:
pred_arg1, pred_arg2 = p["preds_arg1_tokens"], p["preds_arg2_tokens"]
ngram_pred_arg1_idx, ngram_pred_arg2_idx = p["all_ngrams"][pred_arg1[0]], p["all_ngrams"][pred_arg2[0]]
arg1_start = p["tok2orig"][ngram_pred_arg1_idx[0]]
arg1_end = p["tok2orig"][ngram_pred_arg1_idx[1]]
arg2_start = p["tok2orig"][ngram_pred_arg2_idx[0]]
arg2_end = p["tok2orig"][ngram_pred_arg2_idx[1]]
i = 0
while i < 4:
ngram_pred_arg2_idx = p["all_ngrams"][pred_arg2[i]]
arg2_start, arg2_end = p["tok2orig"][ngram_pred_arg2_idx[0]], p["tok2orig"][ngram_pred_arg2_idx[1]]
if ((arg2_start > arg1_end) and (arg2_end > arg1_end)) or ((arg2_start < arg1_start) and (arg2_end < arg1_start)):
break
i += 1
sent = p["sent"]
sent_lst = sent.split(" ")
arg1_str = " ".join(sent_lst[arg1_start:arg1_end])
arg2_str = " ".join(sent_lst[arg2_start:arg2_end])
captures.append((arg1_str, arg2_str))
#captures_tuples.append("{}; {}".format(arg1_str, arg2_str))
captures_tuples.append((arg1_str, arg2_str))
annotated_sent = perform_annotation(sent, [[arg1_start, arg1_end], [arg2_start, arg2_end]])
annotated_sent = annotated_sent[p["l"]:]
annotated.append(annotated_sent)
# aggregate arguments
args1, args2 = list(zip(*captures))
arg1_counter, arg2_counter, tuples_counter = Counter(args1), Counter(args2), Counter(captures_tuples)
return annotated, arg1_counter.most_common(10000), arg2_counter.most_common(10000), tuples_counter.most_common(10000), captures_tuples
| covid-sim-master | api/covid-ai2/alignment_supervised.py |
import os
from pathlib import Path
ABS_PATH_OF_REARRANGE_TOP_LEVEL_DIR = os.path.abspath(os.path.dirname(Path(__file__)))
IOU_THRESHOLD = 0.5
OPENNESS_THRESHOLD = 0.2
POSITION_DIFF_BARRIER = 2.0
| ai2thor-rearrangement-main | rearrange_constants.py |
"""Inference loop for the AI2-THOR object rearrangement task."""
from allenact.utils.misc_utils import NumpyJSONEncoder
from baseline_configs.one_phase.one_phase_rgb_base import (
OnePhaseRGBBaseExperimentConfig,
)
from baseline_configs.two_phase.two_phase_rgb_base import (
TwoPhaseRGBBaseExperimentConfig,
)
from rearrange.tasks import RearrangeTaskSampler, WalkthroughTask, UnshuffleTask
# First let's generate our task sampler that will let us run through all of the
# data points in our training set.
task_sampler_params = TwoPhaseRGBBaseExperimentConfig.stagewise_task_sampler_args(
stage="train", process_ind=0, total_processes=1,
)
two_phase_rgb_task_sampler: RearrangeTaskSampler = TwoPhaseRGBBaseExperimentConfig.make_sampler_fn(
**task_sampler_params,
force_cache_reset=True, # cache used for efficiency during training, should be True during inference
only_one_unshuffle_per_walkthrough=True, # used for efficiency during training, should be False during inference
epochs=1,
)
how_many_unique_datapoints = two_phase_rgb_task_sampler.total_unique
num_tasks_to_do = 5
print(
f"Sampling {num_tasks_to_do} tasks from the Two-Phase TRAINING dataset"
f" ({how_many_unique_datapoints} unique tasks) and taking random actions in them. "
)
for i_task in range(num_tasks_to_do):
print(f"\nStarting task {i_task}")
walkthrough_task = two_phase_rgb_task_sampler.next_task()
print(
f"Sampled task is from the "
f" '{two_phase_rgb_task_sampler.current_task_spec.stage}' stage and has"
f" unique id '{two_phase_rgb_task_sampler.current_task_spec.unique_id}'"
)
assert isinstance(walkthrough_task, WalkthroughTask)
# Take random actions in the walkthrough task until the task is done
while not walkthrough_task.is_done():
observations = walkthrough_task.get_observations()
# Take a random action
action_ind = walkthrough_task.action_space.sample()
if walkthrough_task.num_steps_taken() % 10 == 0:
print(
f"Walkthrough phase (step {walkthrough_task.num_steps_taken()}):"
f" taking action {walkthrough_task.action_names()[action_ind]}"
)
walkthrough_task.step(action=action_ind)
# Get the next task from the task sampler, this will be the task
# of rearranging the environment so that it is back in the same configuration as
# it was during the walkthrough task.
unshuffle_task: UnshuffleTask = two_phase_rgb_task_sampler.next_task()
while not unshuffle_task.is_done():
observations = unshuffle_task.get_observations()
# Take a random action
action_ind = unshuffle_task.action_space.sample()
if unshuffle_task.num_steps_taken() % 10 == 0:
print(
f"Unshuffle phase (step {unshuffle_task.num_steps_taken()}):"
f" taking action {unshuffle_task.action_names()[action_ind]}"
)
unshuffle_task.step(action=action_ind)
print(f"Both phases complete, metrics: '{unshuffle_task.metrics()}'")
print(f"\nFinished {num_tasks_to_do} Two-Phase tasks.")
two_phase_rgb_task_sampler.close()
# Now let's create a One Phase task sampler on the validation dataset.
task_sampler_params = OnePhaseRGBBaseExperimentConfig.stagewise_task_sampler_args(
stage="valid", process_ind=0, total_processes=1,
)
one_phase_rgb_task_sampler: RearrangeTaskSampler = (
OnePhaseRGBBaseExperimentConfig.make_sampler_fn(
**task_sampler_params, force_cache_reset=False, epochs=1,
)
)
how_many_unique_datapoints = one_phase_rgb_task_sampler.total_unique
print(
f"\n\nSampling {num_tasks_to_do} tasks from the One-Phase VALIDATION dataset"
f" ({how_many_unique_datapoints} unique tasks) and taking random actions in them. "
)
for i_task in range(num_tasks_to_do):
print(f"\nStarting task {i_task}")
# Get the next task from the task sampler, for One Phase Rearrangement
# there is only the unshuffle phase (walkthrough happens at the same time implicitly).
unshuffle_task: UnshuffleTask = one_phase_rgb_task_sampler.next_task()
print(
f"Sampled task is from the "
f" '{one_phase_rgb_task_sampler.current_task_spec.stage}' stage and has"
f" unique id '{one_phase_rgb_task_sampler.current_task_spec.unique_id}'"
)
while not unshuffle_task.is_done():
observations = unshuffle_task.get_observations()
# Take a random action
action_ind = unshuffle_task.action_space.sample()
if unshuffle_task.num_steps_taken() % 10 == 0:
print(
f"Unshuffle phase (step {unshuffle_task.num_steps_taken()}):"
f" taking action {unshuffle_task.action_names()[action_ind]}"
)
unshuffle_task.step(action=action_ind)
print(f"Both phases complete, metrics: '{unshuffle_task.metrics()}'")
one_phase_rgb_task_sampler.close()
print(f"\nFinished {num_tasks_to_do} One-Phase tasks.")
# When submitting to the leaderboard we will expect you to have evaluated your model on (1) a subset of the
# train set, (2) a subset of the train_unseen, (3) the validation set, and (4) the test set. Running each of these
# evaluations separately is a bit tedious and so we provide a "combined" dataset that combine the above four
# collections together and allows for running through each of them sequentially.
#
# In the following we show how you can iterate through the combined dataset and how we expect your
# agent's results to be saved (see `my_leaderboard_submission` below) before they can be submitted
# to the leaderboard. In practice, sequentially evaluating your agent on each task might be quite slow
# and we recommend paralleling your evaluation. Note that this is done automatically if you run your inference
# using AllenAct, see the last section of our README for details on how this can be done (note that this
# requires that your model/agent is compatible with AllenAct, this is easiest if you trained your agent with
# AllenAct initially).
task_sampler_params = OnePhaseRGBBaseExperimentConfig.stagewise_task_sampler_args(
stage="combined", process_ind=0, total_processes=1,
)
one_phase_rgb_combined_task_sampler: RearrangeTaskSampler = (
OnePhaseRGBBaseExperimentConfig.make_sampler_fn(
**task_sampler_params, force_cache_reset=True, epochs=1,
)
)
how_many_unique_datapoints = one_phase_rgb_combined_task_sampler.total_unique
print(
f"\n\nSampling {num_tasks_to_do} tasks from the One-Phase COMBINED dataset"
f" ({how_many_unique_datapoints} unique tasks) and taking random actions in them. "
)
my_leaderboard_submission = {}
for i_task in range(num_tasks_to_do):
print(f"\nStarting task {i_task}")
# Get the next task from the task sampler, for One Phase Rearrangement
# there is only the unshuffle phase (walkthrough happens at the same time implicitly).
unshuffle_task: UnshuffleTask = one_phase_rgb_combined_task_sampler.next_task()
print(
f"Sampled task is from the "
f" '{one_phase_rgb_combined_task_sampler.current_task_spec.stage}' stage and has"
f" unique id '{one_phase_rgb_combined_task_sampler.current_task_spec.unique_id}'"
)
while not unshuffle_task.is_done():
observations = unshuffle_task.get_observations()
# Take a random action
action_ind = unshuffle_task.action_space.sample()
if unshuffle_task.num_steps_taken() % 10 == 0:
print(
f"Unshuffle phase (step {unshuffle_task.num_steps_taken()}):"
f" taking action {unshuffle_task.action_names()[action_ind]}"
)
unshuffle_task.step(action=action_ind)
metrics = unshuffle_task.metrics()
print(f"Both phases complete, metrics: '{metrics}'")
task_info = metrics["task_info"]
del metrics["task_info"]
my_leaderboard_submission[task_info["unique_id"]] = {**task_info, **metrics}
# Example of saving a gzip'ed file that can be submitted to the leaderboard. Note that we're only
# iterating over `num_tasks_to_do` datapoints in the above loop, to actually make a submission you'd
# have to iterate over all of them.
import json
import gzip
import os
save_path = "/YOUR/FAVORITE/SAVE/PATH/submission.json.gz"
if os.path.exists(os.path.dirname(save_path)):
print(f"Saving example submission file to {save_path}")
submission_json_str = json.dumps(my_leaderboard_submission, cls=NumpyJSONEncoder)
with gzip.open(save_path, "w") as f:
f.write(submission_json_str.encode("utf-8"))
else:
print(
f"If you'd like to save an example leaderboard submission, you'll need to edit"
"`/YOUR/FAVORITE/SAVE/PATH/` so that it references an existing directory."
)
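# Aside (added; not part of the original walkthrough): a submission written this way can be re-opened
# for a quick sanity check using only the standard library, e.g.
#   with gzip.open(save_path, "r") as f:
#       reloaded = json.loads(f.read().decode("utf-8"))
#   print(f"{len(reloaded)} episodes in the submission")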
one_phase_rgb_combined_task_sampler.close()
print(f"\nFinished {num_tasks_to_do} One-Phase tasks.")
| ai2thor-rearrangement-main | example.py |
"""Include the Task and TaskSampler to train on a single unshuffle instance."""
import copy
import itertools
import os
import random
import traceback
from abc import ABC
from typing import Any, Tuple, Optional, Dict, Sequence, List, Union, cast, Set
import canonicaljson
import compress_pickle
import gym.spaces
import numpy as np
import stringcase
from allenact.base_abstractions.misc import RLStepResult
from allenact.base_abstractions.sensor import SensorSuite
from allenact.base_abstractions.task import Task, TaskSampler
from allenact.utils.misc_utils import md5_hash_str_as_int
from allenact.utils.system import get_logger
from allenact_plugins.ithor_plugin.ithor_util import round_to_factor
from rearrange.constants import STARTER_DATA_DIR, STEP_SIZE
from rearrange.environment import (
RearrangeTHOREnvironment,
RearrangeTaskSpec,
)
from rearrange.expert import (
GreedyUnshuffleExpert,
ShortestPathNavigatorTHOR,
)
from rearrange.utils import (
RearrangeActionSpace,
include_object_data,
)
from rearrange_constants import OPENNESS_THRESHOLD
class AbstractRearrangeTask(Task, ABC):
@staticmethod
def agent_location_to_tuple(
agent_loc: Dict[str, Union[Dict[str, float], bool, float, int]]
) -> Tuple[float, float, int, int, int]:
if "position" in agent_loc:
agent_loc = {
"x": agent_loc["position"]["x"],
"y": agent_loc["position"]["y"],
"z": agent_loc["position"]["z"],
"rotation": agent_loc["rotation"]["y"],
"horizon": agent_loc["cameraHorizon"],
"standing": agent_loc.get("isStanding"),
}
return (
round(agent_loc["x"], 2),
round(agent_loc["z"], 2),
round_to_factor(agent_loc["rotation"], 90) % 360,
1 * agent_loc["standing"],
round_to_factor(agent_loc["horizon"], 30) % 360,
)
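    # Added illustration (not in the original file): the discretization above snaps the agent's rotation
    # to 90-degree increments and the camera horizon to 30-degree increments. For example, an agent at
    #   {"x": 1.234, "y": 0.9, "z": -0.757, "rotation": 271.0, "horizon": 29.0, "standing": True}
    # maps to the hashable tuple (1.23, -0.76, 270, 1, 30).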
@property
def agent_location_tuple(self) -> Tuple[float, float, int, int, int]:
return self.agent_location_to_tuple(self.env.get_agent_location())
class UnshuffleTask(AbstractRearrangeTask):
def __init__(
self,
sensors: SensorSuite,
unshuffle_env: RearrangeTHOREnvironment,
walkthrough_env: RearrangeTHOREnvironment,
max_steps: int,
discrete_actions: Tuple[str, ...],
require_done_action: bool = False,
locations_visited_in_walkthrough: Optional[np.ndarray] = None,
object_names_seen_in_walkthrough: Set[str] = None,
metrics_from_walkthrough: Optional[Dict[str, Any]] = None,
task_spec_in_metrics: bool = False,
) -> None:
"""Create a new unshuffle task."""
super().__init__(
env=unshuffle_env, sensors=sensors, task_info=dict(), max_steps=max_steps
)
self.unshuffle_env = unshuffle_env
self.walkthrough_env = walkthrough_env
self.discrete_actions = discrete_actions
self.require_done_action = require_done_action
self.locations_visited_in_walkthrough = locations_visited_in_walkthrough
self.object_names_seen_in_walkthrough = object_names_seen_in_walkthrough
self.metrics_from_walkthrough = metrics_from_walkthrough
self.task_spec_in_metrics = task_spec_in_metrics
self._took_end_action: bool = False
# TODO: add better typing to the dicts
self._previous_state_trackers: Optional[Dict[str, Any]] = None
self.states_visited: dict = dict(
picked_up=dict(soap_bottle=False, pan=False, knife=False),
opened_drawer=False,
successfully_placed=dict(soap_bottle=False, pan=False, knife=False),
)
ups, gps, cps = self.unshuffle_env.poses
self.unshuffle_task_spec_hash = md5_hash_str_as_int(
canonicaljson.encode_canonical_json(
self.unshuffle_env.current_task_spec.__dict__
).decode("utf-8")
)
seeded_rand = random.Random(self.unshuffle_task_spec_hash)
self.openable_obj_name_to_openness_iter = {}
openable_onames = []
priority_onames = []
for up, gp in zip(ups, gps):
if up["openness"] is not None:
openable_onames.append(up["name"])
openness_0 = up["openness"]
if gp["openness"] == up["openness"]:
o = up["openness"]
ot = OPENNESS_THRESHOLD
a, b, c, d = 0, max(o - ot, 0.0), min(o + ot, 1.0), 1.0
openness_1 = (
seeded_rand.uniform(a, b)
if seeded_rand.random() < (b - a) / ((b - a) + (d - c))
else seeded_rand.uniform(c, d)
)
else:
priority_onames.append(up["name"])
openness_1 = gp["openness"]
# Creates an iterator that toggles between the values a and b indefinitely. The idea
# here is that we want to toggle between the two openness values for the object
self.openable_obj_name_to_openness_iter[up["name"]] = (lambda a, b: iter(
(
a if i % 2 == 0 else b
for i in itertools.count()
)
))(openness_1, openness_0)
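                # Added note (not in the original file): each next() call on this iterator alternates
                # between openness_1 (the value the agent should set first, i.e. the goal or a randomly
                # perturbed openness) and openness_0 (the object's openness at the start of the episode),
                # so repeated `open_by_type_*` actions on the same object toggle it back and forth:
                #   next(it) -> openness_1, next(it) -> openness_0, next(it) -> openness_1, ...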
priority_onames.sort()
seeded_rand.shuffle(priority_onames)
openable_onames = sorted(list(set(openable_onames) - set(priority_onames)))
seeded_rand.shuffle(openable_onames)
self.openable_obj_name_to_priority = {
oname: i for i, oname in enumerate(priority_onames + openable_onames)
}
self.start_energies = self.unshuffle_env.pose_difference_energy(
goal_pose=gps, cur_pose=cps
)
self.last_pose_energy = self.start_energies.sum()
self.greedy_expert: Optional[GreedyUnshuffleExpert] = None
self.actions_taken = []
self.actions_taken_success = []
self.agent_locs = [self.unshuffle_env.get_agent_location()]
def query_expert(self, **kwargs) -> Tuple[Any, bool]:
if self.greedy_expert is None:
if not hasattr(self.unshuffle_env, "shortest_path_navigator"):
# TODO: This is a bit hacky
self.unshuffle_env.shortest_path_navigator = ShortestPathNavigatorTHOR(
controller=self.unshuffle_env.controller,
grid_size=STEP_SIZE,
include_move_left_right=all(
f"move_{k}" in self.action_names() for k in ["left", "right"]
),
)
self.greedy_expert = GreedyUnshuffleExpert(
task=self,
shortest_path_navigator=self.unshuffle_env.shortest_path_navigator,
)
if self.object_names_seen_in_walkthrough is not None:
# The expert shouldn't act on objects the walkthrougher hasn't seen!
c = self.unshuffle_env.controller
with include_object_data(c):
for o in c.last_event.metadata["objects"]:
if o["name"] not in self.object_names_seen_in_walkthrough:
self.greedy_expert.object_name_to_priority[o["name"]] = (
self.greedy_expert.max_priority_per_object + 1
)
action = self.greedy_expert.expert_action
if action is None:
return 0, False
else:
return action, True
@property
def action_space(self) -> gym.spaces.Discrete:
"""Return the simplified action space in RearrangeMode.SNAP mode."""
return gym.spaces.Discrete(len(self.action_names()))
def close(self) -> None:
"""Close the AI2-THOR rearrangement environment controllers."""
try:
self.unshuffle_env.stop()
except Exception as _:
pass
try:
self.walkthrough_env.stop()
except Exception as _:
pass
def metrics(self) -> Dict[str, Any]:
if not self.is_done():
return {}
env = self.unshuffle_env
ips, gps, cps = env.poses
start_energies = self.start_energies
end_energies = env.pose_difference_energy(gps, cps)
start_energy = start_energies.sum()
end_energy = end_energies.sum()
start_misplaceds = start_energies > 0.0
end_misplaceds = end_energies > 0.0
num_broken = sum(cp["broken"] for cp in cps)
num_initially_misplaced = start_misplaceds.sum()
num_fixed = num_initially_misplaced - (start_misplaceds & end_misplaceds).sum()
num_newly_misplaced = (end_misplaceds & np.logical_not(start_misplaceds)).sum()
prop_fixed = (
1.0 if num_initially_misplaced == 0 else num_fixed / num_initially_misplaced
)
metrics = {
**super().metrics(),
**{
"start_energy": start_energy,
"end_energy": end_energy,
"success": float(end_energy == 0),
"prop_fixed": prop_fixed,
"prop_fixed_strict": float((num_newly_misplaced == 0) * prop_fixed),
"num_misplaced": end_misplaceds.sum(),
"num_newly_misplaced": num_newly_misplaced.sum(),
"num_initially_misplaced": num_initially_misplaced,
"num_fixed": num_fixed.sum(),
"num_broken": num_broken,
},
}
try:
change_energies = env.pose_difference_energy(ips, cps)
change_energy = change_energies.sum()
changeds = change_energies > 0.0
metrics["change_energy"] = change_energy
metrics["num_changed"] = changeds.sum()
except AssertionError as _:
pass
if num_initially_misplaced > 0:
metrics["prop_misplaced"] = end_misplaceds.sum() / num_initially_misplaced
if start_energy > 0:
metrics["energy_prop"] = end_energy / start_energy
task_info = metrics["task_info"]
task_info["scene"] = self.unshuffle_env.scene
task_info["index"] = self.unshuffle_env.current_task_spec.metrics.get("index")
task_info["stage"] = self.unshuffle_env.current_task_spec.stage
del metrics["task_info"]
if self.task_spec_in_metrics:
task_info["task_spec"] = {**self.unshuffle_env.current_task_spec.__dict__}
task_info["poses"] = self.unshuffle_env.poses
task_info["gps_vs_cps"] = self.unshuffle_env.compare_poses(gps, cps)
task_info["ips_vs_cps"] = self.unshuffle_env.compare_poses(ips, cps)
task_info["gps_vs_ips"] = self.unshuffle_env.compare_poses(gps, ips)
task_info["unshuffle_actions"] = self.actions_taken
task_info["unshuffle_action_successes"] = self.actions_taken_success
task_info["unique_id"] = self.unshuffle_env.current_task_spec.unique_id
if self.metrics_from_walkthrough is not None:
mes = {**self.metrics_from_walkthrough}
task_info["walkthrough_actions"] = mes["task_info"]["walkthrough_actions"]
task_info["walkthrough_action_successes"] = mes["task_info"][
"walkthrough_action_successes"
]
del mes[
"task_info"
] # Otherwise already summarized by the unshuffle task info
metrics = {
"task_info": task_info,
"ep_length": metrics["ep_length"] + mes["walkthrough/ep_length"],
**{f"unshuffle/{k}": v for k, v in metrics.items()},
**mes,
}
else:
metrics = {
"task_info": task_info,
**{f"unshuffle/{k}": v for k, v in metrics.items()},
}
return metrics
def class_action_names(self, **kwargs) -> Tuple[str, ...]:
raise RuntimeError("This should not be called, use `action_names` instead.")
def action_names(self, **kwargs) -> Tuple[str, ...]:
"""Return the easy, simplified task's class names."""
return self.discrete_actions
def render(self, *args, **kwargs) -> Dict[str, Dict[str, np.array]]:
"""Return the rgb/depth obs from both walkthrough and unshuffle."""
# TODO: eventually update when the phases are separated.
# walkthrough_obs = self.walkthrough_env.observation
unshuffle_obs = self.unshuffle_env.observation
return {
# "walkthrough": {"rgb": walkthrough_obs[0], "depth": walkthrough_obs[1]},
"unshuffle": {"rgb": unshuffle_obs[0], "depth": unshuffle_obs[1]},
}
def reached_terminal_state(self) -> bool:
"""Return if end of current episode has been reached."""
return (self.require_done_action and self._took_end_action) or (
(not self.require_done_action)
and self.unshuffle_env.all_rearranged_or_broken
)
def _judge(self) -> float:
"""Return the reward from a new (s, a, s')."""
# TODO: Log reward scenarios.
_, gps, cps = self.unshuffle_env.poses
cur_pose_energy = self.unshuffle_env.pose_difference_energy(
goal_pose=gps, cur_pose=cps
).sum()
if self.is_done():
return -cur_pose_energy
energy_change = self.last_pose_energy - cur_pose_energy
self.last_pose_energy = cur_pose_energy
self.last_poses = cps
return energy_change
def _step(self, action: int) -> RLStepResult:
"""
action : is the index of the action from self.action_names()
"""
# parse the action data
action_name = self.action_names()[action]
if action_name.startswith("pickup"):
# NOTE: due to the object_id's not being in the metadata for speedups,
# they cannot be targeted with interactible actions. Hence, why
# we're resetting the object filter before targeting by object id.
with include_object_data(self.unshuffle_env.controller):
metadata = self.unshuffle_env.last_event.metadata
if len(metadata["inventoryObjects"]) != 0:
action_success = False
else:
object_type = stringcase.pascalcase(
action_name.replace("pickup_", "")
)
possible_objects = [
o
for o in metadata["objects"]
if o["visible"] and o["objectType"] == object_type
]
possible_objects = sorted(
possible_objects, key=lambda po: (po["distance"], po["name"])
)
object_before = None
if len(possible_objects) > 0:
object_before = possible_objects[0]
object_id = object_before["objectId"]
if object_before is not None:
self.unshuffle_env.controller.step(
"PickupObject",
objectId=object_id,
**self.unshuffle_env.physics_step_kwargs,
)
action_success = self.unshuffle_env.last_event.metadata[
"lastActionSuccess"
]
else:
action_success = False
if action_success and self.unshuffle_env.held_object is None:
get_logger().warning(
f"`PickupObject` was successful in picking up {object_id} but we're not holding"
f" any objects! Current task spec:\n{self.unshuffle_env.current_task_spec}."
)
action_success = False
elif action_name.startswith("open_by_type"):
object_type = stringcase.pascalcase(
action_name.replace("open_by_type_", "")
)
with include_object_data(self.unshuffle_env.controller):
openable_candidates = [
o
for o in self.unshuffle_env.last_event.metadata["objects"]
if (
o["visible"]
and o["objectType"] == object_type
and o["openable"]
)
]
openable_candidates.sort(
key=lambda o: self.openable_obj_name_to_priority[o["name"]]
)
if len(openable_candidates) > 0:
o = openable_candidates[0]
object_id = o["objectId"]
target_openness = next(self.openable_obj_name_to_openness_iter[o["name"]])
if o["openness"] > 0.0:
self.unshuffle_env.controller.step(
"CloseObject",
objectId=object_id,
**self.unshuffle_env.physics_step_kwargs,
)
self.unshuffle_env.controller.step(
"OpenObject",
objectId=object_id,
openness=target_openness,
**self.unshuffle_env.physics_step_kwargs,
)
action_success = self.unshuffle_env.last_event.metadata[
"lastActionSuccess"
]
else:
action_success = False
elif action_name.startswith(("move", "rotate", "look", "stand", "crouch")):
# apply to only the unshuffle env as the walkthrough agent's position
# must now be managed by the whichever sensor is trying to read data from it.
action_success = getattr(self.unshuffle_env, action_name)()
elif action_name == "drop_held_object_with_snap":
action_success = getattr(self.unshuffle_env, action_name)()
elif action_name == "done":
self._took_end_action = True
action_success = True
elif action_name == "pass":
action_success = True
else:
raise RuntimeError(
f"Action '{action_name}' is not in the action space {RearrangeActionSpace}"
)
self.actions_taken.append(action_name)
self.actions_taken_success.append(action_success)
if self.task_spec_in_metrics:
self.agent_locs.append(self.unshuffle_env.get_agent_location())
return RLStepResult(
observation=None,
reward=self._judge(),
done=self.is_done(),
info={"action_name": action_name, "action_success": action_success},
)
def step(self, action: int) -> RLStepResult:
step_result = super().step(action=action)
if self.greedy_expert is not None:
self.greedy_expert.update(
action_taken=action, action_success=step_result.info["action_success"]
)
step_result = RLStepResult(
observation=self.get_observations(),
reward=step_result.reward,
done=step_result.done,
info=step_result.info,
)
return step_result
class WalkthroughTask(AbstractRearrangeTask):
def __init__(
self,
sensors: SensorSuite,
walkthrough_env: RearrangeTHOREnvironment,
max_steps: int,
discrete_actions: Tuple[str, ...],
disable_metrics: bool = False,
) -> None:
"""Create a new walkthrough task."""
super().__init__(
env=walkthrough_env, sensors=sensors, task_info=dict(), max_steps=max_steps
)
self.walkthrough_env = walkthrough_env
self.discrete_actions = discrete_actions
self.disable_metrics = disable_metrics
self._took_end_action: bool = False
self.actions_taken = []
self.actions_taken_success = []
self.visited_positions_xzrsh = {self.agent_location_tuple}
self.visited_positions_xz = {self.agent_location_tuple[:2]}
self.seen_pickupable_objects = set(
o["name"] for o in self.pickupable_objects(visible_only=True)
)
self.seen_openable_objects = set(
o["name"] for o in self.openable_not_pickupable_objects(visible_only=True)
)
self.total_pickupable_or_openable_objects = len(
self.pickupable_or_openable_objects(visible_only=False)
)
self.walkthrough_env.controller.step("GetReachablePositions")
assert self.walkthrough_env.last_event.metadata["lastActionSuccess"]
self.reachable_positions = self.walkthrough_env.last_event.metadata[
"actionReturn"
]
def query_expert(self, **kwargs) -> Tuple[Any, bool]:
return 0, False
@property
def action_space(self) -> gym.spaces.Discrete:
"""Return the simplified action space in RearrangeMode.SNAP mode."""
return gym.spaces.Discrete(len(self.action_names()))
def close(self) -> None:
"""Close the AI2-THOR rearrangement environment controllers."""
try:
self.walkthrough_env.stop()
except Exception as _:
pass
def metrics(self, force_return: bool = False) -> Dict[str, Any]:
if (not force_return) and (self.disable_metrics or not self.is_done()):
return {}
nreachable = len(self.reachable_positions)
prop_visited_xz = len(self.visited_positions_xz) / nreachable
nreachable_xzr = 4 * nreachable # 4 rotations
visited_xzr = {p[:3] for p in self.visited_positions_xzrsh}
prop_visited_xzr = len(visited_xzr) / nreachable_xzr
n_obj_seen = len(self.seen_openable_objects) + len(self.seen_pickupable_objects)
metrics = super().metrics()
metrics["task_info"]["walkthrough_actions"] = self.actions_taken
metrics["task_info"][
"walkthrough_action_successes"
] = self.actions_taken_success
metrics = {
**metrics,
**{
"num_explored_xz": len(self.visited_positions_xz),
"num_explored_xzr": len(visited_xzr),
"prop_visited_xz": prop_visited_xz,
"prop_visited_xzr": prop_visited_xzr,
"num_obj_seen": n_obj_seen,
"prop_obj_seen": n_obj_seen / self.total_pickupable_or_openable_objects,
},
}
return {
f"walkthrough/{k}" if k != "task_info" else k: v for k, v in metrics.items()
}
def class_action_names(self, **kwargs) -> Tuple[str, ...]:
raise RuntimeError("This should not be called, use `action_names` instead.")
def action_names(self, **kwargs) -> Tuple[str, ...]:
"""Return the easy, simplified task's class names."""
return self.discrete_actions
def render(self, *args, **kwargs) -> Dict[str, Dict[str, np.array]]:
"""Return the rgb/depth obs from both walkthrough and unshuffle."""
# TODO: eventually update when the phases are separated.
walkthrough_obs = self.walkthrough_env.observation
return {
"walkthrough": {"rgb": walkthrough_obs[0], "depth": walkthrough_obs[1]},
}
def reached_terminal_state(self) -> bool:
"""Return if end of current episode has been reached."""
return self._took_end_action
def pickupable_objects(self, visible_only: bool = True):
with include_object_data(self.walkthrough_env.controller):
return [
o
for o in self.walkthrough_env.last_event.metadata["objects"]
if ((o["visible"] or not visible_only) and o["pickupable"])
]
def openable_not_pickupable_objects(self, visible_only: bool = True):
with include_object_data(self.walkthrough_env.controller):
return [
o
for o in self.walkthrough_env.last_event.metadata["objects"]
if (
(o["visible"] or not visible_only)
and (o["openable"] and not o["pickupable"])
)
]
def pickupable_or_openable_objects(self, visible_only: bool = True):
with include_object_data(self.walkthrough_env.controller):
return [
o
for o in self.walkthrough_env.last_event.metadata["objects"]
if (
(o["visible"] or not visible_only)
and (o["pickupable"] or (o["openable"] and not o["pickupable"]))
)
]
def _judge(self, action_name: str, action_success: bool) -> float:
"""Return the reward from a new (s, a, s')."""
total_seen_before = len(self.seen_pickupable_objects) + len(
self.seen_openable_objects
)
prop_seen_before = (
total_seen_before
) / self.total_pickupable_or_openable_objects
# Updating seen openable
for obj in self.openable_not_pickupable_objects(visible_only=True):
if obj["name"] not in self.seen_openable_objects:
self.seen_openable_objects.add(obj["name"])
# Updating seen pickupable
for obj in self.pickupable_objects(visible_only=True):
if obj["name"] not in self.seen_pickupable_objects:
self.seen_pickupable_objects.add(obj["name"])
# Updating visited locations
agent_loc_tuple = self.agent_location_tuple
self.visited_positions_xzrsh.add(agent_loc_tuple)
if agent_loc_tuple[:2] not in self.visited_positions_xz:
self.visited_positions_xz.add(agent_loc_tuple[:2])
total_seen_after = len(self.seen_pickupable_objects) + len(
self.seen_openable_objects
)
prop_seen_after = total_seen_after / self.total_pickupable_or_openable_objects
reward = 5 * (prop_seen_after - prop_seen_before)
if self._took_end_action and prop_seen_after > 0.5:
reward += 5 * (prop_seen_after + (prop_seen_after > 0.98))
return reward
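    # Added worked example (not in the original file): with 10 pickupable/openable objects in total,
    # newly observing 2 of them in a single step yields 5 * (2 / 10) = 1.0 reward. Issuing `done` after
    # having seen 6 of 10 adds a terminal bonus of 5 * 0.6 = 3.0; seeing more than 98% of the objects
    # adds a further +5 on top of that bonus.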
def _step(self, action: int) -> RLStepResult:
"""Take a step in the task.
# Parameters
action: is the index of the action from self.action_names()
"""
# parse the action data
action_name = self.action_names()[action]
if action_name.startswith("pickup"):
# Don't allow the exploration agent to pickup objects
action_success = False
elif action_name.startswith("open_by_type"):
# Don't allow the exploration agent to open objects
action_success = False
elif action_name.startswith(("move", "rotate", "look", "stand", "crouch")):
# take the movement action
action_success = getattr(self.walkthrough_env, action_name)()
elif action_name == "drop_held_object_with_snap":
# Don't allow the exploration agent to drop objects (not that it can hold any)
action_success = False
elif action_name == "done":
self._took_end_action = True
action_success = True
else:
raise RuntimeError(
f"Action '{action_name}' is not in the action space {RearrangeActionSpace}"
)
self.actions_taken.append(action_name)
self.actions_taken_success.append(action_success)
return RLStepResult(
observation=self.get_observations(),
reward=self._judge(action_name=action_name, action_success=action_success),
done=self.is_done(),
info={"action_name": action_name, "action_success": action_success},
)
class RearrangeTaskSpecIterable:
"""Iterate through a collection of scenes and pose specifications for the
rearrange task."""
def __init__(
self,
scenes_to_task_spec_dicts: Dict[str, List[Dict]],
seed: int,
epochs: Union[int, float],
shuffle: bool = True,
):
assert epochs >= 1
self.scenes_to_task_spec_dicts = {
k: [*v] for k, v in scenes_to_task_spec_dicts.items()
}
assert len(self.scenes_to_task_spec_dicts) != 0 and all(
len(self.scenes_to_task_spec_dicts[scene]) != 0
for scene in self.scenes_to_task_spec_dicts
)
self._seed = seed
self.random = random.Random(self.seed)
self.start_epochs = epochs
self.remaining_epochs = epochs
self.shuffle = shuffle
self.remaining_scenes: List[str] = []
self.task_spec_dicts_for_current_scene: List[Dict[str, Any]] = []
self.current_scene: Optional[str] = None
self.reset()
@property
def seed(self) -> int:
return self._seed
@seed.setter
def seed(self, seed: int):
self._seed = seed
self.random.seed(seed)
@property
def length(self):
if self.remaining_epochs == float("inf"):
return float("inf")
return (
len(self.task_spec_dicts_for_current_scene)
+ sum(
len(self.scenes_to_task_spec_dicts[scene])
for scene in self.remaining_scenes
)
+ self.remaining_epochs
* (sum(len(v) for v in self.scenes_to_task_spec_dicts.values()))
)
@property
def total_unique(self):
return sum(len(v) for v in self.scenes_to_task_spec_dicts.values())
def reset(self):
self.random.seed(self.seed)
self.remaining_epochs = self.start_epochs
self.remaining_scenes.clear()
self.task_spec_dicts_for_current_scene.clear()
self.current_scene = None
def refresh_remaining_scenes(self):
if self.remaining_epochs <= 0:
raise StopIteration
self.remaining_epochs -= 1
self.remaining_scenes = list(
sorted(
self.scenes_to_task_spec_dicts.keys(),
key=lambda s: int(s.replace("FloorPlan", "")),
)
)
if self.shuffle:
self.random.shuffle(self.remaining_scenes)
return self.remaining_scenes
def __next__(self) -> RearrangeTaskSpec:
if len(self.task_spec_dicts_for_current_scene) == 0:
if len(self.remaining_scenes) == 0:
self.refresh_remaining_scenes()
self.current_scene = self.remaining_scenes.pop()
self.task_spec_dicts_for_current_scene = [
*self.scenes_to_task_spec_dicts[self.current_scene]
]
if self.shuffle:
self.random.shuffle(self.task_spec_dicts_for_current_scene)
new_task_spec_dict = self.task_spec_dicts_for_current_scene.pop()
if "scene" not in new_task_spec_dict:
new_task_spec_dict["scene"] = self.current_scene
else:
assert self.current_scene == new_task_spec_dict["scene"]
return RearrangeTaskSpec(**new_task_spec_dict)
class RearrangeTaskSampler(TaskSampler):
def __init__(
self,
run_walkthrough_phase: bool,
run_unshuffle_phase: bool,
stage: str,
scenes_to_task_spec_dicts: Dict[str, List[Dict[str, Any]]],
rearrange_env_kwargs: Optional[Dict[str, Any]],
sensors: SensorSuite,
max_steps: Union[Dict[str, int], int],
discrete_actions: Tuple[str, ...],
require_done_action: bool,
force_axis_aligned_start: bool,
epochs: Union[int, float, str] = "default",
seed: Optional[int] = None,
unshuffle_runs_per_walkthrough: Optional[int] = None,
task_spec_in_metrics: bool = False,
) -> None:
assert isinstance(run_walkthrough_phase, bool) and isinstance(
run_unshuffle_phase, bool
), (
f"Both `run_walkthrough_phase` (== {run_walkthrough_phase})"
f" and `run_unshuffle_phase` (== {run_unshuffle_phase})"
f" must be boolean valued."
)
assert (
run_walkthrough_phase or run_unshuffle_phase
), "One of `run_walkthrough_phase` or `run_unshuffle_phase` must be `True`."
assert (unshuffle_runs_per_walkthrough is None) or (
run_walkthrough_phase and run_unshuffle_phase
), (
"`unshuffle_runs_per_walkthrough` should be `None` if either `run_walkthrough_phase` or"
" `run_unshuffle_phase` is `False`."
)
assert (
unshuffle_runs_per_walkthrough is None
) or unshuffle_runs_per_walkthrough >= 1, f"`unshuffle_runs_per_walkthrough` (=={unshuffle_runs_per_walkthrough}) must be >= 1."
self.run_walkthrough_phase = run_walkthrough_phase
self.run_unshuffle_phase = run_unshuffle_phase
self.sensors = sensors
self.stage = stage
self.main_seed = seed if seed is not None else random.randint(0, 2 * 30 - 1)
self.unshuffle_runs_per_walkthrough = (
1
if unshuffle_runs_per_walkthrough is None
else unshuffle_runs_per_walkthrough
)
self.cur_unshuffle_runs_count = 0
self.task_spec_in_metrics = task_spec_in_metrics
self.scenes_to_task_spec_dicts = copy.deepcopy(scenes_to_task_spec_dicts)
if isinstance(epochs, str):
if epochs.lower().strip() != "default":
raise NotImplementedError(f"Unknown value for `epochs` (=={epochs})")
epochs = float("inf") if stage == "train" else 1
self.task_spec_iterator = RearrangeTaskSpecIterable(
scenes_to_task_spec_dicts=self.scenes_to_task_spec_dicts,
seed=self.main_seed,
epochs=epochs,
shuffle=epochs == float("inf"),
)
self.walkthrough_env = RearrangeTHOREnvironment(**rearrange_env_kwargs)
self.unshuffle_env: Optional[RearrangeTHOREnvironment] = None
if self.run_unshuffle_phase:
self.unshuffle_env = RearrangeTHOREnvironment(**rearrange_env_kwargs)
self.scenes = list(self.scenes_to_task_spec_dicts.keys())
if isinstance(max_steps, int):
max_steps = {"unshuffle": max_steps, "walkthrough": max_steps}
self.max_steps: Dict[str, int] = max_steps
self.discrete_actions = discrete_actions
self.require_done_action = require_done_action
self.force_axis_aligned_start = force_axis_aligned_start
self._last_sampled_task: Optional[Union[UnshuffleTask, WalkthroughTask]] = None
self._last_sampled_walkthrough_task: Optional[WalkthroughTask] = None
self.was_in_exploration_phase: bool = False
@classmethod
def from_fixed_dataset(
cls,
run_walkthrough_phase: bool,
run_unshuffle_phase: bool,
stage: str,
allowed_scenes: Optional[Sequence[str]] = None,
scene_to_allowed_rearrange_inds: Optional[Dict[str, Sequence[int]]] = None,
randomize_start_rotation: bool = False,
**init_kwargs,
):
scenes_to_task_spec_dicts = cls._filter_scenes_to_task_spec_dicts(
scenes_to_task_spec_dicts=cls.load_rearrange_data_from_path(
stage=stage, base_dir=STARTER_DATA_DIR
),
allowed_scenes=allowed_scenes,
scene_to_allowed_rearrange_inds=scene_to_allowed_rearrange_inds,
)
if randomize_start_rotation:
random_gen = random.Random(1)
for scene in sorted(scenes_to_task_spec_dicts.keys()):
for task_spec_dict in scenes_to_task_spec_dicts[scene]:
task_spec_dict["agent_rotation"] = 360.0 * random_gen.random()
return cls(
run_walkthrough_phase=run_walkthrough_phase,
run_unshuffle_phase=run_unshuffle_phase,
stage=stage,
scenes_to_task_spec_dicts=scenes_to_task_spec_dicts,
**init_kwargs,
)
@classmethod
def from_scenes_at_runtime(
cls,
run_walkthrough_phase: bool,
run_unshuffle_phase: bool,
stage: str,
allowed_scenes: Sequence[str],
repeats_before_scene_change: int,
**init_kwargs,
):
assert "scene_to_allowed_rearrange_inds" not in init_kwargs
assert repeats_before_scene_change >= 1
return cls(
run_walkthrough_phase=run_walkthrough_phase,
run_unshuffle_phase=run_unshuffle_phase,
stage=stage,
scenes_to_task_spec_dicts={
scene: tuple(
                    {"scene": scene, "runtime_sample": True}
for _ in range(repeats_before_scene_change)
)
for scene in allowed_scenes
},
**init_kwargs,
)
@classmethod
def _filter_scenes_to_task_spec_dicts(
cls,
scenes_to_task_spec_dicts: Dict[str, List[Dict[str, Any]]],
allowed_scenes: Optional[Sequence[str]],
scene_to_allowed_rearrange_inds: Optional[Dict[str, Sequence[int]]],
) -> Dict[str, List[Dict[str, Any]]]:
if allowed_scenes is not None:
scenes_to_task_spec_dicts = {
scene: scenes_to_task_spec_dicts[scene] for scene in allowed_scenes
}
if scene_to_allowed_rearrange_inds is not None:
scenes_to_task_spec_dicts = {
scene: [
scenes_to_task_spec_dicts[scene][ind]
for ind in sorted(scene_to_allowed_rearrange_inds[scene])
]
for scene in scene_to_allowed_rearrange_inds
if scene in scenes_to_task_spec_dicts
}
return scenes_to_task_spec_dicts
@classmethod
def load_rearrange_data_from_path(
cls, stage: str, base_dir: Optional[str] = None,
) -> Dict[str, List[Dict[str, Any]]]:
stage = stage.lower()
if stage == "valid":
stage = "val"
data_path = os.path.abspath(os.path.join(base_dir, f"{stage}.pkl.gz"))
if not os.path.exists(data_path):
raise RuntimeError(f"No data at path {data_path}")
data = compress_pickle.load(path=data_path)
for scene in data:
for ind, task_spec_dict in enumerate(data[scene]):
task_spec_dict["scene"] = scene
if "index" not in task_spec_dict:
task_spec_dict["index"] = ind
if "stage" not in task_spec_dict:
task_spec_dict["stage"] = stage
return data
@property
def length(self) -> float:
"""Return the total number of allowable next_task calls."""
count = self.run_walkthrough_phase + self.run_unshuffle_phase
if count == 1:
return self.task_spec_iterator.length
elif count == 2:
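            # With both phases enabled, every task specification yields one walkthrough
            # episode followed by `unshuffle_runs_per_walkthrough` unshuffle episodes.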
mult = self.unshuffle_runs_per_walkthrough
count = (1 + mult) * self.task_spec_iterator.length
if self.last_sampled_task is not None and (
isinstance(self.last_sampled_task, WalkthroughTask)
or self.cur_unshuffle_runs_count < mult
):
count += mult - self.cur_unshuffle_runs_count
return count
else:
raise NotImplementedError
@property
def total_unique(self):
return self.task_spec_iterator.total_unique
@property
def last_sampled_task(self) -> Optional[UnshuffleTask]:
"""Return the most recent sampled task."""
return self._last_sampled_task
@property
def all_observation_spaces_equal(self) -> bool:
"""Return if the observation space remains the same across steps."""
return True
def close(self) -> None:
"""Close the open AI2-THOR controllers."""
try:
self.unshuffle_env.stop()
except Exception as _:
pass
try:
self.walkthrough_env.stop()
except Exception as _:
pass
def reset(self) -> None:
"""Restart the unshuffle iteration setup order."""
self.task_spec_iterator.reset()
self.cur_unshuffle_runs_count = 0
self._last_sampled_task = None
self._last_sampled_walkthrough_task = None
def set_seed(self, seed: int) -> None:
self.task_spec_iterator.seed = seed
self.main_seed = seed
@property
def current_task_spec(self) -> RearrangeTaskSpec:
if self.run_unshuffle_phase:
return self.unshuffle_env.current_task_spec
else:
return self.walkthrough_env.current_task_spec
def next_task(
self, forced_task_spec: Optional[RearrangeTaskSpec] = None, **kwargs
) -> Optional[UnshuffleTask]:
"""Return a fresh UnshuffleTask setup."""
walkthrough_finished_and_should_run_unshuffle = (
forced_task_spec is None
and self.run_unshuffle_phase
and self.run_walkthrough_phase
and (
self.was_in_exploration_phase
or self.cur_unshuffle_runs_count < self.unshuffle_runs_per_walkthrough
)
)
if (
self.last_sampled_task is None
or not walkthrough_finished_and_should_run_unshuffle
):
self.cur_unshuffle_runs_count = 0
try:
if forced_task_spec is None:
task_spec: RearrangeTaskSpec = next(self.task_spec_iterator)
else:
task_spec = forced_task_spec
except StopIteration:
self._last_sampled_task = None
return self._last_sampled_task
runtime_sample = task_spec.runtime_sample
try:
if self.run_unshuffle_phase:
self.unshuffle_env.reset(
task_spec=task_spec,
force_axis_aligned_start=self.force_axis_aligned_start,
)
self.unshuffle_env.shuffle()
if runtime_sample:
unshuffle_task_spec = self.unshuffle_env.current_task_spec
starting_objects = unshuffle_task_spec.runtime_data[
"starting_objects"
]
openable_data = [
{
"name": o["name"],
"objectName": o["name"],
"objectId": o["objectId"],
"start_openness": o["openness"],
"target_openness": o["openness"],
}
for o in starting_objects
if o["isOpen"] and not o["pickupable"]
]
starting_poses = [
{
"name": o["name"],
"objectName": o["name"],
"position": o["position"],
"rotation": o["rotation"],
}
for o in starting_objects
if o["pickupable"]
]
task_spec = RearrangeTaskSpec(
scene=unshuffle_task_spec.scene,
agent_position=task_spec.agent_position,
agent_rotation=task_spec.agent_rotation,
openable_data=openable_data,
starting_poses=starting_poses,
target_poses=starting_poses,
)
self.walkthrough_env.reset(
task_spec=task_spec,
force_axis_aligned_start=self.force_axis_aligned_start,
)
if self.run_walkthrough_phase:
self.was_in_exploration_phase = True
self._last_sampled_task = WalkthroughTask(
sensors=self.sensors,
walkthrough_env=self.walkthrough_env,
max_steps=self.max_steps["walkthrough"],
discrete_actions=self.discrete_actions,
disable_metrics=self.run_unshuffle_phase,
)
self._last_sampled_walkthrough_task = self._last_sampled_task
else:
self.cur_unshuffle_runs_count += 1
self._last_sampled_task = UnshuffleTask(
sensors=self.sensors,
unshuffle_env=self.unshuffle_env,
walkthrough_env=self.walkthrough_env,
max_steps=self.max_steps["unshuffle"],
discrete_actions=self.discrete_actions,
require_done_action=self.require_done_action,
task_spec_in_metrics=self.task_spec_in_metrics,
)
except Exception as e:
if runtime_sample:
get_logger().error(
"Encountered exception while sampling a next task."
" As this next task was a 'runtime sample' we are"
" simply returning the next task."
)
get_logger().error(traceback.format_exc())
return self.next_task()
else:
raise e
else:
self.cur_unshuffle_runs_count += 1
self.was_in_exploration_phase = False
walkthrough_task = cast(
WalkthroughTask, self._last_sampled_walkthrough_task
)
if self.cur_unshuffle_runs_count != 1:
self.unshuffle_env.reset(
task_spec=self.unshuffle_env.current_task_spec,
force_axis_aligned_start=self.force_axis_aligned_start,
)
self.unshuffle_env.shuffle()
self._last_sampled_task = UnshuffleTask(
sensors=self.sensors,
unshuffle_env=self.unshuffle_env,
walkthrough_env=self.walkthrough_env,
max_steps=self.max_steps["unshuffle"],
discrete_actions=self.discrete_actions,
require_done_action=self.require_done_action,
locations_visited_in_walkthrough=np.array(
tuple(walkthrough_task.visited_positions_xzrsh)
),
object_names_seen_in_walkthrough=copy.copy(
walkthrough_task.seen_pickupable_objects
| walkthrough_task.seen_openable_objects
),
metrics_from_walkthrough=walkthrough_task.metrics(force_return=True),
task_spec_in_metrics=self.task_spec_in_metrics,
)
return self._last_sampled_task
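# Illustrative usage sketch (not part of the original module): when both phases are
# enabled, the task sampler defined above alternates between phases, e.g.
#
#   sampler.reset()
#   walkthrough_task = sampler.next_task()  # WalkthroughTask in the goal (unshuffled) scene
#   unshuffle_task = sampler.next_task()    # UnshuffleTask for the same scene/task spec
#
# How `sampler` is constructed is an assumption here; see `from_fixed_dataset` and
# `from_scenes_at_runtime` above for the supported entry points.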
| ai2thor-rearrangement-main | rearrange/tasks.py |
from typing import Any, Optional, Union
import gym.spaces
import numpy as np
from allenact.base_abstractions.sensor import Sensor
try:
from allenact.embodiedai.sensors.vision_sensors import RGBSensor
except ImportError:
raise ImportError("Please update to allenact>=0.4.0.")
from allenact.utils.misc_utils import prepare_locals_for_super
from allenact_plugins.robothor_plugin.robothor_sensors import DepthSensorThor
from rearrange.constants import STEP_SIZE
from rearrange.environment import RearrangeTHOREnvironment
from rearrange.tasks import (
UnshuffleTask,
WalkthroughTask,
AbstractRearrangeTask,
)
class RGBRearrangeSensor(
RGBSensor[RearrangeTHOREnvironment, Union[WalkthroughTask, UnshuffleTask]]
):
def frame_from_env(
self, env: RearrangeTHOREnvironment, task: Union[WalkthroughTask, UnshuffleTask]
) -> np.ndarray:
if isinstance(task, WalkthroughTask):
return task.walkthrough_env.last_event.frame.copy()
elif isinstance(task, UnshuffleTask):
return task.unshuffle_env.last_event.frame.copy()
else:
raise NotImplementedError(
f"Unknown task type {type(task)}, must be an `WalkthroughTask` or an `UnshuffleTask`."
)
class DepthRearrangeSensor(DepthSensorThor):
def frame_from_env(
self, env: RearrangeTHOREnvironment, task: Union[WalkthroughTask, UnshuffleTask]
) -> np.ndarray:
if isinstance(task, WalkthroughTask):
return task.walkthrough_env.last_event.depth_frame.copy()
elif isinstance(task, UnshuffleTask):
return task.unshuffle_env.last_event.depth_frame.copy()
else:
raise NotImplementedError(
f"Unknown task type {type(task)}, must be an `WalkthroughTask` or an `UnshuffleTask`."
)
class UnshuffledRGBRearrangeSensor(
RGBSensor[RearrangeTHOREnvironment, Union[WalkthroughTask, UnshuffleTask]]
):
def frame_from_env(
self, env: RearrangeTHOREnvironment, task: Union[WalkthroughTask, UnshuffleTask]
) -> np.ndarray:
walkthrough_env = task.walkthrough_env
if not isinstance(task, WalkthroughTask):
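            # During the unshuffle phase, teleport the walkthrough agent to the unshuffle
            # agent's current pose so both frames are rendered from the same viewpoint.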
unshuffle_loc = task.unshuffle_env.get_agent_location()
walkthrough_agent_loc = walkthrough_env.get_agent_location()
unshuffle_loc_tuple = AbstractRearrangeTask.agent_location_to_tuple(
unshuffle_loc
)
walkthrough_loc_tuple = AbstractRearrangeTask.agent_location_to_tuple(
walkthrough_agent_loc
)
if unshuffle_loc_tuple != walkthrough_loc_tuple:
walkthrough_env.controller.step(
"TeleportFull",
x=unshuffle_loc["x"],
y=unshuffle_loc["y"],
z=unshuffle_loc["z"],
horizon=unshuffle_loc["horizon"],
rotation={"x": 0, "y": unshuffle_loc["rotation"], "z": 0},
standing=unshuffle_loc["standing"] == 1,
forceAction=True,
)
return walkthrough_env.last_event.frame.copy()
class ClosestUnshuffledRGBRearrangeSensor(
RGBSensor[RearrangeTHOREnvironment, Union[WalkthroughTask, UnshuffleTask]]
):
ROT_TO_FORWARD = np.array([[0, 1], [1, 0], [0, -1], [-1, 0]])
def frame_from_env(
self, env: RearrangeTHOREnvironment, task: Union[WalkthroughTask, UnshuffleTask]
) -> np.ndarray:
walkthrough_env = task.walkthrough_env
if not isinstance(task, WalkthroughTask):
walkthrough_visited_locs = (
task.locations_visited_in_walkthrough
) # A (num unique visited) x 4 matrix
assert walkthrough_visited_locs is not None
current_loc = np.array(task.agent_location_tuple).reshape((1, -1))
diffs = walkthrough_visited_locs - current_loc
xz_dist = np.sqrt(diffs[:, 0] ** 2 + diffs[:, 1] ** 2)
rot_diff = np.array(diffs[:, 2].round(), dtype=int) % 360
rot_diff = np.minimum(rot_diff, 360 - rot_diff)
rot_dist = 100 * (180 == rot_diff) + 2 * (90 == rot_diff)
stand_dist = np.abs(diffs[:, 3]) * STEP_SIZE / 2
horizon_dist = np.abs(diffs[:, 4]) * STEP_SIZE / 2
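            # Combine the xz distance with penalties for rotation, standing, and horizon
            # mismatches, then pick the closest pose visited during the walkthrough.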
x, z, rotation, standing, horizon = tuple(
walkthrough_visited_locs[
np.argmin(xz_dist + rot_dist + stand_dist + horizon_dist), :
]
)
walkthrough_env = task.walkthrough_env
assert task.unshuffle_env.scene == walkthrough_env.scene
walkthrough_agent_loc = walkthrough_env.get_agent_location()
walkthrough_loc_tuple = AbstractRearrangeTask.agent_location_to_tuple(
walkthrough_agent_loc
)
if walkthrough_loc_tuple != (x, z, rotation, standing, horizon):
walkthrough_env.controller.step(
"TeleportFull",
x=x,
y=walkthrough_agent_loc["y"],
z=z,
horizon=horizon,
rotation={"x": 0, "y": rotation, "z": 0},
standing=standing == 1,
forceAction=True,
)
return walkthrough_env.last_event.frame.copy()
class InWalkthroughPhaseSensor(
Sensor[RearrangeTHOREnvironment, Union[UnshuffleTask, WalkthroughTask]]
):
def __init__(self, uuid: str = "in_walkthrough_phase", **kwargs: Any):
observation_space = gym.spaces.Box(
            low=False, high=True, shape=(1,), dtype=bool
)
super().__init__(**prepare_locals_for_super(locals()))
def get_observation(
self,
env: RearrangeTHOREnvironment,
task: Optional[UnshuffleTask],
*args: Any,
**kwargs: Any,
) -> np.ndarray:
if isinstance(task, WalkthroughTask):
return np.array([True], dtype=bool)
elif isinstance(task, UnshuffleTask):
return np.array([False], dtype=bool)
else:
raise NotImplementedError(
f"Unknown task type {type(task)}, must be an `WalkthroughTask` or an `UnshuffleTask`."
)
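# Hedged usage sketch (illustrative only): sensors such as `InWalkthroughPhaseSensor`
# are queried once per step, e.g.
#
#   sensor = InWalkthroughPhaseSensor()
#   obs = sensor.get_observation(env=env, task=task)  # np.array([True]) during a walkthrough
#
# where `env` and `task` are assumed to be a `RearrangeTHOREnvironment` and a
# `WalkthroughTask`/`UnshuffleTask` created elsewhere.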
| ai2thor-rearrangement-main | rearrange/sensors.py |
"""Definitions for a greedy expert for the `Unshuffle` task."""
import copy
import random
from collections import defaultdict
from typing import (
Dict,
Tuple,
Any,
Optional,
Union,
List,
Sequence,
TYPE_CHECKING,
)
import ai2thor.controller
import ai2thor.server
import networkx as nx
import stringcase
from torch.distributions.utils import lazy_property
from allenact.utils.system import get_logger
from allenact_plugins.ithor_plugin.ithor_environment import IThorEnvironment
from allenact_plugins.ithor_plugin.ithor_util import (
round_to_factor,
include_object_data,
)
from rearrange.constants import STEP_SIZE
from rearrange.environment import (
RearrangeTHOREnvironment,
RearrangeMode,
)
if TYPE_CHECKING:
from rearrange.tasks import UnshuffleTask
AgentLocKeyType = Tuple[float, float, int, int]
class ShortestPathNavigatorTHOR:
"""Tracks shortest paths in AI2-THOR environments.
Assumes 90 degree rotations and fixed step sizes.
# Attributes
controller : The AI2-THOR controller in which shortest paths are computed.
"""
def __init__(
self,
controller: ai2thor.controller.Controller,
grid_size: float,
include_move_left_right: bool = False,
):
"""Create a `ShortestPathNavigatorTHOR` instance.
# Parameters
controller : An AI2-THOR controller which represents the environment in which shortest paths should be
computed.
grid_size : The distance traveled by an AI2-THOR agent when taking a single navigational step.
include_move_left_right : If `True` the navigational actions will include `MoveLeft` and `MoveRight`, otherwise
            they will not.
"""
self._cached_graphs: Dict[str, nx.DiGraph] = {}
self._current_scene: Optional[nx.DiGraph] = None
self._current_graph: Optional[nx.DiGraph] = None
self._grid_size = grid_size
self.controller = controller
self._include_move_left_right = include_move_left_right
@lazy_property
def nav_actions_set(self) -> frozenset:
"""Navigation actions considered when computing shortest paths."""
nav_actions = [
"LookUp",
"LookDown",
"RotateLeft",
"RotateRight",
"MoveAhead",
]
if self._include_move_left_right:
nav_actions.extend(["MoveLeft", "MoveRight"])
return frozenset(nav_actions)
@property
def scene_name(self) -> str:
"""Current ai2thor scene."""
return self.controller.last_event.metadata["sceneName"]
@property
def last_action_success(self) -> bool:
"""Was the last action taken by the agent a success?"""
return self.controller.last_event.metadata["lastActionSuccess"]
@property
def last_event(self) -> ai2thor.server.Event:
"""Last event returned by the controller."""
return self.controller.last_event
def on_reset(self):
"""Function that must be called whenever the AI2-THOR controller is
reset."""
self._current_scene = None
@property
def graph(self) -> nx.DiGraph:
"""A directed graph representing the navigation graph of the current
scene."""
if self._current_scene == self.scene_name:
return self._current_graph
if self.scene_name not in self._cached_graphs:
g = nx.DiGraph()
points = self.reachable_points_with_rotations_and_horizons()
for p in points:
self._add_node_to_graph(g, self.get_key(p))
self._cached_graphs[self.scene_name] = g
self._current_scene = self.scene_name
self._current_graph = self._cached_graphs[self.scene_name].copy()
return self._current_graph
def reachable_points_with_rotations_and_horizons(
self,
) -> List[Dict[str, Union[float, int]]]:
"""Get all the reaachable positions in the scene along with possible
rotation/horizons."""
self.controller.step(action="GetReachablePositions")
assert self.last_action_success
points_slim = self.last_event.metadata["actionReturn"]
points = []
for r in [0, 90, 180, 270]:
for horizon in [-30, 0, 30, 60]:
for p in points_slim:
p = copy.copy(p)
p["rotation"] = r
p["horizon"] = horizon
points.append(p)
return points
@staticmethod
def location_for_key(key, y_value=0.0) -> Dict[str, Union[float, int]]:
"""Return a agent location dictionary given a graph node key."""
x, z, rot, hor = key
loc = dict(x=x, y=y_value, z=z, rotation=rot, horizon=hor)
return loc
@staticmethod
def get_key(input_dict: Dict[str, Any], ndigits: int = 2) -> AgentLocKeyType:
"""Return a graph node key given an input agent location dictionary."""
if "x" in input_dict:
x = input_dict["x"]
z = input_dict["z"]
rot = input_dict["rotation"]
hor = input_dict["horizon"]
else:
x = input_dict["position"]["x"]
z = input_dict["position"]["z"]
rot = input_dict["rotation"]["y"]
hor = input_dict["cameraHorizon"]
return (
round(x, ndigits),
round(z, ndigits),
round_to_factor(rot, 90) % 360,
round_to_factor(hor, 30) % 360,
)
def update_graph_with_failed_action(self, failed_action: str):
"""If an action failed, update the graph to let it know this happened
so it won't try again."""
if (
self.scene_name not in self._cached_graphs
or failed_action not in self.nav_actions_set
):
return
source_key = self.get_key(self.last_event.metadata["agent"])
self._check_contains_key(source_key)
edge_dict = self.graph[source_key]
to_remove_key = None
for target_key in self.graph[source_key]:
if edge_dict[target_key]["action"] == failed_action:
to_remove_key = target_key
break
if to_remove_key is not None:
self.graph.remove_edge(source_key, to_remove_key)
def _add_from_to_edge(
self, g: nx.DiGraph, s: AgentLocKeyType, t: AgentLocKeyType,
):
"""Add an edge to the graph."""
def ae(x, y):
return abs(x - y) < 0.001
s_x, s_z, s_rot, s_hor = s
t_x, t_z, t_rot, t_hor = t
l1_dist = round(abs(s_x - t_x) + abs(s_z - t_z), 2)
angle_dist = (round_to_factor(t_rot - s_rot, 90) % 360) // 90
horz_dist = (round_to_factor(t_hor - s_hor, 30) % 360) // 30
# If source and target differ by more than one action, continue
if sum(x != 0 for x in [l1_dist, angle_dist, horz_dist]) != 1:
return
grid_size = self._grid_size
action = None
if angle_dist != 0:
if angle_dist == 1:
action = "RotateRight"
elif angle_dist == 3:
action = "RotateLeft"
elif horz_dist != 0:
if horz_dist == 11:
action = "LookUp"
elif horz_dist == 1:
action = "LookDown"
elif ae(l1_dist, grid_size):
if s_rot == 0:
forward = round((t_z - s_z) / grid_size)
right = round((t_x - s_x) / grid_size)
elif s_rot == 90:
forward = round((t_x - s_x) / grid_size)
right = -round((t_z - s_z) / grid_size)
elif s_rot == 180:
forward = -round((t_z - s_z) / grid_size)
right = -round((t_x - s_x) / grid_size)
elif s_rot == 270:
forward = -round((t_x - s_x) / grid_size)
right = round((t_z - s_z) / grid_size)
else:
raise NotImplementedError(f"source rotation == {s_rot} unsupported.")
if forward > 0:
g.add_edge(s, t, action="MoveAhead")
elif self._include_move_left_right:
if forward < 0:
# Allowing MoveBack results in some really unintuitive
                    # expert trajectories (i.e. moving backwards to the goal and then
                    # rotating), so for now it is disabled.
pass # g.add_edge(s, t, action="MoveBack")
elif right > 0:
g.add_edge(s, t, action="MoveRight")
elif right < 0:
g.add_edge(s, t, action="MoveLeft")
if action is not None:
g.add_edge(s, t, action=action)
@lazy_property
def possible_neighbor_offsets(self) -> Tuple[AgentLocKeyType, ...]:
"""Offsets used to generate potential neighbors of a node."""
grid_size = round(self._grid_size, 2)
offsets = []
for rot_diff in [-90, 0, 90]:
for horz_diff in [-30, 0, 30, 60]:
for x_diff in [-grid_size, 0, grid_size]:
for z_diff in [-grid_size, 0, grid_size]:
if (rot_diff != 0) + (horz_diff != 0) + (x_diff != 0) + (
z_diff != 0
) == 1:
offsets.append((x_diff, z_diff, rot_diff, horz_diff))
return tuple(offsets)
def _add_node_to_graph(self, graph: nx.DiGraph, s: AgentLocKeyType):
"""Add a node to the graph along with any adjacent edges."""
if s in graph:
return
existing_nodes = set(graph.nodes())
graph.add_node(s)
for x_diff, z_diff, rot_diff, horz_diff in self.possible_neighbor_offsets:
t = (
s[0] + x_diff,
s[1] + z_diff,
(s[2] + rot_diff) % 360,
(s[3] + horz_diff) % 360,
)
if t in existing_nodes:
self._add_from_to_edge(graph, s, t)
self._add_from_to_edge(graph, t, s)
def _check_contains_key(self, key: AgentLocKeyType, add_if_not=True) -> bool:
"""Check if a node key is in the graph.
# Parameters
key : The key to check.
add_if_not : If the key doesn't exist and this is `True`, the key will be added along with
edges to any adjacent nodes.
"""
key_in_graph = key in self.graph
if not key_in_graph:
get_logger().debug(
"{} was not in the graph for scene {}.".format(key, self.scene_name)
)
if add_if_not:
self._add_node_to_graph(self.graph, key)
if key not in self._cached_graphs[self.scene_name]:
self._add_node_to_graph(self._cached_graphs[self.scene_name], key)
return key_in_graph
def shortest_state_path(
self, source_state_key: AgentLocKeyType, goal_state_key: AgentLocKeyType
) -> Optional[Sequence[AgentLocKeyType]]:
"""Get the shortest path between node keys."""
self._check_contains_key(source_state_key)
self._check_contains_key(goal_state_key)
# noinspection PyBroadException
path = nx.shortest_path(
G=self.graph, source=source_state_key, target=goal_state_key
)
return path
def action_transitioning_between_keys(self, s: AgentLocKeyType, t: AgentLocKeyType):
"""Get the action that takes the agent from node s to node t."""
self._check_contains_key(s)
self._check_contains_key(t)
if self.graph.has_edge(s, t):
return self.graph.get_edge_data(s, t)["action"]
else:
return None
def shortest_path_next_state(
self, source_state_key: AgentLocKeyType, goal_state_key: AgentLocKeyType
):
"""Get the next node key on the shortest path from the source to the
goal."""
if source_state_key == goal_state_key:
raise RuntimeError("called next state on the same source and goal state")
state_path = self.shortest_state_path(source_state_key, goal_state_key)
return state_path[1]
def shortest_path_next_action(
self, source_state_key: AgentLocKeyType, goal_state_key: AgentLocKeyType
):
"""Get the next action along the shortest path from the source to the
goal."""
next_state_key = self.shortest_path_next_state(source_state_key, goal_state_key)
return self.graph.get_edge_data(source_state_key, next_state_key)["action"]
def shortest_path_next_action_multi_target(
self,
source_state_key: AgentLocKeyType,
goal_state_keys: Sequence[AgentLocKeyType],
):
"""Get the next action along the shortest path from the source to the
closest goal."""
self._check_contains_key(source_state_key)
terminal_node = (-1.0, -1.0, -1, -1)
self.graph.add_node(terminal_node)
for gsk in goal_state_keys:
self._check_contains_key(gsk)
self.graph.add_edge(gsk, terminal_node, action=None)
next_state_key = self.shortest_path_next_state(source_state_key, terminal_node)
action = self.graph.get_edge_data(source_state_key, next_state_key)["action"]
self.graph.remove_node(terminal_node)
return action
def shortest_path_length(
self, source_state_key: AgentLocKeyType, goal_state_key: AgentLocKeyType
):
"""Get the path shorest path length between the source and the goal."""
self._check_contains_key(source_state_key)
self._check_contains_key(goal_state_key)
try:
return nx.shortest_path_length(self.graph, source_state_key, goal_state_key)
except nx.NetworkXNoPath as _:
return float("inf")
def _are_agent_locations_equal(
ap0: Dict[str, Union[float, int, bool]],
ap1: Dict[str, Union[float, int, bool]],
ignore_standing: bool,
tol=1e-2,
ignore_y: bool = True,
):
"""Determines if two agent locations are equal up to some tolerance."""
def rot_dist(r0: float, r1: float):
diff = abs(r0 - r1) % 360
return min(diff, 360 - diff)
return (
all(
abs(ap0[k] - ap1[k]) <= tol
for k in (["x", "z"] if ignore_y else ["x", "y", "z"])
)
and rot_dist(ap0["rotation"], ap1["rotation"]) <= tol
and rot_dist(ap0["horizon"], ap1["horizon"]) <= tol
and (ignore_standing or (ap0["standing"] == ap1["standing"]))
)
class GreedyUnshuffleExpert:
"""An agent which greedily attempts to complete a given unshuffle task."""
def __init__(
self,
task: "UnshuffleTask",
shortest_path_navigator: ShortestPathNavigatorTHOR,
max_priority_per_object: int = 3,
):
"""Initializes a `GreedyUnshuffleExpert` object.
# Parameters
task : An `UnshuffleTask` that the greedy expert should attempt to complete.
shortest_path_navigator : A `ShortestPathNavigatorTHOR` object defined on the same
AI2-THOR controller used by the `task`.
max_priority_per_object : The maximum number of times we should try to unshuffle an object
before giving up.
"""
self.task = task
self.shortest_path_navigator = shortest_path_navigator
self.max_priority_per_object = max_priority_per_object
assert self.task.num_steps_taken() == 0
self.expert_action_list: List[Optional[int]] = []
self._last_held_object_name: Optional[str] = None
self._last_to_interact_object_pose: Optional[Dict[str, Any]] = None
self._name_of_object_we_wanted_to_pickup: Optional[str] = None
self.object_name_to_priority: defaultdict = defaultdict(lambda: 0)
self.shortest_path_navigator.on_reset()
self.update(action_taken=None, action_success=None)
@property
def expert_action(self) -> int:
"""Get the current greedy expert action.
        # Returns
        An integer specifying the expert action in the current state. This
        corresponds to the order of actions in `self.task.action_names()`. For this
        action to be available the `update` function must be called after every step.
"""
assert self.task.num_steps_taken() == len(self.expert_action_list) - 1
return self.expert_action_list[-1]
def update(self, action_taken: Optional[int], action_success: Optional[bool]):
"""Update the expert with the last action taken and whether or not that
action succeeded."""
if action_taken is not None:
assert action_success is not None
action_names = self.task.action_names()
last_expert_action = self.expert_action_list[-1]
agent_took_expert_action = action_taken == last_expert_action
action_str = action_names[action_taken]
was_nav_action = any(k in action_str for k in ["move", "rotate", "look"])
if (
"pickup_" in action_str
and action_taken == last_expert_action
and action_success
):
self._name_of_object_we_wanted_to_pickup = self._last_to_interact_object_pose[
"name"
]
if "drop_held_object_with_snap" in action_str and agent_took_expert_action:
if self._name_of_object_we_wanted_to_pickup is not None:
self.object_name_to_priority[
self._name_of_object_we_wanted_to_pickup
] += 1
else:
self.object_name_to_priority[self._last_held_object_name] += 1
if "open_by_type" in action_str and agent_took_expert_action:
self.object_name_to_priority[
self._last_to_interact_object_pose["name"]
] += 1
if not action_success:
if was_nav_action:
self.shortest_path_navigator.update_graph_with_failed_action(
stringcase.pascalcase(action_str)
)
elif (
("pickup_" in action_str or "open_by_type_" in action_str)
) and action_taken == last_expert_action:
assert self._last_to_interact_object_pose is not None
self._invalidate_interactable_loc_for_pose(
location=self.task.unshuffle_env.get_agent_location(),
obj_pose=self._last_to_interact_object_pose,
)
elif (
("crouch" in action_str or "stand" in action_str)
and self.task.unshuffle_env.held_object is not None
) and action_taken == last_expert_action:
held_object_name = self.task.unshuffle_env.held_object["name"]
agent_loc = self.task.unshuffle_env.get_agent_location()
agent_loc["standing"] = not agent_loc["standing"]
self._invalidate_interactable_loc_for_pose(
location=agent_loc,
obj_pose=self.task.unshuffle_env.obj_name_to_walkthrough_start_pose[
held_object_name
],
)
else:
# If the action succeeded and was not a move action then let's force an update
# of our currently targeted object
if not was_nav_action:
self._last_to_interact_object_pose = None
held_object = self.task.unshuffle_env.held_object
if self.task.unshuffle_env.held_object is not None:
self._last_held_object_name = held_object["name"]
self._generate_and_record_expert_action()
def _expert_nav_action_to_obj(self, obj: Dict[str, Any]) -> Optional[str]:
"""Get the shortest path navigational action towards the object obj.
The navigational action takes us to a position from which the
object is interactable.
"""
env: RearrangeTHOREnvironment = self.task.env
agent_loc = env.get_agent_location()
shortest_path_navigator = self.shortest_path_navigator
interactable_positions = env._interactable_positions_cache.get(
scene_name=env.scene, obj=obj, controller=env.controller,
)
target_keys = [
shortest_path_navigator.get_key(loc) for loc in interactable_positions
]
if len(target_keys) == 0:
return None
source_state_key = shortest_path_navigator.get_key(env.get_agent_location())
action = "Pass"
if source_state_key not in target_keys:
try:
action = shortest_path_navigator.shortest_path_next_action_multi_target(
source_state_key=source_state_key, goal_state_keys=target_keys,
)
except nx.NetworkXNoPath as _:
# Could not find the expert actions
return None
if action != "Pass":
return action
else:
agent_x = agent_loc["x"]
agent_z = agent_loc["z"]
for gdl in interactable_positions:
d = round(abs(agent_x - gdl["x"]) + abs(agent_z - gdl["z"]), 2)
if d <= 1e-2:
if _are_agent_locations_equal(agent_loc, gdl, ignore_standing=True):
if agent_loc["standing"] != gdl["standing"]:
return "Crouch" if agent_loc["standing"] else "Stand"
else:
# We are already at an interactable position
return "Pass"
return None
def _invalidate_interactable_loc_for_pose(
self, location: Dict[str, Any], obj_pose: Dict[str, Any]
) -> bool:
"""Invalidate a given location in the `interactable_positions_cache` as
we tried to interact but couldn't."""
env = self.task.unshuffle_env
interactable_positions = env._interactable_positions_cache.get(
scene_name=env.scene, obj=obj_pose, controller=env.controller
)
for i, loc in enumerate([*interactable_positions]):
if (
self.shortest_path_navigator.get_key(loc)
== self.shortest_path_navigator.get_key(location)
and loc["standing"] == location["standing"]
):
interactable_positions.pop(i)
return True
return False
def _generate_expert_action_dict(self) -> Dict[str, Any]:
"""Generate a dictionary describing the next greedy expert action."""
env = self.task.unshuffle_env
if env.mode != RearrangeMode.SNAP:
raise NotImplementedError(
f"Expert only defined for 'easy' mode (current mode: {env.mode}"
)
held_object = env.held_object
agent_loc = env.get_agent_location()
if held_object is not None:
self._last_to_interact_object_pose = None
# Should navigate to a position where the held object can be placed
expert_nav_action = self._expert_nav_action_to_obj(
obj={
**held_object,
**{
k: env.obj_name_to_walkthrough_start_pose[held_object["name"]][
k
]
for k in ["position", "rotation"]
},
},
)
if expert_nav_action is None:
# Could not find a path to the target, let's just immediately drop the held object
return dict(action="DropHeldObjectWithSnap")
            elif expert_nav_action == "Pass":
# We are in a position where we can drop the object, let's do that
return dict(action="DropHeldObjectWithSnap")
else:
return dict(action=expert_nav_action)
else:
_, goal_poses, cur_poses = env.poses
assert len(goal_poses) == len(cur_poses)
failed_places_and_min_dist = (float("inf"), float("inf"))
obj_pose_to_go_to = None
goal_obj_pos = None
for gp, cp in zip(goal_poses, cur_poses):
if (
(gp["broken"] == cp["broken"] == False)
and self.object_name_to_priority[gp["name"]]
<= self.max_priority_per_object
and not RearrangeTHOREnvironment.are_poses_equal(gp, cp)
):
priority = self.object_name_to_priority[gp["name"]]
priority_and_dist_to_object = (
priority,
IThorEnvironment.position_dist(
agent_loc, gp["position"], ignore_y=True, l1_dist=True
),
)
if (
self._last_to_interact_object_pose is not None
and self._last_to_interact_object_pose["name"] == gp["name"]
):
# Set distance to -1 for the currently targeted object
priority_and_dist_to_object = (
priority_and_dist_to_object[0],
-1,
)
if priority_and_dist_to_object < failed_places_and_min_dist:
failed_places_and_min_dist = priority_and_dist_to_object
obj_pose_to_go_to = cp
goal_obj_pos = gp
self._last_to_interact_object_pose = obj_pose_to_go_to
if obj_pose_to_go_to is None:
# There are no objects we need to change
return dict(action="Done")
expert_nav_action = self._expert_nav_action_to_obj(obj=obj_pose_to_go_to)
if expert_nav_action is None:
interactable_positions = self.task.env._interactable_positions_cache.get(
scene_name=env.scene,
obj=obj_pose_to_go_to,
controller=env.controller,
)
if len(interactable_positions) != 0:
# Could not find a path to the object, increment the place count of the object and
# try generating a new action.
get_logger().debug(
f"Could not find a path to {obj_pose_to_go_to}"
f" in scene {self.task.unshuffle_env.scene}"
f" when at position {self.task.unshuffle_env.get_agent_location()}."
)
else:
get_logger().debug(
f"Object {obj_pose_to_go_to} in scene {self.task.unshuffle_env.scene}"
f" has no interactable positions."
)
self.object_name_to_priority[obj_pose_to_go_to["name"]] += 1
return self._generate_expert_action_dict()
elif expert_nav_action == "Pass":
with include_object_data(env.controller):
visible_objects = {
o["name"]
for o in env.last_event.metadata["objects"]
if o["visible"]
}
if obj_pose_to_go_to["name"] not in visible_objects:
if self._invalidate_interactable_loc_for_pose(
location=agent_loc, obj_pose=obj_pose_to_go_to
):
return self._generate_expert_action_dict()
raise RuntimeError("This should not be possible.")
# The object of interest is interactable at the moment
if (
obj_pose_to_go_to["openness"] is not None
and obj_pose_to_go_to["openness"] != goal_obj_pos["openness"]
):
return dict(
action="OpenByType",
objectId=obj_pose_to_go_to["objectId"],
openness=goal_obj_pos["openness"],
)
elif obj_pose_to_go_to["pickupable"]:
return dict(
action="Pickup", objectId=obj_pose_to_go_to["objectId"],
)
else:
# We (likely) have an openable object which has been moved somehow but is not
# pickupable. We don't know what to do with such an object so we'll set its
# place count to a large value and try again.
get_logger().warning(
f"{obj_pose_to_go_to['name']} has moved but is not pickupable."
)
self.object_name_to_priority[goal_obj_pos["name"]] = (
self.max_priority_per_object + 1
)
return self._generate_expert_action_dict()
else:
# If we are not looking at the object to change, then we should navigate to it
return dict(action=expert_nav_action)
def _generate_and_record_expert_action(self):
"""Generate the next greedy expert action and save it to the
`expert_action_list`."""
if self.task.num_steps_taken() == len(self.expert_action_list) + 1:
get_logger().warning(
f"Already generated the expert action at step {self.task.num_steps_taken()}"
)
return
assert self.task.num_steps_taken() == len(
self.expert_action_list
), f"{self.task.num_steps_taken()} != {len(self.expert_action_list)}"
expert_action_dict = self._generate_expert_action_dict()
action_str = stringcase.snakecase(expert_action_dict["action"])
if action_str not in self.task.action_names():
obj_type = stringcase.snakecase(
expert_action_dict["objectId"].split("|")[0]
)
action_str = f"{action_str}_{obj_type}"
try:
self.expert_action_list.append(self.task.action_names().index(action_str))
except ValueError:
get_logger().error(
f"{action_str} is not a valid action for the given task."
)
self.expert_action_list.append(None)
def __test():
# noinspection PyUnresolvedReferences
from baseline_configs.one_phase.one_phase_rgb_base import (
OnePhaseRGBBaseExperimentConfig,
)
# noinspection PyUnresolvedReferences
from rearrange.utils import save_frames_to_mp4
task_sampler = OnePhaseRGBBaseExperimentConfig.make_sampler_fn(
stage="train", seed=0, force_cache_reset=True, allowed_scenes=None, epochs=1,
)
random_action_prob = 0.0
shortest_path_navigator = ShortestPathNavigatorTHOR(
controller=task_sampler.unshuffle_env.controller, grid_size=STEP_SIZE
)
k = 0
# If you want to restart from a particular k:
# task_sampler.reset()
# for _ in range(k):
# next(task_sampler.task_spec_iterator)
all_metrics = []
while task_sampler.length > 0:
print(k)
random.seed(k)
k += 1
task = task_sampler.next_task()
assert task is not None
greedy_expert = GreedyUnshuffleExpert(
task=task, shortest_path_navigator=shortest_path_navigator
)
controller = task_sampler.unshuffle_env.controller
frames = [controller.last_event.frame]
while not task.is_done():
if random.random() < random_action_prob:
assert task.action_names()[0] == "done"
action_to_take = random.randint(1, len(task.action_names()) - 1)
else:
action_to_take = greedy_expert.expert_action
# print(task.action_names()[action_to_take])
step_result = task.step(action_to_take)
task.unshuffle_env.controller.step("Pass")
task.walkthrough_env.controller.step("Pass")
greedy_expert.update(
action_taken=action_to_take,
action_success=step_result.info["action_success"],
)
frames.append(controller.last_event.frame)
if task.metrics()["unshuffle/prop_fixed"] == 1:
print("Greedy expert success")
# save_frames_to_mp4(frames=frames, file_name=f"rearrange_expert_{k}.mp4")
else:
print("Greedy expert failure")
metrics = task.metrics()
# print(metrics)
all_metrics.append(metrics)
print(f"{len(all_metrics)} tasks evaluated with expert.")
if __name__ == "__main__":
__test()
| ai2thor-rearrangement-main | rearrange/expert.py |
import os
from pathlib import Path
MAX_HAND_METERS = 0.5
FOV = 90
REQUIRED_THOR_VERSION = "5.0.0"
STARTER_DATA_DIR = os.path.join(
os.path.abspath(os.path.dirname(Path(__file__))), "../data", "2023",
)
THOR_COMMIT_ID = "a9ccb07faf771377c9ff1615bfe7e0ad01968663"
STEP_SIZE = 0.25
# fmt: off
REARRANGE_SIM_OBJECTS = [
# A
"AlarmClock", "AluminumFoil", "Apple", "AppleSliced", "ArmChair",
"BaseballBat", "BasketBall", "Bathtub", "BathtubBasin", "Bed", "Blinds", "Book", "Boots", "Bottle", "Bowl", "Box",
# B
"Bread", "BreadSliced", "ButterKnife",
# C
"Cabinet", "Candle", "CD", "CellPhone", "Chair", "Cloth", "CoffeeMachine", "CoffeeTable", "CounterTop", "CreditCard",
"Cup", "Curtains",
# D
"Desk", "DeskLamp", "Desktop", "DiningTable", "DishSponge", "DogBed", "Drawer", "Dresser", "Dumbbell",
# E
"Egg", "EggCracked",
# F
"Faucet", "Floor", "FloorLamp", "Footstool", "Fork", "Fridge",
# G
"GarbageBag", "GarbageCan",
# H
"HandTowel", "HandTowelHolder", "HousePlant", "Kettle", "KeyChain", "Knife",
# L
"Ladle", "Laptop", "LaundryHamper", "Lettuce", "LettuceSliced", "LightSwitch",
# M
"Microwave", "Mirror", "Mug",
# N
"Newspaper",
# O
"Ottoman",
# P
"Painting", "Pan", "PaperTowel", "Pen", "Pencil", "PepperShaker", "Pillow", "Plate", "Plunger", "Poster", "Pot",
"Potato", "PotatoSliced",
# R
"RemoteControl", "RoomDecor",
# S
"Safe", "SaltShaker", "ScrubBrush", "Shelf", "ShelvingUnit", "ShowerCurtain", "ShowerDoor", "ShowerGlass",
"ShowerHead", "SideTable", "Sink", "SinkBasin", "SoapBar", "SoapBottle", "Sofa", "Spatula", "Spoon", "SprayBottle",
"Statue", "Stool", "StoveBurner", "StoveKnob",
# T
"TableTopDecor", "TargetCircle", "TeddyBear", "Television", "TennisRacket", "TissueBox", "Toaster", "Toilet",
"ToiletPaper", "ToiletPaperHanger", "Tomato", "TomatoSliced", "Towel", "TowelHolder", "TVStand",
# V
"VacuumCleaner", "Vase",
# W
"Watch", "WateringCan", "Window", "WineBottle",
]
# fmt: on
# fmt: off
OBJECT_TYPES_WITH_PROPERTIES = {
"StoveBurner": {"openable": False, "receptacle": True, "pickupable": False},
"Drawer": {"openable": True, "receptacle": True, "pickupable": False},
"CounterTop": {"openable": False, "receptacle": True, "pickupable": False},
"Cabinet": {"openable": True, "receptacle": True, "pickupable": False},
"StoveKnob": {"openable": False, "receptacle": False, "pickupable": False},
"Window": {"openable": False, "receptacle": False, "pickupable": False},
"Sink": {"openable": False, "receptacle": True, "pickupable": False},
"Floor": {"openable": False, "receptacle": True, "pickupable": False},
"Book": {"openable": True, "receptacle": False, "pickupable": True},
"Bottle": {"openable": False, "receptacle": False, "pickupable": True},
"Knife": {"openable": False, "receptacle": False, "pickupable": True},
"Microwave": {"openable": True, "receptacle": True, "pickupable": False},
"Bread": {"openable": False, "receptacle": False, "pickupable": True},
"Fork": {"openable": False, "receptacle": False, "pickupable": True},
"Shelf": {"openable": False, "receptacle": True, "pickupable": False},
"Potato": {"openable": False, "receptacle": False, "pickupable": True},
"HousePlant": {"openable": False, "receptacle": False, "pickupable": False},
"Toaster": {"openable": False, "receptacle": True, "pickupable": False},
"SoapBottle": {"openable": False, "receptacle": False, "pickupable": True},
"Kettle": {"openable": True, "receptacle": False, "pickupable": True},
"Pan": {"openable": False, "receptacle": True, "pickupable": True},
"Plate": {"openable": False, "receptacle": True, "pickupable": True},
"Tomato": {"openable": False, "receptacle": False, "pickupable": True},
"Vase": {"openable": False, "receptacle": False, "pickupable": True},
"GarbageCan": {"openable": False, "receptacle": True, "pickupable": False},
"Egg": {"openable": False, "receptacle": False, "pickupable": True},
"CreditCard": {"openable": False, "receptacle": False, "pickupable": True},
"WineBottle": {"openable": False, "receptacle": False, "pickupable": True},
"Pot": {"openable": False, "receptacle": True, "pickupable": True},
"Spatula": {"openable": False, "receptacle": False, "pickupable": True},
"PaperTowelRoll": {"openable": False, "receptacle": False, "pickupable": True},
"Cup": {"openable": False, "receptacle": True, "pickupable": True},
"Fridge": {"openable": True, "receptacle": True, "pickupable": False},
"CoffeeMachine": {"openable": False, "receptacle": True, "pickupable": False},
"Bowl": {"openable": False, "receptacle": True, "pickupable": True},
"SinkBasin": {"openable": False, "receptacle": True, "pickupable": False},
"SaltShaker": {"openable": False, "receptacle": False, "pickupable": True},
"PepperShaker": {"openable": False, "receptacle": False, "pickupable": True},
"Lettuce": {"openable": False, "receptacle": False, "pickupable": True},
"ButterKnife": {"openable": False, "receptacle": False, "pickupable": True},
"Apple": {"openable": False, "receptacle": False, "pickupable": True},
"DishSponge": {"openable": False, "receptacle": False, "pickupable": True},
"Spoon": {"openable": False, "receptacle": False, "pickupable": True},
"LightSwitch": {"openable": False, "receptacle": False, "pickupable": False},
"Mug": {"openable": False, "receptacle": True, "pickupable": True},
"ShelvingUnit": {"openable": False, "receptacle": True, "pickupable": False},
"Statue": {"openable": False, "receptacle": False, "pickupable": True},
"Stool": {"openable": False, "receptacle": True, "pickupable": False},
"Faucet": {"openable": False, "receptacle": False, "pickupable": False},
"Ladle": {"openable": False, "receptacle": False, "pickupable": True},
"CellPhone": {"openable": False, "receptacle": False, "pickupable": True},
"Chair": {"openable": False, "receptacle": True, "pickupable": False},
"SideTable": {"openable": False, "receptacle": True, "pickupable": False},
"DiningTable": {"openable": False, "receptacle": True, "pickupable": False},
"Pen": {"openable": False, "receptacle": False, "pickupable": True},
"SprayBottle": {"openable": False, "receptacle": False, "pickupable": True},
"Curtains": {"openable": False, "receptacle": False, "pickupable": False},
"Pencil": {"openable": False, "receptacle": False, "pickupable": True},
"Blinds": {"openable": True, "receptacle": False, "pickupable": False},
"GarbageBag": {"openable": False, "receptacle": False, "pickupable": False},
"Safe": {"openable": True, "receptacle": True, "pickupable": False},
"Painting": {"openable": False, "receptacle": False, "pickupable": False},
"Box": {"openable": True, "receptacle": True, "pickupable": True},
"Laptop": {"openable": True, "receptacle": False, "pickupable": True},
"Television": {"openable": False, "receptacle": False, "pickupable": False},
"TissueBox": {"openable": False, "receptacle": False, "pickupable": True},
"KeyChain": {"openable": False, "receptacle": False, "pickupable": True},
"FloorLamp": {"openable": False, "receptacle": False, "pickupable": False},
"DeskLamp": {"openable": False, "receptacle": False, "pickupable": False},
"Pillow": {"openable": False, "receptacle": False, "pickupable": True},
"RemoteControl": {"openable": False, "receptacle": False, "pickupable": True},
"Watch": {"openable": False, "receptacle": False, "pickupable": True},
"Newspaper": {"openable": False, "receptacle": False, "pickupable": True},
"ArmChair": {"openable": False, "receptacle": True, "pickupable": False},
"CoffeeTable": {"openable": False, "receptacle": True, "pickupable": False},
"TVStand": {"openable": False, "receptacle": True, "pickupable": False},
"Sofa": {"openable": False, "receptacle": True, "pickupable": False},
"WateringCan": {"openable": False, "receptacle": False, "pickupable": True},
"Boots": {"openable": False, "receptacle": False, "pickupable": True},
"Ottoman": {"openable": False, "receptacle": True, "pickupable": False},
"Desk": {"openable": False, "receptacle": True, "pickupable": False},
"Dresser": {"openable": False, "receptacle": True, "pickupable": False},
"Mirror": {"openable": False, "receptacle": False, "pickupable": False},
"DogBed": {"openable": False, "receptacle": True, "pickupable": False},
"Candle": {"openable": False, "receptacle": False, "pickupable": True},
"RoomDecor": {"openable": False, "receptacle": False, "pickupable": False},
"Bed": {"openable": False, "receptacle": True, "pickupable": False},
"BaseballBat": {"openable": False, "receptacle": False, "pickupable": True},
"BasketBall": {"openable": False, "receptacle": False, "pickupable": True},
"AlarmClock": {"openable": False, "receptacle": False, "pickupable": True},
"CD": {"openable": False, "receptacle": False, "pickupable": True},
"TennisRacket": {"openable": False, "receptacle": False, "pickupable": True},
"TeddyBear": {"openable": False, "receptacle": False, "pickupable": True},
"Poster": {"openable": False, "receptacle": False, "pickupable": False},
"Cloth": {"openable": False, "receptacle": False, "pickupable": True},
"Dumbbell": {"openable": False, "receptacle": False, "pickupable": True},
"LaundryHamper": {"openable": True, "receptacle": True, "pickupable": False},
"TableTopDecor": {"openable": False, "receptacle": False, "pickupable": True},
"Desktop": {"openable": False, "receptacle": False, "pickupable": False},
"Footstool": {"openable": False, "receptacle": True, "pickupable": True},
"BathtubBasin": {"openable": False, "receptacle": True, "pickupable": False},
"ShowerCurtain": {"openable": True, "receptacle": False, "pickupable": False},
"ShowerHead": {"openable": False, "receptacle": False, "pickupable": False},
"Bathtub": {"openable": False, "receptacle": True, "pickupable": False},
"Towel": {"openable": False, "receptacle": False, "pickupable": True},
"HandTowel": {"openable": False, "receptacle": False, "pickupable": True},
"Plunger": {"openable": False, "receptacle": False, "pickupable": True},
"TowelHolder": {"openable": False, "receptacle": True, "pickupable": False},
"ToiletPaperHanger": {"openable": False, "receptacle": True, "pickupable": False},
"SoapBar": {"openable": False, "receptacle": False, "pickupable": True},
"ToiletPaper": {"openable": False, "receptacle": False, "pickupable": True},
"HandTowelHolder": {"openable": False, "receptacle": True, "pickupable": False},
"ScrubBrush": {"openable": False, "receptacle": False, "pickupable": True},
"Toilet": {"openable": True, "receptacle": True, "pickupable": False},
"ShowerGlass": {"openable": False, "receptacle": False, "pickupable": False},
"ShowerDoor": {"openable": True, "receptacle": False, "pickupable": False},
"AluminumFoil": {"openable": False, "receptacle": False, "pickupable": True},
"VacuumCleaner": {"openable": False, "receptacle": False, "pickupable": False}
}
# fmt: on
PICKUPABLE_OBJECTS = list(
sorted(
[
object_type
for object_type, properties in OBJECT_TYPES_WITH_PROPERTIES.items()
if properties["pickupable"]
]
)
)
OPENABLE_OBJECTS = list(
sorted(
[
object_type
for object_type, properties in OBJECT_TYPES_WITH_PROPERTIES.items()
if properties["openable"] and not properties["pickupable"]
]
)
)
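# Derived from OBJECT_TYPES_WITH_PROPERTIES above: PICKUPABLE_OBJECTS contains types such
# as "Apple", "Mug", and "Pillow", while OPENABLE_OBJECTS contains types that are openable
# but not pickupable, such as "Cabinet", "Drawer", "Fridge", and "Microwave".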
| ai2thor-rearrangement-main | rearrange/constants.py |
ai2thor-rearrangement-main | rearrange/__init__.py |
|
from typing import (
Optional,
Tuple,
Sequence,
Union,
Dict,
Any,
)
import gym
import gym.spaces
import numpy as np
import torch
import torch.nn as nn
from torch import Tensor
from allenact.algorithms.onpolicy_sync.policy import (
ActorCriticModel,
DistributionType,
LinearActorCriticHead,
)
from allenact.algorithms.onpolicy_sync.policy import (
LinearCriticHead,
LinearActorHead,
ObservationType,
)
from allenact.base_abstractions.distributions import CategoricalDistr
from allenact.base_abstractions.misc import ActorCriticOutput, Memory
from allenact.embodiedai.mapping.mapping_models.active_neural_slam import (
ActiveNeuralSLAM,
)
from allenact.embodiedai.models.basic_models import SimpleCNN, RNNStateEncoder
from allenact.utils.misc_utils import prepare_locals_for_super
from allenact.utils.model_utils import simple_conv_and_linear_weights_init
class RearrangeActorCriticSimpleConvRNN(ActorCriticModel[CategoricalDistr]):
"""A CNN->RNN actor-critic model for rearrangement tasks."""
def __init__(
self,
action_space: gym.spaces.Discrete,
observation_space: gym.spaces.Dict,
rgb_uuid: str,
unshuffled_rgb_uuid: str,
hidden_size=512,
num_rnn_layers=1,
rnn_type="GRU",
):
"""Initialize a `RearrangeActorCriticSimpleConvRNN` object.
# Parameters
action_space : The action space of the agent.
Should equal `gym.spaces.Discrete(# actions available to the agent)`.
observation_space : The observation space available to the agent.
rgb_uuid : The unique id of the RGB image sensor (see `RGBSensor`).
unshuffled_rgb_uuid : The unique id of the `UnshuffledRGBRearrangeSensor` available to the agent.
hidden_size : The size of the hidden layer of the RNN.
num_rnn_layers: The number of hidden layers in the RNN.
rnn_type : The RNN type, should be "GRU" or "LSTM".
"""
super().__init__(action_space=action_space, observation_space=observation_space)
self._hidden_size = hidden_size
self.rgb_uuid = rgb_uuid
self.unshuffled_rgb_uuid = unshuffled_rgb_uuid
self.concat_rgb_uuid = "concat_rgb"
assert self.concat_rgb_uuid not in observation_space
self.visual_encoder = self._create_visual_encoder()
self.state_encoder = RNNStateEncoder(
self.recurrent_hidden_state_size,
self._hidden_size,
num_layers=num_rnn_layers,
rnn_type=rnn_type,
)
self.actor = LinearActorHead(self._hidden_size, action_space.n)
self.critic = LinearCriticHead(self._hidden_size)
self.train()
def _create_visual_encoder(self) -> nn.Module:
"""Create the visual encoder for the model."""
img_space: gym.spaces.Box = self.observation_space[self.rgb_uuid]
return SimpleCNN(
observation_space=gym.spaces.Dict(
{
self.concat_rgb_uuid: gym.spaces.Box(
low=np.tile(img_space.low, (1, 1, 2)),
high=np.tile(img_space.high, (1, 1, 2)),
shape=img_space.shape[:2] + (img_space.shape[2] * 2,),
)
}
),
output_size=self._hidden_size,
rgb_uuid=self.concat_rgb_uuid,
depth_uuid=None,
)
@property
def output_size(self):
return self._hidden_size
@property
def num_recurrent_layers(self):
return self.state_encoder.num_recurrent_layers
@property
def recurrent_hidden_state_size(self):
return self._hidden_size
def _recurrent_memory_specification(self):
return dict(
rnn=(
(
("layer", self.num_recurrent_layers),
("sampler", None),
("hidden", self.recurrent_hidden_state_size),
),
torch.float32,
)
)
def forward( # type:ignore
self,
observations: ObservationType,
memory: Memory,
prev_actions: torch.Tensor,
masks: torch.FloatTensor,
) -> Tuple[ActorCriticOutput[DistributionType], Optional[Memory]]:
cur_img = observations[self.rgb_uuid]
unshuffled_img = observations[self.unshuffled_rgb_uuid]
concat_img = torch.cat((cur_img, unshuffled_img), dim=-1)
x = self.visual_encoder({self.concat_rgb_uuid: concat_img})
x, rnn_hidden_states = self.state_encoder(x, memory.tensor("rnn"), masks)
ac_output = ActorCriticOutput(
distributions=self.actor(x), values=self.critic(x), extras={}
)
return ac_output, memory.set_tensor("rnn", rnn_hidden_states)
class ResNetRearrangeActorCriticRNN(RearrangeActorCriticSimpleConvRNN):
def __init__(
self,
action_space: gym.spaces.Discrete,
observation_space: gym.spaces.Dict,
rgb_uuid: str,
unshuffled_rgb_uuid: str,
hidden_size=512,
num_rnn_layers=1,
rnn_type="GRU",
):
"""A CNN->RNN rearrangement model that expects ResNet features instead
of RGB images.
Nearly identical to `RearrangeActorCriticSimpleConvRNN` but
`rgb_uuid` should now be the unique id of the ResNetPreprocessor
used to featurize RGB images using a pretrained ResNet before
they're passed to this model.
"""
self.visual_attention: Optional[nn.Module] = None
super().__init__(**prepare_locals_for_super(locals()))
def _create_visual_encoder(self) -> nn.Module:
a, b = [
self.observation_space[k].shape[0]
for k in [self.rgb_uuid, self.unshuffled_rgb_uuid]
]
assert a == b
self.visual_attention = nn.Sequential(
nn.Conv2d(3 * a, 32, 1,), nn.ReLU(inplace=True), nn.Conv2d(32, 1, 1,),
)
visual_encoder = nn.Sequential(
nn.Conv2d(3 * a, self._hidden_size, 1,), nn.ReLU(inplace=True),
)
self.visual_attention.apply(simple_conv_and_linear_weights_init)
visual_encoder.apply(simple_conv_and_linear_weights_init)
return visual_encoder
def forward( # type:ignore
self,
observations: ObservationType,
memory: Memory,
prev_actions: torch.Tensor,
masks: torch.FloatTensor,
) -> Tuple[ActorCriticOutput[DistributionType], Optional[Memory]]:
cur_img_resnet = observations[self.rgb_uuid]
unshuffled_img_resnet = observations[self.unshuffled_rgb_uuid]
concat_img = torch.cat(
(
cur_img_resnet,
unshuffled_img_resnet,
cur_img_resnet * unshuffled_img_resnet,
),
dim=-3,
)
batch_shape, features_shape = concat_img.shape[:-3], concat_img.shape[-3:]
concat_img_reshaped = concat_img.view(-1, *features_shape)
attention_probs = torch.softmax(
self.visual_attention(concat_img_reshaped).view(
concat_img_reshaped.shape[0], -1
),
dim=-1,
).view(concat_img_reshaped.shape[0], 1, *concat_img_reshaped.shape[-2:])
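        # `attention_probs` is a spatial softmax over the H*W feature grid; weighting the
        # encoded features by it and averaging spatially yields an attention-pooled vector.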
x = (
(self.visual_encoder(concat_img_reshaped) * attention_probs)
.mean(-1)
.mean(-1)
)
x = x.view(*batch_shape, -1)
x, rnn_hidden_states = self.state_encoder(x, memory.tensor("rnn"), masks)
ac_output = ActorCriticOutput(
distributions=self.actor(x), values=self.critic(x), extras={}
)
return ac_output, memory.set_tensor("rnn", rnn_hidden_states)
class TwoPhaseRearrangeActorCriticSimpleConvRNN(ActorCriticModel[CategoricalDistr]):
def __init__(
self,
action_space: gym.spaces.Discrete,
observation_space: gym.spaces.Dict,
rgb_uuid: str,
unshuffled_rgb_uuid: str,
in_walkthrough_phase_uuid: str,
is_walkthrough_phase_embedding_dim: int,
done_action_index: int,
walkthrougher_should_ignore_action_mask: Optional[Sequence[float]] = None,
prev_action_embedding_dim: int = 32,
hidden_size=512,
num_rnn_layers=1,
rnn_type="GRU",
):
"""A CNN->RNN model for joint training of the Walkthrough and Unshuffle
tasks.
Similar to `RearrangeActorCriticSimpleConvRNN` but with some
additional sensor inputs (e.g. the `InWalkthroughPhaseSensor` is
used to tell the agent which phase it is in).
"""
super().__init__(action_space=action_space, observation_space=observation_space)
self._hidden_size = hidden_size
self.rgb_uuid = rgb_uuid
self.unshuffled_rgb_uuid = unshuffled_rgb_uuid
self.in_walkthrough_phase_uuid = in_walkthrough_phase_uuid
self.done_action_index = done_action_index
self.prev_action_embedder = nn.Embedding(
action_space.n + 1, embedding_dim=prev_action_embedding_dim
)
self.is_walkthrough_phase_embedder = nn.Embedding(
num_embeddings=2, embedding_dim=is_walkthrough_phase_embedding_dim
)
self.walkthrough_good_action_logits: Optional[torch.Tensor]
if walkthrougher_should_ignore_action_mask is not None:
self.register_buffer(
"walkthrough_good_action_logits",
-1000 * torch.FloatTensor(walkthrougher_should_ignore_action_mask),
persistent=False,
)
else:
self.walkthrough_good_action_logits = None
self.concat_rgb_uuid = "concat_rgb"
assert self.concat_rgb_uuid not in observation_space
self.visual_encoder = self._create_visual_encoder()
self.state_encoder = RNNStateEncoder(
prev_action_embedding_dim
+ is_walkthrough_phase_embedding_dim
+ 2 * self.recurrent_hidden_state_size,
self._hidden_size,
num_layers=num_rnn_layers,
rnn_type=rnn_type,
)
self.walkthrough_encoder = RNNStateEncoder(
self._hidden_size, self._hidden_size, num_layers=1, rnn_type="GRU",
)
self.apply(simple_conv_and_linear_weights_init)
self.walkthrough_ac = LinearActorCriticHead(self._hidden_size, action_space.n)
self.walkthrough_ac.actor_and_critic.bias.data[self.done_action_index] -= 3
self.unshuffle_ac = LinearActorCriticHead(self._hidden_size, action_space.n)
self.train()
def _create_visual_encoder(self) -> nn.Module:
img_space: gym.spaces.Box = self.observation_space[self.rgb_uuid]
return SimpleCNN(
observation_space=gym.spaces.Dict(
{
self.concat_rgb_uuid: gym.spaces.Box(
low=np.tile(img_space.low, (1, 1, 2)),
high=np.tile(img_space.high, (1, 1, 2)),
shape=img_space.shape[:2] + (img_space.shape[2] * 2,),
)
}
),
output_size=self._hidden_size,
rgb_uuid=self.concat_rgb_uuid,
depth_uuid=None,
)
def load_state_dict(
self,
        state_dict: Dict[str, Tensor],
strict: bool = True,
):
        # For backwards compatibility, renames "explore" to "walkthrough"
# in state dict keys.
for key in list(state_dict.keys()):
if "explore" in key:
new_key = key.replace("explore", "walkthrough")
assert new_key not in state_dict
state_dict[new_key] = state_dict[key]
del state_dict[key]
if "walkthrough_good_action_logits" in state_dict:
del state_dict["walkthrough_good_action_logits"]
return super(TwoPhaseRearrangeActorCriticSimpleConvRNN, self).load_state_dict(
state_dict=state_dict, strict=strict
)
@property
def num_recurrent_layers(self):
return self.state_encoder.num_recurrent_layers
@property
def recurrent_hidden_state_size(self):
return self._hidden_size
def _recurrent_memory_specification(self):
return dict(
rnn=(
(
("layer", self.num_recurrent_layers),
("sampler", None),
("hidden", self.recurrent_hidden_state_size),
),
torch.float32,
),
walkthrough_encoding=(
(
("layer", self.walkthrough_encoder.num_recurrent_layers),
("sampler", None),
("hidden", self.recurrent_hidden_state_size),
),
torch.float32,
),
)
def forward( # type:ignore
self,
observations: ObservationType,
memory: Memory,
prev_actions: torch.Tensor,
masks: torch.FloatTensor,
) -> Tuple[ActorCriticOutput[DistributionType], Optional[Memory]]:
in_walkthrough_phase_mask = observations[self.in_walkthrough_phase_uuid]
in_unshuffle_phase_mask = ~in_walkthrough_phase_mask
in_walkthrough_float = in_walkthrough_phase_mask.float()
in_unshuffle_float = in_unshuffle_phase_mask.float()
# Don't reset hidden state at start of the unshuffle task
masks_no_unshuffle_reset = (masks.bool() | in_unshuffle_phase_mask).float()
cur_img = observations[self.rgb_uuid]
unshuffled_img = observations[self.unshuffled_rgb_uuid]
concat_img = torch.cat((cur_img, unshuffled_img), dim=-1)
# Various embeddings
vis_features = self.visual_encoder({self.concat_rgb_uuid: concat_img})
prev_action_embeddings = self.prev_action_embedder(
((~masks.bool()).long() * (prev_actions.unsqueeze(-1) + 1))
).squeeze(-2)
is_walkthrough_phase_embedding = self.is_walkthrough_phase_embedder(
in_walkthrough_phase_mask.long()
).squeeze(-2)
to_cat = [
vis_features,
prev_action_embeddings,
is_walkthrough_phase_embedding,
]
rnn_hidden_states = memory.tensor("rnn")
rnn_outs = []
obs_for_rnn = torch.cat(to_cat, dim=-1)
last_walkthrough_encoding = memory.tensor("walkthrough_encoding")
for step in range(masks.shape[0]):
rnn_out, rnn_hidden_states = self.state_encoder(
torch.cat(
(obs_for_rnn[step : step + 1], last_walkthrough_encoding), dim=-1
),
rnn_hidden_states,
masks[step : step + 1],
)
rnn_outs.append(rnn_out)
walkthrough_encoding, _ = self.walkthrough_encoder(
rnn_out,
last_walkthrough_encoding,
masks_no_unshuffle_reset[step : step + 1],
)
last_walkthrough_encoding = (
last_walkthrough_encoding * in_unshuffle_float[step : step + 1]
+ walkthrough_encoding * in_walkthrough_float[step : step + 1]
)
memory = memory.set_tensor("walkthrough_encoding", last_walkthrough_encoding)
rnn_out = torch.cat(rnn_outs, dim=0)
walkthrough_dist, walkthrough_vals = self.walkthrough_ac(rnn_out)
unshuffle_dist, unshuffle_vals = self.unshuffle_ac(rnn_out)
assert len(in_walkthrough_float.shape) == len(walkthrough_dist.logits.shape)
if self.walkthrough_good_action_logits is not None:
walkthrough_logits = (
walkthrough_dist.logits
+ self.walkthrough_good_action_logits.view(
*((1,) * (len(walkthrough_dist.logits.shape) - 1)), -1
)
)
else:
walkthrough_logits = walkthrough_dist.logits
actor = CategoricalDistr(
logits=in_walkthrough_float * walkthrough_logits
+ in_unshuffle_float * unshuffle_dist.logits
)
values = (
in_walkthrough_float * walkthrough_vals
+ in_unshuffle_float * unshuffle_vals
)
ac_output = ActorCriticOutput(distributions=actor, values=values, extras={})
return ac_output, memory.set_tensor("rnn", rnn_hidden_states)
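# A minimal, self-contained sketch (not from the original module; names and
# shapes here are illustrative assumptions) of the phase gating used in the
# `forward` methods above: per-step indicators select between the walkthrough
# and unshuffle actor-critic heads.
def _phase_gated_heads_sketch() -> None:
    import torch

    nsteps, nsamplers, num_actions = 4, 2, 5
    walkthrough_logits = torch.randn(nsteps, nsamplers, num_actions)
    unshuffle_logits = torch.randn(nsteps, nsamplers, num_actions)
    # 1.0 while the agent is still in the walkthrough phase, 0.0 afterwards.
    in_walkthrough = torch.randint(0, 2, (nsteps, nsamplers, 1)).float()
    in_unshuffle = 1.0 - in_walkthrough
    mixed_logits = (
        in_walkthrough * walkthrough_logits + in_unshuffle * unshuffle_logits
    )
    assert mixed_logits.shape == (nsteps, nsamplers, num_actions)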
class ResNetTwoPhaseRearrangeActorCriticRNN(TwoPhaseRearrangeActorCriticSimpleConvRNN):
def __init__(
self,
action_space: gym.spaces.Discrete,
observation_space: gym.spaces.Dict,
rgb_uuid: str,
unshuffled_rgb_uuid: str,
in_walkthrough_phase_uuid: str,
is_walkthrough_phase_embedding_dim: int,
done_action_index: int,
walkthrougher_should_ignore_action_mask: Optional[Sequence[float]] = None,
prev_action_embedding_dim: int = 32,
hidden_size=512,
num_rnn_layers=1,
rnn_type="GRU",
):
"""A CNN->RNN for joint training of the Walkthrough and Unshuffle tasks
that expects ResNet features instead of RGB images.
Nearly identical to `TwoPhaseRearrangeActorCriticSimpleConvRNN`
but `rgb_uuid` should now be the unique id of the
ResNetPreprocessor used to featurize RGB images using a
pretrained ResNet before they're passed to this model.
"""
self.visual_attention: Optional[nn.Module] = None
super().__init__(**prepare_locals_for_super(locals()))
def _create_visual_encoder(self) -> nn.Module:
a, b = [
self.observation_space[k].shape[0]
for k in [self.rgb_uuid, self.unshuffled_rgb_uuid]
]
assert a == b
self.visual_attention = nn.Sequential(
nn.Conv2d(3 * a, 32, 1,), nn.ReLU(inplace=True), nn.Conv2d(32, 1, 1,),
)
visual_encoder = nn.Sequential(
nn.Conv2d(3 * a, self._hidden_size, 1,), nn.ReLU(inplace=True),
)
self.visual_attention.apply(simple_conv_and_linear_weights_init)
visual_encoder.apply(simple_conv_and_linear_weights_init)
return visual_encoder
def forward( # type:ignore
self,
observations: ObservationType,
memory: Memory,
prev_actions: torch.Tensor,
masks: torch.FloatTensor,
) -> Tuple[ActorCriticOutput[DistributionType], Optional[Memory]]:
in_walkthrough_phase_mask = observations[self.in_walkthrough_phase_uuid]
in_unshuffle_phase_mask = ~in_walkthrough_phase_mask
in_walkthrough_float = in_walkthrough_phase_mask.float()
in_unshuffle_float = in_unshuffle_phase_mask.float()
# Don't reset hidden state at start of the unshuffle task
masks_no_unshuffle_reset = (masks.bool() | in_unshuffle_phase_mask).float()
masks_with_unshuffle_reset = masks.float()
        del masks  # Just to make sure we don't accidentally use `masks` when we want `masks_no_unshuffle_reset`
# Visual features
cur_img_resnet = observations[self.rgb_uuid]
unshuffled_img_resnet = observations[self.unshuffled_rgb_uuid]
concat_img = torch.cat(
(
cur_img_resnet,
unshuffled_img_resnet,
cur_img_resnet * unshuffled_img_resnet,
),
dim=-3,
)
batch_shape, features_shape = concat_img.shape[:-3], concat_img.shape[-3:]
concat_img_reshaped = concat_img.view(-1, *features_shape)
attention_probs = torch.softmax(
self.visual_attention(concat_img_reshaped).view(
concat_img_reshaped.shape[0], -1
),
dim=-1,
).view(concat_img_reshaped.shape[0], 1, *concat_img_reshaped.shape[-2:])
vis_features = (
(self.visual_encoder(concat_img_reshaped) * attention_probs)
.mean(-1)
.mean(-1)
)
vis_features = vis_features.view(*batch_shape, -1)
# Various embeddings
prev_action_embeddings = self.prev_action_embedder(
(
(~masks_with_unshuffle_reset.bool()).long()
* (prev_actions.unsqueeze(-1) + 1)
)
).squeeze(-2)
is_walkthrough_phase_embedding = self.is_walkthrough_phase_embedder(
in_walkthrough_phase_mask.long()
).squeeze(-2)
to_cat = [
vis_features,
prev_action_embeddings,
is_walkthrough_phase_embedding,
]
rnn_hidden_states = memory.tensor("rnn")
rnn_outs = []
obs_for_rnn = torch.cat(to_cat, dim=-1)
last_walkthrough_encoding = memory.tensor("walkthrough_encoding")
for step in range(masks_with_unshuffle_reset.shape[0]):
rnn_out, rnn_hidden_states = self.state_encoder(
torch.cat(
(
obs_for_rnn[step : step + 1],
last_walkthrough_encoding
* masks_no_unshuffle_reset[step : step + 1],
),
dim=-1,
),
rnn_hidden_states,
masks_with_unshuffle_reset[step : step + 1],
)
rnn_outs.append(rnn_out)
walkthrough_encoding, _ = self.walkthrough_encoder(
rnn_out,
last_walkthrough_encoding,
masks_no_unshuffle_reset[step : step + 1],
)
last_walkthrough_encoding = (
last_walkthrough_encoding * in_unshuffle_float[step : step + 1]
+ walkthrough_encoding * in_walkthrough_float[step : step + 1]
)
memory = memory.set_tensor("walkthrough_encoding", last_walkthrough_encoding)
rnn_out = torch.cat(rnn_outs, dim=0)
walkthrough_dist, walkthrough_vals = self.walkthrough_ac(rnn_out)
unshuffle_dist, unshuffle_vals = self.unshuffle_ac(rnn_out)
assert len(in_walkthrough_float.shape) == len(walkthrough_dist.logits.shape)
if self.walkthrough_good_action_logits is not None:
walkthrough_logits = (
walkthrough_dist.logits
+ self.walkthrough_good_action_logits.view(
*((1,) * (len(walkthrough_dist.logits.shape) - 1)), -1
)
)
else:
walkthrough_logits = walkthrough_dist.logits
actor = CategoricalDistr(
logits=in_walkthrough_float * walkthrough_logits
+ in_unshuffle_float * unshuffle_dist.logits
)
values = (
in_walkthrough_float * walkthrough_vals
+ in_unshuffle_float * unshuffle_vals
)
ac_output = ActorCriticOutput(distributions=actor, values=values, extras={})
return ac_output, memory.set_tensor("rnn", rnn_hidden_states)
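# Sketch (illustrative only; channel sizes are assumptions and this helper is
# not part of the original file) of the spatial attention pooling used by the
# ResNet-based models above: a small conv head scores each spatial location,
# the scores are softmax-normalized over H*W, and the per-location features
# are averaged with those weights.
def _attention_pooling_sketch() -> None:
    import torch
    from torch import nn

    batch, channels, h, w = 6, 3 * 512, 7, 7
    hidden_size = 512
    attention = nn.Sequential(
        nn.Conv2d(channels, 32, 1), nn.ReLU(inplace=True), nn.Conv2d(32, 1, 1)
    )
    encoder = nn.Sequential(nn.Conv2d(channels, hidden_size, 1), nn.ReLU(inplace=True))
    feats = torch.randn(batch, channels, h, w)
    probs = torch.softmax(attention(feats).view(batch, -1), dim=-1).view(batch, 1, h, w)
    pooled = (encoder(feats) * probs).mean(-1).mean(-1)  # -> (batch, hidden_size)
    assert pooled.shape == (batch, hidden_size)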
class WalkthroughActorCriticResNetWithPassiveMap(RearrangeActorCriticSimpleConvRNN):
"""A CNN->RNN actor-critic model for rearrangement tasks."""
def __init__(
self,
height_map_channels: int,
semantic_map_channels: int,
map_kwargs: Dict[str, Any],
**kwargs
):
super().__init__(**kwargs)
assert "n_map_channels" not in map_kwargs
map_kwargs["n_map_channels"] = height_map_channels + semantic_map_channels
self.height_map_channels = height_map_channels
self.semantic_map_channels = semantic_map_channels
self.map = ActiveNeuralSLAM(**map_kwargs)
self.resnet_features_downsampler = nn.Sequential(
nn.Conv2d(512, 64, 1),
nn.LeakyReLU(inplace=True),
nn.Conv2d(64, 128, 3),
nn.LeakyReLU(inplace=True),
nn.Conv2d(128, 256, 3),
nn.LeakyReLU(inplace=True),
nn.Conv2d(256, 512, 3),
nn.LeakyReLU(inplace=True),
nn.Flatten(),
)
self.resnet_features_downsampler.apply(simple_conv_and_linear_weights_init)
self.resnet_normalizer = nn.Sequential(
nn.Conv2d(512, 512, 1),
nn.LayerNorm(normalized_shape=[512, 7, 7], elementwise_affine=True,),
)
self.resnet_normalizer.apply(simple_conv_and_linear_weights_init)
assert self.rgb_uuid == self.unshuffled_rgb_uuid
def _create_visual_encoder(self) -> Optional[nn.Module]:
"""Create the visual encoder for the model."""
return None
@property
def visual_encoder(self):
# We make this a property as we don't want to register
# self.map.resnet_l5 as a submodule of this module, doing
        # so could defeat the point of setting
# `freeze_resnet_batchnorm` to `True` in the `ActiveNeuralSLAM`.
return self.map.resnet_l5
@visual_encoder.setter
def visual_encoder(self, val: None):
assert val is None, "Setting the visual encoder is not allowed."
def _recurrent_memory_specification(self):
return dict(
rnn=(
(
("layer", self.num_recurrent_layers),
("sampler", None),
("hidden", self.recurrent_hidden_state_size),
),
torch.float32,
)
)
def forward( # type:ignore
self,
observations: ObservationType,
memory: Memory,
prev_actions: torch.Tensor,
masks: torch.FloatTensor,
) -> Tuple[ActorCriticOutput[DistributionType], Optional[Memory]]:
img = observations[self.rgb_uuid]
nsteps, nsamplers, _, _, _ = img.shape
img = img.permute(0, 1, 4, 2, 3)
resnet_encoding = self.resnet_normalizer(
self.visual_encoder(img.view(nsteps * nsamplers, *img.shape[-3:]))
)
x, rnn_hidden_states = self.state_encoder(
self.resnet_features_downsampler(resnet_encoding.detach().clone()).view(
nsteps, nsamplers, 512
),
memory.tensor("rnn"),
masks,
)
ac_output = ActorCriticOutput(
distributions=self.actor(x), values=self.critic(x), extras={}
)
ego_map_logits = self.map.image_to_egocentric_map_logits(
images=None, resnet_image_features=resnet_encoding
)
ego_map_logits = ego_map_logits.view(
nsteps, nsamplers, *ego_map_logits.shape[-3:]
)
ac_output.extras["ego_height_binned_map_logits"] = ego_map_logits[
:, :, : self.height_map_channels
].view(nsteps, nsamplers, -1, *ego_map_logits.shape[-2:])
ac_output.extras["ego_semantic_map_logits"] = ego_map_logits[
:, :, self.height_map_channels :
].view(nsteps, nsamplers, -1, *ego_map_logits.shape[-2:])
return ac_output, memory.set_tensor("rnn", rnn_hidden_states)
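# Illustrative sketch (channel counts are assumptions, not part of the original
# module) of how the egocentric map logits produced above are split into
# height-binned and semantic channels for the auxiliary outputs in `extras`.
def _ego_map_channel_split_sketch() -> None:
    import torch

    nsteps, nsamplers = 2, 3
    height_channels, semantic_channels, map_h, map_w = 3, 20, 40, 40
    ego_map_logits = torch.randn(
        nsteps, nsamplers, height_channels + semantic_channels, map_h, map_w
    )
    height_binned = ego_map_logits[:, :, :height_channels]
    semantic = ego_map_logits[:, :, height_channels:]
    assert height_binned.shape[2] == height_channels
    assert semantic.shape[2] == semantic_channels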
class OnePhaseRearrangeActorCriticFrozenMap(ActorCriticModel[CategoricalDistr]):
"""A (IMG, MAP)->CNN->RNN actor-critic model for rearrangement tasks."""
def __init__(
self,
map: ActiveNeuralSLAM,
height_map_channels: int,
semantic_map_channels: int,
action_space: gym.spaces.Discrete,
observation_space: gym.spaces.Dict,
rgb_uuid: str,
unshuffled_rgb_uuid: str,
hidden_size=512,
num_rnn_layers=1,
rnn_type="GRU",
):
super().__init__(action_space=action_space, observation_space=observation_space)
self._hidden_size = hidden_size
self.rgb_uuid = rgb_uuid
self.unshuffled_rgb_uuid = unshuffled_rgb_uuid
self.concat_rgb_uuid = "concat_rgb"
assert self.concat_rgb_uuid not in observation_space
self.height_map_channels = height_map_channels
self.semantic_map_channels = semantic_map_channels
self.ego_map_encoder_out_dim = 512
self.ego_map_attention = nn.Sequential(
nn.Conv2d(
3 * (height_map_channels + semantic_map_channels), 128, 2, stride=2
),
nn.ReLU(inplace=True),
nn.Conv2d(128, 1, 1),
)
self.ego_map_encoder = nn.Sequential(
nn.Conv2d(
3 * (height_map_channels + semantic_map_channels),
self.ego_map_encoder_out_dim,
2,
stride=2,
),
nn.ReLU(inplace=True),
nn.Conv2d(self.ego_map_encoder_out_dim, self.ego_map_encoder_out_dim, 1,),
nn.ReLU(inplace=True),
)
self.ego_map_attention.apply(simple_conv_and_linear_weights_init)
self.ego_map_encoder.apply(simple_conv_and_linear_weights_init)
#
self.visual_attention = nn.Sequential(
nn.Conv2d(3 * 512, 32, 1,), nn.ReLU(inplace=True), nn.Conv2d(32, 1, 1,),
)
self.visual_encoder = nn.Sequential(
nn.Conv2d(3 * 512, self._hidden_size, 1,), nn.ReLU(inplace=True),
)
self.visual_attention.apply(simple_conv_and_linear_weights_init)
self.visual_encoder.apply(simple_conv_and_linear_weights_init)
# Used to predict whether or not there is an object with a different pose
# in front of the agent.
self.sem_difference_predictor = nn.Linear(
self.ego_map_encoder_out_dim, semantic_map_channels
)
self.sem_difference_predictor.apply(simple_conv_and_linear_weights_init)
# Standard CNN
self.state_encoder = RNNStateEncoder(
self.ego_map_encoder_out_dim + self.recurrent_hidden_state_size,
self._hidden_size,
num_layers=num_rnn_layers,
rnn_type=rnn_type,
)
self.actor = LinearActorHead(self._hidden_size, action_space.n)
self.critic = LinearCriticHead(self._hidden_size)
self.map = map
assert self.map.use_resnet_layernorm
assert self.map.freeze_resnet_batchnorm
for p in self.map.parameters():
p.requires_grad = False
self.train()
    def train(self, mode: bool = True):
        super(OnePhaseRearrangeActorCriticFrozenMap, self).train(mode)
        self.map.eval()  # The frozen map stays in eval mode regardless of `mode`.
@property
def output_size(self):
return self._hidden_size
@property
def num_recurrent_layers(self):
return self.state_encoder.num_recurrent_layers
@property
def recurrent_hidden_state_size(self):
return self._hidden_size
def _recurrent_memory_specification(self):
return dict(
rnn=(
(
("layer", self.num_recurrent_layers),
("sampler", None),
("hidden", self.recurrent_hidden_state_size),
),
torch.float32,
),
walkthrough_allo_map_probs=(
(
("sampler", None),
("channels", self.map.n_map_channels),
("height", self.map.map_size),
("width", self.map.map_size),
),
torch.float32,
),
)
def compute_visual_features(
self, imgs: torch.Tensor,
):
nsteps, nsamplers, h, w, c = imgs.shape
return self.map.resnet_normalizer(
self.map.resnet_l5(
imgs.permute(0, 1, 4, 2, 3).reshape(nsteps * nsamplers, c, h, w)
)
).view(nsteps, nsamplers, 512, 7, 7)
def _create_visual_encoder(self) -> Optional[nn.Module]:
"""Create the visual encoder for the model."""
return None
def _get_height_binned_map_and_semantic_map(
self, map: torch.Tensor, batch_size: int,
) -> Tuple[torch.Tensor, torch.Tensor]:
height_binned_map = map[:, :, : self.height_map_channels].view(
batch_size, -1, *map.shape[-2:]
)
semantic_map = map[:, :, self.height_map_channels :].view(
batch_size, -1, *map.shape[-2:]
)
return height_binned_map, semantic_map
def forward( # type:ignore
self,
observations: ObservationType,
memory: Memory,
prev_actions: torch.Tensor,
masks: torch.FloatTensor,
) -> Tuple[ActorCriticOutput[DistributionType], Optional[Memory]]:
unshuffle_img = observations[self.rgb_uuid]
walkthrough_img = observations[self.unshuffled_rgb_uuid]
nsteps, nsamplers, h, w, c = unshuffle_img.shape
with torch.no_grad():
unshuffle_img_embed = self.compute_visual_features(unshuffle_img)
walkthrough_img_embed = self.compute_visual_features(walkthrough_img)
concat_img = torch.cat(
(
unshuffle_img_embed,
walkthrough_img_embed,
unshuffle_img_embed * walkthrough_img_embed,
),
dim=-3,
)
batch_shape, features_shape = concat_img.shape[:-3], concat_img.shape[-3:]
concat_img_reshaped = concat_img.view(-1, *features_shape)
attention_probs = torch.softmax(
self.visual_attention(concat_img_reshaped).view(
concat_img_reshaped.shape[0], -1
),
dim=-1,
).view(concat_img_reshaped.shape[0], 1, *concat_img_reshaped.shape[-2:])
downsampled_img_embed = (
(self.visual_encoder(concat_img_reshaped) * attention_probs)
.mean(-1)
.mean(-1)
)
downsampled_img_embed = downsampled_img_embed.view(*batch_shape, -1)
########
dx_dz_drs_egocentric = observations["rel_position_change"]["dx_dz_dr"].clone()
last_allo_pos = observations["rel_position_change"][
"last_allocentric_position"
].clone()
scene_bounds = observations["scene_bounds"]
x_mins = scene_bounds["x_range"][..., 0]
z_mins = scene_bounds["z_range"][..., 0]
last_allo_pos_rel_bounds = last_allo_pos - torch.stack(
(x_mins, z_mins, torch.zeros_like(x_mins)), dim=-1
)
# Converting THOR rotation to rotation expected by map
last_allo_pos_rel_bounds[..., 2] = -last_allo_pos_rel_bounds[..., 2]
dx_dz_drs_egocentric[..., 2] *= -1
map_mask = masks.view(*masks.shape[:2], 1, 1, 1)
walkthrough_allo_map_probs = memory.tensor("walkthrough_allo_map_probs")
map_summaries = []
rnn_hidden_states = memory.tensor("rnn")
rnn_outputs_list = []
for step in range(nsteps):
with torch.no_grad():
walkthrough_allo_map_probs = ( # Reset the map
walkthrough_allo_map_probs * map_mask[step]
)
walkthrough_map_result = self.map.forward(
images=None,
resnet_image_features=walkthrough_img_embed[step],
last_map_probs_allocentric=walkthrough_allo_map_probs,
last_xzrs_allocentric=last_allo_pos_rel_bounds[step].view(-1, 3),
dx_dz_drs_egocentric=dx_dz_drs_egocentric[step],
last_map_logits_egocentric=None,
return_allocentric_maps=True,
)
walkthrough_allo_map_probs = walkthrough_map_result[
"map_probs_allocentric_no_grad"
]
unshuffle_map_result = self.map.forward(
images=None,
resnet_image_features=unshuffle_img_embed[step],
last_map_probs_allocentric=None,
last_xzrs_allocentric=last_allo_pos_rel_bounds[step].view(-1, 3),
dx_dz_drs_egocentric=dx_dz_drs_egocentric[step],
last_map_logits_egocentric=None,
return_allocentric_maps=False,
)
last_unshuffle_ego_map_logits = unshuffle_map_result[
"egocentric_update"
]
walkthrough_updated_allo_probs = torch.sigmoid(
walkthrough_allo_map_probs
)
walkthrough_updated_ego_probs = self.map.allocentric_map_to_egocentric_view(
allocentric_map=walkthrough_updated_allo_probs,
xzr=walkthrough_map_result["xzr_allocentric_preds"],
padding_mode="zeros",
)
a = walkthrough_updated_ego_probs
b = torch.sigmoid(last_unshuffle_ego_map_logits)
concat_map = torch.cat((a, b, a * b,), dim=1,)
attention_logits = self.ego_map_attention(concat_map)
attention_probs = torch.softmax(
attention_logits.view(concat_map.shape[0], -1), dim=-1,
).view(attention_logits.shape[0], 1, *attention_logits.shape[-2:])
map_summary = (
(self.ego_map_encoder(concat_map) * attention_probs).mean(-1).mean(-1)
)
map_summary = map_summary.view(concat_map.shape[0], -1)
map_summaries.append(map_summary)
x = torch.cat(
(downsampled_img_embed[step], map_summary,), dim=-1,
).unsqueeze(0)
x, rnn_hidden_states = self.state_encoder(
x, rnn_hidden_states, masks[step : (step + 1)]
)
rnn_outputs_list.append(x)
memory = memory.set_tensor(
key="walkthrough_allo_map_probs", tensor=walkthrough_allo_map_probs
)
memory = memory.set_tensor(key="rnn", tensor=rnn_hidden_states)
x = torch.cat(rnn_outputs_list, dim=0)
extras = {}
if torch.is_grad_enabled():
            # TODO: Create a loss to train the below as additional supervision
extras["object_type_change_logits"] = self.sem_difference_predictor(
torch.stack(map_summaries, dim=0)
)
return (
ActorCriticOutput(
distributions=self.actor(x), values=self.critic(x), extras=extras,
),
memory,
)
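# Sketch (values and shapes are made up; not part of the original file) of the
# coordinate preprocessing done above before calling the frozen map: the last
# allocentric position is shifted to be relative to the scene's minimum x/z
# bounds and THOR's rotation sign is flipped to the map's convention.
def _map_coordinate_preprocess_sketch() -> None:
    import torch

    last_allo_pos = torch.tensor([[[3.0, 2.0, 90.0]]])  # (nsteps, nsamplers, [x, z, rot])
    dx_dz_dr = torch.tensor([[[0.25, 0.0, -30.0]]])
    x_mins = torch.tensor([[1.0]])
    z_mins = torch.tensor([[0.5]])
    rel = last_allo_pos - torch.stack(
        (x_mins, z_mins, torch.zeros_like(x_mins)), dim=-1
    )
    rel[..., 2] = -rel[..., 2]  # THOR rotation -> map rotation convention
    dx_dz_dr[..., 2] *= -1  # same sign flip for the egocentric rotation change
    assert rel.shape == last_allo_pos.shape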
class TwoPhaseRearrangeActorCriticFrozenMap(ActorCriticModel[CategoricalDistr]):
"""A (IMG, MAP)->CNN->RNN actor-critic model for rearrangement tasks."""
def __init__(
self,
map: ActiveNeuralSLAM,
height_map_channels: int,
semantic_map_channels: int,
action_space: gym.spaces.Discrete,
observation_space: gym.spaces.Dict,
rgb_uuid: str,
in_walkthrough_phase_uuid: str,
is_walkthrough_phase_embedding_dim: int,
done_action_index: int,
walkthrougher_should_ignore_action_mask: Optional[Sequence[float]] = None,
hidden_size=512,
num_rnn_layers=1,
rnn_type="GRU",
):
super().__init__(action_space=action_space, observation_space=observation_space)
self._hidden_size = hidden_size
self.rgb_uuid = rgb_uuid
self.in_walkthrough_phase_uuid = in_walkthrough_phase_uuid
self.done_action_index = done_action_index
self.is_walkthrough_phase_embedder = nn.Embedding(
num_embeddings=2, embedding_dim=is_walkthrough_phase_embedding_dim
)
self.walkthrough_good_action_logits: Optional[torch.Tensor]
if walkthrougher_should_ignore_action_mask is not None:
self.register_buffer(
"walkthrough_good_action_logits",
-1000 * torch.FloatTensor(walkthrougher_should_ignore_action_mask),
persistent=False,
)
else:
self.walkthrough_good_action_logits = None
self.height_map_channels = height_map_channels
self.semantic_map_channels = semantic_map_channels
self.ego_map_encoder_out_dim = 512
self.ego_map_attention = nn.Sequential(
nn.Conv2d(
3 * (height_map_channels + semantic_map_channels), 128, 2, stride=2
),
nn.ReLU(inplace=True),
nn.Conv2d(128, 1, 1),
)
self.ego_map_encoder = nn.Sequential(
nn.Conv2d(
3 * (height_map_channels + semantic_map_channels),
self.ego_map_encoder_out_dim,
2,
stride=2,
),
nn.ReLU(inplace=True),
nn.Conv2d(self.ego_map_encoder_out_dim, self.ego_map_encoder_out_dim, 1,),
nn.ReLU(inplace=True),
)
self.ego_map_attention.apply(simple_conv_and_linear_weights_init)
self.ego_map_encoder.apply(simple_conv_and_linear_weights_init)
#
self.visual_encoder = nn.Sequential(
nn.Conv2d(512, 512, 1,),
nn.ReLU(inplace=True),
nn.AvgPool2d((7, 7)),
nn.Flatten(),
)
self.visual_encoder.apply(simple_conv_and_linear_weights_init)
# Used to predict whether or not there is an object with a different pose
# in front of the agent.
self.sem_difference_predictor = nn.Linear(
self.ego_map_encoder_out_dim, semantic_map_channels
)
self.sem_difference_predictor.apply(simple_conv_and_linear_weights_init)
# Standard CNN
self.state_encoder = RNNStateEncoder(
self.ego_map_encoder_out_dim
+ is_walkthrough_phase_embedding_dim
+ 2 * self.recurrent_hidden_state_size,
self._hidden_size,
num_layers=num_rnn_layers,
rnn_type=rnn_type,
)
self.state_encoder.apply(simple_conv_and_linear_weights_init)
self.walkthrough_encoder = RNNStateEncoder(
self._hidden_size, self._hidden_size, num_layers=1, rnn_type="GRU",
)
self.walkthrough_encoder.apply(simple_conv_and_linear_weights_init)
self.walkthrough_ac = LinearActorCriticHead(self._hidden_size, action_space.n)
self.walkthrough_ac.actor_and_critic.bias.data[self.done_action_index] -= 3
self.unshuffle_ac = LinearActorCriticHead(self._hidden_size, action_space.n)
self.map = map
assert self.map.use_resnet_layernorm
assert self.map.freeze_resnet_batchnorm
for p in self.map.parameters():
p.requires_grad = False
self.train()
    def train(self, mode: bool = True):
        super(TwoPhaseRearrangeActorCriticFrozenMap, self).train(mode)
        self.map.eval()  # The frozen map stays in eval mode regardless of `mode`.
@property
def output_size(self):
return self._hidden_size
@property
def num_recurrent_layers(self):
return self.state_encoder.num_recurrent_layers
@property
def recurrent_hidden_state_size(self):
return self._hidden_size
def _recurrent_memory_specification(self):
return dict(
rnn=(
(
("layer", self.num_recurrent_layers),
("sampler", None),
("hidden", self.recurrent_hidden_state_size),
),
torch.float32,
),
walkthrough_encoding=(
(
("layer", self.walkthrough_encoder.num_recurrent_layers),
("sampler", None),
("hidden", self.recurrent_hidden_state_size),
),
torch.float32,
),
walkthrough_allo_map_probs=(
(
("sampler", None),
("channels", self.map.n_map_channels),
("height", self.map.map_size),
("width", self.map.map_size),
),
torch.float32,
),
)
def compute_visual_features(
self, imgs: torch.Tensor,
):
nsteps, nsamplers, h, w, c = imgs.shape
return self.map.resnet_normalizer(
self.map.resnet_l5(
imgs.permute(0, 1, 4, 2, 3).reshape(nsteps * nsamplers, c, h, w)
)
).view(nsteps, nsamplers, 512, 7, 7)
def _create_visual_encoder(self) -> Optional[nn.Module]:
"""Create the visual encoder for the model."""
return None
def _get_height_binned_map_and_semantic_map(
self, map: torch.Tensor, batch_size: int,
) -> Tuple[torch.Tensor, torch.Tensor]:
height_binned_map = map[:, :, : self.height_map_channels].view(
batch_size, -1, *map.shape[-2:]
)
semantic_map = map[:, :, self.height_map_channels :].view(
batch_size, -1, *map.shape[-2:]
)
return height_binned_map, semantic_map
def forward( # type:ignore
self,
observations: ObservationType,
memory: Memory,
prev_actions: torch.Tensor,
masks: torch.FloatTensor,
) -> Tuple[ActorCriticOutput[DistributionType], Optional[Memory]]:
in_walkthrough_phase_mask = observations[self.in_walkthrough_phase_uuid]
in_unshuffle_phase_mask = ~in_walkthrough_phase_mask
in_walkthrough_float = in_walkthrough_phase_mask.float()
in_unshuffle_float = in_unshuffle_phase_mask.float()
# Don't reset hidden state at start of the unshuffle task
masks_no_unshuffle_reset = (masks.bool() | in_unshuffle_phase_mask).float()
masks_with_unshuffle_reset = masks.float()
        del masks  # Just to make sure we don't accidentally use `masks` when we want `masks_no_unshuffle_reset`
cur_img = observations[self.rgb_uuid]
nsteps, nsamplers, h, w, c = cur_img.shape
with torch.no_grad():
cur_img_embed = self.compute_visual_features(cur_img)
batch_shape, features_shape = cur_img_embed.shape[:-3], cur_img_embed.shape[-3:]
concat_img_reshaped = cur_img_embed.view(-1, *features_shape)
downsampled_img_embed = self.visual_encoder(concat_img_reshaped).view(
*batch_shape, -1
)
# Various embeddings
is_walkthrough_phase_embedding = self.is_walkthrough_phase_embedder(
in_walkthrough_phase_mask.long()
).squeeze(-2)
#######
dx_dz_drs_egocentric = observations["rel_position_change"]["dx_dz_dr"].clone()
last_allo_pos = observations["rel_position_change"][
"last_allocentric_position"
].clone()
scene_bounds = observations["scene_bounds"]
x_mins = scene_bounds["x_range"][..., 0]
z_mins = scene_bounds["z_range"][..., 0]
last_allo_pos_rel_bounds = last_allo_pos - torch.stack(
(x_mins, z_mins, torch.zeros_like(x_mins)), dim=-1
)
# Converting THOR rotation to rotation expected by map
last_allo_pos_rel_bounds[..., 2] = -last_allo_pos_rel_bounds[..., 2]
dx_dz_drs_egocentric[..., 2] *= -1
map_mask = masks_no_unshuffle_reset.view(nsteps, nsamplers, 1, 1, 1)
in_walkthrough_map_mask = in_walkthrough_float.view(nsteps, nsamplers, 1, 1, 1)
in_unshuffle_map_mask = in_unshuffle_float.view(nsteps, nsamplers, 1, 1, 1)
walkthrough_allo_map_probs = memory.tensor("walkthrough_allo_map_probs")
walkthrough_encoding = memory.tensor("walkthrough_encoding")
map_summaries = []
rnn_hidden_states = memory.tensor("rnn")
rnn_outputs_list = []
for step in range(nsteps):
with torch.no_grad():
walkthrough_allo_map_probs = ( # Resetting the map
walkthrough_allo_map_probs * map_mask[step]
)
map_result = self.map.forward(
images=None,
resnet_image_features=cur_img_embed[step],
last_map_probs_allocentric=walkthrough_allo_map_probs,
last_xzrs_allocentric=last_allo_pos_rel_bounds[step].view(-1, 3),
dx_dz_drs_egocentric=dx_dz_drs_egocentric[step],
last_map_logits_egocentric=None,
return_allocentric_maps=True,
)
walkthrough_allo_map_probs = (
map_result["map_probs_allocentric_no_grad"]
* in_walkthrough_map_mask[step]
+ walkthrough_allo_map_probs * in_unshuffle_map_mask[step]
)
walkthrough_updated_ego_probs = self.map.allocentric_map_to_egocentric_view(
allocentric_map=walkthrough_allo_map_probs,
xzr=map_result["xzr_allocentric_preds"],
padding_mode="zeros",
)
last_map_logits_egocentric = map_result["egocentric_update"]
a = walkthrough_updated_ego_probs
b = torch.sigmoid(last_map_logits_egocentric)
concat_map = torch.cat((a, b, a * b,), dim=1,)
attention_logits = self.ego_map_attention(concat_map)
attention_probs = torch.softmax(
attention_logits.view(concat_map.shape[0], -1), dim=-1,
).view(attention_logits.shape[0], 1, *attention_logits.shape[-2:])
map_summary = (
(self.ego_map_encoder(concat_map) * attention_probs).mean(-1).mean(-1)
)
map_summary = map_summary.view(concat_map.shape[0], -1)
map_summaries.append(map_summary)
rnn_input = torch.cat(
(
downsampled_img_embed[step],
map_summary,
walkthrough_encoding[0] * masks_no_unshuffle_reset[step],
is_walkthrough_phase_embedding[step],
),
dim=-1,
).unsqueeze(0)
rnn_out, rnn_hidden_states = self.state_encoder(
rnn_input,
rnn_hidden_states,
masks_with_unshuffle_reset[step : (step + 1)],
)
rnn_outputs_list.append(rnn_out)
new_walkthrough_encoding, _ = self.walkthrough_encoder(
rnn_out,
walkthrough_encoding,
masks_no_unshuffle_reset[step : step + 1],
)
walkthrough_encoding = (
walkthrough_encoding * in_unshuffle_float[step : step + 1]
+ new_walkthrough_encoding * in_walkthrough_float[step : step + 1]
)
memory = memory.set_tensor("walkthrough_encoding", walkthrough_encoding)
memory = memory.set_tensor(
key="walkthrough_allo_map_probs", tensor=walkthrough_allo_map_probs
)
memory = memory.set_tensor(key="rnn", tensor=rnn_hidden_states)
rnn_out = torch.cat(rnn_outputs_list, dim=0)
walkthrough_dist, walkthrough_vals = self.walkthrough_ac(rnn_out)
unshuffle_dist, unshuffle_vals = self.unshuffle_ac(rnn_out)
assert len(in_walkthrough_float.shape) == len(walkthrough_dist.logits.shape)
if self.walkthrough_good_action_logits is not None:
walkthrough_logits = (
walkthrough_dist.logits
+ self.walkthrough_good_action_logits.view(
*((1,) * (len(walkthrough_dist.logits.shape) - 1)), -1
)
)
else:
walkthrough_logits = walkthrough_dist.logits
actor = CategoricalDistr(
logits=in_walkthrough_float * walkthrough_logits
+ in_unshuffle_float * unshuffle_dist.logits
)
values = (
in_walkthrough_float * walkthrough_vals
+ in_unshuffle_float * unshuffle_vals
)
return ActorCriticOutput(distributions=actor, values=values, extras={}), memory
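# Sketch (illustrative only, not part of the original module) of the mask
# logic shared by the two-phase models above: episode-start masks are OR-ed
# with the in-unshuffle indicator so that recurrent walkthrough state is not
# reset when the unshuffle phase begins.
def _no_unshuffle_reset_mask_sketch() -> None:
    import torch

    masks = torch.tensor([[[0.0]], [[1.0]], [[0.0]]])  # 0.0 marks an env reset
    in_unshuffle_phase_mask = torch.tensor([[[False]], [[False]], [[True]]])
    masks_no_unshuffle_reset = (masks.bool() | in_unshuffle_phase_mask).float()
    # The last step's "reset" is ignored because it is the start of unshuffling.
    assert masks_no_unshuffle_reset.tolist() == [[[0.0]], [[1.0]], [[1.0]]]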
| ai2thor-rearrangement-main | rearrange/baseline_models.py |