python_code
stringlengths 0
187k
| repo_name
stringlengths 8
46
| file_path
stringlengths 6
135
|
---|---|---|
allennlp-models-main | allennlp_models/__init__.py |
|
# flake8: noqa: F403
from allennlp_models.classification.models import *
from allennlp_models.classification.dataset_readers import *
| allennlp-models-main | allennlp_models/classification/__init__.py |
from allennlp_models.classification.dataset_readers.stanford_sentiment_tree_bank import (
StanfordSentimentTreeBankDatasetReader,
)
from allennlp_models.classification.dataset_readers.boolq import BoolQDatasetReader
| allennlp-models-main | allennlp_models/classification/dataset_readers/__init__.py |
import json
import logging
from typing import Optional, Iterable, Dict
from allennlp.common.file_utils import cached_path
from allennlp.data import DatasetReader, Tokenizer, TokenIndexer, Instance, Field
from allennlp.data.tokenizers import WhitespaceTokenizer
from allennlp.data.token_indexers import SingleIdTokenIndexer
from allennlp.data.fields import TextField, LabelField
logger = logging.getLogger(__name__)
@DatasetReader.register("boolq")
class BoolQDatasetReader(DatasetReader):
"""
This DatasetReader is designed to read in the BoolQ data
for binary QA task. It returns a dataset of instances with the
following fields:
The output of `read` is a list of `Instance` s with the fields:
tokens : `TextField` and
label : `LabelField`
Registered as a `DatasetReader` with name "boolq".
# Parameters
tokenizer: `Tokenizer`, optional (default=`WhitespaceTokenizer()`)
Tokenizer to use to split the input sequences into words or other kinds of tokens.
token_indexers : `Dict[str, TokenIndexer]`, optional (default=`{"tokens": SingleIdTokenIndexer()}`)
We use this to define the input representation for the text. See :class:`TokenIndexer`.
"""
def __init__(
self, tokenizer: Tokenizer = None, token_indexers: Dict[str, TokenIndexer] = None, **kwargs
):
super().__init__(
manual_distributed_sharding=True, manual_multiprocess_sharding=True, **kwargs
)
self.tokenizer = tokenizer or WhitespaceTokenizer()
self.token_indexers = token_indexers or {"tokens": SingleIdTokenIndexer()}
def _read(self, file_path) -> Iterable[Instance]:
file_path = cached_path(file_path, extract_archive=True)
with open(file_path) as f:
logger.info("Reading instances from lines in file at: %s", file_path)
for line in self.shard_iterable(f):
record = json.loads(line.strip())
yield self.text_to_instance(
passage=record.get("passage"),
question=record.get("question"),
label=record.get("label"),
)
def text_to_instance( # type: ignore
self, passage: str, question: str, label: Optional[bool] = None
) -> Instance:
"""
We take the passage and the question as input, tokenize and concat them.
# Parameters
passage : `str`, required.
The passage in a given BoolQ record.
question : `str`, required.
The passage in a given BoolQ record.
label : `bool`, optional, (default = `None`).
The label for the passage and the question.
# Returns
An `Instance` containing the following fields:
tokens : `TextField`
The tokens in the concatenation of the passage and the question.
label : `LabelField`
The answer to the question.
"""
fields: Dict[str, Field] = {}
# 80% of the question length in the training set is less than 60, 512 - 4 - 60 = 448.
passage_tokens = self.tokenizer.tokenize(passage)[:448]
question_tokens = self.tokenizer.tokenize(question)[:60]
tokens = self.tokenizer.add_special_tokens(passage_tokens, question_tokens)
text_field = TextField(tokens)
fields["tokens"] = text_field
if label is not None:
label_field = LabelField(int(label), skip_indexing=True)
fields["label"] = label_field
return Instance(fields)
def apply_token_indexers(self, instance: Instance) -> None:
instance.fields["tokens"].token_indexers = self.token_indexers # type: ignore
| allennlp-models-main | allennlp_models/classification/dataset_readers/boolq.py |
from typing import Dict, List, Optional, Union
import logging
from allennlp.data import Tokenizer
from nltk.tree import Tree
from allennlp.common.file_utils import cached_path
from allennlp.data.dataset_readers.dataset_reader import DatasetReader
from allennlp.data.fields import LabelField, TextField, Field
from allennlp.data.instance import Instance
from allennlp.data.token_indexers import TokenIndexer, SingleIdTokenIndexer
from allennlp.data.tokenizers import Token
from allennlp.common.checks import ConfigurationError
logger = logging.getLogger(__name__)
@DatasetReader.register("sst_tokens")
class StanfordSentimentTreeBankDatasetReader(DatasetReader):
"""
Reads tokens and their sentiment labels from the Stanford Sentiment Treebank.
The Stanford Sentiment Treebank comes with labels
from 0 to 4. `"5-class"` uses these labels as is. `"3-class"` converts the
problem into one of identifying whether a sentence is negative, positive, or
neutral sentiment. In this case, 0 and 1 are grouped as label 0 (negative sentiment),
2 is converted to label 1 (neutral sentiment) and 3 and 4 are grouped as label 2
(positive sentiment). `"2-class"` turns it into a binary classification problem
between positive and negative sentiment. 0 and 1 are grouped as the label 0
(negative sentiment), 2 (neutral) is discarded, and 3 and 4 are grouped as the label 1
(positive sentiment).
Expected format for each input line: a linearized tree, where nodes are labeled
by their sentiment.
The output of `read` is a list of `Instance` s with the fields:
tokens : `TextField` and
label : `LabelField`
Registered as a `DatasetReader` with name "sst_tokens".
# Parameters
token_indexers : `Dict[str, TokenIndexer]`, optional (default=`{"tokens": SingleIdTokenIndexer()}`)
We use this to define the input representation for the text. See :class:`TokenIndexer`.
use_subtrees : `bool`, optional, (default = `False`)
Whether or not to use sentiment-tagged subtrees.
granularity : `str`, optional (default = `"5-class"`)
One of `"5-class"`, `"3-class"`, or `"2-class"`, indicating the number
of sentiment labels to use.
"""
def __init__(
self,
token_indexers: Dict[str, TokenIndexer] = None,
tokenizer: Optional[Tokenizer] = None,
use_subtrees: bool = False,
granularity: str = "5-class",
**kwargs,
) -> None:
super().__init__(**kwargs)
self._token_indexers = token_indexers or {"tokens": SingleIdTokenIndexer()}
self._tokenizer = tokenizer
self._use_subtrees = use_subtrees
allowed_granularities = ["5-class", "3-class", "2-class"]
if granularity not in allowed_granularities:
raise ConfigurationError(
"granularity is {}, but expected one of: {}".format(
granularity, allowed_granularities
)
)
self._granularity = granularity
def _read(self, file_path):
with open(cached_path(file_path), "r") as data_file:
logger.info("Reading instances from lines in file at: %s", file_path)
for line in data_file.readlines():
line = line.strip("\n")
if not line:
continue
parsed_line = Tree.fromstring(line)
if self._use_subtrees:
for subtree in parsed_line.subtrees():
instance = self.text_to_instance(subtree.leaves(), subtree.label())
if instance is not None:
yield instance
else:
instance = self.text_to_instance(parsed_line.leaves(), parsed_line.label())
if instance is not None:
yield instance
def text_to_instance(self, tokens: List[str], sentiment: str = None) -> Optional[Instance]:
"""
We take `pre-tokenized` input here, because we might not have a tokenizer in this class.
# Parameters
tokens : `List[str]`, required.
The tokens in a given sentence.
sentiment : `str`, optional, (default = `None`).
The sentiment for this sentence.
# Returns
An `Instance` containing the following fields:
tokens : `TextField`
The tokens in the sentence or phrase.
label : `LabelField`
The sentiment label of the sentence or phrase.
"""
if isinstance(tokens, str):
assert self._tokenizer is not None
tokens = [tokens]
if self._tokenizer is None:
def make_token(t: Union[str, Token]):
if isinstance(t, str):
return Token(t)
elif isinstance(t, Token):
return t
else:
raise ValueError("Tokens must be either str or Token.")
tokens = [make_token(x) for x in tokens]
else:
tokens = self._tokenizer.tokenize(" ".join(tokens))
text_field = TextField(tokens)
fields: Dict[str, Field] = {"tokens": text_field}
if sentiment is not None:
# 0 and 1 are negative sentiment, 2 is neutral, and 3 and 4 are positive sentiment
# In 5-class, we use labels as is.
# 3-class reduces the granularity, and only asks the model to predict
# negative, neutral, or positive.
# 2-class further reduces the granularity by only asking the model to
# predict whether an instance is negative or positive.
if self._granularity == "3-class":
if int(sentiment) < 2:
sentiment = "0"
elif int(sentiment) == 2:
sentiment = "1"
else:
sentiment = "2"
elif self._granularity == "2-class":
if int(sentiment) < 2:
sentiment = "0"
elif int(sentiment) == 2:
return None
else:
sentiment = "1"
fields["label"] = LabelField(sentiment)
return Instance(fields)
def apply_token_indexers(self, instance: Instance) -> None:
instance.fields["tokens"].token_indexers = self._token_indexers
| allennlp-models-main | allennlp_models/classification/dataset_readers/stanford_sentiment_tree_bank.py |
import logging
from typing import Dict, Optional
import torch
from allennlp.data import Vocabulary
from allennlp.models import Model
from allennlp.modules.transformer import TransformerEmbeddings, TransformerStack, TransformerPooler
from torch.nn import Dropout
logger = logging.getLogger(__name__)
@Model.register("transformer_classification_tt")
class TransformerClassificationTT(Model):
"""
This class implements a classification patterned after the proposed model in
[RoBERTa: A Robustly Optimized BERT Pretraining Approach (Liu et al)]
(https://api.semanticscholar.org/CorpusID:198953378).
Parameters
----------
vocab : ``Vocabulary``
transformer_model : ``str``, optional (default=``"roberta-large"``)
This model chooses the embedder according to this setting. You probably want to make sure this matches the
setting in the reader.
"""
def __init__(
self,
vocab: Vocabulary,
transformer_model: str = "roberta-large",
num_labels: Optional[int] = None,
label_namespace: str = "labels",
override_weights_file: Optional[str] = None,
**kwargs,
) -> None:
super().__init__(vocab, **kwargs)
transformer_kwargs = {
"model_name": transformer_model,
"weights_path": override_weights_file,
}
self.embeddings = TransformerEmbeddings.from_pretrained_module(**transformer_kwargs)
self.transformer_stack = TransformerStack.from_pretrained_module(**transformer_kwargs)
self.pooler = TransformerPooler.from_pretrained_module(**transformer_kwargs)
self.pooler_dropout = Dropout(p=0.1)
self.label_tokens = vocab.get_index_to_token_vocabulary(label_namespace)
if num_labels is None:
num_labels = len(self.label_tokens)
self.linear_layer = torch.nn.Linear(self.pooler.get_output_dim(), num_labels)
self.linear_layer.weight.data.normal_(mean=0.0, std=0.02)
self.linear_layer.bias.data.zero_()
from allennlp.training.metrics import CategoricalAccuracy, FBetaMeasure
self.loss = torch.nn.CrossEntropyLoss()
self.acc = CategoricalAccuracy()
self.f1 = FBetaMeasure()
def forward( # type: ignore
self,
text: Dict[str, torch.Tensor],
label: Optional[torch.Tensor] = None,
) -> Dict[str, torch.Tensor]:
"""
Parameters
----------
text : ``Dict[str, torch.LongTensor]``
From a ``TensorTextField``. Contains the text to be classified.
label : ``Optional[torch.LongTensor]``
From a ``LabelField``, specifies the true class of the instance
Returns
-------
An output dictionary consisting of:
loss : ``torch.FloatTensor``, optional
A scalar loss to be optimised. This is only returned when `correct_alternative` is not `None`.
logits : ``torch.FloatTensor``
The logits for every possible answer choice
"""
embedded_alternatives = self.embeddings(**text)
embedded_alternatives = self.transformer_stack(
embedded_alternatives, text["attention_mask"]
)
embedded_alternatives = self.pooler(embedded_alternatives.final_hidden_states)
embedded_alternatives = self.pooler_dropout(embedded_alternatives)
logits = self.linear_layer(embedded_alternatives)
result = {"logits": logits, "answers": logits.argmax(1)}
if label is not None:
result["loss"] = self.loss(logits, label)
self.acc(logits, label)
self.f1(logits, label)
return result
def get_metrics(self, reset: bool = False) -> Dict[str, float]:
result = {"acc": self.acc.get_metric(reset)}
for metric_name, metrics_per_class in self.f1.get_metric(reset).items():
for class_index, value in enumerate(metrics_per_class):
result[f"{self.label_tokens[class_index]}-{metric_name}"] = value
return result
| allennlp-models-main | allennlp_models/classification/models/transformer_classification_tt.py |
from allennlp_models.classification.models.biattentive_classification_network import (
BiattentiveClassificationNetwork,
)
from allennlp_models.classification.models.transformer_classification_tt import (
TransformerClassificationTT,
)
| allennlp-models-main | allennlp_models/classification/models/__init__.py |
from typing import Dict, Union
import numpy
import torch
from torch import nn
import torch.nn.functional as F
from allennlp.common.checks import check_dimensions_match, ConfigurationError
from allennlp.data import TextFieldTensors, Vocabulary
from allennlp.modules import Elmo, FeedForward, Maxout, Seq2SeqEncoder, TextFieldEmbedder
from allennlp.models.model import Model
from allennlp.nn import InitializerApplicator
from allennlp.nn import util
from allennlp.training.metrics import CategoricalAccuracy
@Model.register("bcn")
class BiattentiveClassificationNetwork(Model):
"""
This class implements the Biattentive Classification Network model described
in section 5 of
[Learned in Translation: Contextualized Word Vectors (NIPS 2017)](https://arxiv.org/abs/1708.00107)
for text classification. We assume we're given a piece of text, and we predict some output label.
At a high level, the model starts by embedding the tokens and running them through
a feed-forward neural net (`pre_encode_feedforward`). Then, we encode these
representations with a `Seq2SeqEncoder` (`encoder`). We run biattention
on the encoder output representations (self-attention in this case, since
the two representations that typically go into biattention are identical) and
get out an attentive vector representation of the text. We combine this text
representation with the encoder outputs computed earlier, and then run this through
yet another `Seq2SeqEncoder` (the `integrator`). Lastly, we take the output of the
integrator and max, min, mean, and self-attention pool to create a final representation,
which is passed through a maxout network or some feed-forward layers
to output a classification (`output_layer`).
Registered as a `Model` with name "bcn".
# Parameters
vocab : `Vocabulary`, required
A Vocabulary, required in order to compute sizes for input/output projections.
text_field_embedder : `TextFieldEmbedder`, required
Used to embed the `tokens` `TextField` we get as input to the model.
embedding_dropout : `float`
The amount of dropout to apply on the embeddings.
pre_encode_feedforward : `FeedForward`
A feedforward network that is run on the embedded tokens before they
are passed to the encoder.
encoder : `Seq2SeqEncoder`
The encoder to use on the tokens.
integrator : `Seq2SeqEncoder`
The encoder to use when integrating the attentive text encoding
with the token encodings.
integrator_dropout : `float`
The amount of dropout to apply on integrator output.
output_layer : `Union[Maxout, FeedForward]`
The maxout or feed forward network that takes the final representations and produces
a classification prediction.
elmo : `Elmo`, optional (default=`None`)
If provided, will be used to concatenate pretrained ELMo representations to
either the integrator output (`use_integrator_output_elmo`) or the
input (`use_input_elmo`).
use_input_elmo : `bool` (default=`False`)
If true, concatenate pretrained ELMo representations to the input vectors.
use_integrator_output_elmo : `bool` (default=`False`)
If true, concatenate pretrained ELMo representations to the integrator output.
initializer : `InitializerApplicator`, optional (default=`InitializerApplicator()`)
Used to initialize the model parameters.
"""
def __init__(
self,
vocab: Vocabulary,
text_field_embedder: TextFieldEmbedder,
embedding_dropout: float,
pre_encode_feedforward: FeedForward,
encoder: Seq2SeqEncoder,
integrator: Seq2SeqEncoder,
integrator_dropout: float,
output_layer: Union[FeedForward, Maxout],
elmo: Elmo = None,
use_input_elmo: bool = False,
use_integrator_output_elmo: bool = False,
initializer: InitializerApplicator = InitializerApplicator(),
**kwargs,
) -> None:
super().__init__(vocab, **kwargs)
self._text_field_embedder = text_field_embedder
if "elmo" in self._text_field_embedder._token_embedders.keys():
raise ConfigurationError(
"To use ELMo in the BiattentiveClassificationNetwork input, "
"remove elmo from the text_field_embedder and pass an "
"Elmo object to the BiattentiveClassificationNetwork and set the "
"'use_input_elmo' and 'use_integrator_output_elmo' flags accordingly."
)
self._embedding_dropout = nn.Dropout(embedding_dropout)
self._num_classes = self.vocab.get_vocab_size("labels")
self._pre_encode_feedforward = pre_encode_feedforward
self._encoder = encoder
self._integrator = integrator
self._integrator_dropout = nn.Dropout(integrator_dropout)
self._elmo = elmo
self._use_input_elmo = use_input_elmo
self._use_integrator_output_elmo = use_integrator_output_elmo
self._num_elmo_layers = int(self._use_input_elmo) + int(self._use_integrator_output_elmo)
# Check that, if elmo is None, none of the elmo flags are set.
if self._elmo is None and self._num_elmo_layers != 0:
raise ConfigurationError(
"One of 'use_input_elmo' or 'use_integrator_output_elmo' is True, "
"but no Elmo object was provided upon construction. Pass in an Elmo "
"object to use Elmo."
)
if self._elmo is not None:
# Check that, if elmo is not None, we use it somewhere.
if self._num_elmo_layers == 0:
raise ConfigurationError(
"Elmo object provided upon construction, but both 'use_input_elmo' "
"and 'use_integrator_output_elmo' are 'False'. Set one of them to "
"'True' to use Elmo, or do not provide an Elmo object upon construction."
)
# Check that the number of flags set is equal to the num_output_representations of the Elmo object
if len(self._elmo._scalar_mixes) != self._num_elmo_layers:
raise ConfigurationError(
f"Elmo object has num_output_representations={len(self._elmo._scalar_mixes)}, but this "
f"does not match the number of use_*_elmo flags set to true. use_input_elmo "
f"is {self._use_input_elmo}, and use_integrator_output_elmo "
f"is {self._use_integrator_output_elmo}"
)
# Calculate combined integrator output dim, taking into account elmo
if self._use_integrator_output_elmo:
self._combined_integrator_output_dim = (
self._integrator.get_output_dim() + self._elmo.get_output_dim()
)
else:
self._combined_integrator_output_dim = self._integrator.get_output_dim()
self._self_attentive_pooling_projection = nn.Linear(self._combined_integrator_output_dim, 1)
self._output_layer = output_layer
if self._use_input_elmo:
check_dimensions_match(
text_field_embedder.get_output_dim() + self._elmo.get_output_dim(),
self._pre_encode_feedforward.get_input_dim(),
"text field embedder output dim + ELMo output dim",
"Pre-encoder feedforward input dim",
)
else:
check_dimensions_match(
text_field_embedder.get_output_dim(),
self._pre_encode_feedforward.get_input_dim(),
"text field embedder output dim",
"Pre-encoder feedforward input dim",
)
check_dimensions_match(
self._pre_encode_feedforward.get_output_dim(),
self._encoder.get_input_dim(),
"Pre-encoder feedforward output dim",
"Encoder input dim",
)
check_dimensions_match(
self._encoder.get_output_dim() * 3,
self._integrator.get_input_dim(),
"Encoder output dim * 3",
"Integrator input dim",
)
if self._use_integrator_output_elmo:
check_dimensions_match(
self._combined_integrator_output_dim * 4,
self._output_layer.get_input_dim(),
"(Integrator output dim + ELMo output dim) * 4",
"Output layer input dim",
)
else:
check_dimensions_match(
self._integrator.get_output_dim() * 4,
self._output_layer.get_input_dim(),
"Integrator output dim * 4",
"Output layer input dim",
)
check_dimensions_match(
self._output_layer.get_output_dim(),
self._num_classes,
"Output layer output dim",
"Number of classes.",
)
self.metrics = {
"accuracy": CategoricalAccuracy(),
"accuracy3": CategoricalAccuracy(top_k=3),
}
self.loss = torch.nn.CrossEntropyLoss()
initializer(self)
def forward(
self, # type: ignore
tokens: TextFieldTensors,
label: torch.LongTensor = None,
) -> Dict[str, torch.Tensor]:
"""
# Parameters
tokens : `TextFieldTensors`, required
The output of `TextField.as_array()`.
label : `torch.LongTensor`, optional (default = `None`)
A variable representing the label for each instance in the batch.
# Returns
An output dictionary consisting of:
- `class_probabilities` (`torch.FloatTensor`) :
A tensor of shape `(batch_size, num_classes)` representing a
distribution over the label classes for each instance.
- `loss` (`torch.FloatTensor`, optional) :
A scalar loss to be optimised."""
text_mask = util.get_text_field_mask(tokens)
# Pop elmo tokens, since elmo embedder should not be present.
elmo_tokens = tokens.pop("elmo", None)
if tokens:
embedded_text = self._text_field_embedder(tokens)
else:
# only using "elmo" for input
embedded_text = None
# Add the "elmo" key back to "tokens" if not None, since the tests and the
# subsequent training epochs rely not being modified during forward()
if elmo_tokens is not None:
tokens["elmo"] = elmo_tokens
# Create ELMo embeddings if applicable
if self._elmo:
if elmo_tokens is not None:
elmo_representations = self._elmo(elmo_tokens["elmo_tokens"])[
"elmo_representations"
]
# Pop from the end is more performant with list
if self._use_integrator_output_elmo:
integrator_output_elmo = elmo_representations.pop()
if self._use_input_elmo:
input_elmo = elmo_representations.pop()
assert not elmo_representations
else:
raise ConfigurationError(
"Model was built to use Elmo, but input text is not tokenized for Elmo."
)
if self._use_input_elmo:
if embedded_text is not None:
embedded_text = torch.cat([embedded_text, input_elmo], dim=-1)
else:
embedded_text = input_elmo
dropped_embedded_text = self._embedding_dropout(embedded_text)
pre_encoded_text = self._pre_encode_feedforward(dropped_embedded_text)
encoded_tokens = self._encoder(pre_encoded_text, text_mask)
# Compute biattention. This is a special case since the inputs are the same.
attention_logits = encoded_tokens.bmm(encoded_tokens.permute(0, 2, 1).contiguous())
attention_weights = util.masked_softmax(attention_logits, text_mask)
encoded_text = util.weighted_sum(encoded_tokens, attention_weights)
# Build the input to the integrator
integrator_input = torch.cat(
[encoded_tokens, encoded_tokens - encoded_text, encoded_tokens * encoded_text], 2
)
integrated_encodings = self._integrator(integrator_input, text_mask)
# Concatenate ELMo representations to integrated_encodings if specified
if self._use_integrator_output_elmo:
integrated_encodings = torch.cat([integrated_encodings, integrator_output_elmo], dim=-1)
# Simple Pooling layers
max_masked_integrated_encodings = util.replace_masked_values(
integrated_encodings,
text_mask.unsqueeze(2),
util.min_value_of_dtype(integrated_encodings.dtype),
)
max_pool = torch.max(max_masked_integrated_encodings, 1)[0]
min_masked_integrated_encodings = util.replace_masked_values(
integrated_encodings,
text_mask.unsqueeze(2),
util.max_value_of_dtype(integrated_encodings.dtype),
)
min_pool = torch.min(min_masked_integrated_encodings, 1)[0]
mean_pool = torch.sum(integrated_encodings, 1) / torch.sum(text_mask, 1, keepdim=True)
# Self-attentive pooling layer
# Run through linear projection. Shape: (batch_size, sequence length, 1)
# Then remove the last dimension to get the proper attention shape (batch_size, sequence length).
self_attentive_logits = self._self_attentive_pooling_projection(
integrated_encodings
).squeeze(2)
self_weights = util.masked_softmax(self_attentive_logits, text_mask)
self_attentive_pool = util.weighted_sum(integrated_encodings, self_weights)
pooled_representations = torch.cat([max_pool, min_pool, mean_pool, self_attentive_pool], 1)
pooled_representations_dropped = self._integrator_dropout(pooled_representations)
logits = self._output_layer(pooled_representations_dropped)
class_probabilities = F.softmax(logits, dim=-1)
output_dict = {"logits": logits, "class_probabilities": class_probabilities}
if label is not None:
loss = self.loss(logits, label)
for metric in self.metrics.values():
metric(logits, label)
output_dict["loss"] = loss
return output_dict
def make_output_human_readable(
self, output_dict: Dict[str, torch.Tensor]
) -> Dict[str, torch.Tensor]:
"""
Does a simple argmax over the class probabilities, converts indices to string labels, and
adds a `"label"` key to the dictionary with the result.
"""
predictions = output_dict["class_probabilities"].cpu().data.numpy()
argmax_indices = numpy.argmax(predictions, axis=-1)
labels = [self.vocab.get_token_from_index(x, namespace="labels") for x in argmax_indices]
output_dict["label"] = labels
return output_dict
def get_metrics(self, reset: bool = False) -> Dict[str, float]:
return {
metric_name: metric.get_metric(reset) for metric_name, metric in self.metrics.items()
}
| allennlp-models-main | allennlp_models/classification/models/biattentive_classification_network.py |
# flake8: noqa: F403
from allennlp_models.structured_prediction.predictors import *
from allennlp_models.structured_prediction.dataset_readers import *
from allennlp_models.structured_prediction.metrics import *
from allennlp_models.structured_prediction.models import *
| allennlp-models-main | allennlp_models/structured_prediction/__init__.py |
from typing import Dict, List, Optional, Set
from collections import defaultdict
import logging
import os
import tempfile
import subprocess
import shutil
from allennlp.common.util import is_distributed
from allennlp.common.checks import ConfigurationError
from allennlp.training.metrics.metric import Metric
logger = logging.getLogger(__name__)
DEFAULT_SRL_EVAL_PATH = os.path.abspath(
os.path.join(os.path.dirname(os.path.realpath(__file__)), "..", "tools", "srl-eval.pl")
)
@Metric.register("srl_eval")
class SrlEvalScorer(Metric):
"""
This class uses the external srl-eval.pl script for computing the CoNLL SRL metrics.
AllenNLP contains the srl-eval.pl script, but you will need perl 5.x.
Note that this metric reads and writes from disk quite a bit. In particular, it
writes and subsequently reads two files per __call__, which is typically invoked
once per batch. You probably don't want to include it in your training loop;
instead, you should calculate this on a validation set only.
# Parameters
srl_eval_path : `str`, optional.
The path to the srl-eval.pl script.
ignore_classes : `List[str]`, optional (default=`None`).
A list of classes to ignore.
"""
def __init__(
self, srl_eval_path: str = DEFAULT_SRL_EVAL_PATH, ignore_classes: List[str] = None
) -> None:
self._srl_eval_path = srl_eval_path
self._ignore_classes = set(ignore_classes)
# These will hold per label span counts.
self._true_positives: Dict[str, int] = defaultdict(int)
self._false_positives: Dict[str, int] = defaultdict(int)
self._false_negatives: Dict[str, int] = defaultdict(int)
def __call__(
self, # type: ignore
batch_verb_indices: List[Optional[int]],
batch_sentences: List[List[str]],
batch_conll_formatted_predicted_tags: List[List[str]],
batch_conll_formatted_gold_tags: List[List[str]],
) -> None:
"""
# Parameters
batch_verb_indices : `List[Optional[int]]`, required.
The indices of the verbal predicate in the sentences which
the gold labels are the arguments for, or None if the sentence
contains no verbal predicate.
batch_sentences : `List[List[str]]`, required.
The word tokens for each instance in the batch.
batch_conll_formatted_predicted_tags : `List[List[str]]`, required.
A list of predicted CoNLL-formatted SRL tags (itself a list) to compute score for.
Use allennlp.models.semantic_role_labeler.convert_bio_tags_to_conll_format
to convert from BIO to CoNLL format before passing the tags into the metric,
if applicable.
batch_conll_formatted_gold_tags : `List[List[str]]`, required.
A list of gold CoNLL-formatted SRL tags (itself a list) to use as a reference.
Use allennlp.models.semantic_role_labeler.convert_bio_tags_to_conll_format
to convert from BIO to CoNLL format before passing the
tags into the metric, if applicable.
"""
if not os.path.exists(self._srl_eval_path):
raise ConfigurationError(f"srl-eval.pl not found at {self._srl_eval_path}.")
tempdir = tempfile.mkdtemp()
gold_path = os.path.join(tempdir, "gold.txt")
predicted_path = os.path.join(tempdir, "predicted.txt")
with open(predicted_path, "w", encoding="utf-8") as predicted_file, open(
gold_path, "w", encoding="utf-8"
) as gold_file:
for verb_index, sentence, predicted_tag_sequence, gold_tag_sequence in zip(
batch_verb_indices,
batch_sentences,
batch_conll_formatted_predicted_tags,
batch_conll_formatted_gold_tags,
):
from allennlp_models.structured_prediction.models.srl import (
write_conll_formatted_tags_to_file,
)
write_conll_formatted_tags_to_file(
predicted_file,
gold_file,
verb_index,
sentence,
predicted_tag_sequence,
gold_tag_sequence,
)
perl_script_command = ["perl", self._srl_eval_path, gold_path, predicted_path]
try:
completed_process = subprocess.run(
perl_script_command, stdout=subprocess.PIPE, universal_newlines=True, check=True
)
except FileNotFoundError:
raise FileNotFoundError(
"'File not found' while running the evaluation. Do you have perl installed?"
)
for line in completed_process.stdout.split("\n"):
stripped = line.strip().split()
if len(stripped) == 7:
tag = stripped[0]
# Overall metrics are calculated in get_metric, skip them here.
if tag == "Overall" or tag in self._ignore_classes:
continue
# This line contains results for a span
num_correct = int(stripped[1])
num_excess = int(stripped[2])
num_missed = int(stripped[3])
self._true_positives[tag] += num_correct
self._false_positives[tag] += num_excess
self._false_negatives[tag] += num_missed
# Note: we cannot aggregate across distributed workers because each worker
# may end up with different tags, and in such a case, the reduce operation
# will stall, or return with inaccurate values.
shutil.rmtree(tempdir)
def get_metric(self, reset: bool = False):
"""
# Returns
A Dict per label containing following the span based metrics:
- precision : `float`
- recall : `float`
- f1-measure : `float`
Additionally, an `overall` key is included, which provides the precision,
recall and f1-measure for all spans.
"""
if is_distributed():
raise RuntimeError(
"Distributed aggregation for `SrlEvalScorer` is currently not supported."
)
all_tags: Set[str] = set()
all_tags.update(self._true_positives.keys())
all_tags.update(self._false_positives.keys())
all_tags.update(self._false_negatives.keys())
all_metrics = {}
for tag in all_tags:
if tag == "overall":
raise ValueError(
"'overall' is disallowed as a tag type, "
"rename the tag type to something else if necessary."
)
precision, recall, f1_measure = self._compute_metrics(
self._true_positives[tag], self._false_positives[tag], self._false_negatives[tag]
)
precision_key = "precision" + "-" + tag
recall_key = "recall" + "-" + tag
f1_key = "f1-measure" + "-" + tag
all_metrics[precision_key] = precision
all_metrics[recall_key] = recall
all_metrics[f1_key] = f1_measure
# Compute the precision, recall and f1 for all spans jointly.
precision, recall, f1_measure = self._compute_metrics(
sum(self._true_positives.values()),
sum(self._false_positives.values()),
sum(self._false_negatives.values()),
)
all_metrics["precision-overall"] = precision
all_metrics["recall-overall"] = recall
all_metrics["f1-measure-overall"] = f1_measure
if reset:
self.reset()
return all_metrics
@staticmethod
def _compute_metrics(true_positives: int, false_positives: int, false_negatives: int):
precision = true_positives / (true_positives + false_positives + 1e-13)
recall = true_positives / (true_positives + false_negatives + 1e-13)
f1_measure = 2.0 * (precision * recall) / (precision + recall + 1e-13)
return precision, recall, f1_measure
def reset(self):
self._true_positives = defaultdict(int)
self._false_positives = defaultdict(int)
self._false_negatives = defaultdict(int)
| allennlp-models-main | allennlp_models/structured_prediction/metrics/srl_eval_scorer.py |
from allennlp_models.structured_prediction.metrics.srl_eval_scorer import SrlEvalScorer
| allennlp-models-main | allennlp_models/structured_prediction/metrics/__init__.py |
import argparse
import os
import sys
from collections import namedtuple
from typing import Iterable, List, Tuple
import regex
from tqdm import tqdm
from allennlp.data import Token
from allennlp.data.tokenizers import SpacyTokenizer
sys.path.insert(0, os.path.dirname(os.path.abspath(os.path.join(__file__, os.pardir))))
Extraction = namedtuple(
"Extraction", # Open IE extraction
[
"sent", # Sentence in which this extraction appears
"toks", # spaCy tokens
"arg1", # Subject
"rel", # Relation
"args2", # A list of arguments after the predicate
"confidence",
], # Confidence in this extraction
)
Element = namedtuple(
"Element", # An element (predicate or argument) in an Open IE extraction
[
"elem_type", # Predicate or argument ID
"span", # The element's character span in the sentence
"text",
], # The textual representation of this element
)
def main(inp_fn: str, domain: str, out_fn: str) -> None:
"""
inp_fn: `str`
Path to file from which to read Open IE extractions in Open IE4's format.
domain: `str`
Domain to be used when writing CoNLL format.
out_fn: `str`
Path to file to which to write the CoNLL format Open IE extractions.
"""
with open(out_fn, "w") as fout:
for sent_ls in read(inp_fn):
fout.write(f"{convert_sent_to_conll_str(sent_ls, domain)}\n\n")
def safe_zip(*args):
"""
Zip which ensures all lists are of same size.
"""
assert len({len(arg) for arg in args}) == 1
return zip(*args)
def char_to_word_index(char_ind: int, sent: str) -> int:
"""
Convert a character index to word index in the given sentence.
"""
return sent[:char_ind].count(" ")
def element_from_span(span: List[Token], span_type: str) -> Element:
"""
Return an Element from span (list of spaCy tokens).
"""
return Element(
span_type, [span[0].idx, span[-1].idx + len(span[-1])], " ".join(str(t) for t in span)
)
def split_predicate(ex: Extraction) -> Extraction:
"""
Ensure single word predicate by adding "before-predicate" and "after-predicate" arguments.
"""
rel_toks = ex.toks[
char_to_word_index(ex.rel.span[0], ex.sent) : char_to_word_index(ex.rel.span[1], ex.sent)
+ 1
]
if not rel_toks:
return ex
verb_inds = [tok_ind for (tok_ind, tok) in enumerate(rel_toks) if tok.tag_.startswith("VB")]
last_verb_ind = verb_inds[-1] if verb_inds else (len(rel_toks) - 1)
rel_parts = [element_from_span([rel_toks[last_verb_ind]], "V")]
before_verb = rel_toks[:last_verb_ind]
after_verb = rel_toks[last_verb_ind + 1 :]
if before_verb:
rel_parts.append(element_from_span(before_verb, "BV"))
if after_verb:
rel_parts.append(element_from_span(after_verb, "AV"))
return Extraction(ex.sent, ex.toks, ex.arg1, rel_parts, ex.args2, ex.confidence)
def extraction_to_conll(ex: Extraction) -> List[str]:
"""
Return a CoNLL representation of a given input Extraction.
"""
ex = split_predicate(ex)
toks = ex.sent.split(" ")
ret = ["*"] * len(toks)
args = [ex.arg1] + ex.args2
rels_and_args = [(f"ARG{arg_ind}", arg) for arg_ind, arg in enumerate(args)] + [
(rel_part.elem_type, rel_part) for rel_part in ex.rel
]
for rel, arg in rels_and_args:
# Add brackets
cur_start_ind = char_to_word_index(arg.span[0], ex.sent)
cur_end_ind = char_to_word_index(arg.span[1], ex.sent)
ret[cur_start_ind] = f"({rel}{ret[cur_start_ind]}"
ret[cur_end_ind] += ")"
return ret
def interpret_span(text_spans: str) -> Tuple[int, int]:
"""
Return an integer tuple from textual representation of closed/open spans.
"""
m = regex.match(r"^(?:(?:([\(\[]\d+, \d+[\)\]])|({\d+}))[,]?\s*)+$", text_spans) # noqa
spans = m.captures(1) + m.captures(2)
int_spans: List[Tuple[int, int]] = []
for span in spans:
ints = [int(s) for s in span[1:-1].split(",")]
if span[0] == "(":
ints[0] += 1
if span[-1] == "]":
ints[1] += 1
if span.startswith("{"):
assert len(ints) == 1
ints.append(ints[0] + 1)
assert len(ints) == 2
int_spans.append((ints[0], ints[1]))
# Merge consecutive spans
ret = []
cur_span: Tuple[int, int] = int_spans[0]
for start, end in int_spans[1:]:
if start - 1 == cur_span[-1]:
cur_span = (cur_span[0], end)
else:
ret.append(cur_span)
cur_span = (start, end)
if not ret or cur_span != ret[-1]:
ret.append(cur_span)
return ret[0]
def interpret_element(element_type: str, text: str, span: str) -> Element:
"""
Construct an Element instance from regexp groups.
"""
return Element(element_type, interpret_span(span), text)
def parse_element(raw_element: str) -> List[Element]:
"""
Parse a raw element into text and indices (integers).
"""
elements = (
regex.match(r"^(([a-zA-Z]+)\(([^;]+),List\(([^;]*)\)\))$", elem.lstrip().rstrip()) # noqa
for elem in raw_element.split(";")
)
return [interpret_element(*elem.groups()[1:]) for elem in elements if elem]
def read(fn: str) -> Iterable[List[Extraction]]:
tokenizer = SpacyTokenizer(pos_tags=True)
prev_sent: List[Extraction] = []
with open(fn) as fin:
for line in tqdm(fin):
data = line.strip().split("\t")
confidence = data[0]
if not all(data[2:5]):
# Make sure that all required elements are present
continue
arg1, rel, args2 = (parse_element(e) for e in data[2:5])
# Exactly one subject and one relation
# and at least one object
if len(rel) == 1 and len(arg1) == 1 and len(args2) >= 1:
sent = data[5]
cur_ex = Extraction(
sent=sent,
toks=tokenizer.tokenize(sent),
arg1=arg1[0],
rel=rel[0],
args2=args2,
confidence=confidence,
)
# Decide whether to append or yield
if not prev_sent or prev_sent[0].sent == sent:
prev_sent.append(cur_ex)
else:
yield prev_sent
prev_sent = [cur_ex]
if prev_sent:
# Yield last element
yield prev_sent
def convert_sent_to_conll(sent_ls: List[Extraction]) -> Iterable[Tuple[str, ...]]:
"""
Given a list of extractions for a single sentence, converts it to CoNLL representation.
"""
# Sanity check - make sure all extractions are on the same sentence
assert len({ex.sent for ex in sent_ls}) == 1
toks = sent_ls[0].sent.split(" ")
return safe_zip(
*[(str(i) for i in range(len(toks))), toks] + [extraction_to_conll(ex) for ex in sent_ls]
)
def pad_line_to_ontonotes(line: Tuple[str, ...], domain: str) -> List[str]:
"""
Pad line to conform to OntoNotes representation.
"""
word_ind, word = line[:2]
pos = "XX"
oie_tags = line[2:]
line_num = "0"
parse = "-"
lemma = "-"
return (
[domain, line_num, word_ind, word, pos, parse, lemma, "-", "-", "-", "*"]
+ list(oie_tags)
+ ["-"]
)
def convert_sent_to_conll_str(sent_ls: List[Extraction], domain: str) -> str:
"""
Given a dictionary from sentence -> extractions, return a corresponding CoNLL representation.
"""
return "\n".join(
"\t".join(pad_line_to_ontonotes(line, domain)) for line in convert_sent_to_conll(sent_ls)
)
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="Convert Open IE4 extractions to CoNLL (OntoNotes) format."
)
parser.add_argument(
"--inp", type=str, help="input file from which to read Open IE extractions.", required=True
)
parser.add_argument(
"--domain", type=str, help="domain to use when writing the OntoNotes file.", required=True
)
parser.add_argument(
"--out",
type=str,
help="path to the output file, where CoNLL format should be written.",
required=True,
)
args = parser.parse_args()
main(args.inp, args.domain, args.out)
| allennlp-models-main | allennlp_models/structured_prediction/tools/convert_openie_to_conll.py |
allennlp-models-main | allennlp_models/structured_prediction/tools/__init__.py |
|
# flake8: noqa
import os
import sys
import argparse
from typing import List
import torch
from allennlp_models.structured_prediction.models.srl import write_to_conll_eval_file
sys.path.insert(0, os.path.dirname(os.path.abspath(os.path.join(__file__, os.pardir))))
from allennlp.common.tqdm import Tqdm
from allennlp.common import Params
from allennlp.models.archival import load_archive
from allennlp.data import DatasetReader
from allennlp.data.data_loaders import SimpleDataLoader
from allennlp.nn.util import move_to_device
def main(serialization_directory: str, device: int, data: str, prefix: str, domain: str = None):
"""
serialization_directory : str, required.
The directory containing the serialized weights.
device: int, default = -1
The device to run the evaluation on.
data: str, default = None
The data to evaluate on. By default, we use the validation data from
the original experiment.
prefix: str, default=""
The prefix to prepend to the generated gold and prediction files, to distinguish
different models/data.
domain: str, optional (default = None)
If passed, filters the ontonotes evaluation/test dataset to only contain the
specified domain. This overwrites the domain in the config file from the model,
to allow evaluation on domains other than the one the model was trained on.
"""
config = Params.from_file(os.path.join(serialization_directory, "config.json"))
if domain is not None:
# Hack to allow evaluation on different domains than the
# model was trained on.
config["dataset_reader"]["domain_identifier"] = domain
prefix = f"{domain}_{prefix}"
else:
config["dataset_reader"].pop("domain_identifier", None)
dataset_reader = DatasetReader.from_params(config["dataset_reader"])
evaluation_data_path = data if data else config["validation_data_path"]
archive = load_archive(
os.path.join(serialization_directory, "model.tar.gz"), cuda_device=device
)
model = archive.model
model.eval()
prediction_file_path = os.path.join(serialization_directory, prefix + "_predictions.txt")
gold_file_path = os.path.join(serialization_directory, prefix + "_gold.txt")
prediction_file = open(prediction_file_path, "w+")
gold_file = open(gold_file_path, "w+")
# Load the evaluation data and index it.
print("reading evaluation data from {}".format(evaluation_data_path))
dataset = list(dataset_reader.read(evaluation_data_path))
with torch.autograd.no_grad():
loader = SimpleDataLoader(dataset, 32)
model_predictions: List[List[str]] = []
for batch in Tqdm.tqdm(loader):
batch = move_to_device(batch, device)
result = model(**batch)
predictions = model.decode(result)
model_predictions.extend(predictions["tags"])
for instance, prediction in zip(dataset, model_predictions):
fields = instance.fields
verb_index = fields["metadata"]["verb_index"]
gold_tags = fields["metadata"]["gold_tags"]
sentence = fields["metadata"]["words"]
write_to_conll_eval_file(
prediction_file, gold_file, verb_index, sentence, prediction, gold_tags
)
prediction_file.close()
gold_file.close()
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="write conll format srl predictions to file from a pretrained model."
)
parser.add_argument("--path", type=str, help="the serialization directory.")
parser.add_argument("--device", type=int, default=-1, help="the device to load the model onto.")
parser.add_argument(
"--data", type=str, default=None, help="A directory containing a dataset to evaluate on."
)
parser.add_argument(
"--prefix", type=str, default="", help="A prefix to distinguish model outputs."
)
parser.add_argument(
"--domain",
type=str,
default=None,
help="An optional domain to filter by for producing results.",
)
args = parser.parse_args()
main(args.path, args.device, args.data, args.prefix, args.domain)
| allennlp-models-main | allennlp_models/structured_prediction/tools/write_srl_predictions_to_conll_format.py |
from typing import Dict, Tuple, List
import logging
from conllu import parse_incr
from allennlp.common.file_utils import cached_path
from allennlp.data.dataset_readers.dataset_reader import DatasetReader
from allennlp.data.fields import Field, TextField, SequenceLabelField, MetadataField
from allennlp.data.instance import Instance
from allennlp.data.token_indexers import SingleIdTokenIndexer, TokenIndexer
from allennlp.data.tokenizers import Token, Tokenizer
logger = logging.getLogger(__name__)
# exist_ok has to be true until we remove this from the core library
@DatasetReader.register("universal_dependencies", exist_ok=True)
class UniversalDependenciesDatasetReader(DatasetReader):
"""
Reads a file in the conllu Universal Dependencies format.
# Parameters
token_indexers : `Dict[str, TokenIndexer]`, optional (default=`{"tokens": SingleIdTokenIndexer()}`)
The token indexers to be applied to the words TextField.
use_language_specific_pos : `bool`, optional (default = `False`)
Whether to use UD POS tags, or to use the language specific POS tags
provided in the conllu format.
tokenizer : `Tokenizer`, optional (default = `None`)
A tokenizer to use to split the text. This is useful when the tokens that you pass
into the model need to have some particular attribute. Typically it is not necessary.
"""
def __init__(
self,
token_indexers: Dict[str, TokenIndexer] = None,
use_language_specific_pos: bool = False,
tokenizer: Tokenizer = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self._token_indexers = token_indexers or {"tokens": SingleIdTokenIndexer()}
self.use_language_specific_pos = use_language_specific_pos
self.tokenizer = tokenizer
def _read(self, file_path: str):
# if `file_path` is a URL, redirect to the cache
file_path = cached_path(file_path)
with open(file_path, "r") as conllu_file:
logger.info("Reading UD instances from conllu dataset at: %s", file_path)
for annotation in parse_incr(conllu_file):
# CoNLLU annotations sometimes add back in words that have been elided
# in the original sentence; we remove these, as we're just predicting
# dependencies for the original sentence.
# We filter by integers here as elided words have a non-integer word id,
# as parsed by the conllu python library.
annotation = [x for x in annotation if isinstance(x["id"], int)]
heads = [x["head"] for x in annotation]
tags = [x["deprel"] for x in annotation]
words = [x["form"] for x in annotation]
if self.use_language_specific_pos:
pos_tags = [x["xpostag"] for x in annotation]
else:
pos_tags = [x["upostag"] for x in annotation]
yield self.text_to_instance(words, pos_tags, list(zip(tags, heads)))
def text_to_instance(
self, # type: ignore
words: List[str],
upos_tags: List[str],
dependencies: List[Tuple[str, int]] = None,
) -> Instance:
"""
# Parameters
words : `List[str]`, required.
The words in the sentence to be encoded.
upos_tags : `List[str]`, required.
The universal dependencies POS tags for each word.
dependencies : `List[Tuple[str, int]]`, optional (default = `None`)
A list of (head tag, head index) tuples. Indices are 1 indexed,
meaning an index of 0 corresponds to that word being the root of
the dependency tree.
# Returns
An instance containing words, upos tags, dependency head tags and head
indices as fields.
"""
fields: Dict[str, Field] = {}
if self.tokenizer is not None:
tokens = self.tokenizer.tokenize(" ".join(words))
else:
tokens = [Token(t) for t in words]
text_field = TextField(tokens, self._token_indexers)
fields["words"] = text_field
fields["pos_tags"] = SequenceLabelField(upos_tags, text_field, label_namespace="pos")
if dependencies is not None:
# We don't want to expand the label namespace with an additional dummy token, so we'll
# always give the 'ROOT_HEAD' token a label of 'root'.
fields["head_tags"] = SequenceLabelField(
[x[0] for x in dependencies], text_field, label_namespace="head_tags"
)
fields["head_indices"] = SequenceLabelField(
[x[1] for x in dependencies], text_field, label_namespace="head_index_tags"
)
fields["metadata"] = MetadataField({"words": words, "pos": upos_tags})
return Instance(fields)
| allennlp-models-main | allennlp_models/structured_prediction/dataset_readers/universal_dependencies.py |
from allennlp_models.structured_prediction.dataset_readers.penn_tree_bank import (
PennTreeBankConstituencySpanDatasetReader,
)
from allennlp_models.structured_prediction.dataset_readers.semantic_dependencies import (
SemanticDependenciesDatasetReader,
)
from allennlp_models.structured_prediction.dataset_readers.srl import SrlReader
from allennlp_models.structured_prediction.dataset_readers.universal_dependencies import (
UniversalDependenciesDatasetReader,
)
| allennlp-models-main | allennlp_models/structured_prediction/dataset_readers/__init__.py |
from typing import Dict, List, Tuple
import logging
from allennlp.common.file_utils import cached_path
from allennlp.data.dataset_readers.dataset_reader import DatasetReader
from allennlp.data.fields import AdjacencyField, MetadataField, SequenceLabelField
from allennlp.data.fields import Field, TextField
from allennlp.data.token_indexers import SingleIdTokenIndexer, TokenIndexer
from allennlp.data.tokenizers import Token
from allennlp.data.instance import Instance
logger = logging.getLogger(__name__)
FIELDS = ["id", "form", "lemma", "pos", "head", "deprel", "top", "pred", "frame"]
def parse_sentence(
sentence_blob: str,
) -> Tuple[List[Dict[str, str]], List[Tuple[int, int]], List[str]]:
"""
Parses a chunk of text in the SemEval SDP format.
Each word in the sentence is returned as a dictionary with the following
format:
```
'id': '1',
'form': 'Pierre',
'lemma': 'Pierre',
'pos': 'NNP',
'head': '2', # Note that this is the `syntactic` head.
'deprel': 'nn',
'top': '-',
'pred': '+',
'frame': 'named:x-c'
```
Along with a list of arcs and their corresponding tags. Note that
in semantic dependency parsing words can have more than one head
(it is not a tree), meaning that the list of arcs and tags are
not tied to the length of the sentence.
"""
annotated_sentence = []
arc_indices = []
arc_tags = []
predicates = []
lines = [
line.split("\t")
for line in sentence_blob.split("\n")
if line and not line.strip().startswith("#")
]
for line_idx, line in enumerate(lines):
annotated_token = {k: v for k, v in zip(FIELDS, line)}
if annotated_token["pred"] == "+":
predicates.append(line_idx)
annotated_sentence.append(annotated_token)
for line_idx, line in enumerate(lines):
for predicate_idx, arg in enumerate(line[len(FIELDS) :]):
if arg != "_":
arc_indices.append((line_idx, predicates[predicate_idx]))
arc_tags.append(arg)
return annotated_sentence, arc_indices, arc_tags
def lazy_parse(text: str):
for sentence in text.split("\n\n"):
if sentence:
yield parse_sentence(sentence)
@DatasetReader.register("semantic_dependencies")
class SemanticDependenciesDatasetReader(DatasetReader):
"""
Reads a file in the SemEval 2015 Task 18 (Broad-coverage Semantic Dependency Parsing)
format.
Registered as a `DatasetReader` with name "semantic_dependencies".
# Parameters
token_indexers : `Dict[str, TokenIndexer]`, optional (default=`{"tokens": SingleIdTokenIndexer()}`)
The token indexers to be applied to the words TextField.
skip_when_no_arcs : `bool`, optional (default=`True`)
If this is true, skip examples containing no semantic arcs.
"""
def __init__(
self,
token_indexers: Dict[str, TokenIndexer] = None,
skip_when_no_arcs: bool = True,
**kwargs,
) -> None:
super().__init__(**kwargs)
self._token_indexers = token_indexers or {"tokens": SingleIdTokenIndexer()}
self._skip_when_no_arcs = skip_when_no_arcs
def _read(self, file_path: str):
# if `file_path` is a URL, redirect to the cache
file_path = cached_path(file_path)
logger.info("Reading semantic dependency parsing data from: %s", file_path)
with open(file_path) as sdp_file:
for annotated_sentence, directed_arc_indices, arc_tags in lazy_parse(sdp_file.read()):
# If there are no arc indices, skip this instance.
if self._skip_when_no_arcs and not directed_arc_indices:
continue
tokens = [word["form"] for word in annotated_sentence]
pos_tags = [word["pos"] for word in annotated_sentence]
yield self.text_to_instance(tokens, pos_tags, directed_arc_indices, arc_tags)
def text_to_instance(
self, # type: ignore
tokens: List[str],
pos_tags: List[str] = None,
arc_indices: List[Tuple[int, int]] = None,
arc_tags: List[str] = None,
) -> Instance:
fields: Dict[str, Field] = {}
token_field = TextField([Token(t) for t in tokens], self._token_indexers)
fields["tokens"] = token_field
fields["metadata"] = MetadataField({"tokens": tokens})
if pos_tags is not None:
fields["pos_tags"] = SequenceLabelField(pos_tags, token_field, label_namespace="pos")
if arc_indices is not None and arc_tags is not None:
fields["arc_tags"] = AdjacencyField(arc_indices, token_field, arc_tags)
return Instance(fields)
| allennlp-models-main | allennlp_models/structured_prediction/dataset_readers/semantic_dependencies.py |
import logging
from typing import Dict, List, Iterable, Tuple, Any
from transformers.models.bert.tokenization_bert import BertTokenizer
from allennlp.common.file_utils import cached_path
from allennlp.data.dataset_readers.dataset_reader import DatasetReader
from allennlp.data.fields import Field, TextField, SequenceLabelField, MetadataField
from allennlp.data.instance import Instance
from allennlp.data.token_indexers import SingleIdTokenIndexer, TokenIndexer
from allennlp.data.tokenizers import Token
from allennlp_models.common.ontonotes import Ontonotes, OntonotesSentence
logger = logging.getLogger(__name__)
def _convert_tags_to_wordpiece_tags(tags: List[str], offsets: List[int]) -> List[str]:
"""
Converts a series of BIO tags to account for a wordpiece tokenizer,
extending/modifying BIO tags where appropriate to deal with words which
are split into multiple wordpieces by the tokenizer.
This is only used if you pass a `bert_model_name` to the dataset reader below.
# Parameters
tags : `List[str]`
The BIO formatted tags to convert to BIO tags for wordpieces
offsets : `List[int]`
The wordpiece offsets.
# Returns
The new BIO tags.
"""
new_tags = []
j = 0
for i, offset in enumerate(offsets):
tag = tags[i]
is_o = tag == "O"
is_start = True
while j < offset:
if is_o:
new_tags.append("O")
elif tag.startswith("I"):
new_tags.append(tag)
elif is_start and tag.startswith("B"):
new_tags.append(tag)
is_start = False
elif tag.startswith("B"):
_, label = tag.split("-", 1)
new_tags.append("I-" + label)
j += 1
# Add O tags for cls and sep tokens.
return ["O"] + new_tags + ["O"]
def _convert_verb_indices_to_wordpiece_indices(verb_indices: List[int], offsets: List[int]):
"""
Converts binary verb indicators to account for a wordpiece tokenizer,
extending/modifying BIO tags where appropriate to deal with words which
are split into multiple wordpieces by the tokenizer.
This is only used if you pass a `bert_model_name` to the dataset reader below.
# Parameters
verb_indices : `List[int]`
The binary verb indicators, 0 for not a verb, 1 for verb.
offsets : `List[int]`
The wordpiece offsets.
# Returns
The new verb indices.
"""
j = 0
new_verb_indices = []
for i, offset in enumerate(offsets):
indicator = verb_indices[i]
while j < offset:
new_verb_indices.append(indicator)
j += 1
# Add 0 indicators for cls and sep tokens.
return [0] + new_verb_indices + [0]
@DatasetReader.register("srl")
class SrlReader(DatasetReader):
"""
This DatasetReader is designed to read in the English OntoNotes v5.0 data
for semantic role labelling. It returns a dataset of instances with the
following fields:
tokens : `TextField`
The tokens in the sentence.
verb_indicator : `SequenceLabelField`
A sequence of binary indicators for whether the word is the verb for this frame.
tags : `SequenceLabelField`
A sequence of Propbank tags for the given verb in a BIO format.
# Parameters
token_indexers : `Dict[str, TokenIndexer]`, optional
We similarly use this for both the premise and the hypothesis. See :class:`TokenIndexer`.
Default is `{"tokens": SingleIdTokenIndexer()}`.
domain_identifier : `str`, (default = `None`)
A string denoting a sub-domain of the Ontonotes 5.0 dataset to use. If present, only
conll files under paths containing this domain identifier will be processed.
bert_model_name : `Optional[str]`, (default = `None`)
The BERT model to be wrapped. If you specify a bert_model here, then we will
assume you want to use BERT throughout; we will use the bert tokenizer,
and will expand your tags and verb indicators accordingly. If not,
the tokens will be indexed as normal with the token_indexers.
# Returns
A `Dataset` of `Instances` for Semantic Role Labelling.
"""
def __init__(
self,
token_indexers: Dict[str, TokenIndexer] = None,
domain_identifier: str = None,
bert_model_name: str = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
if token_indexers is not None:
self._token_indexers = token_indexers
elif bert_model_name is not None:
from allennlp.data.token_indexers import PretrainedTransformerIndexer
self._token_indexers = {"tokens": PretrainedTransformerIndexer(bert_model_name)}
else:
self._token_indexers = {"tokens": SingleIdTokenIndexer()}
self._domain_identifier = domain_identifier
if bert_model_name is not None:
self.bert_tokenizer = BertTokenizer.from_pretrained(bert_model_name)
self.lowercase_input = "uncased" in bert_model_name
else:
self.bert_tokenizer = None
self.lowercase_input = False
def _wordpiece_tokenize_input(
self, tokens: List[str]
) -> Tuple[List[str], List[int], List[int]]:
"""
Convert a list of tokens to wordpiece tokens and offsets, as well as adding
BERT CLS and SEP tokens to the beginning and end of the sentence.
A slight oddity with this function is that it also returns the wordpiece offsets
corresponding to the _start_ of words as well as the end.
We need both of these offsets (or at least, it's easiest to use both), because we need
to convert the labels to tags using the end_offsets. However, when we are decoding a
BIO sequence inside the SRL model itself, it's important that we use the start_offsets,
because otherwise we might select an ill-formed BIO sequence from the BIO sequence on top of
wordpieces (this happens in the case that a word is split into multiple word pieces,
and then we take the last tag of the word, which might correspond to, e.g, I-V, which
would not be allowed as it is not preceded by a B tag).
For example:
`annotate` will be bert tokenized as ["anno", "##tate"].
If this is tagged as [B-V, I-V] as it should be, we need to select the
_first_ wordpiece label to be the label for the token, because otherwise
we may end up with invalid tag sequences (we cannot start a new tag with an I).
# Returns
wordpieces : `List[str]`
The BERT wordpieces from the words in the sentence.
end_offsets : `List[int]`
Indices into wordpieces such that `[wordpieces[i] for i in end_offsets]`
results in the end wordpiece of each word being chosen.
start_offsets : `List[int]`
Indices into wordpieces such that `[wordpieces[i] for i in start_offsets]`
results in the start wordpiece of each word being chosen.
"""
word_piece_tokens: List[str] = []
end_offsets = []
start_offsets = []
cumulative = 0
for token in tokens:
if self.lowercase_input:
token = token.lower()
word_pieces = self.bert_tokenizer.wordpiece_tokenizer.tokenize(token)
start_offsets.append(cumulative + 1)
cumulative += len(word_pieces)
end_offsets.append(cumulative)
word_piece_tokens.extend(word_pieces)
wordpieces = ["[CLS]"] + word_piece_tokens + ["[SEP]"]
return wordpieces, end_offsets, start_offsets
def _read(self, file_path: str):
# if `file_path` is a URL, redirect to the cache
file_path = cached_path(file_path)
ontonotes_reader = Ontonotes()
logger.info("Reading SRL instances from dataset files at: %s", file_path)
if self._domain_identifier is not None:
logger.info(
"Filtering to only include file paths containing the %s domain",
self._domain_identifier,
)
for sentence in self._ontonotes_subset(
ontonotes_reader, file_path, self._domain_identifier
):
tokens = [Token(t) for t in sentence.words]
if not sentence.srl_frames:
# Sentence contains no predicates.
tags = ["O" for _ in tokens]
verb_label = [0 for _ in tokens]
yield self.text_to_instance(tokens, verb_label, tags)
else:
for (_, tags) in sentence.srl_frames:
verb_indicator = [1 if label[-2:] == "-V" else 0 for label in tags]
yield self.text_to_instance(tokens, verb_indicator, tags)
@staticmethod
def _ontonotes_subset(
ontonotes_reader: Ontonotes, file_path: str, domain_identifier: str
) -> Iterable[OntonotesSentence]:
"""
Iterates over the Ontonotes 5.0 dataset using an optional domain identifier.
If the domain identifier is present, only examples which contain the domain
identifier in the file path are yielded.
"""
for conll_file in ontonotes_reader.dataset_path_iterator(file_path):
if domain_identifier is None or f"/{domain_identifier}/" in conll_file:
yield from ontonotes_reader.sentence_iterator(conll_file)
def text_to_instance( # type: ignore
self, tokens: List[Token], verb_label: List[int], tags: List[str] = None
) -> Instance:
"""
We take `pre-tokenized` input here, along with a verb label. The verb label should be a
one-hot binary vector, the same length as the tokens, indicating the position of the verb
to find arguments for.
"""
metadata_dict: Dict[str, Any] = {}
if self.bert_tokenizer is not None:
wordpieces, offsets, start_offsets = self._wordpiece_tokenize_input(
[t.text for t in tokens]
)
new_verbs = _convert_verb_indices_to_wordpiece_indices(verb_label, offsets)
metadata_dict["offsets"] = start_offsets
# In order to override the indexing mechanism, we need to set the `text_id`
# attribute directly. This causes the indexing to use this id.
text_field = TextField(
[Token(t, text_id=self.bert_tokenizer.vocab[t]) for t in wordpieces],
token_indexers=self._token_indexers,
)
verb_indicator = SequenceLabelField(new_verbs, text_field)
else:
text_field = TextField(tokens, token_indexers=self._token_indexers)
verb_indicator = SequenceLabelField(verb_label, text_field)
fields: Dict[str, Field] = {}
fields["tokens"] = text_field
fields["verb_indicator"] = verb_indicator
if all(x == 0 for x in verb_label):
verb = None
verb_index = None
else:
verb_index = verb_label.index(1)
verb = tokens[verb_index].text
metadata_dict["words"] = [x.text for x in tokens]
metadata_dict["verb"] = verb
metadata_dict["verb_index"] = verb_index
if tags:
if self.bert_tokenizer is not None:
new_tags = _convert_tags_to_wordpiece_tags(tags, offsets)
fields["tags"] = SequenceLabelField(new_tags, text_field)
else:
fields["tags"] = SequenceLabelField(tags, text_field)
metadata_dict["gold_tags"] = tags
fields["metadata"] = MetadataField(metadata_dict)
return Instance(fields)
| allennlp-models-main | allennlp_models/structured_prediction/dataset_readers/srl.py |
from typing import Dict, List, Tuple
import logging
import os
# NLTK is so performance orientated (ha ha) that they have lazy imports. Why? Who knows.
from nltk.corpus.reader.bracket_parse import BracketParseCorpusReader
from nltk.tree import Tree
from allennlp.common.file_utils import cached_path
from allennlp.data.dataset_readers.dataset_reader import DatasetReader
from allennlp.data.fields import (
TextField,
SpanField,
SequenceLabelField,
ListField,
MetadataField,
Field,
)
from allennlp.data.instance import Instance
from allennlp.data.token_indexers import TokenIndexer, SingleIdTokenIndexer
from allennlp.data.tokenizers import Token
from allennlp.data.dataset_readers.dataset_utils.span_utils import enumerate_spans
from allennlp.common.checks import ConfigurationError
logger = logging.getLogger(__name__)
PTB_PARENTHESES = {
"-LRB-": "(",
"-RRB-": ")",
"-LCB-": "{",
"-RCB-": "}",
"-LSB-": "[",
"-RSB-": "]",
}
@DatasetReader.register("ptb_trees")
class PennTreeBankConstituencySpanDatasetReader(DatasetReader):
"""
Reads constituency parses from the WSJ part of the Penn Tree Bank from the LDC.
This `DatasetReader` is designed for use with a span labelling model, so
it enumerates all possible spans in the sentence and returns them, along with gold
labels for the relevant spans present in a gold tree, if provided.
# Parameters
token_indexers : `Dict[str, TokenIndexer]`, optional (default=`{"tokens": SingleIdTokenIndexer()}`)
We use this to define the input representation for the text. See :class:`TokenIndexer`.
Note that the `output` tags will always correspond to single token IDs based on how they
are pre-tokenised in the data file.
use_pos_tags : `bool`, optional, (default = `True`)
Whether or not the instance should contain gold POS tags
as a field.
convert_parentheses : `bool`, optional, (default = `False`)
Whether or not to convert special PTB parentheses tokens (e.g., "-LRB-")
to the corresponding parentheses tokens (i.e., "(").
label_namespace_prefix : `str`, optional, (default = `""`)
Prefix used for the label namespace. The `span_labels` will use
namespace `label_namespace_prefix + 'labels'`, and if using POS
tags their namespace is `label_namespace_prefix + pos_label_namespace`.
pos_label_namespace : `str`, optional, (default = `"pos"`)
The POS tag namespace is `label_namespace_prefix + pos_label_namespace`.
"""
def __init__(
self,
token_indexers: Dict[str, TokenIndexer] = None,
use_pos_tags: bool = True,
convert_parentheses: bool = False,
label_namespace_prefix: str = "",
pos_label_namespace: str = "pos",
**kwargs,
) -> None:
super().__init__(**kwargs)
self._token_indexers = token_indexers or {"tokens": SingleIdTokenIndexer()}
self._use_pos_tags = use_pos_tags
self._convert_parentheses = convert_parentheses
self._label_namespace_prefix = label_namespace_prefix
self._pos_label_namespace = pos_label_namespace
def _read(self, file_path):
# if `file_path` is a URL, redirect to the cache
file_path = cached_path(file_path)
directory, filename = os.path.split(file_path)
logger.info("Reading instances from lines in file at: %s", file_path)
for parse in BracketParseCorpusReader(root=directory, fileids=[filename]).parsed_sents():
self._strip_functional_tags(parse)
# This is un-needed and clutters the label space.
# All the trees also contain a root S node.
if parse.label() == "VROOT" or parse.label() == "TOP":
parse = parse[0]
pos_tags = [x[1] for x in parse.pos()] if self._use_pos_tags else None
yield self.text_to_instance(parse.leaves(), pos_tags, parse)
def text_to_instance(
self, # type: ignore
tokens: List[str],
pos_tags: List[str] = None,
gold_tree: Tree = None,
) -> Instance:
"""
We take `pre-tokenized` input here, because we don't have a tokenizer in this class.
# Parameters
tokens : `List[str]`, required.
The tokens in a given sentence.
pos_tags : `List[str]`, optional, (default = `None`).
The POS tags for the words in the sentence.
gold_tree : `Tree`, optional (default = `None`).
The gold parse tree to create span labels from.
# Returns
An `Instance` containing the following fields:
tokens : `TextField`
The tokens in the sentence.
pos_tags : `SequenceLabelField`
The POS tags of the words in the sentence.
Only returned if `use_pos_tags` is `True`
spans : `ListField[SpanField]`
A ListField containing all possible subspans of the
sentence.
span_labels : `SequenceLabelField`, optional.
The constituency tags for each of the possible spans, with
respect to a gold parse tree. If a span is not contained
within the tree, a span will have a `NO-LABEL` label.
gold_tree : `MetadataField(Tree)`
The gold NLTK parse tree for use in evaluation.
"""
if self._convert_parentheses:
tokens = [PTB_PARENTHESES.get(token, token) for token in tokens]
text_field = TextField([Token(x) for x in tokens], token_indexers=self._token_indexers)
fields: Dict[str, Field] = {"tokens": text_field}
pos_namespace = self._label_namespace_prefix + self._pos_label_namespace
if self._use_pos_tags and pos_tags is not None:
pos_tag_field = SequenceLabelField(pos_tags, text_field, label_namespace=pos_namespace)
fields["pos_tags"] = pos_tag_field
elif self._use_pos_tags:
raise ConfigurationError(
"use_pos_tags was set to True but no gold pos"
" tags were passed to the dataset reader."
)
spans: List[Field] = []
gold_labels = []
if gold_tree is not None:
gold_spans: Dict[Tuple[int, int], str] = {}
self._get_gold_spans(gold_tree, 0, gold_spans)
else:
gold_spans = None
for start, end in enumerate_spans(tokens):
spans.append(SpanField(start, end, text_field))
if gold_spans is not None:
gold_labels.append(gold_spans.get((start, end), "NO-LABEL"))
metadata = {"tokens": tokens}
if gold_tree:
metadata["gold_tree"] = gold_tree
if self._use_pos_tags:
metadata["pos_tags"] = pos_tags
fields["metadata"] = MetadataField(metadata)
span_list_field: ListField = ListField(spans)
fields["spans"] = span_list_field
if gold_tree is not None:
fields["span_labels"] = SequenceLabelField(
gold_labels,
span_list_field,
label_namespace=self._label_namespace_prefix + "labels",
)
return Instance(fields)
def _strip_functional_tags(self, tree: Tree) -> None:
"""
Removes all functional tags from constituency labels in an NLTK tree.
We also strip off anything after a =, - or | character, because these
are functional tags which we don't want to use.
This modification is done in-place.
"""
clean_label = tree.label().split("=")[0].split("-")[0].split("|")[0]
tree.set_label(clean_label)
for child in tree:
if not isinstance(child[0], str):
self._strip_functional_tags(child)
def _get_gold_spans(
self, tree: Tree, index: int, typed_spans: Dict[Tuple[int, int], str]
) -> int:
"""
Recursively construct the gold spans from an nltk `Tree`.
Labels are the constituents, and in the case of nested constituents
with the same spans, labels are concatenated in parent-child order.
For example, `(S (NP (D the) (N man)))` would have an `S-NP` label
for the outer span, as it has both `S` and `NP` label.
Spans are inclusive.
TODO(Mark): If we encounter a gold nested labelling at test time
which we haven't encountered, we won't be able to run the model
at all.
# Parameters
tree : `Tree`, required.
An NLTK parse tree to extract spans from.
index : `int`, required.
The index of the current span in the sentence being considered.
typed_spans : `Dict[Tuple[int, int], str]`, required.
A dictionary mapping spans to span labels.
# Returns
typed_spans : `Dict[Tuple[int, int], str]`.
A dictionary mapping all subtree spans in the parse tree
to their constituency labels. POS tags are ignored.
"""
# NLTK leaves are strings.
if isinstance(tree[0], str):
# The "length" of a tree is defined by
# NLTK as the number of children.
# We don't actually want the spans for leaves, because
# their labels are POS tags. Instead, we just add the length
# of the word to the end index as we iterate through.
end = index + len(tree)
else:
# otherwise, the tree has children.
child_start = index
for child in tree:
# typed_spans is being updated inplace.
end = self._get_gold_spans(child, child_start, typed_spans)
child_start = end
# Set the end index of the current span to
# the last appended index - 1, as the span is inclusive.
span = (index, end - 1)
current_span_label = typed_spans.get(span)
if current_span_label is None:
# This span doesn't have nested labels, just
# use the current node's label.
typed_spans[span] = tree.label()
else:
# This span has already been added, so prepend
# this label (as we are traversing the tree from
# the bottom up).
typed_spans[span] = tree.label() + "-" + current_span_label
return end
| allennlp-models-main | allennlp_models/structured_prediction/dataset_readers/penn_tree_bank.py |
from typing import Dict, Any, List, Tuple
from allennlp.common.util import JsonDict, sanitize
from allennlp.data import DatasetReader, Instance
from allennlp.models import Model
from allennlp.predictors.predictor import Predictor
from allennlp.data.tokenizers.spacy_tokenizer import SpacyTokenizer
# POS tags have a unified colour.
NODE_TYPE_TO_STYLE = {}
NODE_TYPE_TO_STYLE["root"] = ["color5", "strong"]
NODE_TYPE_TO_STYLE["dep"] = ["color5", "strong"]
# Arguments
NODE_TYPE_TO_STYLE["nsubj"] = ["color1"]
NODE_TYPE_TO_STYLE["nsubjpass"] = ["color1"]
NODE_TYPE_TO_STYLE["csubj"] = ["color1"]
NODE_TYPE_TO_STYLE["csubjpass"] = ["color1"]
# Complements
NODE_TYPE_TO_STYLE["pobj"] = ["color2"]
NODE_TYPE_TO_STYLE["dobj"] = ["color2"]
NODE_TYPE_TO_STYLE["iobj"] = ["color2"]
NODE_TYPE_TO_STYLE["mark"] = ["color2"]
NODE_TYPE_TO_STYLE["pcomp"] = ["color2"]
NODE_TYPE_TO_STYLE["xcomp"] = ["color2"]
NODE_TYPE_TO_STYLE["ccomp"] = ["color2"]
NODE_TYPE_TO_STYLE["acomp"] = ["color2"]
# Modifiers
NODE_TYPE_TO_STYLE["aux"] = ["color3"]
NODE_TYPE_TO_STYLE["cop"] = ["color3"]
NODE_TYPE_TO_STYLE["det"] = ["color3"]
NODE_TYPE_TO_STYLE["conj"] = ["color3"]
NODE_TYPE_TO_STYLE["cc"] = ["color3"]
NODE_TYPE_TO_STYLE["prep"] = ["color3"]
NODE_TYPE_TO_STYLE["number"] = ["color3"]
NODE_TYPE_TO_STYLE["possesive"] = ["color3"]
NODE_TYPE_TO_STYLE["poss"] = ["color3"]
NODE_TYPE_TO_STYLE["discourse"] = ["color3"]
NODE_TYPE_TO_STYLE["expletive"] = ["color3"]
NODE_TYPE_TO_STYLE["prt"] = ["color3"]
NODE_TYPE_TO_STYLE["advcl"] = ["color3"]
NODE_TYPE_TO_STYLE["mod"] = ["color4"]
NODE_TYPE_TO_STYLE["amod"] = ["color4"]
NODE_TYPE_TO_STYLE["tmod"] = ["color4"]
NODE_TYPE_TO_STYLE["quantmod"] = ["color4"]
NODE_TYPE_TO_STYLE["npadvmod"] = ["color4"]
NODE_TYPE_TO_STYLE["infmod"] = ["color4"]
NODE_TYPE_TO_STYLE["advmod"] = ["color4"]
NODE_TYPE_TO_STYLE["appos"] = ["color4"]
NODE_TYPE_TO_STYLE["nn"] = ["color4"]
NODE_TYPE_TO_STYLE["neg"] = ["color0"]
NODE_TYPE_TO_STYLE["punct"] = ["color0"]
LINK_TO_POSITION = {}
# Put subjects on the left
LINK_TO_POSITION["nsubj"] = "left"
LINK_TO_POSITION["nsubjpass"] = "left"
LINK_TO_POSITION["csubj"] = "left"
LINK_TO_POSITION["csubjpass"] = "left"
# Put arguments and some clauses on the right
LINK_TO_POSITION["pobj"] = "right"
LINK_TO_POSITION["dobj"] = "right"
LINK_TO_POSITION["iobj"] = "right"
LINK_TO_POSITION["pcomp"] = "right"
LINK_TO_POSITION["xcomp"] = "right"
LINK_TO_POSITION["ccomp"] = "right"
LINK_TO_POSITION["acomp"] = "right"
# exist_ok has to be true until we remove this from the core library
@Predictor.register("biaffine_dependency_parser", exist_ok=True)
class BiaffineDependencyParserPredictor(Predictor):
"""
Predictor for the [`BiaffineDependencyParser`](../models/biaffine_dependency_parser.md) model.
"""
def __init__(
self, model: Model, dataset_reader: DatasetReader, language: str = "en_core_web_sm"
) -> None:
super().__init__(model, dataset_reader)
# TODO(Mark) Make the language configurable and based on a model attribute.
self._tokenizer = SpacyTokenizer(language=language, pos_tags=True)
def predict(self, sentence: str) -> JsonDict:
"""
Predict a dependency parse for the given sentence.
# Parameters
sentence The sentence to parse.
# Returns
A dictionary representation of the dependency tree.
"""
return self.predict_json({"sentence": sentence})
def _json_to_instance(self, json_dict: JsonDict) -> Instance:
"""
Expects JSON that looks like `{"sentence": "..."}`.
"""
spacy_tokens = self._tokenizer.tokenize(json_dict["sentence"])
sentence_text = [token.text for token in spacy_tokens]
if self._dataset_reader.use_language_specific_pos: # type: ignore
# fine-grained part of speech
pos_tags = [token.tag_ for token in spacy_tokens]
else:
# coarse-grained part of speech (Universal Depdendencies format)
pos_tags = [token.pos_ for token in spacy_tokens]
return self._dataset_reader.text_to_instance(sentence_text, pos_tags)
def predict_instance(self, instance: Instance) -> JsonDict:
outputs = self._model.forward_on_instance(instance)
words = outputs["words"]
pos = outputs["pos"]
heads = outputs["predicted_heads"]
tags = outputs["predicted_dependencies"]
outputs["hierplane_tree"] = self._build_hierplane_tree(words, heads, tags, pos)
return sanitize(outputs)
def predict_batch_instance(self, instances: List[Instance]) -> List[JsonDict]:
outputs = self._model.forward_on_instances(instances)
for output in outputs:
words = output["words"]
pos = output["pos"]
heads = output["predicted_heads"]
tags = output["predicted_dependencies"]
output["hierplane_tree"] = self._build_hierplane_tree(words, heads, tags, pos)
return sanitize(outputs)
@staticmethod
def _build_hierplane_tree(
words: List[str], heads: List[int], tags: List[str], pos: List[str]
) -> Dict[str, Any]:
"""
# Returns
A JSON dictionary render-able by Hierplane for the given tree.
"""
word_index_to_cumulative_indices: Dict[int, Tuple[int, int]] = {}
cumulative_index = 0
for i, word in enumerate(words):
word_length = len(word) + 1
word_index_to_cumulative_indices[i] = (cumulative_index, cumulative_index + word_length)
cumulative_index += word_length
def node_constuctor(index: int):
children = []
for next_index, child in enumerate(heads):
if child == index + 1:
children.append(node_constuctor(next_index))
# These are the icons which show up in the bottom right
# corner of the node.
attributes = [pos[index]]
start, end = word_index_to_cumulative_indices[index]
hierplane_node = {
"word": words[index],
# The type of the node - all nodes with the same
# type have a unified colour.
"nodeType": tags[index],
# Attributes of the node.
"attributes": attributes,
# The link between the node and it's parent.
"link": tags[index],
"spans": [{"start": start, "end": end}],
}
if children:
hierplane_node["children"] = children
return hierplane_node
# We are guaranteed that there is a single word pointing to
# the root index, so we can find it just by searching for 0 in the list.
root_index = heads.index(0)
hierplane_tree = {
"text": " ".join(words),
"root": node_constuctor(root_index),
"nodeTypeToStyle": NODE_TYPE_TO_STYLE,
"linkToPosition": LINK_TO_POSITION,
}
return hierplane_tree
| allennlp-models-main | allennlp_models/structured_prediction/predictors/biaffine_dependency_parser.py |
# Copied from spaCy version 2.x
from spacy.symbols import (
POS,
PUNCT,
SYM,
ADJ,
CCONJ,
NUM,
DET,
ADV,
ADP,
X,
VERB,
NOUN,
PROPN,
PART,
INTJ,
SPACE,
PRON,
)
TAG_MAP = {
".": {POS: PUNCT, "PunctType": "peri"},
",": {POS: PUNCT, "PunctType": "comm"},
"-LRB-": {POS: PUNCT, "PunctType": "brck", "PunctSide": "ini"},
"-RRB-": {POS: PUNCT, "PunctType": "brck", "PunctSide": "fin"},
"``": {POS: PUNCT, "PunctType": "quot", "PunctSide": "ini"},
'""': {POS: PUNCT, "PunctType": "quot", "PunctSide": "fin"},
"''": {POS: PUNCT, "PunctType": "quot", "PunctSide": "fin"},
":": {POS: PUNCT},
"$": {POS: SYM},
"#": {POS: SYM},
"AFX": {POS: ADJ, "Hyph": "yes"},
"CC": {POS: CCONJ, "ConjType": "comp"},
"CD": {POS: NUM, "NumType": "card"},
"DT": {POS: DET},
"EX": {POS: PRON, "AdvType": "ex"},
"FW": {POS: X, "Foreign": "yes"},
"HYPH": {POS: PUNCT, "PunctType": "dash"},
"IN": {POS: ADP},
"JJ": {POS: ADJ, "Degree": "pos"},
"JJR": {POS: ADJ, "Degree": "comp"},
"JJS": {POS: ADJ, "Degree": "sup"},
"LS": {POS: X, "NumType": "ord"},
"MD": {POS: VERB, "VerbType": "mod"},
"NIL": {POS: X},
"NN": {POS: NOUN, "Number": "sing"},
"NNP": {POS: PROPN, "NounType": "prop", "Number": "sing"},
"NNPS": {POS: PROPN, "NounType": "prop", "Number": "plur"},
"NNS": {POS: NOUN, "Number": "plur"},
"PDT": {POS: DET},
"POS": {POS: PART, "Poss": "yes"},
"PRP": {POS: PRON, "PronType": "prs"},
"PRP$": {POS: DET, "PronType": "prs", "Poss": "yes"},
"RB": {POS: ADV, "Degree": "pos"},
"RBR": {POS: ADV, "Degree": "comp"},
"RBS": {POS: ADV, "Degree": "sup"},
"RP": {POS: ADP},
"SP": {POS: SPACE},
"SYM": {POS: SYM},
"TO": {POS: PART, "PartType": "inf", "VerbForm": "inf"},
"UH": {POS: INTJ},
"VB": {POS: VERB, "VerbForm": "inf"},
"VBD": {POS: VERB, "VerbForm": "fin", "Tense": "past"},
"VBG": {POS: VERB, "VerbForm": "part", "Tense": "pres", "Aspect": "prog"},
"VBN": {POS: VERB, "VerbForm": "part", "Tense": "past", "Aspect": "perf"},
"VBP": {POS: VERB, "VerbForm": "fin", "Tense": "pres"},
"VBZ": {
POS: VERB,
"VerbForm": "fin",
"Tense": "pres",
"Number": "sing",
"Person": "three",
},
"WDT": {POS: DET},
"WP": {POS: PRON},
"WP$": {POS: DET, "Poss": "yes"},
"WRB": {POS: ADV},
"ADD": {POS: X},
"NFP": {POS: PUNCT},
"GW": {POS: X},
"XX": {POS: X},
"BES": {POS: VERB},
"HVS": {POS: VERB},
"_SP": {POS: SPACE},
}
| allennlp-models-main | allennlp_models/structured_prediction/predictors/util.py |
from allennlp_models.structured_prediction.predictors.biaffine_dependency_parser import (
BiaffineDependencyParserPredictor,
)
from allennlp_models.structured_prediction.predictors.constituency_parser import (
ConstituencyParserPredictor,
)
from allennlp_models.structured_prediction.predictors.openie import OpenIePredictor
from allennlp_models.structured_prediction.predictors.srl import SemanticRoleLabelerPredictor
| allennlp-models-main | allennlp_models/structured_prediction/predictors/__init__.py |
from typing import List
from nltk import Tree
from allennlp.common.util import JsonDict, sanitize
from allennlp.data import DatasetReader, Instance
from allennlp.models import Model
from allennlp.predictors.predictor import Predictor
from allennlp.data.tokenizers.spacy_tokenizer import SpacyTokenizer
from .util import TAG_MAP
# Make the links to POS tag nodes render as "pos",
# to distinguish them from constituency tags. The
# actual tag is still visible within the node.
LINK_TO_LABEL = {x: "pos" for x in TAG_MAP}
# POS tags have a unified colour.
NODE_TYPE_TO_STYLE = {x: ["color0"] for x in TAG_MAP}
# Verb and Noun phrases get their own colour.
NODE_TYPE_TO_STYLE["NP"] = ["color1"]
NODE_TYPE_TO_STYLE["NX"] = ["color1"]
NODE_TYPE_TO_STYLE["QP"] = ["color1"]
NODE_TYPE_TO_STYLE["NAC"] = ["color1"]
NODE_TYPE_TO_STYLE["VP"] = ["color2"]
# Clause level fragments
NODE_TYPE_TO_STYLE["S"] = ["color3"]
NODE_TYPE_TO_STYLE["SQ"] = ["color3"]
NODE_TYPE_TO_STYLE["SBAR"] = ["color3"]
NODE_TYPE_TO_STYLE["SBARQ"] = ["color3"]
NODE_TYPE_TO_STYLE["SINQ"] = ["color3"]
NODE_TYPE_TO_STYLE["FRAG"] = ["color3"]
NODE_TYPE_TO_STYLE["X"] = ["color3"]
# Wh-phrases.
NODE_TYPE_TO_STYLE["WHADVP"] = ["color4"]
NODE_TYPE_TO_STYLE["WHADJP"] = ["color4"]
NODE_TYPE_TO_STYLE["WHNP"] = ["color4"]
NODE_TYPE_TO_STYLE["WHPP"] = ["color4"]
# Prepositional Phrases get their own colour because
# they are linguistically interesting.
NODE_TYPE_TO_STYLE["PP"] = ["color6"]
# Everything else.
NODE_TYPE_TO_STYLE["ADJP"] = ["color5"]
NODE_TYPE_TO_STYLE["ADVP"] = ["color5"]
NODE_TYPE_TO_STYLE["CONJP"] = ["color5"]
NODE_TYPE_TO_STYLE["INTJ"] = ["color5"]
NODE_TYPE_TO_STYLE["LST"] = ["color5", "seq"]
NODE_TYPE_TO_STYLE["PRN"] = ["color5"]
NODE_TYPE_TO_STYLE["PRT"] = ["color5"]
NODE_TYPE_TO_STYLE["RRC"] = ["color5"]
NODE_TYPE_TO_STYLE["UCP"] = ["color5"]
@Predictor.register("constituency_parser")
class ConstituencyParserPredictor(Predictor):
"""
Predictor for the [`SpanConstituencyParser`](../models/constituency_parser.md) model.
"""
def __init__(
self, model: Model, dataset_reader: DatasetReader, language: str = "en_core_web_sm"
) -> None:
super().__init__(model, dataset_reader)
self._tokenizer = SpacyTokenizer(language=language, pos_tags=True)
def predict(self, sentence: str) -> JsonDict:
"""
Predict a constituency parse for the given sentence.
# Parameters
sentence : `str`
The sentence to parse.
# Returns
A dictionary representation of the constituency tree.
"""
return self.predict_json({"sentence": sentence})
def _json_to_instance(self, json_dict: JsonDict) -> Instance:
"""
Expects JSON that looks like `{"sentence": "..."}`.
"""
spacy_tokens = self._tokenizer.tokenize(json_dict["sentence"])
sentence_text = [token.text for token in spacy_tokens]
pos_tags = [token.tag_ for token in spacy_tokens]
return self._dataset_reader.text_to_instance(sentence_text, pos_tags)
def predict_instance(self, instance: Instance) -> JsonDict:
outputs = self._model.forward_on_instance(instance)
# format the NLTK tree as a string on a single line.
tree = outputs.pop("trees")
outputs["hierplane_tree"] = self._build_hierplane_tree(tree, 0, is_root=True)
outputs["trees"] = tree.pformat(margin=1000000)
return sanitize(outputs)
def predict_batch_instance(self, instances: List[Instance]) -> List[JsonDict]:
outputs = self._model.forward_on_instances(instances)
for output in outputs:
# format the NLTK tree as a string on a single line.
tree = output.pop("trees")
output["hierplane_tree"] = self._build_hierplane_tree(tree, 0, is_root=True)
output["trees"] = tree.pformat(margin=1000000)
return sanitize(outputs)
def _build_hierplane_tree(self, tree: Tree, index: int, is_root: bool) -> JsonDict:
"""
Recursively builds a JSON dictionary from an NLTK `Tree` suitable for
rendering trees using the `Hierplane library<https://allenai.github.io/hierplane/>`.
# Parameters
tree : `Tree`, required.
The tree to convert into Hierplane JSON.
index : `int`, required.
The character index into the tree, used for creating spans.
is_root : `bool`
An indicator which allows us to add the outer Hierplane JSON which
is required for rendering.
# Returns
A JSON dictionary render-able by Hierplane for the given tree.
"""
children = []
for child in tree:
if isinstance(child, Tree):
# If the child is a tree, it has children,
# as NLTK leaves are just strings.
children.append(self._build_hierplane_tree(child, index, is_root=False))
else:
# We're at a leaf, so add the length of
# the word to the character index.
index += len(child)
label = tree.label()
span = " ".join(tree.leaves())
hierplane_node = {"word": span, "nodeType": label, "attributes": [label], "link": label}
if children:
hierplane_node["children"] = children
# TODO(Mark): Figure out how to span highlighting to the leaves.
if is_root:
hierplane_node = {
"linkNameToLabel": LINK_TO_LABEL,
"nodeTypeToStyle": NODE_TYPE_TO_STYLE,
"text": span,
"root": hierplane_node,
}
return hierplane_node
| allennlp-models-main | allennlp_models/structured_prediction/predictors/constituency_parser.py |
from typing import List, Dict
import numpy
from spacy.tokens import Doc
from allennlp.common.util import JsonDict, sanitize, group_by_count
from allennlp.data import DatasetReader, Instance
from allennlp.data.tokenizers.spacy_tokenizer import SpacyTokenizer
from allennlp.models import Model
from allennlp.predictors.predictor import Predictor
@Predictor.register("semantic_role_labeling")
class SemanticRoleLabelerPredictor(Predictor):
"""
Predictor for the [`SemanticRoleLabeler`](../models/semantic_role_labeler.md) model.
"""
def __init__(
self, model: Model, dataset_reader: DatasetReader, language: str = "en_core_web_sm"
) -> None:
super().__init__(model, dataset_reader)
self._language = language
self._tokenizer = SpacyTokenizer(language=language, pos_tags=True)
def predict(self, sentence: str) -> JsonDict:
"""
Predicts the semantic roles of the supplied sentence and returns a dictionary
with the results.
```
{"words": [...],
"verbs": [
{"verb": "...", "description": "...", "tags": [...]},
...
{"verb": "...", "description": "...", "tags": [...]},
]}
```
# Parameters
sentence, `str`
The sentence to parse via semantic role labeling.
# Returns
A dictionary representation of the semantic roles in the sentence.
"""
return self.predict_json({"sentence": sentence})
def predict_tokenized(self, tokenized_sentence: List[str]) -> JsonDict:
"""
Predicts the semantic roles of the supplied sentence tokens and returns a dictionary
with the results.
# Parameters
tokenized_sentence, `List[str]`
The sentence tokens to parse via semantic role labeling.
# Returns
A dictionary representation of the semantic roles in the sentence.
"""
spacy_doc = Doc(self._tokenizer.spacy.vocab, words=tokenized_sentence)
for pipe in filter(None, self._tokenizer.spacy.pipeline):
pipe[1](spacy_doc)
tokens = [token for token in spacy_doc]
instances = self.tokens_to_instances(tokens)
if not instances:
return sanitize({"verbs": [], "words": tokens})
return self.predict_instances(instances)
@staticmethod
def make_srl_string(words: List[str], tags: List[str]) -> str:
frame = []
chunk = []
for (token, tag) in zip(words, tags):
if tag.startswith("I-"):
chunk.append(token)
else:
if chunk:
frame.append("[" + " ".join(chunk) + "]")
chunk = []
if tag.startswith("B-"):
chunk.append(tag[2:] + ": " + token)
elif tag == "O":
frame.append(token)
if chunk:
frame.append("[" + " ".join(chunk) + "]")
return " ".join(frame)
def _json_to_instance(self, json_dict: JsonDict):
raise NotImplementedError("The SRL model uses a different API for creating instances.")
def tokens_to_instances(self, tokens):
words = [token.text for token in tokens]
instances: List[Instance] = []
for i, word in enumerate(tokens):
# We treat auxiliaries as verbs only for English for now to be safe. We didn't want to
# hypothetically break the predictor for unknown number of other languages where
# auxiliaries can't be treated this way.
if word.pos_ == "VERB" or (self._language.startswith("en_") and word.pos_ == "AUX"):
verb_labels = [0 for _ in words]
verb_labels[i] = 1
instance = self._dataset_reader.text_to_instance(tokens, verb_labels)
instances.append(instance)
return instances
def _sentence_to_srl_instances(self, json_dict: JsonDict) -> List[Instance]:
"""
The SRL model has a slightly different API from other models, as the model is run
forward for every verb in the sentence. This means that for a single sentence, we need
to generate a `List[Instance]`, where the length of this list corresponds to the number
of verbs in the sentence. Additionally, all of these verbs share the same return dictionary
after being passed through the model (as really we care about all the frames of the sentence
together, rather than separately).
# Parameters
json_dict : `JsonDict`, required.
JSON that looks like `{"sentence": "..."}`.
# Returns
instances : `List[Instance]`
One instance per verb.
"""
sentence = json_dict["sentence"]
tokens = self._tokenizer.tokenize(sentence)
return self.tokens_to_instances(tokens)
def predict_batch_json(self, inputs: List[JsonDict]) -> List[JsonDict]:
"""
Expects JSON that looks like `[{"sentence": "..."}, {"sentence": "..."}, ...]`
and returns JSON that looks like
```
[
{"words": [...],
"verbs": [
{"verb": "...", "description": "...", "tags": [...]},
...
{"verb": "...", "description": "...", "tags": [...]},
]},
{"words": [...],
"verbs": [
{"verb": "...", "description": "...", "tags": [...]},
...
{"verb": "...", "description": "...", "tags": [...]},
]}
]
```
"""
# For SRL, we have more instances than sentences, but the user specified
# a batch size with respect to the number of sentences passed, so we respect
# that here by taking the batch size which we use to be the number of sentences
# we are given.
batch_size = len(inputs)
instances_per_sentence = [self._sentence_to_srl_instances(json) for json in inputs]
flattened_instances = [
instance
for sentence_instances in instances_per_sentence
for instance in sentence_instances
]
if not flattened_instances:
return sanitize(
[{"verbs": [], "words": self._tokenizer.tokenize(x["sentence"])} for x in inputs]
)
# Make the instances into batches and check the last batch for
# padded elements as the number of instances might not be perfectly
# divisible by the batch size.
batched_instances = group_by_count(flattened_instances, batch_size, None)
batched_instances[-1] = [
instance for instance in batched_instances[-1] if instance is not None
]
# Run the model on the batches.
outputs: List[Dict[str, numpy.ndarray]] = []
for batch in batched_instances:
outputs.extend(self._model.forward_on_instances(batch))
verbs_per_sentence = [len(sent) for sent in instances_per_sentence]
return_dicts: List[JsonDict] = [{"verbs": []} for x in inputs]
output_index = 0
for sentence_index, verb_count in enumerate(verbs_per_sentence):
if verb_count == 0:
# We didn't run any predictions for sentences with no verbs,
# so we don't have a way to extract the original sentence.
# Here we just tokenize the input again.
original_text = self._tokenizer.tokenize(inputs[sentence_index]["sentence"])
return_dicts[sentence_index]["words"] = original_text
continue
for _ in range(verb_count):
output = outputs[output_index]
words = output["words"]
tags = output["tags"]
description = self.make_srl_string(words, tags)
return_dicts[sentence_index]["words"] = words
return_dicts[sentence_index]["verbs"].append(
{"verb": output["verb"], "description": description, "tags": tags}
)
output_index += 1
return sanitize(return_dicts)
def predict_instances(self, instances: List[Instance]) -> JsonDict:
outputs = self._model.forward_on_instances(instances)
results = {"verbs": [], "words": outputs[0]["words"]}
for output in outputs:
tags = output["tags"]
description = self.make_srl_string(output["words"], tags)
results["verbs"].append(
{"verb": output["verb"], "description": description, "tags": tags}
)
return sanitize(results)
def predict_json(self, inputs: JsonDict) -> JsonDict:
"""
Expects JSON that looks like `{"sentence": "..."}`
and returns JSON that looks like
```
{"words": [...],
"verbs": [
{"verb": "...", "description": "...", "tags": [...]},
...
{"verb": "...", "description": "...", "tags": [...]},
]}
```
"""
instances = self._sentence_to_srl_instances(inputs)
if not instances:
return sanitize({"verbs": [], "words": self._tokenizer.tokenize(inputs["sentence"])})
return self.predict_instances(instances)
| allennlp-models-main | allennlp_models/structured_prediction/predictors/srl.py |
from typing import List, Dict
from allennlp.common.util import JsonDict, sanitize
from allennlp.data import DatasetReader, Instance
from allennlp.data.tokenizers import SpacyTokenizer
from allennlp.models import Model
from allennlp.predictors.predictor import Predictor
from allennlp.data.tokenizers import Token
def join_mwp(tags: List[str]) -> List[str]:
"""
Join multi-word predicates to a single
predicate ('V') token.
"""
ret = []
verb_flag = False
for tag in tags:
if "V" in tag:
# Create a continuous 'V' BIO span
prefix, _ = tag.split("-", 1)
if verb_flag:
# Continue a verb label across the different predicate parts
prefix = "I"
ret.append(f"{prefix}-V")
verb_flag = True
else:
ret.append(tag)
verb_flag = False
return ret
def make_oie_string(tokens: List[Token], tags: List[str]) -> str:
"""
Converts a list of model outputs (i.e., a list of lists of bio tags, each
pertaining to a single word), returns an inline bracket representation of
the prediction.
"""
frame = []
chunk = []
words = [token.text for token in tokens]
for (token, tag) in zip(words, tags):
if tag.startswith("I-"):
chunk.append(token)
else:
if chunk:
frame.append("[" + " ".join(chunk) + "]")
chunk = []
if tag.startswith("B-"):
chunk.append(tag[2:] + ": " + token)
elif tag == "O":
frame.append(token)
if chunk:
frame.append("[" + " ".join(chunk) + "]")
return " ".join(frame)
def get_predicate_indices(tags: List[str]) -> List[int]:
"""
Return the word indices of a predicate in BIO tags.
"""
return [ind for ind, tag in enumerate(tags) if "V" in tag]
def get_predicate_text(sent_tokens: List[Token], tags: List[str]) -> str:
"""
Get the predicate in this prediction.
"""
return " ".join([sent_tokens[pred_id].text for pred_id in get_predicate_indices(tags)])
def predicates_overlap(tags1: List[str], tags2: List[str]) -> bool:
"""
Tests whether the predicate in BIO tags1 overlap
with those of tags2.
"""
# Get predicate word indices from both predictions
pred_ind1 = get_predicate_indices(tags1)
pred_ind2 = get_predicate_indices(tags2)
# Return if pred_ind1 pred_ind2 overlap
return any(set.intersection(set(pred_ind1), set(pred_ind2)))
def get_coherent_next_tag(prev_label: str, cur_label: str) -> str:
"""
Generate a coherent tag, given previous tag and current label.
"""
if cur_label == "O":
# Don't need to add prefix to an "O" label
return "O"
if prev_label == cur_label:
return f"I-{cur_label}"
else:
return f"B-{cur_label}"
def merge_overlapping_predictions(tags1: List[str], tags2: List[str]) -> List[str]:
"""
Merge two predictions into one. Assumes the predicate in tags1 overlap with
the predicate of tags2.
"""
ret_sequence = []
prev_label = "O"
# Build a coherent sequence out of two
# spans which predicates' overlap
for tag1, tag2 in zip(tags1, tags2):
label1 = tag1.split("-", 1)[-1]
label2 = tag2.split("-", 1)[-1]
if (label1 == "V") or (label2 == "V"):
# Construct maximal predicate length -
# add predicate tag if any of the sequence predict it
cur_label = "V"
# Else - prefer an argument over 'O' label
elif label1 != "O":
cur_label = label1
else:
cur_label = label2
# Append cur tag to the returned sequence
cur_tag = get_coherent_next_tag(prev_label, cur_label)
prev_label = cur_label
ret_sequence.append(cur_tag)
return ret_sequence
def consolidate_predictions(
outputs: List[List[str]], sent_tokens: List[Token]
) -> Dict[str, List[str]]:
"""
Identify that certain predicates are part of a multiword predicate
(e.g., "decided to run") in which case, we don't need to return
the embedded predicate ("run").
"""
pred_dict: Dict[str, List[str]] = {}
merged_outputs = [join_mwp(output) for output in outputs]
predicate_texts = [get_predicate_text(sent_tokens, tags) for tags in merged_outputs]
for pred1_text, tags1 in zip(predicate_texts, merged_outputs):
# A flag indicating whether to add tags1 to predictions
add_to_prediction = True
# Check if this predicate overlaps another predicate
for pred2_text, tags2 in pred_dict.items():
if predicates_overlap(tags1, tags2):
# tags1 overlaps tags2
pred_dict[pred2_text] = merge_overlapping_predictions(tags1, tags2)
add_to_prediction = False
# This predicate doesn't overlap - add as a new predicate
if add_to_prediction:
pred_dict[pred1_text] = tags1
return pred_dict
def sanitize_label(label: str) -> str:
"""
Sanitize a BIO label - this deals with OIE
labels sometimes having some noise, as parentheses.
"""
if "-" in label:
prefix, suffix = label.split("-", 1)
suffix = suffix.split("(")[-1]
return f"{prefix}-{suffix}"
else:
return label
@Predictor.register("open_information_extraction")
class OpenIePredictor(Predictor):
"""
Predictor for the [`SemanticRolelabeler`](../models/semantic_role_labeler.md) model
(in its Open Information variant).
Used by online demo and for prediction on an input file using command line.
"""
def __init__(
self, model: Model, dataset_reader: DatasetReader, language: str = "en_core_web_sm"
) -> None:
super().__init__(model, dataset_reader)
self._language = language
self._tokenizer = SpacyTokenizer(language=language, pos_tags=True)
def _json_to_instance(self, json_dict: JsonDict) -> Instance:
"""
Expects JSON that looks like `{"sentence": "...", "predicate_index": "..."}`.
Assumes sentence is tokenized, and that predicate_index points to a specific
predicate (word index) within the sentence, for which to produce Open IE extractions.
"""
tokens = json_dict["sentence"]
predicate_index = int(json_dict["predicate_index"])
verb_labels = [0 for _ in tokens]
verb_labels[predicate_index] = 1
return self._dataset_reader.text_to_instance(tokens, verb_labels)
def predict_json(self, inputs: JsonDict) -> JsonDict:
"""
Create instance(s) after predicting the format. One sentence containing multiple verbs
will lead to multiple instances.
Expects JSON that looks like `{"sentence": "..."}`
Returns a JSON that looks like:
```
{"tokens": [...],
"tag_spans": [{"ARG0": "...",
"V": "...",
"ARG1": "...",
...}]}
```
"""
sent_tokens = self._tokenizer.tokenize(inputs["sentence"])
# Find all verbs in the input sentence
pred_ids = [
i
for (i, t) in enumerate(sent_tokens)
if t.pos_ == "VERB" or (self._language.startswith("en_") and t.pos_ == "AUX")
]
# Create instances
instances = [
self._json_to_instance({"sentence": sent_tokens, "predicate_index": pred_id})
for pred_id in pred_ids
]
# Run model
outputs = [
[sanitize_label(label) for label in self._model.forward_on_instance(instance)["tags"]]
for instance in instances
]
# Consolidate predictions
pred_dict = consolidate_predictions(outputs, sent_tokens)
# Build and return output dictionary
results = {"verbs": [], "words": sent_tokens}
for tags in pred_dict.values():
# Join multi-word predicates
tags = join_mwp(tags)
# Create description text
description = make_oie_string(sent_tokens, tags)
# Add a predicate prediction to the return dictionary.
results["verbs"].append(
{
"verb": get_predicate_text(sent_tokens, tags),
"description": description,
"tags": tags,
}
)
return sanitize(results)
| allennlp-models-main | allennlp_models/structured_prediction/predictors/openie.py |
from typing import Dict, Tuple, Any, List
import logging
import copy
import torch
import torch.nn.functional as F
from torch.nn.modules import Dropout
import numpy
from allennlp.common.checks import check_dimensions_match, ConfigurationError
from allennlp.data import TextFieldTensors, Vocabulary
from allennlp.modules import Seq2SeqEncoder, TextFieldEmbedder, Embedding, InputVariationalDropout
from allennlp.modules.matrix_attention.bilinear_matrix_attention import BilinearMatrixAttention
from allennlp.modules import FeedForward
from allennlp.models.model import Model
from allennlp.nn import InitializerApplicator, Activation
from allennlp.nn.util import get_text_field_mask, get_range_vector
from allennlp.nn.util import (
get_device_of,
masked_log_softmax,
get_lengths_from_binary_sequence_mask,
)
from allennlp.nn.chu_liu_edmonds import decode_mst
from allennlp.training.metrics import AttachmentScores
logger = logging.getLogger(__name__)
POS_TO_IGNORE = {"`", "''", ":", ",", ".", "PU", "PUNCT", "SYM"}
@Model.register("biaffine_parser")
class BiaffineDependencyParser(Model):
"""
This dependency parser follows the model of
[Deep Biaffine Attention for Neural Dependency Parsing (Dozat and Manning, 2016)]
(https://arxiv.org/abs/1611.01734) .
Word representations are generated using a bidirectional LSTM,
followed by separate biaffine classifiers for pairs of words,
predicting whether a directed arc exists between the two words
and the dependency label the arc should have. Decoding can either
be done greedily, or the optimal Minimum Spanning Tree can be
decoded using Edmond's algorithm by viewing the dependency tree as
a MST on a fully connected graph, where nodes are words and edges
are scored dependency arcs.
# Parameters
vocab : `Vocabulary`, required
A Vocabulary, required in order to compute sizes for input/output projections.
text_field_embedder : `TextFieldEmbedder`, required
Used to embed the `tokens` `TextField` we get as input to the model.
encoder : `Seq2SeqEncoder`
The encoder (with its own internal stacking) that we will use to generate representations
of tokens.
tag_representation_dim : `int`, required.
The dimension of the MLPs used for dependency tag prediction.
arc_representation_dim : `int`, required.
The dimension of the MLPs used for head arc prediction.
tag_feedforward : `FeedForward`, optional, (default = `None`).
The feedforward network used to produce tag representations.
By default, a 1 layer feedforward network with an elu activation is used.
arc_feedforward : `FeedForward`, optional, (default = `None`).
The feedforward network used to produce arc representations.
By default, a 1 layer feedforward network with an elu activation is used.
pos_tag_embedding : `Embedding`, optional.
Used to embed the `pos_tags` `SequenceLabelField` we get as input to the model.
use_mst_decoding_for_validation : `bool`, optional (default = `True`).
Whether to use Edmond's algorithm to find the optimal minimum spanning tree during validation.
If false, decoding is greedy.
dropout : `float`, optional, (default = `0.0`)
The variational dropout applied to the output of the encoder and MLP layers.
input_dropout : `float`, optional, (default = `0.0`)
The dropout applied to the embedded text input.
initializer : `InitializerApplicator`, optional (default=`InitializerApplicator()`)
Used to initialize the model parameters.
"""
def __init__(
self,
vocab: Vocabulary,
text_field_embedder: TextFieldEmbedder,
encoder: Seq2SeqEncoder,
tag_representation_dim: int,
arc_representation_dim: int,
tag_feedforward: FeedForward = None,
arc_feedforward: FeedForward = None,
pos_tag_embedding: Embedding = None,
use_mst_decoding_for_validation: bool = True,
dropout: float = 0.0,
input_dropout: float = 0.0,
initializer: InitializerApplicator = InitializerApplicator(),
**kwargs,
) -> None:
super().__init__(vocab, **kwargs)
self.text_field_embedder = text_field_embedder
self.encoder = encoder
encoder_dim = encoder.get_output_dim()
self.head_arc_feedforward = arc_feedforward or FeedForward(
encoder_dim, 1, arc_representation_dim, Activation.by_name("elu")()
)
self.child_arc_feedforward = copy.deepcopy(self.head_arc_feedforward)
self.arc_attention = BilinearMatrixAttention(
arc_representation_dim, arc_representation_dim, use_input_biases=True
)
num_labels = self.vocab.get_vocab_size("head_tags")
self.head_tag_feedforward = tag_feedforward or FeedForward(
encoder_dim, 1, tag_representation_dim, Activation.by_name("elu")()
)
self.child_tag_feedforward = copy.deepcopy(self.head_tag_feedforward)
self.tag_bilinear = torch.nn.modules.Bilinear(
tag_representation_dim, tag_representation_dim, num_labels
)
self._pos_tag_embedding = pos_tag_embedding or None
self._dropout = InputVariationalDropout(dropout)
self._input_dropout = Dropout(input_dropout)
self._head_sentinel = torch.nn.Parameter(torch.randn([1, 1, encoder.get_output_dim()]))
representation_dim = text_field_embedder.get_output_dim()
if pos_tag_embedding is not None:
representation_dim += pos_tag_embedding.get_output_dim()
check_dimensions_match(
representation_dim,
encoder.get_input_dim(),
"text field embedding dim",
"encoder input dim",
)
check_dimensions_match(
tag_representation_dim,
self.head_tag_feedforward.get_output_dim(),
"tag representation dim",
"tag feedforward output dim",
)
check_dimensions_match(
arc_representation_dim,
self.head_arc_feedforward.get_output_dim(),
"arc representation dim",
"arc feedforward output dim",
)
self.use_mst_decoding_for_validation = use_mst_decoding_for_validation
tags = self.vocab.get_token_to_index_vocabulary("pos")
punctuation_tag_indices = {
tag: index for tag, index in tags.items() if tag in POS_TO_IGNORE
}
self._pos_to_ignore = set(punctuation_tag_indices.values())
logger.info(
f"Found POS tags corresponding to the following punctuation : {punctuation_tag_indices}. "
"Ignoring words with these POS tags for evaluation."
)
self._attachment_scores = AttachmentScores()
initializer(self)
def forward(
self, # type: ignore
words: TextFieldTensors,
pos_tags: torch.LongTensor,
metadata: List[Dict[str, Any]],
head_tags: torch.LongTensor = None,
head_indices: torch.LongTensor = None,
) -> Dict[str, torch.Tensor]:
"""
# Parameters
words : `TextFieldTensors`, required
The output of `TextField.as_array()`, which should typically be passed directly to a
`TextFieldEmbedder`. This output is a dictionary mapping keys to `TokenIndexer`
tensors. At its most basic, using a `SingleIdTokenIndexer` this is : `{"tokens":
Tensor(batch_size, sequence_length)}`. This dictionary will have the same keys as were used
for the `TokenIndexers` when you created the `TextField` representing your
sequence. The dictionary is designed to be passed directly to a `TextFieldEmbedder`,
which knows how to combine different word representations into a single vector per
token in your input.
pos_tags : `torch.LongTensor`, required
The output of a `SequenceLabelField` containing POS tags.
POS tags are required regardless of whether they are used in the model,
because they are used to filter the evaluation metric to only consider
heads of words which are not punctuation.
metadata : `List[Dict[str, Any]]`, optional (default=`None`)
A dictionary of metadata for each batch element which has keys:
words : `List[str]`, required.
The tokens in the original sentence.
pos : `List[str]`, required.
The dependencies POS tags for each word.
head_tags : `torch.LongTensor`, optional (default = `None`)
A torch tensor representing the sequence of integer gold class labels for the arcs
in the dependency parse. Has shape `(batch_size, sequence_length)`.
head_indices : `torch.LongTensor`, optional (default = `None`)
A torch tensor representing the sequence of integer indices denoting the parent of every
word in the dependency parse. Has shape `(batch_size, sequence_length)`.
# Returns
An output dictionary consisting of:
loss : `torch.FloatTensor`, optional
A scalar loss to be optimised.
arc_loss : `torch.FloatTensor`
The loss contribution from the unlabeled arcs.
loss : `torch.FloatTensor`, optional
The loss contribution from predicting the dependency
tags for the gold arcs.
heads : `torch.FloatTensor`
The predicted head indices for each word. A tensor
of shape (batch_size, sequence_length).
head_types : `torch.FloatTensor`
The predicted head types for each arc. A tensor
of shape (batch_size, sequence_length).
mask : `torch.BoolTensor`
A mask denoting the padded elements in the batch.
"""
embedded_text_input = self.text_field_embedder(words)
if pos_tags is not None and self._pos_tag_embedding is not None:
embedded_pos_tags = self._pos_tag_embedding(pos_tags)
embedded_text_input = torch.cat([embedded_text_input, embedded_pos_tags], -1)
elif self._pos_tag_embedding is not None:
raise ConfigurationError("Model uses a POS embedding, but no POS tags were passed.")
mask = get_text_field_mask(words)
predicted_heads, predicted_head_tags, mask, arc_nll, tag_nll = self._parse(
embedded_text_input, mask, head_tags, head_indices
)
loss = arc_nll + tag_nll
if head_indices is not None and head_tags is not None:
evaluation_mask = self._get_mask_for_eval(mask[:, 1:], pos_tags)
# We calculate attachment scores for the whole sentence
# but excluding the symbolic ROOT token at the start,
# which is why we start from the second element in the sequence.
self._attachment_scores(
predicted_heads[:, 1:],
predicted_head_tags[:, 1:],
head_indices,
head_tags,
evaluation_mask,
)
output_dict = {
"heads": predicted_heads,
"head_tags": predicted_head_tags,
"arc_loss": arc_nll,
"tag_loss": tag_nll,
"loss": loss,
"mask": mask,
"words": [meta["words"] for meta in metadata],
"pos": [meta["pos"] for meta in metadata],
}
return output_dict
def make_output_human_readable(
self, output_dict: Dict[str, torch.Tensor]
) -> Dict[str, torch.Tensor]:
head_tags = output_dict.pop("head_tags").cpu().detach().numpy()
heads = output_dict.pop("heads").cpu().detach().numpy()
mask = output_dict.pop("mask")
lengths = get_lengths_from_binary_sequence_mask(mask)
head_tag_labels = []
head_indices = []
for instance_heads, instance_tags, length in zip(heads, head_tags, lengths):
instance_heads = list(instance_heads[1:length])
instance_tags = instance_tags[1:length]
labels = [
self.vocab.get_token_from_index(label, "head_tags") for label in instance_tags
]
head_tag_labels.append(labels)
head_indices.append(instance_heads)
output_dict["predicted_dependencies"] = head_tag_labels
output_dict["predicted_heads"] = head_indices
return output_dict
def _parse(
self,
embedded_text_input: torch.Tensor,
mask: torch.BoolTensor,
head_tags: torch.LongTensor = None,
head_indices: torch.LongTensor = None,
) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]:
embedded_text_input = self._input_dropout(embedded_text_input)
encoded_text = self.encoder(embedded_text_input, mask)
batch_size, _, encoding_dim = encoded_text.size()
head_sentinel = self._head_sentinel.expand(batch_size, 1, encoding_dim)
# Concatenate the head sentinel onto the sentence representation.
encoded_text = torch.cat([head_sentinel, encoded_text], 1)
mask = torch.cat([mask.new_ones(batch_size, 1), mask], 1)
if head_indices is not None:
head_indices = torch.cat([head_indices.new_zeros(batch_size, 1), head_indices], 1)
if head_tags is not None:
head_tags = torch.cat([head_tags.new_zeros(batch_size, 1), head_tags], 1)
encoded_text = self._dropout(encoded_text)
# shape (batch_size, sequence_length, arc_representation_dim)
head_arc_representation = self._dropout(self.head_arc_feedforward(encoded_text))
child_arc_representation = self._dropout(self.child_arc_feedforward(encoded_text))
# shape (batch_size, sequence_length, tag_representation_dim)
head_tag_representation = self._dropout(self.head_tag_feedforward(encoded_text))
child_tag_representation = self._dropout(self.child_tag_feedforward(encoded_text))
# shape (batch_size, sequence_length, sequence_length)
attended_arcs = self.arc_attention(head_arc_representation, child_arc_representation)
minus_inf = -1e8
minus_mask = ~mask * minus_inf
attended_arcs = attended_arcs + minus_mask.unsqueeze(2) + minus_mask.unsqueeze(1)
if self.training or not self.use_mst_decoding_for_validation:
predicted_heads, predicted_head_tags = self._greedy_decode(
head_tag_representation, child_tag_representation, attended_arcs, mask
)
else:
predicted_heads, predicted_head_tags = self._mst_decode(
head_tag_representation, child_tag_representation, attended_arcs, mask
)
if head_indices is not None and head_tags is not None:
arc_nll, tag_nll = self._construct_loss(
head_tag_representation=head_tag_representation,
child_tag_representation=child_tag_representation,
attended_arcs=attended_arcs,
head_indices=head_indices,
head_tags=head_tags,
mask=mask,
)
else:
arc_nll, tag_nll = self._construct_loss(
head_tag_representation=head_tag_representation,
child_tag_representation=child_tag_representation,
attended_arcs=attended_arcs,
head_indices=predicted_heads.long(),
head_tags=predicted_head_tags.long(),
mask=mask,
)
return predicted_heads, predicted_head_tags, mask, arc_nll, tag_nll
def _construct_loss(
self,
head_tag_representation: torch.Tensor,
child_tag_representation: torch.Tensor,
attended_arcs: torch.Tensor,
head_indices: torch.Tensor,
head_tags: torch.Tensor,
mask: torch.BoolTensor,
) -> Tuple[torch.Tensor, torch.Tensor]:
"""
Computes the arc and tag loss for a sequence given gold head indices and tags.
# Parameters
head_tag_representation : `torch.Tensor`, required.
A tensor of shape (batch_size, sequence_length, tag_representation_dim),
which will be used to generate predictions for the dependency tags
for the given arcs.
child_tag_representation : `torch.Tensor`, required
A tensor of shape (batch_size, sequence_length, tag_representation_dim),
which will be used to generate predictions for the dependency tags
for the given arcs.
attended_arcs : `torch.Tensor`, required.
A tensor of shape (batch_size, sequence_length, sequence_length) used to generate
a distribution over attachments of a given word to all other words.
head_indices : `torch.Tensor`, required.
A tensor of shape (batch_size, sequence_length).
The indices of the heads for every word.
head_tags : `torch.Tensor`, required.
A tensor of shape (batch_size, sequence_length).
The dependency labels of the heads for every word.
mask : `torch.BoolTensor`, required.
A mask of shape (batch_size, sequence_length), denoting unpadded
elements in the sequence.
# Returns
arc_nll : `torch.Tensor`, required.
The negative log likelihood from the arc loss.
tag_nll : `torch.Tensor`, required.
The negative log likelihood from the arc tag loss.
"""
batch_size, sequence_length, _ = attended_arcs.size()
# shape (batch_size, 1)
range_vector = get_range_vector(batch_size, get_device_of(attended_arcs)).unsqueeze(1)
# shape (batch_size, sequence_length, sequence_length)
normalised_arc_logits = (
masked_log_softmax(attended_arcs, mask) * mask.unsqueeze(2) * mask.unsqueeze(1)
)
# shape (batch_size, sequence_length, num_head_tags)
head_tag_logits = self._get_head_tags(
head_tag_representation, child_tag_representation, head_indices
)
normalised_head_tag_logits = masked_log_softmax(
head_tag_logits, mask.unsqueeze(-1)
) * mask.unsqueeze(-1)
# index matrix with shape (batch, sequence_length)
timestep_index = get_range_vector(sequence_length, get_device_of(attended_arcs))
child_index = (
timestep_index.view(1, sequence_length).expand(batch_size, sequence_length).long()
)
# shape (batch_size, sequence_length)
arc_loss = normalised_arc_logits[range_vector, child_index, head_indices]
tag_loss = normalised_head_tag_logits[range_vector, child_index, head_tags]
# We don't care about predictions for the symbolic ROOT token's head,
# so we remove it from the loss.
arc_loss = arc_loss[:, 1:]
tag_loss = tag_loss[:, 1:]
# The number of valid positions is equal to the number of unmasked elements minus
# 1 per sequence in the batch, to account for the symbolic HEAD token.
valid_positions = mask.sum() - batch_size
arc_nll = -arc_loss.sum() / valid_positions.float()
tag_nll = -tag_loss.sum() / valid_positions.float()
return arc_nll, tag_nll
def _greedy_decode(
self,
head_tag_representation: torch.Tensor,
child_tag_representation: torch.Tensor,
attended_arcs: torch.Tensor,
mask: torch.BoolTensor,
) -> Tuple[torch.Tensor, torch.Tensor]:
"""
Decodes the head and head tag predictions by decoding the unlabeled arcs
independently for each word and then again, predicting the head tags of
these greedily chosen arcs independently. Note that this method of decoding
is not guaranteed to produce trees (i.e. there maybe be multiple roots,
or cycles when children are attached to their parents).
# Parameters
head_tag_representation : `torch.Tensor`, required.
A tensor of shape (batch_size, sequence_length, tag_representation_dim),
which will be used to generate predictions for the dependency tags
for the given arcs.
child_tag_representation : `torch.Tensor`, required
A tensor of shape (batch_size, sequence_length, tag_representation_dim),
which will be used to generate predictions for the dependency tags
for the given arcs.
attended_arcs : `torch.Tensor`, required.
A tensor of shape (batch_size, sequence_length, sequence_length) used to generate
a distribution over attachments of a given word to all other words.
# Returns
heads : `torch.Tensor`
A tensor of shape (batch_size, sequence_length) representing the
greedily decoded heads of each word.
head_tags : `torch.Tensor`
A tensor of shape (batch_size, sequence_length) representing the
dependency tags of the greedily decoded heads of each word.
"""
# Mask the diagonal, because the head of a word can't be itself.
attended_arcs = attended_arcs + torch.diag(
attended_arcs.new(mask.size(1)).fill_(-numpy.inf)
)
# Mask padded tokens, because we only want to consider actual words as heads.
if mask is not None:
minus_mask = ~mask.unsqueeze(2)
attended_arcs.masked_fill_(minus_mask, -numpy.inf)
# Compute the heads greedily.
# shape (batch_size, sequence_length)
_, heads = attended_arcs.max(dim=2)
# Given the greedily predicted heads, decode their dependency tags.
# shape (batch_size, sequence_length, num_head_tags)
head_tag_logits = self._get_head_tags(
head_tag_representation, child_tag_representation, heads
)
_, head_tags = head_tag_logits.max(dim=2)
return heads, head_tags
def _mst_decode(
self,
head_tag_representation: torch.Tensor,
child_tag_representation: torch.Tensor,
attended_arcs: torch.Tensor,
mask: torch.BoolTensor,
) -> Tuple[torch.Tensor, torch.Tensor]:
"""
Decodes the head and head tag predictions using the Edmonds' Algorithm
for finding minimum spanning trees on directed graphs. Nodes in the
graph are the words in the sentence, and between each pair of nodes,
there is an edge in each direction, where the weight of the edge corresponds
to the most likely dependency label probability for that arc. The MST is
then generated from this directed graph.
# Parameters
head_tag_representation : `torch.Tensor`, required.
A tensor of shape (batch_size, sequence_length, tag_representation_dim),
which will be used to generate predictions for the dependency tags
for the given arcs.
child_tag_representation : `torch.Tensor`, required
A tensor of shape (batch_size, sequence_length, tag_representation_dim),
which will be used to generate predictions for the dependency tags
for the given arcs.
attended_arcs : `torch.Tensor`, required.
A tensor of shape (batch_size, sequence_length, sequence_length) used to generate
a distribution over attachments of a given word to all other words.
# Returns
heads : `torch.Tensor`
A tensor of shape (batch_size, sequence_length) representing the
greedily decoded heads of each word.
head_tags : `torch.Tensor`
A tensor of shape (batch_size, sequence_length) representing the
dependency tags of the optimally decoded heads of each word.
"""
batch_size, sequence_length, tag_representation_dim = head_tag_representation.size()
lengths = mask.data.sum(dim=1).long().cpu().numpy()
expanded_shape = [batch_size, sequence_length, sequence_length, tag_representation_dim]
head_tag_representation = head_tag_representation.unsqueeze(2)
head_tag_representation = head_tag_representation.expand(*expanded_shape).contiguous()
child_tag_representation = child_tag_representation.unsqueeze(1)
child_tag_representation = child_tag_representation.expand(*expanded_shape).contiguous()
# Shape (batch_size, sequence_length, sequence_length, num_head_tags)
pairwise_head_logits = self.tag_bilinear(head_tag_representation, child_tag_representation)
# Note that this log_softmax is over the tag dimension, and we don't consider pairs
# of tags which are invalid (e.g are a pair which includes a padded element) anyway below.
# Shape (batch, num_labels,sequence_length, sequence_length)
normalized_pairwise_head_logits = F.log_softmax(pairwise_head_logits, dim=3).permute(
0, 3, 1, 2
)
# Mask padded tokens, because we only want to consider actual words as heads.
minus_inf = -1e8
minus_mask = ~mask * minus_inf
attended_arcs = attended_arcs + minus_mask.unsqueeze(2) + minus_mask.unsqueeze(1)
# Shape (batch_size, sequence_length, sequence_length)
normalized_arc_logits = F.log_softmax(attended_arcs, dim=2).transpose(1, 2)
# Shape (batch_size, num_head_tags, sequence_length, sequence_length)
# This energy tensor expresses the following relation:
# energy[i,j] = "Score that i is the head of j". In this
# case, we have heads pointing to their children.
batch_energy = torch.exp(
normalized_arc_logits.unsqueeze(1) + normalized_pairwise_head_logits
)
return self._run_mst_decoding(batch_energy, lengths)
@staticmethod
def _run_mst_decoding(
batch_energy: torch.Tensor, lengths: torch.Tensor
) -> Tuple[torch.Tensor, torch.Tensor]:
heads = []
head_tags = []
for energy, length in zip(batch_energy.detach().cpu(), lengths):
scores, tag_ids = energy.max(dim=0)
# Although we need to include the root node so that the MST includes it,
# we do not want any word to be the parent of the root node.
# Here, we enforce this by setting the scores for all word -> ROOT edges
# edges to be 0.
scores[0, :] = 0
# Decode the heads. Because we modify the scores to prevent
# adding in word -> ROOT edges, we need to find the labels ourselves.
instance_heads, _ = decode_mst(scores.numpy(), length, has_labels=False)
# Find the labels which correspond to the edges in the max spanning tree.
instance_head_tags = []
for child, parent in enumerate(instance_heads):
instance_head_tags.append(tag_ids[parent, child].item())
# We don't care what the head or tag is for the root token, but by default it's
# not necessarily the same in the batched vs unbatched case, which is annoying.
# Here we'll just set them to zero.
instance_heads[0] = 0
instance_head_tags[0] = 0
heads.append(instance_heads)
head_tags.append(instance_head_tags)
return (
torch.from_numpy(numpy.stack(heads)).to(batch_energy.device),
torch.from_numpy(numpy.stack(head_tags)).to(batch_energy.device),
)
def _get_head_tags(
self,
head_tag_representation: torch.Tensor,
child_tag_representation: torch.Tensor,
head_indices: torch.Tensor,
) -> torch.Tensor:
"""
Decodes the head tags given the head and child tag representations
and a tensor of head indices to compute tags for. Note that these are
either gold or predicted heads, depending on whether this function is
being called to compute the loss, or if it's being called during inference.
# Parameters
head_tag_representation : `torch.Tensor`, required.
A tensor of shape (batch_size, sequence_length, tag_representation_dim),
which will be used to generate predictions for the dependency tags
for the given arcs.
child_tag_representation : `torch.Tensor`, required
A tensor of shape (batch_size, sequence_length, tag_representation_dim),
which will be used to generate predictions for the dependency tags
for the given arcs.
head_indices : `torch.Tensor`, required.
A tensor of shape (batch_size, sequence_length). The indices of the heads
for every word.
# Returns
head_tag_logits : `torch.Tensor`
A tensor of shape (batch_size, sequence_length, num_head_tags),
representing logits for predicting a distribution over tags
for each arc.
"""
batch_size = head_tag_representation.size(0)
# shape (batch_size,)
range_vector = get_range_vector(
batch_size, get_device_of(head_tag_representation)
).unsqueeze(1)
# This next statement is quite a complex piece of indexing, which you really
# need to read the docs to understand. See here:
# https://docs.scipy.org/doc/numpy-1.13.0/reference/arrays.indexing.html#advanced-indexing
# In effect, we are selecting the indices corresponding to the heads of each word from the
# sequence length dimension for each element in the batch.
# shape (batch_size, sequence_length, tag_representation_dim)
selected_head_tag_representations = head_tag_representation[range_vector, head_indices]
selected_head_tag_representations = selected_head_tag_representations.contiguous()
# shape (batch_size, sequence_length, num_head_tags)
head_tag_logits = self.tag_bilinear(
selected_head_tag_representations, child_tag_representation
)
return head_tag_logits
def _get_mask_for_eval(
self, mask: torch.BoolTensor, pos_tags: torch.LongTensor
) -> torch.LongTensor:
"""
Dependency evaluation excludes words are punctuation.
Here, we create a new mask to exclude word indices which
have a "punctuation-like" part of speech tag.
# Parameters
mask : `torch.BoolTensor`, required.
The original mask.
pos_tags : `torch.LongTensor`, required.
The pos tags for the sequence.
# Returns
A new mask, where any indices equal to labels
we should be ignoring are masked.
"""
new_mask = mask.detach()
for label in self._pos_to_ignore:
label_mask = pos_tags.eq(label)
new_mask = new_mask & ~label_mask
return new_mask
def get_metrics(self, reset: bool = False) -> Dict[str, float]:
return self._attachment_scores.get_metric(reset)
default_predictor = "biaffine_dependency_parser"
| allennlp-models-main | allennlp_models/structured_prediction/models/biaffine_dependency_parser.py |
from typing import Dict, Tuple, Any, List
import logging
import copy
import torch
from torch.nn.modules import Dropout
import numpy
from allennlp.common.checks import check_dimensions_match, ConfigurationError
from allennlp.data import TextFieldTensors, Vocabulary
from allennlp.modules import Seq2SeqEncoder, TextFieldEmbedder, Embedding, InputVariationalDropout
from allennlp.modules.matrix_attention.bilinear_matrix_attention import BilinearMatrixAttention
from allennlp.modules import FeedForward
from allennlp.models.model import Model
from allennlp.nn import InitializerApplicator, Activation
from allennlp.nn.util import min_value_of_dtype
from allennlp.nn.util import get_text_field_mask
from allennlp.nn.util import get_lengths_from_binary_sequence_mask
from allennlp.training.metrics import F1Measure
logger = logging.getLogger(__name__)
@Model.register("graph_parser")
@Model.register("sp-graph-parser")
class GraphParser(Model):
"""
A Parser for arbitrary graph structures.
Registered as a `Model` with name "graph_parser".
# Parameters
vocab : `Vocabulary`, required
A Vocabulary, required in order to compute sizes for input/output projections.
text_field_embedder : `TextFieldEmbedder`, required
Used to embed the `tokens` `TextField` we get as input to the model.
encoder : `Seq2SeqEncoder`
The encoder (with its own internal stacking) that we will use to generate representations
of tokens.
tag_representation_dim : `int`, required.
The dimension of the MLPs used for arc tag prediction.
arc_representation_dim : `int`, required.
The dimension of the MLPs used for arc prediction.
tag_feedforward : `FeedForward`, optional, (default = `None`).
The feedforward network used to produce tag representations.
By default, a 1 layer feedforward network with an elu activation is used.
arc_feedforward : `FeedForward`, optional, (default = `None`).
The feedforward network used to produce arc representations.
By default, a 1 layer feedforward network with an elu activation is used.
pos_tag_embedding : `Embedding`, optional.
Used to embed the `pos_tags` `SequenceLabelField` we get as input to the model.
dropout : `float`, optional, (default = `0.0`)
The variational dropout applied to the output of the encoder and MLP layers.
input_dropout : `float`, optional, (default = `0.0`)
The dropout applied to the embedded text input.
edge_prediction_threshold : `int`, optional (default = `0.5`)
The probability at which to consider a scored edge to be 'present'
in the decoded graph. Must be between 0 and 1.
initializer : `InitializerApplicator`, optional (default=`InitializerApplicator()`)
Used to initialize the model parameters.
"""
def __init__(
self,
vocab: Vocabulary,
text_field_embedder: TextFieldEmbedder,
encoder: Seq2SeqEncoder,
tag_representation_dim: int,
arc_representation_dim: int,
tag_feedforward: FeedForward = None,
arc_feedforward: FeedForward = None,
pos_tag_embedding: Embedding = None,
dropout: float = 0.0,
input_dropout: float = 0.0,
edge_prediction_threshold: float = 0.5,
initializer: InitializerApplicator = InitializerApplicator(),
**kwargs,
) -> None:
super().__init__(vocab, **kwargs)
self.text_field_embedder = text_field_embedder
self.encoder = encoder
self.edge_prediction_threshold = edge_prediction_threshold
if not 0 < edge_prediction_threshold < 1:
raise ConfigurationError(
f"edge_prediction_threshold must be between "
f"0 and 1 (exclusive) but found {edge_prediction_threshold}."
)
encoder_dim = encoder.get_output_dim()
self.head_arc_feedforward = arc_feedforward or FeedForward(
encoder_dim, 1, arc_representation_dim, Activation.by_name("elu")()
)
self.child_arc_feedforward = copy.deepcopy(self.head_arc_feedforward)
self.arc_attention = BilinearMatrixAttention(
arc_representation_dim, arc_representation_dim, use_input_biases=True
)
num_labels = self.vocab.get_vocab_size("labels")
self.head_tag_feedforward = tag_feedforward or FeedForward(
encoder_dim, 1, tag_representation_dim, Activation.by_name("elu")()
)
self.child_tag_feedforward = copy.deepcopy(self.head_tag_feedforward)
self.tag_bilinear = BilinearMatrixAttention(
tag_representation_dim, tag_representation_dim, label_dim=num_labels
)
self._pos_tag_embedding = pos_tag_embedding or None
self._dropout = InputVariationalDropout(dropout)
self._input_dropout = Dropout(input_dropout)
representation_dim = text_field_embedder.get_output_dim()
if pos_tag_embedding is not None:
representation_dim += pos_tag_embedding.get_output_dim()
check_dimensions_match(
representation_dim,
encoder.get_input_dim(),
"text field embedding dim",
"encoder input dim",
)
check_dimensions_match(
tag_representation_dim,
self.head_tag_feedforward.get_output_dim(),
"tag representation dim",
"tag feedforward output dim",
)
check_dimensions_match(
arc_representation_dim,
self.head_arc_feedforward.get_output_dim(),
"arc representation dim",
"arc feedforward output dim",
)
self._unlabelled_f1 = F1Measure(positive_label=1)
self._arc_loss = torch.nn.BCEWithLogitsLoss(reduction="none")
self._tag_loss = torch.nn.CrossEntropyLoss(reduction="none")
initializer(self)
def forward(
self, # type: ignore
tokens: TextFieldTensors,
pos_tags: torch.LongTensor = None,
metadata: List[Dict[str, Any]] = None,
arc_tags: torch.LongTensor = None,
) -> Dict[str, torch.Tensor]:
"""
# Parameters
tokens : `TextFieldTensors`, required
The output of `TextField.as_array()`.
pos_tags : `torch.LongTensor`, optional (default = `None`)
The output of a `SequenceLabelField` containing POS tags.
metadata : `List[Dict[str, Any]]`, optional (default = `None`)
A dictionary of metadata for each batch element which has keys:
tokens : `List[str]`, required.
The original string tokens in the sentence.
arc_tags : `torch.LongTensor`, optional (default = `None`)
A torch tensor representing the sequence of integer indices denoting the parent of every
word in the dependency parse. Has shape `(batch_size, sequence_length, sequence_length)`.
# Returns
An output dictionary.
"""
embedded_text_input = self.text_field_embedder(tokens)
if pos_tags is not None and self._pos_tag_embedding is not None:
embedded_pos_tags = self._pos_tag_embedding(pos_tags)
embedded_text_input = torch.cat([embedded_text_input, embedded_pos_tags], -1)
elif self._pos_tag_embedding is not None:
raise ConfigurationError("Model uses a POS embedding, but no POS tags were passed.")
mask = get_text_field_mask(tokens)
embedded_text_input = self._input_dropout(embedded_text_input)
encoded_text = self.encoder(embedded_text_input, mask)
encoded_text = self._dropout(encoded_text)
# shape (batch_size, sequence_length, arc_representation_dim)
head_arc_representation = self._dropout(self.head_arc_feedforward(encoded_text))
child_arc_representation = self._dropout(self.child_arc_feedforward(encoded_text))
# shape (batch_size, sequence_length, tag_representation_dim)
head_tag_representation = self._dropout(self.head_tag_feedforward(encoded_text))
child_tag_representation = self._dropout(self.child_tag_feedforward(encoded_text))
# shape (batch_size, sequence_length, sequence_length)
arc_scores = self.arc_attention(head_arc_representation, child_arc_representation)
# shape (batch_size, num_tags, sequence_length, sequence_length)
arc_tag_logits = self.tag_bilinear(head_tag_representation, child_tag_representation)
# Switch to (batch_size, sequence_length, sequence_length, num_tags)
arc_tag_logits = arc_tag_logits.permute(0, 2, 3, 1).contiguous()
# Since we'll be doing some additions, using the min value will cause underflow
minus_mask = ~mask * min_value_of_dtype(arc_scores.dtype) / 10
arc_scores = arc_scores + minus_mask.unsqueeze(2) + minus_mask.unsqueeze(1)
arc_probs, arc_tag_probs = self._greedy_decode(arc_scores, arc_tag_logits, mask)
output_dict = {"arc_probs": arc_probs, "arc_tag_probs": arc_tag_probs, "mask": mask}
if metadata:
output_dict["tokens"] = [meta["tokens"] for meta in metadata]
if arc_tags is not None:
arc_nll, tag_nll = self._construct_loss(
arc_scores=arc_scores, arc_tag_logits=arc_tag_logits, arc_tags=arc_tags, mask=mask
)
output_dict["loss"] = arc_nll + tag_nll
output_dict["arc_loss"] = arc_nll
output_dict["tag_loss"] = tag_nll
# Make the arc tags not have negative values anywhere
# (by default, no edge is indicated with -1).
arc_indices = (arc_tags != -1).float()
tag_mask = mask.unsqueeze(1) & mask.unsqueeze(2)
one_minus_arc_probs = 1 - arc_probs
# We stack scores here because the f1 measure expects a
# distribution, rather than a single value.
self._unlabelled_f1(
torch.stack([one_minus_arc_probs, arc_probs], -1), arc_indices, tag_mask
)
return output_dict
def make_output_human_readable(
self, output_dict: Dict[str, torch.Tensor]
) -> Dict[str, torch.Tensor]:
arc_tag_probs = output_dict["arc_tag_probs"].cpu().detach().numpy()
arc_probs = output_dict["arc_probs"].cpu().detach().numpy()
mask = output_dict["mask"]
lengths = get_lengths_from_binary_sequence_mask(mask)
arcs = []
arc_tags = []
for instance_arc_probs, instance_arc_tag_probs, length in zip(
arc_probs, arc_tag_probs, lengths
):
arc_matrix = instance_arc_probs > self.edge_prediction_threshold
edges = []
edge_tags = []
for i in range(length):
for j in range(length):
if arc_matrix[i, j] == 1:
edges.append((i, j))
tag = instance_arc_tag_probs[i, j].argmax(-1)
edge_tags.append(self.vocab.get_token_from_index(tag, "labels"))
arcs.append(edges)
arc_tags.append(edge_tags)
output_dict["arcs"] = arcs
output_dict["arc_tags"] = arc_tags
return output_dict
def _construct_loss(
self,
arc_scores: torch.Tensor,
arc_tag_logits: torch.Tensor,
arc_tags: torch.Tensor,
mask: torch.BoolTensor,
) -> Tuple[torch.Tensor, torch.Tensor]:
"""
Computes the arc and tag loss for an adjacency matrix.
# Parameters
arc_scores : `torch.Tensor`, required.
A tensor of shape (batch_size, sequence_length, sequence_length) used to generate a
binary classification decision for whether an edge is present between two words.
arc_tag_logits : `torch.Tensor`, required.
A tensor of shape (batch_size, sequence_length, sequence_length, num_tags) used to generate
a distribution over edge tags for a given edge.
arc_tags : `torch.Tensor`, required.
A tensor of shape (batch_size, sequence_length, sequence_length).
The labels for every arc.
mask : `torch.BoolTensor`, required.
A mask of shape (batch_size, sequence_length), denoting unpadded
elements in the sequence.
# Returns
arc_nll : `torch.Tensor`, required.
The negative log likelihood from the arc loss.
tag_nll : `torch.Tensor`, required.
The negative log likelihood from the arc tag loss.
"""
arc_indices = (arc_tags != -1).float()
# Make the arc tags not have negative values anywhere
# (by default, no edge is indicated with -1).
arc_tags = arc_tags * arc_indices
arc_nll = self._arc_loss(arc_scores, arc_indices) * mask.unsqueeze(1) * mask.unsqueeze(2)
# We want the mask for the tags to only include the unmasked words
# and we only care about the loss with respect to the gold arcs.
tag_mask = mask.unsqueeze(1) * mask.unsqueeze(2) * arc_indices
batch_size, sequence_length, _, num_tags = arc_tag_logits.size()
original_shape = [batch_size, sequence_length, sequence_length]
reshaped_logits = arc_tag_logits.view(-1, num_tags)
reshaped_tags = arc_tags.view(-1)
tag_nll = (
self._tag_loss(reshaped_logits, reshaped_tags.long()).view(original_shape) * tag_mask
)
valid_positions = tag_mask.sum()
arc_nll = arc_nll.sum() / valid_positions.float()
tag_nll = tag_nll.sum() / valid_positions.float()
return arc_nll, tag_nll
@staticmethod
def _greedy_decode(
arc_scores: torch.Tensor, arc_tag_logits: torch.Tensor, mask: torch.BoolTensor
) -> Tuple[torch.Tensor, torch.Tensor]:
"""
Decodes the head and head tag predictions by decoding the unlabeled arcs
independently for each word and then again, predicting the head tags of
these greedily chosen arcs independently.
# Parameters
arc_scores : `torch.Tensor`, required.
A tensor of shape (batch_size, sequence_length, sequence_length) used to generate
a distribution over attachments of a given word to all other words.
arc_tag_logits : `torch.Tensor`, required.
A tensor of shape (batch_size, sequence_length, sequence_length, num_tags) used to
generate a distribution over tags for each arc.
mask : `torch.BoolTensor`, required.
A mask of shape (batch_size, sequence_length).
# Returns
arc_probs : `torch.Tensor`
A tensor of shape (batch_size, sequence_length, sequence_length) representing the
probability of an arc being present for this edge.
arc_tag_probs : `torch.Tensor`
A tensor of shape (batch_size, sequence_length, sequence_length, sequence_length)
representing the distribution over edge tags for a given edge.
"""
# Mask the diagonal, because we don't self edges.
inf_diagonal_mask = torch.diag(arc_scores.new(mask.size(1)).fill_(-numpy.inf))
arc_scores = arc_scores + inf_diagonal_mask
# shape (batch_size, sequence_length, sequence_length, num_tags)
arc_tag_logits = arc_tag_logits + inf_diagonal_mask.unsqueeze(0).unsqueeze(-1)
# Mask padded tokens, because we only want to consider actual word -> word edges.
minus_mask = ~mask.unsqueeze(2)
arc_scores.masked_fill_(minus_mask, -numpy.inf)
arc_tag_logits.masked_fill_(minus_mask.unsqueeze(-1), -numpy.inf)
# shape (batch_size, sequence_length, sequence_length)
arc_probs = arc_scores.sigmoid()
# shape (batch_size, sequence_length, sequence_length, num_tags)
arc_tag_probs = torch.nn.functional.softmax(arc_tag_logits, dim=-1)
return arc_probs, arc_tag_probs
def get_metrics(self, reset: bool = False) -> Dict[str, float]:
return self._unlabelled_f1.get_metric(reset)
| allennlp-models-main | allennlp_models/structured_prediction/models/graph_parser.py |
import warnings
from typing import Dict, List, Any, Union
import torch
from torch.nn.modules import Linear, Dropout
import torch.nn.functional as F
from transformers.models.bert.configuration_bert import BertConfig
from transformers.models.bert.modeling_bert import BertModel
from allennlp.data import TextFieldTensors, Vocabulary
from allennlp.models.model import Model
from allennlp.nn import InitializerApplicator, util
from allennlp.nn.util import get_text_field_mask, sequence_cross_entropy_with_logits
from allennlp.nn.util import get_lengths_from_binary_sequence_mask, viterbi_decode
from allennlp_models.structured_prediction.metrics.srl_eval_scorer import (
DEFAULT_SRL_EVAL_PATH,
SrlEvalScorer,
)
@Model.register("srl_bert")
class SrlBert(Model):
"""
A BERT based model [Simple BERT Models for Relation Extraction and Semantic Role Labeling (Shi et al, 2019)]
(https://arxiv.org/abs/1904.05255) with some modifications (no additional parameters apart from a linear
classification layer), which is currently the state-of-the-art single model for English PropBank SRL
(Newswire sentences).
# Parameters
vocab : `Vocabulary`, required
A Vocabulary, required in order to compute sizes for input/output projections.
bert_model : `Union[str, Dict[str, Any], BertModel]`, required.
A string describing the BERT model to load, a BERT config in the form of a dictionary,
or an already constructed BertModel.
!!! Note
If you pass a config `bert_model` (a dictionary), pretrained weights will
not be cached and loaded! This is ideal if you're loading this model from an
AllenNLP archive since the weights you need will already be included in the
archive, but not what you want if you're training.
initializer : `InitializerApplicator`, optional (default=`InitializerApplicator()`)
Used to initialize the model parameters.
label_smoothing : `float`, optional (default = `0.0`)
Whether or not to use label smoothing on the labels when computing cross entropy loss.
ignore_span_metric : `bool`, optional (default = `False`)
Whether to calculate span loss, which is irrelevant when predicting BIO for Open Information Extraction.
srl_eval_path : `str`, optional (default=`DEFAULT_SRL_EVAL_PATH`)
The path to the srl-eval.pl script. By default, will use the srl-eval.pl included with allennlp,
which is located at allennlp/tools/srl-eval.pl . If `None`, srl-eval.pl is not used.
"""
def __init__(
self,
vocab: Vocabulary,
bert_model: Union[str, Dict[str, Any], BertModel],
embedding_dropout: float = 0.0,
initializer: InitializerApplicator = InitializerApplicator(),
label_smoothing: float = None,
ignore_span_metric: bool = False,
srl_eval_path: str = DEFAULT_SRL_EVAL_PATH,
**kwargs,
) -> None:
super().__init__(vocab, **kwargs)
if isinstance(bert_model, str):
self.bert_model = BertModel.from_pretrained(bert_model)
elif isinstance(bert_model, dict):
warnings.warn(
"Initializing BertModel without pretrained weights. This is fine if you're loading "
"from an AllenNLP archive, but not if you're training.",
UserWarning,
)
bert_config = BertConfig.from_dict(bert_model)
self.bert_model = BertModel(bert_config)
else:
self.bert_model = bert_model
self.num_classes = self.vocab.get_vocab_size("labels")
if srl_eval_path is not None:
# For the span based evaluation, we don't want to consider labels
# for verb, because the verb index is provided to the model.
self.span_metric = SrlEvalScorer(srl_eval_path, ignore_classes=["V"])
else:
self.span_metric = None
self.tag_projection_layer = Linear(self.bert_model.config.hidden_size, self.num_classes)
self.embedding_dropout = Dropout(p=embedding_dropout)
self._label_smoothing = label_smoothing
self.ignore_span_metric = ignore_span_metric
initializer(self)
def forward( # type: ignore
self,
tokens: TextFieldTensors,
verb_indicator: torch.Tensor,
metadata: List[Any],
tags: torch.LongTensor = None,
):
"""
# Parameters
tokens : `TextFieldTensors`, required
The output of `TextField.as_array()`, which should typically be passed directly to a
`TextFieldEmbedder`. For this model, this must be a `SingleIdTokenIndexer` which
indexes wordpieces from the BERT vocabulary.
verb_indicator: `torch.LongTensor`, required.
An integer `SequenceFeatureField` representation of the position of the verb
in the sentence. This should have shape (batch_size, num_tokens) and importantly, can be
all zeros, in the case that the sentence has no verbal predicate.
tags : `torch.LongTensor`, optional (default = `None`)
A torch tensor representing the sequence of integer gold class labels
of shape `(batch_size, num_tokens)`
metadata : `List[Dict[str, Any]]`, optional, (default = `None`)
metadata containing the original words in the sentence, the verb to compute the
frame for, and start offsets for converting wordpieces back to a sequence of words,
under 'words', 'verb' and 'offsets' keys, respectively.
# Returns
An output dictionary consisting of:
logits : `torch.FloatTensor`
A tensor of shape `(batch_size, num_tokens, tag_vocab_size)` representing
unnormalised log probabilities of the tag classes.
class_probabilities : `torch.FloatTensor`
A tensor of shape `(batch_size, num_tokens, tag_vocab_size)` representing
a distribution of the tag classes per word.
loss : `torch.FloatTensor`, optional
A scalar loss to be optimised.
"""
mask = get_text_field_mask(tokens)
bert_embeddings, _ = self.bert_model(
input_ids=util.get_token_ids_from_text_field_tensors(tokens),
token_type_ids=verb_indicator,
attention_mask=mask,
return_dict=False,
)
embedded_text_input = self.embedding_dropout(bert_embeddings)
batch_size, sequence_length, _ = embedded_text_input.size()
logits = self.tag_projection_layer(embedded_text_input)
reshaped_log_probs = logits.view(-1, self.num_classes)
class_probabilities = F.softmax(reshaped_log_probs, dim=-1).view(
[batch_size, sequence_length, self.num_classes]
)
output_dict = {"logits": logits, "class_probabilities": class_probabilities}
# We need to retain the mask in the output dictionary
# so that we can crop the sequences to remove padding
# when we do viterbi inference in self.make_output_human_readable.
output_dict["mask"] = mask
# We add in the offsets here so we can compute the un-wordpieced tags.
words, verbs, offsets = zip(*[(x["words"], x["verb"], x["offsets"]) for x in metadata])
output_dict["words"] = list(words)
output_dict["verb"] = list(verbs)
output_dict["wordpiece_offsets"] = list(offsets)
if tags is not None:
loss = sequence_cross_entropy_with_logits(
logits, tags, mask, label_smoothing=self._label_smoothing
)
if not self.ignore_span_metric and self.span_metric is not None and not self.training:
batch_verb_indices = [
example_metadata["verb_index"] for example_metadata in metadata
]
batch_sentences = [example_metadata["words"] for example_metadata in metadata]
# Get the BIO tags from make_output_human_readable()
# TODO (nfliu): This is kind of a hack, consider splitting out part
# of make_output_human_readable() to a separate function.
batch_bio_predicted_tags = self.make_output_human_readable(output_dict).pop("tags")
from allennlp_models.structured_prediction.models.srl import (
convert_bio_tags_to_conll_format,
)
batch_conll_predicted_tags = [
convert_bio_tags_to_conll_format(tags) for tags in batch_bio_predicted_tags
]
batch_bio_gold_tags = [
example_metadata["gold_tags"] for example_metadata in metadata
]
batch_conll_gold_tags = [
convert_bio_tags_to_conll_format(tags) for tags in batch_bio_gold_tags
]
self.span_metric(
batch_verb_indices,
batch_sentences,
batch_conll_predicted_tags,
batch_conll_gold_tags,
)
output_dict["loss"] = loss
return output_dict
def make_output_human_readable(
self, output_dict: Dict[str, torch.Tensor]
) -> Dict[str, torch.Tensor]:
"""
Does constrained viterbi decoding on class probabilities output in :func:`forward`. The
constraint simply specifies that the output tags must be a valid BIO sequence. We add a
`"tags"` key to the dictionary with the result.
NOTE: First, we decode a BIO sequence on top of the wordpieces. This is important; viterbi
decoding produces low quality output if you decode on top of word representations directly,
because the model gets confused by the 'missing' positions (which is sensible as it is trained
to perform tagging on wordpieces, not words).
Secondly, it's important that the indices we use to recover words from the wordpieces are the
start_offsets (i.e offsets which correspond to using the first wordpiece of words which are
tokenized into multiple wordpieces) as otherwise, we might get an ill-formed BIO sequence
when we select out the word tags from the wordpiece tags. This happens in the case that a word
is split into multiple word pieces, and then we take the last tag of the word, which might
correspond to, e.g, I-V, which would not be allowed as it is not preceeded by a B tag.
"""
all_predictions = output_dict["class_probabilities"]
sequence_lengths = get_lengths_from_binary_sequence_mask(output_dict["mask"]).data.tolist()
if all_predictions.dim() == 3:
predictions_list = [
all_predictions[i].detach().cpu() for i in range(all_predictions.size(0))
]
else:
predictions_list = [all_predictions]
wordpiece_tags = []
word_tags = []
transition_matrix = self.get_viterbi_pairwise_potentials()
start_transitions = self.get_start_transitions()
# **************** Different ********************
# We add in the offsets here so we can compute the un-wordpieced tags.
for predictions, length, offsets in zip(
predictions_list, sequence_lengths, output_dict["wordpiece_offsets"]
):
max_likelihood_sequence, _ = viterbi_decode(
predictions[:length], transition_matrix, allowed_start_transitions=start_transitions
)
tags = [
self.vocab.get_token_from_index(x, namespace="labels")
for x in max_likelihood_sequence
]
wordpiece_tags.append(tags)
word_tags.append([tags[i] for i in offsets])
output_dict["wordpiece_tags"] = wordpiece_tags
output_dict["tags"] = word_tags
return output_dict
def get_metrics(self, reset: bool = False):
if self.ignore_span_metric:
# Return an empty dictionary if ignoring the
# span metric
return {}
else:
metric_dict = self.span_metric.get_metric(reset=reset)
# This can be a lot of metrics, as there are 3 per class.
# we only really care about the overall metrics, so we filter for them here.
return {x: y for x, y in metric_dict.items() if "overall" in x}
def get_viterbi_pairwise_potentials(self):
"""
Generate a matrix of pairwise transition potentials for the BIO labels.
The only constraint implemented here is that I-XXX labels must be preceded
by either an identical I-XXX tag or a B-XXX tag. In order to achieve this
constraint, pairs of labels which do not satisfy this constraint have a
pairwise potential of -inf.
# Returns
transition_matrix : `torch.Tensor`
A `(num_labels, num_labels)` matrix of pairwise potentials.
"""
all_labels = self.vocab.get_index_to_token_vocabulary("labels")
num_labels = len(all_labels)
transition_matrix = torch.zeros([num_labels, num_labels])
for i, previous_label in all_labels.items():
for j, label in all_labels.items():
# I labels can only be preceded by themselves or
# their corresponding B tag.
if i != j and label[0] == "I" and not previous_label == "B" + label[1:]:
transition_matrix[i, j] = float("-inf")
return transition_matrix
def get_start_transitions(self):
"""
In the BIO sequence, we cannot start the sequence with an I-XXX tag.
This transition sequence is passed to viterbi_decode to specify this constraint.
# Returns
start_transitions : `torch.Tensor`
The pairwise potentials between a START token and
the first token of the sequence.
"""
all_labels = self.vocab.get_index_to_token_vocabulary("labels")
num_labels = len(all_labels)
start_transitions = torch.zeros(num_labels)
for i, label in all_labels.items():
if label[0] == "I":
start_transitions[i] = float("-inf")
return start_transitions
default_predictor = "semantic_role_labeling"
| allennlp-models-main | allennlp_models/structured_prediction/models/srl_bert.py |
from allennlp_models.structured_prediction.models.biaffine_dependency_parser import (
BiaffineDependencyParser,
)
from allennlp_models.structured_prediction.models.constituency_parser import SpanConstituencyParser
from allennlp_models.structured_prediction.models.graph_parser import GraphParser
from allennlp_models.structured_prediction.models.srl import SemanticRoleLabeler
from allennlp_models.structured_prediction.models.srl_bert import SrlBert
| allennlp-models-main | allennlp_models/structured_prediction/models/__init__.py |
from typing import Dict, Tuple, List, NamedTuple, Any
import torch
from torch.nn.modules.linear import Linear
from nltk import Tree
from allennlp.common.checks import check_dimensions_match
from allennlp.data import TextFieldTensors, Vocabulary
from allennlp.modules import Seq2SeqEncoder, TimeDistributed, TextFieldEmbedder, FeedForward
from allennlp.modules.token_embedders import Embedding
from allennlp.modules.span_extractors.span_extractor import SpanExtractor
from allennlp.models.model import Model
from allennlp.nn import InitializerApplicator
from allennlp.nn.util import get_text_field_mask, sequence_cross_entropy_with_logits
from allennlp.nn.util import masked_softmax, get_lengths_from_binary_sequence_mask
from allennlp.training.metrics import CategoricalAccuracy
from allennlp.training.metrics import EvalbBracketingScorer, DEFAULT_EVALB_DIR
from allennlp.common.checks import ConfigurationError
class SpanInformation(NamedTuple):
"""
A helper namedtuple for handling decoding information.
# Parameters
start : `int`
The start index of the span.
end : `int`
The exclusive end index of the span.
no_label_prob : `float`
The probability of this span being assigned the `NO-LABEL` label.
label_prob : `float`
The probability of the most likely label.
"""
start: int
end: int
label_prob: float
no_label_prob: float
label_index: int
@Model.register("constituency_parser")
class SpanConstituencyParser(Model):
"""
This `SpanConstituencyParser` simply encodes a sequence of text
with a stacked `Seq2SeqEncoder`, extracts span representations using a
`SpanExtractor`, and then predicts a label for each span in the sequence.
These labels are non-terminal nodes in a constituency parse tree, which we then
greedily reconstruct.
# Parameters
vocab : `Vocabulary`, required
A Vocabulary, required in order to compute sizes for input/output projections.
text_field_embedder : `TextFieldEmbedder`, required
Used to embed the `tokens` `TextField` we get as input to the model.
span_extractor : `SpanExtractor`, required.
The method used to extract the spans from the encoded sequence.
encoder : `Seq2SeqEncoder`, required.
The encoder that we will use in between embedding tokens and
generating span representations.
feedforward : `FeedForward`, required.
The FeedForward layer that we will use in between the encoder and the linear
projection to a distribution over span labels.
pos_tag_embedding : `Embedding`, optional.
Used to embed the `pos_tags` `SequenceLabelField` we get as input to the model.
initializer : `InitializerApplicator`, optional (default=`InitializerApplicator()`)
Used to initialize the model parameters.
evalb_directory_path : `str`, optional (default=`DEFAULT_EVALB_DIR`)
The path to the directory containing the EVALB executable used to score
bracketed parses. By default, will use the EVALB included with allennlp,
which is located at allennlp/tools/EVALB . If `None`, EVALB scoring
is not used.
"""
def __init__(
self,
vocab: Vocabulary,
text_field_embedder: TextFieldEmbedder,
span_extractor: SpanExtractor,
encoder: Seq2SeqEncoder,
feedforward: FeedForward = None,
pos_tag_embedding: Embedding = None,
initializer: InitializerApplicator = InitializerApplicator(),
evalb_directory_path: str = DEFAULT_EVALB_DIR,
**kwargs,
) -> None:
super().__init__(vocab, **kwargs)
self.text_field_embedder = text_field_embedder
self.span_extractor = span_extractor
self.num_classes = self.vocab.get_vocab_size("labels")
self.encoder = encoder
self.feedforward_layer = TimeDistributed(feedforward) if feedforward else None
self.pos_tag_embedding = pos_tag_embedding or None
if feedforward is not None:
output_dim = feedforward.get_output_dim()
else:
output_dim = span_extractor.get_output_dim()
self.tag_projection_layer = TimeDistributed(Linear(output_dim, self.num_classes))
representation_dim = text_field_embedder.get_output_dim()
if pos_tag_embedding is not None:
representation_dim += pos_tag_embedding.get_output_dim()
check_dimensions_match(
representation_dim,
encoder.get_input_dim(),
"representation dim (tokens + optional POS tags)",
"encoder input dim",
)
check_dimensions_match(
encoder.get_output_dim(),
span_extractor.get_input_dim(),
"encoder input dim",
"span extractor input dim",
)
if feedforward is not None:
check_dimensions_match(
span_extractor.get_output_dim(),
feedforward.get_input_dim(),
"span extractor output dim",
"feedforward input dim",
)
self.tag_accuracy = CategoricalAccuracy()
if evalb_directory_path is not None:
self._evalb_score = EvalbBracketingScorer(evalb_directory_path)
else:
self._evalb_score = None
initializer(self)
def forward(
self, # type: ignore
tokens: TextFieldTensors,
spans: torch.LongTensor,
metadata: List[Dict[str, Any]],
pos_tags: TextFieldTensors = None,
span_labels: torch.LongTensor = None,
) -> Dict[str, torch.Tensor]:
"""
# Parameters
tokens : `TextFieldTensors`, required
The output of `TextField.as_array()`, which should typically be passed directly to a
`TextFieldEmbedder`. This output is a dictionary mapping keys to `TokenIndexer`
tensors. At its most basic, using a `SingleIdTokenIndexer` this is : `{"tokens":
Tensor(batch_size, num_tokens)}`. This dictionary will have the same keys as were used
for the `TokenIndexers` when you created the `TextField` representing your
sequence. The dictionary is designed to be passed directly to a `TextFieldEmbedder`,
which knows how to combine different word representations into a single vector per
token in your input.
spans : `torch.LongTensor`, required.
A tensor of shape `(batch_size, num_spans, 2)` representing the
inclusive start and end indices of all possible spans in the sentence.
metadata : `List[Dict[str, Any]]`, required.
A dictionary of metadata for each batch element which has keys:
tokens : `List[str]`, required.
The original string tokens in the sentence.
gold_tree : `nltk.Tree`, optional (default = `None`)
Gold NLTK trees for use in evaluation.
pos_tags : `List[str]`, optional.
The POS tags for the sentence. These can be used in the
model as embedded features, but they are passed here
in addition for use in constructing the tree.
pos_tags : `torch.LongTensor`, optional (default = `None`)
The output of a `SequenceLabelField` containing POS tags.
span_labels : `torch.LongTensor`, optional (default = `None`)
A torch tensor representing the integer gold class labels for all possible
spans, of shape `(batch_size, num_spans)`.
# Returns
An output dictionary consisting of:
class_probabilities : `torch.FloatTensor`
A tensor of shape `(batch_size, num_spans, span_label_vocab_size)`
representing a distribution over the label classes per span.
spans : `torch.LongTensor`
The original spans tensor.
tokens : `List[List[str]]`, required.
A list of tokens in the sentence for each element in the batch.
pos_tags : `List[List[str]]`, required.
A list of POS tags in the sentence for each element in the batch.
num_spans : `torch.LongTensor`, required.
A tensor of shape (batch_size), representing the lengths of non-padded spans
in `enumerated_spans`.
loss : `torch.FloatTensor`, optional
A scalar loss to be optimised.
"""
embedded_text_input = self.text_field_embedder(tokens)
if pos_tags is not None and self.pos_tag_embedding is not None:
embedded_pos_tags = self.pos_tag_embedding(pos_tags)
embedded_text_input = torch.cat([embedded_text_input, embedded_pos_tags], -1)
elif self.pos_tag_embedding is not None:
raise ConfigurationError("Model uses a POS embedding, but no POS tags were passed.")
mask = get_text_field_mask(tokens)
# Looking at the span start index is enough to know if
# this is padding or not. Shape: (batch_size, num_spans)
span_mask = (spans[:, :, 0] >= 0).squeeze(-1)
if span_mask.dim() == 1:
# This happens if you use batch_size 1 and encounter
# a length 1 sentence in PTB, which do exist. -.-
span_mask = span_mask.unsqueeze(-1)
if span_labels is not None and span_labels.dim() == 1:
span_labels = span_labels.unsqueeze(-1)
num_spans = get_lengths_from_binary_sequence_mask(span_mask)
encoded_text = self.encoder(embedded_text_input, mask)
span_representations = self.span_extractor(encoded_text, spans, mask, span_mask)
if self.feedforward_layer is not None:
span_representations = self.feedforward_layer(span_representations)
logits = self.tag_projection_layer(span_representations)
class_probabilities = masked_softmax(logits, span_mask.unsqueeze(-1))
output_dict = {
"class_probabilities": class_probabilities,
"spans": spans,
"tokens": [meta["tokens"] for meta in metadata],
"pos_tags": [meta.get("pos_tags") for meta in metadata],
"num_spans": num_spans,
}
if span_labels is not None:
loss = sequence_cross_entropy_with_logits(logits, span_labels, span_mask)
self.tag_accuracy(class_probabilities, span_labels, span_mask)
output_dict["loss"] = loss
# The evalb score is expensive to compute, so we only compute
# it for the validation and test sets.
batch_gold_trees = [meta.get("gold_tree") for meta in metadata]
if all(batch_gold_trees) and self._evalb_score is not None and not self.training:
gold_pos_tags: List[List[str]] = [
list(zip(*tree.pos()))[1] for tree in batch_gold_trees
]
predicted_trees = self.construct_trees(
class_probabilities.cpu().data,
spans.cpu().data,
num_spans.data,
output_dict["tokens"],
gold_pos_tags,
)
self._evalb_score(predicted_trees, batch_gold_trees)
return output_dict
def make_output_human_readable(
self, output_dict: Dict[str, torch.Tensor]
) -> Dict[str, torch.Tensor]:
"""
Constructs an NLTK `Tree` given the scored spans. We also switch to exclusive
span ends when constructing the tree representation, because it makes indexing
into lists cleaner for ranges of text, rather than individual indices.
Finally, for batch prediction, we will have padded spans and class probabilities.
In order to make this less confusing, we remove all the padded spans and
distributions from `spans` and `class_probabilities` respectively.
"""
all_predictions = output_dict["class_probabilities"].cpu().data
all_spans = output_dict["spans"].cpu().data
all_sentences = output_dict["tokens"]
all_pos_tags = output_dict["pos_tags"] if all(output_dict["pos_tags"]) else None
num_spans = output_dict["num_spans"].data
trees = self.construct_trees(
all_predictions, all_spans, num_spans, all_sentences, all_pos_tags
)
batch_size = all_predictions.size(0)
output_dict["spans"] = [all_spans[i, : num_spans[i]] for i in range(batch_size)]
output_dict["class_probabilities"] = [
all_predictions[i, : num_spans[i], :] for i in range(batch_size)
]
output_dict["trees"] = trees
return output_dict
def construct_trees(
self,
predictions: torch.FloatTensor,
all_spans: torch.LongTensor,
num_spans: torch.LongTensor,
sentences: List[List[str]],
pos_tags: List[List[str]] = None,
) -> List[Tree]:
"""
Construct `nltk.Tree`'s for each batch element by greedily nesting spans.
The trees use exclusive end indices, which contrasts with how spans are
represented in the rest of the model.
# Parameters
predictions : `torch.FloatTensor`, required.
A tensor of shape `(batch_size, num_spans, span_label_vocab_size)`
representing a distribution over the label classes per span.
all_spans : `torch.LongTensor`, required.
A tensor of shape (batch_size, num_spans, 2), representing the span
indices we scored.
num_spans : `torch.LongTensor`, required.
A tensor of shape (batch_size), representing the lengths of non-padded spans
in `enumerated_spans`.
sentences : `List[List[str]]`, required.
A list of tokens in the sentence for each element in the batch.
pos_tags : `List[List[str]]`, optional (default = `None`).
A list of POS tags for each word in the sentence for each element
in the batch.
# Returns
A `List[Tree]` containing the decoded trees for each element in the batch.
"""
# Switch to using exclusive end spans.
exclusive_end_spans = all_spans.clone()
exclusive_end_spans[:, :, -1] += 1
no_label_id = self.vocab.get_token_index("NO-LABEL", "labels")
trees: List[Tree] = []
for batch_index, (scored_spans, spans, sentence) in enumerate(
zip(predictions, exclusive_end_spans, sentences)
):
selected_spans = []
for prediction, span in zip(
scored_spans[: num_spans[batch_index]], spans[: num_spans[batch_index]]
):
start, end = span
no_label_prob = prediction[no_label_id]
label_prob, label_index = torch.max(prediction, -1)
# Does the span have a label != NO-LABEL or is it the root node?
# If so, include it in the spans that we consider.
if int(label_index) != no_label_id or (start == 0 and end == len(sentence)):
selected_spans.append(
SpanInformation(
start=int(start),
end=int(end),
label_prob=float(label_prob),
no_label_prob=float(no_label_prob),
label_index=int(label_index),
)
)
# The spans we've selected might overlap, which causes problems when we try
# to construct the tree as they won't nest properly.
consistent_spans = self.resolve_overlap_conflicts_greedily(selected_spans)
spans_to_labels = {
(span.start, span.end): self.vocab.get_token_from_index(span.label_index, "labels")
for span in consistent_spans
}
sentence_pos = pos_tags[batch_index] if pos_tags is not None else None
trees.append(self.construct_tree_from_spans(spans_to_labels, sentence, sentence_pos))
return trees
@staticmethod
def resolve_overlap_conflicts_greedily(spans: List[SpanInformation]) -> List[SpanInformation]:
"""
Given a set of spans, removes spans which overlap by evaluating the difference
in probability between one being labeled and the other explicitly having no label
and vice-versa. The worst case time complexity of this method is `O(k * n^4)` where `n`
is the length of the sentence that the spans were enumerated from (and therefore
`k * m^2` complexity with respect to the number of spans `m`) and `k` is the
number of conflicts. However, in practice, there are very few conflicts. Hopefully.
This function modifies `spans` to remove overlapping spans.
# Parameters
spans : `List[SpanInformation]`, required.
A list of spans, where each span is a `namedtuple` containing the
following attributes:
start : `int`
The start index of the span.
end : `int`
The exclusive end index of the span.
no_label_prob : `float`
The probability of this span being assigned the `NO-LABEL` label.
label_prob : `float`
The probability of the most likely label.
# Returns
A modified list of `spans`, with the conflicts resolved by considering local
differences between pairs of spans and removing one of the two spans.
"""
conflicts_exist = True
while conflicts_exist:
conflicts_exist = False
for span1_index, span1 in enumerate(spans):
for span2_index, span2 in list(enumerate(spans))[span1_index + 1 :]:
if (
span1.start < span2.start < span1.end < span2.end
or span2.start < span1.start < span2.end < span1.end
):
# The spans overlap.
conflicts_exist = True
# What's the more likely situation: that span2 was labeled
# and span1 was unlabled, or that span1 was labeled and span2
# was unlabled? In the first case, we delete span2 from the
# set of spans to form the tree - in the second case, we delete
# span1.
if (
span1.no_label_prob + span2.label_prob
< span2.no_label_prob + span1.label_prob
):
spans.pop(span2_index)
else:
spans.pop(span1_index)
break
return spans
@staticmethod
def construct_tree_from_spans(
spans_to_labels: Dict[Tuple[int, int], str], sentence: List[str], pos_tags: List[str] = None
) -> Tree:
"""
# Parameters
spans_to_labels : `Dict[Tuple[int, int], str]`, required.
A mapping from spans to constituency labels.
sentence : `List[str]`, required.
A list of tokens forming the sentence to be parsed.
pos_tags : `List[str]`, optional (default = `None`)
A list of the pos tags for the words in the sentence, if they
were either predicted or taken as input to the model.
# Returns
An `nltk.Tree` constructed from the labelled spans.
"""
def assemble_subtree(start: int, end: int):
if (start, end) in spans_to_labels:
# Some labels contain nested spans, e.g S-VP.
# We actually want to create (S (VP ...)) nodes
# for these labels, so we split them up here.
labels: List[str] = spans_to_labels[(start, end)].split("-")
else:
labels = None
# This node is a leaf.
if end - start == 1:
word = sentence[start]
pos_tag = pos_tags[start] if pos_tags is not None else "XX"
tree = Tree(pos_tag, [word])
if labels is not None and pos_tags is not None:
# If POS tags were passed explicitly,
# they are added as pre-terminal nodes.
while labels:
tree = Tree(labels.pop(), [tree])
elif labels is not None:
# Otherwise, we didn't want POS tags
# at all.
tree = Tree(labels.pop(), [word])
while labels:
tree = Tree(labels.pop(), [tree])
return [tree]
argmax_split = start + 1
# Find the next largest subspan such that
# the left hand side is a constituent.
for split in range(end - 1, start, -1):
if (start, split) in spans_to_labels:
argmax_split = split
break
left_trees = assemble_subtree(start, argmax_split)
right_trees = assemble_subtree(argmax_split, end)
children = left_trees + right_trees
if labels is not None:
while labels:
children = [Tree(labels.pop(), children)]
return children
tree = assemble_subtree(0, len(sentence))
return tree[0]
def get_metrics(self, reset: bool = False) -> Dict[str, float]:
all_metrics = {}
all_metrics["tag_accuracy"] = self.tag_accuracy.get_metric(reset=reset)
if self._evalb_score is not None:
evalb_metrics = self._evalb_score.get_metric(reset=reset)
all_metrics.update(evalb_metrics)
return all_metrics
default_predictor = "constituency_parser"
| allennlp-models-main | allennlp_models/structured_prediction/models/constituency_parser.py |
from typing import Dict, List, TextIO, Optional, Any
import warnings
import torch
from torch.nn.modules import Linear, Dropout
import torch.nn.functional as F
from allennlp.common.checks import check_dimensions_match
from allennlp.data import TextFieldTensors, Vocabulary
from allennlp.modules import Seq2SeqEncoder, TimeDistributed, TextFieldEmbedder
from allennlp.modules.token_embedders import Embedding
from allennlp.models.model import Model
from allennlp.nn import InitializerApplicator
from allennlp.nn.util import get_text_field_mask, sequence_cross_entropy_with_logits
from allennlp.nn.util import get_lengths_from_binary_sequence_mask, viterbi_decode
from allennlp_models.structured_prediction.metrics.srl_eval_scorer import (
SrlEvalScorer,
DEFAULT_SRL_EVAL_PATH,
)
def write_bio_formatted_tags_to_file(
prediction_file: TextIO,
gold_file: TextIO,
verb_index: Optional[int],
sentence: List[str],
prediction: List[str],
gold_labels: List[str],
):
"""
Prints predicate argument predictions and gold labels for a single verbal
predicate in a sentence to two provided file references.
The CoNLL SRL format is described in
[the shared task data README](https://www.lsi.upc.edu/~srlconll/conll05st-release/README).
This function expects IOB2-formatted tags, where the B- tag is used in the beginning
of every chunk (i.e. all chunks start with the B- tag).
# Parameters
prediction_file : `TextIO`, required.
A file reference to print predictions to.
gold_file : `TextIO`, required.
A file reference to print gold labels to.
verb_index : `Optional[int]`, required.
The index of the verbal predicate in the sentence which
the gold labels are the arguments for, or None if the sentence
contains no verbal predicate.
sentence : `List[str]`, required.
The word tokens.
prediction : `List[str]`, required.
The predicted BIO labels.
gold_labels : `List[str]`, required.
The gold BIO labels.
"""
conll_formatted_predictions = convert_bio_tags_to_conll_format(prediction)
conll_formatted_gold_labels = convert_bio_tags_to_conll_format(gold_labels)
write_conll_formatted_tags_to_file(
prediction_file,
gold_file,
verb_index,
sentence,
conll_formatted_predictions,
conll_formatted_gold_labels,
)
def write_conll_formatted_tags_to_file(
prediction_file: TextIO,
gold_file: TextIO,
verb_index: Optional[int],
sentence: List[str],
conll_formatted_predictions: List[str],
conll_formatted_gold_labels: List[str],
):
"""
Prints predicate argument predictions and gold labels for a single verbal
predicate in a sentence to two provided file references.
The CoNLL SRL format is described in
[the shared task data README](https://www.lsi.upc.edu/~srlconll/conll05st-release/README).
This function expects IOB2-formatted tags, where the B- tag is used in the beginning
of every chunk (i.e. all chunks start with the B- tag).
# Parameters
prediction_file : `TextIO`, required.
A file reference to print predictions to.
gold_file : `TextIO`, required.
A file reference to print gold labels to.
verb_index : `Optional[int]`, required.
The index of the verbal predicate in the sentence which
the gold labels are the arguments for, or None if the sentence
contains no verbal predicate.
sentence : `List[str]`, required.
The word tokens.
conll_formatted_predictions : `List[str]`, required.
The predicted CoNLL-formatted labels.
conll_formatted_gold_labels : `List[str]`, required.
The gold CoNLL-formatted labels.
"""
verb_only_sentence = ["-"] * len(sentence)
if verb_index is not None:
verb_only_sentence[verb_index] = sentence[verb_index]
for word, predicted, gold in zip(
verb_only_sentence, conll_formatted_predictions, conll_formatted_gold_labels
):
prediction_file.write(word.ljust(15))
prediction_file.write(predicted.rjust(15) + "\n")
gold_file.write(word.ljust(15))
gold_file.write(gold.rjust(15) + "\n")
prediction_file.write("\n")
gold_file.write("\n")
def convert_bio_tags_to_conll_format(labels: List[str]):
"""
Converts BIO formatted SRL tags to the format required for evaluation with the
official CONLL 2005 perl script. Spans are represented by bracketed labels,
with the labels of words inside spans being the same as those outside spans.
Beginning spans always have a opening bracket and a closing asterisk (e.g. "(ARG-1*" )
and closing spans always have a closing bracket (e.g. "*)" ). This applies even for
length 1 spans, (e.g "(ARG-0*)").
A full example of the conversion performed:
[B-ARG-1, I-ARG-1, I-ARG-1, I-ARG-1, I-ARG-1, O]
[ "(ARG-1*", "*", "*", "*", "*)", "*"]
# Parameters
labels : `List[str]`, required.
A list of BIO tags to convert to the CONLL span based format.
# Returns
A list of labels in the CONLL span based format.
"""
sentence_length = len(labels)
conll_labels = []
for i, label in enumerate(labels):
if label == "O":
conll_labels.append("*")
continue
new_label = "*"
# Are we at the beginning of a new span, at the first word in the sentence,
# or is the label different from the previous one? If so, we are seeing a new label.
if label[0] == "B" or i == 0 or label[1:] != labels[i - 1][1:]:
new_label = "(" + label[2:] + new_label
# Are we at the end of the sentence, is the next word a new span, or is the next
# word not in a span? If so, we need to close the label span.
if i == sentence_length - 1 or labels[i + 1][0] == "B" or label[1:] != labels[i + 1][1:]:
new_label = new_label + ")"
conll_labels.append(new_label)
return conll_labels
@Model.register("srl")
class SemanticRoleLabeler(Model):
"""
This model performs semantic role labeling using BIO tags using Propbank semantic roles.
Specifically, it is an implementation of [Deep Semantic Role Labeling - What works
and what's next](https://www.aclweb.org/anthology/P17-1044).
This implementation is effectively a series of stacked interleaved LSTMs with highway
connections, applied to embedded sequences of words concatenated with a binary indicator
containing whether or not a word is the verbal predicate to generate predictions for in
the sentence. Additionally, during inference, Viterbi decoding is applied to constrain
the predictions to contain valid BIO sequences.
Specifically, the model expects and outputs IOB2-formatted tags, where the
B- tag is used in the beginning of every chunk (i.e. all chunks start with the B- tag).
# Parameters
vocab : `Vocabulary`, required
A Vocabulary, required in order to compute sizes for input/output projections.
text_field_embedder : `TextFieldEmbedder`, required
Used to embed the `tokens` `TextField` we get as input to the model.
encoder : `Seq2SeqEncoder`
The encoder (with its own internal stacking) that we will use in between embedding tokens
and predicting output tags.
binary_feature_dim : `int`, required.
The dimensionality of the embedding of the binary verb predicate features.
initializer : `InitializerApplicator`, optional (default=`InitializerApplicator()`)
Used to initialize the model parameters.
label_smoothing : `float`, optional (default = `0.0`)
Whether or not to use label smoothing on the labels when computing cross entropy loss.
ignore_span_metric : `bool`, optional (default = `False`)
Whether to calculate span loss, which is irrelevant when predicting BIO for Open Information Extraction.
srl_eval_path : `str`, optional (default=`DEFAULT_SRL_EVAL_PATH`)
The path to the srl-eval.pl script. By default, will use the srl-eval.pl included with allennlp,
which is located at allennlp/tools/srl-eval.pl . If `None`, srl-eval.pl is not used.
"""
def __init__(
self,
vocab: Vocabulary,
text_field_embedder: TextFieldEmbedder,
encoder: Seq2SeqEncoder,
binary_feature_dim: int,
embedding_dropout: float = 0.0,
initializer: InitializerApplicator = InitializerApplicator(),
label_smoothing: float = None,
ignore_span_metric: bool = False,
srl_eval_path: str = DEFAULT_SRL_EVAL_PATH,
**kwargs,
) -> None:
super().__init__(vocab, **kwargs)
self.text_field_embedder = text_field_embedder
self.num_classes = self.vocab.get_vocab_size("labels")
if srl_eval_path is not None:
# For the span based evaluation, we don't want to consider labels
# for verb, because the verb index is provided to the model.
self.span_metric = SrlEvalScorer(srl_eval_path, ignore_classes=["V"])
else:
self.span_metric = None
self.encoder = encoder
# There are exactly 2 binary features for the verb predicate embedding.
self.binary_feature_embedding = Embedding(
num_embeddings=2, embedding_dim=binary_feature_dim
)
self.tag_projection_layer = TimeDistributed(
Linear(self.encoder.get_output_dim(), self.num_classes)
)
self.embedding_dropout = Dropout(p=embedding_dropout)
self._label_smoothing = label_smoothing
self.ignore_span_metric = ignore_span_metric
check_dimensions_match(
text_field_embedder.get_output_dim() + binary_feature_dim,
encoder.get_input_dim(),
"text embedding dim + verb indicator embedding dim",
"encoder input dim",
)
initializer(self)
def forward( # type: ignore
self,
tokens: TextFieldTensors,
verb_indicator: torch.LongTensor,
tags: torch.LongTensor = None,
metadata: List[Dict[str, Any]] = None,
) -> Dict[str, torch.Tensor]:
"""
# Parameters
tokens : `TextFieldTensors`, required
The output of `TextField.as_array()`, which should typically be passed directly to a
`TextFieldEmbedder`. This output is a dictionary mapping keys to `TokenIndexer`
tensors. At its most basic, using a `SingleIdTokenIndexer` this is : `{"tokens":
Tensor(batch_size, num_tokens)}`. This dictionary will have the same keys as were used
for the `TokenIndexers` when you created the `TextField` representing your
sequence. The dictionary is designed to be passed directly to a `TextFieldEmbedder`,
which knows how to combine different word representations into a single vector per
token in your input.
verb_indicator: `torch.LongTensor`, required.
An integer `SequenceFeatureField` representation of the position of the verb
in the sentence. This should have shape (batch_size, num_tokens) and importantly, can be
all zeros, in the case that the sentence has no verbal predicate.
tags : `torch.LongTensor`, optional (default = `None`)
A torch tensor representing the sequence of integer gold class labels
of shape `(batch_size, num_tokens)`
metadata : `List[Dict[str, Any]]`, optional, (default = `None`)
metadata containg the original words in the sentence and the verb to compute the
frame for, under 'words' and 'verb' keys, respectively.
# Returns
An output dictionary consisting of:
logits : `torch.FloatTensor`
A tensor of shape `(batch_size, num_tokens, tag_vocab_size)` representing
unnormalised log probabilities of the tag classes.
class_probabilities : `torch.FloatTensor`
A tensor of shape `(batch_size, num_tokens, tag_vocab_size)` representing
a distribution of the tag classes per word.
loss : `torch.FloatTensor`, optional
A scalar loss to be optimised.
"""
embedded_text_input = self.embedding_dropout(self.text_field_embedder(tokens))
mask = get_text_field_mask(tokens)
embedded_verb_indicator = self.binary_feature_embedding(verb_indicator.long())
# Concatenate the verb feature onto the embedded text. This now
# has shape (batch_size, sequence_length, embedding_dim + binary_feature_dim).
embedded_text_with_verb_indicator = torch.cat(
[embedded_text_input, embedded_verb_indicator], -1
)
batch_size, sequence_length, _ = embedded_text_with_verb_indicator.size()
encoded_text = self.encoder(embedded_text_with_verb_indicator, mask)
logits = self.tag_projection_layer(encoded_text)
reshaped_log_probs = logits.view(-1, self.num_classes)
class_probabilities = F.softmax(reshaped_log_probs, dim=-1).view(
[batch_size, sequence_length, self.num_classes]
)
output_dict = {"logits": logits, "class_probabilities": class_probabilities}
# We need to retain the mask in the output dictionary
# so that we can crop the sequences to remove padding
# when we do viterbi inference in self.make_output_human_readable.
output_dict["mask"] = mask
if tags is not None:
loss = sequence_cross_entropy_with_logits(
logits, tags, mask, label_smoothing=self._label_smoothing
)
if not self.ignore_span_metric and self.span_metric is not None and not self.training:
batch_verb_indices = [
example_metadata["verb_index"] for example_metadata in metadata
]
batch_sentences = [example_metadata["words"] for example_metadata in metadata]
# Get the BIO tags from make_output_human_readable()
# TODO (nfliu): This is kind of a hack, consider splitting out part
# of make_output_human_readable() to a separate function.
batch_bio_predicted_tags = self.make_output_human_readable(output_dict).pop("tags")
batch_conll_predicted_tags = [
convert_bio_tags_to_conll_format(tags) for tags in batch_bio_predicted_tags
]
batch_bio_gold_tags = [
example_metadata["gold_tags"] for example_metadata in metadata
]
batch_conll_gold_tags = [
convert_bio_tags_to_conll_format(tags) for tags in batch_bio_gold_tags
]
self.span_metric(
batch_verb_indices,
batch_sentences,
batch_conll_predicted_tags,
batch_conll_gold_tags,
)
output_dict["loss"] = loss
words, verbs = zip(*[(x["words"], x["verb"]) for x in metadata])
if metadata is not None:
output_dict["words"] = list(words)
output_dict["verb"] = list(verbs)
return output_dict
def make_output_human_readable(
self, output_dict: Dict[str, torch.Tensor]
) -> Dict[str, torch.Tensor]:
"""
Does constrained viterbi decoding on class probabilities output in :func:`forward`. The
constraint simply specifies that the output tags must be a valid BIO sequence. We add a
`"tags"` key to the dictionary with the result.
"""
all_predictions = output_dict["class_probabilities"]
sequence_lengths = get_lengths_from_binary_sequence_mask(output_dict["mask"]).data.tolist()
if all_predictions.dim() == 3:
predictions_list = [
all_predictions[i].detach().cpu() for i in range(all_predictions.size(0))
]
else:
predictions_list = [all_predictions]
all_tags = []
transition_matrix = self.get_viterbi_pairwise_potentials()
start_transitions = self.get_start_transitions()
for predictions, length in zip(predictions_list, sequence_lengths):
max_likelihood_sequence, _ = viterbi_decode(
predictions[:length], transition_matrix, allowed_start_transitions=start_transitions
)
tags = [
self.vocab.get_token_from_index(x, namespace="labels")
for x in max_likelihood_sequence
]
all_tags.append(tags)
output_dict["tags"] = all_tags
return output_dict
def get_metrics(self, reset: bool = False):
if self.ignore_span_metric:
# Return an empty dictionary if ignoring the
# span metric
return {}
else:
metric_dict = self.span_metric.get_metric(reset=reset)
# This can be a lot of metrics, as there are 3 per class.
# we only really care about the overall metrics, so we filter for them here.
return {x: y for x, y in metric_dict.items() if "overall" in x}
def get_viterbi_pairwise_potentials(self):
"""
Generate a matrix of pairwise transition potentials for the BIO labels.
The only constraint implemented here is that I-XXX labels must be preceded
by either an identical I-XXX tag or a B-XXX tag. In order to achieve this
constraint, pairs of labels which do not satisfy this constraint have a
pairwise potential of -inf.
# Returns
transition_matrix : `torch.Tensor`
A `(num_labels, num_labels)` matrix of pairwise potentials.
"""
all_labels = self.vocab.get_index_to_token_vocabulary("labels")
num_labels = len(all_labels)
transition_matrix = torch.zeros([num_labels, num_labels])
for i, previous_label in all_labels.items():
for j, label in all_labels.items():
# I labels can only be preceded by themselves or
# their corresponding B tag.
if i != j and label[0] == "I" and not previous_label == "B" + label[1:]:
transition_matrix[i, j] = float("-inf")
return transition_matrix
def get_start_transitions(self):
"""
In the BIO sequence, we cannot start the sequence with an I-XXX tag.
This transition sequence is passed to viterbi_decode to specify this constraint.
# Returns
start_transitions : `torch.Tensor`
The pairwise potentials between a START token and
the first token of the sequence.
"""
all_labels = self.vocab.get_index_to_token_vocabulary("labels")
num_labels = len(all_labels)
start_transitions = torch.zeros(num_labels)
for i, label in all_labels.items():
if label[0] == "I":
start_transitions[i] = float("-inf")
return start_transitions
default_predictor = "semantic_role_labeling"
def write_to_conll_eval_file(
prediction_file: TextIO,
gold_file: TextIO,
verb_index: Optional[int],
sentence: List[str],
prediction: List[str],
gold_labels: List[str],
):
"""
.. deprecated:: 0.8.4
The `write_to_conll_eval_file` function was deprecated in favor of the
identical `write_bio_formatted_tags_to_file` in version 0.8.4.
Prints predicate argument predictions and gold labels for a single verbal
predicate in a sentence to two provided file references.
The CoNLL SRL format is described in
[the shared task data README](https://www.lsi.upc.edu/~srlconll/conll05st-release/README).
This function expects IOB2-formatted tags, where the B- tag is used in the beginning
of every chunk (i.e. all chunks start with the B- tag).
# Parameters
prediction_file : `TextIO`, required.
A file reference to print predictions to.
gold_file : `TextIO`, required.
A file reference to print gold labels to.
verb_index : `Optional[int]`, required.
The index of the verbal predicate in the sentence which
the gold labels are the arguments for, or None if the sentence
contains no verbal predicate.
sentence : `List[str]`, required.
The word tokens.
prediction : `List[str]`, required.
The predicted BIO labels.
gold_labels : `List[str]`, required.
The gold BIO labels.
"""
warnings.warn(
"The 'write_to_conll_eval_file' function has been deprecated in favor of "
"the identical 'write_bio_formatted_tags_to_file' function.",
DeprecationWarning,
)
write_bio_formatted_tags_to_file(
prediction_file, gold_file, verb_index, sentence, prediction, gold_labels
)
| allennlp-models-main | allennlp_models/structured_prediction/models/srl.py |
# flake8: noqa: F403
from allennlp_models.lm.dataset_readers import *
from allennlp_models.lm.models import *
from allennlp_models.lm.modules import *
from allennlp_models.lm.predictors import *
from allennlp_models.lm.util import *
| allennlp-models-main | allennlp_models/lm/__init__.py |
from .beam_search_generators import * # noqa: F403
| allennlp-models-main | allennlp_models/lm/util/__init__.py |
from typing import Dict, List, Optional
import torch
from allennlp.data import TextFieldTensors
from allennlp.modules.text_field_embedders import TextFieldEmbedder, BasicTextFieldEmbedder
from allennlp.modules.token_embedders import PretrainedTransformerEmbedder
from .beam_search_generator import BeamSearchGenerator
@BeamSearchGenerator.register("transformer")
class TransformerBeamSearchGenerator(BeamSearchGenerator):
"""
A `BeamSearchGenerator` for transformer-based `NextTokenLM` models.
This can be used with any `NextTokenLM` that utilizes a single `pretrained_transformer`
`TokenEmbedder` for it's `text_field_embedder`.
"""
def __init__(self, *args, namespace: str = None, **kwargs) -> None:
super().__init__(*args, **kwargs)
self._namespace: Optional[str] = namespace
def validate_text_field_embedder(self, text_field_embedder: TextFieldEmbedder):
assert isinstance(text_field_embedder, BasicTextFieldEmbedder)
assert len(text_field_embedder._token_embedders) == 1
key = list(text_field_embedder._token_embedders.keys())[0]
assert isinstance(text_field_embedder._token_embedders[key], PretrainedTransformerEmbedder)
self._namespace = key
def prepare_step_input(
self, predictions: torch.Tensor, state: Dict[str, torch.Tensor]
) -> TextFieldTensors:
# Add `predicted_tokens` to `state["token_ids"]` and expand `state["mask"]`.
new_token_ids: List[torch.Tensor] = []
new_mask: List[torch.Tensor] = []
for instance_token_ids, instance_mask, prediction in zip(
state["token_ids"], state["mask"], predictions
):
# Shape: (?,)
masked_out = (instance_mask == False).nonzero(as_tuple=False).squeeze(-1) # noqa: E712
if masked_out.size()[0] > 0:
first_mask_index = masked_out[0].item()
else:
first_mask_index = instance_token_ids.size()[0]
# Shape: (batch_size, num_tokens + 1)
new_instance_token_ids = torch.cat(
[
instance_token_ids[0:first_mask_index],
prediction.unsqueeze(0),
instance_token_ids[first_mask_index:],
],
dim=-1,
)
# Shape: (batch_size, num_tokens + 1)
new_instance_mask = torch.cat(
[
instance_mask[0:first_mask_index],
torch.tensor([True], device=instance_mask.device),
instance_mask[first_mask_index:],
],
dim=-1,
)
new_token_ids.append(new_instance_token_ids)
new_mask.append(new_instance_mask)
state["token_ids"] = torch.stack(new_token_ids, 0)
state["mask"] = torch.stack(new_mask, 0)
# Expand `state["type_ids"]` by 1 in the last dimension, just repeating whatever the last
# value is.
# Shape: (group_size, num_tokens)
type_ids = state.pop("type_ids")
# Shape: (group_size, num_tokens + 1)
state["type_ids"] = torch.cat([type_ids, type_ids[:, -1].unsqueeze(-1)], dim=-1)
# The model expect input in the form of TextFieldTensors, which just has another
# nested layer like this:
assert self._namespace is not None, (
"token embedder namespace could not be inferred, "
"did you forget to call 'validate_text_field_embedder()'?"
)
inputs = {self._namespace: state}
return inputs
| allennlp-models-main | allennlp_models/lm/util/beam_search_generators/transformer_beam_search_generator.py |
from .beam_search_generator import BeamSearchGenerator
from .transformer_beam_search_generator import TransformerBeamSearchGenerator
| allennlp-models-main | allennlp_models/lm/util/beam_search_generators/__init__.py |
from typing import Dict, Tuple
import torch
from allennlp.common.registrable import Registrable
from allennlp.data import TextFieldTensors
from allennlp.modules import TextFieldEmbedder
from allennlp.nn.beam_search import BeamSearch, StepFunctionType
class BeamSearchGenerator(Registrable):
"""
A beam search generator for next token language models.
This is just a wrapper around `allennlp.nn.beam_search.BeamSearch` with custom
logic for handling the `state` dict.
The reason we need this is because the step function that `BeamSearch` uses
needs to know how to handle different `TextFieldTensors`, the form of which
depends on the exact embedder class that the `NextTokenLm` uses.
So essentially we need a different `BeamSearchGenerator` implementation
for each different `text_field_embedder`.
"""
def __init__(self, beam_search: BeamSearch):
self._beam_search = beam_search
def validate_text_field_embedder(self, text_field_embedder: TextFieldEmbedder):
"""
This should be called after initialization to verify that the model's
`text_field_embedder` is compatable.
"""
raise NotImplementedError
def get_step_state(self, inputs: TextFieldTensors) -> Dict[str, torch.Tensor]:
"""
Create a `state` dictionary for `BeamSearch` from the `TextFieldTensors` inputs
to the `NextTokenLm` model.
By default this assumes the `TextFieldTensors` has a single `TokenEmbedder`,
and just "flattens" the `TextFieldTensors` by returning the `TokenEmbedder`
sub-dictionary.
If you have `TextFieldTensors` with more than one `TokenEmbedder` sub-dictionary,
you'll need to override this class.
"""
assert len(inputs) == 1, (
"'get_step_state()' assumes a single token embedder by default, "
"you'll need to override this method to handle more than one"
)
key = list(inputs.keys())[0]
# We can't just `return inputs[key]` because we might want to modify the state
# dictionary (add or remove fields) without accidentally modifying the inputs
# dictionary.
return {k: v for (k, v) in inputs[key].items()}
def prepare_step_input(
self, predictions: torch.Tensor, state: Dict[str, torch.Tensor]
) -> TextFieldTensors:
"""
This is like the reverse of `get_step_state()`.
It takes `predictions` and `state` from the current step and returns
a `TextFieldTensors` dictionary that can be fed through the embedder of the `NextTokenLm`
model.
This usually involves adding the predicted tokens to the proper field of the `state` dict,
and expanding any mask tensors or other context tensors by 1 in the right dimension,
and then unflattening the `state` so that it looks like a `TextFieldTensors` dict.
"""
raise NotImplementedError
def search(
self,
start_predictions: torch.Tensor,
state: Dict[str, torch.Tensor],
step_function: StepFunctionType,
) -> Tuple[torch.Tensor, torch.Tensor]:
"""
Calls `BeamSearch.search`, return the top predicted indices and corresponding
log probabilities.
"""
# Shape (top_indices): (batch_size, beam_size, num_predicted_tokens)
# Shape (top_log_probs): (batch_size, beam_size)
top_indices, top_log_probs = self._beam_search.search(
start_predictions, state, step_function
)
return top_indices, top_log_probs
| allennlp-models-main | allennlp_models/lm/util/beam_search_generators/beam_search_generator.py |
from typing import Dict, List
import logging
import copy
from allennlp.data.instance import Instance
from allennlp.data.tokenizers.tokenizer import Tokenizer
from allennlp.data.tokenizers import Token
from allennlp.data.tokenizers.whitespace_tokenizer import WhitespaceTokenizer
from allennlp.data.dataset_readers.dataset_reader import DatasetReader
from allennlp.data.token_indexers.token_indexer import TokenIndexer
from allennlp.data.fields import IndexField, Field, ListField, TextField
from allennlp.data.token_indexers import SingleIdTokenIndexer
from allennlp.data.tokenizers import PretrainedTransformerTokenizer
logger = logging.getLogger(__name__)
@DatasetReader.register("masked_language_modeling")
class MaskedLanguageModelingReader(DatasetReader):
"""
Reads a text file and converts it into a `Dataset` suitable for training a masked language
model.
The :class:`Field` s that we create are the following: an input `TextField`, a mask position
`ListField[IndexField]`, and a target token `TextField` (the target tokens aren't a single
string of text, but we use a `TextField` so we can index the target tokens the same way as
our input, typically with a single `PretrainedTransformerIndexer`). The mask position and
target token lists are the same length.
NOTE: This is not fully functional! It was written to put together a demo for interpreting and
attacking masked language modeling, not for actually training anything. `text_to_instance`
is functional, but `_read` is not. To make this fully functional, you would want some
sampling strategies for picking the locations for [MASK] tokens, and probably a bunch of
efficiency / multi-processing stuff.
# Parameters
tokenizer : `Tokenizer`, optional (default=`WhitespaceTokenizer()`)
We use this `Tokenizer` for the text. See :class:`Tokenizer`.
token_indexers : `Dict[str, TokenIndexer]`, optional (default=`{"tokens": SingleIdTokenIndexer()}`)
We use this to define the input representation for the text, and to get ids for the mask
targets. See :class:`TokenIndexer`.
"""
def __init__(
self,
tokenizer: Tokenizer = None,
token_indexers: Dict[str, TokenIndexer] = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self._tokenizer = tokenizer or WhitespaceTokenizer()
# temporary hack to not to add special tokens
self._targets_tokenizer: Tokenizer
if isinstance(self._tokenizer, PretrainedTransformerTokenizer):
self._targets_tokenizer = copy.copy(self._tokenizer)
self._targets_tokenizer._add_special_tokens = False
else:
self._targets_tokenizer = self._tokenizer
self._token_indexers = token_indexers or {"tokens": SingleIdTokenIndexer()}
def _read(self, file_path: str):
import sys
# You can call pytest with either `pytest` or `py.test`.
if "test" not in sys.argv[0]:
logger.error("_read is only implemented for unit tests at the moment")
with open(file_path, "r") as text_file:
for sentence in text_file:
tokens = self._tokenizer.tokenize(sentence)
target = tokens[0].text
tokens[0] = Token("[MASK]")
yield self.text_to_instance(sentence, tokens, [target])
def text_to_instance(
self, # type: ignore
sentence: str = None,
tokens: List[Token] = None,
targets: List[str] = None,
) -> Instance:
"""
# Parameters
sentence : `str`, optional
A sentence containing [MASK] tokens that should be filled in by the model. This input
is superceded and ignored if `tokens` is given.
tokens : `List[Token]`, optional
An already-tokenized sentence containing some number of [MASK] tokens to be predicted.
targets : `List[str]`, optional
Contains the target tokens to be predicted. The length of this list should be the same
as the number of [MASK] tokens in the input.
"""
if not tokens:
tokens = self._tokenizer.tokenize(sentence)
input_field = TextField(tokens, self._token_indexers)
mask_positions = []
for i, token in enumerate(tokens):
if token.text == "[MASK]":
mask_positions.append(i)
if not mask_positions:
raise ValueError("No [MASK] tokens found!")
if targets and len(targets) != len(mask_positions):
raise ValueError(f"Found {len(mask_positions)} mask tokens and {len(targets)} targets")
mask_position_field = ListField([IndexField(i, input_field) for i in mask_positions])
fields: Dict[str, Field] = {"tokens": input_field, "mask_positions": mask_position_field}
# TODO(mattg): there's a problem if the targets get split into multiple word pieces...
# (maksym-del): if we index word that was not split into wordpieces with
# PretrainedTransformerTokenizer we will get OOV token ID...
# Until this is handeled, let's use first wordpiece id for each token since tokens should contain text_ids
# to be indexed with PretrainedTokenIndexer. It also requeires hack to avoid adding special tokens...
if targets is not None:
# target_field = TextField([Token(target) for target in targets], self._token_indexers)
first_wordpieces = [self._targets_tokenizer.tokenize(target)[0] for target in targets]
target_tokens = []
for wordpiece, target in zip(first_wordpieces, targets):
target_tokens.append(
Token(text=target, text_id=wordpiece.text_id, type_id=wordpiece.type_id)
)
fields["target_ids"] = TextField(target_tokens, self._token_indexers)
return Instance(fields)
| allennlp-models-main | allennlp_models/lm/dataset_readers/masked_language_model.py |
from typing import Dict, Iterable, Union, Optional, List
import logging
import math
from allennlp.data.tokenizers import Token
from allennlp.data.dataset_readers.dataset_reader import DatasetReader
from allennlp.data.fields import TextField
from allennlp.data.instance import Instance
from allennlp.data.token_indexers import SingleIdTokenIndexer
from allennlp.data.token_indexers.token_indexer import TokenIndexer
from allennlp.data.tokenizers import SpacyTokenizer
from allennlp.data.tokenizers.tokenizer import Tokenizer
logger = logging.getLogger(__name__)
@DatasetReader.register("simple_language_modeling")
class SimpleLanguageModelingDatasetReader(DatasetReader):
"""
Reads sentences, one per line, for language modeling. This does not handle arbitrarily formatted
text with sentences spanning multiple lines.
# Parameters
tokenizer : `Tokenizer`, optional
Tokenizer to use to split the input sentences into words or other kinds of tokens. Defaults
to `SpacyTokenizer()`.
token_indexers : `Dict[str, TokenIndexer]`, optional
Indexers used to define input token representations. Defaults to
`{"tokens": SingleIdTokenIndexer()}`.
max_sequence_length : `int`, optional
If specified, sentences with more than this number of tokens will be dropped.
start_tokens : `List[str]`, optional (default=`None`)
These are prepended to the tokens provided to the `TextField`.
end_tokens : `List[str]`, optional (default=`None`)
These are appended to the tokens provided to the `TextField`.
"""
def __init__(
self,
tokenizer: Tokenizer = None,
token_indexers: Dict[str, TokenIndexer] = None,
max_sequence_length: int = None,
start_tokens: List[str] = None,
end_tokens: List[str] = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self._tokenizer = tokenizer or SpacyTokenizer()
self._token_indexers = token_indexers or {"tokens": SingleIdTokenIndexer()}
if max_sequence_length is not None:
self._max_sequence_length: Union[float, Optional[int]] = max_sequence_length
else:
self._max_sequence_length = math.inf
self._start_tokens = [Token(st) for st in (start_tokens or [])]
self._end_tokens = [Token(et) for et in (end_tokens or [])]
logger.info("Creating SimpleLanguageModelingDatasetReader")
logger.info("max_sequence_length=%s", max_sequence_length)
def text_to_instance(
self, # type: ignore
sentence: str,
) -> Instance:
tokenized = self._tokenizer.tokenize(sentence)
tokenized_with_ends = []
tokenized_with_ends.extend(self._start_tokens)
tokenized_with_ends.extend(tokenized)
tokenized_with_ends.extend(self._end_tokens)
return_instance = Instance({"source": TextField(tokenized_with_ends, self._token_indexers)})
return return_instance
def _read(self, file_path: str) -> Iterable[Instance]:
logger.info("Loading data from %s", file_path)
dropped_instances = 0
with open(file_path) as file:
for sentence in file:
instance = self.text_to_instance(sentence)
if instance.fields["source"].sequence_length() <= self._max_sequence_length:
yield instance
else:
dropped_instances += 1
if not dropped_instances:
logger.info(f"No instances dropped from {file_path}.")
else:
logger.warning(f"Dropped {dropped_instances} instances from {file_path}.")
| allennlp-models-main | allennlp_models/lm/dataset_readers/simple_language_modeling.py |
from allennlp_models.lm.dataset_readers.masked_language_model import MaskedLanguageModelingReader
from allennlp_models.lm.dataset_readers.next_token_lm import NextTokenLMReader
from allennlp_models.lm.dataset_readers.simple_language_modeling import (
SimpleLanguageModelingDatasetReader,
)
| allennlp-models-main | allennlp_models/lm/dataset_readers/__init__.py |
from typing import Dict, List, cast
import logging
import copy
from allennlp.data.instance import Instance
from allennlp.data.tokenizers.tokenizer import Tokenizer
from allennlp.data.tokenizers import Token
from allennlp.data.tokenizers.whitespace_tokenizer import WhitespaceTokenizer
from allennlp.data.dataset_readers.dataset_reader import DatasetReader
from allennlp.data.token_indexers.token_indexer import TokenIndexer
from allennlp.data.fields import Field, TextField
from allennlp.data.token_indexers import SingleIdTokenIndexer
from allennlp.data.tokenizers import PretrainedTransformerTokenizer
logger = logging.getLogger(__name__)
@DatasetReader.register("next_token_lm")
class NextTokenLMReader(DatasetReader):
"""
Creates `Instances` suitable for use in predicting a single next token using a language
model. The :class:`Field` s that we create are the following: an input `TextField` and a
target token `TextField` (we only ver have a single token, but we use a `TextField` so we
can index it the same way as our input, typically with a single
`PretrainedTransformerIndexer`).
NOTE: This is not fully functional! It was written to put together a demo for interpreting and
attacking language models, not for actually training anything. It would be a really bad idea
to use this setup for training language models, as it would be incredibly inefficient. The
only purpose of this class is for a demo.
# Parameters
tokenizer : `Tokenizer`, optional (default=`WhitespaceTokenizer()`)
We use this `Tokenizer` for the text. See :class:`Tokenizer`.
token_indexers : `Dict[str, TokenIndexer]`, optional (default=`{"tokens": SingleIdTokenIndexer()}`)
We use this to define the input representation for the text, and to get ids for the mask
targets. See :class:`TokenIndexer`.
max_tokens : `int`, optional (default = `None`)
If you don't handle truncation at the `tokenizer` level, you can specify `max_tokens`
here, and the only the last `max_tokens` will be used.
"""
def __init__(
self,
tokenizer: Tokenizer = None,
token_indexers: Dict[str, TokenIndexer] = None,
max_tokens: int = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self._tokenizer = tokenizer or WhitespaceTokenizer()
self._targets_tokenizer: Tokenizer
if isinstance(self._tokenizer, PretrainedTransformerTokenizer):
self._targets_tokenizer = copy.copy(self._tokenizer)
self._targets_tokenizer._add_special_tokens = False
else:
self._targets_tokenizer = self._tokenizer
self._token_indexers = token_indexers or {"tokens": SingleIdTokenIndexer()}
self._max_tokens = max_tokens
def _read(self, file_path: str):
import sys
# You can call pytest with either `pytest` or `py.test`.
if "test" not in sys.argv[0]:
logger.error(
"_read is only implemented for unit tests. You should not actually "
"try to train or evaluate a language model with this code."
)
with open(file_path, "r") as text_file:
for sentence in text_file:
tokens = self._tokenizer.tokenize(sentence)
target = "the"
yield self.text_to_instance(sentence, tokens, target)
def text_to_instance(
self, # type: ignore
sentence: str = None,
tokens: List[Token] = None,
target: str = None,
) -> Instance:
if tokens is None and sentence is not None:
tokens = self._tokenizer.tokenize(sentence)
elif sentence is None:
raise ValueError("expected either 'sentence' or 'tokens' to not be null")
tokens = cast(List[Token], tokens)
if self._max_tokens is not None:
tokens = tokens[-self._max_tokens :]
input_field = TextField(tokens, self._token_indexers)
fields: Dict[str, Field] = {"tokens": input_field}
# TODO: if we index word that was not split into wordpieces with
# PretrainedTransformerTokenizer we will get OOV token ID...
# Until this is handeled, let's use first wordpiece id for each token since tokens should contain text_ids
# to be indexed with PretrainedTokenIndexer. It also requeires hack to avoid adding special tokens...
if target:
wordpiece = self._targets_tokenizer.tokenize(target)[0]
target_token = Token(text=target, text_id=wordpiece.text_id, type_id=wordpiece.type_id)
fields["target_ids"] = TextField([target_token], self._token_indexers)
return Instance(fields)
| allennlp-models-main | allennlp_models/lm/dataset_readers/next_token_lm.py |
from typing import Dict
import numpy
from allennlp.common.util import JsonDict
from allennlp.data import Instance, Token
from allennlp.data.fields import TextField
from allennlp.predictors.predictor import Predictor
@Predictor.register("masked_language_model")
class MaskedLanguageModelPredictor(Predictor):
def predict(self, sentence_with_masks: str) -> JsonDict:
return self.predict_json({"sentence": sentence_with_masks})
def predictions_to_labeled_instances(
self, instance: Instance, outputs: Dict[str, numpy.ndarray]
):
new_instance = instance.duplicate()
token_field: TextField = instance["tokens"] # type: ignore
mask_targets = [Token(target_top_k[0]) for target_top_k in outputs["words"]]
new_instance.add_field(
"target_ids",
TextField(mask_targets, token_field._token_indexers),
vocab=self._model.vocab,
)
return [new_instance]
def _json_to_instance(self, json_dict: JsonDict) -> Instance:
"""
Expects JSON that looks like `{"sentence": "..."}`.
"""
sentence = json_dict["sentence"]
return self._dataset_reader.text_to_instance(sentence=sentence) # type: ignore
| allennlp-models-main | allennlp_models/lm/predictors/masked_language_model.py |
from allennlp_models.lm.predictors.masked_language_model import MaskedLanguageModelPredictor
from allennlp_models.lm.predictors.next_token_lm import NextTokenLMPredictor
| allennlp-models-main | allennlp_models/lm/predictors/__init__.py |
from typing import Dict
import numpy
from allennlp.common.util import JsonDict
from allennlp.data import Instance, Token
from allennlp.data.fields import TextField
from allennlp.predictors.predictor import Predictor
@Predictor.register("next_token_lm")
class NextTokenLMPredictor(Predictor):
def predict(self, sentence: str) -> JsonDict:
return self.predict_json({"sentence": sentence})
def predictions_to_labeled_instances(
self, instance: Instance, outputs: Dict[str, numpy.ndarray]
):
new_instance = instance.duplicate()
token_field: TextField = instance["tokens"] # type: ignore
mask_targets = [Token(target_top_k[0]) for target_top_k in outputs["top_tokens"][0]]
new_instance.add_field(
"target_ids",
TextField(mask_targets, token_field._token_indexers),
vocab=self._model.vocab,
)
return [new_instance]
def _json_to_instance(self, json_dict: JsonDict) -> Instance:
"""
Expects JSON that looks like `{"sentence": "..."}`.
"""
sentence = json_dict["sentence"]
return self._dataset_reader.text_to_instance(sentence=sentence) # type: ignore
| allennlp-models-main | allennlp_models/lm/predictors/next_token_lm.py |
from typing import Dict, List, Tuple, Union
import torch
from allennlp.common.checks import ConfigurationError
from allennlp.data import TextFieldTensors, Vocabulary
from allennlp.models.model import Model
from allennlp.modules import SoftmaxLoss
from allennlp.modules.text_field_embedders import TextFieldEmbedder
from allennlp.modules.sampled_softmax_loss import SampledSoftmaxLoss
from allennlp.modules.seq2seq_encoders import Seq2SeqEncoder
from allennlp.nn.util import get_text_field_mask
from allennlp.nn import InitializerApplicator
from allennlp.training.metrics import Perplexity
@Model.register("language_model")
class LanguageModel(Model):
"""
The `LanguageModel` applies a "contextualizing"
`Seq2SeqEncoder` to uncontextualized embeddings, using a `SoftmaxLoss`
module (defined above) to compute the language modeling loss.
If bidirectional is True, the language model is trained to predict the next and
previous tokens for each token in the input. In this case, the contextualizer must
be bidirectional. If bidirectional is False, the language model is trained to only
predict the next token for each token in the input; the contextualizer should also
be unidirectional.
If your language model is bidirectional, it is IMPORTANT that your bidirectional
`Seq2SeqEncoder` contextualizer does not do any "peeking ahead". That is, for its
forward direction it should only consider embeddings at previous timesteps, and for
its backward direction only embeddings at subsequent timesteps. Similarly, if your
language model is unidirectional, the unidirectional contextualizer should only
consider embeddings at previous timesteps. If this condition is not met, your
language model is cheating.
# Parameters
vocab : `Vocabulary`
text_field_embedder : `TextFieldEmbedder`
Used to embed the indexed tokens we get in `forward`.
contextualizer : `Seq2SeqEncoder`
Used to "contextualize" the embeddings. As described above,
this encoder must not cheat by peeking ahead.
dropout : `float`, optional (default = `None`)
If specified, dropout is applied to the contextualized embeddings before computation of
the softmax. The contextualized embeddings themselves are returned without dropout.
num_samples : `int`, optional (default = `None`)
If provided, the model will use `SampledSoftmaxLoss`
with the specified number of samples. Otherwise, it will use
the full `_SoftmaxLoss` defined above.
sparse_embeddings : `bool`, optional (default = `False`)
Passed on to `SampledSoftmaxLoss` if True.
bidirectional : `bool`, optional (default = `False`)
Train a bidirectional language model, where the contextualizer
is used to predict the next and previous token for each input token.
This must match the bidirectionality of the contextualizer.
"""
def __init__(
self,
vocab: Vocabulary,
text_field_embedder: TextFieldEmbedder,
contextualizer: Seq2SeqEncoder,
dropout: float = None,
num_samples: int = None,
sparse_embeddings: bool = False,
bidirectional: bool = False,
initializer: InitializerApplicator = None,
**kwargs,
) -> None:
super().__init__(vocab, **kwargs)
self._text_field_embedder = text_field_embedder
if contextualizer.is_bidirectional() is not bidirectional:
raise ConfigurationError(
"Bidirectionality of contextualizer must match bidirectionality of "
"language model. "
f"Contextualizer bidirectional: {contextualizer.is_bidirectional()}, "
f"language model bidirectional: {bidirectional}"
)
self._contextualizer = contextualizer
self._bidirectional = bidirectional
# The dimension for making predictions just in the forward
# (or backward) direction.
if self._bidirectional:
self._forward_dim = contextualizer.get_output_dim() // 2
else:
self._forward_dim = contextualizer.get_output_dim()
# TODO(joelgrus): more sampled softmax configuration options, as needed.
if num_samples is not None:
self._softmax_loss = SampledSoftmaxLoss(
num_words=vocab.get_vocab_size(),
embedding_dim=self._forward_dim,
num_samples=num_samples,
sparse=sparse_embeddings,
)
else:
self._softmax_loss = SoftmaxLoss(
num_words=vocab.get_vocab_size(), embedding_dim=self._forward_dim
)
# This buffer is now unused and exists only for backwards compatibility reasons.
self.register_buffer("_last_average_loss", torch.zeros(1))
self._perplexity = Perplexity()
if dropout:
self._dropout = torch.nn.Dropout(dropout)
else:
self._dropout = lambda x: x
if initializer is not None:
initializer(self)
def _get_target_token_embeddings(
self, token_embeddings: torch.Tensor, mask: torch.BoolTensor, direction: int
) -> torch.Tensor:
# Need to shift the mask in the correct direction
zero_col = token_embeddings.new_zeros(mask.size(0), 1).to(dtype=torch.bool)
if direction == 0:
# forward direction, get token to right
shifted_mask = torch.cat([zero_col, mask[:, 0:-1]], dim=1)
else:
shifted_mask = torch.cat([mask[:, 1:], zero_col], dim=1)
return token_embeddings.masked_select(shifted_mask.unsqueeze(-1)).view(
-1, self._forward_dim
)
def _compute_loss(
self,
lm_embeddings: torch.Tensor,
token_embeddings: torch.Tensor,
forward_targets: torch.Tensor,
backward_targets: torch.Tensor = None,
) -> Tuple[torch.Tensor, torch.Tensor]:
# If bidirectional, lm_embeddings is shape (batch_size, timesteps, dim * 2)
# If unidirectional, lm_embeddings is shape (batch_size, timesteps, dim)
# forward_targets, backward_targets (None in the unidirectional case) are
# shape (batch_size, timesteps) masked with 0
if self._bidirectional:
forward_embeddings, backward_embeddings = lm_embeddings.chunk(2, -1)
backward_loss = self._loss_helper(
1, backward_embeddings, backward_targets, token_embeddings
)
else:
forward_embeddings = lm_embeddings
backward_loss = None
forward_loss = self._loss_helper(0, forward_embeddings, forward_targets, token_embeddings)
return forward_loss, backward_loss
def _loss_helper(
self,
direction: int,
direction_embeddings: torch.Tensor,
direction_targets: torch.Tensor,
token_embeddings: torch.Tensor,
) -> Tuple[int, int]:
mask = direction_targets > 0
# we need to subtract 1 to undo the padding id since the softmax
# does not include a padding dimension
# shape (batch_size * timesteps, )
non_masked_targets = direction_targets.masked_select(mask) - 1
# shape (batch_size * timesteps, embedding_dim)
non_masked_embeddings = direction_embeddings.masked_select(mask.unsqueeze(-1)).view(
-1, self._forward_dim
)
# note: need to return average loss across forward and backward
# directions, but total sum loss across all batches.
# Assuming batches include full sentences, forward and backward
# directions have the same number of samples, so sum up loss
# here then divide by 2 just below
if not self._softmax_loss.tie_embeddings or not self._use_character_inputs:
return self._softmax_loss(non_masked_embeddings, non_masked_targets)
else:
# we also need the token embeddings corresponding to the
# the targets
raise NotImplementedError(
"This requires SampledSoftmaxLoss, which isn't implemented yet."
)
non_masked_token_embeddings = self._get_target_token_embeddings(
token_embeddings, mask, direction
)
return self._softmax(
non_masked_embeddings, non_masked_targets, non_masked_token_embeddings
)
def delete_softmax(self) -> None:
"""
Remove the softmax weights. Useful for saving memory when calculating the loss
is not necessary, e.g. in an embedder.
"""
self._softmax_loss = None
def num_layers(self) -> int:
"""
Returns the depth of this LM. That is, how many layers the contextualizer has plus one for
the non-contextual layer.
"""
if hasattr(self._contextualizer, "num_layers"):
return self._contextualizer.num_layers + 1
else:
raise NotImplementedError(
f"Contextualizer of type {type(self._contextualizer)} "
+ "does not report how many layers it has."
)
def forward(self, source: TextFieldTensors) -> Dict[str, torch.Tensor]: # type: ignore
"""
Computes the averaged forward (and backward, if language model is bidirectional)
LM loss from the batch.
# Parameters
source : `TextFieldTensors`, required.
The output of `Batch.as_tensor_dict()` for a batch of sentences. By convention,
it's required to have at least a `"tokens"` entry that's the output of a
`SingleIdTokenIndexer`, which is used to compute the language model targets.
# Returns
Dict with keys:
`'loss'` : `torch.Tensor`
forward negative log likelihood, or the average of forward/backward
if language model is bidirectional
`'forward_loss'` : `torch.Tensor`
forward direction negative log likelihood
`'backward_loss'` : `torch.Tensor` or `None`
backward direction negative log likelihood. If language model is not
bidirectional, this is `None`.
`'lm_embeddings'` : `Union[torch.Tensor, List[torch.Tensor]]`
(batch_size, timesteps, embed_dim) tensor of top layer contextual representations or
list of all layers. No dropout applied.
`'noncontextual_token_embeddings'` : `torch.Tensor`
(batch_size, timesteps, token_embed_dim) tensor of bottom layer noncontextual
representations
`'mask'` : `torch.BoolTensor`
(batch_size, timesteps) mask for the embeddings
"""
mask = get_text_field_mask(source)
# shape (batch_size, timesteps, embedding_size)
embeddings = self._text_field_embedder(source)
# Either the top layer or all layers.
contextual_embeddings: Union[torch.Tensor, List[torch.Tensor]] = self._contextualizer(
embeddings, mask
)
return_dict = {}
# If we have target tokens, calculate the loss.
token_id_dict = source.get("tokens")
if token_id_dict is not None:
token_ids = token_id_dict["tokens"]
assert isinstance(contextual_embeddings, torch.Tensor)
# Use token_ids to compute targets
forward_targets = torch.zeros_like(token_ids)
forward_targets[:, 0:-1] = token_ids[:, 1:]
if self._bidirectional:
backward_targets = torch.zeros_like(token_ids)
backward_targets[:, 1:] = token_ids[:, 0:-1]
else:
backward_targets = None
# add dropout
contextual_embeddings_with_dropout = self._dropout(contextual_embeddings)
# compute softmax loss
forward_loss, backward_loss = self._compute_loss(
contextual_embeddings_with_dropout, embeddings, forward_targets, backward_targets
)
num_targets = torch.sum((forward_targets > 0).long())
if num_targets > 0:
if self._bidirectional:
average_loss = 0.5 * (forward_loss + backward_loss) / num_targets.float()
else:
average_loss = forward_loss / num_targets.float()
else:
average_loss = torch.tensor(0.0).to(forward_targets.device)
self._perplexity(average_loss)
if num_targets > 0:
return_dict.update(
{
"loss": average_loss,
"forward_loss": forward_loss / num_targets.float(),
"batch_weight": num_targets.float(),
}
)
if backward_loss is not None:
return_dict["backward_loss"] = backward_loss / num_targets.float()
else:
# average_loss zero tensor, return it for all
return_dict.update({"loss": average_loss, "forward_loss": average_loss})
if backward_loss is not None:
return_dict["backward_loss"] = average_loss
return_dict.update(
{
# Note: These embeddings do not have dropout applied.
"lm_embeddings": contextual_embeddings,
"noncontextual_token_embeddings": embeddings,
"mask": mask,
}
)
return return_dict
def get_metrics(self, reset: bool = False):
return {"perplexity": self._perplexity.get_metric(reset=reset)}
| allennlp-models-main | allennlp_models/lm/models/language_model.py |
from typing import Dict
import torch
from allennlp.common.checks import check_dimensions_match
from allennlp.data import TextFieldTensors, Vocabulary
from allennlp.models.model import Model
from allennlp.modules import Seq2SeqEncoder, TextFieldEmbedder
from allennlp.nn import util, InitializerApplicator
from allennlp.training.metrics import Perplexity
from allennlp_models.lm.modules.language_model_heads import LanguageModelHead
@Model.register("masked_language_model")
class MaskedLanguageModel(Model):
"""
The `MaskedLanguageModel` embeds some input tokens (including some which are masked),
contextualizes them, then predicts targets for the masked tokens, computing a loss against
known targets.
NOTE: This was developed for use in a demo, not for training. It's possible that it will still
work for training a masked LM, but it is very likely that some other code would be much more
efficient for that. This `does` compute correct gradients of the loss, because we use that in
our demo, so in principle it should be able to train a model, we just don't necessarily endorse
that use.
# Parameters
vocab : `Vocabulary`
text_field_embedder : `TextFieldEmbedder`
Used to embed the indexed tokens we get in `forward`.
language_model_head : `LanguageModelHead`
The `torch.nn.Module` that goes from the hidden states output by the contextualizer to
logits over some output vocabulary.
contextualizer : `Seq2SeqEncoder`, optional (default=`None`)
Used to "contextualize" the embeddings. This is optional because the contextualization
might actually be done in the text field embedder.
target_namespace : `str`, optional (default=`'bert'`)
Namespace to use to convert predicted token ids to strings in
`Model.make_output_human_readable`.
dropout : `float`, optional (default=`0.0`)
If specified, dropout is applied to the contextualized embeddings before computation of
the softmax. The contextualized embeddings themselves are returned without dropout.
"""
def __init__(
self,
vocab: Vocabulary,
text_field_embedder: TextFieldEmbedder,
language_model_head: LanguageModelHead,
contextualizer: Seq2SeqEncoder = None,
target_namespace: str = "bert",
dropout: float = 0.0,
initializer: InitializerApplicator = None,
**kwargs,
) -> None:
super().__init__(vocab, **kwargs)
self._text_field_embedder = text_field_embedder
self._contextualizer = contextualizer
if contextualizer:
check_dimensions_match(
text_field_embedder.get_output_dim(),
contextualizer.get_input_dim(),
"text field embedder output",
"contextualizer input",
)
self._language_model_head = language_model_head
self._target_namespace = target_namespace
self._perplexity = Perplexity()
self._dropout = torch.nn.Dropout(dropout)
if initializer is not None:
initializer(self)
def forward( # type: ignore
self,
tokens: TextFieldTensors,
mask_positions: torch.BoolTensor,
target_ids: TextFieldTensors = None,
) -> Dict[str, torch.Tensor]:
"""
# Parameters
tokens : `TextFieldTensors`
The output of `TextField.as_tensor()` for a batch of sentences.
mask_positions : `torch.LongTensor`
The positions in `tokens` that correspond to [MASK] tokens that we should try to fill
in. Shape should be (batch_size, num_masks).
target_ids : `TextFieldTensors`
This is a list of token ids that correspond to the mask positions we're trying to fill.
It is the output of a `TextField`, purely for convenience, so we can handle wordpiece
tokenizers and such without having to do crazy things in the dataset reader. We assume
that there is exactly one entry in the dictionary, and that it has a shape identical to
`mask_positions` - one target token per mask position.
"""
targets = None
if target_ids is not None:
targets = util.get_token_ids_from_text_field_tensors(target_ids)
mask_positions = mask_positions.squeeze(-1)
batch_size, num_masks = mask_positions.size()
if targets is not None and targets.size() != mask_positions.size():
raise ValueError(
f"Number of targets ({targets.size()}) and number of masks "
f"({mask_positions.size()}) are not equal"
)
# Shape: (batch_size, num_tokens, embedding_dim)
embeddings = self._text_field_embedder(tokens)
# Shape: (batch_size, num_tokens, encoding_dim)
if self._contextualizer:
mask = util.get_text_field_mask(embeddings)
contextual_embeddings = self._contextualizer(embeddings, mask)
else:
contextual_embeddings = embeddings
# Does advanced indexing to get the embeddings of just the mask positions, which is what
# we're trying to predict.
batch_index = torch.arange(0, batch_size).long().unsqueeze(1)
mask_embeddings = contextual_embeddings[batch_index, mask_positions]
target_logits = self._language_model_head(self._dropout(mask_embeddings))
vocab_size = target_logits.size(-1)
probs = torch.nn.functional.softmax(target_logits, dim=-1)
k = min(vocab_size, 5) # min here largely because tests use small vocab
top_probs, top_indices = probs.topk(k=k, dim=-1)
output_dict = {"probabilities": top_probs, "top_indices": top_indices}
output_dict["token_ids"] = util.get_token_ids_from_text_field_tensors(tokens)
if targets is not None:
target_logits = target_logits.view(batch_size * num_masks, vocab_size)
targets = targets.view(batch_size * num_masks)
loss = torch.nn.functional.cross_entropy(target_logits, targets)
self._perplexity(loss)
output_dict["loss"] = loss
return output_dict
def get_metrics(self, reset: bool = False):
return {"perplexity": self._perplexity.get_metric(reset=reset)}
def make_output_human_readable(
self, output_dict: Dict[str, torch.Tensor]
) -> Dict[str, torch.Tensor]:
top_words = []
for instance_indices in output_dict["top_indices"]:
top_words.append(
[
[
self.vocab.get_token_from_index(
index.item(), namespace=self._target_namespace
)
for index in mask_positions
]
for mask_positions in instance_indices
]
)
output_dict["words"] = top_words
tokens = []
for instance_tokens in output_dict["token_ids"]:
tokens.append(
[
self.vocab.get_token_from_index(
token_id.item(), namespace=self._target_namespace
)
for token_id in instance_tokens
]
)
output_dict["tokens"] = tokens
return output_dict
default_predictor = "masked_language_model"
| allennlp-models-main | allennlp_models/lm/models/masked_language_model.py |
from allennlp_models.lm.models.bidirectional_lm import BidirectionalLanguageModel
from allennlp_models.lm.models.language_model import LanguageModel
from allennlp_models.lm.models.masked_language_model import MaskedLanguageModel
from allennlp_models.lm.models.next_token_lm import NextTokenLM
| allennlp-models-main | allennlp_models/lm/models/__init__.py |
from typing import Dict, Tuple
import torch
from allennlp.common.checks import check_dimensions_match
from allennlp.data import TextFieldTensors, Vocabulary
from allennlp.models.model import Model
from allennlp.modules import Seq2SeqEncoder, TextFieldEmbedder
from allennlp.nn import util, InitializerApplicator
from allennlp.training.metrics import Perplexity
from allennlp_models.lm.modules.language_model_heads import LanguageModelHead
from allennlp_models.lm.util import BeamSearchGenerator
@Model.register("next_token_lm")
class NextTokenLM(Model):
"""
The `NextTokenLM` embeds some input tokens, contextualizes them, then predicts the next word,
computing a loss against known target.
If `BeamSearch` is given, this model will predict a sequence of next tokens.
!!! NOTE
This was developed for use in a demo, not for training. You *definitely* don't want to
train a language model using this code; it would be incredibly inefficient. But it does
compute correct gradients of the loss, however, so you can use it for interesting visualization
of the gradients of a pretrained model, and it appears to be fast enough to sample from, at
least for one word at a time.
# Parameters
vocab : `Vocabulary`
text_field_embedder : `TextFieldEmbedder`
Used to embed the indexed tokens we get in `forward`.
language_model_head : `LanguageModelHead`
The `torch.nn.Module` that goes from the hidden states output by the contextualizer to
logits over some output vocabulary.
contextualizer : `Seq2SeqEncoder`, optional (default=`None`)
Used to "contextualize" the embeddings. This is optional because the contextualization
might actually be done in the text field embedder.
target_namespace : `str`, optional (default=`'bert'`)
Namespace to use to convert predicted token ids to strings in
`Model.make_output_human_readable`.
dropout : `float`, optional (default=`0.0`)
If specified, dropout is applied to the contextualized embeddings before computation of
the softmax. The contextualized embeddings themselves are returned without dropout.
n_best : `int`, optional (default = `5`)
The number of best tokens to predict. If `beam_search` is given, this option is ignored.
beam_search_generator : `BeamSearchGenerator`, optional (default = `None`)
An optional `BeamSearchGenerator`. If given, the model will predict sequences of next
tokens instead of just a single next token.
"""
def __init__(
self,
vocab: Vocabulary,
text_field_embedder: TextFieldEmbedder,
language_model_head: LanguageModelHead,
contextualizer: Seq2SeqEncoder = None,
target_namespace: str = "bert",
dropout: float = 0.0,
initializer: InitializerApplicator = None,
n_best: int = 5,
beam_search_generator: BeamSearchGenerator = None,
**kwargs,
) -> None:
super().__init__(vocab, **kwargs)
self._text_field_embedder = text_field_embedder
self._contextualizer = contextualizer
if contextualizer:
check_dimensions_match(
text_field_embedder.get_output_dim(),
contextualizer.get_input_dim(),
"text field embedder output",
"contextualizer input",
)
self._language_model_head = language_model_head
self._target_namespace = target_namespace
self._perplexity = Perplexity()
self._dropout = torch.nn.Dropout(dropout)
self._n_best = n_best
self._beam_search_generator = beam_search_generator
# Ensure beam_search_generator is compatable with text_field_embedder.
if self._beam_search_generator is not None:
self._beam_search_generator.validate_text_field_embedder(self._text_field_embedder)
if initializer is not None:
initializer(self)
def forward( # type: ignore
self, tokens: TextFieldTensors, target_ids: TextFieldTensors = None
) -> Dict[str, torch.Tensor]:
"""
Run a forward pass of the model, returning an output tensor dictionary with
the following fields:
- `"probabilities"`: a tensor of shape `(batch_size, n_best)` representing
the probabilities of the predicted tokens, where `n_best`
is either `self._n_best` or `beam_size` if using beam search.
- `"top_indices"`: a tensor of shape `(batch_size, n_best, num_predicted_tokens)`
containing the IDs of the predicted tokens, where `num_predicted_tokens` is just
1 unless using beam search, in which case it depends on the parameters of the beam search.
- `"token_ids"`: a tensor of shape `(batch_size, num_input_tokens)` containing the IDs
of the input tokens.
- `"loss"` (optional): the loss of the batch, only given if `target_ids` is not `None`.
"""
output_dict = {
"token_ids": util.get_token_ids_from_text_field_tensors(tokens),
}
# Shape: (batch_size, vocab_size)
target_logits = self._next_token_scores(tokens)
# Compute loss.
if target_ids is not None:
batch_size, vocab_size = target_logits.size()
tmp = util.get_token_ids_from_text_field_tensors(target_ids)
# In some scenarios, target_ids might be a topk list of token ids (e.g. sorted by probabilities).
# Therefore, we need to make sure only one token per batch
# Assume: first token in each batch is the most desirable one (e.g. highest probability)
tmp = tmp[:, 0] if len(tmp.shape) == 2 else tmp
assert len(tmp.shape) <= 2
targets = tmp.view(batch_size)
loss = torch.nn.functional.cross_entropy(target_logits, targets)
self._perplexity(loss)
output_dict["loss"] = loss
if self._beam_search_generator is not None:
# Dummy start predictions.
# Shape: (batch_size,)
start_predictions = torch.zeros(
target_logits.size()[0], device=target_logits.device, dtype=torch.int
)
state = self._beam_search_generator.get_step_state(tokens)
# Put this in here to avoid having to re-compute on the first step of beam search.
state["start_target_logits"] = target_logits
# Shape (top_indices): (batch_size, beam_size, num_predicted_tokens)
# Shape (top_log_probs): (batch_size, beam_size)
top_indices, top_log_probs = self._beam_search_generator.search(
start_predictions, state, self._beam_search_step
)
# Shape: (batch_size, beam_size)
top_probs = top_log_probs.exp()
else:
# Shape: (batch_size, vocab_size)
probs = torch.nn.functional.softmax(target_logits, dim=-1)
# Shape (both): (batch_size, n_best)
# min here largely because tests use small vocab
top_probs, top_indices = probs.topk(k=min(target_logits.size(-1), self._n_best), dim=-1)
# Shape: (batch_size, n_best, 1)
top_indices = top_indices.unsqueeze(-1)
output_dict["top_indices"] = top_indices
output_dict["probabilities"] = top_probs
return output_dict
def _next_token_scores(self, tokens: TextFieldTensors) -> torch.Tensor:
"""
Get the unnormalized log probabilities of the potential next token.
"""
# Shape: (batch_size, num_tokens, embedding_dim)
embeddings = self._text_field_embedder(tokens)
# Shape: (batch_size, num_tokens, encoding_dim)
if self._contextualizer:
mask = util.get_text_field_mask(embeddings)
contextual_embeddings = self._contextualizer(embeddings, mask)
final_embeddings = util.get_final_encoder_states(contextual_embeddings, mask)
else:
final_embeddings = embeddings[:, -1]
# Shape: (batch_size, vocab_size)
return self._language_model_head(self._dropout(final_embeddings))
def _beam_search_step(
self, predicted_tokens: torch.Tensor, state: Dict[str, torch.Tensor], step: int
) -> Tuple[torch.Tensor, Dict[str, torch.Tensor]]:
"""
Step function to use with `BeamSearch`.
`predicted_tokens` is a tensor of shape `(group_size,)` and
`state` is a dictionary of tensors with the following fields:
- "token_ids": shape `(group_size, num_tokens)`
- "mask": shape `(group_size, num_tokens)`
- "type_ids": shape `(group_size, num_tokens)`
"""
assert self._beam_search_generator is not None
if step == 0:
# Shape: (group_size, vocab_size)
start_target_logits = state.pop("start_target_logits")
# Shape: (group_size, vocab_size)
start_target_log_probs = torch.nn.functional.log_softmax(start_target_logits, dim=-1)
return start_target_log_probs, state
inputs = self._beam_search_generator.prepare_step_input(predicted_tokens, state)
state = self._beam_search_generator.get_step_state(inputs)
# Shape: (group_size, vocab_size)
next_token_scores = self._next_token_scores(inputs)
# Shape: (group_size, vocab_size)
log_probs = torch.nn.functional.log_softmax(next_token_scores, dim=-1)
return log_probs, state
def get_metrics(self, reset: bool = False):
return {"perplexity": self._perplexity.get_metric(reset=reset)}
def make_output_human_readable(
self, output_dict: Dict[str, torch.Tensor]
) -> Dict[str, torch.Tensor]:
"""
Collects token strings from indices, adding two fields to the `output_dict`:
- `"top_tokens"`: a list (for each instance in the batch) of lists (for each of
the `n` best predictions) of lists of strings (for each token in each prediction).
- `"tokens"`: a list of list (for each instance in the batch) of strings (for each
input token in the instance).
"""
# Gather predicted words.
top_tokens = []
# shape (output_dict["top_indices"]): (batch_size, n_best, num_predicted_tokens)
for instance in output_dict["top_indices"]:
# shape (instance): (n_best, num_predicted_tokens)
instance_top_words = []
for indices in instance:
# shape (indices): (num_predicted_tokens,)
instance_top_words.append(
[
self.vocab.get_token_from_index(
index.item(), namespace=self._target_namespace
)
for index in indices
]
)
top_tokens.append(instance_top_words)
# Gather input tokens.
tokens = []
for instance_tokens in output_dict["token_ids"]:
tokens.append(
[
self.vocab.get_token_from_index(
token_id.item(), namespace=self._target_namespace
)
for token_id in instance_tokens
]
)
output_dict["top_tokens"] = top_tokens # type: ignore
output_dict["tokens"] = tokens # type: ignore
return output_dict
default_predictor = "next_token_lm"
| allennlp-models-main | allennlp_models/lm/models/next_token_lm.py |
from allennlp.data.vocabulary import Vocabulary
from allennlp.models.model import Model
from allennlp.modules.text_field_embedders import TextFieldEmbedder
from allennlp.modules.seq2seq_encoders import Seq2SeqEncoder
from allennlp.nn import InitializerApplicator
from allennlp_models.lm.models.language_model import LanguageModel
@Model.register("bidirectional-language-model")
class BidirectionalLanguageModel(LanguageModel):
"""
The `BidirectionalLanguageModel` applies a bidirectional "contextualizing"
`Seq2SeqEncoder` to uncontextualized embeddings, using a `SoftmaxLoss`
module (defined above) to compute the language modeling loss.
It is IMPORTANT that your bidirectional `Seq2SeqEncoder` does not do any
"peeking ahead". That is, for its forward direction it should only consider
embeddings at previous timesteps, and for its backward direction only embeddings
at subsequent timesteps. If this condition is not met, your language model is
cheating.
# Parameters
vocab : `Vocabulary`
text_field_embedder : `TextFieldEmbedder`
Used to embed the indexed tokens we get in `forward`.
contextualizer : `Seq2SeqEncoder`
Used to "contextualize" the embeddings. As described above,
this encoder must not cheat by peeking ahead.
dropout : `float`, optional (default=`None`)
If specified, dropout is applied to the contextualized embeddings before computation of
the softmax. The contextualized embeddings themselves are returned without dropout.
num_samples : `int`, optional (default=`None`)
If provided, the model will use `SampledSoftmaxLoss`
with the specified number of samples. Otherwise, it will use
the full `_SoftmaxLoss` defined above.
sparse_embeddings : `bool`, optional (default=`False`)
Passed on to `SampledSoftmaxLoss` if True.
"""
def __init__(
self,
vocab: Vocabulary,
text_field_embedder: TextFieldEmbedder,
contextualizer: Seq2SeqEncoder,
dropout: float = None,
num_samples: int = None,
sparse_embeddings: bool = False,
initializer: InitializerApplicator = None,
**kwargs,
) -> None:
super().__init__(
vocab=vocab,
text_field_embedder=text_field_embedder,
contextualizer=contextualizer,
dropout=dropout,
num_samples=num_samples,
sparse_embeddings=sparse_embeddings,
bidirectional=True,
initializer=initializer,
**kwargs,
)
| allennlp-models-main | allennlp_models/lm/models/bidirectional_lm.py |
# flake8: noqa: F403
from allennlp_models.lm.modules.seq2seq_encoders import *
from allennlp_models.lm.modules.language_model_heads import *
from allennlp_models.lm.modules.token_embedders import *
| allennlp-models-main | allennlp_models/lm/modules/__init__.py |
"""
The BidirectionalTransformerEncoder from Calypso.
This is basically the transformer from https://nlp.seas.harvard.edu/2018/04/03/attention.html
so credit to them.
This code should be considered "private" in that we have several
transformer implementations and may end up deleting this one.
If you use it, consider yourself warned.
"""
from typing import Tuple, Callable
import math
import warnings
import torch
import torch.nn.functional as F
from allennlp.common.checks import ExperimentalFeatureWarning
from allennlp.modules.layer_norm import LayerNorm
from allennlp.modules.seq2seq_encoders.seq2seq_encoder import Seq2SeqEncoder
from allennlp.common import Registrable
from allennlp.nn import util
def attention(
query: torch.Tensor,
key: torch.Tensor,
value: torch.Tensor,
mask: torch.BoolTensor = None,
dropout: Callable = None,
) -> Tuple[torch.Tensor, torch.Tensor]:
"""Compute 'Scaled Dot Product Attention'"""
d_k = query.size(-1)
scores = torch.matmul(query, key.transpose(-2, -1)) / math.sqrt(d_k)
if mask is not None:
scores = scores.masked_fill(~mask, util.min_value_of_dtype(scores.dtype))
p_attn = F.softmax(scores, dim=-1)
if dropout is not None:
p_attn = dropout(p_attn)
return torch.matmul(p_attn, value), p_attn
def subsequent_mask(size: int, device: str = "cpu") -> torch.BoolTensor:
"""Mask out subsequent positions."""
mask = torch.tril(torch.ones(size, size, device=device, dtype=torch.bool)).unsqueeze(0)
return mask
class PositionalEncoding(torch.nn.Module, Registrable):
"""Implement the Positional Encoding function."""
def __init__(self, input_dim: int, max_len: int = 5000) -> None:
super().__init__()
# Compute the positional encodings once in log space.
positional_encoding = torch.zeros(max_len, input_dim, requires_grad=False)
position = torch.arange(0, max_len).unsqueeze(1).float()
div_term = torch.exp(
torch.arange(0, input_dim, 2).float() * -(math.log(10000.0) / input_dim)
)
positional_encoding[:, 0::2] = torch.sin(position * div_term)
positional_encoding[:, 1::2] = torch.cos(position * div_term)
positional_encoding = positional_encoding.unsqueeze(0)
self.register_buffer("positional_encoding", positional_encoding)
def forward(self, x: torch.Tensor) -> torch.Tensor:
return x + self.positional_encoding[:, : x.size(1)]
class PositionwiseFeedForward(torch.nn.Module):
"""Implements FFN equation."""
def __init__(self, input_dim: int, ff_dim: int, dropout: float = 0.1) -> None:
super().__init__()
self.w_1 = torch.nn.Linear(input_dim, ff_dim)
self.w_2 = torch.nn.Linear(ff_dim, input_dim)
self.dropout = torch.nn.Dropout(dropout)
def forward(self, x: torch.Tensor) -> torch.Tensor:
return self.w_2(self.dropout(F.relu(self.w_1(x))))
class TransformerEncoder(torch.nn.Module):
"""Core encoder is a stack of N layers"""
def __init__(
self, layer: torch.nn.Module, num_layers: int, return_all_layers: bool = False
) -> None:
super().__init__()
self.layers = util.clone(layer, num_layers)
self.norm = LayerNorm(layer.size)
self.return_all_layers = return_all_layers
def forward(self, x, mask):
"""Pass the input (and mask) through each layer in turn."""
all_layers = []
for layer in self.layers:
x = layer(x, mask)
if self.return_all_layers:
all_layers.append(x)
if self.return_all_layers:
all_layers[-1] = self.norm(all_layers[-1])
return all_layers
return self.norm(x)
class SublayerConnection(torch.nn.Module):
"""
A residual connection followed by a layer norm.
Note for code simplicity the norm is first as opposed to last.
"""
def __init__(self, size: int, dropout: float) -> None:
super().__init__()
self.norm = LayerNorm(size)
self.dropout = torch.nn.Dropout(dropout)
def forward(
self, x: torch.Tensor, sublayer: Callable[[torch.Tensor], torch.Tensor]
) -> torch.Tensor:
"""Apply residual connection to any sublayer with the same size."""
return x + self.dropout(sublayer(self.norm(x)))
class EncoderLayer(torch.nn.Module):
"""Encoder is made up of self-attn and feed forward (defined below)"""
def __init__(
self, size: int, self_attn: torch.nn.Module, feed_forward: torch.nn.Module, dropout: float
) -> None:
super().__init__()
self.self_attn = self_attn
self.feed_forward = feed_forward
self.sublayer = util.clone(SublayerConnection(size, dropout), 2)
self.size = size
def forward(self, x: torch.Tensor, mask: torch.BoolTensor) -> torch.Tensor:
"""Follow Figure 1 (left) for connections."""
x = self.sublayer[0](x, lambda x: self.self_attn(x, x, x, mask))
return self.sublayer[1](x, self.feed_forward)
class MultiHeadedAttention(torch.nn.Module):
def __init__(self, num_heads: int, input_dim: int, dropout: float = 0.1) -> None:
super().__init__()
assert input_dim % num_heads == 0, "input_dim must be a multiple of num_heads"
# We assume d_v always equals d_k
self.d_k = input_dim // num_heads
self.num_heads = num_heads
# These linear layers are
# [query_projection, key_projection, value_projection, concatenated_heads_projection]
self.linears = util.clone(torch.nn.Linear(input_dim, input_dim), 4)
self.dropout = torch.nn.Dropout(p=dropout)
def forward(
self,
query: torch.Tensor,
key: torch.Tensor,
value: torch.Tensor,
mask: torch.BoolTensor = None,
) -> torch.Tensor:
if mask is not None:
# Same mask applied to all h heads.
# Shape (batch_size, num_heads, timesteps, timesteps)
mask = mask.unsqueeze(1).expand([-1, self.num_heads, -1, -1])
nbatches = query.size(0)
# 1) Do all the linear projections in batch from d_model => h x d_k
query, key, value = [
layer(x).view(nbatches, -1, self.num_heads, self.d_k).transpose(1, 2)
for layer, x in zip(self.linears, (query, key, value))
]
# 2) Apply attention on all the projected vectors in batch.
x, _ = attention(query, key, value, mask=mask, dropout=self.dropout)
# 3) "Concat" using a view and apply a final linear.
x = x.transpose(1, 2).contiguous().view(nbatches, -1, self.num_heads * self.d_k)
return self.linears[-1](x)
def make_model(
num_layers: int = 6,
input_size: int = 512, # Attention size
hidden_size: int = 2048, # FF layer size
heads: int = 8,
dropout: float = 0.1,
return_all_layers: bool = False,
) -> TransformerEncoder:
"""Helper: Construct a model from hyperparameters."""
attn = MultiHeadedAttention(heads, input_size, dropout)
ff = PositionwiseFeedForward(input_size, hidden_size, dropout)
model = TransformerEncoder(
EncoderLayer(input_size, attn, ff, dropout), num_layers, return_all_layers=return_all_layers
)
# Initialize parameters with Glorot / fan_avg.
for p in model.parameters():
if p.dim() > 1:
torch.nn.init.xavier_uniform_(p)
return model
@Seq2SeqEncoder.register("bidirectional_language_model_transformer")
class BidirectionalLanguageModelTransformer(Seq2SeqEncoder):
def __init__(
self,
input_dim: int,
hidden_dim: int,
num_layers: int,
dropout: float = 0.1,
input_dropout: float = None,
return_all_layers: bool = False,
) -> None:
warnings.warn(
"This particular transformer implementation is a provisional feature "
"that's intended for AI2 internal use and might be deleted at any time. "
"If you use it, consider yourself warned!",
ExperimentalFeatureWarning,
)
super().__init__()
self._return_all_layers = return_all_layers
self.transformer_layers = num_layers
self.num_layers = num_layers
self._forward_transformer = make_model(
input_size=input_dim,
hidden_size=hidden_dim,
num_layers=num_layers,
dropout=dropout,
return_all_layers=return_all_layers,
)
self._backward_transformer = make_model(
input_size=input_dim,
hidden_size=hidden_dim,
num_layers=num_layers,
dropout=dropout,
return_all_layers=return_all_layers,
)
self._position = PositionalEncoding(input_dim)
self.input_dim = input_dim
self.output_dim = 2 * input_dim
if input_dropout:
self._dropout = torch.nn.Dropout(input_dropout)
else:
self._dropout = lambda x: x
self.should_log_activations = False
def get_attention_masks(self, mask: torch.BoolTensor) -> Tuple[torch.Tensor, torch.Tensor]:
"""
Returns 2 masks of shape (batch_size, timesteps, timesteps) representing
1) non-padded elements, and
2) elements of the sequence which are permitted to be involved in attention at a given timestep.
"""
device = mask.device
# Forward case:
timesteps = mask.size(1)
# Shape (1, timesteps, timesteps)
subsequent = subsequent_mask(timesteps, device)
# Broadcasted logical and - we want zero
# elements where either we have padding from the mask,
# or we aren't allowed to use the timesteps.
# Shape (batch_size, timesteps, timesteps)
forward_mask = mask.unsqueeze(-1) & subsequent
# Backward case - exactly the same, but transposed.
backward_mask = forward_mask.transpose(1, 2)
return forward_mask, backward_mask
def forward(self, token_embeddings: torch.Tensor, mask: torch.BoolTensor) -> torch.Tensor:
forward_mask, backward_mask = self.get_attention_masks(mask)
token_embeddings = self._position(token_embeddings)
token_embeddings = self._dropout(token_embeddings)
forward_output = self._forward_transformer(token_embeddings, forward_mask)
backward_output = self._backward_transformer(token_embeddings, backward_mask)
if self._return_all_layers:
to_return = []
for forward, backward in zip(forward_output, backward_output):
to_return.append(torch.cat([forward, backward], -1))
return to_return
return torch.cat([forward_output, backward_output], -1)
def get_regularization_penalty(self):
return 0.0
def get_input_dim(self) -> int:
return self.input_dim
def get_output_dim(self) -> int:
return self.output_dim
def is_bidirectional(self) -> bool:
return True
| allennlp-models-main | allennlp_models/lm/modules/seq2seq_encoders/bidirectional_lm_transformer.py |
from allennlp_models.lm.modules.seq2seq_encoders.bidirectional_lm_transformer import (
BidirectionalLanguageModelTransformer,
)
| allennlp-models-main | allennlp_models/lm/modules/seq2seq_encoders/__init__.py |
from transformers.models.gpt2.modeling_gpt2 import GPT2Config, GPT2LMHeadModel
import torch
from .language_model_head import LanguageModelHead
@LanguageModelHead.register("gpt2")
class Gpt2LanguageModelHead(LanguageModelHead):
"""
Loads just the LM head from `transformers.GPT2LMHeadModel`. It was easiest to load
the entire model before only pulling out the head, so this is a bit slower than it could be,
but for practical use in a model, the few seconds of extra loading time is probably not a big
deal.
"""
def __init__(self, model_name: str) -> None:
super().__init__()
config = GPT2Config.from_pretrained(model_name)
self.input_dim = config.hidden_size
self.output_dim = config.vocab_size
# TODO(mattg): It's possible that we could use some kind of cache like we have in
# allennlp.modules.token_embedders.bert_token_embedder.PretrainedBertModel. That way, we
# would only load the GPT2 weights once. Though, it's not clear how to do that here, as we
# need to load `GPT2LMHeadModel`, not just `GPT2Model`...
gpt2_model = GPT2LMHeadModel.from_pretrained(model_name)
self.gpt2_lm_head = gpt2_model.lm_head
def get_input_dim(self) -> int:
return self.input_dim
def get_output_dim(self) -> int:
return self.output_dim
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
return self.gpt2_lm_head(hidden_states)
| allennlp-models-main | allennlp_models/lm/modules/language_model_heads/gpt2.py |
import torch
from allennlp.data import Vocabulary
from allennlp_models.lm.modules.language_model_heads.language_model_head import LanguageModelHead
@LanguageModelHead.register("linear")
class LinearLanguageModelHead(LanguageModelHead):
"""
Uses `torch.nn.Linear` as a language model head. Does nothing else fancy. This was intended
largely for testing code with small models and simple components. It's likely that you would
want something nicer for actually training a language model, such as tying weights with an
input embedding, or an adaptive softmax, or something.
"""
def __init__(self, vocab: Vocabulary, input_dim: int, vocab_namespace: str) -> None:
super().__init__()
self.input_dim = input_dim
self.output_dim = vocab.get_vocab_size(vocab_namespace)
if self.output_dim <= 0:
raise ValueError("We can't embed into an empty vocabulary.")
self.linear = torch.nn.Linear(self.input_dim, self.output_dim)
def get_input_dim(self) -> int:
return self.input_dim
def get_output_dim(self) -> int:
return self.output_dim
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
return self.linear(hidden_states)
| allennlp-models-main | allennlp_models/lm/modules/language_model_heads/linear.py |
from allennlp_models.lm.modules.language_model_heads.language_model_head import LanguageModelHead
from allennlp_models.lm.modules.language_model_heads.bert import BertLanguageModelHead
from allennlp_models.lm.modules.language_model_heads.gpt2 import Gpt2LanguageModelHead
from allennlp_models.lm.modules.language_model_heads.linear import LinearLanguageModelHead
| allennlp-models-main | allennlp_models/lm/modules/language_model_heads/__init__.py |
import torch
from allennlp.common import Registrable
class LanguageModelHead(torch.nn.Module, Registrable):
"""
A `LanguageModelHead` encapsulates a function that goes from some hidden state to logits over
a vocabulary.
"""
def get_input_dim(self) -> int:
raise NotImplementedError
def get_output_dim(self) -> int:
raise NotImplementedError
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: # type: ignore
raise NotImplementedError
| allennlp-models-main | allennlp_models/lm/modules/language_model_heads/language_model_head.py |
from transformers.models.bert.modeling_bert import BertConfig, BertForMaskedLM
import torch
from .language_model_head import LanguageModelHead
@LanguageModelHead.register("bert")
class BertLanguageModelHead(LanguageModelHead):
"""
Loads just the LM head from `transformers.BertForMaskedLM`. It was easiest to load
the entire model before only pulling out the head, so this is a bit slower than it could be,
but for practical use in a model, the few seconds of extra loading time is probably not a big
deal.
"""
def __init__(self, model_name: str) -> None:
super().__init__()
config = BertConfig.from_pretrained(model_name)
self.input_dim = config.hidden_size
self.output_dim = config.vocab_size
# TODO(mattg): It's possible that we could use some kind of cache like we have in
# allennlp.modules.token_embedders.bert_token_embedder.PretrainedBertModel. That way, we
# would only load the BERT weights once. Though, it's not clear how to do that here, as we
# need to load `BertForMaskedLM`, not just `BertModel`...
bert_model = BertForMaskedLM.from_pretrained(model_name)
self.bert_lm_head = bert_model.cls
def get_input_dim(self) -> int:
return self.input_dim
def get_output_dim(self) -> int:
return self.output_dim
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
return self.bert_lm_head(hidden_states)
| allennlp-models-main | allennlp_models/lm/modules/language_model_heads/bert.py |
import json
from typing import Dict, Tuple, TYPE_CHECKING
import torch
from allennlp.common import Params
from allennlp.common.checks import ConfigurationError
from allennlp.data import TokenIndexer, Token
from allennlp.modules import TextFieldEmbedder
from allennlp.modules.scalar_mix import ScalarMix
from allennlp.modules.text_field_embedders import BasicTextFieldEmbedder
from allennlp.modules.token_embedders import EmptyEmbedder
from allennlp.modules.token_embedders.token_embedder import TokenEmbedder
from allennlp.nn.util import (
remove_sentence_boundaries,
get_text_field_mask,
add_sentence_boundary_token_ids,
)
# Importing at runtime results in a circular dependency.
if TYPE_CHECKING:
from allennlp_models.lm.models.language_model import LanguageModel
@TokenEmbedder.register("language_model_token_embedder")
class LanguageModelTokenEmbedder(TokenEmbedder):
"""
Compute a single layer of representations from a (optionally bidirectional)
language model. This is done by computing a learned scalar
average of the layers from the LM. Typically the LM's weights
will be fixed, but they can be fine tuned by setting `requires_grad`.
# Parameters
archive_file : `str`, required
An archive file, typically model.tar.gz, from a LanguageModel.
The contextualizer used by the LM must satisfy two requirements:
1. It must have a num_layers field.
2. It must take a boolean return_all_layers parameter in its constructor.
See BidirectionalLanguageModelTransformer for their definitions.
dropout : `float`, optional.
The dropout value to be applied to the representations.
bos_eos_tokens : `Tuple[str, str]`, optional (default=`("<S>", "</S>")`)
These will be indexed and placed around the indexed tokens. Necessary if the language model
was trained with them, but they were injected external to an indexer.
remove_bos_eos : `bool`, optional (default = `True`)
Typically the provided token indexes will be augmented with begin-sentence and end-sentence
tokens. (Alternatively, you can pass bos_eos_tokens.) If this flag is True the
corresponding embeddings will be removed from the return values.
Warning: This only removes a single start and single end token!
requires_grad : `bool`, optional (default = `False`)
If True, compute gradient of bidirectional language model parameters for fine tuning.
"""
def __init__(
self,
archive_file: str,
dropout: float = None,
bos_eos_tokens: Tuple[str, str] = ("<S>", "</S>"),
remove_bos_eos: bool = True,
requires_grad: bool = False,
) -> None:
super().__init__()
overrides = {"model.contextualizer.return_all_layers": True}
# Import here to avoid circular dependency.
from allennlp.models.archival import load_archive
# Load LM and the associated config.
archive = load_archive(archive_file, overrides=json.dumps(overrides))
self._lm: LanguageModel = archive.model
self._lm.delete_softmax()
config = archive.config
dict_config = config.as_dict(quiet=True)
# Extract the name of the tokens that the LM was trained on.
text_field_embedder = dict_config["model"]["text_field_embedder"]
text_field_embedder = TextFieldEmbedder.from_params(Params(text_field_embedder))
if not isinstance(text_field_embedder, BasicTextFieldEmbedder):
raise ConfigurationError(
f"Language model from {archive_file} uses a non-standard TextFieldEmbedder!"
)
non_empty_embedders = [
name
for name, token_embedder in text_field_embedder._token_embedders.items()
if not isinstance(token_embedder, EmptyEmbedder)
]
if len(non_empty_embedders) == 0:
# Only empty embedders were contained in the language model
# We need at least one non-empty embedder in the language model
raise ConfigurationError(
f"Language model from {archive_file} trained with only empty embedders!"
)
elif len(non_empty_embedders) > 1:
raise ConfigurationError(
f"Language model from {archive_file} trained with multiple non-empty embedders!"
)
self._token_name = non_empty_embedders[0]
# TODO(brendanr): Find a way to remove this hack. The issue fundamentally is that the
# BasicTextFieldEmbedder concatenates multiple embedded representations. When a
# downstream model uses both, tokens and token characters, say, and only adds bos/eos
# tokens to the token characters, the dimensions don't match. See:
# https://github.com/allenai/allennlp/blob/eff25a3085aa9976a7650d30d8961c3626ddc411/allennlp/modules/text_field_embedders/basic_text_field_embedder.py#L109
#
# For the equivalent hack in the ELMo embedder see:
# https://github.com/allenai/allennlp/blob/eff25a3085aa9976a7650d30d8961c3626ddc411/allennlp/modules/elmo.py#L590
if bos_eos_tokens:
dataset_reader_config = config.get("dataset_reader")
token_indexer_config = dataset_reader_config.get("token_indexers").get(self._token_name)
token_indexer: TokenIndexer = TokenIndexer.from_params(token_indexer_config)
token_list = [Token(token) for token in bos_eos_tokens]
# TODO(brendanr): Obtain these indices from the vocab once the
# ELMoTokenCharactersIndexer adds the mappings.
bos_eos_indices = token_indexer.tokens_to_indices(token_list, self._lm.vocab)[
"elmo_tokens"
]
self._bos_indices = torch.LongTensor(bos_eos_indices[0])
self._eos_indices = torch.LongTensor(bos_eos_indices[1])
else:
self._bos_indices = None
self._eos_indices = None
if dropout:
self._dropout = torch.nn.Dropout(dropout)
else:
self._dropout = lambda x: x
self._remove_bos_eos = remove_bos_eos
num_layers = self._lm.num_layers()
# TODO(brendanr): Consider passing our LM as a custom module to `Elmo` instead.
# See https://github.com/allenai/allennlp/blob/master/allennlp/modules/elmo.py#L76
self._scalar_mix = ScalarMix(mixture_size=num_layers, do_layer_norm=False, trainable=True)
character_dim = self._lm._text_field_embedder.get_output_dim()
contextual_dim = self._lm._contextualizer.get_output_dim()
if contextual_dim % character_dim != 0:
raise ConfigurationError(
"The output dimensions for the text_field_embedder "
+ f"({character_dim}) and the contextualizer ({contextual_dim})"
+ f" from the language model loaded from {archive_file} are "
+ "not compatible. Please check the config used to train that "
+ "model and ensure that the output dimension of the "
+ "text_field_embedder divides the output dimension of the "
+ "contextualizer."
)
self._character_embedding_duplication_count = contextual_dim // character_dim
for param in self._lm.parameters():
param.requires_grad = requires_grad
def get_output_dim(self) -> int:
return self._lm._contextualizer.get_output_dim()
def forward(
self, # type: ignore
tokens: torch.Tensor,
) -> Dict[str, torch.Tensor]:
"""
# Parameters
tokens : `torch.Tensor`
Shape `(batch_size, timesteps, ...)` of token ids representing the current batch.
These must have been produced using the same indexer the LM was trained on.
# Returns
The bidirectional language model representations for the input sequence, shape
`(batch_size, timesteps, embedding_dim)`
"""
if self._bos_indices is not None:
num_wrapping_dims = max(tokens.dim() - 2, 0)
mask = get_text_field_mask({"": {"": tokens}}, num_wrapping_dims=num_wrapping_dims)
tokens, mask = add_sentence_boundary_token_ids(
tokens, mask, self._bos_indices, self._eos_indices
)
source = {self._token_name: {"token_characters": tokens}}
result_dict = self._lm(source)
# shape (batch_size, timesteps, embedding_size)
noncontextual_token_embeddings = result_dict["noncontextual_token_embeddings"]
contextual_embeddings = result_dict["lm_embeddings"]
# Typically the non-contextual embeddings are smaller than the contextualized embeddings.
# Since we're averaging all the layers we need to make their dimensions match. Simply
# repeating the non-contextual embeddings is a crude, but effective, way to do this.
duplicated_character_embeddings = torch.cat(
[noncontextual_token_embeddings] * self._character_embedding_duplication_count, -1
)
averaged_embeddings = self._scalar_mix(
[duplicated_character_embeddings] + contextual_embeddings
)
# Add dropout
averaged_embeddings = self._dropout(averaged_embeddings)
if self._remove_bos_eos:
averaged_embeddings, _ = remove_sentence_boundaries(
averaged_embeddings, result_dict["mask"]
)
return averaged_embeddings
| allennlp-models-main | allennlp_models/lm/modules/token_embedders/language_model.py |
from allennlp_models.lm.modules.token_embedders.language_model import LanguageModelTokenEmbedder
from allennlp_models.lm.modules.token_embedders.bidirectional_lm import (
BidirectionalLanguageModelTokenEmbedder,
)
| allennlp-models-main | allennlp_models/lm/modules/token_embedders/__init__.py |
from typing import Tuple
from allennlp.modules.token_embedders.token_embedder import TokenEmbedder
from allennlp_models.lm.modules.token_embedders.language_model import LanguageModelTokenEmbedder
@TokenEmbedder.register("bidirectional_lm_token_embedder")
class BidirectionalLanguageModelTokenEmbedder(LanguageModelTokenEmbedder):
"""
Compute a single layer of representations from a bidirectional language model. This is done
by computing a learned scalar average of the layers from the LM. Typically the LM's weights
will be fixed, but they can be fine tuned by setting `requires_grad`.
# Parameters
archive_file : `str`, required
An archive file, typically model.tar.gz, from a BidirectionalLanguageModel. The
contextualizer used by the LM must satisfy two requirements:
1. It must have a num_layers field.
2. It must take a boolean return_all_layers parameter in its constructor.
See BidirectionalLanguageModelTransformer for their definitions.
dropout : `float`, optional.
The dropout value to be applied to the representations.
bos_eos_tokens : `Tuple[str, str]`, optional (default=`("<S>", "</S>")`)
These will be indexed and placed around the indexed tokens. Necessary if the language model
was trained with them, but they were injected external to an indexer.
remove_bos_eos : `bool`, optional (default = `True`)
Typically the provided token indexes will be augmented with begin-sentence and end-sentence
tokens. (Alternatively, you can pass bos_eos_tokens.) If this flag is True the
corresponding embeddings will be removed from the return values.
Warning: This only removes a single start and single end token!
requires_grad : `bool`, optional (default=`False`)
If True, compute gradient of bidirectional language model parameters for fine tuning.
"""
def __init__(
self,
archive_file: str,
dropout: float = None,
bos_eos_tokens: Tuple[str, str] = ("<S>", "</S>"),
remove_bos_eos: bool = True,
requires_grad: bool = False,
) -> None:
super().__init__(
archive_file=archive_file,
dropout=dropout,
bos_eos_tokens=bos_eos_tokens,
remove_bos_eos=remove_bos_eos,
requires_grad=requires_grad,
)
| allennlp-models-main | allennlp_models/lm/modules/token_embedders/bidirectional_lm.py |
# flake8: noqa: F403
from allennlp_models.vision.models import *
from allennlp_models.vision.predictors import *
from allennlp_models.vision.dataset_readers import *
from allennlp_models.vision.metrics import *
| allennlp-models-main | allennlp_models/vision/__init__.py |
from allennlp_models.vision.metrics.vqa import VqaMeasure
| allennlp-models-main | allennlp_models/vision/metrics/__init__.py |
import torch
from allennlp.training.metrics.metric import Metric
import torch.distributed as dist
@Metric.register("vqa")
class VqaMeasure(Metric):
"""Compute the VQA metric, as described in
https://www.semanticscholar.org/paper/VQA%3A-Visual-Question-Answering-Agrawal-Lu/97ad70a9fa3f99adf18030e5e38ebe3d90daa2db
In VQA, we take the answer with the highest score, and then we find out how often
humans decided this was the right answer. The accuracy score for an answer is
`min(1.0, human_count / 3)`.
This metric takes the logits from the models, i.e., a score for each possible answer,
and the labels for the question, together with their weights.
"""
def __init__(self) -> None:
self._sum_of_scores = 0.0
self._score_count = 0
def __call__(self, logits: torch.Tensor, labels: torch.Tensor, label_weights: torch.Tensor):
"""
# Parameters
logits : `torch.Tensor`, required.
A tensor of predictions of shape (batch_size, num_classes).
labels : `torch.Tensor`, required.
A tensor of integer class label of shape (batch_size, num_labels).
label_weights : `torch.Tensor`, required.
A tensor of floats of shape (batch_size, num_labels), giving a weight or score to
every one of the labels.
"""
logits, labels, label_weights = self.detach_tensors(logits, labels, label_weights)
predictions = logits.argmax(dim=1)
# Sum over dimension 1 gives the score per question. We care about the overall sum though,
# so we sum over all dimensions.
local_sum_of_scores = (
(label_weights * (labels == predictions.unsqueeze(-1))).sum().to(torch.float32)
)
local_score_count = torch.tensor(labels.size(0), dtype=torch.int32, device=labels.device)
from allennlp.common.util import is_distributed
if is_distributed():
dist.all_reduce(local_sum_of_scores, op=dist.ReduceOp.SUM)
dist.all_reduce(local_score_count, op=dist.ReduceOp.SUM)
self._sum_of_scores += local_sum_of_scores.item()
self._score_count += local_score_count.item()
def get_metric(self, reset: bool = False):
if self._score_count > 0:
result = self._sum_of_scores / self._score_count
else:
result = 0.0
result_dict = {"score": result}
if reset:
self.reset()
return result_dict
def reset(self) -> None:
self._sum_of_scores = 0.0
self._score_count = 0
| allennlp-models-main | allennlp_models/vision/metrics/vqa.py |
import logging
from os import PathLike
from typing import (
Dict,
# List,
Union,
Optional,
Tuple,
Iterable,
)
import json
import torch
from torch import Tensor
from allennlp.common.lazy import Lazy
from allennlp.common.file_utils import cached_path
from allennlp.data.vocabulary import Vocabulary
from allennlp.data.dataset_readers.dataset_reader import DatasetReader
from allennlp.data.fields import Field, ArrayField, LabelField, ListField, TextField
from allennlp.data.image_loader import ImageLoader
from allennlp.data.instance import Instance
from allennlp.data.token_indexers import TokenIndexer
from allennlp.data.tokenizers import Tokenizer
from allennlp.modules.vision.grid_embedder import GridEmbedder
from allennlp.modules.vision.region_detector import RegionDetector
from allennlp_models.vision.dataset_readers import utils
from allennlp_models.vision.dataset_readers.vision_reader import VisionReader
logger = logging.getLogger(__name__)
@DatasetReader.register("vgqa")
class VGQAReader(VisionReader):
"""
Parameters
----------
image_dir: `str`
Path to directory containing `png` image files.
image_loader: `ImageLoader`
The image loader component used to load the images.
image_featurizer: `Lazy[GridEmbedder]`
The backbone image processor (like a ResNet), whose output will be passed to the region
detector for finding object boxes in the image.
region_detector: `Lazy[RegionDetector]`
For pulling out regions of the image (both coordinates and features) that will be used by
downstream models.
answer_vocab: `Union[Vocabulary, str]`, optional
The vocabulary to use for answers. The reader will look into the `"answers"` namespace
in the vocabulary to find possible answers.
If this is given, the reader only outputs instances with answers contained in this vocab.
If this is not given, the reader outputs all instances with all answers.
If this is a URL or filename, we will download a previously saved vocabulary from there.
feature_cache_dir: `Union[str, PathLike]`, optional
An optional directory to cache the featurized images in. Featurizing images takes a long
time, and many images are duplicated, so we highly recommend to use this cache.
tokenizer: `Tokenizer`, optional
The `Tokenizer` to use to tokenize the text. By default, this uses the tokenizer for
`"bert-base-uncased"`.
token_indexers: `Dict[str, TokenIndexer]`, optional
The `TokenIndexer` to use. By default, this uses the indexer for `"bert-base-uncased"`.
cuda_device: `Union[int, torch.device]`, optional
Either a torch device or a GPU number. This is the GPU we'll use to featurize the images.
max_instances: `int`, optional
For debugging, you can use this parameter to limit the number of instances the reader
returns.
image_processing_batch_size: `int`
The number of images to process at one time while featurizing. Default is 8.
"""
def __init__(
self,
image_dir: Optional[Union[str, PathLike]] = None,
*,
image_loader: Optional[ImageLoader] = None,
image_featurizer: Optional[Lazy[GridEmbedder]] = None,
region_detector: Optional[Lazy[RegionDetector]] = None,
answer_vocab: Optional[Union[Vocabulary, str]] = None,
feature_cache_dir: Optional[Union[str, PathLike]] = None,
tokenizer: Optional[Tokenizer] = None,
token_indexers: Optional[Dict[str, TokenIndexer]] = None,
cuda_device: Optional[Union[int, torch.device]] = None,
max_instances: Optional[int] = None,
image_processing_batch_size: int = 8,
write_to_cache: bool = True,
) -> None:
run_featurization = image_loader and image_featurizer and region_detector
if image_dir is None and run_featurization:
raise ValueError(
"Because of the size of the image datasets, we don't download them automatically. "
"Please go to https://visualgenome.org/api/v0/api_home.html, download the datasets you need, "
"and set the image_dir parameter to point to your download location. This dataset "
"reader does not care about the exact directory structure. It finds the images "
"wherever they are."
)
super().__init__(
image_dir,
image_loader=image_loader,
image_featurizer=image_featurizer,
region_detector=region_detector,
feature_cache_dir=feature_cache_dir,
tokenizer=tokenizer,
token_indexers=token_indexers,
cuda_device=cuda_device,
max_instances=max_instances,
image_processing_batch_size=image_processing_batch_size,
write_to_cache=write_to_cache,
)
# read answer vocab
if answer_vocab is None:
self.answer_vocab = None
else:
if isinstance(answer_vocab, str):
answer_vocab = cached_path(answer_vocab, extract_archive=True)
answer_vocab = Vocabulary.from_files(answer_vocab)
self.answer_vocab = frozenset(
utils.preprocess_answer(a)
for a in answer_vocab.get_token_to_index_vocabulary("answers").keys()
)
def _read(self, file_path: str):
# if the splits are using slicing syntax, honor it
question_slice, file_path = utils.get_data_slice(file_path)
file_path = cached_path(file_path, extract_archive=True)
logger.info("Reading file at %s", file_path)
questions = []
with open(file_path) as dataset_file:
dataset = json.load(dataset_file)
for data in dataset:
for qa in data["qas"]:
questions.append(qa)
questions = questions[question_slice]
question_dicts = list(self.shard_iterable(questions))
processed_images: Iterable[
Optional[Tuple[Tensor, Tensor, Optional[Tensor], Optional[Tensor]]]
]
if self.produce_featurized_images:
# It would be much easier to just process one image at a time, but it's faster to process
# them in batches. So this code gathers up instances until it has enough to fill up a batch
# that needs processing, and then processes them all.
filenames = [f"{question_dict['image_id']}.jpg" for question_dict in question_dicts]
logger.info("images size: %s", len(self.images))
try:
processed_images = self._process_image_paths(
self.images[filename] for filename in filenames
)
except KeyError as e:
print(self.images)
missing_id = e.args[0]
raise KeyError(
missing_id,
f"We could not find an image with the id {missing_id}. "
"Because of the size of the image datasets, we don't download them automatically. "
"Please go to https://visualqa.org/download.html, download the datasets you need, "
"and set the image_dir parameter to point to your download location. This dataset "
"reader does not care about the exact directory structure. It finds the images "
"wherever they are.",
)
else:
processed_images = [None for _ in range(len(question_dicts))]
logger.info("Reading the dataset")
failed_instances_count = 0
attempted_instances_count = 0
for qa, processed_image in zip(question_dicts, processed_images):
question = qa["question"]
answer = utils.preprocess_answer(qa["answer"])
qa_id = qa["qa_id"]
instance = self.text_to_instance(
qa_id,
question,
answer,
processed_image,
)
attempted_instances_count += 1
if instance is not None:
yield instance
else:
failed_instances_count += 1
failed_instances_fraction = failed_instances_count / attempted_instances_count
logger.warning(f"{failed_instances_fraction*100:.0f}% of instances failed.")
def text_to_instance(
self, # type: ignore
qa_id: int,
question: str,
answer: Optional[str],
image: Union[str, Tuple[Tensor, Tensor, Optional[Tensor], Optional[Tensor]]],
use_cache: bool = True,
keep_impossible_questions: bool = True,
) -> Optional[Instance]:
question_field = TextField(self._tokenizer.tokenize(question), None)
fields: Dict[str, Field] = {
"question": question_field,
}
if isinstance(image, str):
features, coords, _, _ = next(self._process_image_paths([image], use_cache=use_cache))
else:
features, coords, _, _ = image
fields["box_features"] = ArrayField(features)
fields["box_coordinates"] = ArrayField(coords)
fields["box_mask"] = ArrayField(
features.new_ones((features.shape[0],), dtype=torch.bool),
padding_value=False,
dtype=torch.bool,
)
if answer is not None:
labels_fields = []
weights = []
if (not self.answer_vocab or answer in self.answer_vocab) or keep_impossible_questions:
labels_fields.append(LabelField(answer, label_namespace="answers"))
weights.append(1.0)
if len(labels_fields) <= 0:
return None
fields["label_weights"] = ArrayField(torch.tensor(weights))
fields["labels"] = ListField(labels_fields)
return Instance(fields)
def apply_token_indexers(self, instance: Instance) -> None:
instance["question"].token_indexers = self._token_indexers # type: ignore
| allennlp-models-main | allennlp_models/vision/dataset_readers/vgqa.py |
from os import PathLike
from typing import (
Dict,
Union,
Optional,
Tuple,
Iterable,
)
import json
import os
import torch
from torch import Tensor
from allennlp.common.file_utils import cached_path
from allennlp.common.lazy import Lazy
from allennlp.data.vocabulary import Vocabulary
from allennlp.data.dataset_readers.dataset_reader import DatasetReader
from allennlp.data.fields import ArrayField, LabelField, ListField, TextField
from allennlp.data.image_loader import ImageLoader
from allennlp.data.instance import Instance
from allennlp.data.token_indexers import TokenIndexer
from allennlp.data.tokenizers import Tokenizer
from allennlp.modules.vision.grid_embedder import GridEmbedder
from allennlp.modules.vision.region_detector import RegionDetector
from allennlp_models.vision.dataset_readers.vision_reader import VisionReader
@DatasetReader.register("gqa")
class GQAReader(VisionReader):
"""
Parameters
----------
image_dir: `str`
Path to directory containing `png` image files.
image_loader : `ImageLoader`
image_featurizer: `Lazy[GridEmbedder]`
The backbone image processor (like a ResNet), whose output will be passed to the region
detector for finding object boxes in the image.
region_detector: `Lazy[RegionDetector]`
For pulling out regions of the image (both coordinates and features) that will be used by
downstream models.
data_dir: `str`
Path to directory containing text files for each dataset split. These files contain
the sentences and metadata for each task instance.
tokenizer: `Tokenizer`, optional
token_indexers: `Dict[str, TokenIndexer]`
"""
def __init__(
self,
image_dir: Union[str, PathLike],
*,
image_loader: Optional[ImageLoader] = None,
image_featurizer: Optional[Lazy[GridEmbedder]] = None,
region_detector: Optional[Lazy[RegionDetector]] = None,
answer_vocab: Optional[Union[str, Vocabulary]] = None,
feature_cache_dir: Optional[Union[str, PathLike]] = None,
data_dir: Optional[Union[str, PathLike]] = None,
tokenizer: Tokenizer = None,
token_indexers: Dict[str, TokenIndexer] = None,
cuda_device: Optional[Union[int, torch.device]] = None,
max_instances: Optional[int] = None,
image_processing_batch_size: int = 8,
write_to_cache: bool = True,
) -> None:
super().__init__(
image_dir,
image_loader=image_loader,
image_featurizer=image_featurizer,
region_detector=region_detector,
feature_cache_dir=feature_cache_dir,
tokenizer=tokenizer,
token_indexers=token_indexers,
cuda_device=cuda_device,
max_instances=max_instances,
image_processing_batch_size=image_processing_batch_size,
write_to_cache=write_to_cache,
)
self.data_dir = data_dir
# read answer vocab
if answer_vocab is None:
self.answer_vocab = None
else:
if isinstance(answer_vocab, str):
answer_vocab = cached_path(answer_vocab, extract_archive=True)
answer_vocab = Vocabulary.from_files(answer_vocab)
self.answer_vocab = frozenset(
answer_vocab.get_token_to_index_vocabulary("answers").keys()
)
def _read(self, split_or_filename: str):
if not self.data_dir:
self.data_dir = "https://nlp.stanford.edu/data/gqa/questions1.2.zip!"
splits = {
"challenge_all": f"{self.data_dir}challenge_all_questions.json",
"challenge_balanced": f"{self.data_dir}challenge_balanced_questions.json",
"test_all": f"{self.data_dir}test_all_questions.json",
"test_balanced": f"{self.data_dir}test_balanced_questions.json",
"testdev_all": f"{self.data_dir}testdev_all_questions.json",
"testdev_balanced": f"{self.data_dir}testdev_balanced_questions.json",
"train_balanced": f"{self.data_dir}train_balanced_questions.json",
"train_all": f"{self.data_dir}train_all_questions",
"val_all": f"{self.data_dir}val_all_questions.json",
"val_balanced": f"{self.data_dir}val_balanced_questions.json",
}
filename = splits.get(split_or_filename, split_or_filename)
filename = cached_path(filename, extract_archive=True)
# If we're considering a directory of files (such as train_all)
# loop through each in file in generator
if os.path.isdir(filename):
files = [os.path.join(filename, file_path) for file_path in os.listdir(filename)]
else:
files = [filename]
# Ensure order is deterministic.
files.sort()
for data_file in files:
with open(data_file) as f:
questions_with_annotations = json.load(f)
question_dicts = list(
self.shard_iterable(
questions_with_annotations[q_id] for q_id in questions_with_annotations
)
)
processed_images: Iterable[
Optional[Tuple[Tensor, Tensor, Optional[Tensor], Optional[Tensor]]]
]
if self.produce_featurized_images:
# It would be much easier to just process one image at a time, but it's faster to process
# them in batches. So this code gathers up instances until it has enough to fill up a batch
# that needs processing, and then processes them all.
filenames = [f"{question_dict['imageId']}.jpg" for question_dict in question_dicts]
try:
processed_images = self._process_image_paths(
self.images[filename] for filename in filenames
)
except KeyError as e:
missing_filename = e.args[0]
raise KeyError(
missing_filename,
f"We could not find an image with the name {missing_filename}. "
"Because of the size of the image datasets, we don't download them automatically. "
"Please download the images from"
"https://nlp.stanford.edu/data/gqa/images.zip, "
"extract them into a directory, and set the image_dir parameter to point to that "
"directory. This dataset reader does not care about the exact directory structure. It "
"finds the images wherever they are.",
)
else:
processed_images = [None] * len(question_dicts)
for question_dict, processed_image in zip(question_dicts, processed_images):
answer = {
"answer": question_dict["answer"],
}
instance = self.text_to_instance(question_dict["question"], processed_image, answer)
if instance is not None:
yield instance
def text_to_instance(
self, # type: ignore
question: str,
image: Optional[Union[str, Tuple[Tensor, Tensor, Optional[Tensor], Optional[Tensor]]]],
answer: Optional[Dict[str, str]] = None,
*,
use_cache: bool = True,
) -> Optional[Instance]:
from allennlp.data import Field
tokenized_question = self._tokenizer.tokenize(question)
fields: Dict[str, Field] = {"question": TextField(tokenized_question, None)}
if answer is not None:
labels_fields = []
weights = []
if not self.answer_vocab or answer["answer"] in self.answer_vocab:
labels_fields.append(LabelField(answer["answer"], label_namespace="answers"))
weights.append(1.0)
if len(labels_fields) <= 0:
return None
fields["label_weights"] = ArrayField(torch.tensor(weights))
fields["labels"] = ListField(labels_fields)
if image is not None:
if isinstance(image, str):
features, coords, _, _ = next(
self._process_image_paths([image], use_cache=use_cache)
)
else:
features, coords, _, _ = image
fields["box_features"] = ArrayField(features)
fields["box_coordinates"] = ArrayField(coords)
fields["box_mask"] = ArrayField(
features.new_ones((features.shape[0],), dtype=torch.bool),
padding_value=False,
dtype=torch.bool,
)
return Instance(fields)
def apply_token_indexers(self, instance: Instance) -> None:
instance["question"].token_indexers = self._token_indexers # type: ignore
| allennlp-models-main | allennlp_models/vision/dataset_readers/gqa.py |
from os import PathLike
from pathlib import Path
import logging
from typing import (
Any,
Dict,
Iterable,
List,
MutableMapping,
Optional,
Tuple,
Union,
)
import os
import tqdm
import torch
from torch import Tensor
import transformers
from random import randint
from allennlp.common.file_utils import cached_path
from allennlp.common.lazy import Lazy
from allennlp.common import util
from allennlp.common.file_utils import TensorCache
from allennlp.data.dataset_readers.dataset_reader import DatasetReader
from allennlp.data.fields import ArrayField, LabelField, ListField, TextField, TensorField
from allennlp.data.image_loader import ImageLoader
from allennlp.data.instance import Instance
from allennlp.data.token_indexers import TokenIndexer
from allennlp.data.tokenizers import Tokenizer
from allennlp.modules.vision.grid_embedder import GridEmbedder
from allennlp.modules.vision.region_detector import RegionDetector
from allennlp_models.vision.dataset_readers import utils
from allennlp_models.vision.dataset_readers.vision_reader import VisionReader
logger = logging.getLogger(__name__)
# Parse caption file
def get_caption_data(filename: str):
with open(filename, "r") as f:
captions = f.read().split("\n")
image_id = os.path.splitext(os.path.basename(filename))[0]
result_captions = []
for caption in captions:
if not caption:
continue
words = []
add_to_phrase = False
for token in caption.split():
if add_to_phrase:
if token[-1] == "]":
add_to_phrase = False
token = token[:-1]
words.append(token)
else:
if token[0] == "[":
add_to_phrase = True
else:
words.append(token)
result_captions.append(utils.preprocess_answer(" ".join(words)))
caption_data = {"image_id": image_id, "captions": result_captions}
return caption_data
@DatasetReader.register("flickr30k")
class Flickr30kReader(VisionReader):
"""
Parameters
----------
image_dir: `str`
Path to directory containing `png` image files.
image_loader : `ImageLoader`
image_featurizer: `Lazy[GridEmbedder]`
The backbone image processor (like a ResNet), whose output will be passed to the region
detector for finding object boxes in the image.
region_detector: `Lazy[RegionDetector]`
For pulling out regions of the image (both coordinates and features) that will be used by
downstream models.
data_dir: `str`
Path to directory containing text files for each dataset split. These files contain
the captions and metadata for each task instance.
tokenizer: `Tokenizer`, optional
token_indexers: `Dict[str, TokenIndexer]`
featurize_captions: `bool`, optional
If we should featurize captions while calculating hard negatives, or use placeholder features.
is_evaluation: `bool`, optional
If the reader should return instances for evaluation or training.
num_potential_hard_negatives: int, optional
The number of potential hard negatives to consider.
"""
def __init__(
self,
image_dir: Union[str, PathLike],
*,
image_loader: Optional[ImageLoader] = None,
image_featurizer: Optional[Lazy[GridEmbedder]] = None,
region_detector: Optional[Lazy[RegionDetector]] = None,
feature_cache_dir: Optional[Union[str, PathLike]] = None,
data_dir: Optional[Union[str, PathLike]] = None,
tokenizer: Tokenizer = None,
token_indexers: Dict[str, TokenIndexer] = None,
cuda_device: Optional[Union[int, torch.device]] = None,
max_instances: Optional[int] = None,
image_processing_batch_size: int = 8,
write_to_cache: bool = True,
featurize_captions: bool = True,
is_evaluation: bool = False,
num_potential_hard_negatives: int = 100,
) -> None:
super().__init__(
image_dir,
image_loader=image_loader,
image_featurizer=image_featurizer,
region_detector=region_detector,
feature_cache_dir=feature_cache_dir,
tokenizer=tokenizer,
token_indexers=token_indexers,
cuda_device=cuda_device,
max_instances=max_instances,
image_processing_batch_size=image_processing_batch_size,
write_to_cache=write_to_cache,
manual_distributed_sharding=False,
manual_multiprocess_sharding=False,
)
self.data_dir = cached_path(data_dir, extract_archive=True)
self.featurize_captions = featurize_captions
self.is_evaluation = is_evaluation
self.num_potential_hard_negatives = num_potential_hard_negatives
if self.featurize_captions:
self.model = transformers.AutoModel.from_pretrained("bert-large-uncased").to(
self.cuda_device
)
self.model.eval()
self.tokenizer = transformers.AutoTokenizer.from_pretrained("bert-large-uncased")
# feature cache
self.hard_negative_features_cache_dir = feature_cache_dir
self.hard_negative_coordinates_cache_dir = feature_cache_dir
self._hard_negative_features_cache_instance: Optional[MutableMapping[str, Tensor]] = None
self._hard_negative_coordinates_cache_instance: Optional[MutableMapping[str, Tensor]] = None
if self.hard_negative_features_cache_dir and self.hard_negative_coordinates_cache_dir:
logger.info(f"Calculating hard negatives with a cache at {self.feature_cache_dir}")
@property
def _hard_negative_features_cache(self) -> MutableMapping[str, Tensor]:
if self._hard_negative_features_cache_instance is None:
if self.hard_negative_features_cache_dir is None:
logger.info("could not find feature cache dir")
self._hard_negative_features_cache_instance = {}
else:
logger.info("found feature cache dir")
os.makedirs(self.feature_cache_dir, exist_ok=True) # type: ignore
self._hard_negative_features_cache_instance = TensorCache(
os.path.join(self.feature_cache_dir, "hard_negative_features"), # type: ignore
read_only=not self.write_to_cache,
)
return self._hard_negative_features_cache_instance
@property
def _hard_negative_coordinates_cache(self) -> MutableMapping[str, Tensor]:
if self._hard_negative_coordinates_cache_instance is None:
if self.hard_negative_coordinates_cache_dir is None:
self._hard_negative_coordinates_cache_instance = {}
else:
os.makedirs(self.feature_cache_dir, exist_ok=True) # type: ignore
self._hard_negative_coordinates_cache_instance = TensorCache(
os.path.join(self.feature_cache_dir, "hard_negative_coordinates"), # type: ignore
read_only=not self.write_to_cache,
)
return self._hard_negative_coordinates_cache_instance
def _read(self, file_path: str):
file_path = cached_path(file_path, extract_archive=True)
files_in_split = set()
i = 0
with open(file_path, "r") as f:
for i, line in enumerate(f):
if self.max_instances is not None and i * 5 >= self.max_instances:
break
files_in_split.add(line.rstrip("\n"))
caption_dicts = []
for filename in sorted(os.listdir(self.data_dir)):
if filename.split(".")[0] in files_in_split:
full_file_path = os.path.join(self.data_dir, filename)
caption_dicts.append(get_caption_data(full_file_path))
processed_images: Iterable[
Optional[Tuple[Tensor, Tensor, Optional[Tensor], Optional[Tensor]]]
]
filenames = [f"{caption_dict['image_id']}.jpg" for caption_dict in caption_dicts]
try:
processed_images = self._process_image_paths(
self.images[filename] for filename in tqdm.tqdm(filenames, desc="Processing images")
)
except KeyError as e:
missing_id = e.args[0]
raise KeyError(
missing_id,
f"We could not find an image with the id {missing_id}. "
"Because of the size of the image datasets, we don't download them automatically. "
"Please go to https://shannon.cs.illinois.edu/DenotationGraph/, download the datasets you need, "
"and set the image_dir parameter to point to your download location. This dataset "
"reader does not care about the exact directory structure. It finds the images "
"wherever they are.",
)
features_list = []
averaged_features_list = []
coordinates_list = []
masks_list = []
for features, coords, _, _ in processed_images:
features_list.append(TensorField(features))
averaged_features_list.append(torch.mean(features, dim=0))
coordinates_list.append(TensorField(coords))
masks_list.append(
ArrayField(
features.new_ones((features.shape[0],), dtype=torch.bool),
padding_value=False,
dtype=torch.bool,
)
)
# Validation instances are a 1000-way multiple choice,
# one for each image in the validation set.
if self.is_evaluation:
for image_index in range(len(caption_dicts)):
caption_dict = caption_dicts[image_index]
for caption_index in range(len(caption_dict["captions"])):
instance = self.text_to_instance(
caption_dicts=caption_dicts,
image_index=image_index,
caption_index=caption_index,
features_list=features_list,
coordinates_list=coordinates_list,
masks_list=masks_list,
label=image_index,
)
if instance is not None:
yield instance
else:
# Shape: (num_images, image_dimension)
averaged_features = torch.stack(averaged_features_list, dim=0)
del averaged_features_list
# Shape: (num_images, num_captions_per_image = 5, caption_dimension)
caption_tensor = self.get_caption_features(caption_dicts)
for image_index, caption_dict in enumerate(caption_dicts):
for caption_index in range(len(caption_dict["captions"])):
hard_negative_features, hard_negative_coordinates = self.get_hard_negatives(
image_index,
caption_index,
caption_dicts,
averaged_features,
features_list,
coordinates_list,
caption_tensor,
)
instance = self.text_to_instance(
caption_dicts=caption_dicts,
image_index=image_index,
caption_index=caption_index,
features_list=features_list,
coordinates_list=coordinates_list,
masks_list=masks_list,
hard_negative_features=hard_negative_features,
hard_negative_coordinates=hard_negative_coordinates,
)
if instance is not None:
yield instance
def text_to_instance( # type: ignore
self,
caption_dicts: List[Dict[str, Any]],
image_index: int,
caption_index: int,
features_list: List[TensorField] = [],
coordinates_list: List[TensorField] = [],
masks_list: List[TensorField] = [],
hard_negative_features: Optional[Tensor] = None,
hard_negative_coordinates: Optional[Tensor] = None,
label: int = 0,
):
if self.is_evaluation:
caption_fields = [
TextField(
self._tokenizer.tokenize(caption_dicts[image_index]["captions"][caption_index]),
None,
)
] * len(caption_dicts)
return Instance(
{
"caption": ListField(caption_fields),
"box_features": ListField(features_list),
"box_coordinates": ListField(coordinates_list),
"box_mask": ListField(masks_list),
"label": LabelField(label, skip_indexing=True),
}
)
else:
# 1. Correct answer
caption_field = TextField(
self._tokenizer.tokenize(caption_dicts[image_index]["captions"][caption_index]),
None,
)
caption_fields = [caption_field]
features = [features_list[image_index]]
coords = [coordinates_list[image_index]]
masks = [masks_list[image_index]]
# 2. Correct image, random wrong caption
random_image_index = randint(0, len(caption_dicts) - 2)
if random_image_index == image_index:
random_image_index += 1
random_caption_index = randint(0, 4)
caption_fields.append(
TextField(
self._tokenizer.tokenize(
caption_dicts[random_image_index]["captions"][random_caption_index]
),
None,
)
)
features.append(features_list[image_index])
coords.append(coordinates_list[image_index])
masks.append(masks_list[image_index])
# 3. Random wrong image, correct caption
wrong_image_index = randint(0, len(features_list) - 2)
if wrong_image_index == image_index:
wrong_image_index += 1
caption_fields.append(caption_field)
features.append(features_list[wrong_image_index])
coords.append(coordinates_list[wrong_image_index])
masks.append(masks_list[wrong_image_index])
# 4. Hard negative image, correct caption
caption_fields.append(caption_field)
features.append(TensorField(hard_negative_features))
coords.append(TensorField(hard_negative_coordinates))
masks.append(
ArrayField(
hard_negative_features.new_ones(
(hard_negative_features.shape[0],),
dtype=torch.bool,
),
padding_value=False,
dtype=torch.bool,
)
)
return Instance(
{
"caption": ListField(caption_fields),
"box_features": ListField(features),
"box_coordinates": ListField(coords),
"box_mask": ListField(masks),
"label": LabelField(label, skip_indexing=True),
}
)
def get_hard_negatives(
self,
image_index: int,
caption_index: int,
caption_dicts: List[Dict[str, Any]],
averaged_features: Tensor,
features_list: List[TensorField],
coordinates_list: List[TensorField],
caption_tensor: Tensor,
) -> Tuple[Tensor, Tensor]:
image_id = caption_dicts[image_index]["image_id"]
caption = caption_dicts[image_index]["captions"][caption_index]
cache_id = f"{image_id}-{util.hash_object(caption)}"
if (
cache_id not in self._hard_negative_features_cache
or cache_id not in self._hard_negative_coordinates_cache
):
_, indices = (
-torch.cdist(
averaged_features, averaged_features[image_index].unsqueeze(0)
).squeeze(1)
).topk(min(averaged_features.size(0), self.num_potential_hard_negatives))
index_to_image_index = {}
hard_negative_tensors = []
i = 0
for idx in indices.tolist():
if idx != image_index:
index_to_image_index[i] = idx #
hard_negative_tensors.append(averaged_features[i])
i += 1
hard_negative_image_index = index_to_image_index[
torch.argmax(
torch.stack(hard_negative_tensors, dim=0)
@ caption_tensor[image_index][caption_index]
).item()
]
self._hard_negative_features_cache[cache_id] = features_list[
hard_negative_image_index
].tensor
self._hard_negative_coordinates_cache[cache_id] = coordinates_list[
hard_negative_image_index
].tensor
return (
self._hard_negative_features_cache[cache_id],
self._hard_negative_coordinates_cache[cache_id],
)
def get_caption_features(self, captions):
if not self.featurize_captions:
return torch.ones(len(captions), 5, 10)
captions_as_text = [c for caption_dict in captions for c in caption_dict["captions"]]
if self.feature_cache_dir is not None:
captions_hash = util.hash_object(captions_as_text)
captions_cache_file = (
Path(self.feature_cache_dir) / f"CaptionsCache-{captions_hash[:12]}.pt"
)
if captions_cache_file.exists():
with captions_cache_file.open("rb") as f:
return torch.load(f, map_location=torch.device("cpu"))
features = []
batch_size = 64
with torch.no_grad():
for batch_start in tqdm.trange(
0, len(captions_as_text), batch_size, desc="Featurizing captions"
):
batch_end = min(batch_start + batch_size, len(captions_as_text))
batch = self.tokenizer.batch_encode_plus(
captions_as_text[batch_start:batch_end], return_tensors="pt", padding=True
).to(self.cuda_device)
embeddings = self.model(**batch).pooler_output.squeeze(0)
if len(embeddings.shape) == 1:
embeddings = embeddings.unsqueeze(0)
features.append(embeddings.cpu())
features = torch.cat(features)
features = features.view(len(captions), 5, -1)
if self.feature_cache_dir is not None:
temp_captions_cache_file = captions_cache_file.with_suffix(".tmp")
try:
torch.save(features, temp_captions_cache_file)
temp_captions_cache_file.replace(captions_cache_file)
finally:
try:
temp_captions_cache_file.unlink()
except FileNotFoundError:
pass
return features
def apply_token_indexers(self, instance: Instance) -> None:
for caption in instance["caption"]:
caption.token_indexers = self._token_indexers # type: ignore
| allennlp-models-main | allennlp_models/vision/dataset_readers/flickr30k.py |
from allennlp_models.vision.dataset_readers.vision_reader import VisionReader
from allennlp_models.vision.dataset_readers.gqa import GQAReader
from allennlp_models.vision.dataset_readers.nlvr2 import Nlvr2Reader
from allennlp_models.vision.dataset_readers.vgqa import VGQAReader
from allennlp_models.vision.dataset_readers.vqav2 import VQAv2Reader
from allennlp_models.vision.dataset_readers.visual_entailment import VisualEntailmentReader
from allennlp_models.vision.dataset_readers.flickr30k import Flickr30kReader
| allennlp-models-main | allennlp_models/vision/dataset_readers/__init__.py |
import logging
from collections import Counter
from os import PathLike
from typing import (
Dict,
List,
Union,
Optional,
MutableMapping,
NamedTuple,
Tuple,
Iterable,
)
import json
import re
import torch
from torch import Tensor
from allennlp.common.lazy import Lazy
from allennlp.common.file_utils import cached_path, LocalCacheResource
from allennlp.data.vocabulary import Vocabulary
from allennlp.data.dataset_readers.dataset_reader import DatasetReader
from allennlp.data.fields import Field, ArrayField, LabelField, ListField, TextField
from allennlp.data.image_loader import ImageLoader
from allennlp.data.instance import Instance
from allennlp.data.token_indexers import TokenIndexer
from allennlp.data.tokenizers import Tokenizer
from allennlp.modules.vision.grid_embedder import GridEmbedder
from allennlp.modules.vision.region_detector import RegionDetector
from allennlp_models.vision.dataset_readers import utils
from allennlp_models.vision.dataset_readers.vision_reader import VisionReader
logger = logging.getLogger(__name__)
def get_score(count: int) -> float:
return min(1.0, count / 3)
@DatasetReader.register("vqav2")
class VQAv2Reader(VisionReader):
"""
Parameters
----------
image_dir: `str`
Path to directory containing `png` image files.
image_loader: `ImageLoader`
The image loader component used to load the images.
image_featurizer: `Lazy[GridEmbedder]`
The backbone image processor (like a ResNet), whose output will be passed to the region
detector for finding object boxes in the image.
region_detector: `Lazy[RegionDetector]`
For pulling out regions of the image (both coordinates and features) that will be used by
downstream models.
answer_vocab: `Union[Vocabulary, str]`, optional
The vocabulary to use for answers. The reader will look into the `"answers"` namespace
in the vocabulary to find possible answers.
If this is given, the reader only outputs instances with answers contained in this vocab.
If this is not given, the reader outputs all instances with all answers.
If this is a URL or filename, we will download a previously saved vocabulary from there.
feature_cache_dir: `Union[str, PathLike]`, optional
An optional directory to cache the featurized images in. Featurizing images takes a long
time, and many images are duplicated, so we highly recommend to use this cache.
tokenizer: `Tokenizer`, optional
The `Tokenizer` to use to tokenize the text. By default, this uses the tokenizer for
`"bert-base-uncased"`.
token_indexers: `Dict[str, TokenIndexer]`, optional
The `TokenIndexer` to use. By default, this uses the indexer for `"bert-base-uncased"`.
cuda_device: `Union[int, torch.device]`, optional
Either a torch device or a GPU number. This is the GPU we'll use to featurize the images.
max_instances: `int`, optional
For debugging, you can use this parameter to limit the number of instances the reader
returns.
image_processing_batch_size: `int`
The number of images to process at one time while featurizing. Default is 8.
multiple_answers_per_question: `bool`
VQA questions have multiple answers. By default, we use all of them, and give more
points to the more common answer. But VQA also has a special answer, the so-called
"multiple choice answer". If this is set to `False`, we only use that answer.
"""
def __init__(
self,
image_dir: Optional[Union[str, PathLike]] = None,
*,
image_loader: Optional[ImageLoader] = None,
image_featurizer: Optional[Lazy[GridEmbedder]] = None,
region_detector: Optional[Lazy[RegionDetector]] = None,
answer_vocab: Optional[Union[Vocabulary, str]] = None,
feature_cache_dir: Optional[Union[str, PathLike]] = None,
tokenizer: Optional[Tokenizer] = None,
token_indexers: Optional[Dict[str, TokenIndexer]] = None,
cuda_device: Optional[Union[int, torch.device]] = None,
max_instances: Optional[int] = None,
image_processing_batch_size: int = 8,
multiple_answers_per_question: bool = True,
write_to_cache: bool = True,
) -> None:
run_featurization = image_loader and image_featurizer and region_detector
if image_dir is None and run_featurization:
raise ValueError(
"Because of the size of the image datasets, we don't download them automatically. "
"Please go to https://visualqa.org/download.html, download the datasets you need, "
"and set the image_dir parameter to point to your download location. This dataset "
"reader does not care about the exact directory structure. It finds the images "
"wherever they are."
)
super().__init__(
image_dir,
image_loader=image_loader,
image_featurizer=image_featurizer,
region_detector=region_detector,
feature_cache_dir=feature_cache_dir,
tokenizer=tokenizer,
token_indexers=token_indexers,
cuda_device=cuda_device,
max_instances=max_instances,
image_processing_batch_size=image_processing_batch_size,
write_to_cache=write_to_cache,
)
# read answer vocab
if answer_vocab is None:
self.answer_vocab = None
else:
if isinstance(answer_vocab, str):
answer_vocab = cached_path(answer_vocab, extract_archive=True)
answer_vocab = Vocabulary.from_files(answer_vocab)
self.answer_vocab = frozenset(
utils.preprocess_answer(a)
for a in answer_vocab.get_token_to_index_vocabulary("answers").keys()
)
if self.produce_featurized_images:
# normalize self.images some more
# At this point, self.images maps filenames to full paths, but we want to map image ids to full paths.
filename_re = re.compile(r".*(\d{12})\.((jpg)|(png))")
def id_from_filename(filename: str) -> Optional[int]:
match = filename_re.fullmatch(filename)
if match is None:
return None
return int(match.group(1))
self.images = {
id_from_filename(name): full_path for name, full_path in self.images.items()
}
if None in self.images:
del self.images[None]
self.multiple_answers_per_question = multiple_answers_per_question
def _read(self, splits_or_list_of_splits: Union[str, List[str]]):
# if we are given a list of splits, concatenate them
if isinstance(splits_or_list_of_splits, str):
split_name = splits_or_list_of_splits
else:
for split_name in splits_or_list_of_splits:
yield from self._read(split_name)
return
# if the splits are using slicing syntax, honor it
question_slice, split_name = utils.get_data_slice(split_name)
class Split(NamedTuple):
annotations: Optional[str]
questions: str
aws_base = "https://s3.amazonaws.com/cvmlp/vqa/"
mscoco_base = aws_base + "mscoco/vqa/"
scene_base = aws_base + "abstract_v002/vqa/"
# fmt: off
splits = {
"balanced_real_train": Split(
mscoco_base + "v2_Annotations_Train_mscoco.zip!v2_mscoco_train2014_annotations.json", # noqa: E501
mscoco_base + "v2_Questions_Train_mscoco.zip!v2_OpenEnded_mscoco_train2014_questions.json", # noqa: E501
),
"balanced_real_val": Split(
mscoco_base + "v2_Annotations_Val_mscoco.zip!v2_mscoco_val2014_annotations.json", # noqa: E501
mscoco_base + "v2_Questions_Val_mscoco.zip!v2_OpenEnded_mscoco_val2014_questions.json", # noqa: E501
),
"balanced_real_test": Split(
None,
mscoco_base + "v2_Questions_Test_mscoco.zip!v2_OpenEnded_mscoco_test2015_questions.json", # noqa: E501
),
"balanced_bas_train": Split( # "bas" is Binary Abstract Scenes
scene_base + "Annotations_Binary_Train2017_abstract_v002.zip!abstract_v002_train2017_annotations.json", # noqa: E501
scene_base + "Questions_Binary_Train2017_abstract_v002.zip!OpenEnded_abstract_v002_train2017_questions.json", # noqa: E501
),
"balanced_bas_val": Split(
scene_base + "Annotations_Binary_Val2017_abstract_v002.zip!abstract_v002_val2017_annotations.json", # noqa: E501
scene_base + "Questions_Binary_Val2017_abstract_v002.zip!OpenEnded_abstract_v002_val2017_questions.json", # noqa: E501
),
"abstract_scenes_train": Split(
scene_base + "Annotations_Train_abstract_v002.zip!abstract_v002_train2015_annotations.json", # noqa: E501
scene_base + "Questions_Train_abstract_v002.zip!OpenEnded_abstract_v002_train2015_questions.json", # noqa: E501
),
"abstract_scenes_val": Split(
scene_base + "Annotations_Val_abstract_v002.zip!abstract_v002_val2015_annotations.json", # noqa: E501
scene_base + "Questions_Val_abstract_v002.zip!OpenEnded_abstract_v002_val2015_questions.json", # noqa: E501
),
"abstract_scenes_test": Split(
None,
scene_base + "Questions_Test_abstract_v002.zip!OpenEnded_abstract_v002_test2015_questions.json", # noqa: E501
),
"unittest": Split(
"test_fixtures/vision/vqav2/annotations.json",
"test_fixtures/vision/vqav2/questions.json"
)
}
# fmt: on
try:
split = splits[split_name]
except KeyError:
raise ValueError(f"Unrecognized split: {split_name}.")
answers_by_question_id = self._get_answers_by_question_id(split)
questions = []
with open(cached_path(split.questions, extract_archive=True)) as f:
questions_file = json.load(f)
for ques in questions_file["questions"]:
questions.append(ques)
questions = questions[question_slice]
question_dicts = list(self.shard_iterable(questions))
processed_images: Iterable[
Optional[Tuple[Tensor, Tensor, Optional[Tensor], Optional[Tensor]]]
]
if self.produce_featurized_images:
# It would be much easier to just process one image at a time, but it's faster to process
# them in batches. So this code gathers up instances until it has enough to fill up a batch
# that needs processing, and then processes them all.
try:
image_paths = [
self.images[int(question_dict["image_id"])] for question_dict in question_dicts
]
except KeyError as e:
missing_id = e.args[0]
raise KeyError(
missing_id,
f"We could not find an image with the id {missing_id}. "
"Because of the size of the image datasets, we don't download them automatically. "
"Please go to https://visualqa.org/download.html, download the datasets you need, "
"and set the image_dir parameter to point to your download location. This dataset "
"reader does not care about the exact directory structure. It finds the images "
"wherever they are.",
)
processed_images = self._process_image_paths(image_paths)
else:
processed_images = [None for _ in range(len(question_dicts))]
attempted_instances_count = 0
failed_instances_count = 0
for question_dict, processed_image in zip(question_dicts, processed_images):
answers = answers_by_question_id.get(str(question_dict["question_id"]))
instance = self.text_to_instance(question_dict["question"], processed_image, answers)
attempted_instances_count += 1
if instance is None:
failed_instances_count += 1
else:
yield instance
if attempted_instances_count % 2000 == 0:
failed_instances_fraction = failed_instances_count / attempted_instances_count
if failed_instances_fraction > 0.1:
logger.warning(
f"{failed_instances_fraction*100:.0f}% of instances have no answers."
)
def text_to_instance(
self, # type: ignore
question: str,
image: Union[str, Tuple[Tensor, Tensor, Optional[Tensor], Optional[Tensor]]],
answer_counts: Optional[MutableMapping[str, int]] = None,
*,
use_cache: bool = True,
) -> Optional[Instance]:
tokenized_question = self._tokenizer.tokenize(question)
question_field = TextField(tokenized_question, None)
fields: Dict[str, Field] = {
"question": question_field,
}
if image is not None:
if isinstance(image, str):
features, coords, _, _ = next(
self._process_image_paths([image], use_cache=use_cache)
)
else:
features, coords, _, _ = image
fields["box_features"] = ArrayField(features)
fields["box_coordinates"] = ArrayField(coords)
fields["box_mask"] = ArrayField(
features.new_ones((features.shape[0],), dtype=torch.bool),
padding_value=False,
dtype=torch.bool,
)
if answer_counts is not None:
answer_fields = []
weights = []
for answer, count in answer_counts.items():
if self.answer_vocab is None or answer in self.answer_vocab:
answer_fields.append(LabelField(answer, label_namespace="answers"))
weights.append(get_score(count))
if len(answer_fields) <= 0:
return None
fields["labels"] = ListField(answer_fields)
fields["label_weights"] = ArrayField(torch.tensor(weights))
return Instance(fields)
def apply_token_indexers(self, instance: Instance) -> None:
instance["question"].token_indexers = self._token_indexers # type: ignore
def _get_answers_by_question_id(self, split):
answers_by_question_id = {}
if split.annotations is not None:
# Pre-processing the annotations is time-consuming, so we don't want to
# have to re-do it each time we call read(). So we cache this result.
annotations_path = cached_path(split.annotations, extract_archive=True)
with LocalCacheResource(split.annotations + "-cache", annotations_path) as cache:
if cache.cached():
logger.info(
"Reading annotation answer counts from cache at %s",
cache.path,
)
with cache.reader() as f:
answers_by_question_id = json.load(f)
else:
logger.info("Calculating annotation answer counts...")
with open(annotations_path) as f:
annotations = json.load(f)
for a in annotations["annotations"]:
qid = a["question_id"]
answer_counts: MutableMapping[str, int] = Counter()
if self.multiple_answers_per_question:
for answer in (answer_dict["answer"] for answer_dict in a["answers"]):
answer_counts[utils.preprocess_answer(answer)] += 1
else:
answer_counts[utils.preprocess_answer(a["multiple_choice_answer"])] = 1
answers_by_question_id[str(qid)] = answer_counts
logger.info("Caching annotation answer counts to %s", cache.path)
with cache.writer() as f:
json.dump(answers_by_question_id, f)
return answers_by_question_id
| allennlp-models-main | allennlp_models/vision/dataset_readers/vqav2.py |
"""
Utilities for vision dataset readers.
"""
import logging
from functools import lru_cache
from typing import Tuple
import re
logger = logging.getLogger(__name__)
contractions = {
"aint": "ain't",
"arent": "aren't",
"cant": "can't",
"couldve": "could've",
"couldnt": "couldn't",
"couldn'tve": "couldn't've",
"couldnt've": "couldn't've",
"didnt": "didn't",
"doesnt": "doesn't",
"dont": "don't",
"hadnt": "hadn't",
"hadnt've": "hadn't've",
"hadn'tve": "hadn't've",
"hasnt": "hasn't",
"havent": "haven't",
"hed": "he'd",
"hed've": "he'd've",
"he'dve": "he'd've",
"hes": "he's",
"howd": "how'd",
"howll": "how'll",
"hows": "how's",
"Id've": "I'd've",
"I'dve": "I'd've",
"Im": "I'm",
"Ive": "I've",
"isnt": "isn't",
"itd": "it'd",
"itd've": "it'd've",
"it'dve": "it'd've",
"itll": "it'll",
"let's": "let's",
"maam": "ma'am",
"mightnt": "mightn't",
"mightnt've": "mightn't've",
"mightn'tve": "mightn't've",
"mightve": "might've",
"mustnt": "mustn't",
"mustve": "must've",
"neednt": "needn't",
"notve": "not've",
"oclock": "o'clock",
"oughtnt": "oughtn't",
"ow's'at": "'ow's'at",
"'ows'at": "'ow's'at",
"'ow'sat": "'ow's'at",
"shant": "shan't",
"shed've": "she'd've",
"she'dve": "she'd've",
"she's": "she's",
"shouldve": "should've",
"shouldnt": "shouldn't",
"shouldnt've": "shouldn't've",
"shouldn'tve": "shouldn't've",
"somebody'd": "somebodyd",
"somebodyd've": "somebody'd've",
"somebody'dve": "somebody'd've",
"somebodyll": "somebody'll",
"somebodys": "somebody's",
"someoned": "someone'd",
"someoned've": "someone'd've",
"someone'dve": "someone'd've",
"someonell": "someone'll",
"someones": "someone's",
"somethingd": "something'd",
"somethingd've": "something'd've",
"something'dve": "something'd've",
"somethingll": "something'll",
"thats": "that's",
"thered": "there'd",
"thered've": "there'd've",
"there'dve": "there'd've",
"therere": "there're",
"theres": "there's",
"theyd": "they'd",
"theyd've": "they'd've",
"they'dve": "they'd've",
"theyll": "they'll",
"theyre": "they're",
"theyve": "they've",
"twas": "'twas",
"wasnt": "wasn't",
"wed've": "we'd've",
"we'dve": "we'd've",
"weve": "we've",
"werent": "weren't",
"whatll": "what'll",
"whatre": "what're",
"whats": "what's",
"whatve": "what've",
"whens": "when's",
"whered": "where'd",
"wheres": "where's",
"whereve": "where've",
"whod": "who'd",
"whod've": "who'd've",
"who'dve": "who'd've",
"wholl": "who'll",
"whos": "who's",
"whove": "who've",
"whyll": "why'll",
"whyre": "why're",
"whys": "why's",
"wont": "won't",
"wouldve": "would've",
"wouldnt": "wouldn't",
"wouldnt've": "wouldn't've",
"wouldn'tve": "wouldn't've",
"yall": "y'all",
"yall'll": "y'all'll",
"y'allll": "y'all'll",
"yall'd've": "y'all'd've",
"y'alld've": "y'all'd've",
"y'all'dve": "y'all'd've",
"youd": "you'd",
"youd've": "you'd've",
"you'dve": "you'd've",
"youll": "you'll",
"youre": "you're",
"youve": "you've",
}
manual_map = {
"none": "0",
"zero": "0",
"one": "1",
"two": "2",
"three": "3",
"four": "4",
"five": "5",
"six": "6",
"seven": "7",
"eight": "8",
"nine": "9",
"ten": "10",
}
articles = ["a", "an", "the"]
period_strip = re.compile(r"(?!<=\d)(\.)(?!\d)")
comma_strip = re.compile(r"(\d)(\,)(\d)")
punct = [
";",
r"/",
"[",
"]",
'"',
"{",
"}",
"(",
")",
"=",
"+",
"\\",
"_",
"-",
">",
"<",
"@",
"`",
",",
"?",
"!",
]
def process_punctuation(inText: str) -> str:
outText = inText
for p in punct:
if (p + " " in inText or " " + p in inText) or (comma_strip.search(inText) is not None):
outText = outText.replace(p, "")
else:
outText = outText.replace(p, " ")
outText = period_strip.sub("", outText, re.UNICODE)
return outText
def process_digit_article(input: str) -> str:
output = []
for word in input.lower().split():
word = manual_map.get(word, word)
if word not in articles:
output.append(word)
else:
pass
for index, word in enumerate(output):
if word in contractions:
output[index] = contractions[word]
return " ".join(output)
@lru_cache(maxsize=None)
def preprocess_answer(answer: str) -> str:
answer = process_digit_article(process_punctuation(answer))
answer = answer.replace(",", "")
return answer
def get_data_slice(file_path: str) -> Tuple[slice, str]:
slice_match = re.match(r"(.*)\[([0123456789:]*)]", file_path)
if slice_match is None:
question_slice = slice(None, None, None)
return question_slice, file_path
else:
split_name = slice_match[1]
slice_args = [int(a) if len(a) > 0 else None for a in slice_match[2].split(":")]
question_slice = slice(*slice_args)
return question_slice, split_name
| allennlp-models-main | allennlp_models/vision/dataset_readers/utils.py |
import logging
from os import PathLike
from typing import Any, Dict, Iterable, Tuple, Union, Optional
import torch
from torch import Tensor
from allennlp.common.file_utils import cached_path, json_lines_from_file
from allennlp.common.lazy import Lazy
from allennlp.data.dataset_readers.dataset_reader import DatasetReader
from allennlp.data.fields import ArrayField, LabelField, ListField, MetadataField, TextField
from allennlp.data.image_loader import ImageLoader
from allennlp.data.instance import Instance
from allennlp.data.token_indexers import TokenIndexer
from allennlp.data.tokenizers import Tokenizer
from allennlp.modules.vision.grid_embedder import GridEmbedder
from allennlp.modules.vision.region_detector import RegionDetector
from allennlp_models.vision.dataset_readers.vision_reader import VisionReader
logger = logging.getLogger(__name__)
@DatasetReader.register("nlvr2")
class Nlvr2Reader(VisionReader):
"""
Reads the NLVR2 dataset from [http://lil.nlp.cornell.edu/nlvr/](http://lil.nlp.cornell.edu/nlvr/).
In this task, the model is presented with two images and a hypothesis referring to those images.
The task for the model is to identify whether the hypothesis is true or false.
Accordingly, the instances produced by this reader contain two images, featurized into the
fields "box_features" and "box_coordinates". In addition to that, it produces a `TextField`
called "hypothesis", and a `MetadataField` called "identifier". The latter contains the question
id from the question set.
Parameters
----------
image_dir: `str`
Path to directory containing `png` image files.
image_loader: `ImageLoader`
An image loader to read the images with
image_featurizer: `GridEmbedder`
The backbone image processor (like a ResNet), whose output will be passed to the region
detector for finding object boxes in the image.
region_detector: `RegionDetector`
For pulling out regions of the image (both coordinates and features) that will be used by
downstream models.
feature_cache_dir: `str`, optional
If given, the reader will attempt to use the featurized image cache in this directory.
Caching the featurized images can result in big performance improvements, so it is
recommended to set this.
tokenizer: `Tokenizer`, optional, defaults to `PretrainedTransformerTokenizer("bert-base-uncased")`
token_indexers: `Dict[str, TokenIndexer]`, optional,
defaults to`{"tokens": PretrainedTransformerIndexer("bert-base-uncased")}`
cuda_device: `int`, optional
Set this to run image featurization on the given GPU. By default, image featurization runs on CPU.
max_instances: `int`, optional
If set, the reader only returns the first `max_instances` instances, and then stops.
This is useful for testing.
image_processing_batch_size: `int`
The number of images to process at one time while featurizing. Default is 8.
"""
def __init__(
self,
image_dir: Optional[Union[str, PathLike]] = None,
*,
image_loader: Optional[ImageLoader] = None,
image_featurizer: Optional[Lazy[GridEmbedder]] = None,
region_detector: Optional[Lazy[RegionDetector]] = None,
feature_cache_dir: Optional[Union[str, PathLike]] = None,
tokenizer: Optional[Tokenizer] = None,
token_indexers: Optional[Dict[str, TokenIndexer]] = None,
cuda_device: Optional[Union[int, torch.device]] = None,
max_instances: Optional[int] = None,
image_processing_batch_size: int = 8,
write_to_cache: bool = True,
) -> None:
run_featurization = image_loader and image_featurizer and region_detector
if image_dir is None and run_featurization:
raise ValueError(
"Because of the size of the image datasets, we don't download them automatically. "
"Please go to https://github.com/lil-lab/nlvr/tree/master/nlvr2, download the datasets you need, "
"and set the image_dir parameter to point to your download location. This dataset "
"reader does not care about the exact directory structure. It finds the images "
"wherever they are."
)
super().__init__(
image_dir,
image_loader=image_loader,
image_featurizer=image_featurizer,
region_detector=region_detector,
feature_cache_dir=feature_cache_dir,
tokenizer=tokenizer,
token_indexers=token_indexers,
cuda_device=cuda_device,
max_instances=max_instances,
image_processing_batch_size=image_processing_batch_size,
write_to_cache=write_to_cache,
)
github_url = "https://raw.githubusercontent.com/lil-lab/nlvr/"
nlvr_commit = "68a11a766624a5b665ec7594982b8ecbedc728c7"
data_dir = f"{github_url}{nlvr_commit}/nlvr2/data"
self.splits = {
"dev": f"{data_dir}/dev.json",
"test": f"{data_dir}/test1.json",
"train": f"{data_dir}/train.json",
"balanced_dev": f"{data_dir}/balanced/balanced_dev.json",
"balanced_test": f"{data_dir}/balanced/balanced_test1.json",
"unbalanced_dev": f"{data_dir}/balanced/unbalanced_dev.json",
"unbalanced_test": f"{data_dir}/balanced/unbalanced_test1.json",
}
def _read(self, split_or_filename: str):
filename = self.splits.get(split_or_filename, split_or_filename)
json_file_path = cached_path(filename)
blobs = []
json_blob: Dict[str, Any]
for json_blob in json_lines_from_file(json_file_path):
blobs.append(json_blob)
blob_dicts = list(self.shard_iterable(blobs))
processed_images1: Iterable[
Optional[Tuple[Tensor, Tensor, Optional[Tensor], Optional[Tensor]]]
]
processed_images2: Iterable[
Optional[Tuple[Tensor, Tensor, Optional[Tensor], Optional[Tensor]]]
]
if self.produce_featurized_images:
# It would be much easier to just process one image at a time, but it's faster to process
# them in batches. So this code gathers up instances until it has enough to fill up a batch
# that needs processing, and then processes them all.
try:
image_paths1 = []
image_paths2 = []
for blob in blob_dicts:
identifier = blob["identifier"]
image_name_base = identifier[: identifier.rindex("-")]
image_paths1.append(self.images[f"{image_name_base}-img0.png"])
image_paths2.append(self.images[f"{image_name_base}-img1.png"])
except KeyError as e:
missing_id = e.args[0]
raise KeyError(
missing_id,
f"We could not find an image with the id {missing_id}. "
"Because of the size of the image datasets, we don't download them automatically. "
"Please go to https://github.com/lil-lab/nlvr/tree/master/nlvr2, download the "
"datasets you need, and set the image_dir parameter to point to your download "
"location. This dataset reader does not care about the exact directory "
"structure. It finds the images wherever they are.",
)
processed_images1 = self._process_image_paths(image_paths1)
processed_images2 = self._process_image_paths(image_paths2)
else:
processed_images1 = [None for _ in range(len(blob_dicts))]
processed_images2 = [None for _ in range(len(blob_dicts))]
attempted_instances = 0
for json_blob, image1, image2 in zip(blob_dicts, processed_images1, processed_images2):
identifier = json_blob["identifier"]
hypothesis = json_blob["sentence"]
label = json_blob["label"] == "True"
instance = self.text_to_instance(identifier, hypothesis, image1, image2, label)
if instance is not None:
attempted_instances += 1
yield instance
logger.info(f"Successfully yielded {attempted_instances} instances")
def extract_image_features(
self,
image: Union[str, Tuple[Tensor, Tensor, Optional[Tensor], Optional[Tensor]]],
use_cache: bool,
):
if isinstance(image, str):
features, coords, _, _ = next(self._process_image_paths([image], use_cache=use_cache))
else:
features, coords, _, _ = image
return (
ArrayField(features),
ArrayField(coords),
ArrayField(
features.new_ones((features.shape[0],), dtype=torch.bool),
padding_value=False,
dtype=torch.bool,
),
)
def text_to_instance( # type: ignore
self,
identifier: Optional[str],
hypothesis: str,
image1: Union[str, Tuple[Tensor, Tensor, Optional[Tensor], Optional[Tensor]]],
image2: Union[str, Tuple[Tensor, Tensor, Optional[Tensor], Optional[Tensor]]],
label: Optional[bool] = None,
use_cache: bool = True,
) -> Instance:
hypothesis_field = TextField(self._tokenizer.tokenize(hypothesis), None)
box_features1, box_coordinates1, box_mask1 = self.extract_image_features(image1, use_cache)
box_features2, box_coordinates2, box_mask2 = self.extract_image_features(image2, use_cache)
fields = {
"hypothesis": ListField([hypothesis_field, hypothesis_field]),
"box_features": ListField([box_features1, box_features2]),
"box_coordinates": ListField([box_coordinates1, box_coordinates2]),
"box_mask": ListField([box_mask1, box_mask2]),
}
if identifier is not None:
fields["identifier"] = MetadataField(identifier)
if label is not None:
fields["label"] = LabelField(int(label), skip_indexing=True)
return Instance(fields)
def apply_token_indexers(self, instance: Instance) -> None:
instance["hypothesis"][0].token_indexers = self._token_indexers # type: ignore
instance["hypothesis"][1].token_indexers = self._token_indexers # type: ignore
| allennlp-models-main | allennlp_models/vision/dataset_readers/nlvr2.py |
import logging
from typing import (
Dict,
List,
Union,
Optional,
Tuple,
)
import torch
from torch import Tensor
from allennlp.data.dataset_readers.dataset_reader import DatasetReader
from allennlp.data.fields import Field, ArrayField, LabelField, TextField
from allennlp.data.instance import Instance
from allennlp.common.file_utils import json_lines_from_file
from allennlp_models.vision.dataset_readers.vision_reader import VisionReader
logger = logging.getLogger(__name__)
@DatasetReader.register("visual-entailment")
class VisualEntailmentReader(VisionReader):
"""
The dataset reader for visual entailment.
"""
def _read(self, file_path: str):
split_prefix = "https://storage.googleapis.com/allennlp-public-data/snli-ve/"
splits = {
"dev": split_prefix + "snli_ve_dev.jsonl.gz",
"test": split_prefix + "snli_ve_test.jsonl.gz",
"train": split_prefix + "snli_ve_train.jsonl.gz",
}
file_path = splits.get(file_path, file_path)
lines = json_lines_from_file(file_path)
info_dicts: List[Dict] = list(self.shard_iterable(lines)) # type: ignore
if self.produce_featurized_images:
# It would be much easier to just process one image at a time, but it's faster to process
# them in batches. So this code gathers up instances until it has enough to fill up a batch
# that needs processing, and then processes them all.
filenames = [info_dict["Flickr30K_ID"] + ".jpg" for info_dict in info_dicts]
try:
processed_images = self._process_image_paths(
[self.images[filename] for filename in filenames]
)
except KeyError as e:
missing_filename = e.args[0]
raise KeyError(
missing_filename,
f"We could not find an image with the name {missing_filename}. "
"Because of the size of the image datasets, we don't download them automatically. "
"Please download the images from"
"https://storage.googleapis.com/allennlp-public-data/snli-ve/flickr30k_images.tar.gz, "
"extract them into a directory, and set the image_dir parameter to point to that "
"directory. This dataset reader does not care about the exact directory structure. It "
"finds the images wherever they are.",
)
else:
processed_images = [None for _ in range(len(info_dicts))] # type: ignore
for info_dict, processed_image in zip(info_dicts, processed_images):
hypothesis = info_dict["sentence2"]
answer = info_dict["gold_label"]
instance = self.text_to_instance(processed_image, hypothesis, answer)
yield instance
def text_to_instance(
self, # type: ignore
image: Union[str, Tuple[Tensor, Tensor, Optional[Tensor], Optional[Tensor]]],
hypothesis: str,
label: Optional[str] = None,
*,
use_cache: bool = True,
) -> Instance:
tokenized_hypothesis = self._tokenizer.tokenize(hypothesis)
hypothesis_field = TextField(tokenized_hypothesis, None)
fields: Dict[str, Field] = {"hypothesis": hypothesis_field}
if image is not None:
if isinstance(image, str):
features, coords, _, _ = next(
self._process_image_paths([image], use_cache=use_cache)
)
else:
features, coords, _, _ = image
fields["box_features"] = ArrayField(features)
fields["box_coordinates"] = ArrayField(coords)
fields["box_mask"] = ArrayField(
features.new_ones((features.shape[0],), dtype=torch.bool),
padding_value=False,
dtype=torch.bool,
)
if label:
fields["labels"] = LabelField(label)
return Instance(fields)
def apply_token_indexers(self, instance: Instance) -> None:
instance["hypothesis"].token_indexers = self._token_indexers # type: ignore
| allennlp-models-main | allennlp_models/vision/dataset_readers/visual_entailment.py |
import glob
import logging
from os import PathLike
from typing import (
Dict,
List,
Union,
Optional,
MutableMapping,
Set,
Tuple,
Iterator,
Iterable,
)
import os
import torch
from torch import Tensor
from tqdm import tqdm
import torch.distributed as dist
from allennlp.common import util
from allennlp.common.checks import check_for_gpu, ConfigurationError
from allennlp.common.lazy import Lazy
from allennlp.common.util import int_to_device
from allennlp.common.file_utils import TensorCache
from allennlp.data.dataset_readers.dataset_reader import DatasetReader
from allennlp.data.image_loader import ImageLoader
from allennlp.data.token_indexers import PretrainedTransformerIndexer
from allennlp.data.token_indexers import TokenIndexer
from allennlp.data.tokenizers import PretrainedTransformerTokenizer
from allennlp.data.tokenizers import Tokenizer
from allennlp.modules.vision.grid_embedder import GridEmbedder
from allennlp.modules.vision.region_detector import RegionDetector
logger = logging.getLogger(__name__)
class VisionReader(DatasetReader):
"""
Base class for dataset readers for vision tasks.
If you don't specify `image_loader`, `image_featurizer`, and `region_detector`, the reader
assumes it can get all featurized images from the cache.
If you don't specify `feature_cache`, the reader will featurize all images using the
featurization components, and use an internal in-memory cache to catch duplicate
images.
If you don't specify either of these things, the reader will not produce featurized images
at all.
Parameters
----------
image_dir: `str`
Path to directory containing image files. The structure of the directory doesn't matter. We
find images by finding filenames that match `*[image_id].jpg`.
image_loader : `ImageLoader`, optional
The image loading component.
image_featurizer: `Lazy[GridEmbedder]`, optional
The backbone image processor (like a ResNet), whose output will be passed to the region
detector for finding object boxes in the image.
region_detector: `Lazy[RegionDetector]`, optional
For pulling out regions of the image (both coordinates and features) that will be used by
downstream models.
tokenizer: `Tokenizer`, optional
The `Tokenizer` to use to tokenize the text. By default, this uses the tokenizer for
`"bert-base-uncased"`.
token_indexers: `Dict[str, TokenIndexer]`, optional
The `TokenIndexer` to use. By default, this uses the indexer for `"bert-base-uncased"`.
cuda_device: `Union[int, torch.device]`, optional
Either a torch device or a GPU number. This is the GPU we'll use to featurize the images.
max_instances: `int`, optional
For debugging, you can use this parameter to limit the number of instances the reader
returns.
image_processing_batch_size: `int`
The number of images to process at one time while featurizing. Default is 8.
write_to_cache: `bool`, optional (default = `True`)
Allows the reader to write to the cache. Disabling this is useful if you don't want
to accidentally overwrite a cache you already have, or if you don't have write
access to the cache you're using.
"""
def __init__(
self,
image_dir: Optional[Union[str, PathLike]],
*,
image_loader: Optional[ImageLoader] = None,
image_featurizer: Optional[Lazy[GridEmbedder]] = None,
region_detector: Optional[Lazy[RegionDetector]] = None,
feature_cache_dir: Optional[Union[str, PathLike]] = None,
tokenizer: Optional[Tokenizer] = None,
token_indexers: Optional[Dict[str, TokenIndexer]] = None,
cuda_device: Optional[Union[int, torch.device]] = None,
max_instances: Optional[int] = None,
image_processing_batch_size: int = 8,
write_to_cache: bool = True,
manual_distributed_sharding: bool = True,
manual_multiprocess_sharding: bool = True,
) -> None:
super().__init__(
max_instances=max_instances,
manual_distributed_sharding=manual_distributed_sharding,
manual_multiprocess_sharding=manual_multiprocess_sharding,
)
# tokenizers and indexers
if tokenizer is None:
tokenizer = PretrainedTransformerTokenizer("bert-base-uncased")
self._tokenizer = tokenizer
if token_indexers is None:
token_indexers = {"tokens": PretrainedTransformerIndexer("bert-base-uncased")}
self._token_indexers = token_indexers
if not ((image_loader is None) == (image_featurizer is None) == (region_detector is None)):
raise ConfigurationError(
"Please either specify all of image_loader, image_featurizer, and region_detector, "
"or specify none of them if you don't want to featurize images."
)
# feature cache
self.feature_cache_dir = feature_cache_dir
self.coordinates_cache_dir = feature_cache_dir
self.class_probs_cache_dir = feature_cache_dir
self.class_labels_cache_dir = feature_cache_dir
if feature_cache_dir:
self.write_to_cache = write_to_cache
else:
# If we don't have a cache dir, we use a dict in memory as a cache, so we
# always write.
self.write_to_cache = True
self._feature_cache_instance: Optional[MutableMapping[str, Tensor]] = None
self._coordinates_cache_instance: Optional[MutableMapping[str, Tensor]] = None
self._class_probs_cache_instance: Optional[MutableMapping[str, Tensor]] = None
self._class_labels_cache_instance: Optional[MutableMapping[str, Tensor]] = None
# image processors
self.image_loader = None
if image_loader and image_featurizer and region_detector:
if cuda_device is None:
if torch.cuda.device_count() > 0:
if util.is_distributed():
cuda_device = dist.get_rank() % torch.cuda.device_count()
else:
cuda_device = 0
else:
cuda_device = -1
check_for_gpu(cuda_device)
self.cuda_device = int_to_device(cuda_device)
logger.info(f"Processing images on device {cuda_device}")
# image loading and featurizing
self.image_loader = image_loader
self.image_loader.device = self.cuda_device
self._lazy_image_featurizer = image_featurizer
self._image_featurizer = None
self._lazy_region_detector = region_detector
self._region_detector = None
self.image_processing_batch_size = image_processing_batch_size
self.produce_featurized_images = False
if self.feature_cache_dir and self.coordinates_cache_dir:
logger.info(f"Featurizing images with a cache at {self.feature_cache_dir}")
self.produce_featurized_images = True
if image_loader and image_featurizer and region_detector:
if self.produce_featurized_images:
logger.info("Falling back to a full image featurization pipeline")
else:
logger.info("Featurizing images with a full image featurization pipeline")
self.produce_featurized_images = True
if self.produce_featurized_images:
if image_dir is None:
if image_loader and image_featurizer and region_detector:
raise ConfigurationError("We need an image_dir to featurize images.")
else:
raise ConfigurationError(
"We need an image_dir to use a cache of featurized images. Images won't be "
"read if they are cached, but we need the image_dir to determine the right "
"cache keys from the file names."
)
logger.info("Discovering images ...")
self.images = {
os.path.basename(filename): filename
for extension in {"png", "jpg", "jpeg"}
for filename in tqdm(
glob.iglob(os.path.join(image_dir, "**", f"*.{extension}"), recursive=True),
desc=f"Discovering {extension} images",
)
}
logger.info("Done discovering images")
@property
def image_featurizer(self) -> Optional[GridEmbedder]:
if self._image_featurizer is None:
if self._lazy_image_featurizer is None:
return None
self._image_featurizer = self._lazy_image_featurizer.construct().to(self.cuda_device) # type: ignore
self._image_featurizer.eval() # type: ignore[attr-defined]
return self._image_featurizer # type: ignore[return-value]
@property
def region_detector(self) -> Optional[RegionDetector]:
if self._region_detector is None:
if self._lazy_region_detector is None:
return None
self._region_detector = self._lazy_region_detector.construct().to(self.cuda_device) # type: ignore
self._region_detector.eval() # type: ignore[attr-defined]
return self._region_detector # type: ignore[return-value]
def _create_cache(
self,
cache_name: str,
cache_dir: Optional[Union[str, PathLike]] = None,
) -> MutableMapping[str, Tensor]:
if cache_dir is None:
return {}
os.makedirs(cache_dir, exist_ok=True)
return TensorCache(
os.path.join(cache_dir, cache_name),
read_only=not self.write_to_cache,
)
@property
def _feature_cache(self) -> MutableMapping[str, Tensor]:
if self._feature_cache_instance is None:
self._feature_cache_instance = self._create_cache("features", self.feature_cache_dir)
return self._feature_cache_instance
@property
def _coordinates_cache(self) -> MutableMapping[str, Tensor]:
if self._coordinates_cache_instance is None:
self._coordinates_cache_instance = self._create_cache(
"coordinates", self.coordinates_cache_dir
)
return self._coordinates_cache_instance
@property
def _class_probs_cache(self) -> MutableMapping[str, Tensor]:
if self._class_probs_cache_instance is None:
self._class_probs_cache_instance = self._create_cache(
"class_probs", self.class_probs_cache_dir
)
return self._class_probs_cache_instance
@property
def _class_labels_cache(self) -> MutableMapping[str, Tensor]:
if self._class_labels_cache_instance is None:
self._class_labels_cache_instance = self._create_cache(
"class_labels", self.class_labels_cache_dir
)
return self._class_labels_cache_instance
def _process_image_paths(
self, image_paths: Iterable[str], *, use_cache: bool = True
) -> Iterator[Tuple[Tensor, Tensor, Optional[Tensor], Optional[Tensor]]]:
"""
Processes the given image paths and returns featurized images.
This consumes image paths one at a time, featurizes them either by going to the cache, or
by running the featurization models, and yields tensors one at a time. It runs the
featurization pipeline in batches for performance.
image_paths: `Iterable[str]`
the image paths to process
use_cache: `bool`, default = `True`
Usually the cache behavior is governed by the `write_to_cache` parameter given to
`__init__()`. But sometimes we want to override this behavior and turn off the
cache completely. This parameter lets you do that. This is useful for the
`Predictor`, so we can make predictions without having to touch a cache,
even if the model was trained with a cache.
"""
assert self.produce_featurized_images, (
"For _process_image_paths() to work, we need either a feature cache, or an image loader, "
"an image featurizer, and a region detector."
)
batch: List[Union[str, Tuple[Tensor, Tensor, Optional[Tensor], Optional[Tensor]]]] = []
unprocessed_paths: Set[str] = set()
def yield_batch():
# process the images
paths = list(unprocessed_paths)
images, sizes = self.image_loader(paths)
with torch.no_grad():
images = images.to(self.cuda_device)
sizes = sizes.to(self.cuda_device)
featurized_images = self.image_featurizer(images, sizes)
detector_results = self.region_detector(images, sizes, featurized_images)
features = detector_results.features
coordinates = detector_results.boxes
class_probs = detector_results.class_probs
class_labels = detector_results.class_labels
# store the processed results in memory, so we can complete the batch
paths_to_tensors = {}
for i, path in enumerate(paths):
if class_probs:
class_probs_tensor = class_probs[i]
else:
class_probs_tensor = None
if class_labels:
class_labels_tensor = class_labels[i]
else:
class_labels_tensor = None
paths_to_tensors[path] = (
features[i],
coordinates[i],
class_probs_tensor,
class_labels_tensor,
)
# store the processed results in the cache
if use_cache and self.write_to_cache:
for path, (
features,
coordinates,
class_probs,
class_labels,
) in paths_to_tensors.items():
basename = os.path.basename(path)
self._feature_cache[basename] = features
self._coordinates_cache[basename] = coordinates
if class_probs is not None:
self._class_probs_cache[basename] = class_probs
if class_labels is not None:
self._class_labels_cache[basename] = class_labels
# yield the batch
for b in batch:
if isinstance(b, str):
yield paths_to_tensors[b]
else:
yield b
for image_path in image_paths:
basename = os.path.basename(image_path)
try:
if use_cache:
features: Tensor = self._feature_cache[basename]
coordinates: Tensor = self._coordinates_cache[basename]
class_probs: Optional[Tensor] = self._class_probs_cache.get(basename)
class_labels: Optional[Tensor] = self._class_labels_cache.get(basename)
if len(batch) <= 0:
yield features, coordinates, class_probs, class_labels
else:
batch.append((features, coordinates, class_probs, class_labels))
else:
# If we're not using the cache, we pretend we had a cache miss here.
raise KeyError
except KeyError:
if not (self.image_loader and self.region_detector and self.image_featurizer):
if use_cache:
raise KeyError(
f"Could not find {basename} in the feature cache, and "
"image featurizers are not defined."
)
else:
raise KeyError(
"Reading the feature cache is disabled, and image featurizers "
"are not defined. I can't process anything."
)
batch.append(image_path)
unprocessed_paths.add(image_path)
if len(unprocessed_paths) >= self.image_processing_batch_size:
yield from yield_batch()
batch = []
unprocessed_paths = set()
if len(batch) > 0:
yield from yield_batch()
| allennlp-models-main | allennlp_models/vision/dataset_readers/vision_reader.py |
from allennlp_models.vision.predictors.vilbert_vqa import VilbertVqaPredictor
from allennlp_models.vision.predictors.visual_entailment import VisualEntailmentPredictor
| allennlp-models-main | allennlp_models/vision/predictors/__init__.py |
from typing import List, Dict
import numpy
from allennlp.common.file_utils import cached_path
from allennlp.common.util import JsonDict
from allennlp.data import Instance
from allennlp.predictors.predictor import Predictor
@Predictor.register("vilbert_vqa")
class VilbertVqaPredictor(Predictor):
def predict(self, image: str, question: str) -> JsonDict:
image = cached_path(image)
return self.predict_json({"question": question, "image": image})
def _json_to_instance(self, json_dict: JsonDict) -> Instance:
from allennlp_models.vision.dataset_readers.vqav2 import VQAv2Reader
from allennlp_models.vision import GQAReader
question = json_dict["question"]
image = cached_path(json_dict["image"])
if isinstance(self._dataset_reader, VQAv2Reader) or isinstance(
self._dataset_reader, GQAReader
):
return self._dataset_reader.text_to_instance(question, image, use_cache=False)
else:
raise ValueError(
f"Dataset reader is of type f{self._dataset_reader.__class__.__name__}. "
f"Expected {VQAv2Reader.__name__}."
)
def predictions_to_labeled_instances(
self, instance: Instance, outputs: Dict[str, numpy.ndarray]
) -> List[Instance]:
return [instance] # TODO
| allennlp-models-main | allennlp_models/vision/predictors/vilbert_vqa.py |
from typing import List, Dict
import numpy
from allennlp.common.file_utils import cached_path
from allennlp.common.util import JsonDict
from allennlp.data import Instance
from allennlp.data.fields import LabelField
from allennlp.predictors.predictor import Predictor
@Predictor.register("nlvr2")
class Nlvr2Predictor(Predictor):
def predict(self, image1: str, image2: str, hypothesis: str) -> JsonDict:
image1 = cached_path(image1)
image2 = cached_path(image2)
return self.predict_json({"image1": image1, "image2": image2, "hypothesis": hypothesis})
def _json_to_instance(self, json_dict: JsonDict) -> Instance:
from allennlp_models.vision.dataset_readers.nlvr2 import Nlvr2Reader
image1 = cached_path(json_dict["image1"])
image2 = cached_path(json_dict["image2"])
hypothesis = json_dict["hypothesis"]
if isinstance(self._dataset_reader, Nlvr2Reader):
return self._dataset_reader.text_to_instance(
None, hypothesis, image1, image2, use_cache=False
)
else:
raise ValueError(
f"Dataset reader is of type f{self._dataset_reader.__class__.__name__}. "
f"Expected {Nlvr2Reader.__name__}."
)
def predictions_to_labeled_instances(
self, instance: Instance, outputs: Dict[str, numpy.ndarray]
) -> List[Instance]:
new_instance = instance.duplicate()
label = numpy.argmax(outputs["probs"])
new_instance.add_field("label", LabelField(int(label), skip_indexing=True))
return [new_instance]
| allennlp-models-main | allennlp_models/vision/predictors/nlvr2.py |
from typing import List, Dict
import numpy
from allennlp.common.file_utils import cached_path
from allennlp.common.util import JsonDict
from allennlp.data import Instance
from allennlp.data.fields import LabelField
from allennlp.predictors.predictor import Predictor
@Predictor.register("vilbert_ve")
class VisualEntailmentPredictor(Predictor):
def predict(self, image: str, hypothesis: str) -> JsonDict:
image = cached_path(image)
return self.predict_json({"image": image, "hypothesis": hypothesis})
def _json_to_instance(self, json_dict: JsonDict) -> Instance:
from allennlp_models.vision.dataset_readers.visual_entailment import VisualEntailmentReader
image = cached_path(json_dict["image"])
hypothesis = json_dict["hypothesis"]
if isinstance(self._dataset_reader, VisualEntailmentReader):
return self._dataset_reader.text_to_instance(image, hypothesis, use_cache=False)
else:
raise ValueError(
f"Dataset reader is of type f{self._dataset_reader.__class__.__name__}. "
f"Expected {VisualEntailmentReader.__name__}."
)
def predictions_to_labeled_instances(
self, instance: Instance, outputs: Dict[str, numpy.ndarray]
) -> List[Instance]:
new_instance = instance.duplicate()
label = numpy.argmax(outputs["probs"])
new_instance.add_field("label", LabelField(int(label), skip_indexing=True))
return [new_instance]
| allennlp-models-main | allennlp_models/vision/predictors/visual_entailment.py |
import logging
from typing import Dict, Optional
import torch
from allennlp.data import TextFieldTensors, Vocabulary
from allennlp.models.model import Model
from allennlp.modules.transformer import (
TransformerEmbeddings,
ImageFeatureEmbeddings,
BiModalEncoder,
)
from allennlp.training.metrics import CategoricalAccuracy
from torch.nn import CrossEntropyLoss
from allennlp_models.vision.models.vision_text_model import VisionTextModel
logger = logging.getLogger(__name__)
@Model.register("vilbert_ir")
@Model.register("vilbert_ir_from_huggingface", constructor="from_huggingface_model_name")
class ImageRetrievalVilbert(VisionTextModel):
"""
Model for image retrieval task based on the VilBERT paper.
# Parameters
vocab : `Vocabulary`
text_embeddings : `TransformerEmbeddings`
image_embeddings : `ImageFeatureEmbeddings`
encoder : `BiModalEncoder`
pooled_output_dim : `int`
fusion_method : `str`, optional (default = `"mul"`)
dropout : `float`, optional (default = `0.1`)
label_namespace : `str`, optional (default = `answers`)
k: `int`, optional (default = `1`)
"""
def __init__(
self,
vocab: Vocabulary,
text_embeddings: TransformerEmbeddings,
image_embeddings: ImageFeatureEmbeddings,
encoder: BiModalEncoder,
pooled_output_dim: int,
fusion_method: str = "mul",
dropout: float = 0.1,
k: int = 1,
*,
ignore_text: bool = False,
ignore_image: bool = False,
) -> None:
super().__init__(
vocab,
text_embeddings,
image_embeddings,
encoder,
pooled_output_dim,
fusion_method,
dropout,
is_multilabel=False,
ignore_text=ignore_text,
ignore_image=ignore_image,
)
self.classifier = torch.nn.Linear(pooled_output_dim, 1)
self.top_1_acc = CategoricalAccuracy()
self.top_5_acc = CategoricalAccuracy(top_k=5)
self.top_10_acc = CategoricalAccuracy(top_k=10)
self.loss = CrossEntropyLoss()
self.k = k
def forward( # type: ignore
self,
box_features: torch.Tensor,
box_coordinates: torch.Tensor,
box_mask: torch.Tensor,
caption: TextFieldTensors,
label: torch.Tensor,
) -> Dict[str, torch.Tensor]:
batch_size = box_features.shape[0]
if self.training:
# Shape: (batch_size, num_images, pooled_output_dim)
pooled_output = self.backbone(box_features, box_coordinates, box_mask, caption)[
"pooled_boxes_and_text"
]
# Shape: (batch_size, num_images)
logits = self.classifier(pooled_output).squeeze(-1)
probs = torch.softmax(logits, dim=-1)
else:
with torch.no_grad():
# Shape: (batch_size, num_images, pooled_output_dim)
pooled_output = self.backbone(box_features, box_coordinates, box_mask, caption)[
"pooled_boxes_and_text"
]
# Shape: (batch_size, num_images)
logits = self.classifier(pooled_output).squeeze(-1)
probs = torch.softmax(logits, dim=-1)
outputs = {"logits": logits, "probs": probs}
outputs = self._compute_loss_and_metrics(batch_size, outputs, label)
return outputs
def _compute_loss_and_metrics(
self,
batch_size: int,
outputs: Dict[str, torch.Tensor],
labels: torch.Tensor,
label_weights: Optional[torch.Tensor] = None,
) -> Dict[str, torch.Tensor]:
if label_weights is not None:
raise NotImplementedError("This implementation does not support label_weights.")
outputs["loss"] = self.loss(outputs["logits"], labels) / batch_size
self.top_1_acc(outputs["logits"], labels)
self.top_5_acc(outputs["logits"], labels)
self.top_10_acc(outputs["logits"], labels)
return outputs
def get_metrics(self, reset: bool = False) -> Dict[str, float]:
return {
"top_1_acc": self.top_1_acc.get_metric(reset),
"top_5_acc": self.top_5_acc.get_metric(reset),
"top_10_acc": self.top_10_acc.get_metric(reset),
}
def make_output_human_readable(
self, output_dict: Dict[str, torch.Tensor]
) -> Dict[str, torch.Tensor]:
return output_dict
default_predictor = "vilbert_ir"
| allennlp-models-main | allennlp_models/vision/models/vilbert_image_retrieval.py |
from allennlp_models.vision.models.nlvr2 import Nlvr2Model
from allennlp_models.vision.models.vision_text_model import VisionTextModel
from allennlp_models.vision.models.visual_entailment import VisualEntailmentModel
from allennlp_models.vision.models.vilbert_image_retrieval import ImageRetrievalVilbert
from allennlp_models.vision.models.vilbert_vqa import VqaVilbert
from allennlp_models.vision.models.heads.vqa_head import VqaHead
from allennlp_models.vision.models.heads.visual_entailment_head import VisualEntailmentHead
| allennlp-models-main | allennlp_models/vision/models/__init__.py |
import logging
from typing import Dict, Optional
import torch
from allennlp.data import TextFieldTensors, Vocabulary
from allennlp.models.model import Model
from allennlp.modules.transformer import (
TransformerEmbeddings,
ImageFeatureEmbeddings,
BiModalEncoder,
)
from allennlp.nn import util
from allennlp_models.vision.models.vision_text_model import VisionTextModel
logger = logging.getLogger(__name__)
@Model.register("vqa_vilbert")
@Model.register("vqa_vilbert_from_huggingface", constructor="from_huggingface_model_name")
class VqaVilbert(VisionTextModel):
"""
Model for VQA task based on the VilBERT paper.
# Parameters
vocab : `Vocabulary`
text_embeddings : `TransformerEmbeddings`
image_embeddings : `ImageFeatureEmbeddings`
encoder : `BiModalEncoder`
pooled_output_dim : `int`
fusion_method : `str`, optional (default = `"sum"`)
dropout : `float`, optional (default = `0.1`)
label_namespace : `str`, optional (default = `answers`)
"""
def __init__(
self,
vocab: Vocabulary,
text_embeddings: TransformerEmbeddings,
image_embeddings: ImageFeatureEmbeddings,
encoder: BiModalEncoder,
pooled_output_dim: int,
fusion_method: str = "sum",
dropout: float = 0.1,
label_namespace: str = "answers",
*,
ignore_text: bool = False,
ignore_image: bool = False
) -> None:
super().__init__(
vocab,
text_embeddings,
image_embeddings,
encoder,
pooled_output_dim,
fusion_method,
dropout,
label_namespace,
is_multilabel=True,
ignore_text=ignore_text,
ignore_image=ignore_image,
)
from allennlp.training.metrics import F1MultiLabelMeasure
from allennlp_models.vision.metrics.vqa import VqaMeasure
self.f1_metric = F1MultiLabelMeasure(average="micro")
self.vqa_metric = VqaMeasure()
def forward(
self, # type: ignore
box_features: torch.Tensor,
box_coordinates: torch.Tensor,
box_mask: torch.Tensor,
question: TextFieldTensors,
labels: Optional[torch.Tensor] = None,
label_weights: Optional[torch.Tensor] = None,
) -> Dict[str, torch.Tensor]:
return super().forward(
box_features,
box_coordinates,
box_mask,
text=question,
labels=labels,
label_weights=label_weights,
)
def _compute_loss_and_metrics(
self,
batch_size: int,
outputs: torch.Tensor,
label: torch.Tensor,
label_weights: Optional[torch.Tensor] = None,
):
if label is not None and label_weights is not None:
logits = outputs["logits"]
label_mask = label > 1 # 0 is padding, 1 is OOV, which we want to ignore
weighted_labels = util.masked_index_replace(
logits.new_zeros(logits.size() + (1,)),
label.clamp(min=0),
label_mask,
label_weights.unsqueeze(-1),
).squeeze(-1)
# weighted_labels now has shape (batch_size, num_labels). We need to ignore the first
# two columns of this in our loss function and accuracy metric. The first column is a
# padding label, and the second column is an OOV label. We want the loss function to
# be computed on every other label.
binary_label_mask = weighted_labels.new_ones(logits.size())
binary_label_mask[:, 0] = 0
binary_label_mask[:, 1] = 0
outputs["loss"] = (
torch.nn.functional.binary_cross_entropy_with_logits(
logits, weighted_labels, weight=binary_label_mask, reduction="sum"
)
/ batch_size
)
self.f1_metric(logits, weighted_labels, binary_label_mask.bool())
self.vqa_metric(logits, label, label_weights)
return outputs
def get_metrics(self, reset: bool = False) -> Dict[str, float]:
result = self.f1_metric.get_metric(reset)
result["vqa_score"] = self.vqa_metric.get_metric(reset)["score"]
return result
def make_output_human_readable(
self, output_dict: Dict[str, torch.Tensor]
) -> Dict[str, torch.Tensor]:
batch_tokens = []
for batch_index, batch in enumerate(output_dict["probs"]):
tokens = {}
for i, prob in enumerate(batch):
tokens[self.vocab.get_token_from_index(i, self.label_namespace)] = float(prob)
batch_tokens.append(tokens)
output_dict["tokens"] = batch_tokens
return output_dict
default_predictor = "vilbert_vqa"
| allennlp-models-main | allennlp_models/vision/models/vilbert_vqa.py |
import logging
from typing import Dict, List, Optional
import numpy as np
import torch
from allennlp.data.fields.text_field import TextFieldTensors
from allennlp.data.vocabulary import Vocabulary
from allennlp.models.model import Model
from allennlp.modules.transformer import (
TransformerEmbeddings,
ImageFeatureEmbeddings,
BiModalEncoder,
)
logger = logging.getLogger(__name__)
@Model.register("vision_model")
class VisionTextModel(Model):
"""
`VisionTextModel` takes as input a single text input and a single image input
to produce some output. Example tasks include visual question-answering, visual
entailment, etc.
# Parameters
vocab : `Vocabulary`
text_embeddings : `TransformerEmbeddings`
image_embeddings : `ImageFeatureEmbeddings`
encoder : `BiModalEncoder`
pooled_output_dim : `int`
fusion_method : `str`, optional (default = `"sum"`)
dropout : `float`, optional (default = `0.1`)
label_namespace : `str`, optional (default = `"labels"`)
is_multilabel: `bool`, optional (default = `False`)
Whether the output classification is multilabel.
(i.e., can have multiple correct answers)
"""
def __init__(
self,
vocab: Vocabulary,
text_embeddings: TransformerEmbeddings,
image_embeddings: ImageFeatureEmbeddings,
encoder: BiModalEncoder,
pooled_output_dim: int,
fusion_method: str = "sum",
dropout: float = 0.1,
label_namespace: str = "labels",
is_multilabel: bool = False,
*,
ignore_text: bool = False,
ignore_image: bool = False,
) -> None:
super().__init__(vocab)
from allennlp.modules.backbones import VilbertBackbone
self.backbone = VilbertBackbone(
vocab,
text_embeddings,
image_embeddings,
encoder,
pooled_output_dim,
fusion_method,
dropout,
)
num_labels = vocab.get_vocab_size(label_namespace)
self.label_namespace = label_namespace
self.classifier = torch.nn.Linear(pooled_output_dim, num_labels)
self.dropout = torch.nn.Dropout(dropout)
self.is_multilabel = is_multilabel
self.ignore_text = ignore_text
self.ignore_images = ignore_image
@classmethod
def from_huggingface_model_name(
cls,
vocab: Vocabulary,
model_name: str,
image_feature_dim: int,
image_num_hidden_layers: int,
image_hidden_size: int,
image_num_attention_heads: int,
combined_hidden_size: int,
combined_num_attention_heads: int,
pooled_output_dim: int,
image_intermediate_size: int,
image_attention_dropout: float,
image_hidden_dropout: float,
image_biattention_id: List[int],
text_biattention_id: List[int],
text_fixed_layer: int,
image_fixed_layer: int,
pooled_dropout: float = 0.1,
fusion_method: str = "sum",
*,
ignore_text: bool = False,
ignore_image: bool = False,
):
text_embeddings = TransformerEmbeddings.from_pretrained_module(model_name)
image_embeddings = ImageFeatureEmbeddings(
feature_size=image_feature_dim,
embedding_size=image_hidden_size,
dropout=image_hidden_dropout,
)
encoder = BiModalEncoder.from_pretrained_module(
model_name,
num_hidden_layers2=image_num_hidden_layers,
hidden_size2=image_hidden_size,
num_attention_heads2=image_num_attention_heads,
combined_hidden_size=combined_hidden_size,
combined_num_attention_heads=combined_num_attention_heads,
intermediate_size2=image_intermediate_size,
attention_dropout2=image_attention_dropout,
hidden_dropout2=image_hidden_dropout,
biattention_id1=text_biattention_id,
biattention_id2=image_biattention_id,
fixed_layer1=text_fixed_layer,
fixed_layer2=image_fixed_layer,
)
return cls(
vocab=vocab,
text_embeddings=text_embeddings,
image_embeddings=image_embeddings,
encoder=encoder,
pooled_output_dim=pooled_output_dim,
fusion_method=fusion_method,
dropout=pooled_dropout,
ignore_text=ignore_text,
ignore_image=ignore_image,
)
def forward(
self, # type: ignore
box_features: torch.Tensor,
box_coordinates: torch.Tensor,
box_mask: torch.Tensor,
text: TextFieldTensors,
labels: Optional[torch.Tensor] = None,
label_weights: Optional[torch.Tensor] = None,
) -> Dict[str, torch.Tensor]:
"""
# Parameters
box_features : `Tensor`
Shape: `(batch_size, num_boxes, feature_size)`
box_coordinates : `Tensor`
Shape: `(batch_size, num_boxes, 4)`
box_mask : `Tensor`
A bool and 0-1 tensor of shape `(batch_size, num_boxes)`.
text : `TextFieldTensors`
label : `Optional[Tensor]`
label_weights : `Optional[Tensor]`
"""
batch_size = box_features.size(0)
if self.ignore_images:
box_features = torch.zeros_like(box_features)
box_coordinates = torch.zeros_like(box_coordinates)
box_coordinates[..., 2] = 1
box_coordinates[..., 3] = 1
box_mask = torch.ones_like(box_mask)
if self.ignore_text:
dummy_text = {}
for embedder_name, tensor_dict in text.items():
dummy_tensor_dict = {}
for tensor_name, tensor in tensor_dict.items():
if "mask" in tensor_name:
tensor = torch.ones_like(tensor)
else:
tensor = torch.zeros_like(tensor)
dummy_tensor_dict[tensor_name] = tensor
dummy_text[embedder_name] = dummy_tensor_dict
text = dummy_text
backbone_outputs = self.backbone(box_features, box_coordinates, box_mask, text)
# Shape: (batch_size, num_labels)
logits = self.classifier(backbone_outputs["pooled_boxes_and_text"])
# Shape: (batch_size, num_labels)
if self.is_multilabel:
probs = torch.sigmoid(logits)
else:
probs = torch.softmax(logits, dim=-1)
outputs = {"logits": logits, "probs": probs}
outputs = self._compute_loss_and_metrics(batch_size, outputs, labels, label_weights)
return outputs
def _compute_loss_and_metrics(
self,
batch_size: int,
outputs: Dict[str, torch.Tensor],
label: torch.Tensor,
label_weights: Optional[torch.Tensor] = None,
) -> Dict[str, torch.Tensor]:
return outputs
def get_metrics(self, reset: bool = False) -> Dict[str, float]:
result = self.accuracy.get_metric(reset)
return {"accuracy": result}
def make_output_human_readable(
self, output_dict: Dict[str, torch.Tensor]
) -> Dict[str, torch.Tensor]:
batch_labels = []
for batch_index, batch in enumerate(output_dict["probs"]):
labels = np.argmax(batch, axis=-1)
batch_labels.append(labels)
output_dict["labels"] = batch_labels
return output_dict
| allennlp-models-main | allennlp_models/vision/models/vision_text_model.py |
import logging
from typing import Dict, Optional, List, Any
import numpy as np
import torch
from allennlp.data import TextFieldTensors, Vocabulary
from allennlp.models.model import Model
from allennlp.modules.transformer import (
TransformerEmbeddings,
ImageFeatureEmbeddings,
BiModalEncoder,
)
from allennlp.training.metrics import CategoricalAccuracy
from allennlp.training.metrics import FBetaMeasure
from allennlp_models.vision.models.vision_text_model import VisionTextModel
logger = logging.getLogger(__name__)
@Model.register("nlvr2")
@Model.register("nlvr2_from_huggingface", constructor="from_huggingface_model_name")
class Nlvr2Model(VisionTextModel):
"""
Model for visual entailment task based on the paper
[A Corpus for Reasoning About Natural Language Grounded in Photographs]
(https://api.semanticscholar.org/CorpusID:53178856).
# Parameters
vocab : `Vocabulary`
text_embeddings : `TransformerEmbeddings`
image_embeddings : `ImageFeatureEmbeddings`
encoder : `BiModalEncoder`
pooled_output_dim : `int`
fusion_method : `str`, optional (default = `"mul"`)
dropout : `float`, optional (default = `0.1`)
label_namespace : `str`, optional (default = `labels`)
"""
def __init__(
self,
vocab: Vocabulary,
text_embeddings: TransformerEmbeddings,
image_embeddings: ImageFeatureEmbeddings,
encoder: BiModalEncoder,
pooled_output_dim: int,
fusion_method: str = "mul",
dropout: float = 0.1,
label_namespace: str = "labels",
*,
ignore_text: bool = False,
ignore_image: bool = False,
) -> None:
super().__init__(
vocab,
text_embeddings,
image_embeddings,
encoder,
pooled_output_dim,
fusion_method,
dropout,
label_namespace,
is_multilabel=False,
)
self.pooled_output_dim = pooled_output_dim
self.layer1 = torch.nn.Linear(pooled_output_dim * 2, pooled_output_dim)
self.layer2 = torch.nn.Linear(pooled_output_dim, 2)
self.activation = torch.nn.ReLU()
self.accuracy = CategoricalAccuracy()
self.fbeta = FBetaMeasure(beta=1.0, average="macro")
def forward(
self, # type: ignore
box_features: torch.Tensor,
box_coordinates: torch.Tensor,
box_mask: torch.Tensor,
hypothesis: TextFieldTensors,
label: Optional[torch.Tensor] = None,
identifier: List[Dict[str, Any]] = None,
) -> Dict[str, torch.Tensor]:
batch_size = box_features.shape[0]
pooled_outputs = self.backbone(box_features, box_coordinates, box_mask, hypothesis)[
"pooled_boxes_and_text"
].transpose(0, 1)
hidden = self.layer1(torch.cat((pooled_outputs[0], pooled_outputs[1]), dim=-1))
# Shape: (batch_size, num_labels)
logits = self.layer2(self.activation(hidden))
# Shape: (batch_size, num_labels)
probs = torch.softmax(logits, dim=-1)
outputs = {"logits": logits, "probs": probs}
outputs = self._compute_loss_and_metrics(batch_size, outputs, label)
return outputs
def _compute_loss_and_metrics(
self,
batch_size: int,
outputs: Dict[str, torch.Tensor],
label: torch.Tensor,
label_weights: Optional[torch.Tensor] = None,
) -> Dict[str, torch.Tensor]:
if label_weights is not None:
raise NotImplementedError("This implementation does not support label_weights.")
if label is not None:
outputs["loss"] = (
torch.nn.functional.cross_entropy(outputs["logits"], label) / batch_size
)
self.accuracy(outputs["logits"], label)
self.fbeta(outputs["probs"], label)
return outputs
def get_metrics(self, reset: bool = False) -> Dict[str, float]:
metrics = self.fbeta.get_metric(reset)
accuracy = self.accuracy.get_metric(reset)
metrics.update({"accuracy": accuracy})
return metrics
def make_output_human_readable(
self, output_dict: Dict[str, torch.Tensor]
) -> Dict[str, torch.Tensor]:
batch_labels = []
for batch_index, batch in enumerate(output_dict["probs"]):
labels = np.argmax(batch, axis=-1)
batch_labels.append(labels)
output_dict["labels"] = batch_labels
return output_dict
default_predictor = "nlvr2"
| allennlp-models-main | allennlp_models/vision/models/nlvr2.py |
import logging
from typing import Dict, Optional
import numpy as np
import torch
from allennlp.data import TextFieldTensors, Vocabulary
from allennlp.models.model import Model
from allennlp.modules.transformer import (
TransformerEmbeddings,
ImageFeatureEmbeddings,
BiModalEncoder,
)
from allennlp.training.metrics import CategoricalAccuracy
from allennlp.training.metrics import FBetaMeasure
from allennlp_models.vision.models.vision_text_model import VisionTextModel
logger = logging.getLogger(__name__)
@Model.register("ve_vilbert")
@Model.register("ve_vilbert_from_huggingface", constructor="from_huggingface_model_name")
class VisualEntailmentModel(VisionTextModel):
"""
Model for visual entailment task based on the paper
[Visual Entailment: A Novel Task for Fine-Grained Image Understanding]
(https://api.semanticscholar.org/CorpusID:58981654).
# Parameters
vocab : `Vocabulary`
text_embeddings : `TransformerEmbeddings`
image_embeddings : `ImageFeatureEmbeddings`
encoder : `BiModalEncoder`
pooled_output_dim : `int`
fusion_method : `str`, optional (default = `"sum"`)
dropout : `float`, optional (default = `0.1`)
label_namespace : `str`, optional (default = `labels`)
"""
def __init__(
self,
vocab: Vocabulary,
text_embeddings: TransformerEmbeddings,
image_embeddings: ImageFeatureEmbeddings,
encoder: BiModalEncoder,
pooled_output_dim: int,
fusion_method: str = "sum",
dropout: float = 0.1,
label_namespace: str = "labels",
*,
ignore_text: bool = False,
ignore_image: bool = False,
) -> None:
super().__init__(
vocab,
text_embeddings,
image_embeddings,
encoder,
pooled_output_dim,
fusion_method,
dropout,
label_namespace,
is_multilabel=False,
)
self.accuracy = CategoricalAccuracy()
self.fbeta = FBetaMeasure(beta=1.0, average="macro")
def forward( # type: ignore
self,
box_features: torch.Tensor,
box_coordinates: torch.Tensor,
box_mask: torch.Tensor,
hypothesis: TextFieldTensors,
labels: Optional[torch.Tensor] = None,
) -> Dict[str, torch.Tensor]:
return super().forward(
box_features,
box_coordinates,
box_mask,
text=hypothesis,
labels=labels,
label_weights=None,
)
def _compute_loss_and_metrics(
self,
batch_size: int,
outputs: torch.Tensor,
label: torch.Tensor,
label_weights: Optional[torch.Tensor] = None,
):
assert label_weights is None
if label is not None:
outputs["loss"] = (
torch.nn.functional.cross_entropy(outputs["logits"], label) / batch_size
)
self.accuracy(outputs["logits"], label)
self.fbeta(outputs["probs"], label)
return outputs
def get_metrics(self, reset: bool = False) -> Dict[str, float]:
metrics = self.fbeta.get_metric(reset)
accuracy = self.accuracy.get_metric(reset)
metrics.update({"accuracy": accuracy})
return metrics
def make_output_human_readable(
self, output_dict: Dict[str, torch.Tensor]
) -> Dict[str, torch.Tensor]:
batch_labels = []
for batch_index, batch in enumerate(output_dict["probs"]):
labels = np.argmax(batch, axis=-1)
batch_labels.append(labels)
output_dict["labels"] = batch_labels
return output_dict
default_predictor = "vilbert_ve"
| allennlp-models-main | allennlp_models/vision/models/visual_entailment.py |
from typing import Dict, Optional
import torch
from allennlp.data.vocabulary import Vocabulary
from allennlp.models.heads.head import Head
@Head.register("nlvr2")
class Nlvr2Head(Head):
def __init__(self, vocab: Vocabulary, embedding_dim: int, label_namespace: str = "labels"):
super().__init__(vocab)
self.label_namespace = label_namespace
self.layer1 = torch.nn.Linear(embedding_dim * 2, embedding_dim)
self.layer2 = torch.nn.Linear(embedding_dim, 2)
self.activation = torch.nn.ReLU()
from allennlp.training.metrics import CategoricalAccuracy
from allennlp.training.metrics import FBetaMeasure
self.accuracy = CategoricalAccuracy()
self.fbeta = FBetaMeasure(beta=1.0, average="macro")
def forward(
self, # type: ignore
encoded_boxes: torch.Tensor,
encoded_boxes_mask: torch.Tensor,
encoded_boxes_pooled: torch.Tensor,
encoded_text: torch.Tensor,
encoded_text_mask: torch.Tensor,
encoded_text_pooled: torch.Tensor,
pooled_boxes_and_text: torch.Tensor,
label: Optional[torch.Tensor] = None,
label_weights: Optional[torch.Tensor] = None,
) -> Dict[str, torch.Tensor]:
pooled_boxes_and_text = pooled_boxes_and_text.transpose(0, 1)
hidden = self.layer1(
torch.cat((pooled_boxes_and_text[0], pooled_boxes_and_text[1]), dim=-1)
)
logits = self.layer2(self.activation(hidden))
probs = torch.softmax(logits, dim=-1)
output = {"logits": logits, "probs": probs}
assert label_weights is None
if label is not None:
output["loss"] = torch.nn.functional.cross_entropy(logits, label) / logits.size(0)
self.accuracy(logits, label)
self.fbeta(probs, label)
return output
def get_metrics(self, reset: bool = False) -> Dict[str, float]:
result = self.fbeta.get_metric(reset)
result["accuracy"] = self.accuracy.get_metric(reset)
return result
def make_output_human_readable(
self, output_dict: Dict[str, torch.Tensor]
) -> Dict[str, torch.Tensor]:
if len(output_dict) <= 0:
return output_dict
logits = output_dict["logits"]
entailment_answer_index = logits.argmax(-1)
entailment_answer = [
self.vocab.get_token_from_index(int(i), "labels") for i in entailment_answer_index
]
output_dict["entailment_answer"] = entailment_answer
return output_dict
default_predictor = "nlvr2"
| allennlp-models-main | allennlp_models/vision/models/heads/nlvr2_head.py |
from typing import Dict, Optional
import torch
from allennlp.data.vocabulary import Vocabulary
from allennlp.models.heads.head import Head
@Head.register("visual_entailment")
class VisualEntailmentHead(Head):
def __init__(self, vocab: Vocabulary, embedding_dim: int, label_namespace: str = "labels"):
super().__init__(vocab)
num_labels = vocab.get_vocab_size(label_namespace)
self.label_namespace = label_namespace
self.classifier = torch.nn.Linear(embedding_dim, num_labels)
from allennlp.training.metrics import CategoricalAccuracy
from allennlp.training.metrics import FBetaMeasure
self.accuracy = CategoricalAccuracy()
self.fbeta = FBetaMeasure(beta=1.0, average="macro")
def forward(
self, # type: ignore
encoded_boxes: torch.Tensor,
encoded_boxes_mask: torch.Tensor,
encoded_boxes_pooled: torch.Tensor,
encoded_text: torch.Tensor,
encoded_text_mask: torch.Tensor,
encoded_text_pooled: torch.Tensor,
pooled_boxes_and_text: torch.Tensor,
labels: Optional[torch.Tensor] = None,
label_weights: Optional[torch.Tensor] = None,
) -> Dict[str, torch.Tensor]:
logits = self.classifier(pooled_boxes_and_text)
probs = torch.softmax(logits, dim=-1)
output = {"logits": logits, "probs": probs}
assert label_weights is None
if labels is not None:
output["loss"] = torch.nn.functional.cross_entropy(logits, labels) / logits.size(0)
self.accuracy(logits, labels)
self.fbeta(probs, labels)
return output
def get_metrics(self, reset: bool = False) -> Dict[str, float]:
result = self.fbeta.get_metric(reset)
result["acc"] = self.accuracy.get_metric(reset)
return result
def make_output_human_readable(
self, output_dict: Dict[str, torch.Tensor]
) -> Dict[str, torch.Tensor]:
if len(output_dict) <= 0:
return output_dict
logits = output_dict["logits"]
entailment_answer_index = logits.argmax(-1)
entailment_answer = [
self.vocab.get_token_from_index(int(i), "labels") for i in entailment_answer_index
]
output_dict["entailment_answer"] = entailment_answer
return output_dict
default_predictor = "vilbert_ve"
| allennlp-models-main | allennlp_models/vision/models/heads/visual_entailment_head.py |
from allennlp_models.vision.models.heads.nlvr2_head import Nlvr2Head
from allennlp_models.vision.models.heads.vqa_head import VqaHead
from allennlp_models.vision.models.heads.visual_entailment_head import VisualEntailmentHead
| allennlp-models-main | allennlp_models/vision/models/heads/__init__.py |
from typing import Dict, Optional
import torch
from allennlp.data.vocabulary import Vocabulary
from allennlp.models.heads.head import Head
@Head.register("vqa")
class VqaHead(Head):
def __init__(self, vocab: Vocabulary, embedding_dim: int, label_namespace: str = "answers"):
from allennlp_models.vision.metrics.vqa import VqaMeasure
from allennlp.training.metrics import F1MultiLabelMeasure
super().__init__(vocab)
num_labels = vocab.get_vocab_size(label_namespace)
self.label_namespace = label_namespace
self.classifier = torch.nn.Linear(embedding_dim, num_labels)
self.f1_metric = F1MultiLabelMeasure(average="micro")
self.vqa_metric = VqaMeasure()
def forward(
self, # type: ignore
encoded_boxes: torch.Tensor,
encoded_boxes_mask: torch.Tensor,
encoded_boxes_pooled: torch.Tensor,
encoded_text: torch.Tensor,
encoded_text_mask: torch.Tensor,
encoded_text_pooled: torch.Tensor,
pooled_boxes_and_text: torch.Tensor,
labels: Optional[torch.Tensor] = None,
label_weights: Optional[torch.Tensor] = None,
) -> Dict[str, torch.Tensor]:
logits = self.classifier(pooled_boxes_and_text)
output = {
"logits": logits,
"probs": torch.sigmoid(logits),
}
if labels is not None and label_weights is not None:
label_mask = labels > 1 # 0 is padding, 1 is OOV, which we want to ignore
from allennlp.nn import util
weighted_labels = util.masked_index_replace(
logits.new_zeros(logits.size() + (1,)),
labels.clamp(min=0),
label_mask,
label_weights.unsqueeze(-1),
).squeeze(-1)
# weighted_labels now has shape (batch_size, num_labels). We need to ignore the first
# two columns of this in our loss function and accuracy metric. The first column is a
# padding label, and the second column is an OOV label. We want the loss function to
# be computed on every other label.
binary_label_mask = weighted_labels.new_ones(logits.size())
binary_label_mask[:, 0] = 0
binary_label_mask[:, 1] = 0
output["loss"] = torch.nn.functional.binary_cross_entropy_with_logits(
logits, weighted_labels, weight=binary_label_mask, reduction="sum"
) / logits.size(0)
self.f1_metric(logits, weighted_labels, binary_label_mask.bool())
self.vqa_metric(logits, labels, label_weights)
return output
def get_metrics(self, reset: bool = False) -> Dict[str, float]:
result = self.f1_metric.get_metric(reset)
result["vqa"] = self.vqa_metric.get_metric(reset)["score"]
return result
def make_output_human_readable(
self, output_dict: Dict[str, torch.Tensor]
) -> Dict[str, torch.Tensor]:
if len(output_dict) <= 0:
return output_dict
logits = output_dict["logits"]
best_answer_index = logits.argmax(-1)
best_answer = [
self.vocab.get_token_from_index(int(i), "answers") for i in best_answer_index
]
output_dict["best_answer"] = best_answer
return output_dict
default_predictor = "vilbert_vqa"
| allennlp-models-main | allennlp_models/vision/models/heads/vqa_head.py |
# flake8: noqa: F403
from allennlp_models.pair_classification.dataset_readers import *
from allennlp_models.pair_classification.models import *
from allennlp_models.pair_classification.predictors import *
| allennlp-models-main | allennlp_models/pair_classification/__init__.py |
from allennlp_models.pair_classification.dataset_readers.quora_paraphrase import (
QuoraParaphraseDatasetReader,
)
from allennlp_models.pair_classification.dataset_readers.snli import SnliReader
from allennlp_models.pair_classification.dataset_readers.transformer_superglue_rte import (
TransformerSuperGlueRteReader,
)
| allennlp-models-main | allennlp_models/pair_classification/dataset_readers/__init__.py |
from typing import Dict, Optional
import json
import logging
from allennlp.common.file_utils import cached_path
from allennlp.data.dataset_readers.dataset_reader import DatasetReader
from allennlp.data.fields import Field, TextField, LabelField, MetadataField
from allennlp.data.instance import Instance
from allennlp.data.token_indexers import SingleIdTokenIndexer, TokenIndexer
from allennlp.data.tokenizers import Tokenizer, SpacyTokenizer, PretrainedTransformerTokenizer
logger = logging.getLogger(__name__)
def maybe_collapse_label(label: str, collapse: bool):
"""
Helper function that optionally collapses the "contradiction" and "neutral" labels
into "non-entailment".
"""
assert label in ["contradiction", "neutral", "entailment"]
if collapse and label in ["contradiction", "neutral"]:
return "non-entailment"
return label
@DatasetReader.register("snli")
class SnliReader(DatasetReader):
"""
Reads a file from the Stanford Natural Language Inference (SNLI) dataset. This data is
formatted as jsonl, one json-formatted instance per line. The keys in the data are
"gold_label", "sentence1", and "sentence2". We convert these keys into fields named "label",
"premise" and "hypothesis", along with a metadata field containing the tokenized strings of the
premise and hypothesis.
Registered as a `DatasetReader` with name "snli".
# Parameters
tokenizer : `Tokenizer`, optional (default=`SpacyTokenizer()`)
We use this `Tokenizer` for both the premise and the hypothesis. See :class:`Tokenizer`.
token_indexers : `Dict[str, TokenIndexer]`, optional (default=`{"tokens": SingleIdTokenIndexer()}`)
We similarly use this for both the premise and the hypothesis. See :class:`TokenIndexer`.
combine_input_fields : `bool`, optional
(default=`isinstance(tokenizer, PretrainedTransformerTokenizer)`)
If False, represent the premise and the hypothesis as separate fields in the instance.
If True, tokenize them together using `tokenizer.tokenize_sentence_pair()`
and provide a single `tokens` field in the instance.
collapse_labels : `bool`, optional (default=`False`)
If `True`, the "neutral" and "contradiction" labels will be collapsed into "non-entailment";
"entailment" will be left unchanged.
"""
def __init__(
self,
tokenizer: Optional[Tokenizer] = None,
token_indexers: Dict[str, TokenIndexer] = None,
combine_input_fields: Optional[bool] = None,
collapse_labels: Optional[bool] = False,
**kwargs,
) -> None:
super().__init__(
manual_distributed_sharding=True, manual_multiprocess_sharding=True, **kwargs
)
self._tokenizer = tokenizer or SpacyTokenizer()
if isinstance(self._tokenizer, PretrainedTransformerTokenizer):
assert not self._tokenizer._add_special_tokens
self._token_indexers = token_indexers or {"tokens": SingleIdTokenIndexer()}
if combine_input_fields is not None:
self._combine_input_fields = combine_input_fields
else:
self._combine_input_fields = isinstance(self._tokenizer, PretrainedTransformerTokenizer)
self.collapse_labels = collapse_labels
def _read(self, file_path: str):
# if `file_path` is a URL, redirect to the cache
file_path = cached_path(file_path)
with open(file_path, "r") as snli_file:
example_iter = (json.loads(line) for line in snli_file)
filtered_example_iter = (
example for example in example_iter if example.get("gold_label") != "-"
)
for example in self.shard_iterable(filtered_example_iter):
label = example.get("gold_label")
premise = example["sentence1"]
hypothesis = example["sentence2"]
yield self.text_to_instance(premise, hypothesis, label)
def text_to_instance(
self, # type: ignore
premise: str,
hypothesis: str,
label: str = None,
) -> Instance:
fields: Dict[str, Field] = {}
premise = self._tokenizer.tokenize(premise)
hypothesis = self._tokenizer.tokenize(hypothesis)
if self._combine_input_fields:
tokens = self._tokenizer.add_special_tokens(premise, hypothesis)
fields["tokens"] = TextField(tokens)
else:
premise_tokens = self._tokenizer.add_special_tokens(premise)
hypothesis_tokens = self._tokenizer.add_special_tokens(hypothesis)
fields["premise"] = TextField(premise_tokens)
fields["hypothesis"] = TextField(hypothesis_tokens)
metadata = {
"premise_tokens": [x.text for x in premise_tokens],
"hypothesis_tokens": [x.text for x in hypothesis_tokens],
}
fields["metadata"] = MetadataField(metadata)
if label:
maybe_collapsed_label = maybe_collapse_label(label, self.collapse_labels)
fields["label"] = LabelField(maybe_collapsed_label)
return Instance(fields)
def apply_token_indexers(self, instance: Instance) -> Instance:
if "tokens" in instance.fields:
instance.fields["tokens"]._token_indexers = self._token_indexers
else:
instance.fields["premise"]._token_indexers = self._token_indexers
instance.fields["hypothesis"]._token_indexers = self._token_indexers
| allennlp-models-main | allennlp_models/pair_classification/dataset_readers/snli.py |
import logging
from typing import Any, Dict
from allennlp.data.fields import MetadataField, TextField, LabelField
from allennlp.common.file_utils import cached_path
from allennlp.data.dataset_readers.dataset_reader import DatasetReader
from allennlp.data.instance import Instance
from allennlp.data.token_indexers import PretrainedTransformerIndexer
from allennlp.data.tokenizers import PretrainedTransformerTokenizer
logger = logging.getLogger(__name__)
@DatasetReader.register("transformer_superglue_rte")
class TransformerSuperGlueRteReader(DatasetReader):
"""
Dataset reader for the SuperGLUE Recognizing Textual Entailment task, to be used with a transformer
model such as RoBERTa. The dataset is in the JSON Lines format.
It will generate `Instances` with the following fields:
* `tokens`, a `TextField` that contains the concatenation of premise and hypothesis,
* `label`, a `LabelField` containing the label, if one exists.
* `metadata`, a `MetadataField` that stores the instance's index in the file, the original premise,
the original hypothesis, both of these in tokenized form, and the gold label, accessible as
`metadata['index']`, `metadata['premise']`, `metadata['hypothesis']`, `metadata['tokens']`,
and `metadata['label']`.
# Parameters
type : `str`, optional (default=`'roberta-base'`)
This reader chooses tokenizer according to this setting.
"""
def __init__(
self,
transformer_model_name: str = "roberta-base",
tokenizer_kwargs: Dict[str, Any] = None,
**kwargs
) -> None:
super().__init__(
manual_distributed_sharding=True, manual_multiprocess_sharding=True, **kwargs
)
self._tokenizer = PretrainedTransformerTokenizer(
transformer_model_name,
add_special_tokens=False,
tokenizer_kwargs=tokenizer_kwargs,
)
self._token_indexers = {
"tokens": PretrainedTransformerIndexer(
transformer_model_name, tokenizer_kwargs=tokenizer_kwargs, max_length=512
)
}
def _read(self, file_path: str):
# if `file_path` is a URL, redirect to the cache
file_path = cached_path(file_path, extract_archive=True)
logger.info("Reading file at %s", file_path)
yielded_relation_count = 0
from allennlp.common.file_utils import json_lines_from_file
for relation in self.shard_iterable(json_lines_from_file(file_path)):
premise = relation["premise"]
hypothesis = relation["hypothesis"]
if "label" in relation:
label = relation["label"]
else:
label = None
index = relation["idx"]
# todo: see if we even need this to be in a separate method
instance = self.text_to_instance(index, label, premise, hypothesis)
yield instance
yielded_relation_count += 1
def text_to_instance(
self,
index: int,
label: str,
premise: str,
hypothesis: str,
) -> Instance:
tokenized_premise = self._tokenizer.tokenize(premise)
tokenized_hypothesis = self._tokenizer.tokenize(hypothesis)
fields = {}
premise_and_hypothesis = TextField(
self._tokenizer.add_special_tokens(tokenized_premise, tokenized_hypothesis),
)
fields["tokens"] = premise_and_hypothesis
# make the metadata
metadata = {
"premise": premise,
"premise_tokens": tokenized_premise,
"hypothesis": hypothesis,
"hypothesis_tokens": tokenized_hypothesis,
"index": index,
}
if label:
fields["label"] = LabelField(label)
metadata["label"] = label
fields["metadata"] = MetadataField(metadata)
return Instance(fields)
def apply_token_indexers(self, instance: Instance) -> None:
instance["tokens"].token_indexers = self._token_indexers
| allennlp-models-main | allennlp_models/pair_classification/dataset_readers/transformer_superglue_rte.py |
from typing import Optional, Dict
import logging
import csv
from allennlp.common.file_utils import cached_path
from allennlp.data.dataset_readers.dataset_reader import DatasetReader
from allennlp.data.fields import LabelField, TextField, Field
from allennlp.data.instance import Instance
from allennlp.data.tokenizers import Tokenizer, PretrainedTransformerTokenizer
from allennlp.data.tokenizers.whitespace_tokenizer import WhitespaceTokenizer
from allennlp.data.token_indexers import TokenIndexer, SingleIdTokenIndexer
logger = logging.getLogger(__name__)
@DatasetReader.register("quora_paraphrase")
class QuoraParaphraseDatasetReader(DatasetReader):
"""
Reads a file from the Quora Paraphrase dataset. The train/validation/test split of the data
comes from the paper [Bilateral Multi-Perspective Matching for
Natural Language Sentences](https://arxiv.org/abs/1702.03814) by Zhiguo Wang et al., 2017.
Each file of the data is a tsv file without header. The columns are is_duplicate, question1,
question2, and id. All questions are pre-tokenized and tokens are space separated. We convert
these keys into fields named "label", "premise" and "hypothesis", so that it is compatible t
some existing natural language inference algorithms.
Registered as a `DatasetReader` with name "quora_paraphrase".
# Parameters
tokenizer : `Tokenizer`, optional
Tokenizer to use to split the premise and hypothesis into words or other kinds of tokens.
Defaults to `WhitespaceTokenizer`.
token_indexers : `Dict[str, TokenIndexer]`, optional
Indexers used to define input token representations. Defaults to `{"tokens":
SingleIdTokenIndexer()}`.
"""
def __init__(
self,
tokenizer: Tokenizer = None,
token_indexers: Dict[str, TokenIndexer] = None,
combine_input_fields: Optional[bool] = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self._tokenizer = tokenizer or WhitespaceTokenizer()
self._token_indexers = token_indexers or {"tokens": SingleIdTokenIndexer()}
if isinstance(self._tokenizer, PretrainedTransformerTokenizer):
assert not self._tokenizer._add_special_tokens
if combine_input_fields is not None:
self._combine_input_fields = combine_input_fields
else:
self._combine_input_fields = isinstance(self._tokenizer, PretrainedTransformerTokenizer)
def _read(self, file_path):
logger.info("Reading instances from lines in file at: %s", file_path)
with open(cached_path(file_path), "r") as data_file:
tsv_in = csv.reader(data_file, delimiter="\t")
for row in tsv_in:
if len(row) == 4:
yield self.text_to_instance(premise=row[1], hypothesis=row[2], label=row[0])
def text_to_instance(
self, # type: ignore
premise: str,
hypothesis: str,
label: str = None,
) -> Instance:
fields: Dict[str, Field] = {}
premise = self._tokenizer.tokenize(premise)
hypothesis = self._tokenizer.tokenize(hypothesis)
if self._combine_input_fields:
tokens = self._tokenizer.add_special_tokens(premise, hypothesis)
fields["tokens"] = TextField(tokens, self._token_indexers)
else:
premise_tokens = self._tokenizer.add_special_tokens(premise)
hypothesis_tokens = self._tokenizer.add_special_tokens(hypothesis)
fields["premise"] = TextField(premise_tokens, self._token_indexers)
fields["hypothesis"] = TextField(hypothesis_tokens, self._token_indexers)
if label is not None:
fields["label"] = LabelField(label)
return Instance(fields)
| allennlp-models-main | allennlp_models/pair_classification/dataset_readers/quora_paraphrase.py |
from allennlp_models.pair_classification.predictors.textual_entailment import (
TextualEntailmentPredictor,
)
| allennlp-models-main | allennlp_models/pair_classification/predictors/__init__.py |
from typing import List, Dict
import numpy
from allennlp.common.util import JsonDict
from allennlp.data import Instance
from allennlp.predictors.predictor import Predictor
from allennlp.data.fields import LabelField
@Predictor.register("textual_entailment")
class TextualEntailmentPredictor(Predictor):
"""
Predictor for the [`DecomposableAttention`](../models/decomposable_attention.md) model.
Registered as a `Predictor` with name "textual_entailment".
"""
def predict(self, premise: str, hypothesis: str) -> JsonDict:
"""
Predicts whether the hypothesis is entailed by the premise text.
# Parameters
premise : `str`
A passage representing what is assumed to be true.
hypothesis : `str`
A sentence that may be entailed by the premise.
# Returns
`JsonDict`
A dictionary where the key "label_probs" determines the probabilities of each of
[entailment, contradiction, neutral].
"""
return self.predict_json({"premise": premise, "hypothesis": hypothesis})
def _json_to_instance(self, json_dict: JsonDict) -> Instance:
"""
Expects JSON that looks like `{"premise": "...", "hypothesis": "..."}`.
"""
premise_text = json_dict["premise"]
hypothesis_text = json_dict["hypothesis"]
return self._dataset_reader.text_to_instance(premise_text, hypothesis_text)
def predictions_to_labeled_instances(
self, instance: Instance, outputs: Dict[str, numpy.ndarray]
) -> List[Instance]:
new_instance = instance.duplicate()
label = numpy.argmax(outputs["label_logits"])
# Skip indexing, we have integer representations of the strings "entailment", etc.
new_instance.add_field("label", LabelField(int(label), skip_indexing=True))
return [new_instance]
| allennlp-models-main | allennlp_models/pair_classification/predictors/textual_entailment.py |
from typing import Dict, Optional, List, Any
import torch
from allennlp.common.checks import check_dimensions_match
from allennlp.data import TextFieldTensors, Vocabulary
from allennlp.models.model import Model
from allennlp.modules import FeedForward
from allennlp.modules import Seq2SeqEncoder, TimeDistributed, TextFieldEmbedder
from allennlp.modules.matrix_attention.matrix_attention import MatrixAttention
from allennlp.nn import InitializerApplicator
from allennlp.nn.util import get_text_field_mask, masked_softmax, weighted_sum
from allennlp.training.metrics import CategoricalAccuracy
@Model.register("decomposable_attention")
class DecomposableAttention(Model):
"""
This `Model` implements the Decomposable Attention model described in [A Decomposable
Attention Model for Natural Language Inference](https://api.semanticscholar.org/CorpusID:8495258)
by Parikh et al., 2016, with some optional enhancements before the decomposable attention
actually happens. Parikh's original model allowed for computing an "intra-sentence" attention
before doing the decomposable entailment step. We generalize this to any
[`Seq2SeqEncoder`](../modules/seq2seq_encoders/seq2seq_encoder.md) that can be applied to
the premise and/or the hypothesis before computing entailment.
The basic outline of this model is to get an embedded representation of each word in the
premise and hypothesis, align words between the two, compare the aligned phrases, and make a
final entailment decision based on this aggregated comparison. Each step in this process uses
a feedforward network to modify the representation.
Registered as a `Model` with name "decomposable_attention".
# Parameters
vocab : `Vocabulary`
text_field_embedder : `TextFieldEmbedder`
Used to embed the `premise` and `hypothesis` `TextFields` we get as input to the
model.
attend_feedforward : `FeedForward`
This feedforward network is applied to the encoded sentence representations before the
similarity matrix is computed between words in the premise and words in the hypothesis.
matrix_attention : `MatrixAttention`
This is the attention function used when computing the similarity matrix between words in
the premise and words in the hypothesis.
compare_feedforward : `FeedForward`
This feedforward network is applied to the aligned premise and hypothesis representations,
individually.
aggregate_feedforward : `FeedForward`
This final feedforward network is applied to the concatenated, summed result of the
`compare_feedforward` network, and its output is used as the entailment class logits.
premise_encoder : `Seq2SeqEncoder`, optional (default=`None`)
After embedding the premise, we can optionally apply an encoder. If this is `None`, we
will do nothing.
hypothesis_encoder : `Seq2SeqEncoder`, optional (default=`None`)
After embedding the hypothesis, we can optionally apply an encoder. If this is `None`,
we will use the `premise_encoder` for the encoding (doing nothing if `premise_encoder`
is also `None`).
initializer : `InitializerApplicator`, optional (default=`InitializerApplicator()`)
Used to initialize the model parameters.
"""
def __init__(
self,
vocab: Vocabulary,
text_field_embedder: TextFieldEmbedder,
attend_feedforward: FeedForward,
matrix_attention: MatrixAttention,
compare_feedforward: FeedForward,
aggregate_feedforward: FeedForward,
premise_encoder: Optional[Seq2SeqEncoder] = None,
hypothesis_encoder: Optional[Seq2SeqEncoder] = None,
initializer: InitializerApplicator = InitializerApplicator(),
**kwargs,
) -> None:
super().__init__(vocab, **kwargs)
self._text_field_embedder = text_field_embedder
self._attend_feedforward = TimeDistributed(attend_feedforward)
self._matrix_attention = matrix_attention
self._compare_feedforward = TimeDistributed(compare_feedforward)
self._aggregate_feedforward = aggregate_feedforward
self._premise_encoder = premise_encoder
self._hypothesis_encoder = hypothesis_encoder or premise_encoder
self._num_labels = vocab.get_vocab_size(namespace="labels")
check_dimensions_match(
text_field_embedder.get_output_dim(),
attend_feedforward.get_input_dim(),
"text field embedding dim",
"attend feedforward input dim",
)
check_dimensions_match(
aggregate_feedforward.get_output_dim(),
self._num_labels,
"final output dimension",
"number of labels",
)
self._accuracy = CategoricalAccuracy()
self._loss = torch.nn.CrossEntropyLoss()
initializer(self)
def forward( # type: ignore
self,
premise: TextFieldTensors,
hypothesis: TextFieldTensors,
label: torch.IntTensor = None,
metadata: List[Dict[str, Any]] = None,
) -> Dict[str, torch.Tensor]:
"""
# Parameters
premise : `TextFieldTensors`
From a `TextField`
hypothesis : `TextFieldTensors`
From a `TextField`
label : `torch.IntTensor`, optional (default = `None`)
From a `LabelField`
metadata : `List[Dict[str, Any]]`, optional (default = `None`)
Metadata containing the original tokenization of the premise and
hypothesis with 'premise_tokens' and 'hypothesis_tokens' keys respectively.
# Returns
An output dictionary consisting of:
label_logits : `torch.FloatTensor`
A tensor of shape `(batch_size, num_labels)` representing unnormalised log
probabilities of the entailment label.
label_probs : `torch.FloatTensor`
A tensor of shape `(batch_size, num_labels)` representing probabilities of the
entailment label.
loss : `torch.FloatTensor`, optional
A scalar loss to be optimised.
"""
embedded_premise = self._text_field_embedder(premise)
embedded_hypothesis = self._text_field_embedder(hypothesis)
premise_mask = get_text_field_mask(premise)
hypothesis_mask = get_text_field_mask(hypothesis)
if self._premise_encoder:
embedded_premise = self._premise_encoder(embedded_premise, premise_mask)
if self._hypothesis_encoder:
embedded_hypothesis = self._hypothesis_encoder(embedded_hypothesis, hypothesis_mask)
projected_premise = self._attend_feedforward(embedded_premise)
projected_hypothesis = self._attend_feedforward(embedded_hypothesis)
# Shape: (batch_size, premise_length, hypothesis_length)
similarity_matrix = self._matrix_attention(projected_premise, projected_hypothesis)
# Shape: (batch_size, premise_length, hypothesis_length)
p2h_attention = masked_softmax(similarity_matrix, hypothesis_mask)
# Shape: (batch_size, premise_length, embedding_dim)
attended_hypothesis = weighted_sum(embedded_hypothesis, p2h_attention)
# Shape: (batch_size, hypothesis_length, premise_length)
h2p_attention = masked_softmax(similarity_matrix.transpose(1, 2).contiguous(), premise_mask)
# Shape: (batch_size, hypothesis_length, embedding_dim)
attended_premise = weighted_sum(embedded_premise, h2p_attention)
premise_compare_input = torch.cat([embedded_premise, attended_hypothesis], dim=-1)
hypothesis_compare_input = torch.cat([embedded_hypothesis, attended_premise], dim=-1)
compared_premise = self._compare_feedforward(premise_compare_input)
compared_premise = compared_premise * premise_mask.unsqueeze(-1)
# Shape: (batch_size, compare_dim)
compared_premise = compared_premise.sum(dim=1)
compared_hypothesis = self._compare_feedforward(hypothesis_compare_input)
compared_hypothesis = compared_hypothesis * hypothesis_mask.unsqueeze(-1)
# Shape: (batch_size, compare_dim)
compared_hypothesis = compared_hypothesis.sum(dim=1)
aggregate_input = torch.cat([compared_premise, compared_hypothesis], dim=-1)
label_logits = self._aggregate_feedforward(aggregate_input)
label_probs = torch.nn.functional.softmax(label_logits, dim=-1)
output_dict = {
"label_logits": label_logits,
"label_probs": label_probs,
"h2p_attention": h2p_attention,
"p2h_attention": p2h_attention,
}
if label is not None:
loss = self._loss(label_logits, label.long().view(-1))
self._accuracy(label_logits, label)
output_dict["loss"] = loss
if metadata is not None:
output_dict["premise_tokens"] = [x["premise_tokens"] for x in metadata]
output_dict["hypothesis_tokens"] = [x["hypothesis_tokens"] for x in metadata]
return output_dict
def get_metrics(self, reset: bool = False) -> Dict[str, float]:
return {"accuracy": self._accuracy.get_metric(reset)}
def make_output_human_readable(
self, output_dict: Dict[str, torch.Tensor]
) -> Dict[str, torch.Tensor]:
"""
Does a simple argmax over the probabilities, converts index to string label, and
add `"label"` key to the dictionary with the result.
"""
predictions = output_dict["label_probs"]
if predictions.dim() == 2:
predictions_list = [predictions[i] for i in range(predictions.shape[0])]
else:
predictions_list = [predictions]
classes = []
for prediction in predictions_list:
label_idx = prediction.argmax(dim=-1).item()
label_str = self.vocab.get_index_to_token_vocabulary("labels").get(
label_idx, str(label_idx)
)
classes.append(label_str)
output_dict["label"] = classes
return output_dict
default_predictor = "textual_entailment"
| allennlp-models-main | allennlp_models/pair_classification/models/decomposable_attention.py |
from allennlp_models.pair_classification.models.bimpm import BiMpm
from allennlp_models.pair_classification.models.decomposable_attention import DecomposableAttention
from allennlp_models.pair_classification.models.esim import ESIM
| allennlp-models-main | allennlp_models/pair_classification/models/__init__.py |
"""
BiMPM (Bilateral Multi-Perspective Matching) model implementation.
"""
from typing import Dict, List, Any
import torch
from allennlp.common.checks import check_dimensions_match
from allennlp.data import TextFieldTensors, Vocabulary
from allennlp.modules import FeedForward, Seq2SeqEncoder, Seq2VecEncoder, TextFieldEmbedder
from allennlp.models.model import Model
from allennlp.nn import InitializerApplicator
from allennlp.nn import util
from allennlp.training.metrics import CategoricalAccuracy
from allennlp.modules.bimpm_matching import BiMpmMatching
@Model.register("bimpm")
class BiMpm(Model):
"""
This `Model` implements BiMPM model described in [Bilateral Multi-Perspective Matching
for Natural Language Sentences](https://arxiv.org/abs/1702.03814) by Zhiguo Wang et al., 2017.
Also please refer to the [TensorFlow implementation](https://github.com/zhiguowang/BiMPM/) and
[PyTorch implementation](https://github.com/galsang/BIMPM-pytorch).
Registered as a `Model` with name "bimpm".
# Parameters
vocab : `Vocabulary`
text_field_embedder : `TextFieldEmbedder`
Used to embed the `premise` and `hypothesis` `TextFields` we get as input to the
model.
matcher_word : `BiMpmMatching`
BiMPM matching on the output of word embeddings of premise and hypothesis.
encoder1 : `Seq2SeqEncoder`
First encoder layer for the premise and hypothesis
matcher_forward1 : `BiMPMMatching`
BiMPM matching for the forward output of first encoder layer
matcher_backward1 : `BiMPMMatching`
BiMPM matching for the backward output of first encoder layer
encoder2 : `Seq2SeqEncoder`
Second encoder layer for the premise and hypothesis
matcher_forward2 : `BiMPMMatching`
BiMPM matching for the forward output of second encoder layer
matcher_backward2 : `BiMPMMatching`
BiMPM matching for the backward output of second encoder layer
aggregator : `Seq2VecEncoder`
Aggregator of all BiMPM matching vectors
classifier_feedforward : `FeedForward`
Fully connected layers for classification.
dropout : `float`, optional (default=`0.1`)
Dropout percentage to use.
initializer : `InitializerApplicator`, optional (default=`InitializerApplicator()`)
If provided, will be used to initialize the model parameters.
"""
def __init__(
self,
vocab: Vocabulary,
text_field_embedder: TextFieldEmbedder,
matcher_word: BiMpmMatching,
encoder1: Seq2SeqEncoder,
matcher_forward1: BiMpmMatching,
matcher_backward1: BiMpmMatching,
encoder2: Seq2SeqEncoder,
matcher_forward2: BiMpmMatching,
matcher_backward2: BiMpmMatching,
aggregator: Seq2VecEncoder,
classifier_feedforward: FeedForward,
dropout: float = 0.1,
initializer: InitializerApplicator = InitializerApplicator(),
**kwargs,
) -> None:
super().__init__(vocab, **kwargs)
self.text_field_embedder = text_field_embedder
self.matcher_word = matcher_word
self.encoder1 = encoder1
self.matcher_forward1 = matcher_forward1
self.matcher_backward1 = matcher_backward1
self.encoder2 = encoder2
self.matcher_forward2 = matcher_forward2
self.matcher_backward2 = matcher_backward2
self.aggregator = aggregator
matching_dim = (
self.matcher_word.get_output_dim()
+ self.matcher_forward1.get_output_dim()
+ self.matcher_backward1.get_output_dim()
+ self.matcher_forward2.get_output_dim()
+ self.matcher_backward2.get_output_dim()
)
check_dimensions_match(
matching_dim,
self.aggregator.get_input_dim(),
"sum of dim of all matching layers",
"aggregator input dim",
)
self.classifier_feedforward = classifier_feedforward
self.dropout = torch.nn.Dropout(dropout)
self.metrics = {"accuracy": CategoricalAccuracy()}
self.loss = torch.nn.CrossEntropyLoss()
initializer(self)
def forward(
self, # type: ignore
premise: TextFieldTensors,
hypothesis: TextFieldTensors,
label: torch.LongTensor = None,
metadata: List[Dict[str, Any]] = None,
) -> Dict[str, torch.Tensor]:
"""
# Parameters
premise : `TextFieldTensors`
The premise from a `TextField`
hypothesis : `TextFieldTensors`
The hypothesis from a `TextField`
label : `torch.LongTensor`, optional (default = `None`)
The label for the pair of the premise and the hypothesis
metadata : `List[Dict[str, Any]]`, optional, (default = `None`)
Additional information about the pair
# Returns
An output dictionary consisting of:
logits : `torch.FloatTensor`
A tensor of shape `(batch_size, num_labels)` representing unnormalised log
probabilities of the entailment label.
loss : `torch.FloatTensor`, optional
A scalar loss to be optimised.
"""
mask_premise = util.get_text_field_mask(premise)
mask_hypothesis = util.get_text_field_mask(hypothesis)
# embedding and encoding of the premise
embedded_premise = self.dropout(self.text_field_embedder(premise))
encoded_premise1 = self.dropout(self.encoder1(embedded_premise, mask_premise))
encoded_premise2 = self.dropout(self.encoder2(encoded_premise1, mask_premise))
# embedding and encoding of the hypothesis
embedded_hypothesis = self.dropout(self.text_field_embedder(hypothesis))
encoded_hypothesis1 = self.dropout(self.encoder1(embedded_hypothesis, mask_hypothesis))
encoded_hypothesis2 = self.dropout(self.encoder2(encoded_hypothesis1, mask_hypothesis))
matching_vector_premise: List[torch.Tensor] = []
matching_vector_hypothesis: List[torch.Tensor] = []
def add_matching_result(matcher, encoded_premise, encoded_hypothesis):
# utility function to get matching result and add to the result list
matching_result = matcher(
encoded_premise, mask_premise, encoded_hypothesis, mask_hypothesis
)
matching_vector_premise.extend(matching_result[0])
matching_vector_hypothesis.extend(matching_result[1])
# calculate matching vectors from word embedding, first layer encoding, and second layer encoding
add_matching_result(self.matcher_word, embedded_premise, embedded_hypothesis)
half_hidden_size_1 = self.encoder1.get_output_dim() // 2
add_matching_result(
self.matcher_forward1,
encoded_premise1[:, :, :half_hidden_size_1],
encoded_hypothesis1[:, :, :half_hidden_size_1],
)
add_matching_result(
self.matcher_backward1,
encoded_premise1[:, :, half_hidden_size_1:],
encoded_hypothesis1[:, :, half_hidden_size_1:],
)
half_hidden_size_2 = self.encoder2.get_output_dim() // 2
add_matching_result(
self.matcher_forward2,
encoded_premise2[:, :, :half_hidden_size_2],
encoded_hypothesis2[:, :, :half_hidden_size_2],
)
add_matching_result(
self.matcher_backward2,
encoded_premise2[:, :, half_hidden_size_2:],
encoded_hypothesis2[:, :, half_hidden_size_2:],
)
# concat the matching vectors
matching_vector_cat_premise = self.dropout(torch.cat(matching_vector_premise, dim=2))
matching_vector_cat_hypothesis = self.dropout(torch.cat(matching_vector_hypothesis, dim=2))
# aggregate the matching vectors
aggregated_premise = self.dropout(
self.aggregator(matching_vector_cat_premise, mask_premise)
)
aggregated_hypothesis = self.dropout(
self.aggregator(matching_vector_cat_hypothesis, mask_hypothesis)
)
# the final forward layer
logits = self.classifier_feedforward(
torch.cat([aggregated_premise, aggregated_hypothesis], dim=-1)
)
probs = torch.nn.functional.softmax(logits, dim=-1)
output_dict = {"logits": logits, "label_probs": probs}
if label is not None:
loss = self.loss(logits, label)
for metric in self.metrics.values():
metric(logits, label)
output_dict["loss"] = loss
return output_dict
def get_metrics(self, reset: bool = False) -> Dict[str, float]:
return {
metric_name: metric.get_metric(reset) for metric_name, metric in self.metrics.items()
}
def make_output_human_readable(
self, output_dict: Dict[str, torch.Tensor]
) -> Dict[str, torch.Tensor]:
"""
Does a simple argmax over the probabilities, converts index to string label, and
add `"label"` key to the dictionary with the result.
"""
predictions = output_dict["label_probs"]
if predictions.dim() == 2:
predictions_list = [predictions[i] for i in range(predictions.shape[0])]
else:
predictions_list = [predictions]
classes = []
for prediction in predictions_list:
label_idx = prediction.argmax(dim=-1).item()
label_str = self.vocab.get_index_to_token_vocabulary("labels").get(
label_idx, str(label_idx)
)
classes.append(label_str)
output_dict["label"] = classes
return output_dict
default_predictor = "textual_entailment"
| allennlp-models-main | allennlp_models/pair_classification/models/bimpm.py |
from typing import Dict, List, Any
import torch
from allennlp.common.checks import check_dimensions_match
from allennlp.data import TextFieldTensors, Vocabulary
from allennlp.models.model import Model
from allennlp.modules import FeedForward, InputVariationalDropout
from allennlp.modules.matrix_attention.matrix_attention import MatrixAttention
from allennlp.modules import Seq2SeqEncoder, TextFieldEmbedder
from allennlp.nn import InitializerApplicator
from allennlp.nn.util import (
get_text_field_mask,
masked_softmax,
weighted_sum,
masked_max,
)
from allennlp.training.metrics import CategoricalAccuracy
@Model.register("esim")
class ESIM(Model):
"""
This `Model` implements the ESIM sequence model described in [Enhanced LSTM for Natural Language Inference]
(https://api.semanticscholar.org/CorpusID:34032948) by Chen et al., 2017.
Registered as a `Model` with name "esim".
# Parameters
vocab : `Vocabulary`
text_field_embedder : `TextFieldEmbedder`
Used to embed the `premise` and `hypothesis` `TextFields` we get as input to the
model.
encoder : `Seq2SeqEncoder`
Used to encode the premise and hypothesis.
matrix_attention : `MatrixAttention`
This is the attention function used when computing the similarity matrix between encoded
words in the premise and words in the hypothesis.
projection_feedforward : `FeedForward`
The feedforward network used to project down the encoded and enhanced premise and hypothesis.
inference_encoder : `Seq2SeqEncoder`
Used to encode the projected premise and hypothesis for prediction.
output_feedforward : `FeedForward`
Used to prepare the concatenated premise and hypothesis for prediction.
output_logit : `FeedForward`
This feedforward network computes the output logits.
dropout : `float`, optional (default=`0.5`)
Dropout percentage to use.
initializer : `InitializerApplicator`, optional (default=`InitializerApplicator()`)
Used to initialize the model parameters.
"""
def __init__(
self,
vocab: Vocabulary,
text_field_embedder: TextFieldEmbedder,
encoder: Seq2SeqEncoder,
matrix_attention: MatrixAttention,
projection_feedforward: FeedForward,
inference_encoder: Seq2SeqEncoder,
output_feedforward: FeedForward,
output_logit: FeedForward,
dropout: float = 0.5,
initializer: InitializerApplicator = InitializerApplicator(),
**kwargs,
) -> None:
super().__init__(vocab, **kwargs)
self._text_field_embedder = text_field_embedder
self._encoder = encoder
self._matrix_attention = matrix_attention
self._projection_feedforward = projection_feedforward
self._inference_encoder = inference_encoder
if dropout:
self.dropout = torch.nn.Dropout(dropout)
self.rnn_input_dropout = InputVariationalDropout(dropout)
else:
self.dropout = None
self.rnn_input_dropout = None
self._output_feedforward = output_feedforward
self._output_logit = output_logit
self._num_labels = vocab.get_vocab_size(namespace="labels")
check_dimensions_match(
text_field_embedder.get_output_dim(),
encoder.get_input_dim(),
"text field embedding dim",
"encoder input dim",
)
check_dimensions_match(
encoder.get_output_dim() * 4,
projection_feedforward.get_input_dim(),
"encoder output dim",
"projection feedforward input",
)
check_dimensions_match(
projection_feedforward.get_output_dim(),
inference_encoder.get_input_dim(),
"proj feedforward output dim",
"inference lstm input dim",
)
self._accuracy = CategoricalAccuracy()
self._loss = torch.nn.CrossEntropyLoss()
initializer(self)
def forward( # type: ignore
self,
premise: TextFieldTensors,
hypothesis: TextFieldTensors,
label: torch.IntTensor = None,
metadata: List[Dict[str, Any]] = None,
) -> Dict[str, torch.Tensor]:
"""
# Parameters
premise : `TextFieldTensors`
From a `TextField`
hypothesis : `TextFieldTensors`
From a `TextField`
label : `torch.IntTensor`, optional (default = `None`)
From a `LabelField`
metadata : `List[Dict[str, Any]]`, optional (default = `None`)
Metadata containing the original tokenization of the premise and
hypothesis with 'premise_tokens' and 'hypothesis_tokens' keys respectively.
# Returns
An output dictionary consisting of:
label_logits : `torch.FloatTensor`
A tensor of shape `(batch_size, num_labels)` representing unnormalised log
probabilities of the entailment label.
label_probs : `torch.FloatTensor`
A tensor of shape `(batch_size, num_labels)` representing probabilities of the
entailment label.
loss : `torch.FloatTensor`, optional
A scalar loss to be optimised.
"""
embedded_premise = self._text_field_embedder(premise)
embedded_hypothesis = self._text_field_embedder(hypothesis)
premise_mask = get_text_field_mask(premise)
hypothesis_mask = get_text_field_mask(hypothesis)
# apply dropout for LSTM
if self.rnn_input_dropout:
embedded_premise = self.rnn_input_dropout(embedded_premise)
embedded_hypothesis = self.rnn_input_dropout(embedded_hypothesis)
# encode premise and hypothesis
encoded_premise = self._encoder(embedded_premise, premise_mask)
encoded_hypothesis = self._encoder(embedded_hypothesis, hypothesis_mask)
# Shape: (batch_size, premise_length, hypothesis_length)
similarity_matrix = self._matrix_attention(encoded_premise, encoded_hypothesis)
# Shape: (batch_size, premise_length, hypothesis_length)
p2h_attention = masked_softmax(similarity_matrix, hypothesis_mask)
# Shape: (batch_size, premise_length, embedding_dim)
attended_hypothesis = weighted_sum(encoded_hypothesis, p2h_attention)
# Shape: (batch_size, hypothesis_length, premise_length)
h2p_attention = masked_softmax(similarity_matrix.transpose(1, 2).contiguous(), premise_mask)
# Shape: (batch_size, hypothesis_length, embedding_dim)
attended_premise = weighted_sum(encoded_premise, h2p_attention)
# the "enhancement" layer
premise_enhanced = torch.cat(
[
encoded_premise,
attended_hypothesis,
encoded_premise - attended_hypothesis,
encoded_premise * attended_hypothesis,
],
dim=-1,
)
hypothesis_enhanced = torch.cat(
[
encoded_hypothesis,
attended_premise,
encoded_hypothesis - attended_premise,
encoded_hypothesis * attended_premise,
],
dim=-1,
)
# The projection layer down to the model dimension. Dropout is not applied before
# projection.
projected_enhanced_premise = self._projection_feedforward(premise_enhanced)
projected_enhanced_hypothesis = self._projection_feedforward(hypothesis_enhanced)
# Run the inference layer
if self.rnn_input_dropout:
projected_enhanced_premise = self.rnn_input_dropout(projected_enhanced_premise)
projected_enhanced_hypothesis = self.rnn_input_dropout(projected_enhanced_hypothesis)
v_ai = self._inference_encoder(projected_enhanced_premise, premise_mask)
v_bi = self._inference_encoder(projected_enhanced_hypothesis, hypothesis_mask)
# The pooling layer -- max and avg pooling.
# (batch_size, model_dim)
v_a_max = masked_max(v_ai, premise_mask.unsqueeze(-1), dim=1)
v_b_max = masked_max(v_bi, hypothesis_mask.unsqueeze(-1), dim=1)
v_a_avg = torch.sum(v_ai * premise_mask.unsqueeze(-1), dim=1) / torch.sum(
premise_mask, 1, keepdim=True
)
v_b_avg = torch.sum(v_bi * hypothesis_mask.unsqueeze(-1), dim=1) / torch.sum(
hypothesis_mask, 1, keepdim=True
)
# Now concat
# (batch_size, model_dim * 2 * 4)
v_all = torch.cat([v_a_avg, v_a_max, v_b_avg, v_b_max], dim=1)
# the final MLP -- apply dropout to input, and MLP applies to output & hidden
if self.dropout:
v_all = self.dropout(v_all)
output_hidden = self._output_feedforward(v_all)
label_logits = self._output_logit(output_hidden)
label_probs = torch.nn.functional.softmax(label_logits, dim=-1)
output_dict = {"label_logits": label_logits, "label_probs": label_probs}
if label is not None:
loss = self._loss(label_logits, label.long().view(-1))
self._accuracy(label_logits, label)
output_dict["loss"] = loss
return output_dict
def get_metrics(self, reset: bool = False) -> Dict[str, float]:
return {"accuracy": self._accuracy.get_metric(reset)}
def make_output_human_readable(
self, output_dict: Dict[str, torch.Tensor]
) -> Dict[str, torch.Tensor]:
"""
Does a simple argmax over the probabilities, converts index to string label, and
add `"label"` key to the dictionary with the result.
"""
predictions = output_dict["label_probs"]
if predictions.dim() == 2:
predictions_list = [predictions[i] for i in range(predictions.shape[0])]
else:
predictions_list = [predictions]
classes = []
for prediction in predictions_list:
label_idx = prediction.argmax(dim=-1).item()
label_str = self.vocab.get_index_to_token_vocabulary("labels").get(
label_idx, str(label_idx)
)
classes.append(label_str)
output_dict["label"] = classes
return output_dict
default_predictor = "textual_entailment"
| allennlp-models-main | allennlp_models/pair_classification/models/esim.py |
# flake8: noqa: F403
from allennlp_models.tagging.predictors import *
from allennlp_models.tagging.models import *
from allennlp_models.tagging.dataset_readers import *
| allennlp-models-main | allennlp_models/tagging/__init__.py |
from allennlp.data.dataset_readers.conll2003 import Conll2003DatasetReader # noqa: F401
# This component lives in the main repo because we need it there for tests.
| allennlp-models-main | allennlp_models/tagging/dataset_readers/conll2003.py |
from typing import Dict, List, Sequence
import logging
import re
from allennlp.common.checks import ConfigurationError
from allennlp.common.file_utils import cached_path
from allennlp.data.dataset_readers.dataset_reader import DatasetReader
from allennlp.data.fields import Field, TextField, SequenceLabelField
from allennlp.data.instance import Instance
from allennlp.data.token_indexers import TokenIndexer, SingleIdTokenIndexer
from allennlp.data.tokenizers import Token
logger = logging.getLogger(__name__)
_VALID_LABELS = {"ccg", "modified_pos", "original_pos", "predicate_arg"}
@DatasetReader.register("ccgbank")
class CcgBankDatasetReader(DatasetReader):
"""
Reads data in the "machine-readable derivation" format of the CCGbank dataset.
(see https://catalog.ldc.upenn.edu/docs/LDC2005T13/CCGbankManual.pdf, section D.2)
In particular, it pulls out the leaf nodes, which are represented as
(<L ccg_category modified_pos original_pos token predicate_arg_category>)
The tarballed version of the dataset contains many files worth of this data,
in files /data/AUTO/xx/wsj_xxxx.auto
This dataset reader expects a single text file. Accordingly, if you're using that dataset,
you'll need to first concatenate some of those files into a training set, a validation set,
and a test set.
Registered as a `DatasetReader` with name "ccgbank".
# Parameters
token_indexers : `Dict[str, TokenIndexer]`, optional (default=`{"tokens": SingleIdTokenIndexer()}`)
We use this to define the input representation for the text. See :class:`TokenIndexer`.
Note that the `output` tags will always correspond to single token IDs based on how they
are pre-tokenised in the data file.
tag_label : `str`, optional (default=`ccg`)
Specify `ccg`, `modified_pos`, `original_pos`, or `predicate_arg` to
have that tag loaded into the instance field `tag`.
feature_labels : `Sequence[str]`, optional (default=`()`)
These labels will be loaded as features into the corresponding instance fields:
`ccg` -> `ccg_tags`, `modified_pos` -> `modified_pos_tags`,
`original_pos` -> `original_pos_tags`, or `predicate_arg` -> `predicate_arg_tags`
Each will have its own namespace : `ccg_tags`, `modified_pos_tags`,
`original_pos_tags`, `predicate_arg_tags`. If you want to use one of the tags
as a feature in your model, it should be specified here.
label_namespace : `str`, optional (default=`labels`)
Specifies the namespace for the chosen `tag_label`.
"""
def __init__(
self,
token_indexers: Dict[str, TokenIndexer] = None,
tag_label: str = "ccg",
feature_labels: Sequence[str] = (),
label_namespace: str = "labels",
**kwargs,
) -> None:
super().__init__(**kwargs)
self._token_indexers = token_indexers or {"tokens": SingleIdTokenIndexer()}
self.tag_label = tag_label
if tag_label is not None and tag_label not in _VALID_LABELS:
raise ConfigurationError("unknown tag label type: {}".format(tag_label))
self.feature_labels = set(feature_labels)
for label in feature_labels:
if label not in _VALID_LABELS:
raise ConfigurationError("unknown feature label type: {}".format(label))
self.label_namespace = label_namespace
def _read(self, file_path):
# if `file_path` is a URL, redirect to the cache
file_path = cached_path(file_path)
logger.info("Reading instances from lines in file at: %s", file_path)
with open(file_path) as input_file:
for line in input_file:
if line.startswith("(<"):
# Each leaf looks like
# (<L ccg_category modified_pos original_pos token predicate_arg_category>)
leaves = re.findall("<L (.*?)>", line)
# Use magic unzipping trick to split into tuples
tuples = zip(*[leaf.split() for leaf in leaves])
# Convert to lists and assign to variables.
(
ccg_categories,
modified_pos_tags,
original_pos_tags,
tokens,
predicate_arg_categories,
) = [list(result) for result in tuples]
yield self.text_to_instance(
tokens,
ccg_categories,
original_pos_tags,
modified_pos_tags,
predicate_arg_categories,
)
def text_to_instance(
self, # type: ignore
tokens: List[str],
ccg_categories: List[str] = None,
original_pos_tags: List[str] = None,
modified_pos_tags: List[str] = None,
predicate_arg_categories: List[str] = None,
) -> Instance:
"""
We take `pre-tokenized` input here, because we don't have a tokenizer in this class.
# Parameters
tokens : `List[str]`, required.
The tokens in a given sentence.
ccg_categories : `List[str]`, optional, (default = `None`).
The CCG categories for the words in the sentence. (e.g. N/N)
original_pos_tags : `List[str]`, optional, (default = `None`).
The tag assigned to the word in the Penn Treebank.
modified_pos_tags : `List[str]`, optional, (default = `None`).
The POS tag might have changed during the translation to CCG.
predicate_arg_categories : `List[str]`, optional, (default = `None`).
Encodes the word-word dependencies in the underlying predicate-
argument structure.
# Returns
An `Instance` containing the following fields:
tokens : `TextField`
The tokens in the sentence.
tags : `SequenceLabelField`
The tags corresponding to the `tag_label` constructor argument.
feature_label_tags : `SequenceLabelField`
Tags corresponding to each feature_label (if any) specified in the
`feature_labels` constructor argument.
"""
text_field = TextField([Token(x) for x in tokens], token_indexers=self._token_indexers)
fields: Dict[str, Field] = {"tokens": text_field}
# Add "feature labels" to instance
if "ccg" in self.feature_labels:
if ccg_categories is None:
raise ConfigurationError(
"Dataset reader was specified to use CCG categories as "
"features. Pass them to text_to_instance."
)
fields["ccg_tags"] = SequenceLabelField(ccg_categories, text_field, "ccg_tags")
if "original_pos" in self.feature_labels:
if original_pos_tags is None:
raise ConfigurationError(
"Dataset reader was specified to use original POS tags as "
"features. Pass them to text_to_instance."
)
fields["original_pos_tags"] = SequenceLabelField(
original_pos_tags, text_field, "original_pos_tags"
)
if "modified_pos" in self.feature_labels:
if modified_pos_tags is None:
raise ConfigurationError(
"Dataset reader was specified to use modified POS tags as "
" features. Pass them to text_to_instance."
)
fields["modified_pos_tags"] = SequenceLabelField(
modified_pos_tags, text_field, "modified_pos_tags"
)
if "predicate_arg" in self.feature_labels:
if predicate_arg_categories is None:
raise ConfigurationError(
"Dataset reader was specified to use predicate arg tags as "
" features. Pass them to text_to_instance."
)
fields["predicate_arg_tags"] = SequenceLabelField(
predicate_arg_categories, text_field, "predicate_arg_tags"
)
# Add "tag label" to instance
if self.tag_label == "ccg" and ccg_categories is not None:
fields["tags"] = SequenceLabelField(ccg_categories, text_field, self.label_namespace)
elif self.tag_label == "original_pos" and original_pos_tags is not None:
fields["tags"] = SequenceLabelField(original_pos_tags, text_field, self.label_namespace)
elif self.tag_label == "modified_pos" and modified_pos_tags is not None:
fields["tags"] = SequenceLabelField(modified_pos_tags, text_field, self.label_namespace)
elif self.tag_label == "predicate_arg" and predicate_arg_categories is not None:
fields["tags"] = SequenceLabelField(
predicate_arg_categories, text_field, self.label_namespace
)
return Instance(fields)
| allennlp-models-main | allennlp_models/tagging/dataset_readers/ccgbank.py |
from allennlp_models.tagging.dataset_readers.ccgbank import CcgBankDatasetReader
from allennlp_models.tagging.dataset_readers.conll2000 import Conll2000DatasetReader
from allennlp_models.tagging.dataset_readers.conll2003 import Conll2003DatasetReader
from allennlp_models.tagging.dataset_readers.ontonotes_ner import OntonotesNamedEntityRecognition
| allennlp-models-main | allennlp_models/tagging/dataset_readers/__init__.py |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.