python_code | repo_name | file_path
---|---|---|
import logging
from typing import Dict, List, Iterable
from allennlp.data.dataset_readers.dataset_utils import to_bioul
from allennlp.common.file_utils import cached_path
from allennlp.common.checks import ConfigurationError
from allennlp.data.dataset_readers.dataset_reader import DatasetReader
from allennlp.data.fields import Field, TextField, SequenceLabelField, MetadataField
from allennlp.data.instance import Instance
from allennlp.data.token_indexers import SingleIdTokenIndexer, TokenIndexer
from allennlp.data.tokenizers import Token
from allennlp_models.common.ontonotes import Ontonotes, OntonotesSentence
logger = logging.getLogger(__name__)
def _normalize_word(word: str):
if word in ("/.", "/?"):
return word[1:]
else:
return word
@DatasetReader.register("ontonotes_ner")
class OntonotesNamedEntityRecognition(DatasetReader):
"""
This DatasetReader is designed to read in the English OntoNotes v5.0 data
for fine-grained named entity recognition. It returns a dataset of instances with the
following fields:
tokens : `TextField`
The tokens in the sentence.
tags : `SequenceLabelField`
A sequence of BIO tags for the NER classes.
Note that the "/pt/" directory of the Onotonotes dataset representing annotations
on the new and old testaments of the Bible are excluded, because they do not contain
NER annotations.
# Parameters
token_indexers : `Dict[str, TokenIndexer]`, optional
We use this to define the input representation for the text. See :class:`TokenIndexer`.
Default is `{"tokens": SingleIdTokenIndexer()}`.
domain_identifier : `str`, (default = `None`)
A string denoting a sub-domain of the Ontonotes 5.0 dataset to use. If present, only
conll files under paths containing this domain identifier will be processed.
coding_scheme : `str`, optional (default = `"BIO"`)
The coding scheme to use for the NER labels. Valid options are "BIO" or "BIOUL".
# Returns
A `Dataset` of `Instances` for Fine-Grained NER.
"""
def __init__(
self,
token_indexers: Dict[str, TokenIndexer] = None,
domain_identifier: str = None,
coding_scheme: str = "BIO",
**kwargs,
) -> None:
super().__init__(**kwargs)
self._token_indexers = token_indexers or {"tokens": SingleIdTokenIndexer()}
self._domain_identifier = domain_identifier
if domain_identifier == "pt":
raise ConfigurationError(
"The Ontonotes 5.0 dataset does not contain annotations for"
" the old and new testament sections."
)
self._coding_scheme = coding_scheme
def _read(self, file_path: str):
# if `file_path` is a URL, redirect to the cache
file_path = cached_path(file_path)
ontonotes_reader = Ontonotes()
logger.info("Reading Fine-Grained NER instances from dataset files at: %s", file_path)
if self._domain_identifier is not None:
logger.info(
"Filtering to only include file paths containing the %s domain",
self._domain_identifier,
)
for sentence in self._ontonotes_subset(
ontonotes_reader, file_path, self._domain_identifier
):
tokens = [Token(_normalize_word(t)) for t in sentence.words]
yield self.text_to_instance(tokens, sentence.named_entities)
@staticmethod
def _ontonotes_subset(
ontonotes_reader: Ontonotes, file_path: str, domain_identifier: str
) -> Iterable[OntonotesSentence]:
"""
Iterates over the Ontonotes 5.0 dataset using an optional domain identifier.
If the domain identifier is present, only examples which contain the domain
identifier in the file path are yielded.
"""
for conll_file in ontonotes_reader.dataset_path_iterator(file_path):
if (
domain_identifier is None or f"/{domain_identifier}/" in conll_file
) and "/pt/" not in conll_file:
yield from ontonotes_reader.sentence_iterator(conll_file)
def text_to_instance(
self, # type: ignore
tokens: List[Token],
ner_tags: List[str] = None,
) -> Instance:
"""
We take `pre-tokenized` input here, because we don't have a tokenizer in this class.
"""
sequence = TextField(tokens, self._token_indexers)
instance_fields: Dict[str, Field] = {"tokens": sequence}
instance_fields["metadata"] = MetadataField({"words": [x.text for x in tokens]})
# Add "tag label" to instance
if ner_tags is not None:
if self._coding_scheme == "BIOUL":
ner_tags = to_bioul(ner_tags, encoding="BIO")
instance_fields["tags"] = SequenceLabelField(ner_tags, sequence)
return Instance(instance_fields)
| allennlp-models-main | allennlp_models/tagging/dataset_readers/ontonotes_ner.py |
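A minimal usage sketch for the reader above (not part of the repo): the OntoNotes directory path is a placeholder for a locally prepared CoNLL-formatted release, and `domain_identifier="bn"` is just an example sub-domain.
```python
from allennlp_models.tagging.dataset_readers.ontonotes_ner import (
    OntonotesNamedEntityRecognition,
)

# Assumes a local CoNLL-formatted OntoNotes 5.0 directory; the path below is a placeholder.
reader = OntonotesNamedEntityRecognition(coding_scheme="BIOUL", domain_identifier="bn")
for instance in reader.read("/path/to/conll-formatted-ontonotes-5.0/data/train"):
    # Each instance has the normalized words plus a BIOUL tag per token.
    print(instance.fields["metadata"]["words"], instance.fields["tags"].labels)
    break
```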
from typing import Dict, List, Optional, Sequence, Iterable
import itertools
import logging
import warnings
from allennlp.common.checks import ConfigurationError
from allennlp.common.file_utils import cached_path
from allennlp.data.dataset_readers.dataset_reader import DatasetReader
from allennlp.data.dataset_readers.dataset_utils import to_bioul
from allennlp.data.fields import TextField, SequenceLabelField, Field, MetadataField
from allennlp.data.instance import Instance
from allennlp.data.token_indexers import TokenIndexer, SingleIdTokenIndexer
from allennlp.data.tokenizers import Token
logger = logging.getLogger(__name__)
def _is_divider(line: str) -> bool:
return line.strip() == ""
@DatasetReader.register("conll2000")
class Conll2000DatasetReader(DatasetReader):
"""
Reads instances from a pretokenised file where each line is in the following format:
```
WORD POS-TAG CHUNK-TAG
```
with a blank line indicating the end of each sentence
and converts it into a `Dataset` suitable for sequence tagging.
Each `Instance` contains the words in the `"tokens"` `TextField`.
The values corresponding to the `tag_label`
values will get loaded into the `"tags"` `SequenceLabelField`.
And if you specify any `feature_labels` (you probably shouldn't),
the corresponding values will get loaded into their own `SequenceLabelField` s.
Registered as a `DatasetReader` with name "conll2000".
# Parameters
token_indexers : `Dict[str, TokenIndexer]`, optional (default=`{"tokens": SingleIdTokenIndexer()}`)
We use this to define the input representation for the text. See :class:`TokenIndexer`.
tag_label : `str`, optional (default=`chunk`)
Specify `pos` or `chunk` to have that tag loaded into the instance field `tags`.
feature_labels : `Sequence[str]`, optional (default=`()`)
These labels will be loaded as features into the corresponding instance fields:
`pos` -> `pos_tags` or `chunk` -> `chunk_tags`.
Each will have its own namespace : `pos_tags` or `chunk_tags`.
If you want to use one of the tags as a `feature` in your model, it should be
specified here.
convert_to_coding_scheme : `str`, optional (default=`None`)
Specifies the coding scheme to convert the chunk labels to.
`Conll2000DatasetReader` assumes the coding scheme of the input data is `BIO`.
Valid options are `None` and `BIOUL`. The `None` default maintains
the original BIO scheme of the CoNLL 2000 chunking data.
In the BIO scheme, B is a token starting a span, I is a token continuing a span, and
O is a token outside of a span.
coding_scheme: `str`, optional (default=`BIO`)
This parameter is deprecated. If you were passing `coding_scheme="BIO"`,
simply remove it or set `convert_to_coding_scheme` to `None`.
If you were passing `coding_scheme="BIOUL"`, use `convert_to_coding_scheme` instead.
label_namespace : `str`, optional (default=`labels`)
Specifies the namespace for the chosen `tag_label`.
"""
_VALID_LABELS = {"pos", "chunk"}
def __init__(
self,
token_indexers: Dict[str, TokenIndexer] = None,
tag_label: str = "chunk",
feature_labels: Sequence[str] = (),
convert_to_coding_scheme: Optional[str] = None,
label_namespace: str = "labels",
**kwargs,
) -> None:
if "coding_scheme" in kwargs:
warnings.warn("`coding_scheme` is deprecated.", DeprecationWarning)
coding_scheme = kwargs.pop("coding_scheme")
if coding_scheme not in ("BIO", "BIOUL"):
raise ConfigurationError("unknown coding_scheme: {}".format(coding_scheme))
if coding_scheme == "BIO":
convert_to_coding_scheme = None
else:
convert_to_coding_scheme = coding_scheme
super().__init__(**kwargs)
self._token_indexers = token_indexers or {"tokens": SingleIdTokenIndexer()}
if tag_label is not None and tag_label not in self._VALID_LABELS:
raise ConfigurationError("unknown tag label type: {}".format(tag_label))
for label in feature_labels:
if label not in self._VALID_LABELS:
raise ConfigurationError("unknown feature label type: {}".format(label))
if convert_to_coding_scheme not in (None, "BIOUL"):
raise ConfigurationError(
"unknown convert_to_coding_scheme: {}".format(convert_to_coding_scheme)
)
self.tag_label = tag_label
self.feature_labels = set(feature_labels)
self.convert_to_coding_scheme = convert_to_coding_scheme
self.label_namespace = label_namespace
self._original_coding_scheme = "BIO"
def _read(self, file_path: str) -> Iterable[Instance]:
# if `file_path` is a URL, redirect to the cache
file_path = cached_path(file_path)
with open(file_path, "r") as data_file:
logger.info("Reading instances from lines in file at: %s", file_path)
# Group lines into alternating divider / sentence chunks.
for is_divider, lines in itertools.groupby(data_file, _is_divider):
# Ignore the divider chunks, so that `lines` corresponds to the words
# of a single sentence.
if not is_divider:
fields = [line.strip().split() for line in lines]
# unzipping trick returns tuples, but our Fields need lists
fields = [list(field) for field in zip(*fields)]
tokens_, pos_tags, chunk_tags = fields
# TextField requires `Token` objects
tokens = [Token(token) for token in tokens_]
yield self.text_to_instance(tokens, pos_tags, chunk_tags)
def text_to_instance( # type: ignore
self, tokens: List[Token], pos_tags: List[str] = None, chunk_tags: List[str] = None
) -> Instance:
"""
We take `pre-tokenized` input here, because we don't have a tokenizer in this class.
"""
sequence = TextField(tokens, self._token_indexers)
instance_fields: Dict[str, Field] = {"tokens": sequence}
instance_fields["metadata"] = MetadataField({"words": [x.text for x in tokens]})
# Recode the labels if necessary.
if self.convert_to_coding_scheme == "BIOUL":
coded_chunks = (
to_bioul(chunk_tags, encoding=self._original_coding_scheme)
if chunk_tags is not None
else None
)
else:
# the default BIO
coded_chunks = chunk_tags
# Add "feature labels" to instance
if "pos" in self.feature_labels:
if pos_tags is None:
raise ConfigurationError(
"Dataset reader was specified to use pos_tags as "
"features. Pass them to text_to_instance."
)
instance_fields["pos_tags"] = SequenceLabelField(pos_tags, sequence, "pos_tags")
if "chunk" in self.feature_labels:
if coded_chunks is None:
raise ConfigurationError(
"Dataset reader was specified to use chunk tags as "
"features. Pass them to text_to_instance."
)
instance_fields["chunk_tags"] = SequenceLabelField(coded_chunks, sequence, "chunk_tags")
# Add "tag label" to instance
if self.tag_label == "pos" and pos_tags is not None:
instance_fields["tags"] = SequenceLabelField(pos_tags, sequence, self.label_namespace)
elif self.tag_label == "chunk" and coded_chunks is not None:
instance_fields["tags"] = SequenceLabelField(
coded_chunks, sequence, self.label_namespace
)
return Instance(instance_fields)
| allennlp-models-main | allennlp_models/tagging/dataset_readers/conll2000.py |
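A runnable sketch of the reader above in isolation (not from the repo): the three-token sentence and the temporary file are made up for illustration, and the chunk tags are converted to BIOUL.
```python
import tempfile

from allennlp_models.tagging.dataset_readers.conll2000 import Conll2000DatasetReader

# One sentence in the WORD POS-TAG CHUNK-TAG format, terminated by a blank line.
sample = "He PRP B-NP\nreckons VBZ B-VP\n. . O\n\n"
with tempfile.NamedTemporaryFile("w", suffix=".txt", delete=False) as f:
    f.write(sample)

reader = Conll2000DatasetReader(convert_to_coding_scheme="BIOUL")
for instance in reader.read(f.name):
    # B-NP / B-VP become U-NP / U-UP-style unit tags under BIOUL.
    print([t.text for t in instance.fields["tokens"]], instance.fields["tags"].labels)
```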
from allennlp_models.tagging.predictors.sentence_tagger import SentenceTaggerPredictor
| allennlp-models-main | allennlp_models/tagging/predictors/__init__.py |
from allennlp.predictors.sentence_tagger import SentenceTaggerPredictor # noqa: F401
# This component lives in the main repo because we need it there for tests.
| allennlp-models-main | allennlp_models/tagging/predictors/sentence_tagger.py |
from typing import Dict, Optional, List, Any, cast
import torch
from torch.nn.modules.linear import Linear
from allennlp.common.checks import check_dimensions_match, ConfigurationError
from allennlp.data import TextFieldTensors, Vocabulary
from allennlp.modules import Seq2SeqEncoder, TimeDistributed, TextFieldEmbedder
from allennlp.modules import (
ConditionalRandomField,
FeedForward,
)
from allennlp.modules.conditional_random_field import (
ConditionalRandomFieldWeightEmission,
ConditionalRandomFieldWeightTrans,
ConditionalRandomFieldWeightLannoy,
)
from allennlp.modules.conditional_random_field.conditional_random_field import allowed_transitions
from allennlp.models.model import Model
from allennlp.nn import InitializerApplicator
import allennlp.nn.util as util
from allennlp.training.metrics import CategoricalAccuracy, SpanBasedF1Measure, FBetaVerboseMeasure
@Model.register("crf_tagger")
class CrfTagger(Model):
"""
The `CrfTagger` encodes a sequence of text with a `Seq2SeqEncoder`,
then uses a Conditional Random Field model to predict a tag for each token in the sequence.
Registered as a `Model` with name "crf_tagger".
# Parameters
vocab : `Vocabulary`, required
A Vocabulary, required in order to compute sizes for input/output projections.
text_field_embedder : `TextFieldEmbedder`, required
Used to embed the tokens `TextField` we get as input to the model.
encoder : `Seq2SeqEncoder`
The encoder that we will use in between embedding tokens and predicting output tags.
label_namespace : `str`, optional (default=`labels`)
This is needed to compute the SpanBasedF1Measure metric.
Unless you did something unusual, the default value should be what you want.
feedforward : `FeedForward`, optional, (default = `None`).
An optional feedforward layer to apply after the encoder.
label_encoding : `str`, optional (default=`None`)
Label encoding to use when calculating span f1 and constraining
the CRF at decoding time. Valid options are "BIO", "BIOUL", "IOB1", "BMES".
Required if `calculate_span_f1` or `constrain_crf_decoding` is true.
include_start_end_transitions : `bool`, optional (default=`True`)
Whether to include start and end transition parameters in the CRF.
constrain_crf_decoding : `bool`, optional (default=`None`)
If `True`, the CRF is constrained at decoding time to
produce valid sequences of tags. If this is `True`, then
`label_encoding` is required. If `None` and
label_encoding is specified, this is set to `True`.
If `None` and label_encoding is not specified, it defaults
to `False`.
calculate_span_f1 : `bool`, optional (default=`None`)
Calculate span-level F1 metrics during training. If this is `True`, then
`label_encoding` is required. If `None` and
label_encoding is specified, this is set to `True`.
If `None` and label_encoding is not specified, it defaults
to `False`.
dropout: `float`, optional (default=`None`)
Dropout probability.
verbose_metrics : `bool`, optional (default = `False`)
If true, metrics will be returned per label class in addition
to the overall statistics.
initializer : `InitializerApplicator`, optional (default=`InitializerApplicator()`)
Used to initialize the model parameters.
top_k : `int`, optional (default=`1`)
If provided, the number of parses to return from the crf in output_dict['top_k_tags'].
Top k parses are returned as a list of dicts, where each dictionary is of the form:
{"tags": List, "score": float}.
The "tags" value for the first dict in the list for each data_item will be the top
choice, and will equal the corresponding item in output_dict['tags']
ignore_loss_on_o_tags : `bool`, optional (default=`False`)
If True, we compute the loss only for actual spans in `tags`, and not on `O` tokens.
This is useful for computing gradients of the loss on a _single span_, for
interpretation / attacking.
label_weights : `Dict[str, float]`, optional (default=`None`)
A mapping {label : weight} to be used in the loss function in order to
give different weights for each token depending on its label. This is useful to
deal with highly unbalanced datasets. There are three available strategies to deal
with weighted labels (see below). The default strategy is "emission".
weight_strategy : `str`, optional (default=`None`)
If `label_weights` is given and this is `None`, then it is the same as "emission".
It indicates which strategy is used to apply the label weights. Valid options are:
"emission", "emission_transition", "lannoy". If "emission" then the emission score
of each tag is multiplied by the corresponding weight (as given by `label_weights`).
If "emission_transition", both emission and transition scores of each tag are multiplied
by the corresponding weight. In this case, a transition score `t(i,j)`, between consecutive
tokens `i` and `j`, is multiplied by `w[tags[i]]`, i.e., the weight related to the tag of token `i`.
If `weight_strategy` is "lannoy" then we use the strategy proposed by
[Lannoy et al. (2019)](https://perso.uclouvain.be/michel.verleysen/papers/ieeetbe12gdl.pdf).
You can see an experimental comparison among these three strategies and a brief discussion
of their differences [here](https://eraldoluis.github.io/2022/05/10/weighted-crf.html).
"""
def __init__(
self,
vocab: Vocabulary,
text_field_embedder: TextFieldEmbedder,
encoder: Seq2SeqEncoder,
label_namespace: str = "labels",
feedforward: Optional[FeedForward] = None,
label_encoding: Optional[str] = None,
include_start_end_transitions: bool = True,
constrain_crf_decoding: bool = None,
calculate_span_f1: bool = None,
dropout: Optional[float] = None,
verbose_metrics: bool = False,
initializer: InitializerApplicator = InitializerApplicator(),
top_k: int = 1,
ignore_loss_on_o_tags: bool = False,
label_weights: Optional[Dict[str, float]] = None,
weight_strategy: str = None,
**kwargs,
) -> None:
super().__init__(vocab, **kwargs)
self.label_namespace = label_namespace
self.text_field_embedder = text_field_embedder
self.num_tags = self.vocab.get_vocab_size(label_namespace)
self.encoder = encoder
self.top_k = top_k
self.ignore_loss_on_o_tags = ignore_loss_on_o_tags
self._verbose_metrics = verbose_metrics
if dropout:
self.dropout = torch.nn.Dropout(dropout)
else:
self.dropout = None
self._feedforward = feedforward
if feedforward is not None:
output_dim = feedforward.get_output_dim()
else:
output_dim = self.encoder.get_output_dim()
self.tag_projection_layer = TimeDistributed(Linear(output_dim, self.num_tags))
# if constrain_crf_decoding and calculate_span_f1 are not
# provided, (i.e., they're None), set them to True
# if label_encoding is provided and False if it isn't.
if constrain_crf_decoding is None:
constrain_crf_decoding = label_encoding is not None
if calculate_span_f1 is None:
calculate_span_f1 = label_encoding is not None
self.label_encoding = label_encoding
if constrain_crf_decoding:
if not label_encoding:
raise ConfigurationError(
"constrain_crf_decoding is True, but no label_encoding was specified."
)
labels = self.vocab.get_index_to_token_vocabulary(label_namespace)
constraints = allowed_transitions(label_encoding, labels)
else:
constraints = None
# Label weights are given as a dict {label: weight} but we convert it to a list of weights for each label,
# and weights for omitted labels are set to 1.
if label_weights is None:
if weight_strategy is not None:
raise ConfigurationError(
"`weight_strategy` can only be used when `label_weights` is given"
)
# ordinary CRF (not weighted)
self.crf = ConditionalRandomField(
self.num_tags,
constraints,
include_start_end_transitions,
)
else: # label_weights is not None
label_to_index = vocab.get_token_to_index_vocabulary(label_namespace)
self.label_weights = [1.0] * len(label_to_index)
for label, weight in label_weights.items():
try:
self.label_weights[label_to_index[label]] = weight
except KeyError:
raise ConfigurationError(
f"'{label}' not found in vocab namespace '{label_namespace}')"
)
if weight_strategy is None or weight_strategy == "emission":
self.crf = ConditionalRandomFieldWeightEmission(
self.num_tags,
self.label_weights,
constraints,
include_start_end_transitions,
)
elif weight_strategy == "emission_transition":
self.crf = ConditionalRandomFieldWeightTrans(
self.num_tags,
self.label_weights,
constraints,
include_start_end_transitions,
)
elif weight_strategy == "lannoy":
self.crf = ConditionalRandomFieldWeightLannoy(
self.num_tags,
self.label_weights,
constraints,
include_start_end_transitions,
)
else:
raise ConfigurationError(
"weight_strategy must be one of 'emission', 'emission_transition' or 'lannoy'"
)
self.include_start_end_transitions = include_start_end_transitions
self.metrics = {
"accuracy": CategoricalAccuracy(),
"accuracy3": CategoricalAccuracy(top_k=3),
}
self.calculate_span_f1 = calculate_span_f1
if calculate_span_f1:
if not label_encoding:
raise ConfigurationError(
"calculate_span_f1 is True, but no label_encoding was specified."
)
self._f1_metric = SpanBasedF1Measure(
vocab, tag_namespace=label_namespace, label_encoding=label_encoding
)
elif verbose_metrics:
# verbose metrics for token classification (not span-based)
self._f_beta_measure = FBetaVerboseMeasure(
index_to_label=vocab.get_index_to_token_vocabulary(label_namespace),
)
check_dimensions_match(
text_field_embedder.get_output_dim(),
encoder.get_input_dim(),
"text field embedding dim",
"encoder input dim",
)
if feedforward is not None:
check_dimensions_match(
encoder.get_output_dim(),
feedforward.get_input_dim(),
"encoder output dim",
"feedforward input dim",
)
initializer(self)
def forward(
self, # type: ignore
tokens: TextFieldTensors,
tags: torch.LongTensor = None,
metadata: List[Dict[str, Any]] = None,
ignore_loss_on_o_tags: Optional[bool] = None,
**kwargs, # to allow for a more general dataset reader that passes args we don't need
) -> Dict[str, torch.Tensor]:
"""
# Parameters
tokens : `TextFieldTensors`, required
The output of `TextField.as_array()`, which should typically be passed directly to a
`TextFieldEmbedder`. This output is a dictionary mapping keys to `TokenIndexer`
tensors. At its most basic, using a `SingleIdTokenIndexer` this is : `{"tokens":
Tensor(batch_size, num_tokens)}`. This dictionary will have the same keys as were used
for the `TokenIndexers` when you created the `TextField` representing your
sequence. The dictionary is designed to be passed directly to a `TextFieldEmbedder`,
which knows how to combine different word representations into a single vector per
token in your input.
tags : `torch.LongTensor`, optional (default = `None`)
A torch tensor representing the sequence of integer gold class labels of shape
`(batch_size, num_tokens)`.
metadata : `List[Dict[str, Any]]`, optional, (default = `None`)
metadata containing the original words in the sentence to be tagged under a 'words' key.
ignore_loss_on_o_tags : `Optional[bool]`, optional (default = `None`)
If True, we compute the loss only for actual spans in `tags`, and not on `O` tokens.
This is useful for computing gradients of the loss on a _single span_, for
interpretation / attacking.
If `None`, `self.ignore_loss_on_o_tags` is used instead.
# Returns
An output dictionary consisting of:
logits : `torch.FloatTensor`
The logits that are the output of the `tag_projection_layer`
mask : `torch.BoolTensor`
The text field mask for the input tokens
tags : `List[List[int]]`
The predicted tags using the Viterbi algorithm.
loss : `torch.FloatTensor`, optional
A scalar loss to be optimised. Only computed if gold label `tags` are provided.
"""
ignore_loss_on_o_tags = (
ignore_loss_on_o_tags
if ignore_loss_on_o_tags is not None
else self.ignore_loss_on_o_tags
)
embedded_text_input = self.text_field_embedder(tokens)
mask = util.get_text_field_mask(tokens)
if self.dropout:
embedded_text_input = self.dropout(embedded_text_input)
encoded_text = self.encoder(embedded_text_input, mask)
if self.dropout:
encoded_text = self.dropout(encoded_text)
if self._feedforward is not None:
encoded_text = self._feedforward(encoded_text)
logits = self.tag_projection_layer(encoded_text)
best_paths = self.crf.viterbi_tags(logits, mask, top_k=self.top_k)
# Just get the top tags and ignore the scores.
predicted_tags = cast(List[List[int]], [x[0][0] for x in best_paths])
output = {"logits": logits, "mask": mask, "tags": predicted_tags}
if self.top_k > 1:
output["top_k_tags"] = best_paths
if tags is not None:
if ignore_loss_on_o_tags:
o_tag_index = self.vocab.get_token_index("O", namespace=self.label_namespace)
crf_mask = mask & (tags != o_tag_index)
else:
crf_mask = mask
# Add negative log-likelihood as loss
log_likelihood = self.crf(logits, tags, crf_mask)
output["loss"] = -log_likelihood
# Represent viterbi tags as "class probabilities" that we can
# feed into the metrics
class_probabilities = logits * 0.0
for i, instance_tags in enumerate(predicted_tags):
for j, tag_id in enumerate(instance_tags):
class_probabilities[i, j, tag_id] = 1
for metric in self.metrics.values():
metric(class_probabilities, tags, mask)
if self.calculate_span_f1:
self._f1_metric(class_probabilities, tags, mask)
elif self._verbose_metrics:
self._f_beta_measure(class_probabilities, tags, mask)
if metadata is not None:
output["words"] = [x["words"] for x in metadata]
return output
def make_output_human_readable(
self, output_dict: Dict[str, torch.Tensor]
) -> Dict[str, torch.Tensor]:
"""
Converts the tag ids to the actual tags.
`output_dict["tags"]` is a list of lists of tag_ids,
so we use an ugly nested list comprehension.
"""
def decode_tags(tags):
return [
self.vocab.get_token_from_index(tag, namespace=self.label_namespace) for tag in tags
]
def decode_top_k_tags(top_k_tags):
return [
{"tags": decode_tags(scored_path[0]), "score": scored_path[1]}
for scored_path in top_k_tags
]
output_dict["tags"] = [decode_tags(t) for t in output_dict["tags"]]
if "top_k_tags" in output_dict:
output_dict["top_k_tags"] = [decode_top_k_tags(t) for t in output_dict["top_k_tags"]]
return output_dict
def get_metrics(self, reset: bool = False) -> Dict[str, float]:
metrics_to_return = {
metric_name: metric.get_metric(reset) for metric_name, metric in self.metrics.items()
}
if self.calculate_span_f1:
f1_dict = self._f1_metric.get_metric(reset=reset)
if self._verbose_metrics:
metrics_to_return.update(f1_dict)
else:
metrics_to_return.update({x: y for x, y in f1_dict.items() if "overall" in x})
elif self._verbose_metrics:
# verbose metrics for token classification (not span-based)
f_beta_dict = self._f_beta_measure.get_metric(reset=reset)
metrics_to_return.update(f_beta_dict)
return metrics_to_return
default_predictor = "sentence_tagger"
| allennlp-models-main | allennlp_models/tagging/models/crf_tagger.py |
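A small sketch of the `constrain_crf_decoding` path above: the hand-written label map is hypothetical, and `allowed_transitions` is the same helper imported at the top of the file.
```python
from allennlp.modules.conditional_random_field.conditional_random_field import (
    allowed_transitions,
)

# A toy BIO label vocabulary (index -> label), standing in for vocab.get_index_to_token_vocabulary.
labels = {0: "O", 1: "B-PER", 2: "I-PER", 3: "B-LOC", 4: "I-LOC"}
constraints = allowed_transitions("BIO", labels)
# Each (from_idx, to_idx) pair is an allowed transition; indices len(labels) and
# len(labels) + 1 stand for the special START and END states.
print(len(constraints))
```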
from allennlp_models.tagging.models.crf_tagger import CrfTagger
| allennlp-models-main | allennlp_models/tagging/models/__init__.py |
from typing import DefaultDict, List, Optional, Iterator, Set, Tuple
from collections import defaultdict
import codecs
import os
import logging
from allennlp.data.dataset_readers.dataset_utils.span_utils import TypedSpan
from nltk import Tree
logger = logging.getLogger(__name__)
class OntonotesSentence:
"""
A class representing the annotations available for a single CONLL formatted sentence.
# Parameters
document_id : `str`
This is a variation on the document filename
sentence_id : `int`
The integer ID of the sentence within a document.
words : `List[str]`
This is the tokens as segmented/tokenized in the Treebank.
pos_tags : `List[str]`
This is the Penn-Treebank-style part of speech. When parse information is missing,
all parts of speech except the one for which there is some sense or proposition
annotation are marked with an XX tag. The verb is marked with just a VERB tag.
parse_tree : `nltk.Tree`
An nltk Tree representing the parse. It includes POS tags as pre-terminal nodes.
When the parse information is missing, the parse will be `None`.
predicate_lemmas : `List[Optional[str]]`
The predicate lemma of the words for which we have semantic role
information or word sense information. All other indices are `None`.
predicate_framenet_ids : `List[Optional[str]]`
The PropBank frameset ID of the lemmas in `predicate_lemmas`, or `None`.
word_senses : `List[Optional[float]]`
The word senses for the words in the sentence, or `None`. These are floats
because the word sense can have values after the decimal, like `1.1`.
speakers : `List[Optional[str]]`
The speaker information for the words in the sentence, if present, or `None`.
This is the speaker or author name where available, mostly in Broadcast Conversation
and Web Log data. When not available, the rows are marked with a "-".
named_entities : `List[str]`
The BIO tags for named entities in the sentence.
srl_frames : `List[Tuple[str, List[str]]]`
A list of tuples, one per verbal predicate in the sentence, pairing the predicate
with its PropBank frame labels in BIO format.
coref_spans : `Set[TypedSpan]`
The spans for entity mentions involved in coreference resolution within the sentence.
Each element is a tuple composed of (cluster_id, (start_index, end_index)). Indices
are `inclusive`.
"""
def __init__(
self,
document_id: str,
sentence_id: int,
words: List[str],
pos_tags: List[str],
parse_tree: Optional[Tree],
predicate_lemmas: List[Optional[str]],
predicate_framenet_ids: List[Optional[str]],
word_senses: List[Optional[float]],
speakers: List[Optional[str]],
named_entities: List[str],
srl_frames: List[Tuple[str, List[str]]],
coref_spans: Set[TypedSpan],
) -> None:
self.document_id = document_id
self.sentence_id = sentence_id
self.words = words
self.pos_tags = pos_tags
self.parse_tree = parse_tree
self.predicate_lemmas = predicate_lemmas
self.predicate_framenet_ids = predicate_framenet_ids
self.word_senses = word_senses
self.speakers = speakers
self.named_entities = named_entities
self.srl_frames = srl_frames
self.coref_spans = coref_spans
class Ontonotes:
"""
This `DatasetReader` is designed to read in the English OntoNotes v5.0 data
in the format used by the CoNLL 2011/2012 shared tasks. In order to use this
Reader, you must follow the instructions provided
[here (v12 release)](https://cemantix.org/data/ontonotes.html), which will allow you to download
the CoNLL style annotations for the OntoNotes v5.0 release -- LDC2013T19.tgz
obtained from LDC.
Once you have run the scripts on the extracted data, you will have a folder
structured as follows:
```
conll-formatted-ontonotes-5.0/
└── data
    ├── development
    │   └── data
    │       └── english
    │           └── annotations
    │               ├── bc
    │               ├── bn
    │               ├── mz
    │               ├── nw
    │               ├── pt
    │               ├── tc
    │               └── wb
    ├── test
    │   └── data
    │       └── english
    │           └── annotations
    │               ├── bc
    │               ├── bn
    │               ├── mz
    │               ├── nw
    │               ├── pt
    │               ├── tc
    │               └── wb
    └── train
        └── data
            └── english
                └── annotations
                    ├── bc
                    ├── bn
                    ├── mz
                    ├── nw
                    ├── pt
                    ├── tc
                    └── wb
```
The file path provided to this class can then be any of the train, test or development
directories (or the top level data directory, if you are not utilizing the splits).
The data has the following format, ordered by column.
1. Document ID : `str`
This is a variation on the document filename
2. Part number : `int`
Some files are divided into multiple parts numbered as 000, 001, 002, ... etc.
3. Word number : `int`
This is the word index of the word in that sentence.
4. Word : `str`
This is the token as segmented/tokenized in the Treebank. Initially the `*_skel` files
contain the placeholder [WORD], which gets replaced by the actual token from the
Treebank, which is part of the OntoNotes release.
5. POS Tag : `str`
This is the Penn Treebank style part of speech. When parse information is missing,
all parts of speech except the one for which there is some sense or proposition
annotation are marked with an XX tag. The verb is marked with just a VERB tag.
6. Parse bit : `str`
This is the bracketed structure broken before the first open parenthesis in the parse,
and the word/part-of-speech leaf replaced with a `*`. When the parse information is
missing, the first word of a sentence is tagged as `(TOP*` and the last word is tagged
as `*)` and all intermediate words are tagged with a `*`.
7. Predicate lemma : `str`
The predicate lemma is mentioned for the rows for which we have semantic role
information or word sense information. All other rows are marked with a "-".
8. Predicate Frameset ID : `int`
The PropBank frameset ID of the predicate in Column 7.
9. Word sense : `float`
This is the word sense of the word in Column 3.
10. Speaker/Author : `str`
This is the speaker or author name where available. Mostly in Broadcast Conversation
and Web Log data. When not available the rows are marked with an "-".
11. Named Entities : `str`
This column identifies the spans representing various named entities. For documents
which do not have named entity annotation, each line is represented with an `*`.
12. Predicate Arguments : `str`
There is one column each of predicate argument structure information for the predicate
mentioned in Column 7. If there are no predicates tagged in a sentence this is a
single column with all rows marked with an `*`.
-1. Co-reference : `str`
Co-reference chain information encoded in a parenthesis structure. For documents that do
not have co-reference annotations, each line is represented with a "-".
"""
def dataset_iterator(self, file_path: str) -> Iterator[OntonotesSentence]:
"""
An iterator over the entire dataset, yielding all sentences processed.
"""
for conll_file in self.dataset_path_iterator(file_path):
yield from self.sentence_iterator(conll_file)
@staticmethod
def dataset_path_iterator(file_path: str) -> Iterator[str]:
"""
An iterator returning file_paths in a directory
containing CONLL-formatted files.
"""
logger.info("Reading CONLL sentences from dataset files at: %s", file_path)
for root, _, files in list(os.walk(file_path)):
for data_file in files:
# These are a relic of the dataset pre-processing. Every
# file will be duplicated - one file called filename.gold_skel
# and one generated from the preprocessing called filename.gold_conll.
if not data_file.endswith("gold_conll"):
continue
yield os.path.join(root, data_file)
def dataset_document_iterator(self, file_path: str) -> Iterator[List[OntonotesSentence]]:
"""
An iterator over CONLL formatted files which yields documents, regardless
of the number of document annotations in a particular file. This is useful
for conll data which has been preprocessed, such as the preprocessing which
takes place for the 2012 CONLL Coreference Resolution task.
"""
with codecs.open(file_path, "r", encoding="utf8") as open_file:
conll_rows = []
document: List[OntonotesSentence] = []
for line in open_file:
line = line.strip()
if line != "" and not line.startswith("#"):
# Non-empty line. Collect the annotation.
conll_rows.append(line)
else:
if conll_rows:
document.append(self._conll_rows_to_sentence(conll_rows))
conll_rows = []
if line.startswith("#end document"):
yield document
document = []
if document:
# Collect any stragglers or files which might not
# have the '#end document' format for the end of the file.
yield document
def sentence_iterator(self, file_path: str) -> Iterator[OntonotesSentence]:
"""
An iterator over the sentences in an individual CONLL formatted file.
"""
for document in self.dataset_document_iterator(file_path):
for sentence in document:
yield sentence
def _conll_rows_to_sentence(self, conll_rows: List[str]) -> OntonotesSentence:
document_id: str = None
sentence_id: int = None
# The words in the sentence.
sentence: List[str] = []
# The pos tags of the words in the sentence.
pos_tags: List[str] = []
# the pieces of the parse tree.
parse_pieces: List[str] = []
# The lemmatised form of the words in the sentence which
# have SRL or word sense information.
predicate_lemmas: List[str] = []
# The FrameNet ID of the predicate.
predicate_framenet_ids: List[str] = []
# The sense of the word, if available.
word_senses: List[float] = []
# The current speaker, if available.
speakers: List[str] = []
verbal_predicates: List[str] = []
span_labels: List[List[str]] = []
current_span_labels: List[str] = []
# Cluster id -> List of (start_index, end_index) spans.
clusters: DefaultDict[int, List[Tuple[int, int]]] = defaultdict(list)
# Cluster id -> List of start_indices which are open for this id.
coref_stacks: DefaultDict[int, List[int]] = defaultdict(list)
for index, row in enumerate(conll_rows):
conll_components = row.split()
document_id = conll_components[0]
sentence_id = int(conll_components[1])
word = conll_components[3]
pos_tag = conll_components[4]
parse_piece = conll_components[5]
# Replace brackets in text and pos tags
# with a different token for parse trees.
if pos_tag != "XX" and word != "XX":
if word == "(":
parse_word = "-LRB-"
elif word == ")":
parse_word = "-RRB-"
else:
parse_word = word
if pos_tag == "(":
pos_tag = "-LRB-"
if pos_tag == ")":
pos_tag = "-RRB-"
(left_brackets, right_hand_side) = parse_piece.split("*")
# only keep ')' if there are nested brackets with nothing in them.
right_brackets = right_hand_side.count(")") * ")"
parse_piece = f"{left_brackets} ({pos_tag} {parse_word}) {right_brackets}"
else:
# There are some bad annotations in the CONLL data.
# They contain no information, so to make this explicit,
# we just set the parse piece to be None which will result
# in the overall parse tree being None.
parse_piece = None
lemmatised_word = conll_components[6]
framenet_id = conll_components[7]
word_sense = conll_components[8]
speaker = conll_components[9]
if not span_labels:
# If this is the first word in the sentence, create
# empty lists to collect the NER and SRL BIO labels.
# We can't do this upfront, because we don't know how many
# components we are collecting, as a sentence can have
# variable numbers of SRL frames.
span_labels = [[] for _ in conll_components[10:-1]]
# Create variables representing the current label for each label
# sequence we are collecting.
current_span_labels = [None for _ in conll_components[10:-1]]
self._process_span_annotations_for_word(
conll_components[10:-1], span_labels, current_span_labels
)
# If any annotation marks this word as a verb predicate,
# we need to record its index. This also has the side effect
# of ordering the verbal predicates by their location in the
# sentence, automatically aligning them with the annotations.
word_is_verbal_predicate = any("(V" in x for x in conll_components[11:-1])
if word_is_verbal_predicate:
verbal_predicates.append(word)
self._process_coref_span_annotations_for_word(
conll_components[-1], index, clusters, coref_stacks
)
sentence.append(word)
pos_tags.append(pos_tag)
parse_pieces.append(parse_piece)
predicate_lemmas.append(lemmatised_word if lemmatised_word != "-" else None)
predicate_framenet_ids.append(framenet_id if framenet_id != "-" else None)
word_senses.append(float(word_sense) if word_sense != "-" else None)
speakers.append(speaker if speaker != "-" else None)
named_entities = span_labels[0]
srl_frames = [
(predicate, labels) for predicate, labels in zip(verbal_predicates, span_labels[1:])
]
if all(parse_pieces):
parse_tree = Tree.fromstring("".join(parse_pieces))
else:
parse_tree = None
coref_span_tuples: Set[TypedSpan] = {
(cluster_id, span) for cluster_id, span_list in clusters.items() for span in span_list
}
return OntonotesSentence(
document_id,
sentence_id,
sentence,
pos_tags,
parse_tree,
predicate_lemmas,
predicate_framenet_ids,
word_senses,
speakers,
named_entities,
srl_frames,
coref_span_tuples,
)
@staticmethod
def _process_coref_span_annotations_for_word(
label: str,
word_index: int,
clusters: DefaultDict[int, List[Tuple[int, int]]],
coref_stacks: DefaultDict[int, List[int]],
) -> None:
"""
For a given coref label, add it to a currently open span(s), complete a span(s) or
ignore it, if it is outside of all spans. This method mutates the clusters and coref_stacks
dictionaries.
# Parameters
label : `str`
The coref label for this word.
word_index : `int`
The word index into the sentence.
clusters : `DefaultDict[int, List[Tuple[int, int]]]`
A dictionary mapping cluster ids to lists of inclusive spans into the
sentence.
coref_stacks : `DefaultDict[int, List[int]]`
Stacks for each cluster id to hold the start indices of active spans (spans
which we are inside of when processing a given word). Spans with the same id
can be nested, which is why we collect these opening spans on a stack, e.g:
[Greg, the baker who referred to [himself]_ID1 as 'the bread man']_ID1
"""
if label != "-":
for segment in label.split("|"):
# The conll representation of coref spans allows spans to
# overlap. If spans end or begin at the same word, they are
# separated by a "|".
if segment[0] == "(":
# The span begins at this word.
if segment[-1] == ")":
# The span begins and ends at this word (single word span).
cluster_id = int(segment[1:-1])
clusters[cluster_id].append((word_index, word_index))
else:
# The span is starting, so we record the index of the word.
cluster_id = int(segment[1:])
coref_stacks[cluster_id].append(word_index)
else:
# The span for this id is ending, but didn't start at this word.
# Retrieve the start index from the document state and
# add the span to the clusters for this id.
cluster_id = int(segment[:-1])
start = coref_stacks[cluster_id].pop()
clusters[cluster_id].append((start, word_index))
@staticmethod
def _process_span_annotations_for_word(
annotations: List[str],
span_labels: List[List[str]],
current_span_labels: List[Optional[str]],
) -> None:
"""
Given a sequence of different label types for a single word and the current
span label we are inside, compute the BIO tag for each label and append to a list.
# Parameters
annotations : `List[str]`
A list of labels to compute BIO tags for.
span_labels : `List[List[str]]`
A list of lists, one for each annotation, to incrementally collect
the BIO tags for a sequence.
current_span_labels : `List[Optional[str]]`
The currently open span per annotation type, or `None` if there is no open span.
"""
for annotation_index, annotation in enumerate(annotations):
# strip all bracketing information to
# get the actual propbank label.
label = annotation.strip("()*")
if "(" in annotation:
# Entering into a span for a particular semantic role label.
# We append the label and set the current span for this annotation.
bio_label = "B-" + label
span_labels[annotation_index].append(bio_label)
current_span_labels[annotation_index] = label
elif current_span_labels[annotation_index] is not None:
# If there's no '(' token, but the current_span_label is not None,
# then we are inside a span.
bio_label = "I-" + current_span_labels[annotation_index]
span_labels[annotation_index].append(bio_label)
else:
# We're outside a span.
span_labels[annotation_index].append("O")
# Exiting a span, so we reset the current span label for this annotation.
if ")" in annotation:
current_span_labels[annotation_index] = None
| allennlp-models-main | allennlp_models/common/ontonotes.py |
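A sketch (not from the repo) exercising the static coreference helper above on a made-up three-word span labelled "(0", "-", "0)".
```python
from collections import defaultdict

from allennlp_models.common.ontonotes import Ontonotes

clusters = defaultdict(list)
coref_stacks = defaultdict(list)
# A span of cluster 0 opens at word 0 and closes at word 2.
for word_index, label in enumerate(["(0", "-", "0)"]):
    Ontonotes._process_coref_span_annotations_for_word(
        label, word_index, clusters, coref_stacks
    )
print(dict(clusters))  # {0: [(0, 2)]}
```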
| allennlp-models-main | allennlp_models/common/__init__.py |
| allennlp-models-main | allennlp_models/mc/__init__.py |
import logging
from allennlp.data import DatasetReader
from allennlp_models.mc.dataset_readers.transformer_mc import TransformerMCReader
logger = logging.getLogger(__name__)
@DatasetReader.register("commonsenseqa")
class CommonsenseQaReader(TransformerMCReader):
"""
Reads the input data for the CommonsenseQA dataset (https://arxiv.org/abs/1811.00937).
"""
def _read(self, file_path: str):
from allennlp.common.file_utils import cached_path
file_path = cached_path(file_path)
logger.info("Reading file at %s", file_path)
from allennlp.common.file_utils import json_lines_from_file
for json in json_lines_from_file(file_path):
choices = [(choice["label"], choice["text"]) for choice in json["question"]["choices"]]
correct_choice = [
i for i, (label, _) in enumerate(choices) if label == json["answerKey"]
][0]
yield self.text_to_instance(
json["id"], json["question"]["stem"], [c[1] for c in choices], correct_choice
)
| allennlp-models-main | allennlp_models/mc/dataset_readers/commonsenseqa.py |
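A sketch of the JSON-line shape this reader expects; the question content is made up and trimmed to two choices, and the snippet repeats the reader's own label-to-index lookup.
```python
# A fabricated CommonsenseQA-style record (real data has five choices).
json = {
    "id": "q-0",
    "answerKey": "B",
    "question": {
        "stem": "Where would you keep a houseplant to give it sunlight?",
        "choices": [
            {"label": "A", "text": "closet"},
            {"label": "B", "text": "windowsill"},
        ],
    },
}
choices = [(choice["label"], choice["text"]) for choice in json["question"]["choices"]]
correct_choice = [i for i, (label, _) in enumerate(choices) if label == json["answerKey"]][0]
print(json["question"]["stem"], [c[1] for c in choices], correct_choice)  # ..., 1
```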
import logging
from allennlp.data import DatasetReader
from allennlp_models.mc.dataset_readers.transformer_mc import TransformerMCReader
logger = logging.getLogger(__name__)
@DatasetReader.register("swag")
class SwagReader(TransformerMCReader):
"""
Reads the input data for the SWAG dataset (https://arxiv.org/abs/1808.05326).
"""
def _read(self, file_path: str):
from allennlp.common.file_utils import cached_path
file_path = cached_path(file_path)
logger.info("Reading file at %s", file_path)
with open(file_path, "r", encoding="utf-8") as f:
import csv
for line_number, line in enumerate(csv.reader(f)):
if line_number == 0:
continue
yield self.text_to_instance(
qid=line[1], start=line[3], alternatives=line[7:11], label=int(line[11])
)
| allennlp-models-main | allennlp_models/mc/dataset_readers/swag.py |
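A sketch with a fabricated CSV row showing the only columns the SWAG reader consumes, per its `text_to_instance` call: index 1 (qid), 3 (sentence start), 7-10 (the four endings), and 11 (gold label).
```python
import csv
import io

# Build a fake 12-column row; unused columns are left empty.
row = [""] * 12
row[1], row[3] = "q-0", "The chef picks up the knife"
row[7:11] = ["and juggles it.", "and slices an onion.", "and sings.", "and walks away."]
row[11] = "1"

buffer = io.StringIO()
csv.writer(buffer).writerows([["header"] * 12, row])
buffer.seek(0)
for line_number, line in enumerate(csv.reader(buffer)):
    if line_number == 0:  # the reader skips the header row the same way
        continue
    print(line[1], line[3], line[7:11], int(line[11]))
```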
import logging
from typing import List
from allennlp.data import DatasetReader, Instance
logger = logging.getLogger(__name__)
@DatasetReader.register("fake")
class FakeReader(DatasetReader):
"""
Creates fake multiple-choice input. If your model doesn't get 99% on this data, it is broken.
Instances have two fields:
* `alternatives`, a ListField of TextField
* `correct_alternative`, IndexField with the correct answer among `alternatives`
Parameters
----------
transformer_model_name : `str`, optional (default=`roberta-large`)
This reader chooses tokenizer and token indexer according to this setting.
length_limit : `int`, optional (default=512)
We will make sure that the length of the alternatives never exceeds this many word pieces.
"""
def __init__(
self, transformer_model_name: str = "roberta-large", length_limit: int = 512, **kwargs
) -> None:
super().__init__(**kwargs)
if self.max_instances is None:
raise ValueError("FakeReader requires max_instances to be set.")
from allennlp.data.tokenizers import PretrainedTransformerTokenizer
self._tokenizer = PretrainedTransformerTokenizer(
transformer_model_name, add_special_tokens=False
)
from allennlp.data.token_indexers import PretrainedTransformerIndexer
self._token_indexers = {"tokens": PretrainedTransformerIndexer(transformer_model_name)}
self.length_limit = length_limit
def _read(self, file_path: str):
logger.info("Ignoring file at %s", file_path)
for i in range(self.max_instances):
label = i % 2
texts = [f"This is the false choice {i}."] * 2
texts[label] = f"This is the true choice {i}."
yield self.text_to_instance(texts, label)
def text_to_instance(
self, # type: ignore
alternatives: List[str],
correct_alternative: int,
) -> Instance:
# tokenize
alternatives = [self._tokenizer.tokenize(alternative) for alternative in alternatives]
# add special tokens
alternatives = [
self._tokenizer.add_special_tokens(alternative) for alternative in alternatives
]
# make fields
from allennlp.data.fields import TextField
alternatives = [
TextField(alternative, self._token_indexers) for alternative in alternatives
]
if correct_alternative < 0 or correct_alternative >= len(alternatives):
raise ValueError("Alternative %d does not exist.", correct_alternative)
from allennlp.data.fields import ListField
alternatives = ListField(alternatives)
from allennlp.data.fields import IndexField
return Instance(
{
"alternatives": alternatives,
"correct_alternative": IndexField(correct_alternative, alternatives),
}
)
| allennlp-models-main | allennlp_models/mc/dataset_readers/fake.py |
from allennlp_models.mc.dataset_readers.swag import SwagReader
from allennlp_models.mc.dataset_readers.commonsenseqa import CommonsenseQaReader
from allennlp_models.mc.dataset_readers.piqa import PiqaReader
| allennlp-models-main | allennlp_models/mc/dataset_readers/__init__.py |
import logging
from allennlp.data import DatasetReader
from allennlp_models.mc.dataset_readers.piqa import PiqaReader
from allennlp_models.mc.dataset_readers.transformer_mc_tt import (
TransformerMCReaderTransformerToolkit,
)
logger = logging.getLogger(__name__)
@DatasetReader.register("piqa_tt")
class PiqaReaderTransformerToolkit(TransformerMCReaderTransformerToolkit, PiqaReader):
pass
| allennlp-models-main | allennlp_models/mc/dataset_readers/piqa_tt.py |
import itertools
import logging
from typing import List, Optional
import torch
from allennlp.common import cached_transformers
from allennlp.data import DatasetReader, Instance
from allennlp.data.fields import TransformerTextField
logger = logging.getLogger(__name__)
class TransformerMCReaderTransformerToolkit(DatasetReader):
"""
Read input data for the TransformerMC model. This is the base class for all readers that produce
data for TransformerMCTransformerToolkit.
Instances have three fields:
* `alternatives`, a `ListField` of `TransformerTextField`
* `correct_alternative`, `IndexField` with the correct answer among `alternatives`
* `qid`, a `MetadataField` containing question ids
Parameters
----------
transformer_model_name : `str`, optional (default=`"roberta-large"`)
This reader chooses tokenizer and token indexer according to this setting.
length_limit : `int`, optional (default=`512`)
We will make sure that the length of an alternative never exceeds this many word pieces.
"""
def __init__(
self, transformer_model_name: str = "roberta-large", length_limit: int = 512, **kwargs
) -> None:
super().__init__(**kwargs)
self._tokenizer = cached_transformers.get_tokenizer(transformer_model_name)
self.length_limit = length_limit
def text_to_instance(
self, # type: ignore
qid: str,
start: str,
alternatives: List[str],
label: Optional[int] = None,
) -> Instance:
start = start.strip()
tokenized = self._tokenizer(
[(start, a) for a in alternatives],
truncation="longest_first",
max_length=self.length_limit,
return_attention_mask=False,
)
sequences = [
TransformerTextField(
torch.IntTensor(input_ids),
torch.IntTensor(token_type_ids) if token_type_ids is not None else None,
padding_token_id=self._tokenizer.pad_token_id,
)
for input_ids, token_type_ids in itertools.zip_longest(
tokenized["input_ids"], tokenized.get("token_type_ids", [])
)
]
from allennlp.data.fields import ListField
sequences = ListField(sequences)
from allennlp.data.fields import MetadataField
fields = {
"alternatives": sequences,
"qid": MetadataField(qid),
}
if label is not None:
if label < 0 or label >= len(sequences):
raise ValueError("Alternative %d does not exist", label)
from allennlp.data.fields import IndexField
fields["correct_alternative"] = IndexField(label, sequences)
return Instance(fields)
| allennlp-models-main | allennlp_models/mc/dataset_readers/transformer_mc_tt.py |
import logging
from allennlp.data import DatasetReader
from allennlp_models.mc.dataset_readers.transformer_mc import TransformerMCReader
logger = logging.getLogger(__name__)
@DatasetReader.register("piqa")
class PiqaReader(TransformerMCReader):
"""
Reads the input data for the PIQA dataset (https://arxiv.org/abs/1911.11641).
"""
def _read(self, file_path: str):
import re
labels_path = re.sub(r"\.jsonl$", "-labels.lst", file_path, 1)
if labels_path == file_path:
raise ValueError(
"Could not determine file name for the labels corresponding to %s.", file_path
)
from allennlp.common.file_utils import cached_path
file_path = cached_path(file_path)
logger.info("Reading file at %s", file_path)
from allennlp.common.file_utils import json_lines_from_file
json_lines = json_lines_from_file(file_path)
labels_path = cached_path(labels_path)
from allennlp.common.file_utils import text_lines_from_file
logger.info("Reading labels at %s", labels_path)
labels_lines = text_lines_from_file(labels_path)
for qid, (json, label) in enumerate(zip(json_lines, labels_lines)):
goal = json["goal"]
sol1 = json["sol1"]
sol2 = json["sol2"]
label = int(label)
yield self.text_to_instance(str(qid), goal, [sol1, sol2], label)
| allennlp-models-main | allennlp_models/mc/dataset_readers/piqa.py |
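A tiny sketch of how the PIQA reader derives the labels path from the data path, using the same regex as the top of `_read`; "train.jsonl" is a placeholder name.
```python
import re

file_path = "train.jsonl"  # placeholder data file name
labels_path = re.sub(r"\.jsonl$", "-labels.lst", file_path, 1)
print(labels_path)  # train-labels.lst
```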
import logging
from typing import List, Optional
from allennlp.data import DatasetReader, Instance
logger = logging.getLogger(__name__)
class TransformerMCReader(DatasetReader):
"""
Read input data for the TransformerMC model. This is the base class for all readers that produce
data for TransformerMC.
Instances have three fields:
* `alternatives`, a `ListField` of `TextField`
* `correct_alternative`, `IndexField` with the correct answer among `alternatives`
* `qid`, a `MetadataField` containing question ids
Parameters
----------
transformer_model_name : `str`, optional (default=`"roberta-large"`)
This reader chooses tokenizer and token indexer according to this setting.
length_limit : `int`, optional (default=`512`)
We will make sure that the length of an alternative never exceeds this many word pieces.
"""
def __init__(
self, transformer_model_name: str = "roberta-large", length_limit: int = 512, **kwargs
) -> None:
super().__init__(**kwargs)
from allennlp.data.tokenizers import PretrainedTransformerTokenizer
self._tokenizer = PretrainedTransformerTokenizer(
transformer_model_name, add_special_tokens=False
)
from allennlp.data.token_indexers import PretrainedTransformerIndexer
self._token_indexers = {"tokens": PretrainedTransformerIndexer(transformer_model_name)}
self.length_limit = length_limit
def text_to_instance(
self, # type: ignore
qid: str,
start: str,
alternatives: List[str],
label: Optional[int] = None,
) -> Instance:
# tokenize
start = self._tokenizer.tokenize(start)
sequences = []
for alternative in alternatives:
alternative = self._tokenizer.tokenize(alternative)
length_for_start = (
self.length_limit - len(alternative) - self._tokenizer.num_special_tokens_for_pair()
)
if length_for_start < 0:
# If the alternative is too long by itself, we take the beginning and add no tokens from the start.
alternative = alternative[:length_for_start]
length_for_start = 0
sequences.append(
self._tokenizer.add_special_tokens(start[:length_for_start], alternative)
)
# make fields
from allennlp.data.fields import TextField
sequences = [TextField(sequence, self._token_indexers) for sequence in sequences]
from allennlp.data.fields import ListField
sequences = ListField(sequences)
from allennlp.data.fields import MetadataField
fields = {
"alternatives": sequences,
"qid": MetadataField(qid),
}
if label is not None:
if label < 0 or label >= len(sequences):
raise ValueError("Alternative %d does not exist", label)
from allennlp.data.fields import IndexField
fields["correct_alternative"] = IndexField(label, sequences)
return Instance(fields)
| allennlp-models-main | allennlp_models/mc/dataset_readers/transformer_mc.py |
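A sketch of the per-alternative length budget computed in `text_to_instance` above; the numbers are illustrative, and the special-token count of 4 is an assumption matching a RoBERTa-style pair encoding.
```python
length_limit = 512               # the reader's default
num_special_tokens_for_pair = 4  # assumed value for a RoBERTa-style tokenizer
alternative_length = 20          # word pieces in one tokenized alternative

# Remaining budget for the shared start/question text for this alternative.
length_for_start = length_limit - alternative_length - num_special_tokens_for_pair
print(length_for_start)          # 488
```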
from allennlp_models.mc.predictors.transformer_mc import TransformerMCPredictor
| allennlp-models-main | allennlp_models/mc/predictors/__init__.py |
from typing import List
from allennlp.common.util import JsonDict
from allennlp.data import Instance
from allennlp.predictors.predictor import Predictor
@Predictor.register("transformer_mc")
class TransformerMCPredictor(Predictor):
"""
Predictor for the :class:`~allennlp_models.mc.models.TransformerMC` model.
"""
def predict(self, prefix: str, alternatives: List[str]) -> JsonDict:
return self.predict_json({"prefix": prefix, "alternatives": alternatives})
def _json_to_instance(self, json_dict: JsonDict) -> Instance:
return self._dataset_reader.text_to_instance(
"no_qid", json_dict["prefix"], json_dict["alternatives"]
)
| allennlp-models-main | allennlp_models/mc/predictors/transformer_mc.py |
from allennlp_models.mc.models.transformer_mc import TransformerMC
from allennlp_models.mc.models.transformer_mc_tt import TransformerMCTransformerToolkit
| allennlp-models-main | allennlp_models/mc/models/__init__.py |
import logging
from typing import Dict, List, Optional
import torch
from allennlp.data import Vocabulary
from allennlp.models import Model
from allennlp.modules.transformer import TransformerEmbeddings, TransformerStack, TransformerPooler
from torch.nn import Dropout
logger = logging.getLogger(__name__)
@Model.register("transformer_mc_tt")
class TransformerMCTransformerToolkit(Model):
"""
This class implements a multiple choice model patterned after the proposed model in
[RoBERTa: A Robustly Optimized BERT Pretraining Approach (Liu et al)]
(https://api.semanticscholar.org/CorpusID:198953378).
It is exactly like the `TransformerMC` model, except it uses the `TransformerTextField` for its input.
It calculates a score for each sequence on top of the CLS token, and then chooses the alternative
with the highest score.
Parameters
----------
vocab : ``Vocabulary``
transformer_model : ``str``, optional (default=``"roberta-large"``)
This model chooses the embedder according to this setting. You probably want to make sure this matches the
setting in the reader.
"""
def __init__(
self,
vocab: Vocabulary,
transformer_model: str = "roberta-large",
override_weights_file: Optional[str] = None,
**kwargs
) -> None:
super().__init__(vocab, **kwargs)
transformer_kwargs = {
"model_name": transformer_model,
"weights_path": override_weights_file,
}
self.embeddings = TransformerEmbeddings.from_pretrained_module(**transformer_kwargs)
self.transformer_stack = TransformerStack.from_pretrained_module(**transformer_kwargs)
self.pooler = TransformerPooler.from_pretrained_module(**transformer_kwargs)
self.pooler_dropout = Dropout(p=0.1)
self.linear_layer = torch.nn.Linear(self.pooler.get_output_dim(), 1)
self.linear_layer.weight.data.normal_(mean=0.0, std=0.02)
self.linear_layer.bias.data.zero_()
self.loss = torch.nn.CrossEntropyLoss()
from allennlp.training.metrics import CategoricalAccuracy
self.accuracy = CategoricalAccuracy()
def forward( # type: ignore
self,
alternatives: Dict[str, torch.Tensor],
correct_alternative: Optional[torch.IntTensor] = None,
qid: Optional[List[str]] = None,
) -> Dict[str, torch.Tensor]:
"""
Parameters
----------
alternatives : ``Dict[str, torch.LongTensor]``
            From a ``ListField[TransformerTextField]``. Contains a list of alternatives to evaluate for every instance.
correct_alternative : ``Optional[torch.IntTensor]``
From an ``IndexField``. Contains the index of the correct answer for every instance.
qid : `Optional[List[str]]`
A list of question IDs for the questions being processed now.
Returns
-------
An output dictionary consisting of:
loss : ``torch.FloatTensor``, optional
A scalar loss to be optimised. This is only returned when `correct_alternative` is not `None`.
logits : ``torch.FloatTensor``
The logits for every possible answer choice
best_alternative : ``List[int]``
The index of the highest scoring alternative for every instance in the batch
"""
batch_size, num_alternatives, seq_length = alternatives["input_ids"].size()
alternatives = {
name: t.view(batch_size * num_alternatives, seq_length)
for name, t in alternatives.items()
}
embedded_alternatives = self.embeddings(**alternatives)
embedded_alternatives = self.transformer_stack(
embedded_alternatives, alternatives["attention_mask"]
)
embedded_alternatives = self.pooler(embedded_alternatives.final_hidden_states)
embedded_alternatives = self.pooler_dropout(embedded_alternatives)
logits = self.linear_layer(embedded_alternatives)
logits = logits.view(batch_size, num_alternatives)
result = {"logits": logits, "best_alternative": logits.argmax(1)}
if correct_alternative is not None:
correct_alternative = correct_alternative.squeeze(1)
result["loss"] = self.loss(logits, correct_alternative)
self.accuracy(logits, correct_alternative)
return result
def get_metrics(self, reset: bool = False) -> Dict[str, float]:
return {
"acc": self.accuracy.get_metric(reset),
}
| allennlp-models-main | allennlp_models/mc/models/transformer_mc_tt.py |
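# A toy, pure-torch sketch of the scoring arithmetic in the forward() above: pooled
# [CLS] vectors for every (instance, alternative) pair are scored with a single
# linear layer, reshaped back to (batch_size, num_alternatives), and compared to the
# squeezed IndexField labels. The tensors below are random stand-ins for the
# transformer outputs.
import torch

batch_size, num_alternatives, hidden = 2, 4, 16
pooled = torch.randn(batch_size * num_alternatives, hidden)
linear = torch.nn.Linear(hidden, 1)
logits = linear(pooled).view(batch_size, num_alternatives)
best_alternative = logits.argmax(1)             # shape: (batch_size,)
correct_alternative = torch.tensor([[1], [3]])  # IndexField shape: (batch_size, 1)
loss = torch.nn.CrossEntropyLoss()(logits, correct_alternative.squeeze(1))
print(best_alternative, loss.item())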
import logging
from typing import Dict, List, Optional
import torch
from allennlp.data import Vocabulary, TextFieldTensors
from allennlp.models import Model
logger = logging.getLogger(__name__)
@Model.register("transformer_mc")
class TransformerMC(Model):
"""
This class implements a multiple choice model patterned after the proposed model in
[RoBERTa: A Robustly Optimized BERT Pretraining Approach (Liu et al)]
(https://api.semanticscholar.org/CorpusID:198953378).
It calculates a score for each sequence on top of the CLS token, and then chooses the alternative
with the highest score.
Parameters
----------
vocab : ``Vocabulary``
transformer_model : ``str``, optional (default=``"roberta-large"``)
This model chooses the embedder according to this setting. You probably want to make sure this matches the
setting in the reader.
"""
def __init__(
self,
vocab: Vocabulary,
transformer_model: str = "roberta-large",
override_weights_file: Optional[str] = None,
override_weights_strip_prefix: Optional[str] = None,
**kwargs
) -> None:
super().__init__(vocab, **kwargs)
from allennlp.modules.text_field_embedders import BasicTextFieldEmbedder
from allennlp.modules.token_embedders import PretrainedTransformerEmbedder
from allennlp.modules.seq2vec_encoders import BertPooler
self._text_field_embedder = PretrainedTransformerEmbedder(
transformer_model,
override_weights_file=override_weights_file,
override_weights_strip_prefix=override_weights_strip_prefix,
)
self._text_field_embedder = BasicTextFieldEmbedder({"tokens": self._text_field_embedder})
self._pooler = BertPooler(
transformer_model,
override_weights_file=override_weights_file,
override_weights_strip_prefix=override_weights_strip_prefix,
dropout=0.1,
)
self._linear_layer = torch.nn.Linear(self._text_field_embedder.get_output_dim(), 1)
self._linear_layer.weight.data.normal_(mean=0.0, std=0.02)
self._linear_layer.bias.data.zero_()
self._loss = torch.nn.CrossEntropyLoss()
from allennlp.training.metrics import CategoricalAccuracy
self._accuracy = CategoricalAccuracy()
def forward( # type: ignore
self,
alternatives: TextFieldTensors,
correct_alternative: Optional[torch.IntTensor] = None,
qid: Optional[List[str]] = None,
) -> Dict[str, torch.Tensor]:
"""
Parameters
----------
alternatives : ``Dict[str, torch.LongTensor]``
From a ``ListField[TextField]``. Contains a list of alternatives to evaluate for every instance.
correct_alternative : ``Optional[torch.IntTensor]``
From an ``IndexField``. Contains the index of the correct answer for every instance.
qid : `Optional[List[str]]`
A list of question IDs for the questions being processed now.
Returns
-------
An output dictionary consisting of:
loss : ``torch.FloatTensor``, optional
A scalar loss to be optimised. This is only returned when `correct_alternative` is not `None`.
logits : ``torch.FloatTensor``
The logits for every possible answer choice
best_alternative : ``List[int]``
The index of the highest scoring alternative for every instance in the batch
"""
embedded_alternatives = self._text_field_embedder(alternatives, num_wrapping_dims=1)
flattened_embedded_alternatives = embedded_alternatives.view(
embedded_alternatives.size(0) * embedded_alternatives.size(1),
embedded_alternatives.size(2),
embedded_alternatives.size(3),
)
flattened_pooled_alternatives = self._pooler(flattened_embedded_alternatives)
flattened_logit_alternatives = self._linear_layer(flattened_pooled_alternatives)
logit_alternatives = flattened_logit_alternatives.view(
embedded_alternatives.size(0), embedded_alternatives.size(1)
)
result = {"logits": logit_alternatives, "best_alternative": logit_alternatives.argmax(1)}
if correct_alternative is not None:
correct_alternative = correct_alternative.squeeze(1)
result["loss"] = self._loss(logit_alternatives, correct_alternative)
self._accuracy(logit_alternatives, correct_alternative)
return result
def get_metrics(self, reset: bool = False) -> Dict[str, float]:
return {
"acc": self._accuracy.get_metric(reset),
}
default_predictor = "transformer_mc"
| allennlp-models-main | allennlp_models/mc/models/transformer_mc.py |
# flake8: noqa: F403
from allennlp_models.rc.models import *
from allennlp_models.rc.predictors import *
from allennlp_models.rc.dataset_readers import *
from allennlp_models.rc.modules import *
| allennlp-models-main | allennlp_models/rc/__init__.py |
from allennlp_models.rc.metrics.drop_em_and_f1 import DropEmAndF1
from allennlp_models.rc.metrics.squad_em_and_f1 import SquadEmAndF1
| allennlp-models-main | allennlp_models/rc/metrics/__init__.py |
from typing import Tuple, Union, List, cast
from allennlp.nn.util import dist_reduce_sum
from allennlp.training.metrics.metric import Metric
from allennlp_models.rc.tools import squad
@Metric.register("squad")
class SquadEmAndF1(Metric):
"""
This :class:`Metric` takes the best span string computed by a model, along with the answer
    strings labeled in the data, and computes exact match and F1 score using functions from the
official SQuAD2 and SQuAD1.1 evaluation scripts.
"""
def __init__(self) -> None:
self._total_em = 0.0
self._total_f1 = 0.0
self._count = 0
def __call__(
self,
best_span_strings: Union[str, List[str]],
answer_strings: Union[List[str], List[List[str]]],
):
if not isinstance(best_span_strings, list):
best_span_strings = [best_span_strings]
answer_strings = [answer_strings] # type: ignore
cast(List[str], best_span_strings)
cast(List[List[str]], answer_strings)
assert len(best_span_strings) == len(answer_strings)
count = len(best_span_strings)
exact_match = 0
f1_score = 0.0
for prediction, gold_answers in zip(best_span_strings, answer_strings):
exact_match += squad.metric_max_over_ground_truths(
squad.compute_exact, prediction, gold_answers
)
f1_score += squad.metric_max_over_ground_truths(
squad.compute_f1, prediction, gold_answers
)
# Converting to int here, since we want to count the number of exact matches.
self._total_em += dist_reduce_sum(int(exact_match))
self._total_f1 += dist_reduce_sum(f1_score)
self._count += dist_reduce_sum(count)
def get_metric(self, reset: bool = False) -> Tuple[float, float]:
"""
Returns
-------
Average exact match and F1 score (in that order) as computed by the official SQuAD script
over all inputs.
"""
exact_match = self._total_em / self._count if self._count > 0 else 0
f1_score = self._total_f1 / self._count if self._count > 0 else 0
if reset:
self.reset()
return exact_match, f1_score
def reset(self):
self._total_em = 0.0
self._total_f1 = 0.0
self._count = 0
def __str__(self):
return f"SquadEmAndF1(em={self._total_em}, f1={self._total_f1})"
| allennlp-models-main | allennlp_models/rc/metrics/squad_em_and_f1.py |
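# A minimal usage sketch for SquadEmAndF1 above, assuming allennlp and
# allennlp-models are installed. The first call scores a single prediction, the
# second a small batch; the answer strings are made up.
from allennlp_models.rc.metrics import SquadEmAndF1

metric = SquadEmAndF1()
metric("Albert Einstein", ["Einstein", "Albert Einstein"])
metric(["1905", "Bern"], [["1905"], ["Zurich", "Bern, Switzerland"]])
exact_match, f1 = metric.get_metric(reset=True)
print(exact_match, f1)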
from typing import Tuple, List, Union
from allennlp.nn.util import dist_reduce_sum
from allennlp.training.metrics.metric import Metric
from allennlp_models.rc.tools.drop import (
get_metrics as drop_em_and_f1,
answer_json_to_strings,
)
from allennlp_models.rc.tools.squad import metric_max_over_ground_truths
@Metric.register("drop")
class DropEmAndF1(Metric):
"""
This :class:`Metric` takes the best span string computed by a model, along with the answer
strings labeled in the data, and computes exact match and F1 score using the official DROP
evaluator (which has special handling for numbers and for questions with multiple answer spans,
among other things).
"""
def __init__(self) -> None:
self._total_em = 0.0
self._total_f1 = 0.0
self._count = 0
def __call__(self, prediction: Union[str, List], ground_truths: List): # type: ignore
"""
Parameters
----------
prediction: ``Union[str, List]``
            The predicted answer from the model being evaluated. This can be a string, or a list of
            strings when multiple spans are predicted as the answer.
ground_truths: ``List``
All the ground truth answer annotations.
"""
# If you wanted to split this out by answer type, you could look at [1] here and group by
# that, instead of only keeping [0].
ground_truth_answer_strings = [
answer_json_to_strings(annotation)[0] for annotation in ground_truths
]
exact_match, f1_score = metric_max_over_ground_truths(
drop_em_and_f1, prediction, ground_truth_answer_strings
)
# Converting to int here, since we want to count the number of exact matches.
self._total_em += dist_reduce_sum(int(exact_match))
self._total_f1 += dist_reduce_sum(f1_score)
self._count += dist_reduce_sum(1)
def get_metric(self, reset: bool = False) -> Tuple[float, float]:
"""
Returns
-------
Average exact match and F1 score (in that order) as computed by the official DROP script
over all inputs.
"""
exact_match = self._total_em / self._count if self._count > 0 else 0
f1_score = self._total_f1 / self._count if self._count > 0 else 0
if reset:
self.reset()
return exact_match, f1_score
def reset(self):
self._total_em = 0.0
self._total_f1 = 0.0
self._count = 0
def __str__(self):
return f"DropEmAndF1(em={self._total_em}, f1={self._total_f1})"
| allennlp-models-main | allennlp_models/rc/metrics/drop_em_and_f1.py |
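# A minimal usage sketch for DropEmAndF1 above. Note that the ground truths are the
# raw answer annotations from the DROP data (dicts with "number", "spans", and
# "date" keys), not plain strings; answer_json_to_strings converts them internally.
# The annotations below are made up.
from allennlp_models.rc.metrics import DropEmAndF1

empty_date = {"day": "", "month": "", "year": ""}
metric = DropEmAndF1()
metric("3", [{"number": "3", "spans": [], "date": empty_date}])
metric(["Smith", "Jones"], [{"number": "", "spans": ["Jones", "Smith"], "date": empty_date}])
exact_match, f1 = metric.get_metric(reset=True)
print(exact_match, f1)  # 1.0 1.0 for these two examples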
"""
This evaluation script relies heavily on the one for DROP (``allennlp/tools/drop_eval.py``). We need a separate
script for Quoref only because the data formats are slightly different.
"""
import json
from typing import Dict, Tuple, List, Any, Optional
import argparse
import numpy as np
from allennlp_models.rc.tools import drop
def _get_answers_from_data(annotations: Dict[str, Any]) -> Dict[str, List[str]]:
"""
If the annotations file is in the same format as the original data files, this method can be used to extract a
dict of query ids and answers.
"""
answers_dict: Dict[str, List[str]] = {}
for article_info in annotations["data"]:
for paragraph_info in article_info["paragraphs"]:
for qa_pair in paragraph_info["qas"]:
query_id = qa_pair["id"]
candidate_answers = [answer["text"] for answer in qa_pair["answers"]]
answers_dict[query_id] = candidate_answers
return answers_dict
def evaluate_json(
annotations: Dict[str, Any], predicted_answers: Dict[str, Any]
) -> Tuple[float, float]:
"""
Takes gold annotations and predicted answers and evaluates the predictions for each question
in the gold annotations. Both JSON dictionaries must have query_id keys, which are used to
match predictions to gold annotations.
The ``predicted_answers`` JSON must be a dictionary keyed by query id, where the value is a
list of strings (or just one string) that is the answer.
The ``annotations`` are assumed to have either the format of the dev set in the Quoref data release, or the
same format as the predicted answers file.
"""
instance_exact_match = []
instance_f1 = []
if "data" in annotations:
# We're looking at annotations in the original data format. Let's extract the answers.
annotated_answers = _get_answers_from_data(annotations)
else:
annotated_answers = annotations
for query_id, candidate_answers in annotated_answers.items():
max_em_score = 0.0
max_f1_score = 0.0
if query_id in predicted_answers:
predicted = predicted_answers[query_id]
gold_answer = tuple(candidate_answers)
em_score, f1_score = drop.get_metrics(predicted, gold_answer)
if gold_answer[0].strip() != "":
max_em_score = max(max_em_score, em_score)
max_f1_score = max(max_f1_score, f1_score)
else:
print("Missing prediction for question: {}".format(query_id))
max_em_score = 0.0
max_f1_score = 0.0
instance_exact_match.append(max_em_score)
instance_f1.append(max_f1_score)
global_em = np.mean(instance_exact_match)
global_f1 = np.mean(instance_f1)
print("Exact-match accuracy {0:.2f}".format(global_em * 100))
print("F1 score {0:.2f}".format(global_f1 * 100))
print("{0:.2f} & {1:.2f}".format(global_em * 100, global_f1 * 100))
return global_em, global_f1
def evaluate_prediction_file(
prediction_path: str, gold_path: str, output_path: Optional[str] = None
) -> Tuple[float, float]:
"""
Takes a prediction file and a gold file and evaluates the predictions for each question in the gold file. Both
files must be json formatted and must have query_id keys, which are used to match predictions to gold
annotations. Writes a json with global_em and global_f1 metrics to file at the specified output
path, unless None is passed as output path.
"""
predicted_answers = json.load(open(prediction_path, encoding="utf-8"))
annotations = json.load(open(gold_path, encoding="utf-8"))
global_em, global_f1 = evaluate_json(annotations, predicted_answers)
# Output predictions to file if an output path is given
if output_path is not None:
output_dict = {"global_em": global_em, "global_f1": global_f1}
with open(output_path, "w", encoding="utf8") as outfile:
json.dump(output_dict, outfile)
return (global_em, global_f1)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Evaluate Quoref predictions")
parser.add_argument(
"--gold_path",
type=str,
required=False,
default="quoref-test-v0.1.json",
help="location of the gold file",
)
parser.add_argument(
"--prediction_path",
type=str,
required=False,
default="sample_predictions.json",
help="location of the prediction file",
)
parser.add_argument(
"--output_path",
type=str,
required=False,
default=None,
help="location of the output metrics file",
)
args = parser.parse_args()
evaluate_prediction_file(args.prediction_path, args.gold_path, args.output_path)
| allennlp-models-main | allennlp_models/rc/tools/quoref.py |
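# A minimal sketch of calling the Quoref evaluator above on in-memory dicts (the
# query ids and answers are made up). Missing predictions are scored as zero and
# reported with a warning, as in the code above.
from allennlp_models.rc.tools.quoref import evaluate_json

annotations = {"q1": ["Frodo Baggins"], "q2": ["the Shire"]}
predictions = {"q1": "Frodo Baggins"}  # no prediction for q2
global_em, global_f1 = evaluate_json(annotations, predictions)
print(global_em, global_f1)  # 0.5 0.5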
"""Official evaluation script for ORB.
Usage:
python evaluation_script.py
--dataset_file <file_path>
--prediction_file <file_path>
--metrics_output_file <file_path>
"""
# pylint: disable=unused-import
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import json
from allennlp_models.rc.tools.orb_utils import evaluate_dataset
def read_predictions(json_file):
return json.load(open(json_file))
def read_labels(jsonl_file):
qid_answer_map = {}
with open(jsonl_file) as f:
for line in f:
data = json.loads(line)
for qa_pair in data["qa_pairs"]:
qid_answer_map[str(qa_pair["qid"])] = {
"dataset": qa_pair["dataset"],
"answers": qa_pair["answers"],
}
return qid_answer_map
def compute_averages(all_metrics):
for dataset, dataset_metric in all_metrics.items():
if len(dataset_metric) > 0:
total = dataset_metric["total"]
for metric, value in dataset_metric.items():
if metric != "total":
dataset_metric[metric] = value / float(total)
return all_metrics
def evaluate(answers, predictions):
metrics = {
"drop": {},
"squad1": {},
"squad2": {},
"newsqa": {},
"quoref": {},
"ropes": {},
"narrativeqa": {},
"duorc": {},
"drop_syn": {},
"squad1_syn": {},
"quoref_syn": {},
"newsqa_syn": {},
"ropes_syn": {},
"duorc_syn": {},
}
for qid, ground_truth_dict in answers.items():
if qid in predictions:
predicted_answer = predictions[qid]
dataset_name = ground_truth_dict["dataset"].lower()
try:
metrics = evaluate_dataset(
dataset_name, predicted_answer, ground_truth_dict["answers"], metrics
)
except KeyError:
print("Incorrect dataset name at : {0}.".format(dataset_name))
exit(0)
except Exception as err:
print(str(err))
metrics = compute_averages(metrics)
return metrics
def process_for_output(metrics):
processed_metrics = {}
average_f1 = 0
f1_instance_count = 0
for dataset, metric_dict in metrics.items():
for metric_name, metric_value in metric_dict.items():
if metric_name != "total":
processed_metrics["{0}_{1}".format(dataset, metric_name)] = round(metric_value, 4)
if metric_name in ["f1", "rouge_f"] and dataset != "ropes":
average_f1 += metric_value
f1_instance_count += 1
elif metric_name == "exact_match" and dataset == "ropes":
average_f1 += metric_value
f1_instance_count += 1
processed_metrics["average_f1"] = round(average_f1 / f1_instance_count, 4)
return processed_metrics
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Evaluation for ORB")
parser.add_argument("--dataset_file", type=str, help="Dataset File")
parser.add_argument("--prediction_file", type=str, help="Prediction File")
parser.add_argument("--metrics_output_file", type=str, help="Metrics File")
args = parser.parse_args()
answers = read_labels(args.dataset_file)
predictions = read_predictions(args.prediction_file)
metrics = evaluate(answers, predictions)
processed_metrics = process_for_output(metrics)
json.dump(processed_metrics, open(args.metrics_output_file, "w"), indent=2)
| allennlp-models-main | allennlp_models/rc/tools/orb.py |
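# A minimal sketch of running the ORB evaluation above on in-memory dicts. The qid,
# dataset name, and answers are made up; each ground truth is a list whose first
# element is the answer string, mirroring what read_labels() produces.
from allennlp_models.rc.tools.orb import evaluate, process_for_output

answers = {"1": {"dataset": "squad1", "answers": [["Denver Broncos"]]}}
predictions = {"1": "the Denver Broncos"}
metrics = evaluate(answers, predictions)
print(process_for_output(metrics))  # {'squad1_exact_match': 1.0, 'squad1_f1': 1.0, 'average_f1': 1.0}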
| allennlp-models-main | allennlp_models/rc/tools/__init__.py |
"""Functions taken from [the official evaluation script]
(https://worksheets.codalab.org/rest/bundles/0x6b567e1cf2e041ec80d7098f031c5c9e/contents/blob/)
for SQuAD version 2.0.
"""
import collections
import re
import string
from typing import Callable, Sequence, TypeVar, Tuple
def make_qid_to_has_ans(dataset):
qid_to_has_ans = {}
for article in dataset:
for p in article["paragraphs"]:
for qa in p["qas"]:
qid_to_has_ans[qa["id"]] = bool(qa["answers"])
return qid_to_has_ans
def normalize_answer(s):
"""Lower text and remove punctuation, articles and extra whitespace."""
def remove_articles(text):
regex = re.compile(r"\b(a|an|the)\b", re.UNICODE)
return re.sub(regex, " ", text)
def white_space_fix(text):
return " ".join(text.split())
def remove_punc(text):
exclude = set(string.punctuation)
return "".join(ch for ch in text if ch not in exclude)
def lower(text):
return text.lower()
return white_space_fix(remove_articles(remove_punc(lower(s))))
def get_tokens(s):
if not s:
return []
return normalize_answer(s).split()
def compute_exact(a_pred: str, a_gold: str) -> int:
return int(normalize_answer(a_pred) == normalize_answer(a_gold))
def compute_f1(a_pred: str, a_gold: str) -> float:
pred_toks = get_tokens(a_pred)
gold_toks = get_tokens(a_gold)
common = collections.Counter(pred_toks) & collections.Counter(gold_toks) # type: ignore[var-annotated]
num_same = sum(common.values())
if len(pred_toks) == 0 or len(gold_toks) == 0:
# If either is no-answer, then F1 is 1 if they agree, 0 otherwise
return float(pred_toks == gold_toks)
if num_same == 0:
return 0.0
precision = 1.0 * num_same / len(pred_toks)
recall = 1.0 * num_same / len(gold_toks)
f1 = (2 * precision * recall) / (precision + recall)
return f1
_P = TypeVar("_P")
_G = TypeVar("_G")
_T = TypeVar("_T", int, float, Tuple[int, ...], Tuple[float, ...])
def metric_max_over_ground_truths(
metric_fn: Callable[[_P, _G], _T], prediction: _P, ground_truths: Sequence[_G]
) -> _T:
scores_for_ground_truths = []
for ground_truth in ground_truths:
score = metric_fn(prediction, ground_truth)
scores_for_ground_truths.append(score)
return max(scores_for_ground_truths)
def get_metric_score(prediction: str, gold_answers: Sequence[str]) -> Tuple[int, float]:
exact_scores = metric_max_over_ground_truths(compute_exact, prediction, gold_answers)
f1_scores = metric_max_over_ground_truths(compute_f1, prediction, gold_answers)
return exact_scores, f1_scores
| allennlp-models-main | allennlp_models/rc/tools/squad.py |
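# A small worked example for the SQuAD helpers above (the strings are made up).
# Articles are stripped during normalization, so the prediction below has tokens
# {quick, fox} and the gold answer {quick, brown, fox}: precision 1.0, recall 2/3,
# F1 = 0.8. Exact match and F1 take the max over all gold answers.
from allennlp_models.rc.tools.squad import compute_f1, get_metric_score

print(compute_f1("the quick fox", "a quick brown fox"))               # ≈ 0.8
print(get_metric_score("Einstein", ["Albert Einstein", "Einstein"]))  # (1, 1.0)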
from typing import List, Tuple
from allennlp_models.rc.tools.squad import get_metric_score as get_metric_squad
from allennlp_models.rc.tools.drop import get_metrics as drop_metrics
from allennlp_models.rc.tools.narrativeqa import get_metric_score as get_metric_narrativeqa
def get_metric_drop(predicted: str, ground_truths: List[str]) -> Tuple[float, float]:
em_scores = []
f1_scores = []
for ground_truth in ground_truths:
exact_match, f1 = drop_metrics(predicted, ground_truth)
em_scores.append(exact_match)
f1_scores.append(f1)
return max(em_scores), max(f1_scores)
def update_extractive_metrics(metrics, dataset_name, exact_match, f1):
metrics[dataset_name]["exact_match"] = (
metrics[dataset_name]["exact_match"] + exact_match
if "exact_match" in metrics[dataset_name]
else exact_match
)
metrics[dataset_name]["f1"] = (
metrics[dataset_name]["f1"] + f1 if "f1" in metrics[dataset_name] else f1
)
metrics[dataset_name]["total"] = (
metrics[dataset_name]["total"] + 1 if "total" in metrics[dataset_name] else 1
)
return metrics
def update_abstractive_metrics(
metrics, bleu_1_score, bleu_4_score, meteor_score, rouge_f, rouge_p, rouge_r
):
metrics["narrativeqa"]["bleu_1"] = (
metrics["narrativeqa"]["bleu_1"] + bleu_1_score
if "bleu_1" in metrics["narrativeqa"]
else bleu_1_score
)
metrics["narrativeqa"]["bleu_4"] = (
metrics["narrativeqa"]["bleu_4"] + bleu_4_score
if "bleu_4" in metrics["narrativeqa"]
else bleu_4_score
)
metrics["narrativeqa"]["meteor"] = (
metrics["narrativeqa"]["meteor"] + meteor_score
if "meteor" in metrics["narrativeqa"]
else meteor_score
)
metrics["narrativeqa"]["rouge_f"] = (
metrics["narrativeqa"]["rouge_f"] + rouge_f
if "rouge_f" in metrics["narrativeqa"]
else rouge_f
)
metrics["narrativeqa"]["rouge_p"] = (
metrics["narrativeqa"]["rouge_p"] + rouge_p
if "rouge_p" in metrics["narrativeqa"]
else rouge_p
)
metrics["narrativeqa"]["rouge_r"] = (
metrics["narrativeqa"]["rouge_r"] + rouge_r
if "rouge_r" in metrics["narrativeqa"]
else rouge_r
)
metrics["narrativeqa"]["total"] = (
metrics["narrativeqa"]["total"] + 1 if "total" in metrics["narrativeqa"] else 1
)
return metrics
def evaluate_dataset(dataset_name, prediction, ground_truths, metrics):
prediction = prediction[0] if isinstance(prediction, list) else prediction
if dataset_name in [
"squad1",
"squad2",
"ropes",
"newsqa",
"duorc",
"squad1_syn",
"ropes_syn",
"newsqa_syn",
"duorc_syn",
]:
exact_match, f1 = get_metric_squad(prediction, [truth[0] for truth in ground_truths])
metrics = update_extractive_metrics(metrics, dataset_name, exact_match, f1)
elif dataset_name in ["drop", "quoref", "drop_syn", "quoref_syn"]:
exact_match, f1 = get_metric_drop(prediction, [truth[0] for truth in ground_truths])
metrics = update_extractive_metrics(metrics, dataset_name, exact_match, f1)
elif dataset_name == "narrativeqa":
prediction = prediction[0] if isinstance(prediction, list) else prediction
ground_truths = [truth[0] for truth in ground_truths]
bleu1, bleu4, meteor, rouge_f, rouge_p, rouge_r = get_metric_narrativeqa(
prediction, ground_truths
)
metrics = update_abstractive_metrics(
metrics, bleu1, bleu4, meteor, rouge_f, rouge_p, rouge_r
)
else:
print("Incorrect dataset name at :{0}".format(dataset_name))
raise ValueError
return metrics
| allennlp-models-main | allennlp_models/rc/tools/orb_utils.py |
""" Evaluation script for NarrativeQA dataset. """
import nltk
try:
nltk.data.find("tokenizers/punkt")
except LookupError:
nltk.download("punkt")
nltk.download("wordnet")
import rouge
from nltk.translate.bleu_score import sentence_bleu
from nltk.tokenize import word_tokenize
from nltk.translate.meteor_score import meteor_score
import copy
rouge_l_evaluator = rouge.Rouge(
metrics=["rouge-l"],
max_n=4,
limit_length=True,
length_limit=100,
length_limit_type="words",
apply_avg=True,
apply_best=True,
alpha=0.5,
weight_factor=1.2,
stemming=True,
)
def bleu_1(p, g):
return sentence_bleu(g, p, weights=(1, 0, 0, 0))
def bleu_4(p, g):
return sentence_bleu(g, p, weights=(0, 0, 0, 1))
def meteor(p, g):
return meteor_score([x.split() for x in g], p.split())
def rouge_l(p, g):
return rouge_l_evaluator.get_scores(p, g)
def metric_max_over_ground_truths(metric_fn, prediction, ground_truths, tokenize=False):
scores_for_ground_truths = []
for ground_truth in ground_truths:
if tokenize:
score = metric_fn(word_tokenize(prediction), [word_tokenize(ground_truth)])
else:
score = metric_fn(prediction, [ground_truth])
scores_for_ground_truths.append(score)
if isinstance(score, dict) and "rouge-l" in score:
max_score = copy.deepcopy(score)
max_score["rouge-l"]["f"] = round(
max([score["rouge-l"]["f"] for score in scores_for_ground_truths]), 2
)
max_score["rouge-l"]["p"] = round(
max([score["rouge-l"]["p"] for score in scores_for_ground_truths]), 2
)
max_score["rouge-l"]["r"] = round(
max([score["rouge-l"]["r"] for score in scores_for_ground_truths]), 2
)
return max_score
else:
return round(max(scores_for_ground_truths), 2)
def get_metric_score(prediction, ground_truths):
bleu_1_score = metric_max_over_ground_truths(bleu_1, prediction, ground_truths, tokenize=True)
bleu_4_score = metric_max_over_ground_truths(bleu_4, prediction, ground_truths, tokenize=True)
meteor_score = metric_max_over_ground_truths(meteor, prediction, ground_truths, tokenize=False)
rouge_l_score = metric_max_over_ground_truths(
rouge_l, prediction, ground_truths, tokenize=False
)
return (
bleu_1_score,
bleu_4_score,
meteor_score,
rouge_l_score["rouge-l"]["f"],
rouge_l_score["rouge-l"]["p"],
rouge_l_score["rouge-l"]["r"],
)
| allennlp-models-main | allennlp_models/rc/tools/narrativeqa.py |
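# A minimal usage sketch for the NarrativeQA metrics above, assuming the nltk and
# rouge dependencies imported by the module are installed (importing it downloads
# the required nltk data). The prediction and references are made up.
from allennlp_models.rc.tools.narrativeqa import get_metric_score

prediction = "the dragon guarded the gold"
references = ["a dragon guarded the gold", "the dragon slept on its hoard"]
bleu_1_score, bleu_4_score, meteor_value, rouge_f, rouge_p, rouge_r = get_metric_score(
    prediction, references
)
print(bleu_1_score, rouge_f)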
import json
import logging
import time
from typing import Iterable, List, Set
from allennlp.common.checks import check_for_gpu
from allennlp.data import Instance
from allennlp.predictors import Predictor
from tqdm import tqdm
from allennlp_models.rc.metrics import SquadEmAndF1
logger = logging.getLogger(__name__)
if __name__ == "__main__":
import allennlp_models.rc # noqa F401: Needed to register the registrables.
import argparse
logging.basicConfig(level=logging.INFO)
parser = argparse.ArgumentParser(description="Evaluation for SQuAD 1.1")
parser.add_argument("--cuda-device", type=int, default=-1)
parser.add_argument("--qa-model", type=str)
parser.add_argument(
"--input-file",
type=str,
default="https://allennlp.s3.amazonaws.com/datasets/squad/squad-dev-v1.1.json",
)
args = parser.parse_args()
# Read inputs
check_for_gpu(args.cuda_device)
predictor = Predictor.from_path(
args.qa_model, predictor_name="transformer_qa", cuda_device=args.cuda_device
)
instances = predictor._dataset_reader.read(args.input_file)
# We have to make sure we put instances with the same qid all into the same batch.
def batch_instances_by_qid(instances: Iterable[Instance]) -> Iterable[List[Instance]]:
current_qid = None
current_batch = []
for instance in instances:
instance_qid = instance["metadata"]["id"]
if current_qid is None:
current_qid = instance_qid
if instance_qid == current_qid:
current_batch.append(instance)
else:
yield current_batch
current_batch = [instance]
current_qid = instance_qid
if len(current_batch) > 0:
yield current_batch
def make_batches(
instances: Iterable[Instance], batch_size: int = 64
) -> Iterable[List[Instance]]:
current_batch: List[Instance] = []
for qid_instances in batch_instances_by_qid(instances):
if len(qid_instances) + len(current_batch) < batch_size:
current_batch.extend(qid_instances)
else:
if len(current_batch) > 0:
yield current_batch
current_batch = qid_instances
if len(current_batch) > 0:
yield current_batch
# Run model and evaluate results
last_logged_scores_time = time.monotonic()
ids_seen: Set[str] = set()
metric = SquadEmAndF1()
answers = {}
for batch in make_batches(tqdm(instances, desc="Evaluating instances")):
gold_answers = {
instance["metadata"]["id"]: instance["metadata"]["answers"] for instance in batch
}
for result in predictor.predict_batch_instance(batch):
assert result["id"] not in ids_seen
ids_seen.add(result["id"])
gold_answer = gold_answers[result["id"]]
if len(gold_answer) == 0:
gold_answer = [""] # no-answer case
metric(result["best_span_str"], gold_answer)
answers[result["id"]] = result["best_span_str"]
if time.monotonic() - last_logged_scores_time > 30:
exact_match, f1_score = metric.get_metric()
logger.info(json.dumps({"em": exact_match, "f1": f1_score}))
last_logged_scores_time = time.monotonic()
# Print results
exact_match, f1_score = metric.get_metric()
print(json.dumps(answers))
print(json.dumps({"em": exact_match, "f1": f1_score}))
| allennlp-models-main | allennlp_models/rc/tools/transformer_qa_eval.py |
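# The batching helpers above live inside the script's __main__ block, so they are
# not importable; the sketch below only illustrates the same grouping idea (keeping
# all instances that share a question id together) using itertools.groupby on
# made-up question ids.
from itertools import groupby

question_ids = ["q1", "q1", "q2", "q3", "q3", "q3"]
for qid, group in groupby(question_ids):
    print(qid, list(group))
# q1 ['q1', 'q1']
# q2 ['q2']
# q3 ['q3', 'q3', 'q3']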
#!/usr/bin/python
from collections import defaultdict
from typing import Any, Dict, List, Set, Tuple, Union, Optional
import json
import argparse
import string
import re
import numpy as np
from scipy.optimize import linear_sum_assignment
# From here through _normalize_answer was originally copied from:
# https://worksheets.codalab.org/rest/bundles/0x6b567e1cf2e041ec80d7098f031c5c9e/contents/blob/
# Then cleaned up and modified a bit.
def _remove_articles(text: str) -> str:
regex = re.compile(r"\b(a|an|the)\b", re.UNICODE)
return re.sub(regex, " ", text)
def _white_space_fix(text: str) -> str:
return " ".join(text.split())
EXCLUDE = set(string.punctuation)
def _remove_punc(text: str) -> str:
if not _is_number(text):
return "".join(ch for ch in text if ch not in EXCLUDE)
else:
return text
def _lower(text: str) -> str:
return text.lower()
def _tokenize(text: str) -> List[str]:
return re.split(" |-", text)
def _normalize_answer(text: str) -> str:
"""Lower text and remove punctuation, articles and extra whitespace."""
parts = [
_white_space_fix(_remove_articles(_normalize_number(_remove_punc(_lower(token)))))
for token in _tokenize(text)
]
parts = [part for part in parts if part.strip()]
normalized = " ".join(parts).strip()
return normalized
def _is_number(text: str) -> bool:
try:
float(text)
return True
except ValueError:
return False
def _normalize_number(text: str) -> str:
if _is_number(text):
return str(float(text))
else:
return text
def _answer_to_bags(
answer: Union[str, List[str], Tuple[str, ...]]
) -> Tuple[List[str], List[Set[str]]]:
if isinstance(answer, (list, tuple)):
raw_spans = answer
else:
raw_spans = [answer]
normalized_spans: List[str] = []
token_bags = []
for raw_span in raw_spans:
normalized_span = _normalize_answer(raw_span)
normalized_spans.append(normalized_span)
token_bags.append(set(normalized_span.split()))
return normalized_spans, token_bags
def _align_bags(predicted: List[Set[str]], gold: List[Set[str]]) -> List[float]:
"""
Takes gold and predicted answer sets and first finds the optimal 1-1 alignment
between them and gets maximum metric values over all the answers.
"""
scores = np.zeros([len(gold), len(predicted)])
for gold_index, gold_item in enumerate(gold):
for pred_index, pred_item in enumerate(predicted):
if _match_numbers_if_present(gold_item, pred_item):
scores[gold_index, pred_index] = _compute_f1(pred_item, gold_item)
row_ind, col_ind = linear_sum_assignment(-scores)
max_scores = np.zeros([max(len(gold), len(predicted))])
for row, column in zip(row_ind, col_ind):
max_scores[row] = max(max_scores[row], scores[row, column])
return max_scores
def _compute_f1(predicted_bag: Set[str], gold_bag: Set[str]) -> float:
intersection = len(gold_bag.intersection(predicted_bag))
if not predicted_bag:
precision = 1.0
else:
precision = intersection / float(len(predicted_bag))
if not gold_bag:
recall = 1.0
else:
recall = intersection / float(len(gold_bag))
f1 = (
(2 * precision * recall) / (precision + recall)
if not (precision == 0.0 and recall == 0.0)
else 0.0
)
return f1
def _match_numbers_if_present(gold_bag: Set[str], predicted_bag: Set[str]) -> bool:
gold_numbers = set()
predicted_numbers = set()
for word in gold_bag:
if _is_number(word):
gold_numbers.add(word)
for word in predicted_bag:
if _is_number(word):
predicted_numbers.add(word)
if (not gold_numbers) or gold_numbers.intersection(predicted_numbers):
return True
return False
def get_metrics(
predicted: Union[str, List[str], Tuple[str, ...]], gold: Union[str, List[str], Tuple[str, ...]]
) -> Tuple[float, float]:
"""
Takes a predicted answer and a gold answer (that are both either a string or a list of
strings), and returns exact match and the DROP F1 metric for the prediction. If you are
writing a script for evaluating objects in memory (say, the output of predictions during
validation, or while training), this is the function you want to call, after using
:func:`answer_json_to_strings` when reading the gold answer from the released data file.
"""
predicted_bags = _answer_to_bags(predicted)
gold_bags = _answer_to_bags(gold)
if set(predicted_bags[0]) == set(gold_bags[0]) and len(predicted_bags[0]) == len(gold_bags[0]):
exact_match = 1.0
else:
exact_match = 0.0
f1_per_bag = _align_bags(predicted_bags[1], gold_bags[1])
f1 = np.mean(f1_per_bag)
f1 = round(f1, 2)
return exact_match, f1
def answer_json_to_strings(answer: Dict[str, Any]) -> Tuple[Tuple[str, ...], str]:
"""
Takes an answer JSON blob from the DROP data release and converts it into strings used for
evaluation.
"""
if "number" in answer and answer["number"]:
return tuple([str(answer["number"])]), "number"
elif "spans" in answer and answer["spans"]:
return tuple(answer["spans"]), "span" if len(answer["spans"]) == 1 else "spans"
elif "date" in answer:
return (
tuple(
[
"{0} {1} {2}".format(
answer["date"]["day"], answer["date"]["month"], answer["date"]["year"]
)
]
),
"date",
)
else:
raise ValueError(
f"Answer type not found, should be one of number, spans or date at: {json.dumps(answer)}"
)
def evaluate_json(
annotations: Dict[str, Any], predicted_answers: Dict[str, Any]
) -> Tuple[float, float]:
"""
Takes gold annotations and predicted answers and evaluates the predictions for each question
in the gold annotations. Both JSON dictionaries must have query_id keys, which are used to
match predictions to gold annotations (note that these are somewhat deep in the JSON for the
gold annotations, but must be top-level keys in the predicted answers).
The ``annotations`` are assumed to have the format of the dev set in the DROP data release.
The ``predicted_answers`` JSON must be a dictionary keyed by query id, where the value is a string
(or list of strings) that is the answer.
"""
instance_exact_match = []
instance_f1 = []
# for each type as well
type_to_em: Dict[str, List[float]] = defaultdict(list)
type_to_f1: Dict[str, List[float]] = defaultdict(list)
for _, annotation in annotations.items():
for qa_pair in annotation["qa_pairs"]:
query_id = qa_pair["query_id"]
max_em_score = 0.0
max_f1_score = 0.0
max_type = None
if query_id in predicted_answers:
predicted = predicted_answers[query_id]
candidate_answers = [qa_pair["answer"]]
if "validated_answers" in qa_pair and qa_pair["validated_answers"]:
candidate_answers += qa_pair["validated_answers"]
for answer in candidate_answers:
gold_answer, gold_type = answer_json_to_strings(answer)
em_score, f1_score = get_metrics(predicted, gold_answer)
if gold_answer[0].strip() != "":
max_em_score = max(max_em_score, em_score)
max_f1_score = max(max_f1_score, f1_score)
if max_em_score == em_score and max_f1_score == f1_score:
max_type = gold_type
else:
print("Missing prediction for question: {}".format(query_id))
if qa_pair and qa_pair["answer"]:
max_type = answer_json_to_strings(qa_pair["answer"])[1]
else:
max_type = "number"
max_em_score = 0.0
max_f1_score = 0.0
instance_exact_match.append(max_em_score)
instance_f1.append(max_f1_score)
type_to_em[max_type].append(max_em_score)
type_to_f1[max_type].append(max_f1_score)
global_em = np.mean(instance_exact_match)
global_f1 = np.mean(instance_f1)
print("Exact-match accuracy {0:.2f}".format(global_em * 100))
print("F1 score {0:.2f}".format(global_f1 * 100))
print("{0:.2f} & {1:.2f}".format(global_em * 100, global_f1 * 100))
print("----")
total = np.sum([len(v) for v in type_to_em.values()])
for typ in sorted(type_to_em.keys()):
print(
"{0}: {1} ({2:.2f}%)".format(
typ, len(type_to_em[typ]), 100.0 * len(type_to_em[typ]) / total
)
)
print(" Exact-match accuracy {0:.3f}".format(100.0 * np.mean(type_to_em[typ])))
print(" F1 score {0:.3f}".format(100.0 * np.mean(type_to_f1[typ])))
return global_em, global_f1
def evaluate_prediction_file(
prediction_path: str, gold_path: str, output_path: Optional[str] = None
) -> Tuple[float, float]:
"""
Takes a prediction file and a gold file and evaluates the predictions for each question in the
gold file. Both files must be json formatted and must have query_id keys, which are used to
match predictions to gold annotations. The gold file is assumed to have the format of the dev
set in the DROP data release. The prediction file must be a JSON dictionary keyed by query id,
where the value is either a JSON dictionary with an "answer" key, or just a string (or list of
strings) that is the answer. Writes a json with global_em and global_f1 metrics to file at
the specified output path, unless None is passed as output path.
"""
predicted_answers = json.load(open(prediction_path, encoding="utf-8"))
annotations = json.load(open(gold_path, encoding="utf-8"))
global_em, global_f1 = evaluate_json(annotations, predicted_answers)
# Output predictions to file if an output path is given
if output_path is not None:
output_dict = {"global_em": global_em, "global_f1": global_f1}
with open(output_path, "w", encoding="utf8") as outfile:
json.dump(output_dict, outfile)
return (global_em, global_f1)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="evaluate on drop dataset")
parser.add_argument(
"--gold_path",
type=str,
required=False,
default="drop_dataset_test.gold.json",
help="location of the gold file",
)
parser.add_argument(
"--prediction_path",
type=str,
required=False,
default="sample_predictions.json",
help="location of the prediction file",
)
parser.add_argument(
"--output_path",
type=str,
required=False,
default=None,
help="location of the output metrics file",
)
args = parser.parse_args()
evaluate_prediction_file(args.prediction_path, args.gold_path, args.output_path)
| allennlp-models-main | allennlp_models/rc/tools/drop.py |
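# A small worked example for the DROP evaluation helpers above (the answers are made
# up). Numbers are normalized before comparison, and multi-span answers are aligned
# one-to-one before the per-span F1 scores are averaged.
from allennlp_models.rc.tools.drop import answer_json_to_strings, get_metrics

print(get_metrics("12.0", "12"))                            # (1.0, 1.0)
print(get_metrics(["Jones", "Smith"], ["Smith", "Jones"]))  # (1.0, 1.0)
gold_strings, answer_type = answer_json_to_strings({"number": "3", "spans": []})
print(gold_strings, answer_type)                            # ('3',) number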
import json
import logging
from typing import Dict, List
from allennlp.common.file_utils import cached_path
from allennlp.data.dataset_readers.dataset_reader import DatasetReader
from allennlp.data.instance import Instance
from allennlp.data.fields import Field, TextField, ListField, MetadataField, IndexField
from allennlp.data.token_indexers import TokenIndexer, SingleIdTokenIndexer
from allennlp.data.tokenizers import Tokenizer, SpacyTokenizer
logger = logging.getLogger(__name__)
@DatasetReader.register("qangaroo")
class QangarooReader(DatasetReader):
"""
Reads a JSON-formatted Qangaroo file and returns a ``Dataset`` where the ``Instances`` have six
fields: ``candidates``, a ``ListField[TextField]``, ``query``, a ``TextField``, ``supports``, a
    ``ListField[TextField]``, ``answer``, a ``TextField``, and ``answer_index``, an ``IndexField``.
We also add a ``MetadataField`` that stores the instance's ID and annotations if they are present.
# Parameters
tokenizer : `Tokenizer`, optional (default=`SpacyTokenizer()`)
We use this `Tokenizer` for both the question and the passage. See :class:`Tokenizer`.
        Default is ``SpacyTokenizer()``.
token_indexers : `Dict[str, TokenIndexer]`, optional
We similarly use this for both the question and the passage. See :class:`TokenIndexer`.
Default is `{"tokens": SingleIdTokenIndexer()}`.
"""
def __init__(
self,
tokenizer: Tokenizer = None,
token_indexers: Dict[str, TokenIndexer] = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self._tokenizer = tokenizer or SpacyTokenizer()
self._token_indexers = token_indexers or {"tokens": SingleIdTokenIndexer()}
def _read(self, file_path: str):
# if `file_path` is a URL, redirect to the cache
file_path = cached_path(file_path)
logger.info("Reading file at %s", file_path)
with open(file_path) as dataset_file:
dataset = json.load(dataset_file)
logger.info("Reading the dataset")
for sample in dataset:
instance = self.text_to_instance(
sample["candidates"],
sample["query"],
sample["supports"],
sample["id"],
sample["answer"],
sample["annotations"] if "annotations" in sample else [[]],
)
yield instance
def text_to_instance(
self, # type: ignore
candidates: List[str],
query: str,
supports: List[str],
_id: str = None,
answer: str = None,
annotations: List[List[str]] = None,
) -> Instance:
fields: Dict[str, Field] = {}
candidates_field = ListField(
[
TextField(candidate, self._token_indexers)
for candidate in self._tokenizer.batch_tokenize(candidates)
]
)
fields["query"] = TextField(self._tokenizer.tokenize(query), self._token_indexers)
fields["supports"] = ListField(
[
TextField(support, self._token_indexers)
for support in self._tokenizer.batch_tokenize(supports)
]
)
fields["answer"] = TextField(self._tokenizer.tokenize(answer), self._token_indexers)
fields["answer_index"] = IndexField(candidates.index(answer), candidates_field)
fields["candidates"] = candidates_field
fields["metadata"] = MetadataField({"annotations": annotations, "id": _id})
return Instance(fields)
| allennlp-models-main | allennlp_models/rc/dataset_readers/qangaroo.py |
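# A minimal sketch of building a single instance with the QangarooReader above,
# assuming spaCy's English model is available for the default SpacyTokenizer. The
# candidates, query, and support text are made up.
from allennlp_models.rc.dataset_readers.qangaroo import QangarooReader

reader = QangarooReader()
instance = reader.text_to_instance(
    candidates=["paris", "london", "berlin"],
    query="located_in_the_administrative_territorial_entity eiffel tower",
    supports=["The Eiffel Tower is in Paris."],
    _id="example-0",
    answer="paris",
    annotations=[[]],
)
print(sorted(instance.fields.keys()))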
import json
import logging
from typing import Any, Dict, List, Tuple, Optional, Iterable
from allennlp.common.util import sanitize_wordpiece
from allennlp.data.fields import MetadataField, TextField, SpanField, IndexField
from allennlp.common.file_utils import cached_path, open_compressed
from allennlp.data.dataset_readers.dataset_reader import DatasetReader
from allennlp.data.instance import Instance
from allennlp.data.token_indexers import PretrainedTransformerIndexer
from allennlp.data.tokenizers import Token, PretrainedTransformerTokenizer
from allennlp_models.rc.dataset_readers.utils import char_span_to_token_span
logger = logging.getLogger(__name__)
@DatasetReader.register("transformer_squad")
class TransformerSquadReader(DatasetReader):
"""
Dataset reader suitable for JSON-formatted SQuAD-like datasets to be used with a transformer-based
QA model, such as [`TransformerQA`](../../models/transformer_qa#TransformerQA).
It will generate `Instances` with the following fields:
* `question_with_context`, a `TextField` that contains the concatenation of question and context,
    * `answer_span`, a `SpanField` into the `question_with_context` `TextField` denoting the answer.
    * `context_span`, a `SpanField` into the `question_with_context` `TextField` denoting the context, i.e., the part of
      the text that potential answers can come from.
* `cls_index` (optional), an `IndexField` that holds the index of the `[CLS]` token within the
`question_with_context` field. This is needed because the `[CLS]` token is used to indicate
an impossible question. Since most tokenizers/models have the `[CLS]` token as the first
token, this will only be included in the instance if the `[CLS]` token is NOT the first token.
* `metadata`, a `MetadataField` that stores the instance's ID, the original question, the original
passage text, both of these in tokenized form, and the gold answer strings, accessible as
`metadata['id']`, `metadata['question']`, `metadata['context']`, `metadata['question_tokens']`,
`metadata['context_tokens']`, and `metadata['answers']`. This is so that we can more easily use the
official SQuAD evaluation script to get metrics.
For SQuAD v2.0-style datasets that contain impossible questions, we set the gold answer span
to the span of the `[CLS]` token when there are no answers.
We also support limiting the maximum length for the question. When the context+question is too long, we run a
sliding window over the context and emit multiple instances for a single question.
If `skip_impossible_questions` is `True`, then we only emit instances that contain a gold answer.
As a result, the per-instance metrics you get during training and evaluation might not correspond
100% to the SQuAD task.
To get a final number for SQuAD v1.1, you have to run
```
python -m allennlp_models.rc.tools.transformer_qa_eval
```
# Parameters
transformer_model_name : `str`, optional (default=`'bert-base-cased'`)
This reader chooses tokenizer and token indexer according to this setting.
length_limit : `int`, optional (default=`384`)
We will make sure that the length of context+question never exceeds this many word pieces.
stride : `int`, optional (default=`128`)
When context+question are too long for the length limit, we emit multiple instances for one question,
where the context is shifted. This parameter specifies the overlap between the shifted context window. It
is called "stride" instead of "overlap" because that's what it's called in the original huggingface
implementation.
skip_impossible_questions : `bool`, optional (default=`False`)
If this is true, we will skip examples that don't have an answer. This could happen if the question
is marked impossible in the dataset, or if the question+context is truncated according to `length_limit`
such that the context no longer contains a gold answer.
For SQuAD v1.1-style datasets, you should set this to `True` during training, and `False` any other time.
For SQuAD v2.0-style datasets you should leave this as `False`.
max_query_length : `int`, optional (default=`64`)
The maximum number of wordpieces dedicated to the question. If the question is longer than this, it will be
truncated.
"""
def __init__(
self,
transformer_model_name: str = "bert-base-cased",
length_limit: int = 384,
stride: int = 128,
skip_impossible_questions: bool = False,
max_query_length: int = 64,
tokenizer_kwargs: Dict[str, Any] = None,
**kwargs
) -> None:
if "skip_invalid_examples" in kwargs:
import warnings
warnings.warn(
"'skip_invalid_examples' is deprecated, please use 'skip_impossible_questions' instead",
DeprecationWarning,
)
skip_impossible_questions = kwargs.pop("skip_invalid_examples")
super().__init__(
manual_distributed_sharding=True, manual_multiprocess_sharding=True, **kwargs
)
self._tokenizer = PretrainedTransformerTokenizer(
transformer_model_name,
add_special_tokens=False,
tokenizer_kwargs=tokenizer_kwargs,
)
self._token_indexers = {
"tokens": PretrainedTransformerIndexer(
transformer_model_name, tokenizer_kwargs=tokenizer_kwargs
)
}
self.length_limit = length_limit
self.stride = stride
self.skip_impossible_questions = skip_impossible_questions
self.max_query_length = max_query_length
self._cls_token = self._tokenizer.tokenizer.cls_token
# We'll include the `cls_index` IndexField in instances if the CLS token is
# not always the first token.
self._include_cls_index = (
self._find_cls_index(
self._tokenizer.add_special_tokens(
self._tokenizer.tokenize("a"), self._tokenizer.tokenize("a")
)
)
!= 0
)
def _read(self, file_path: str):
# if `file_path` is a URL, redirect to the cache
file_path = cached_path(file_path)
logger.info("Reading file at %s", file_path)
with open_compressed(file_path) as dataset_file:
dataset_json = json.load(dataset_file)
dataset = dataset_json["data"]
logger.info("Reading the dataset")
yielded_question_count = 0
questions_with_more_than_one_instance = 0
for article in dataset:
for paragraph_json in article["paragraphs"]:
context = paragraph_json["context"]
cached_tokenized_context = self._tokenize_context(context)
for question_answer in self.shard_iterable(paragraph_json["qas"]):
answers = [answer_json["text"] for answer_json in question_answer["answers"]]
# Just like huggingface, we only use the first answer for training.
if len(answers) > 0:
first_answer_offset = int(question_answer["answers"][0]["answer_start"])
else:
first_answer_offset = None
instances = self.make_instances(
question_answer.get("id", None),
question_answer["question"],
answers,
context,
first_answer_offset=first_answer_offset,
always_add_answer_span=True,
is_training=True,
cached_tokenized_context=cached_tokenized_context,
)
instances_yielded = 0
for instance in instances:
yield instance
instances_yielded += 1
if instances_yielded > 1:
questions_with_more_than_one_instance += 1
yielded_question_count += 1
if questions_with_more_than_one_instance > 0:
logger.info(
"%d (%.2f%%) questions have more than one instance",
questions_with_more_than_one_instance,
100 * questions_with_more_than_one_instance / yielded_question_count,
)
def _tokenize_context(self, context: str) -> List[Token]:
# tokenize context by spaces first, and then with the wordpiece tokenizer
# For RoBERTa, this produces a bug where every token is marked as beginning-of-sentence. To fix it, we
# detect whether a space comes before a word, and if so, add "a " in front of the word.
def tokenize_slice(start: int, end: int) -> Iterable[Token]:
text_to_tokenize = context[start:end]
if start - 1 >= 0 and context[start - 1].isspace():
prefix = "a " # must end in a space, and be short so we can be sure it becomes only one token
wordpieces = self._tokenizer.tokenize(prefix + text_to_tokenize)
for wordpiece in wordpieces:
if wordpiece.idx is not None:
wordpiece.idx -= len(prefix)
return wordpieces[1:]
else:
return self._tokenizer.tokenize(text_to_tokenize)
tokenized_context = []
token_start = 0
for i, c in enumerate(context):
if c.isspace():
for wordpiece in tokenize_slice(token_start, i):
if wordpiece.idx is not None:
wordpiece.idx += token_start
tokenized_context.append(wordpiece)
token_start = i + 1
for wordpiece in tokenize_slice(token_start, len(context)):
if wordpiece.idx is not None:
wordpiece.idx += token_start
tokenized_context.append(wordpiece)
return tokenized_context
def make_instances(
self,
qid: str,
question: str,
answers: List[str],
context: str,
first_answer_offset: Optional[int],
always_add_answer_span: bool = False,
is_training: bool = False,
cached_tokenized_context: Optional[List[Token]] = None,
) -> Iterable[Instance]:
"""
Create training instances from a SQuAD example.
"""
if cached_tokenized_context is not None:
# In training, we will use the same context in multiple instances, so we use
# cached_tokenized_context to avoid duplicate tokenization
tokenized_context = cached_tokenized_context
else:
# In prediction, no cached_tokenized_context is provided, so we tokenize context here
tokenized_context = self._tokenize_context(context)
if first_answer_offset is None:
(token_answer_span_start, token_answer_span_end) = (-1, -1)
else:
(token_answer_span_start, token_answer_span_end), _ = char_span_to_token_span(
[
(t.idx, t.idx + len(sanitize_wordpiece(t.text))) if t.idx is not None else None
for t in tokenized_context
],
(first_answer_offset, first_answer_offset + len(answers[0])),
)
# Tokenize the question.
tokenized_question = self._tokenizer.tokenize(question)
tokenized_question = tokenized_question[: self.max_query_length]
# Stride over the context, making instances.
space_for_context = (
self.length_limit
- len(tokenized_question)
- len(self._tokenizer.sequence_pair_start_tokens)
- len(self._tokenizer.sequence_pair_mid_tokens)
- len(self._tokenizer.sequence_pair_end_tokens)
)
stride_start = 0
while True:
tokenized_context_window = tokenized_context[stride_start:]
tokenized_context_window = tokenized_context_window[:space_for_context]
window_token_answer_span = (
token_answer_span_start - stride_start,
token_answer_span_end - stride_start,
)
if any(i < 0 or i >= len(tokenized_context_window) for i in window_token_answer_span):
# The answer is not contained in the window.
window_token_answer_span = None
if (
not is_training
or not self.skip_impossible_questions
or window_token_answer_span is not None
):
additional_metadata = {"id": qid}
instance = self.text_to_instance(
question,
tokenized_question,
context,
tokenized_context_window,
answers=answers,
token_answer_span=window_token_answer_span,
additional_metadata=additional_metadata,
always_add_answer_span=always_add_answer_span,
)
yield instance
stride_start += space_for_context
if stride_start >= len(tokenized_context):
break
stride_start -= self.stride
def text_to_instance(
self, # type: ignore
question: str,
tokenized_question: List[Token],
context: str,
tokenized_context: List[Token],
answers: List[str] = None,
token_answer_span: Optional[Tuple[int, int]] = None,
additional_metadata: Dict[str, Any] = None,
always_add_answer_span: bool = False,
) -> Instance:
fields = {}
# make the question field
question_field = TextField(
self._tokenizer.add_special_tokens(tokenized_question, tokenized_context),
)
fields["question_with_context"] = question_field
cls_index = self._find_cls_index(question_field.tokens)
if self._include_cls_index:
fields["cls_index"] = IndexField(cls_index, question_field)
start_of_context = (
len(self._tokenizer.sequence_pair_start_tokens)
+ len(tokenized_question)
+ len(self._tokenizer.sequence_pair_mid_tokens)
)
# make the answer span
if token_answer_span is not None:
assert all(i >= 0 for i in token_answer_span)
assert token_answer_span[0] <= token_answer_span[1]
fields["answer_span"] = SpanField(
token_answer_span[0] + start_of_context,
token_answer_span[1] + start_of_context,
question_field,
)
elif always_add_answer_span:
fields["answer_span"] = SpanField(cls_index, cls_index, question_field)
# make the context span, i.e., the span of text from which possible answers should be drawn
fields["context_span"] = SpanField(
start_of_context, start_of_context + len(tokenized_context) - 1, question_field
)
# make the metadata
metadata = {
"question": question,
"question_tokens": tokenized_question,
"context": context,
"context_tokens": tokenized_context,
"answers": answers or [],
}
if additional_metadata is not None:
metadata.update(additional_metadata)
fields["metadata"] = MetadataField(metadata)
return Instance(fields)
def apply_token_indexers(self, instance: Instance) -> None:
instance["question_with_context"].token_indexers = self._token_indexers
def _find_cls_index(self, tokens: List[Token]) -> int:
return next(i for i, t in enumerate(tokens) if t.text == self._cls_token)
| allennlp-models-main | allennlp_models/rc/dataset_readers/transformer_squad.py |
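# A minimal sketch of turning one SQuAD-style question into instances with the
# TransformerSquadReader above. This downloads the bert-base-cased tokenizer on
# first use; the question, context, and answer offset are made up.
from allennlp_models.rc.dataset_readers.transformer_squad import TransformerSquadReader

reader = TransformerSquadReader(transformer_model_name="bert-base-cased")
context = "The Norman dynasty had a major political, cultural and military impact on medieval Europe."
instances = list(
    reader.make_instances(
        qid="q0",
        question="Which dynasty had a major impact on medieval Europe?",
        answers=["The Norman dynasty"],
        context=context,
        first_answer_offset=0,
    )
)
print(len(instances), list(instances[0].fields.keys()))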
"""
Dataset reader for SuperGLUE's Reading Comprehension with Commonsense Reasoning task
(Zhang et al., 2018).
Reader Implemented by Gabriel Orlanski
"""
import logging
from typing import Dict, List, Optional, Iterable, Union, Tuple, Any
from pathlib import Path
from allennlp.common.util import sanitize_wordpiece
from allennlp.common.file_utils import cached_path
from allennlp.data.dataset_readers.dataset_reader import DatasetReader
from allennlp.data.fields import MetadataField, TextField, SpanField
from allennlp.data.instance import Instance
from allennlp_models.rc.dataset_readers.utils import char_span_to_token_span
from allennlp.data.token_indexers import PretrainedTransformerIndexer
from allennlp.data.tokenizers import Token, PretrainedTransformerTokenizer
import json
logger = logging.getLogger(__name__)
__all__ = ["RecordTaskReader"]
# TODO: Optimize this reader
@DatasetReader.register("superglue_record")
class RecordTaskReader(DatasetReader):
"""
Reader for Reading Comprehension with Commonsense Reasoning(ReCoRD) task from SuperGLUE. The
task is detailed in the paper ReCoRD: Bridging the Gap between Human and Machine Commonsense
Reading Comprehension (arxiv.org/pdf/1810.12885.pdf) by Zhang et al. Leaderboards and the
    official evaluation script for the ReCoRD task can be found at sheng-z.github.io/ReCoRD-explorer/.
The reader reads a JSON file in the format from
sheng-z.github.io/ReCoRD-explorer/dataset-readme.txt
# Parameters
    transformer_model_name : `str`, optional (default=`'bert-base-cased'`)
        This reader chooses tokenizer and token indexer according to this setting.
    length_limit : `int`, optional (default=`384`)
        We will make sure that the length of context+question never exceeds this many word pieces.
    question_length_limit : `int`, optional (default=`64`)
        The maximum number of wordpieces dedicated to the question. If the question is longer than
        this, it will be truncated.
    stride : `int`, optional (default=`128`)
        When context+question are too long for the length limit, we emit multiple instances for one
        question, where the context is shifted. This parameter specifies the overlap between the
        shifted context windows.
    tokenizer_kwargs : `Dict[str, Any]`, optional
        Additional keyword arguments passed to the tokenizer and token indexer.
    one_instance_per_query : `bool`, optional (default=`False`)
        If `True`, at most one instance is generated for each query.
raise_errors: `bool`, optional (default=`False`)
If the reader should raise errors or just continue.
kwargs: `Dict`
Keyword arguments to be passed to the DatasetReader parent class constructor.
"""
def __init__(
self,
transformer_model_name: str = "bert-base-cased",
length_limit: int = 384,
question_length_limit: int = 64,
stride: int = 128,
raise_errors: bool = False,
tokenizer_kwargs: Dict[str, Any] = None,
one_instance_per_query: bool = False,
max_instances: int = None,
**kwargs,
) -> None:
"""
Initialize the RecordTaskReader.
"""
super(RecordTaskReader, self).__init__(
manual_distributed_sharding=True, max_instances=max_instances, **kwargs
)
self._kwargs = kwargs
self._model_name = transformer_model_name
self._tokenizer_kwargs = tokenizer_kwargs or {}
# Save the values passed to __init__ to protected attributes
self._tokenizer = PretrainedTransformerTokenizer(
transformer_model_name,
add_special_tokens=False,
tokenizer_kwargs=tokenizer_kwargs,
)
self._token_indexers = {
"tokens": PretrainedTransformerIndexer(
transformer_model_name, tokenizer_kwargs=tokenizer_kwargs
)
}
self._length_limit = length_limit
self._query_len_limit = question_length_limit
self._stride = stride
self._raise_errors = raise_errors
self._cls_token = "@placeholder"
self._one_instance_per_query = one_instance_per_query
def _to_params(self) -> Dict[str, Any]:
"""
Get the configuration dictionary for this class.
# Returns
`Dict[str, Any]` The config dict.
"""
return {
"type": "superglue_record",
"transformer_model_name": self._model_name,
"length_limit": self._length_limit,
"question_length_limit": self._query_len_limit,
"stride": self._stride,
"raise_errors": self._raise_errors,
"tokenizer_kwargs": self._tokenizer_kwargs,
"one_instance_per_query": self._one_instance_per_query,
"max_instances": self.max_instances,
**self._kwargs,
}
def _read(self, file_path: Union[Path, str]) -> Iterable[Instance]:
        # If `file_path` is a URL, redirect to the cache
file_path = cached_path(file_path)
# Read the 'data' key from the dataset
logger.info(f"Reading '{file_path}'")
with open(file_path) as fp:
dataset = json.load(fp)["data"]
logger.info(f"Found {len(dataset)} examples from '{file_path}'")
        # Keep track of certain stats while reading the file:
        # examples_multiple_instance_count: The number of examples that produced more than
        #   one instance. This can happen because there are multiple queries for a
        #   single passage.
        # passages_yielded: The total number of instances found/yielded.
examples_multiple_instance_count = 0
examples_no_instance_count = 0
passages_yielded = 0
# Iterate through every example from the ReCoRD data file.
for example in dataset:
# Get the list of instances for the current example
instances_for_example = self.get_instances_from_example(example)
            # Keep track of the number of instances yielded for this specific
            # example. Since `instances_for_example` is a generator, we do not know
            # its length ahead of time, so we keep a counter.
instance_count = 0
# Iterate through the instances and yield them.
for instance in instances_for_example:
yield instance
instance_count += 1
if instance_count == 0:
logger.warning(f"Example '{example['id']}' had no instances.")
examples_no_instance_count += 1
            # If this example produced more than one instance, increment
            # examples_multiple_instance_count.
examples_multiple_instance_count += 1 if instance_count > 1 else 0
passages_yielded += instance_count
# Check to see if we are over the max_instances to yield.
if self.max_instances and passages_yielded > self.max_instances:
logger.info("Passed max instances")
break
# Log pertinent information.
if passages_yielded:
logger.info(
f"{examples_multiple_instance_count}/{passages_yielded} "
f"({examples_multiple_instance_count / passages_yielded * 100:.2f}%) "
f"examples had more than one instance"
)
logger.info(
f"{examples_no_instance_count}/{passages_yielded} "
f"({examples_no_instance_count / passages_yielded * 100:.2f}%) "
f"examples had no instances"
)
else:
logger.warning(f"Could not find any instances in '{file_path}'")
def get_instances_from_example(
self, example: Dict, always_add_answer_span: bool = False
) -> Iterable[Instance]:
"""
Helper function to get instances from an example.
Much of this comes from `transformer_squad.make_instances`
# Parameters
example: `Dict[str,Any]`
The example dict.
# Returns:
`Iterable[Instance]` The instances for each example
"""
        # Get the passage dict from the example; it has the text and
        # the entities.
example_id: str = example["id"]
passage_dict: Dict = example["passage"]
passage_text: str = passage_dict["text"]
# Tokenize the passage
tokenized_passage: List[Token] = self.tokenize_str(passage_text)
# TODO: Determine what to do with entities. Superglue marks them
# explicitly as input (https://arxiv.org/pdf/1905.00537.pdf)
# Get the queries from the example dict
queries: List = example["qas"]
logger.debug(f"{len(queries)} queries for example {example_id}")
        # Tokenize and get the context windows for each query
for query in queries:
# Create the additional metadata dict that will be passed w/ extra
# data for each query. We store the question & query ids, all
# answers, and other data following `transformer_qa`.
additional_metadata = {
"id": query["id"],
"example_id": example_id,
}
instances_yielded = 0
# Tokenize, and truncate, the query based on the max set in
# `__init__`
tokenized_query = self.tokenize_str(query["query"])[: self._query_len_limit]
            # Calculate how many tokens are available for the context, given the
            # overall length limit (transformers can only take a bounded number of
            # tokens), the length of the query, and the special tokens the
            # tokenizer will add.
space_for_context = (
self._length_limit
- len(list(tokenized_query))
# Used getattr so I can test without having to load a
# transformer model.
- len(getattr(self._tokenizer, "sequence_pair_start_tokens", []))
- len(getattr(self._tokenizer, "sequence_pair_mid_tokens", []))
- len(getattr(self._tokenizer, "sequence_pair_end_tokens", []))
)
            # Get the answers for this query. If there are none, skip the query
            # entirely.
answers = query.get("answers", [])
if not answers:
logger.warning(f"Skipping {query['id']}, no answers")
continue
# Create the arguments needed for `char_span_to_token_span`
token_offsets = [
(t.idx, t.idx + len(sanitize_wordpiece(t.text))) if t.idx is not None else None
for t in tokenized_passage
]
# Get the token offsets for the answers for this current passage.
answer_token_start, answer_token_end = (-1, -1)
for answer in answers:
# Try to find the offsets.
offsets, _ = char_span_to_token_span(
token_offsets, (answer["start"], answer["end"])
)
# If offsets for an answer were found, it means the answer is in
# the passage, and thus we can stop looking.
if offsets != (-1, -1):
answer_token_start, answer_token_end = offsets
break
# Go through the context and find the window that has the answer in it.
stride_start = 0
while True:
tokenized_context_window = tokenized_passage[stride_start:]
tokenized_context_window = tokenized_context_window[:space_for_context]
# Get the token offsets w.r.t the current window.
window_token_answer_span = (
answer_token_start - stride_start,
answer_token_end - stride_start,
)
if any(
i < 0 or i >= len(tokenized_context_window) for i in window_token_answer_span
):
# The answer is not contained in the window.
window_token_answer_span = None
                if (
                    # not self.skip_impossible_questions
                    window_token_answer_span is not None
                ):
# The answer WAS found in the context window, and thus we
# can make an instance for the answer.
instance = self.text_to_instance(
query["query"],
tokenized_query,
passage_text,
tokenized_context_window,
answers=[answer["text"] for answer in answers],
token_answer_span=window_token_answer_span,
additional_metadata=additional_metadata,
always_add_answer_span=always_add_answer_span,
)
yield instance
instances_yielded += 1
if instances_yielded == 1 and self._one_instance_per_query:
break
stride_start += space_for_context
# If we have reached the end of the passage, stop.
if stride_start >= len(tokenized_passage):
break
                # Back the window start up by the stride so that consecutive
                # windows overlap; this gives answers near a window boundary a
                # chance to fall entirely inside some window.
stride_start -= self._stride
def tokenize_slice(self, text: str, start: int = None, end: int = None) -> Iterable[Token]:
"""
Get + tokenize a span from a source text.
*Originally from the `transformer_squad.py`*
# Parameters
text: `str`
The text to draw from.
start: `int`
The start index for the span.
end: `int`
            The end index for the span (exclusive, as in a normal Python slice).
# Returns
`Iterable[Token]` List of tokens for the retrieved span.
"""
start = start or 0
end = end or len(text)
text_to_tokenize = text[start:end]
        # Check whether the slice is preceded by whitespace. If it is, we need to
        # tokenize it in a special way because of a bug with the RoBERTa
        # tokenizer's handling of leading spaces.
if start - 1 >= 0 and text[start - 1].isspace():
# Per the original tokenize_slice function, you need to add a
# garbage token before the actual text you want to tokenize so that
# the tokenizer does not add a beginning of sentence token.
prefix = "a "
# Tokenize the combined prefix and text
wordpieces = self._tokenizer.tokenize(prefix + text_to_tokenize)
# Go through each wordpiece in the tokenized wordpieces.
for wordpiece in wordpieces:
# Because we added the garbage prefix before tokenize, we need
# to adjust the idx such that it accounts for this. Therefore we
# subtract the length of the prefix from each token's idx.
if wordpiece.idx is not None:
wordpiece.idx -= len(prefix)
# We do not want the garbage token, so we return all but the first
# token.
return wordpieces[1:]
else:
# Do not need any sort of prefix, so just return all of the tokens.
return self._tokenizer.tokenize(text_to_tokenize)
def tokenize_str(self, text: str) -> List[Token]:
"""
Helper method to tokenize a string.
Adapted from the `transformer_squad.make_instances`
# Parameters
text: `str`
The string to tokenize.
# Returns
        `List[Token]` The resulting tokens.
"""
# We need to keep track of the current token index so that we can update
# the results from self.tokenize_slice such that they reflect their
# actual position in the string rather than their position in the slice
# passed to tokenize_slice. Also used to construct the slice.
token_index = 0
# Create the output list (can be any iterable) that will store the
# tokens we found.
tokenized_str = []
# Helper function to update the `idx` and add every wordpiece in the
# `tokenized_slice` to the `tokenized_str`.
def add_wordpieces(tokenized_slice: Iterable[Token]) -> None:
for wordpiece in tokenized_slice:
if wordpiece.idx is not None:
wordpiece.idx += token_index
tokenized_str.append(wordpiece)
# Iterate through every character and their respective index in the text
# to create the slices to tokenize.
for i, c in enumerate(text):
# Check if the current character is a space. If it is, we tokenize
# the slice of `text` from `token_index` to `i`.
if c.isspace():
add_wordpieces(self.tokenize_slice(text, token_index, i))
token_index = i + 1
# Add the end slice that is not collected by the for loop.
add_wordpieces(self.tokenize_slice(text, token_index, len(text)))
return tokenized_str
@staticmethod
def get_spans_from_text(text: str, spans: List[Tuple[int, int]]) -> List[str]:
"""
Helper function to get a span from a string
        # Parameters
text: `str`
The source string
spans: `List[Tuple[int,int]]`
List of start and end indices for spans.
Assumes that the end index is inclusive. Therefore, for start
index `i` and end index `j`, retrieves the span at `text[i:j+1]`.
# Returns
        `List[str]` The extracted strings from the text.
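        For example, `get_spans_from_text("The quick brown fox", [(4, 8)])` would
        return `["quick"]`.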
"""
return [text[start : end + 1] for start, end in spans]
def text_to_instance(
self,
query: str,
tokenized_query: List[Token],
passage: str,
tokenized_passage: List[Token],
answers: List[str],
token_answer_span: Optional[Tuple[int, int]] = None,
additional_metadata: Optional[Dict[str, Any]] = None,
always_add_answer_span: Optional[bool] = False,
) -> Instance:
"""
A lot of this comes directly from the `transformer_squad.text_to_instance`
"""
fields = {}
# Create the query field from the tokenized question and context. Use
# `self._tokenizer.add_special_tokens` function to add the necessary
# special tokens to the query.
query_field = TextField(
self._tokenizer.add_special_tokens(
# The `add_special_tokens` function automatically adds in the
# separation token to mark the separation between the two lists of
# tokens. Therefore, we can create the query field WITH context
# through passing them both as arguments.
tokenized_query,
tokenized_passage,
),
self._token_indexers,
)
# Add the query field to the fields dict that will be outputted as an
# instance. Do it here rather than assign above so that we can use
# attributes from `query_field` rather than continuously indexing
# `fields`.
fields["question_with_context"] = query_field
# Calculate the index that marks the start of the context.
start_of_context = (
            len(tokenized_query)
# Used getattr so I can test without having to load a
# transformer model.
+ len(getattr(self._tokenizer, "sequence_pair_start_tokens", []))
+ len(getattr(self._tokenizer, "sequence_pair_mid_tokens", []))
)
# make the answer span
if token_answer_span is not None:
assert all(i >= 0 for i in token_answer_span)
assert token_answer_span[0] <= token_answer_span[1]
fields["answer_span"] = SpanField(
token_answer_span[0] + start_of_context,
token_answer_span[1] + start_of_context,
query_field,
)
# make the context span, i.e., the span of text from which possible
# answers should be drawn
fields["context_span"] = SpanField(
start_of_context, start_of_context + len(tokenized_passage) - 1, query_field
)
# make the metadata
metadata = {
"question": query,
"question_tokens": tokenized_query,
"context": passage,
"context_tokens": tokenized_passage,
"answers": answers or [],
}
if additional_metadata is not None:
metadata.update(additional_metadata)
fields["metadata"] = MetadataField(metadata)
return Instance(fields)
def _find_cls_index(self, tokens: List[Token]) -> int:
"""
        Find the index of the first token whose text matches `self._cls_token`
        (the "@placeholder" token for ReCoRD).
        Adapted from `transformer_squad`.
"""
return next(i for i, t in enumerate(tokens) if t.text == self._cls_token)
| allennlp-models-main | allennlp_models/rc/dataset_readers/record_reader.py |
from allennlp_models.rc.dataset_readers.drop import DropReader
from allennlp_models.rc.dataset_readers.qangaroo import QangarooReader
from allennlp_models.rc.dataset_readers.quac import QuACReader
from allennlp_models.rc.dataset_readers.squad import SquadReader
from allennlp_models.rc.dataset_readers.transformer_squad import TransformerSquadReader
from allennlp_models.rc.dataset_readers.triviaqa import TriviaQaReader
| allennlp-models-main | allennlp_models/rc/dataset_readers/__init__.py |
import json
import logging
from typing import Any, Dict, List, Tuple, Optional, Iterable
from allennlp.common.file_utils import cached_path
from allennlp.data.dataset_readers.dataset_reader import DatasetReader
from allennlp.data.instance import Instance
from allennlp.data.token_indexers import SingleIdTokenIndexer, TokenIndexer
from allennlp.data.tokenizers import Token, Tokenizer, SpacyTokenizer
from allennlp_models.rc.dataset_readers import utils
logger = logging.getLogger(__name__)
SQUAD2_NO_ANSWER_TOKEN = "@@<NO_ANSWER>@@"
"""
The default `no_answer_token` for the [`squad2`](#squad2) reader.
"""
@DatasetReader.register("squad")
class SquadReader(DatasetReader):
"""
!!! Note
If you're training on SQuAD v1.1 you should use the [`squad1()`](#squad1) classmethod
to instantiate this reader, and for SQuAD v2.0 you should use the
[`squad2()`](#squad2) classmethod.
Also, for transformer-based models you should be using the
[`TransformerSquadReader`](../transformer_squad#transformersquadreader).
Dataset reader suitable for JSON-formatted SQuAD-like datasets.
It will generate `Instances` with the following fields:
- `question`, a `TextField`,
- `passage`, another `TextField`,
- `span_start` and `span_end`, both `IndexFields` into the `passage` `TextField`,
- and `metadata`, a `MetadataField` that stores the instance's ID, the original passage text,
gold answer strings, and token offsets into the original passage, accessible as `metadata['id']`,
`metadata['original_passage']`, `metadata['answer_texts']` and
`metadata['token_offsets']`, respectively. This is so that we can more easily use the official
SQuAD evaluation scripts to get metrics.
    We also support limiting the maximum length for both passage and question. However, some gold
    answer spans may exceed the maximum passage length, which would cause errors when making
    instances. We simply skip these spans to avoid errors. If all of the gold answer spans of an
    example are skipped, we skip the example during training. During validation or testing, since
    we cannot skip examples, we use the last token as a pseudo gold answer span instead. The
    computed loss will not be accurate as a result, but this does not affect answer evaluation,
    because we keep all the original gold answer texts.
# Parameters
tokenizer : `Tokenizer`, optional (default=`SpacyTokenizer()`)
We use this `Tokenizer` for both the question and the passage. See :class:`Tokenizer`.
Default is `SpacyTokenizer()`.
token_indexers : `Dict[str, TokenIndexer]`, optional
We similarly use this for both the question and the passage. See :class:`TokenIndexer`.
Default is `{"tokens": SingleIdTokenIndexer()}`.
passage_length_limit : `int`, optional (default=`None`)
If specified, we will cut the passage if the length of passage exceeds this limit.
question_length_limit : `int`, optional (default=`None`)
If specified, we will cut the question if the length of question exceeds this limit.
skip_impossible_questions: `bool`, optional (default=`False`)
        If this is true, we will skip examples whose answer spans cannot be found in the passage.
no_answer_token: `Optional[str]`, optional (default=`None`)
A special token to append to each context. If using a SQuAD 2.0-style dataset, this
should be set, otherwise an exception will be raised if an impossible question is
encountered.
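    # Example
    A minimal usage sketch (the file paths are hypothetical):
    ```python
    reader = SquadReader.squad1()
    instances = reader.read("/path/to/squad/train-v1.1.json")  # hypothetical path
    reader_v2 = SquadReader.squad2()  # appends the no-answer token to each passage
    instances_v2 = reader_v2.read("/path/to/squad/train-v2.0.json")  # hypothetical path
    ```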
"""
def __init__(
self,
tokenizer: Tokenizer = None,
token_indexers: Dict[str, TokenIndexer] = None,
passage_length_limit: int = None,
question_length_limit: int = None,
skip_impossible_questions: bool = False,
no_answer_token: Optional[str] = None,
**kwargs,
) -> None:
if "skip_invalid_examples" in kwargs:
import warnings
warnings.warn(
"'skip_invalid_examples' is deprecated, please use 'skip_impossible_questions' instead",
DeprecationWarning,
)
skip_impossible_questions = kwargs.pop("skip_invalid_examples")
super().__init__(
manual_distributed_sharding=True, manual_multiprocess_sharding=True, **kwargs
)
self._tokenizer = tokenizer or SpacyTokenizer()
self._token_indexers = token_indexers or {"tokens": SingleIdTokenIndexer()}
self.passage_length_limit = passage_length_limit
self.question_length_limit = question_length_limit
self.skip_impossible_questions = skip_impossible_questions
self.no_answer_token = no_answer_token
def _read(self, file_path: str):
# if `file_path` is a URL, redirect to the cache
file_path = cached_path(file_path)
logger.info("Reading file at %s", file_path)
with open(file_path) as dataset_file:
dataset_json = json.load(dataset_file)
dataset = dataset_json["data"]
logger.info("Reading the dataset")
for article in dataset:
for paragraph_json in article["paragraphs"]:
paragraph = paragraph_json["context"]
tokenized_paragraph = self._tokenizer.tokenize(paragraph)
for question_answer in self.shard_iterable(paragraph_json["qas"]):
question_text = question_answer["question"].strip().replace("\n", "")
is_impossible = question_answer.get("is_impossible", False)
if is_impossible:
answer_texts: List[str] = []
span_starts: List[int] = []
span_ends: List[int] = []
else:
answer_texts = [answer["text"] for answer in question_answer["answers"]]
span_starts = [
answer["answer_start"] for answer in question_answer["answers"]
]
span_ends = [
start + len(answer) for start, answer in zip(span_starts, answer_texts)
]
additional_metadata = {"id": question_answer.get("id", None)}
instance = self.text_to_instance(
question_text,
paragraph,
is_impossible=is_impossible,
char_spans=zip(span_starts, span_ends),
answer_texts=answer_texts,
passage_tokens=tokenized_paragraph,
additional_metadata=additional_metadata,
)
if instance is not None:
yield instance
def text_to_instance(
self, # type: ignore
question_text: str,
passage_text: str,
is_impossible: bool = None,
char_spans: Iterable[Tuple[int, int]] = None,
answer_texts: List[str] = None,
passage_tokens: List[Token] = None,
additional_metadata: Dict[str, Any] = None,
) -> Optional[Instance]:
if not passage_tokens:
passage_tokens = self._tokenizer.tokenize(passage_text)
if self.no_answer_token is not None:
if self.passage_length_limit is not None:
passage_tokens = passage_tokens[: self.passage_length_limit - 1]
passage_tokens = passage_tokens + [
Token(
text=self.no_answer_token,
idx=passage_tokens[-1].idx + len(passage_tokens[-1].text) + 1, # type: ignore
lemma_=self.no_answer_token,
)
]
elif self.passage_length_limit is not None:
passage_tokens = passage_tokens[: self.passage_length_limit]
question_tokens = self._tokenizer.tokenize(question_text)
if self.question_length_limit is not None:
question_tokens = question_tokens[: self.question_length_limit]
if is_impossible:
if self.no_answer_token is None:
                raise ValueError(
                    "This is a SQuAD 2.0 dataset, yet you are using a SQuAD reader that has 'no_answer_token' "
"set to `None`. "
"Consider specifying the 'no_answer_token' or using the 'squad2' reader instead, which "
f"by default uses '{SQUAD2_NO_ANSWER_TOKEN}' as the 'no_answer_token'."
)
answer_texts = [self.no_answer_token]
token_spans: List[Tuple[int, int]] = [
(len(passage_tokens) - 1, len(passage_tokens) - 1)
]
else:
char_spans = char_spans or []
# We need to convert character indices in `passage_text` to token indices in
# `passage_tokens`, as the latter is what we'll actually use for supervision.
token_spans = []
passage_offsets = [(token.idx, token.idx + len(token.text)) for token in passage_tokens]
for char_span_start, char_span_end in char_spans:
if char_span_end > passage_offsets[-1][1]:
continue
(span_start, span_end), error = utils.char_span_to_token_span(
passage_offsets, (char_span_start, char_span_end)
)
if error:
logger.debug("Passage: %s", passage_text)
logger.debug("Passage tokens (with no-answer): %s", passage_tokens)
logger.debug("Question text: %s", question_text)
logger.debug("Answer span: (%d, %d)", char_span_start, char_span_end)
logger.debug("Token span: (%d, %d)", span_start, span_end)
logger.debug(
"Tokens in answer: %s",
passage_tokens[span_start : span_end + 1],
)
logger.debug("Answer: %s", passage_text[char_span_start:char_span_end])
token_spans.append((span_start, span_end))
            # All of the original answer spans were filtered out
if char_spans and not token_spans:
if self.skip_impossible_questions:
return None
else:
if self.no_answer_token is not None:
answer_texts = [self.no_answer_token]
token_spans.append(
(
len(passage_tokens) - 1,
len(passage_tokens) - 1,
)
)
return utils.make_reading_comprehension_instance(
question_tokens,
passage_tokens,
self._token_indexers,
passage_text,
token_spans,
answer_texts,
additional_metadata,
)
@classmethod
def squad1(
cls,
tokenizer: Tokenizer = None,
token_indexers: Dict[str, TokenIndexer] = None,
passage_length_limit: int = None,
question_length_limit: int = None,
skip_impossible_questions: bool = False,
**kwargs,
) -> "SquadReader":
"""
Gives a `SquadReader` suitable for SQuAD v1.1.
"""
return cls(
tokenizer=tokenizer,
token_indexers=token_indexers,
passage_length_limit=passage_length_limit,
question_length_limit=question_length_limit,
skip_impossible_questions=skip_impossible_questions,
no_answer_token=None,
**kwargs,
)
@classmethod
def squad2(
cls,
tokenizer: Tokenizer = None,
token_indexers: Dict[str, TokenIndexer] = None,
passage_length_limit: int = None,
question_length_limit: int = None,
skip_impossible_questions: bool = False,
no_answer_token: str = SQUAD2_NO_ANSWER_TOKEN,
**kwargs,
) -> "SquadReader":
"""
Gives a `SquadReader` suitable for SQuAD v2.0.
"""
return cls(
tokenizer=tokenizer,
token_indexers=token_indexers,
passage_length_limit=passage_length_limit,
question_length_limit=question_length_limit,
skip_impossible_questions=skip_impossible_questions,
no_answer_token=no_answer_token,
**kwargs,
)
DatasetReader.register("squad1", constructor="squad1")(SquadReader)
DatasetReader.register("squad2", constructor="squad2")(SquadReader)
| allennlp-models-main | allennlp_models/rc/dataset_readers/squad.py |
"""
Utilities for reading comprehension dataset readers.
"""
from collections import Counter, defaultdict
import logging
import string
from typing import Any, Dict, List, Tuple, Optional, Union
from allennlp.data.fields import (
Field,
TextField,
IndexField,
MetadataField,
LabelField,
ListField,
SequenceLabelField,
)
from allennlp.data.instance import Instance
from allennlp.data.token_indexers import TokenIndexer
from allennlp.data.tokenizers import Token
logger = logging.getLogger(__name__)
# These are tokens and characters that are stripped by the standard SQuAD and TriviaQA evaluation
# scripts.
IGNORED_TOKENS = {"a", "an", "the"}
STRIPPED_CHARACTERS = string.punctuation + "".join(["‘", "’", "´", "`", "_"])
def normalize_text(text: str) -> str:
"""
Performs a normalization that is very similar to that done by the normalization functions in
SQuAD and TriviaQA.
This involves splitting and rejoining the text, and could be a somewhat expensive operation.
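    For example, `normalize_text("The United States!")` would return `"united states"`.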
"""
return " ".join(
[
token
for token in text.lower().strip(STRIPPED_CHARACTERS).split()
if token not in IGNORED_TOKENS
]
)
def char_span_to_token_span(
token_offsets: List[Optional[Tuple[int, int]]], character_span: Tuple[int, int]
) -> Tuple[Tuple[int, int], bool]:
"""
Converts a character span from a passage into the corresponding token span in the tokenized
version of the passage. If you pass in a character span that does not correspond to complete
tokens in the tokenized version, we'll do our best, but the behavior is officially undefined.
We return an error flag in this case, and have some debug logging so you can figure out the
cause of this issue (in SQuAD, these are mostly either tokenization problems or annotation
problems; there's a fair amount of both).
The basic outline of this method is to find the token span that has the same offsets as the
input character span. If the tokenizer tokenized the passage correctly and has matching
offsets, this is easy. We try to be a little smart about cases where they don't match exactly,
but mostly just find the closest thing we can.
The returned ``(begin, end)`` indices are `inclusive` for both ``begin`` and ``end``.
So, for example, ``(2, 2)`` is the one word span beginning at token index 2, ``(3, 4)`` is the
two-word span beginning at token index 3, and so on.
Returns
-------
token_span : ``Tuple[int, int]``
`Inclusive` span start and end token indices that match as closely as possible to the input
character spans.
error : ``bool``
Whether there was an error while matching the token spans exactly. If this is ``True``, it
means there was an error in either the tokenization or the annotated character span. If this
is ``False``, it means that we found tokens that match the character span exactly.
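    As a rough illustration, with ``token_offsets == [(0, 3), (4, 9)]`` (the passage "The quick")
    and ``character_span == (4, 9)``, this would return ``((1, 1), False)``.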
"""
error = False
start_index = 0
while start_index < len(token_offsets) and (
token_offsets[start_index] is None or token_offsets[start_index][0] < character_span[0]
):
start_index += 1
# If we overshot and the token prior to start_index ends after the first character, back up.
if (
start_index > 0
and token_offsets[start_index - 1] is not None
and token_offsets[start_index - 1][1] > character_span[0]
) or (
        start_index < len(token_offsets)
and token_offsets[start_index] is not None
and token_offsets[start_index][0] > character_span[0]
):
start_index -= 1
if start_index >= len(token_offsets):
raise ValueError("Could not find the start token given the offsets.")
if token_offsets[start_index] is None or token_offsets[start_index][0] != character_span[0]:
error = True
end_index = start_index
while end_index < len(token_offsets) and (
token_offsets[end_index] is None or token_offsets[end_index][1] < character_span[1]
):
end_index += 1
if end_index == len(token_offsets):
# We want a character span that goes beyond the last token. Let's see if this is salvageable.
# We consider this salvageable if the span we're looking for starts before the last token ends.
# In other words, we don't salvage if the whole span comes after the tokens end.
if character_span[0] < token_offsets[-1][1]:
# We also want to make sure we aren't way off. We need to be within 8 characters to salvage.
if character_span[1] - 8 < token_offsets[-1][1]:
end_index -= 1
if end_index >= len(token_offsets):
        raise ValueError(
            f"Character span {character_span!r} outside the range of the given tokens."
        )
if end_index == start_index and token_offsets[end_index][1] > character_span[1]:
# Looks like there was a token that should have been split, like "1854-1855", where the
# answer is "1854". We can't do much in this case, except keep the answer as the whole
# token.
logger.debug("Bad tokenization - end offset doesn't match")
elif token_offsets[end_index][1] > character_span[1]:
# This is a case where the given answer span is more than one token, and the last token is
# cut off for some reason, like "split with Luckett and Rober", when the original passage
# said "split with Luckett and Roberson". In this case, we'll just keep the end index
# where it is, and assume the intent was to mark the whole token.
logger.debug("Bad labelling or tokenization - end offset doesn't match")
if token_offsets[end_index][1] != character_span[1]:
error = True
return (start_index, end_index), error
def find_valid_answer_spans(
passage_tokens: List[Token], answer_texts: List[str]
) -> List[Tuple[int, int]]:
"""
Finds a list of token spans in ``passage_tokens`` that match the given ``answer_texts``. This
tries to find all spans that would evaluate to correct given the SQuAD and TriviaQA official
evaluation scripts, which do some normalization of the input text.
Note that this could return duplicate spans! The caller is expected to be able to handle
possible duplicates (as already happens in the SQuAD dev set, for instance).
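    For example, for the tokenized passage "The Eiffel Tower is in Paris ." and
    ``answer_texts == ["Paris"]``, this would return ``[(5, 5)]``.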
"""
normalized_tokens = [token.text.lower().strip(STRIPPED_CHARACTERS) for token in passage_tokens]
# Because there could be many `answer_texts`, we'll do the most expensive pre-processing
# step once. This gives us a map from tokens to the position in the passage they appear.
word_positions: Dict[str, List[int]] = defaultdict(list)
for i, token in enumerate(normalized_tokens):
word_positions[token].append(i)
spans = []
for answer_text in answer_texts:
# For each answer, we'll first find all valid start positions in the passage. Then
# we'll grow each span to the same length as the number of answer tokens, and see if we
# have a match. We're a little tricky as we grow the span, skipping words that are
# already pruned from the normalized answer text, and stopping early if we don't match.
answer_tokens = answer_text.lower().strip(STRIPPED_CHARACTERS).split()
num_answer_tokens = len(answer_tokens)
for span_start in word_positions[answer_tokens[0]]:
span_end = span_start # span_end is _inclusive_
answer_index = 1
while answer_index < num_answer_tokens and span_end + 1 < len(normalized_tokens):
token = normalized_tokens[span_end + 1]
if answer_tokens[answer_index] == token:
answer_index += 1
span_end += 1
elif token in IGNORED_TOKENS:
span_end += 1
else:
break
if num_answer_tokens == answer_index:
spans.append((span_start, span_end))
return spans
def make_reading_comprehension_instance(
question_tokens: List[Token],
passage_tokens: List[Token],
token_indexers: Dict[str, TokenIndexer],
passage_text: str,
token_spans: List[Tuple[int, int]] = None,
answer_texts: List[str] = None,
additional_metadata: Dict[str, Any] = None,
) -> Instance:
"""
Converts a question, a passage, and an optional answer (or answers) to an ``Instance`` for use
in a reading comprehension model.
Creates an ``Instance`` with at least these fields: ``question`` and ``passage``, both
``TextFields``; and ``metadata``, a ``MetadataField``. Additionally, if both ``answer_texts``
and ``char_span_starts`` are given, the ``Instance`` has ``span_start`` and ``span_end``
fields, which are both ``IndexFields``.
# Parameters
question_tokens : `List[Token]`
An already-tokenized question.
passage_tokens : `List[Token]`
An already-tokenized passage that contains the answer to the given question.
token_indexers : `Dict[str, TokenIndexer]`
Determines how the question and passage `TextFields` will be converted into tensors that
get input to a model. See :class:`TokenIndexer`.
passage_text : `str`
The original passage text. We need this so that we can recover the actual span from the
original passage that the model predicts as the answer to the question. This is used in
official evaluation scripts.
token_spans : `List[Tuple[int, int]]`, optional
Indices into ``passage_tokens`` to use as the answer to the question for training. This is
a list because there might be several possible correct answer spans in the passage.
Currently, we just select the most frequent span in this list (i.e., SQuAD has multiple
annotations on the dev set; this will select the span that the most annotators gave as
correct).
answer_texts : `List[str]`, optional
All valid answer strings for the given question. In SQuAD, e.g., the training set has
exactly one answer per question, but the dev and test sets have several. TriviaQA has many
possible answers, which are the aliases for the known correct entity. This is put into the
metadata for use with official evaluation scripts, but not used anywhere else.
additional_metadata : `Dict[str, Any]`, optional
The constructed ``metadata`` field will by default contain ``original_passage``,
``token_offsets``, ``question_tokens``, ``passage_tokens``, and ``answer_texts`` keys. If
you want any other metadata to be associated with each instance, you can pass that in here.
This dictionary will get added to the ``metadata`` dictionary we already construct.
"""
additional_metadata = additional_metadata or {}
fields: Dict[str, Field] = {}
passage_offsets = [(token.idx, token.idx + len(token.text)) for token in passage_tokens]
# This is separate so we can reference it later with a known type.
passage_field = TextField(passage_tokens, token_indexers)
fields["passage"] = passage_field
fields["question"] = TextField(question_tokens, token_indexers)
metadata = {
"original_passage": passage_text,
"token_offsets": passage_offsets,
"question_tokens": [token.text for token in question_tokens],
"passage_tokens": [token.text for token in passage_tokens],
}
if answer_texts:
metadata["answer_texts"] = answer_texts
if token_spans:
# There may be multiple answer annotations, so we pick the one that occurs the most. This
# only matters on the SQuAD dev set, and it means our computed metrics ("start_acc",
# "end_acc", and "span_acc") aren't quite the same as the official metrics, which look at
# all of the annotations. This is why we have a separate official SQuAD metric calculation
# (the "em" and "f1" metrics use the official script).
candidate_answers: Counter = Counter()
for span_start, span_end in token_spans:
candidate_answers[(span_start, span_end)] += 1
span_start, span_end = candidate_answers.most_common(1)[0][0]
fields["span_start"] = IndexField(span_start, passage_field)
fields["span_end"] = IndexField(span_end, passage_field)
metadata.update(additional_metadata)
fields["metadata"] = MetadataField(metadata)
return Instance(fields)
def make_reading_comprehension_instance_quac(
question_list_tokens: List[List[Token]],
passage_tokens: List[Token],
token_indexers: Dict[str, TokenIndexer],
passage_text: str,
token_span_lists: List[List[Tuple[int, int]]] = None,
yesno_list: Union[List[int], List[str]] = None,
followup_list: Union[List[int], List[str]] = None,
additional_metadata: Dict[str, Any] = None,
num_context_answers: int = 0,
) -> Instance:
"""
Converts a question, a passage, and an optional answer (or answers) to an ``Instance`` for use
in a reading comprehension model.
Creates an ``Instance`` with at least these fields: ``question`` and ``passage``, both
``TextFields``; and ``metadata``, a ``MetadataField``. Additionally, if both ``answer_texts``
and ``char_span_starts`` are given, the ``Instance`` has ``span_start`` and ``span_end``
fields, which are both ``IndexFields``.
# Parameters
question_list_tokens : `List[List[Token]]`
        An already-tokenized list of questions. Each dialog has multiple questions.
passage_tokens : `List[Token]`
An already-tokenized passage that contains the answer to the given question.
token_indexers : `Dict[str, TokenIndexer]`
Determines how the question and passage `TextFields` will be converted into tensors that
get input to a model. See :class:`TokenIndexer`.
passage_text : `str`
The original passage text. We need this so that we can recover the actual span from the
original passage that the model predicts as the answer to the question. This is used in
official evaluation scripts.
token_span_lists : `List[List[Tuple[int, int]]]`, optional
        Indices into `passage_tokens` to use as the answer to the question for training. This is
        a list of lists, first because there are multiple questions per dialog, and second
        because there might be several possible correct answer spans in the passage.
        Currently, we just select the last span in each list (i.e., QuAC has multiple
        annotations on the dev set; this will select the last span, which was given by the
        original annotator).
    yesno_list : `List[int]`
        List of the affirmation bit for each question-answer pair.
    followup_list : `List[int]`
        List of the continuation bit for each question-answer pair.
num_context_answers : `int`, optional
How many answers to encode into the passage.
additional_metadata : `Dict[str, Any]`, optional
The constructed `metadata` field will by default contain `original_passage`,
``token_offsets``, ``question_tokens``, ``passage_tokens``, and ``answer_texts`` keys. If
you want any other metadata to be associated with each instance, you can pass that in here.
This dictionary will get added to the ``metadata`` dictionary we already construct.
"""
additional_metadata = additional_metadata or {}
fields: Dict[str, Field] = {}
passage_offsets = [(token.idx, token.idx + len(token.text)) for token in passage_tokens]
# This is separate so we can reference it later with a known type.
passage_field = TextField(passage_tokens, token_indexers)
fields["passage"] = passage_field
fields["question"] = ListField(
[TextField(q_tokens, token_indexers) for q_tokens in question_list_tokens]
)
metadata = {
"original_passage": passage_text,
"token_offsets": passage_offsets,
"question_tokens": [
[token.text for token in question_tokens] for question_tokens in question_list_tokens
],
"passage_tokens": [token.text for token in passage_tokens],
}
p1_answer_marker_list: List[Field] = []
p2_answer_marker_list: List[Field] = []
p3_answer_marker_list: List[Field] = []
def get_tag(i, i_name):
# Generate a tag to mark previous answer span in the passage.
return "<{0:d}_{1:s}>".format(i, i_name)
def mark_tag(span_start, span_end, passage_tags, prev_answer_distance):
try:
assert span_start >= 0
assert span_end >= 0
except: # noqa
raise ValueError(
"Previous {0:d}th answer span should have been updated!".format(
prev_answer_distance
)
)
# Modify "tags" to mark previous answer span.
if span_start == span_end:
passage_tags[prev_answer_distance][span_start] = get_tag(prev_answer_distance, "")
else:
passage_tags[prev_answer_distance][span_start] = get_tag(prev_answer_distance, "start")
passage_tags[prev_answer_distance][span_end] = get_tag(prev_answer_distance, "end")
for passage_index in range(span_start + 1, span_end):
passage_tags[prev_answer_distance][passage_index] = get_tag(
prev_answer_distance, "in"
)
if token_span_lists:
span_start_list: List[Field] = []
span_end_list: List[Field] = []
p1_span_start, p1_span_end, p2_span_start = -1, -1, -1
p2_span_end, p3_span_start, p3_span_end = -1, -1, -1
        # Loop over the answer span lists, one per question.
for question_index, answer_span_lists in enumerate(token_span_lists):
span_start, span_end = answer_span_lists[-1] # Last one is the original answer
span_start_list.append(IndexField(span_start, passage_field))
span_end_list.append(IndexField(span_end, passage_field))
prev_answer_marker_lists = [
["O"] * len(passage_tokens),
["O"] * len(passage_tokens),
["O"] * len(passage_tokens),
["O"] * len(passage_tokens),
]
if question_index > 0 and num_context_answers > 0:
mark_tag(p1_span_start, p1_span_end, prev_answer_marker_lists, 1)
if question_index > 1 and num_context_answers > 1:
mark_tag(p2_span_start, p2_span_end, prev_answer_marker_lists, 2)
if question_index > 2 and num_context_answers > 2:
mark_tag(p3_span_start, p3_span_end, prev_answer_marker_lists, 3)
p3_span_start = p2_span_start
p3_span_end = p2_span_end
p2_span_start = p1_span_start
p2_span_end = p1_span_end
p1_span_start = span_start
p1_span_end = span_end
if num_context_answers > 2:
p3_answer_marker_list.append(
SequenceLabelField(
prev_answer_marker_lists[3], passage_field, label_namespace="answer_tags"
)
)
if num_context_answers > 1:
p2_answer_marker_list.append(
SequenceLabelField(
prev_answer_marker_lists[2], passage_field, label_namespace="answer_tags"
)
)
if num_context_answers > 0:
p1_answer_marker_list.append(
SequenceLabelField(
prev_answer_marker_lists[1], passage_field, label_namespace="answer_tags"
)
)
fields["span_start"] = ListField(span_start_list)
fields["span_end"] = ListField(span_end_list)
if num_context_answers > 0:
fields["p1_answer_marker"] = ListField(p1_answer_marker_list)
if num_context_answers > 1:
fields["p2_answer_marker"] = ListField(p2_answer_marker_list)
if num_context_answers > 2:
fields["p3_answer_marker"] = ListField(p3_answer_marker_list)
fields["yesno_list"] = ListField(
[LabelField(yesno, label_namespace="yesno_labels") for yesno in yesno_list]
)
fields["followup_list"] = ListField(
[LabelField(followup, label_namespace="followup_labels") for followup in followup_list]
)
metadata.update(additional_metadata)
fields["metadata"] = MetadataField(metadata)
return Instance(fields)
def handle_cannot(reference_answers: List[str]):
"""
Process a list of reference answers.
If equal or more than half of the reference answers are "CANNOTANSWER", take it as gold.
Otherwise, return answers that are not "CANNOTANSWER".
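    For example, ``handle_cannot(["CANNOTANSWER", "in 1995", "CANNOTANSWER"])`` would return
    ``["CANNOTANSWER"]``, while ``handle_cannot(["in 1995", "in 1996", "CANNOTANSWER"])`` would
    return ``["in 1995", "in 1996"]``.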
"""
num_cannot = 0
num_spans = 0
for ref in reference_answers:
if ref == "CANNOTANSWER":
num_cannot += 1
else:
num_spans += 1
if num_cannot >= num_spans:
reference_answers = ["CANNOTANSWER"]
else:
reference_answers = [x for x in reference_answers if x != "CANNOTANSWER"]
return reference_answers
def split_token_by_delimiter(token: Token, delimiter: str) -> List[Token]:
split_tokens = []
char_offset = token.idx
for sub_str in token.text.split(delimiter):
if sub_str:
split_tokens.append(Token(text=sub_str, idx=char_offset))
char_offset += len(sub_str)
split_tokens.append(Token(text=delimiter, idx=char_offset))
char_offset += len(delimiter)
if split_tokens:
split_tokens.pop(-1)
char_offset -= len(delimiter)
return split_tokens
else:
return [token]
def split_tokens_by_hyphen(tokens: List[Token]) -> List[Token]:
    hyphens = ["-", "–", "~"]
new_tokens: List[Token] = []
for token in tokens:
if any(hyphen in token.text for hyphen in hyphens):
unsplit_tokens = [token]
split_tokens: List[Token] = []
for hyphen in hyphens:
for unsplit_token in unsplit_tokens:
if hyphen in token.text:
split_tokens += split_token_by_delimiter(unsplit_token, hyphen)
else:
split_tokens.append(unsplit_token)
unsplit_tokens, split_tokens = split_tokens, []
new_tokens += unsplit_tokens
else:
new_tokens.append(token)
return new_tokens
| allennlp-models-main | allennlp_models/rc/dataset_readers/utils.py |
import json
import logging
from typing import Any, Dict, List, Tuple, Union
from allennlp.common.file_utils import cached_path
from allennlp.data.dataset_readers.dataset_reader import DatasetReader
from allennlp.data.instance import Instance
from allennlp.data.token_indexers import SingleIdTokenIndexer, TokenIndexer
from allennlp.data.tokenizers import Token, Tokenizer, SpacyTokenizer
from allennlp_models.rc.dataset_readers import utils
logger = logging.getLogger(__name__)
@DatasetReader.register("quac")
class QuACReader(DatasetReader):
"""
Reads a JSON-formatted Question Answering in Context (QuAC) data file
    and returns a ``Dataset`` where the ``Instances`` have four fields: ``question``, a ``ListField``,
    ``passage``, another ``TextField``, and ``span_start`` and ``span_end``, both ``ListField``s composed
    of ``IndexField``s into the ``passage`` ``TextField``.
    Two more ``ListField``s composed of ``LabelField``s, ``yesno_list`` and ``followup_list``, are added.
    We also add a
    ``MetadataField`` that stores the instance's ID, the original passage text, gold answer strings,
    and token offsets into the original passage, accessible as ``metadata['id']``,
    ``metadata['original_passage']``, ``metadata['answer_text_lists']`` and ``metadata['token_offsets']``.
# Parameters
tokenizer : `Tokenizer`, optional (default=`SpacyTokenizer()`)
We use this ``Tokenizer`` for both the question and the passage. See :class:`Tokenizer`.
Default is `SpacyTokenizer()`.
token_indexers : `Dict[str, TokenIndexer]`, optional
We similarly use this for both the question and the passage. See :class:`TokenIndexer`.
Default is `{"tokens": SingleIdTokenIndexer()}`.
num_context_answers : `int`, optional
How many previous question answers to consider in a context.
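    # Example
    A minimal usage sketch (the file path is hypothetical):
    ```python
    reader = QuACReader(num_context_answers=2)
    instances = reader.read("/path/to/quac/train_v0.2.json")  # hypothetical path
    ```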
"""
def __init__(
self,
tokenizer: Tokenizer = None,
token_indexers: Dict[str, TokenIndexer] = None,
num_context_answers: int = 0,
**kwargs,
) -> None:
super().__init__(**kwargs)
self._tokenizer = tokenizer or SpacyTokenizer()
self._token_indexers = token_indexers or {"tokens": SingleIdTokenIndexer()}
self._num_context_answers = num_context_answers
def _read(self, file_path: str):
# if `file_path` is a URL, redirect to the cache
file_path = cached_path(file_path)
logger.info("Reading file at %s", file_path)
with open(file_path) as dataset_file:
dataset_json = json.load(dataset_file)
dataset = dataset_json["data"]
logger.info("Reading the dataset")
for article in dataset:
for paragraph_json in article["paragraphs"]:
paragraph = paragraph_json["context"]
tokenized_paragraph = self._tokenizer.tokenize(paragraph)
qas = paragraph_json["qas"]
metadata = {}
metadata["instance_id"] = [qa["id"] for qa in qas]
question_text_list = [qa["question"].strip().replace("\n", "") for qa in qas]
answer_texts_list = [[answer["text"] for answer in qa["answers"]] for qa in qas]
metadata["question"] = question_text_list
metadata["answer_texts_list"] = answer_texts_list
span_starts_list = [
[answer["answer_start"] for answer in qa["answers"]] for qa in qas
]
span_ends_list = []
for answer_starts, an_list in zip(span_starts_list, answer_texts_list):
span_ends = [
start + len(answer) for start, answer in zip(answer_starts, an_list)
]
span_ends_list.append(span_ends)
yesno_list = [str(qa["yesno"]) for qa in qas]
followup_list = [str(qa["followup"]) for qa in qas]
instance = self.text_to_instance(
question_text_list,
paragraph,
span_starts_list,
span_ends_list,
tokenized_paragraph,
yesno_list,
followup_list,
metadata,
)
yield instance
def text_to_instance(
self, # type: ignore
question_text_list: List[str],
passage_text: str,
start_span_list: List[List[int]] = None,
end_span_list: List[List[int]] = None,
passage_tokens: List[Token] = None,
yesno_list: Union[List[int], List[str]] = None,
followup_list: Union[List[int], List[str]] = None,
additional_metadata: Dict[str, Any] = None,
) -> Instance:
# We need to convert character indices in `passage_text` to token indices in
# `passage_tokens`, as the latter is what we'll actually use for supervision.
answer_token_span_list = []
passage_offsets = [(token.idx, token.idx + len(token.text)) for token in passage_tokens]
for start_list, end_list in zip(start_span_list, end_span_list):
token_spans: List[Tuple[int, int]] = []
for char_span_start, char_span_end in zip(start_list, end_list):
(span_start, span_end), error = utils.char_span_to_token_span(
passage_offsets, (char_span_start, char_span_end)
)
if error:
logger.debug("Passage: %s", passage_text)
logger.debug("Passage tokens: %s", passage_tokens)
logger.debug("Answer span: (%d, %d)", char_span_start, char_span_end)
logger.debug("Token span: (%d, %d)", span_start, span_end)
logger.debug("Tokens in answer: %s", passage_tokens[span_start : span_end + 1])
logger.debug("Answer: %s", passage_text[char_span_start:char_span_end])
token_spans.append((span_start, span_end))
answer_token_span_list.append(token_spans)
question_list_tokens = [self._tokenizer.tokenize(q) for q in question_text_list]
        # Map answer texts to "CANNOTANSWER" if at least half of them are marked as such.
additional_metadata["answer_texts_list"] = [
utils.handle_cannot(ans_list) for ans_list in additional_metadata["answer_texts_list"]
]
return utils.make_reading_comprehension_instance_quac(
question_list_tokens,
passage_tokens,
self._token_indexers,
passage_text,
answer_token_span_list,
yesno_list,
followup_list,
additional_metadata,
self._num_context_answers,
)
| allennlp-models-main | allennlp_models/rc/dataset_readers/quac.py |
import itertools
import json
import logging
import string
from collections import defaultdict
from typing import Dict, List, Union, Tuple, Any
from word2number.w2n import word_to_num
from allennlp.common.file_utils import cached_path
from allennlp.data.fields import (
Field,
TextField,
MetadataField,
LabelField,
ListField,
SequenceLabelField,
SpanField,
IndexField,
)
from allennlp.data.dataset_readers.dataset_reader import DatasetReader
from allennlp.data.instance import Instance
from allennlp.data.token_indexers import SingleIdTokenIndexer, TokenIndexer
from allennlp.data.tokenizers import Token, Tokenizer, SpacyTokenizer
from allennlp_models.rc.dataset_readers.utils import (
IGNORED_TOKENS,
STRIPPED_CHARACTERS,
make_reading_comprehension_instance,
split_tokens_by_hyphen,
)
logger = logging.getLogger(__name__)
WORD_NUMBER_MAP = {
"zero": 0,
"one": 1,
"two": 2,
"three": 3,
"four": 4,
"five": 5,
"six": 6,
"seven": 7,
"eight": 8,
"nine": 9,
"ten": 10,
"eleven": 11,
"twelve": 12,
"thirteen": 13,
"fourteen": 14,
"fifteen": 15,
"sixteen": 16,
"seventeen": 17,
"eighteen": 18,
"nineteen": 19,
}
@DatasetReader.register("drop")
class DropReader(DatasetReader):
"""
Reads a JSON-formatted DROP dataset file and returns instances in a few different possible
formats. The input format is complicated; see the test fixture for an example of what it looks
like. The output formats all contain a question ``TextField``, a passage ``TextField``, and
some kind of answer representation. Because DROP has instances with several different kinds of
answers, this dataset reader allows you to filter out questions that do not have answers of a
particular type (e.g., remove questions that have numbers as answers, if you model can only
give passage spans as answers). We typically return all possible ways of arriving at a given
answer string, and expect models to marginalize over these possibilities.
# Parameters
tokenizer : `Tokenizer`, optional (default=`SpacyTokenizer()`)
We use this `Tokenizer` for both the question and the passage. See :class:`Tokenizer`.
Default is `SpacyTokenizer()`.
token_indexers : `Dict[str, TokenIndexer]`, optional
We similarly use this for both the question and the passage. See :class:`TokenIndexer`.
Default is `{"tokens": SingleIdTokenIndexer()}`.
passage_length_limit : `int`, optional (default=`None`)
If specified, we will cut the passage if the length of passage exceeds this limit.
question_length_limit : `int`, optional (default=`None`)
        If specified, we will cut the question if the length of the question exceeds this limit.
skip_when_all_empty: `List[str]`, optional (default=`None`)
        In some cases, such as when preparing training examples, you may want to skip some examples
        when there are no gold labels. You can specify the conditions under which examples should be
        skipped. Currently, you can put "passage_span", "question_span", "addition_subtraction",
        or "counting" in this list, to tell the reader to skip examples when no such labels are found.
If not specified, we will keep all the examples.
instance_format: `str`, optional (default=`"drop"`)
We try to be generous in providing a few different formats for the instances in DROP,
in terms of the `Fields` that we return for each `Instance`, to allow for several
different kinds of models. "drop" format will do processing to detect numbers and
various ways those numbers can be arrived at from the passage, and return `Fields`
related to that. "bert" format only allows passage spans as answers, and provides a
"question_and_passage" field with the two pieces of text joined as BERT expects.
"squad" format provides the same fields that our BiDAF and other SQuAD models expect.
relaxed_span_match_for_finding_labels : `bool`, optional (default=`True`)
DROP dataset contains multi-span answers, and the date-type answers are usually hard to
find exact span matches for, also. In order to use as many examples as possible
to train the model, we may not want a strict match for such cases when finding the gold
span labels. If this argument is true, we will treat every span in the multi-span
answers as correct, and every token in the date answer as correct, too. Because models
trained on DROP typically marginalize over all possible answer positions, this is just
being a little more generous in what is being marginalized. Note that this will not
affect evaluation.
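    # Example
    A minimal usage sketch (the file path is hypothetical); it uses the default "drop" format
    and skips questions that have neither a passage-span nor a counting label:
    ```python
    reader = DropReader(instance_format="drop", skip_when_all_empty=["passage_span", "counting"])
    instances = reader.read("/path/to/drop/drop_dataset_train.json")  # hypothetical path
    ```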
"""
def __init__(
self,
tokenizer: Tokenizer = None,
token_indexers: Dict[str, TokenIndexer] = None,
passage_length_limit: int = None,
question_length_limit: int = None,
skip_when_all_empty: List[str] = None,
instance_format: str = "drop",
relaxed_span_match_for_finding_labels: bool = True,
**kwargs,
) -> None:
super().__init__(**kwargs)
self._tokenizer = tokenizer or SpacyTokenizer()
self._token_indexers = token_indexers or {"tokens": SingleIdTokenIndexer()}
self.passage_length_limit = passage_length_limit
self.question_length_limit = question_length_limit
self.skip_when_all_empty = skip_when_all_empty if skip_when_all_empty is not None else []
for item in self.skip_when_all_empty:
assert item in [
"passage_span",
"question_span",
"addition_subtraction",
"counting",
], f"Unsupported skip type: {item}"
self.instance_format = instance_format
self.relaxed_span_match_for_finding_labels = relaxed_span_match_for_finding_labels
def _read(self, file_path: str):
# if `file_path` is a URL, redirect to the cache
file_path = cached_path(file_path, extract_archive=True)
logger.info("Reading file at %s", file_path)
with open(file_path) as dataset_file:
dataset = json.load(dataset_file)
logger.info("Reading the dataset")
kept_count, skip_count = 0, 0
for passage_id, passage_info in dataset.items():
passage_text = passage_info["passage"]
passage_tokens = self._tokenizer.tokenize(passage_text)
passage_tokens = split_tokens_by_hyphen(passage_tokens)
for question_answer in passage_info["qa_pairs"]:
question_id = question_answer["query_id"]
question_text = question_answer["question"].strip()
answer_annotations = []
if "answer" in question_answer:
answer_annotations.append(question_answer["answer"])
if "validated_answers" in question_answer:
answer_annotations += question_answer["validated_answers"]
instance = self.text_to_instance(
question_text,
passage_text,
question_id,
passage_id,
answer_annotations,
passage_tokens,
)
if instance is not None:
kept_count += 1
yield instance
else:
skip_count += 1
logger.info(f"Skipped {skip_count} questions, kept {kept_count} questions.")
def text_to_instance(
self, # type: ignore
question_text: str,
passage_text: str,
question_id: str = None,
passage_id: str = None,
answer_annotations: List[Dict] = None,
passage_tokens: List[Token] = None,
) -> Union[Instance, None]:
if not passage_tokens:
passage_tokens = self._tokenizer.tokenize(passage_text)
passage_tokens = split_tokens_by_hyphen(passage_tokens)
question_tokens = self._tokenizer.tokenize(question_text)
question_tokens = split_tokens_by_hyphen(question_tokens)
if self.passage_length_limit is not None:
passage_tokens = passage_tokens[: self.passage_length_limit]
if self.question_length_limit is not None:
question_tokens = question_tokens[: self.question_length_limit]
answer_type: str = None
answer_texts: List[str] = []
if answer_annotations:
# Currently we only use the first annotated answer here, but actually this doesn't affect
# the training, because we only have one annotation for the train set.
answer_type, answer_texts = self.extract_answer_info_from_annotation(
answer_annotations[0]
)
# Tokenize the answer text in order to find the matched span based on token
tokenized_answer_texts = []
for answer_text in answer_texts:
answer_tokens = self._tokenizer.tokenize(answer_text)
answer_tokens = split_tokens_by_hyphen(answer_tokens)
tokenized_answer_texts.append(" ".join(token.text for token in answer_tokens))
if self.instance_format == "squad":
valid_passage_spans = (
self.find_valid_spans(passage_tokens, tokenized_answer_texts)
if tokenized_answer_texts
else []
)
if not valid_passage_spans:
if "passage_span" in self.skip_when_all_empty:
return None
else:
valid_passage_spans.append((len(passage_tokens) - 1, len(passage_tokens) - 1))
return make_reading_comprehension_instance(
question_tokens,
passage_tokens,
self._token_indexers,
passage_text,
valid_passage_spans,
# this `answer_texts` will not be used for evaluation
answer_texts,
additional_metadata={
"original_passage": passage_text,
"original_question": question_text,
"passage_id": passage_id,
"question_id": question_id,
"valid_passage_spans": valid_passage_spans,
"answer_annotations": answer_annotations,
},
)
elif self.instance_format == "bert":
question_concat_passage_tokens = question_tokens + [Token("[SEP]")] + passage_tokens
valid_passage_spans = []
for span in self.find_valid_spans(passage_tokens, tokenized_answer_texts):
# This span is for `question + [SEP] + passage`.
valid_passage_spans.append(
(span[0] + len(question_tokens) + 1, span[1] + len(question_tokens) + 1)
)
if not valid_passage_spans:
if "passage_span" in self.skip_when_all_empty:
return None
else:
valid_passage_spans.append(
(
len(question_concat_passage_tokens) - 1,
len(question_concat_passage_tokens) - 1,
)
)
answer_info = {
"answer_texts": answer_texts, # this `answer_texts` will not be used for evaluation
"answer_passage_spans": valid_passage_spans,
}
return self.make_bert_drop_instance(
question_tokens,
passage_tokens,
question_concat_passage_tokens,
self._token_indexers,
passage_text,
answer_info,
additional_metadata={
"original_passage": passage_text,
"original_question": question_text,
"passage_id": passage_id,
"question_id": question_id,
"answer_annotations": answer_annotations,
},
)
elif self.instance_format == "drop":
numbers_in_passage = []
number_indices = []
for token_index, token in enumerate(passage_tokens):
number = self.convert_word_to_number(token.text)
if number is not None:
numbers_in_passage.append(number)
number_indices.append(token_index)
            # Hack to guarantee a minimal length for the padded list of numbers.
numbers_in_passage.append(0)
number_indices.append(-1)
numbers_as_tokens = [Token(str(number)) for number in numbers_in_passage]
valid_passage_spans = (
self.find_valid_spans(passage_tokens, tokenized_answer_texts)
if tokenized_answer_texts
else []
)
valid_question_spans = (
self.find_valid_spans(question_tokens, tokenized_answer_texts)
if tokenized_answer_texts
else []
)
target_numbers = []
# `answer_texts` is a list of valid answers.
for answer_text in answer_texts:
number = self.convert_word_to_number(answer_text)
if number is not None:
target_numbers.append(number)
valid_signs_for_add_sub_expressions: List[List[int]] = []
valid_counts: List[int] = []
if answer_type in ["number", "date"]:
valid_signs_for_add_sub_expressions = self.find_valid_add_sub_expressions(
numbers_in_passage, target_numbers
)
if answer_type in ["number"]:
                # Currently we only support count answers from 0 to 9.
numbers_for_count = list(range(10))
valid_counts = self.find_valid_counts(numbers_for_count, target_numbers)
type_to_answer_map = {
"passage_span": valid_passage_spans,
"question_span": valid_question_spans,
"addition_subtraction": valid_signs_for_add_sub_expressions,
"counting": valid_counts,
}
if self.skip_when_all_empty and not any(
type_to_answer_map[skip_type] for skip_type in self.skip_when_all_empty
):
return None
answer_info = {
"answer_texts": answer_texts, # this `answer_texts` will not be used for evaluation
"answer_passage_spans": valid_passage_spans,
"answer_question_spans": valid_question_spans,
"signs_for_add_sub_expressions": valid_signs_for_add_sub_expressions,
"counts": valid_counts,
}
return self.make_marginal_drop_instance(
question_tokens,
passage_tokens,
numbers_as_tokens,
number_indices,
self._token_indexers,
passage_text,
answer_info,
additional_metadata={
"original_passage": passage_text,
"original_question": question_text,
"original_numbers": numbers_in_passage,
"passage_id": passage_id,
"question_id": question_id,
"answer_info": answer_info,
"answer_annotations": answer_annotations,
},
)
else:
raise ValueError(
f'Expect the instance format to be "drop", "squad" or "bert", '
f"but got {self.instance_format}"
)
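    # Illustrative usage sketch (not part of the original file; the reader class name and the
    # annotation values below are assumptions made for the example). A single DROP-style
    # question/passage pair with a numeric answer could be converted like this:
    #
    #   reader = DropReader(instance_format="drop")  # class name assumed
    #   instance = reader.text_to_instance(
    #       question_text="How many field goals were kicked?",
    #       passage_text="The Bears kicked 2 field goals and scored 17 points.",
    #       question_id="q1",
    #       passage_id="p1",
    #       answer_annotations=[{"number": "2", "spans": [], "date": {}}],
    #   )
    #   # `instance` then contains the marginal DROP fields built by `make_marginal_drop_instance`.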
@staticmethod
def make_marginal_drop_instance(
question_tokens: List[Token],
passage_tokens: List[Token],
number_tokens: List[Token],
number_indices: List[int],
token_indexers: Dict[str, TokenIndexer],
passage_text: str,
answer_info: Dict[str, Any] = None,
additional_metadata: Dict[str, Any] = None,
) -> Instance:
additional_metadata = additional_metadata or {}
fields: Dict[str, Field] = {}
passage_offsets = [(token.idx, token.idx + len(token.text)) for token in passage_tokens]
question_offsets = [(token.idx, token.idx + len(token.text)) for token in question_tokens]
# This is separate so we can reference it later with a known type.
passage_field = TextField(passage_tokens, token_indexers)
question_field = TextField(question_tokens, token_indexers)
fields["passage"] = passage_field
fields["question"] = question_field
number_index_fields: List[Field] = [
IndexField(index, passage_field) for index in number_indices
]
fields["number_indices"] = ListField(number_index_fields)
        # This field is not actually required by the model; it is only used to create the
        # `answer_as_add_sub_expressions` field, which is a list of `SequenceLabelField`s over
        # these number tokens. We cannot use the `number_indices` field for that, because its
        # `ListField` will not be empty when we need to create a new empty field, which would
        # lead to an error.
numbers_in_passage_field = TextField(number_tokens, token_indexers)
metadata = {
"original_passage": passage_text,
"passage_token_offsets": passage_offsets,
"question_token_offsets": question_offsets,
"question_tokens": [token.text for token in question_tokens],
"passage_tokens": [token.text for token in passage_tokens],
"number_tokens": [token.text for token in number_tokens],
"number_indices": number_indices,
}
if answer_info:
metadata["answer_texts"] = answer_info["answer_texts"]
passage_span_fields: List[Field] = [
SpanField(span[0], span[1], passage_field)
for span in answer_info["answer_passage_spans"]
]
if not passage_span_fields:
passage_span_fields.append(SpanField(-1, -1, passage_field))
fields["answer_as_passage_spans"] = ListField(passage_span_fields)
question_span_fields: List[Field] = [
SpanField(span[0], span[1], question_field)
for span in answer_info["answer_question_spans"]
]
if not question_span_fields:
question_span_fields.append(SpanField(-1, -1, question_field))
fields["answer_as_question_spans"] = ListField(question_span_fields)
add_sub_signs_field: List[Field] = []
for signs_for_one_add_sub_expression in answer_info["signs_for_add_sub_expressions"]:
add_sub_signs_field.append(
SequenceLabelField(signs_for_one_add_sub_expression, numbers_in_passage_field)
)
if not add_sub_signs_field:
add_sub_signs_field.append(
SequenceLabelField([0] * len(number_tokens), numbers_in_passage_field)
)
fields["answer_as_add_sub_expressions"] = ListField(add_sub_signs_field)
count_fields: List[Field] = [
LabelField(count_label, skip_indexing=True) for count_label in answer_info["counts"]
]
if not count_fields:
count_fields.append(LabelField(-1, skip_indexing=True))
fields["answer_as_counts"] = ListField(count_fields)
metadata.update(additional_metadata)
fields["metadata"] = MetadataField(metadata)
return Instance(fields)
@staticmethod
def make_bert_drop_instance(
question_tokens: List[Token],
passage_tokens: List[Token],
question_concat_passage_tokens: List[Token],
token_indexers: Dict[str, TokenIndexer],
passage_text: str,
answer_info: Dict[str, Any] = None,
additional_metadata: Dict[str, Any] = None,
) -> Instance:
additional_metadata = additional_metadata or {}
fields: Dict[str, Field] = {}
passage_offsets = [(token.idx, token.idx + len(token.text)) for token in passage_tokens]
# This is separate so we can reference it later with a known type.
passage_field = TextField(passage_tokens, token_indexers)
question_field = TextField(question_tokens, token_indexers)
fields["passage"] = passage_field
fields["question"] = question_field
question_and_passage_field = TextField(question_concat_passage_tokens, token_indexers)
fields["question_and_passage"] = question_and_passage_field
metadata = {
"original_passage": passage_text,
"passage_token_offsets": passage_offsets,
"question_tokens": [token.text for token in question_tokens],
"passage_tokens": [token.text for token in passage_tokens],
}
if answer_info:
metadata["answer_texts"] = answer_info["answer_texts"]
passage_span_fields: List[Field] = [
SpanField(span[0], span[1], question_and_passage_field)
for span in answer_info["answer_passage_spans"]
]
if not passage_span_fields:
passage_span_fields.append(SpanField(-1, -1, question_and_passage_field))
fields["answer_as_passage_spans"] = ListField(passage_span_fields)
metadata.update(additional_metadata)
fields["metadata"] = MetadataField(metadata)
return Instance(fields)
@staticmethod
def extract_answer_info_from_annotation(
answer_annotation: Dict[str, Any]
) -> Tuple[str, List[str]]:
answer_type = None
if answer_annotation["spans"]:
answer_type = "spans"
elif answer_annotation["number"]:
answer_type = "number"
elif any(answer_annotation["date"].values()):
answer_type = "date"
answer_content = answer_annotation[answer_type] if answer_type is not None else None
answer_texts: List[str] = []
if answer_type is None: # No answer
pass
elif answer_type == "spans":
# answer_content is a list of string in this case
answer_texts = answer_content
elif answer_type == "date":
# answer_content is a dict with "month", "day", "year" as the keys
date_tokens = [
answer_content[key]
for key in ["month", "day", "year"]
if key in answer_content and answer_content[key]
]
answer_texts = date_tokens
elif answer_type == "number":
# answer_content is a string of number
answer_texts = [answer_content]
return answer_type, answer_texts
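    # Worked example (illustrative): an annotation like
    #   {"spans": [], "number": "", "date": {"month": "June", "day": "26", "year": "2015"}}
    # has no spans and no number, so `answer_type` becomes "date" and the returned answer texts
    # are ["June", "26", "2015"]; a plain span annotation {"spans": ["Chicago Bears"], ...}
    # would instead return ("spans", ["Chicago Bears"]).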
@staticmethod
def convert_word_to_number(word: str, try_to_include_more_numbers=False):
"""
Currently we only support limited types of conversion.
"""
if try_to_include_more_numbers:
            # strip all punctuation from the sides of the word, except for the negative sign
            punctuations = string.punctuation.replace("-", "")
            word = word.strip(punctuations)
            # some words may contain a comma as a delimiter
            word = word.replace(",", "")
            # word_to_num will convert "hundred", "thousand", ... to numbers, but we skip those.
if word in ["hundred", "thousand", "million", "billion", "trillion"]:
return None
try:
number = word_to_num(word)
except ValueError:
try:
number = int(word)
except ValueError:
try:
number = float(word)
except ValueError:
number = None
return number
else:
no_comma_word = word.replace(",", "")
if no_comma_word in WORD_NUMBER_MAP:
number = WORD_NUMBER_MAP[no_comma_word]
else:
try:
number = int(no_comma_word)
except ValueError:
number = None
return number
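    # A few illustrative conversions under the default setting (try_to_include_more_numbers=False):
    #   "1,234" -> 1234   (comma removed, then parsed with int)
    #   "seven" -> 7      only if "seven" is an entry in WORD_NUMBER_MAP; otherwise None
    #   "yards" -> None   (not a number word and not parseable as an int)
    # With try_to_include_more_numbers=True, scale words like "hundred" or "thousand" are
    # explicitly skipped and also return None.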
@staticmethod
def find_valid_spans(
passage_tokens: List[Token], answer_texts: List[str]
) -> List[Tuple[int, int]]:
normalized_tokens = [
token.text.lower().strip(STRIPPED_CHARACTERS) for token in passage_tokens
]
word_positions: Dict[str, List[int]] = defaultdict(list)
for i, token in enumerate(normalized_tokens):
word_positions[token].append(i)
spans = []
for answer_text in answer_texts:
answer_tokens = answer_text.lower().strip(STRIPPED_CHARACTERS).split()
num_answer_tokens = len(answer_tokens)
if answer_tokens[0] not in word_positions:
continue
for span_start in word_positions[answer_tokens[0]]:
span_end = span_start # span_end is _inclusive_
answer_index = 1
while answer_index < num_answer_tokens and span_end + 1 < len(normalized_tokens):
token = normalized_tokens[span_end + 1]
if answer_tokens[answer_index].strip(STRIPPED_CHARACTERS) == token:
answer_index += 1
span_end += 1
elif token in IGNORED_TOKENS:
span_end += 1
else:
break
if num_answer_tokens == answer_index:
spans.append((span_start, span_end))
return spans
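    # Worked example (illustrative): with passage tokens ["The", "Bears", "scored", "17", "points"]
    # and answer_texts ["17 points"], the normalized answer tokens are ["17", "points"], the first
    # answer token matches position 3, the match extends to position 4, and so the returned
    # (inclusive) spans are [(3, 4)].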
@staticmethod
def find_valid_add_sub_expressions(
numbers: List[int], targets: List[int], max_number_of_numbers_to_consider: int = 2
) -> List[List[int]]:
valid_signs_for_add_sub_expressions = []
# TODO: Try smaller numbers?
for number_of_numbers_to_consider in range(2, max_number_of_numbers_to_consider + 1):
possible_signs = list(itertools.product((-1, 1), repeat=number_of_numbers_to_consider))
for number_combination in itertools.combinations(
enumerate(numbers), number_of_numbers_to_consider
):
indices = [it[0] for it in number_combination]
values = [it[1] for it in number_combination]
for signs in possible_signs:
eval_value = sum(sign * value for sign, value in zip(signs, values))
if eval_value in targets:
                        labels_for_numbers = [0] * len(numbers)  # 0 represents "not included".
for index, sign in zip(indices, signs):
labels_for_numbers[index] = (
1 if sign == 1 else 2
) # 1 for positive, 2 for negative
valid_signs_for_add_sub_expressions.append(labels_for_numbers)
return valid_signs_for_add_sub_expressions
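    # Worked example (illustrative): with numbers = [17, 24, 3] and targets = [7], only the
    # combination -17 + 24 = 7 hits a target, so the returned label lists are [[2, 1, 0]]
    # (2 = negative sign, 1 = positive sign, 0 = number not included in the expression).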
@staticmethod
def find_valid_counts(count_numbers: List[int], targets: List[int]) -> List[int]:
valid_indices = []
for index, number in enumerate(count_numbers):
if number in targets:
valid_indices.append(index)
return valid_indices
| allennlp-models-main | allennlp_models/rc/dataset_readers/drop.py |
import json
import logging
import os
import tarfile
from typing import Dict, List, Tuple
from allennlp.common.file_utils import cached_path
from allennlp.data.dataset_readers.dataset_reader import DatasetReader
from allennlp.data.instance import Instance
from allennlp.data.token_indexers import SingleIdTokenIndexer, TokenIndexer
from allennlp.data.tokenizers import Token, Tokenizer, SpacyTokenizer
from allennlp_models.rc.dataset_readers import utils
logger = logging.getLogger(__name__)
@DatasetReader.register("triviaqa")
class TriviaQaReader(DatasetReader):
"""
Reads the TriviaQA dataset into a ``Dataset`` containing ``Instances`` with four fields:
``question`` (a ``TextField``), ``passage`` (another ``TextField``), ``span_start``, and
``span_end`` (both ``IndexFields``).
    TriviaQA is split up into several JSON files defining the questions, and a lot of text files
    containing crawled web documents. We read these from a gzipped tarball, so that we don't need
    millions of individual files on the filesystem.
Because we need to read both train and validation files from the same tarball, we take the
tarball itself as a constructor parameter, and take the question file as the argument to
``read``. This means that you should give the path to the tarball in the ``dataset_reader``
parameters in your experiment configuration file, and something like ``"wikipedia-train.json"``
for the ``train_data_path`` and ``validation_data_path``.
# Parameters
base_tarball_path : `str`
This is the path to the main ``tar.gz`` file you can download from the TriviaQA website,
with directories ``evidence`` and ``qa``.
unfiltered_tarball_path : `str`, optional
This is the path to the "unfiltered" TriviaQA data that you can download from the TriviaQA
website, containing just question JSON files that point to evidence files in the base
tarball.
tokenizer : `Tokenizer`, optional
We'll use this tokenizer on questions and evidence passages, defaulting to
``SpacyTokenizer`` if none is provided.
token_indexers : `Dict[str, TokenIndexer]`, optional
Determines how both the question and the evidence passages are represented as arrays. See
:class:`TokenIndexer`. Default is to have a single word ID for every token.
"""
def __init__(
self,
base_tarball_path: str,
unfiltered_tarball_path: str = None,
tokenizer: Tokenizer = None,
token_indexers: Dict[str, TokenIndexer] = None,
**kwargs
) -> None:
super().__init__(**kwargs)
self._base_tarball_path = base_tarball_path
self._unfiltered_tarball_path = unfiltered_tarball_path
self._tokenizer = tokenizer or SpacyTokenizer()
self._token_indexers = token_indexers or {"tokens": SingleIdTokenIndexer()}
def _read(self, file_path: str):
logger.info("Opening base tarball file at %s", self._base_tarball_path)
base_tarball = tarfile.open(cached_path(self._base_tarball_path), "r")
if "unfiltered" in file_path:
logger.info("Opening unfiltered tarball file at %s", self._unfiltered_tarball_path)
unfiltered_tarball = tarfile.open(cached_path(self._unfiltered_tarball_path), "r")
logger.info("Loading question file from tarball")
data_json = json.loads(unfiltered_tarball.extractfile(file_path).read().decode("utf-8"))
else:
logger.info("Loading question file from tarball")
path = os.path.join("qa", file_path)
data_json = json.loads(base_tarball.extractfile(path).read().decode("utf-8"))
logger.info("Reading the dataset")
for question_json in data_json["Data"]:
question_text = question_json["Question"]
question_tokens = self._tokenizer.tokenize(question_text)
evidence_files: List[List[str]] = [] # contains lines from each evidence file
if "web" in file_path:
for result in question_json["SearchResults"]:
filename = result["Filename"]
evidence_file = base_tarball.extractfile(
os.path.join("evidence", "web", filename)
)
evidence_files.append(
[line.decode("utf-8") for line in evidence_file.readlines()]
)
else:
for result in question_json["EntityPages"]:
filename = result["Filename"]
evidence_file = base_tarball.extractfile(
os.path.join("evidence", "wikipedia", filename)
)
evidence_files.append(
[line.decode("utf-8") for line in evidence_file.readlines()]
)
answer_json = question_json["Answer"]
human_answers = [
utils.normalize_text(answer) for answer in answer_json.get("HumanAnswers", [])
]
answer_texts = answer_json["NormalizedAliases"] + human_answers
for paragraph in self.pick_paragraphs(evidence_files, question_text, answer_texts):
paragraph_tokens = self._tokenizer.tokenize(paragraph)
token_spans = utils.find_valid_answer_spans(paragraph_tokens, answer_texts)
if not token_spans:
# For now, we'll just ignore instances that we can't find answer spans for.
# Maybe we can do something smarter here later, but this will do for now.
continue
instance = self.text_to_instance(
question_text,
paragraph,
token_spans,
answer_texts,
question_tokens,
paragraph_tokens,
)
yield instance
def pick_paragraphs(
self, evidence_files: List[List[str]], question: str = None, answer_texts: List[str] = None
) -> List[str]:
"""
Given a list of evidence documents, return a list of paragraphs to use as training
examples. Each paragraph returned will be made into one training example.
To aid in picking the best paragraph, you can also optionally pass the question text or the
answer strings. Note, though, that if you actually use the answer strings for picking the
paragraph on the dev or test sets, that's likely cheating, depending on how you've defined
the task.
"""
paragraphs = []
for evidence_file in evidence_files:
whole_document = " ".join(evidence_file)
tokens = whole_document.split(" ")
paragraph = " ".join(tokens[:400])
paragraphs.append(paragraph)
return paragraphs
def text_to_instance(
self, # type: ignore
question_text: str,
passage_text: str,
token_spans: List[Tuple[int, int]] = None,
answer_texts: List[str] = None,
question_tokens: List[Token] = None,
passage_tokens: List[Token] = None,
) -> Instance:
if not question_tokens:
question_tokens = self._tokenizer.tokenize(question_text)
if not passage_tokens:
passage_tokens = self._tokenizer.tokenize(passage_text)
return utils.make_reading_comprehension_instance(
question_tokens,
passage_tokens,
self._token_indexers,
passage_text,
token_spans,
answer_texts,
)
| allennlp-models-main | allennlp_models/rc/dataset_readers/triviaqa.py |
import json
from allennlp.common.util import JsonDict
from allennlp.data import DatasetReader, Instance
from allennlp.data.tokenizers.spacy_tokenizer import SpacyTokenizer
from allennlp.predictors.predictor import Predictor
from allennlp.models import Model
@Predictor.register("dialog_qa")
class DialogQAPredictor(Predictor):
def __init__(
self, model: Model, dataset_reader: DatasetReader, language: str = "en_core_web_sm"
) -> None:
super().__init__(model, dataset_reader)
self._tokenizer = SpacyTokenizer(language=language)
def predict(self, jsonline: str) -> JsonDict:
"""
Make a dialog-style question answering prediction on the supplied input.
        The supplied input json must contain the context (passage) and a list of
        question-answer pairs, each containing question, answer, yesno, followup, and id.
Parameters
----------
jsonline : ``str``
A json line that has the same format as the quac data file.
Returns
----------
A dictionary that represents the prediction made by the system. The answer string will be under the
"best_span_str" key.
"""
return self.predict_json(json.loads(jsonline))
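    # A minimal sketch of the expected QuAC-style input (illustrative values), matching the keys
    # read by `_json_to_instance` below:
    #
    #   {"paragraphs": [{
    #       "context": "Saint Bernadette Soubirous was born in Lourdes ...",
    #       "qas": [{
    #           "id": "C_1_q#0",
    #           "question": "Where was she born?",
    #           "answers": [{"text": "Lourdes", "answer_start": 39}],
    #           "yesno": "x",
    #           "followup": "y"
    #       }]
    #   }]}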
def _json_to_instance(self, json_dict: JsonDict) -> Instance:
"""
Expects json that looks like the original quac data file.
"""
paragraph_json = json_dict["paragraphs"][0]
paragraph = paragraph_json["context"]
tokenized_paragraph = self._tokenizer.tokenize(paragraph)
qas = paragraph_json["qas"]
metadata = {}
metadata["instance_id"] = [qa["id"] for qa in qas]
question_text_list = [qa["question"].strip().replace("\n", "") for qa in qas]
answer_texts_list = [[answer["text"] for answer in qa["answers"]] for qa in qas]
metadata["answer_texts_list"] = answer_texts_list
metadata["question"] = question_text_list
span_starts_list = [[answer["answer_start"] for answer in qa["answers"]] for qa in qas]
span_ends_list = []
for st_list, an_list in zip(span_starts_list, answer_texts_list):
span_ends = [start + len(answer) for start, answer in zip(st_list, an_list)]
span_ends_list.append(span_ends)
yesno_list = [str(qa["yesno"]) for qa in qas]
followup_list = [str(qa["followup"]) for qa in qas]
instance = self._dataset_reader.text_to_instance(
question_text_list,
paragraph,
span_starts_list,
span_ends_list,
tokenized_paragraph,
yesno_list,
followup_list,
metadata,
)
return instance
| allennlp-models-main | allennlp_models/rc/predictors/dialog_qa.py |
from allennlp_models.rc.predictors.bidaf import ReadingComprehensionPredictor
from allennlp_models.rc.predictors.dialog_qa import DialogQAPredictor
from allennlp_models.rc.predictors.transformer_qa import TransformerQAPredictor
| allennlp-models-main | allennlp_models/rc/predictors/__init__.py |
from typing import Dict, List
import numpy
from allennlp.common.util import JsonDict
from allennlp.data import Instance
from allennlp.predictors.predictor import Predictor
from allennlp.data.fields import (
IndexField,
ListField,
LabelField,
SpanField,
SequenceLabelField,
SequenceField,
)
@Predictor.register("reading_comprehension")
class ReadingComprehensionPredictor(Predictor):
"""
    Predictor for the :class:`~allennlp_models.rc.models.bidaf.BidirectionalAttentionFlow` model, and any
other model that takes a question and passage as input.
"""
def predict(self, question: str, passage: str) -> JsonDict:
"""
Make a machine comprehension prediction on the supplied input.
See https://rajpurkar.github.io/SQuAD-explorer/ for more information about the machine comprehension task.
Parameters
----------
question : ``str``
A question about the content in the supplied paragraph. The question must be answerable by a
span in the paragraph.
passage : ``str``
A paragraph of information relevant to the question.
Returns
-------
A dictionary that represents the prediction made by the system. The answer string will be under the
"best_span_str" key.
"""
return self.predict_json({"passage": passage, "question": question})
def _json_to_instance(self, json_dict: JsonDict) -> Instance:
"""
Expects JSON that looks like ``{"question": "...", "passage": "..."}``.
"""
question_text = json_dict["question"]
passage_text = json_dict["passage"]
return self._dataset_reader.text_to_instance(question_text, passage_text)
def predictions_to_labeled_instances(
self, instance: Instance, outputs: Dict[str, numpy.ndarray]
) -> List[Instance]:
new_instance = instance.duplicate()
# For BiDAF
if "best_span" in outputs:
span_start_label = outputs["best_span"][0]
span_end_label = outputs["best_span"][1]
passage_field: SequenceField = new_instance["passage"] # type: ignore
new_instance.add_field(
"span_start", IndexField(int(span_start_label), passage_field), self._model.vocab
)
new_instance.add_field(
"span_end", IndexField(int(span_end_label), passage_field), self._model.vocab
)
# For NAQANet model. It has the fields: answer_as_passage_spans, answer_as_question_spans,
# answer_as_add_sub_expressions, answer_as_counts. We need labels for all.
elif "answer" in outputs:
answer_type = outputs["answer"]["answer_type"]
# When the problem is a counting problem
if answer_type == "count":
field = ListField([LabelField(int(outputs["answer"]["count"]), skip_indexing=True)])
new_instance.add_field("answer_as_counts", field, self._model.vocab)
# When the answer is in the passage
elif answer_type == "passage_span":
# TODO(mattg): Currently we only handle one predicted span.
span = outputs["answer"]["spans"][0]
# Convert character span indices into word span indices
word_span_start = None
word_span_end = None
offsets = new_instance["metadata"].metadata["passage_token_offsets"] # type: ignore
for index, offset in enumerate(offsets):
if offset[0] == span[0]:
word_span_start = index
if offset[1] == span[1]:
word_span_end = index
passage_field: SequenceField = new_instance["passage"] # type: ignore
field = ListField([SpanField(word_span_start, word_span_end, passage_field)])
new_instance.add_field("answer_as_passage_spans", field, self._model.vocab)
# When the answer is an arithmetic calculation
elif answer_type == "arithmetic":
# The different numbers in the passage that the model encounters
sequence_labels = outputs["answer"]["numbers"]
numbers_field: ListField = instance["number_indices"] # type: ignore
# The numbers in the passage are given signs, that's what we are labeling here.
# Negative signs are given the class label 2 (for 0 and 1, the sign matches the
# label).
labels = []
for label in sequence_labels:
if label["sign"] == -1:
labels.append(2)
else:
labels.append(label["sign"])
# There's a dummy number added in the dataset reader to handle passages with no
# numbers; it has a label of 0 (not included).
labels.append(0)
field = ListField([SequenceLabelField(labels, numbers_field)])
new_instance.add_field("answer_as_add_sub_expressions", field, self._model.vocab)
# When the answer is in the question
elif answer_type == "question_span":
span = outputs["answer"]["spans"][0]
# Convert character span indices into word span indices
word_span_start = None
word_span_end = None
question_offsets = new_instance["metadata"].metadata[ # type: ignore
"question_token_offsets"
]
for index, offset in enumerate(question_offsets):
if offset[0] == span[0]:
word_span_start = index
if offset[1] == span[1]:
word_span_end = index
question_field: SequenceField = new_instance["question"] # type: ignore
field = ListField([SpanField(word_span_start, word_span_end, question_field)])
new_instance.add_field("answer_as_question_spans", field, self._model.vocab)
return [new_instance]
| allennlp-models-main | allennlp_models/rc/predictors/bidaf.py |
import logging
from typing import List, Dict, Any, Optional
import numpy
from allennlp.models import Model
from allennlp.common.util import JsonDict, sanitize
from allennlp.data import Instance, DatasetReader
from allennlp.predictors.predictor import Predictor
from allennlp.data.fields import SpanField
logger = logging.getLogger(__name__)
@Predictor.register("transformer_qa")
class TransformerQAPredictor(Predictor):
"""
Predictor for the [`TransformerQA`](/models/rc/models/transformer_qa#transformer_qa) model,
and any other model that takes a question and passage as input.
"""
def __init__(self, model: Model, dataset_reader: DatasetReader) -> None:
super(TransformerQAPredictor, self).__init__(model, dataset_reader)
self._next_qid = 1
def predict(self, question: str, passage: str) -> JsonDict:
"""
Make a machine comprehension prediction on the supplied input.
See [https://rajpurkar.github.io/SQuAD-explorer/](https://rajpurkar.github.io/SQuAD-explorer/)
for more information about the machine comprehension task.
# Parameters
question : `str`
A question about the content in the supplied paragraph.
passage : `str`
A paragraph of information relevant to the question.
# Returns
`JsonDict`
A dictionary that represents the prediction made by the system.
The answer string will be under the `"best_span_str"` key.
"""
return self.predict_json({"context": passage, "question": question})
def predict_json(self, inputs: JsonDict) -> JsonDict:
results = self.predict_batch_json([inputs])
assert len(results) == 1
return results[0]
def predictions_to_labeled_instances(
self, instance: Instance, outputs: Dict[str, numpy.ndarray]
) -> List[Instance]:
new_instance = instance.duplicate()
span_start = int(outputs["best_span"][0])
span_end = int(outputs["best_span"][1])
start_of_context = (
len(self._dataset_reader._tokenizer.sequence_pair_start_tokens)
+ len(instance["metadata"]["question_tokens"])
+ len(self._dataset_reader._tokenizer.sequence_pair_mid_tokens)
)
answer_span = SpanField(
start_of_context + span_start,
start_of_context + span_end,
instance["question_with_context"],
)
new_instance.add_field("answer_span", answer_span)
return [new_instance]
def _json_to_instance(self, json_dict: JsonDict) -> Instance:
logger.warning(
"This method is implemented only for use in interpret modules."
"The predictor maps a question to multiple instances. "
"Please use _json_to_instances instead for all non-interpret uses. "
)
return self._json_to_instances(json_dict, qid=-1)[0]
def _json_to_instances(self, json_dict: JsonDict, qid: Optional[int] = None) -> List[Instance]:
# We allow the passage / context to be specified with either key.
# But we do it this way so that a 'KeyError: context' exception will be raised
# when neither key is specified, since the 'context' key is the default and
# the 'passage' key was only added to be compatible with the input for other
# RC models.
# if `qid` is `None`, it is updated using self._next_qid
context = json_dict["passage"] if "passage" in json_dict else json_dict["context"]
result: List[Instance] = []
question_id = qid or self._next_qid
for instance in self._dataset_reader.make_instances(
qid=str(question_id),
question=json_dict["question"],
answers=[],
context=context,
first_answer_offset=None,
is_training=False,
):
self._dataset_reader.apply_token_indexers(instance)
result.append(instance)
if qid is None:
self._next_qid += 1
return result
def _batch_json_to_instances(self, json_dicts: List[JsonDict]) -> List[Instance]:
instances = []
for json_dict in json_dicts:
instances.extend(self._json_to_instances(json_dict))
return instances
def predict_batch_json(self, inputs: List[JsonDict]) -> List[JsonDict]:
instances = self._batch_json_to_instances(inputs)
result = self.predict_batch_instance(instances)
assert len(result) == len(inputs)
return result
def predict_batch_instance(self, instances: List[Instance]) -> List[JsonDict]:
outputs = self._model.forward_on_instances(instances)
# group outputs with the same question id
qid_to_output: Dict[str, Dict[str, Any]] = {}
for instance, output in zip(instances, outputs):
qid = instance["metadata"]["id"]
output["id"] = qid
output["context_tokens"] = instance["metadata"]["context_tokens"]
if instance["metadata"]["answers"]:
output["answers"] = instance["metadata"]["answers"]
if qid in qid_to_output:
old_output = qid_to_output[qid]
if old_output["best_span_scores"] < output["best_span_scores"]:
qid_to_output[qid] = output
else:
qid_to_output[qid] = output
return [sanitize(o) for o in qid_to_output.values()]
| allennlp-models-main | allennlp_models/rc/predictors/transformer_qa.py |
import logging
from typing import Any, Dict, List, Optional
import numpy as np
import torch
import torch.nn.functional as F
from torch.nn.functional import nll_loss
from allennlp.common.checks import check_dimensions_match
from allennlp.data import Vocabulary
from allennlp.models.model import Model
from allennlp.modules import Seq2SeqEncoder, TimeDistributed, TextFieldEmbedder
from allennlp.modules.input_variational_dropout import InputVariationalDropout
from allennlp.modules.matrix_attention.linear_matrix_attention import LinearMatrixAttention
from allennlp.nn import InitializerApplicator, util
from allennlp.training.metrics import Average, BooleanAccuracy, CategoricalAccuracy
from allennlp_models.rc.tools import squad
from allennlp_models.rc.models.utils import replace_masked_values_with_big_negative_number
logger = logging.getLogger(__name__)
@Model.register("dialog_qa")
class DialogQA(Model):
"""
    This class implements a modified version of the BiDAF model
    (with self-attention and a residual layer, from the Clark and Gardner ACL 2017 paper), as used in
    the Question Answering in Context (EMNLP 2018) paper [https://arxiv.org/pdf/1808.07036.pdf].
    In this set-up, a single instance is a dialog: a list of question-answer pairs.
Parameters
----------
vocab : ``Vocabulary``
text_field_embedder : ``TextFieldEmbedder``
Used to embed the ``question`` and ``passage`` ``TextFields`` we get as input to the model.
phrase_layer : ``Seq2SeqEncoder``
The encoder (with its own internal stacking) that we will use in between embedding tokens
and doing the bidirectional attention.
span_start_encoder : ``Seq2SeqEncoder``
The encoder that we will use to incorporate span start predictions into the passage state
before predicting span end.
span_end_encoder : ``Seq2SeqEncoder``
The encoder that we will use to incorporate span end predictions into the passage state.
dropout : ``float``, optional (default=0.2)
If greater than 0, we will apply dropout with this probability after all encoders (pytorch
LSTMs do not apply dropout to their last layer).
num_context_answers : ``int``, optional (default=0)
If greater than 0, the model will consider previous question answering context.
    max_span_length: ``int``, optional (default=30)
        Maximum token length of the output span.
max_turn_length: ``int``, optional (default=12)
Maximum length of an interaction.
"""
def __init__(
self,
vocab: Vocabulary,
text_field_embedder: TextFieldEmbedder,
phrase_layer: Seq2SeqEncoder,
residual_encoder: Seq2SeqEncoder,
span_start_encoder: Seq2SeqEncoder,
span_end_encoder: Seq2SeqEncoder,
initializer: Optional[InitializerApplicator] = None,
dropout: float = 0.2,
num_context_answers: int = 0,
marker_embedding_dim: int = 10,
max_span_length: int = 30,
max_turn_length: int = 12,
) -> None:
super().__init__(vocab)
self._num_context_answers = num_context_answers
self._max_span_length = max_span_length
self._text_field_embedder = text_field_embedder
self._phrase_layer = phrase_layer
self._marker_embedding_dim = marker_embedding_dim
self._encoding_dim = phrase_layer.get_output_dim()
self._matrix_attention = LinearMatrixAttention(
self._encoding_dim, self._encoding_dim, "x,y,x*y"
)
self._merge_atten = TimeDistributed(
torch.nn.Linear(self._encoding_dim * 4, self._encoding_dim)
)
self._residual_encoder = residual_encoder
if num_context_answers > 0:
self._question_num_marker = torch.nn.Embedding(
max_turn_length, marker_embedding_dim * num_context_answers
)
self._prev_ans_marker = torch.nn.Embedding(
(num_context_answers * 4) + 1, marker_embedding_dim
)
self._self_attention = LinearMatrixAttention(
self._encoding_dim, self._encoding_dim, "x,y,x*y"
)
self._followup_lin = torch.nn.Linear(self._encoding_dim, 3)
self._merge_self_attention = TimeDistributed(
torch.nn.Linear(self._encoding_dim * 3, self._encoding_dim)
)
self._span_start_encoder = span_start_encoder
self._span_end_encoder = span_end_encoder
self._span_start_predictor = TimeDistributed(torch.nn.Linear(self._encoding_dim, 1))
self._span_end_predictor = TimeDistributed(torch.nn.Linear(self._encoding_dim, 1))
self._span_yesno_predictor = TimeDistributed(torch.nn.Linear(self._encoding_dim, 3))
self._span_followup_predictor = TimeDistributed(self._followup_lin)
check_dimensions_match(
phrase_layer.get_input_dim(),
text_field_embedder.get_output_dim() + marker_embedding_dim * num_context_answers,
"phrase layer input dim",
"embedding dim + marker dim * num context answers",
)
if initializer is not None:
initializer(self)
self._span_start_accuracy = CategoricalAccuracy()
self._span_end_accuracy = CategoricalAccuracy()
self._span_yesno_accuracy = CategoricalAccuracy()
self._span_followup_accuracy = CategoricalAccuracy()
self._span_gt_yesno_accuracy = CategoricalAccuracy()
self._span_gt_followup_accuracy = CategoricalAccuracy()
self._span_accuracy = BooleanAccuracy()
self._official_f1 = Average()
self._variational_dropout = InputVariationalDropout(dropout)
def forward( # type: ignore
self,
question: Dict[str, torch.LongTensor],
passage: Dict[str, torch.LongTensor],
span_start: torch.IntTensor = None,
span_end: torch.IntTensor = None,
p1_answer_marker: torch.IntTensor = None,
p2_answer_marker: torch.IntTensor = None,
p3_answer_marker: torch.IntTensor = None,
yesno_list: torch.IntTensor = None,
followup_list: torch.IntTensor = None,
metadata: List[Dict[str, Any]] = None,
) -> Dict[str, torch.Tensor]:
"""
Parameters
----------
question : Dict[str, torch.LongTensor]
From a ``TextField``.
passage : Dict[str, torch.LongTensor]
From a ``TextField``. The model assumes that this passage contains the answer to the
question, and predicts the beginning and ending positions of the answer within the
passage.
span_start : ``torch.IntTensor``, optional
From an ``IndexField``. This is one of the things we are trying to predict - the
beginning position of the answer with the passage. This is an `inclusive` token index.
If this is given, we will compute a loss that gets included in the output dictionary.
span_end : ``torch.IntTensor``, optional
From an ``IndexField``. This is one of the things we are trying to predict - the
ending position of the answer with the passage. This is an `inclusive` token index.
If this is given, we will compute a loss that gets included in the output dictionary.
p1_answer_marker : ``torch.IntTensor``, optional
This is one of the inputs, but only when num_context_answers > 0.
This is a tensor that has a shape [batch_size, max_qa_count, max_passage_length].
            Most passage tokens will be assigned 'O', except for the passage tokens that belong to
            the previous answer in the dialog, which are assigned labels such as <1_start>, <1_in>, <1_end>.
            For more details, look into dataset_readers/util/make_reading_comprehension_instance_quac
        p2_answer_marker : ``torch.IntTensor``, optional
            This is one of the inputs, but only when num_context_answers > 1.
            It is similar to p1_answer_marker, but marks the answer from two turns back in the passage.
        p3_answer_marker : ``torch.IntTensor``, optional
            This is one of the inputs, but only when num_context_answers > 2.
            It is similar to p1_answer_marker, but marks the answer from three turns back in the passage.
yesno_list : ``torch.IntTensor``, optional
This is one of the outputs that we are trying to predict.
Three way classification (the yes/no/not a yes no question).
followup_list : ``torch.IntTensor``, optional
This is one of the outputs that we are trying to predict.
Three way classification (followup / maybe followup / don't followup).
metadata : ``List[Dict[str, Any]]``, optional
If present, this should contain the question ID, original passage text, and token
offsets into the passage for each instance in the batch. We use this for computing
official metrics using the official SQuAD evaluation script. The length of this list
should be the batch size, and each dictionary should have the keys ``id``,
``original_passage``, and ``token_offsets``. If you only want the best span string and
don't care about official metrics, you can omit the ``id`` key.
Returns
-------
        An output dictionary consisting of the following entries.
        Each entry is a nested list: the outer list iterates over dialogs, and the inner list over
        the questions within each dialog.
        qid : List[List[str]]
            A list of lists of question ids.
        followup : List[List[int]]
            A list of lists of continuation marker prediction indices.
            (y: yes, m: maybe follow up, n: don't follow up)
        yesno : List[List[int]]
            A list of lists of affirmation marker prediction indices.
            (y: yes, x: not a yes/no question, n: no)
best_span_str : List[List[str]]
If sufficient metadata was provided for the instances in the batch, we also return the
string from the original passage that the model thinks is the best answer to the
question.
loss : torch.FloatTensor, optional
A scalar loss to be optimised.
"""
token_character_ids = question["token_characters"]["token_characters"]
batch_size, max_qa_count, max_q_len, _ = token_character_ids.size()
total_qa_count = batch_size * max_qa_count
qa_mask = torch.ge(followup_list, 0).view(total_qa_count)
embedded_question = self._text_field_embedder(question, num_wrapping_dims=1)
embedded_question = embedded_question.reshape(
total_qa_count, max_q_len, self._text_field_embedder.get_output_dim()
)
embedded_question = self._variational_dropout(embedded_question)
embedded_passage = self._variational_dropout(self._text_field_embedder(passage))
passage_length = embedded_passage.size(1)
question_mask = util.get_text_field_mask(question, num_wrapping_dims=1)
question_mask = question_mask.reshape(total_qa_count, max_q_len)
passage_mask = util.get_text_field_mask(passage)
repeated_passage_mask = passage_mask.unsqueeze(1).repeat(1, max_qa_count, 1)
repeated_passage_mask = repeated_passage_mask.view(total_qa_count, passage_length)
if self._num_context_answers > 0:
# Encode question turn number inside the dialog into question embedding.
question_num_ind = util.get_range_vector(
max_qa_count, util.get_device_of(embedded_question)
)
question_num_ind = question_num_ind.unsqueeze(-1).repeat(1, max_q_len)
question_num_ind = question_num_ind.unsqueeze(0).repeat(batch_size, 1, 1)
question_num_ind = question_num_ind.reshape(total_qa_count, max_q_len)
question_num_marker_emb = self._question_num_marker(question_num_ind)
embedded_question = torch.cat([embedded_question, question_num_marker_emb], dim=-1)
# Encode the previous answers in passage embedding.
repeated_embedded_passage = (
embedded_passage.unsqueeze(1)
.repeat(1, max_qa_count, 1, 1)
.view(total_qa_count, passage_length, self._text_field_embedder.get_output_dim())
)
# batch_size * max_qa_count, passage_length, word_embed_dim
p1_answer_marker = p1_answer_marker.view(total_qa_count, passage_length)
p1_answer_marker_emb = self._prev_ans_marker(p1_answer_marker)
repeated_embedded_passage = torch.cat(
[repeated_embedded_passage, p1_answer_marker_emb], dim=-1
)
if self._num_context_answers > 1:
p2_answer_marker = p2_answer_marker.view(total_qa_count, passage_length)
p2_answer_marker_emb = self._prev_ans_marker(p2_answer_marker)
repeated_embedded_passage = torch.cat(
[repeated_embedded_passage, p2_answer_marker_emb], dim=-1
)
if self._num_context_answers > 2:
p3_answer_marker = p3_answer_marker.view(total_qa_count, passage_length)
p3_answer_marker_emb = self._prev_ans_marker(p3_answer_marker)
repeated_embedded_passage = torch.cat(
[repeated_embedded_passage, p3_answer_marker_emb], dim=-1
)
repeated_encoded_passage = self._variational_dropout(
self._phrase_layer(repeated_embedded_passage, repeated_passage_mask)
)
else:
encoded_passage = self._variational_dropout(
self._phrase_layer(embedded_passage, passage_mask)
)
repeated_encoded_passage = encoded_passage.unsqueeze(1).repeat(1, max_qa_count, 1, 1)
repeated_encoded_passage = repeated_encoded_passage.view(
total_qa_count, passage_length, self._encoding_dim
)
encoded_question = self._variational_dropout(
self._phrase_layer(embedded_question, question_mask)
)
# Shape: (batch_size * max_qa_count, passage_length, question_length)
passage_question_similarity = self._matrix_attention(
repeated_encoded_passage, encoded_question
)
# Shape: (batch_size * max_qa_count, passage_length, question_length)
passage_question_attention = util.masked_softmax(passage_question_similarity, question_mask)
# Shape: (batch_size * max_qa_count, passage_length, encoding_dim)
passage_question_vectors = util.weighted_sum(encoded_question, passage_question_attention)
# We replace masked values with something really negative here, so they don't affect the
# max below.
masked_similarity = replace_masked_values_with_big_negative_number(
passage_question_similarity, question_mask.unsqueeze(1)
)
question_passage_similarity = masked_similarity.max(dim=-1)[0].squeeze(-1)
question_passage_attention = util.masked_softmax(
question_passage_similarity, repeated_passage_mask
)
# Shape: (batch_size * max_qa_count, encoding_dim)
question_passage_vector = util.weighted_sum(
repeated_encoded_passage, question_passage_attention
)
tiled_question_passage_vector = question_passage_vector.unsqueeze(1).expand(
total_qa_count, passage_length, self._encoding_dim
)
# Shape: (batch_size * max_qa_count, passage_length, encoding_dim * 4)
final_merged_passage = torch.cat(
[
repeated_encoded_passage,
passage_question_vectors,
repeated_encoded_passage * passage_question_vectors,
repeated_encoded_passage * tiled_question_passage_vector,
],
dim=-1,
)
final_merged_passage = F.relu(self._merge_atten(final_merged_passage))
residual_layer = self._variational_dropout(
self._residual_encoder(final_merged_passage, repeated_passage_mask)
)
self_attention_matrix = self._self_attention(residual_layer, residual_layer)
mask = repeated_passage_mask.reshape(
total_qa_count, passage_length, 1
) * repeated_passage_mask.reshape(total_qa_count, 1, passage_length)
self_mask = torch.eye(
passage_length, passage_length, dtype=torch.bool, device=self_attention_matrix.device
)
self_mask = self_mask.reshape(1, passage_length, passage_length)
mask = mask & ~self_mask
self_attention_probs = util.masked_softmax(self_attention_matrix, mask)
# (batch, passage_len, passage_len) * (batch, passage_len, dim) -> (batch, passage_len, dim)
self_attention_vecs = torch.matmul(self_attention_probs, residual_layer)
self_attention_vecs = torch.cat(
[self_attention_vecs, residual_layer, residual_layer * self_attention_vecs], dim=-1
)
residual_layer = F.relu(self._merge_self_attention(self_attention_vecs))
final_merged_passage = final_merged_passage + residual_layer
# batch_size * maxqa_pair_len * max_passage_len * 200
final_merged_passage = self._variational_dropout(final_merged_passage)
start_rep = self._span_start_encoder(final_merged_passage, repeated_passage_mask)
span_start_logits = self._span_start_predictor(start_rep).squeeze(-1)
end_rep = self._span_end_encoder(
torch.cat([final_merged_passage, start_rep], dim=-1), repeated_passage_mask
)
span_end_logits = self._span_end_predictor(end_rep).squeeze(-1)
span_yesno_logits = self._span_yesno_predictor(end_rep).squeeze(-1)
span_followup_logits = self._span_followup_predictor(end_rep).squeeze(-1)
span_start_logits = replace_masked_values_with_big_negative_number(
span_start_logits, repeated_passage_mask
)
# batch_size * maxqa_len_pair, max_document_len
span_end_logits = replace_masked_values_with_big_negative_number(
span_end_logits, repeated_passage_mask
)
best_span = self._get_best_span_yesno_followup(
span_start_logits,
span_end_logits,
span_yesno_logits,
span_followup_logits,
self._max_span_length,
)
output_dict: Dict[str, Any] = {}
# Compute the loss.
if span_start is not None:
loss = nll_loss(
util.masked_log_softmax(span_start_logits, repeated_passage_mask),
span_start.view(-1),
ignore_index=-1,
)
self._span_start_accuracy(span_start_logits, span_start.view(-1), mask=qa_mask)
loss += nll_loss(
util.masked_log_softmax(span_end_logits, repeated_passage_mask),
span_end.view(-1),
ignore_index=-1,
)
self._span_end_accuracy(span_end_logits, span_end.view(-1), mask=qa_mask)
self._span_accuracy(
best_span[:, 0:2],
torch.stack([span_start, span_end], -1).view(total_qa_count, 2),
mask=qa_mask.unsqueeze(1).expand(-1, 2),
)
            # Select the yesno/followup logits at the gold span end positions to compute their losses.
gold_span_end_loc = []
span_end = span_end.view(total_qa_count).squeeze().data.cpu().numpy()
for i in range(0, total_qa_count):
gold_span_end_loc.append(max(span_end[i] * 3 + i * passage_length * 3, 0))
gold_span_end_loc.append(max(span_end[i] * 3 + i * passage_length * 3 + 1, 0))
gold_span_end_loc.append(max(span_end[i] * 3 + i * passage_length * 3 + 2, 0))
gold_span_end_loc = span_start.new(gold_span_end_loc)
pred_span_end_loc = []
for i in range(0, total_qa_count):
pred_span_end_loc.append(max(best_span[i][1] * 3 + i * passage_length * 3, 0))
pred_span_end_loc.append(max(best_span[i][1] * 3 + i * passage_length * 3 + 1, 0))
pred_span_end_loc.append(max(best_span[i][1] * 3 + i * passage_length * 3 + 2, 0))
predicted_end = span_start.new(pred_span_end_loc)
_yesno = span_yesno_logits.view(-1).index_select(0, gold_span_end_loc).view(-1, 3)
_followup = span_followup_logits.view(-1).index_select(0, gold_span_end_loc).view(-1, 3)
loss += nll_loss(F.log_softmax(_yesno, dim=-1), yesno_list.view(-1), ignore_index=-1)
loss += nll_loss(
F.log_softmax(_followup, dim=-1), followup_list.view(-1), ignore_index=-1
)
_yesno = span_yesno_logits.view(-1).index_select(0, predicted_end).view(-1, 3)
_followup = span_followup_logits.view(-1).index_select(0, predicted_end).view(-1, 3)
self._span_yesno_accuracy(_yesno, yesno_list.view(-1), mask=qa_mask)
self._span_followup_accuracy(_followup, followup_list.view(-1), mask=qa_mask)
output_dict["loss"] = loss
# Compute F1 and preparing the output dictionary.
output_dict["best_span_str"] = []
output_dict["qid"] = []
output_dict["followup"] = []
output_dict["yesno"] = []
best_span_cpu = best_span.detach().cpu().numpy()
for i in range(batch_size):
passage_str = metadata[i]["original_passage"]
offsets = metadata[i]["token_offsets"]
f1_score = 0.0
per_dialog_best_span_list = []
per_dialog_yesno_list = []
per_dialog_followup_list = []
per_dialog_query_id_list = []
for per_dialog_query_index, (iid, answer_texts) in enumerate(
zip(metadata[i]["instance_id"], metadata[i]["answer_texts_list"])
):
predicted_span = tuple(best_span_cpu[i * max_qa_count + per_dialog_query_index])
start_offset = offsets[predicted_span[0]][0]
end_offset = offsets[predicted_span[1]][1]
yesno_pred = predicted_span[2]
followup_pred = predicted_span[3]
per_dialog_yesno_list.append(yesno_pred)
per_dialog_followup_list.append(followup_pred)
per_dialog_query_id_list.append(iid)
best_span_string = passage_str[start_offset:end_offset]
per_dialog_best_span_list.append(best_span_string)
if answer_texts:
if len(answer_texts) > 1:
t_f1 = []
                        # Compute F1 over N-1 human references and average the scores.
for answer_index in range(len(answer_texts)):
idxes = list(range(len(answer_texts)))
idxes.pop(answer_index)
refs = [answer_texts[z] for z in idxes]
t_f1.append(
squad.metric_max_over_ground_truths(
squad.compute_f1, best_span_string, refs
)
)
f1_score = 1.0 * sum(t_f1) / len(t_f1)
else:
f1_score = squad.metric_max_over_ground_truths(
squad.compute_f1, best_span_string, answer_texts
)
self._official_f1(100 * f1_score)
output_dict["qid"].append(per_dialog_query_id_list)
output_dict["best_span_str"].append(per_dialog_best_span_list)
output_dict["yesno"].append(per_dialog_yesno_list)
output_dict["followup"].append(per_dialog_followup_list)
return output_dict
def make_output_human_readable(
self, output_dict: Dict[str, torch.Tensor]
) -> Dict[str, torch.Tensor]:
yesno_tags = [
[self.vocab.get_token_from_index(x, namespace="yesno_labels") for x in yn_list]
for yn_list in output_dict.pop("yesno")
]
followup_tags = [
[self.vocab.get_token_from_index(x, namespace="followup_labels") for x in followup_list]
for followup_list in output_dict.pop("followup")
]
output_dict["yesno"] = yesno_tags
output_dict["followup"] = followup_tags
return output_dict
def get_metrics(self, reset: bool = False) -> Dict[str, float]:
return {
"start_acc": self._span_start_accuracy.get_metric(reset),
"end_acc": self._span_end_accuracy.get_metric(reset),
"span_acc": self._span_accuracy.get_metric(reset),
"yesno": self._span_yesno_accuracy.get_metric(reset),
"followup": self._span_followup_accuracy.get_metric(reset),
"f1": self._official_f1.get_metric(reset),
}
@staticmethod
def _get_best_span_yesno_followup(
span_start_logits: torch.Tensor,
span_end_logits: torch.Tensor,
span_yesno_logits: torch.Tensor,
span_followup_logits: torch.Tensor,
max_span_length: int,
) -> torch.Tensor:
        # Returns the index of the highest-scoring span that is no longer than `max_span_length`
        # tokens, as well as the yesno and followup prediction bits from the predicted span end token.
if span_start_logits.dim() != 2 or span_end_logits.dim() != 2:
raise ValueError("Input shapes must be (batch_size, passage_length)")
batch_size, passage_length = span_start_logits.size()
max_span_log_prob = [util.min_value_of_dtype(span_start_logits.dtype)] * batch_size
span_start_argmax = [0] * batch_size
best_word_span = span_start_logits.new_zeros((batch_size, 4), dtype=torch.long)
span_start_logits = span_start_logits.data.cpu().numpy()
span_end_logits = span_end_logits.data.cpu().numpy()
span_yesno_logits = span_yesno_logits.data.cpu().numpy()
span_followup_logits = span_followup_logits.data.cpu().numpy()
for b_i in range(batch_size):
for j in range(passage_length):
val1 = span_start_logits[b_i, span_start_argmax[b_i]]
if val1 < span_start_logits[b_i, j]:
span_start_argmax[b_i] = j
val1 = span_start_logits[b_i, j]
val2 = span_end_logits[b_i, j]
if val1 + val2 > max_span_log_prob[b_i]:
if j - span_start_argmax[b_i] > max_span_length:
continue
best_word_span[b_i, 0] = span_start_argmax[b_i]
best_word_span[b_i, 1] = j
max_span_log_prob[b_i] = val1 + val2
for b_i in range(batch_size):
j = best_word_span[b_i, 1]
yesno_pred = np.argmax(span_yesno_logits[b_i, j])
followup_pred = np.argmax(span_followup_logits[b_i, j])
best_word_span[b_i, 2] = int(yesno_pred)
best_word_span[b_i, 3] = int(followup_pred)
return best_word_span
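    # Worked example of the greedy search above (illustrative): with start logits [0.1, 2.0, 0.5]
    # and end logits [0.3, 0.1, 1.5] for one instance, the running best start is token 1, and the
    # best combined score is reached at end token 2, so the returned span is (1, 2); the yesno and
    # followup bits are then read from the logits at end token 2.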
default_predictor = "dialog_qa"
| allennlp-models-main | allennlp_models/rc/models/dialog_qa.py |
from typing import Any, Dict, List, Optional
import torch
from torch.nn.functional import nll_loss
from allennlp.data import Vocabulary
from allennlp.models.model import Model
from allennlp.modules import Highway
from allennlp.modules import Seq2SeqEncoder, TextFieldEmbedder
from allennlp.modules.matrix_attention.matrix_attention import MatrixAttention
from allennlp.nn import util, InitializerApplicator, RegularizerApplicator
from allennlp.training.metrics import BooleanAccuracy, CategoricalAccuracy
from allennlp.nn.util import masked_softmax
from allennlp_models.rc.models.utils import (
get_best_span,
replace_masked_values_with_big_negative_number,
)
from allennlp_models.rc.metrics import SquadEmAndF1
@Model.register("qanet")
@Model.register("rc-qanet")
class QaNet(Model):
"""
This class implements Adams Wei Yu's `QANet Model <https://openreview.net/forum?id=B14TlG-RW>`_
for machine reading comprehension published at ICLR 2018.
The overall architecture of QANet is very similar to BiDAF. The main difference is that QANet
replaces the RNN encoder with CNN + self-attention. There are also some minor differences in the
modeling layer and output layer.
Parameters
----------
vocab : ``Vocabulary``
text_field_embedder : ``TextFieldEmbedder``
Used to embed the ``question`` and ``passage`` ``TextFields`` we get as input to the model.
num_highway_layers : ``int``
The number of highway layers to use in between embedding the input and passing it through
the phrase layer.
phrase_layer : ``Seq2SeqEncoder``
The encoder (with its own internal stacking) that we will use in between embedding tokens
and doing the passage-question attention.
matrix_attention_layer : ``MatrixAttention``
The matrix attention function that we will use when comparing encoded passage and question
representations.
modeling_layer : ``Seq2SeqEncoder``
The encoder (with its own internal stacking) that we will use in between the bidirectional
attention and predicting span start and end.
dropout_prob : ``float``, optional (default=0.1)
If greater than 0, we will apply dropout with this probability between layers.
initializer : ``InitializerApplicator``, optional (default=``InitializerApplicator()``)
Used to initialize the model parameters.
regularizer : ``RegularizerApplicator``, optional (default=``None``)
If provided, will be used to calculate the regularization penalty during training.
"""
def __init__(
self,
vocab: Vocabulary,
text_field_embedder: TextFieldEmbedder,
num_highway_layers: int,
phrase_layer: Seq2SeqEncoder,
matrix_attention_layer: MatrixAttention,
modeling_layer: Seq2SeqEncoder,
dropout_prob: float = 0.1,
initializer: InitializerApplicator = InitializerApplicator(),
regularizer: Optional[RegularizerApplicator] = None,
) -> None:
super().__init__(vocab, regularizer)
text_embed_dim = text_field_embedder.get_output_dim()
encoding_in_dim = phrase_layer.get_input_dim()
encoding_out_dim = phrase_layer.get_output_dim()
modeling_in_dim = modeling_layer.get_input_dim()
modeling_out_dim = modeling_layer.get_output_dim()
self._text_field_embedder = text_field_embedder
self._embedding_proj_layer = torch.nn.Linear(text_embed_dim, encoding_in_dim)
self._highway_layer = Highway(encoding_in_dim, num_highway_layers)
self._encoding_proj_layer = torch.nn.Linear(encoding_in_dim, encoding_in_dim, bias=False)
self._phrase_layer = phrase_layer
self._matrix_attention = matrix_attention_layer
self._modeling_proj_layer = torch.nn.Linear(
encoding_out_dim * 4, modeling_in_dim, bias=False
)
self._modeling_layer = modeling_layer
self._span_start_predictor = torch.nn.Linear(modeling_out_dim * 2, 1)
self._span_end_predictor = torch.nn.Linear(modeling_out_dim * 2, 1)
self._span_start_accuracy = CategoricalAccuracy()
self._span_end_accuracy = CategoricalAccuracy()
self._span_accuracy = BooleanAccuracy()
self._metrics = SquadEmAndF1()
self._dropout = torch.nn.Dropout(p=dropout_prob) if dropout_prob > 0 else lambda x: x
initializer(self)
def forward( # type: ignore
self,
question: Dict[str, torch.LongTensor],
passage: Dict[str, torch.LongTensor],
span_start: torch.IntTensor = None,
span_end: torch.IntTensor = None,
metadata: List[Dict[str, Any]] = None,
) -> Dict[str, torch.Tensor]:
"""
Parameters
----------
question : Dict[str, torch.LongTensor]
From a ``TextField``.
passage : Dict[str, torch.LongTensor]
From a ``TextField``. The model assumes that this passage contains the answer to the
question, and predicts the beginning and ending positions of the answer within the
passage.
span_start : ``torch.IntTensor``, optional
From an ``IndexField``. This is one of the things we are trying to predict - the
            beginning position of the answer within the passage. This is an `inclusive` token index.
If this is given, we will compute a loss that gets included in the output dictionary.
span_end : ``torch.IntTensor``, optional
From an ``IndexField``. This is one of the things we are trying to predict - the
            ending position of the answer within the passage. This is an `inclusive` token index.
If this is given, we will compute a loss that gets included in the output dictionary.
metadata : ``List[Dict[str, Any]]``, optional
If present, this should contain the question tokens, passage tokens, original passage
text, and token offsets into the passage for each instance in the batch. The length
of this list should be the batch size, and each dictionary should have the keys
``question_tokens``, ``passage_tokens``, ``original_passage``, and ``token_offsets``.
Returns
-------
An output dictionary consisting of:
span_start_logits : torch.FloatTensor
A tensor of shape ``(batch_size, passage_length)`` representing unnormalized log
probabilities of the span start position.
span_start_probs : torch.FloatTensor
The result of ``softmax(span_start_logits)``.
span_end_logits : torch.FloatTensor
A tensor of shape ``(batch_size, passage_length)`` representing unnormalized log
probabilities of the span end position (inclusive).
span_end_probs : torch.FloatTensor
The result of ``softmax(span_end_logits)``.
best_span : torch.IntTensor
The result of a constrained inference over ``span_start_logits`` and
``span_end_logits`` to find the most probable span. Shape is ``(batch_size, 2)``
and each offset is a token index.
loss : torch.FloatTensor, optional
A scalar loss to be optimised.
best_span_str : List[str]
If sufficient metadata was provided for the instances in the batch, we also return the
string from the original passage that the model thinks is the best answer to the
question.
"""
question_mask = util.get_text_field_mask(question)
passage_mask = util.get_text_field_mask(passage)
embedded_question = self._dropout(self._text_field_embedder(question))
embedded_passage = self._dropout(self._text_field_embedder(passage))
embedded_question = self._highway_layer(self._embedding_proj_layer(embedded_question))
embedded_passage = self._highway_layer(self._embedding_proj_layer(embedded_passage))
batch_size = embedded_question.size(0)
projected_embedded_question = self._encoding_proj_layer(embedded_question)
projected_embedded_passage = self._encoding_proj_layer(embedded_passage)
encoded_question = self._dropout(
self._phrase_layer(projected_embedded_question, question_mask)
)
encoded_passage = self._dropout(
self._phrase_layer(projected_embedded_passage, passage_mask)
)
# Shape: (batch_size, passage_length, question_length)
passage_question_similarity = self._matrix_attention(encoded_passage, encoded_question)
# Shape: (batch_size, passage_length, question_length)
passage_question_attention = masked_softmax(
passage_question_similarity, question_mask, memory_efficient=True
)
# Shape: (batch_size, passage_length, encoding_dim)
passage_question_vectors = util.weighted_sum(encoded_question, passage_question_attention)
# Shape: (batch_size, question_length, passage_length)
question_passage_attention = masked_softmax(
passage_question_similarity.transpose(1, 2), passage_mask, memory_efficient=True
)
# Shape: (batch_size, passage_length, passage_length)
attention_over_attention = torch.bmm(passage_question_attention, question_passage_attention)
# Shape: (batch_size, passage_length, encoding_dim)
passage_passage_vectors = util.weighted_sum(encoded_passage, attention_over_attention)
# Shape: (batch_size, passage_length, encoding_dim * 4)
merged_passage_attention_vectors = self._dropout(
torch.cat(
[
encoded_passage,
passage_question_vectors,
encoded_passage * passage_question_vectors,
encoded_passage * passage_passage_vectors,
],
dim=-1,
)
)
modeled_passage_list = [self._modeling_proj_layer(merged_passage_attention_vectors)]
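        # The shared modeling layer is applied three times; the resulting outputs (M0, M1, M2 in
        # the QANet paper) are paired up below: [M0; M1] predicts span starts, [M0; M2] span ends.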
for _ in range(3):
modeled_passage = self._dropout(
self._modeling_layer(modeled_passage_list[-1], passage_mask)
)
modeled_passage_list.append(modeled_passage)
        # Shape: (batch_size, passage_length, modeling_dim * 2)
span_start_input = torch.cat([modeled_passage_list[-3], modeled_passage_list[-2]], dim=-1)
# Shape: (batch_size, passage_length)
span_start_logits = self._span_start_predictor(span_start_input).squeeze(-1)
# Shape: (batch_size, passage_length, modeling_dim * 2)
span_end_input = torch.cat([modeled_passage_list[-3], modeled_passage_list[-1]], dim=-1)
span_end_logits = self._span_end_predictor(span_end_input).squeeze(-1)
span_start_logits = replace_masked_values_with_big_negative_number(
span_start_logits, passage_mask
)
span_end_logits = replace_masked_values_with_big_negative_number(
span_end_logits, passage_mask
)
# Shape: (batch_size, passage_length)
span_start_probs = torch.nn.functional.softmax(span_start_logits, dim=-1)
span_end_probs = torch.nn.functional.softmax(span_end_logits, dim=-1)
best_span = get_best_span(span_start_logits, span_end_logits)
output_dict = {
"passage_question_attention": passage_question_attention,
"span_start_logits": span_start_logits,
"span_start_probs": span_start_probs,
"span_end_logits": span_end_logits,
"span_end_probs": span_end_probs,
"best_span": best_span,
}
# Compute the loss for training.
if span_start is not None:
loss = nll_loss(
util.masked_log_softmax(span_start_logits, passage_mask), span_start.squeeze(-1)
)
self._span_start_accuracy(span_start_logits, span_start.squeeze(-1))
loss += nll_loss(
util.masked_log_softmax(span_end_logits, passage_mask), span_end.squeeze(-1)
)
self._span_end_accuracy(span_end_logits, span_end.squeeze(-1))
self._span_accuracy(best_span, torch.cat([span_start, span_end], -1))
output_dict["loss"] = loss
# Compute the EM and F1 on SQuAD and add the tokenized input to the output.
if metadata is not None:
output_dict["best_span_str"] = []
question_tokens = []
passage_tokens = []
for i in range(batch_size):
question_tokens.append(metadata[i]["question_tokens"])
passage_tokens.append(metadata[i]["passage_tokens"])
passage_str = metadata[i]["original_passage"]
offsets = metadata[i]["token_offsets"]
predicted_span = tuple(best_span[i].detach().cpu().numpy())
start_offset = offsets[predicted_span[0]][0]
end_offset = offsets[predicted_span[1]][1]
best_span_string = passage_str[start_offset:end_offset]
output_dict["best_span_str"].append(best_span_string)
answer_texts = metadata[i].get("answer_texts", [])
if answer_texts:
self._metrics(best_span_string, answer_texts)
output_dict["question_tokens"] = question_tokens
output_dict["passage_tokens"] = passage_tokens
return output_dict
def get_metrics(self, reset: bool = False) -> Dict[str, float]:
exact_match, f1_score = self._metrics.get_metric(reset)
return {
"start_acc": self._span_start_accuracy.get_metric(reset),
"end_acc": self._span_end_accuracy.get_metric(reset),
"span_acc": self._span_accuracy.get_metric(reset),
"em": exact_match,
"f1": f1_score,
}
default_predictor = "reading_comprehension"
| allennlp-models-main | allennlp_models/rc/models/qanet.py |
from allennlp_models.rc.models.bidaf_ensemble import BidafEnsemble
from allennlp_models.rc.models.bidaf import BidirectionalAttentionFlow
from allennlp_models.rc.models.dialog_qa import DialogQA
from allennlp_models.rc.models.naqanet import NumericallyAugmentedQaNet
from allennlp_models.rc.models.qanet import QaNet
from allennlp_models.rc.models.transformer_qa import TransformerQA
from allennlp_models.rc.models.utils import get_best_span
| allennlp-models-main | allennlp_models/rc/models/__init__.py |
import logging
from typing import Any, Dict, List, Optional
import torch
from torch.nn.functional import nll_loss
from allennlp.common.checks import check_dimensions_match
from allennlp.data import Vocabulary
from allennlp.models.model import Model
from allennlp.modules import Highway
from allennlp.modules import Seq2SeqEncoder, TimeDistributed, TextFieldEmbedder
from allennlp.modules.matrix_attention import MatrixAttention
from allennlp.nn import util, InitializerApplicator, RegularizerApplicator
from allennlp.training.metrics import BooleanAccuracy, CategoricalAccuracy
from allennlp_models.rc.metrics import SquadEmAndF1
from allennlp_models.rc.models.utils import (
get_best_span,
replace_masked_values_with_big_negative_number,
)
logger = logging.getLogger(__name__)
@Model.register("bidaf")
class BidirectionalAttentionFlow(Model):
"""
This class implements Minjoon Seo's [Bidirectional Attention Flow model]
(https://www.semanticscholar.org/paper/Bidirectional-Attention-Flow-for-Machine-Seo-Kembhavi/7586b7cca1deba124af80609327395e613a20e9d)
for answering reading comprehension questions (ICLR 2017).
The basic layout is pretty simple: encode words as a combination of word embeddings and a
character-level encoder, pass the word representations through a bi-LSTM/GRU, use a matrix of
attentions to put question information into the passage word representations (this is the only
part that is at all non-standard), pass this through another few layers of bi-LSTMs/GRUs, and
do a softmax over span start and span end.
# Parameters
vocab : `Vocabulary`
text_field_embedder : `TextFieldEmbedder`
Used to embed the ``question`` and ``passage`` ``TextFields`` we get as input to the model.
num_highway_layers : `int`
The number of highway layers to use in between embedding the input and passing it through
the phrase layer.
phrase_layer : `Seq2SeqEncoder`
The encoder (with its own internal stacking) that we will use in between embedding tokens
and doing the bidirectional attention.
matrix_attention : `MatrixAttention`
The attention function that we will use when comparing encoded passage and question
representations.
modeling_layer : `Seq2SeqEncoder`
The encoder (with its own internal stacking) that we will use in between the bidirectional
attention and predicting span start and end.
span_end_encoder : `Seq2SeqEncoder`
The encoder that we will use to incorporate span start predictions into the passage state
before predicting span end.
dropout : `float`, optional (default=`0.2`)
If greater than 0, we will apply dropout with this probability after all encoders (pytorch
LSTMs do not apply dropout to their last layer).
mask_lstms : `bool`, optional (default=`True`)
If ``False``, we will skip passing the mask to the LSTM layers. This gives a ~2x speedup,
with only a slight performance decrease, if any. We haven't experimented much with this
yet, but have confirmed that we still get very similar performance with much faster
training times. We still use the mask for all softmaxes, but avoid the shuffling that's
required when using masking with pytorch LSTMs.
initializer : `InitializerApplicator`, optional (default=`InitializerApplicator()`)
Used to initialize the model parameters.
regularizer : `RegularizerApplicator`, optional (default=`None`)
If provided, will be used to calculate the regularization penalty during training.
"""
def __init__(
self,
vocab: Vocabulary,
text_field_embedder: TextFieldEmbedder,
num_highway_layers: int,
phrase_layer: Seq2SeqEncoder,
matrix_attention: MatrixAttention,
modeling_layer: Seq2SeqEncoder,
span_end_encoder: Seq2SeqEncoder,
dropout: float = 0.2,
mask_lstms: bool = True,
initializer: InitializerApplicator = InitializerApplicator(),
regularizer: Optional[RegularizerApplicator] = None,
) -> None:
super().__init__(vocab, regularizer)
self._text_field_embedder = text_field_embedder
self._highway_layer = TimeDistributed(
Highway(text_field_embedder.get_output_dim(), num_highway_layers)
)
self._phrase_layer = phrase_layer
self._matrix_attention = matrix_attention
self._modeling_layer = modeling_layer
self._span_end_encoder = span_end_encoder
encoding_dim = phrase_layer.get_output_dim()
modeling_dim = modeling_layer.get_output_dim()
span_start_input_dim = encoding_dim * 4 + modeling_dim
self._span_start_predictor = TimeDistributed(torch.nn.Linear(span_start_input_dim, 1))
span_end_encoding_dim = span_end_encoder.get_output_dim()
span_end_input_dim = encoding_dim * 4 + span_end_encoding_dim
self._span_end_predictor = TimeDistributed(torch.nn.Linear(span_end_input_dim, 1))
# Bidaf has lots of layer dimensions which need to match up - these aren't necessarily
# obvious from the configuration files, so we check here.
check_dimensions_match(
modeling_layer.get_input_dim(),
4 * encoding_dim,
"modeling layer input dim",
"4 * encoding dim",
)
check_dimensions_match(
text_field_embedder.get_output_dim(),
phrase_layer.get_input_dim(),
"text field embedder output dim",
"phrase layer input dim",
)
check_dimensions_match(
span_end_encoder.get_input_dim(),
4 * encoding_dim + 3 * modeling_dim,
"span end encoder input dim",
"4 * encoding dim + 3 * modeling dim",
)
self._span_start_accuracy = CategoricalAccuracy()
self._span_end_accuracy = CategoricalAccuracy()
self._span_accuracy = BooleanAccuracy()
self._squad_metrics = SquadEmAndF1()
if dropout > 0:
self._dropout = torch.nn.Dropout(p=dropout)
else:
self._dropout = lambda x: x
self._mask_lstms = mask_lstms
initializer(self)
def forward( # type: ignore
self,
question: Dict[str, torch.LongTensor],
passage: Dict[str, torch.LongTensor],
span_start: torch.IntTensor = None,
span_end: torch.IntTensor = None,
metadata: List[Dict[str, Any]] = None,
) -> Dict[str, torch.Tensor]:
"""
# Parameters
question : `Dict[str, torch.LongTensor]`
From a ``TextField``.
passage : `Dict[str, torch.LongTensor]`
From a ``TextField``. The model assumes that this passage contains the answer to the
question, and predicts the beginning and ending positions of the answer within the
passage.
span_start : `torch.IntTensor`, optional
From an ``IndexField``. This is one of the things we are trying to predict - the
            beginning position of the answer within the passage. This is an `inclusive` token index.
If this is given, we will compute a loss that gets included in the output dictionary.
span_end : `torch.IntTensor`, optional
From an ``IndexField``. This is one of the things we are trying to predict - the
            ending position of the answer within the passage. This is an `inclusive` token index.
If this is given, we will compute a loss that gets included in the output dictionary.
        metadata : `List[Dict[str, Any]]`, optional
If present, this should contain the question tokens, passage tokens, original passage
text, and token offsets into the passage for each instance in the batch. The length
of this list should be the batch size, and each dictionary should have the keys
``question_tokens``, ``passage_tokens``, ``original_passage``, and ``token_offsets``.
Returns
-------
An output dictionary consisting of:
span_start_logits : `torch.FloatTensor`
A tensor of shape ``(batch_size, passage_length)`` representing unnormalized log
probabilities of the span start position.
span_start_probs : `torch.FloatTensor`
The result of ``softmax(span_start_logits)``.
span_end_logits : `torch.FloatTensor`
A tensor of shape ``(batch_size, passage_length)`` representing unnormalized log
probabilities of the span end position (inclusive).
span_end_probs : `torch.FloatTensor`
The result of ``softmax(span_end_logits)``.
best_span : `torch.IntTensor`
The result of a constrained inference over ``span_start_logits`` and
``span_end_logits`` to find the most probable span. Shape is ``(batch_size, 2)``
and each offset is a token index.
loss : `torch.FloatTensor`, optional
A scalar loss to be optimised.
best_span_str : `List[str]`
If sufficient metadata was provided for the instances in the batch, we also return the
string from the original passage that the model thinks is the best answer to the
question.
"""
embedded_question = self._highway_layer(self._text_field_embedder(question))
embedded_passage = self._highway_layer(self._text_field_embedder(passage))
batch_size = embedded_question.size(0)
passage_length = embedded_passage.size(1)
question_mask = util.get_text_field_mask(question)
passage_mask = util.get_text_field_mask(passage)
question_lstm_mask = question_mask if self._mask_lstms else None
passage_lstm_mask = passage_mask if self._mask_lstms else None
encoded_question = self._dropout(self._phrase_layer(embedded_question, question_lstm_mask))
encoded_passage = self._dropout(self._phrase_layer(embedded_passage, passage_lstm_mask))
encoding_dim = encoded_question.size(-1)
# Shape: (batch_size, passage_length, question_length)
passage_question_similarity = self._matrix_attention(encoded_passage, encoded_question)
# Shape: (batch_size, passage_length, question_length)
passage_question_attention = util.masked_softmax(passage_question_similarity, question_mask)
# Shape: (batch_size, passage_length, encoding_dim)
passage_question_vectors = util.weighted_sum(encoded_question, passage_question_attention)
# We replace masked values with something really negative here, so they don't affect the
# max below.
masked_similarity = replace_masked_values_with_big_negative_number(
passage_question_similarity, question_mask.unsqueeze(1)
)
# Shape: (batch_size, passage_length)
question_passage_similarity = masked_similarity.max(dim=-1)[0].squeeze(-1)
# Shape: (batch_size, passage_length)
question_passage_attention = util.masked_softmax(question_passage_similarity, passage_mask)
# Shape: (batch_size, encoding_dim)
question_passage_vector = util.weighted_sum(encoded_passage, question_passage_attention)
# Shape: (batch_size, passage_length, encoding_dim)
tiled_question_passage_vector = question_passage_vector.unsqueeze(1).expand(
batch_size, passage_length, encoding_dim
)
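        # This concatenation is the query-aware passage representation ("G" in the BiDAF paper).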
# Shape: (batch_size, passage_length, encoding_dim * 4)
final_merged_passage = torch.cat(
[
encoded_passage,
passage_question_vectors,
encoded_passage * passage_question_vectors,
encoded_passage * tiled_question_passage_vector,
],
dim=-1,
)
modeled_passage = self._dropout(
self._modeling_layer(final_merged_passage, passage_lstm_mask)
)
modeling_dim = modeled_passage.size(-1)
        # Shape: (batch_size, passage_length, encoding_dim * 4 + modeling_dim)
span_start_input = self._dropout(torch.cat([final_merged_passage, modeled_passage], dim=-1))
# Shape: (batch_size, passage_length)
span_start_logits = self._span_start_predictor(span_start_input).squeeze(-1)
# Shape: (batch_size, passage_length)
span_start_probs = util.masked_softmax(span_start_logits, passage_mask)
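        # Summarize the modeled passage with the span-start distribution; this summary
        # conditions the span-end prediction below.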
# Shape: (batch_size, modeling_dim)
span_start_representation = util.weighted_sum(modeled_passage, span_start_probs)
# Shape: (batch_size, passage_length, modeling_dim)
tiled_start_representation = span_start_representation.unsqueeze(1).expand(
batch_size, passage_length, modeling_dim
)
# Shape: (batch_size, passage_length, encoding_dim * 4 + modeling_dim * 3)
span_end_representation = torch.cat(
[
final_merged_passage,
modeled_passage,
tiled_start_representation,
modeled_passage * tiled_start_representation,
],
dim=-1,
)
# Shape: (batch_size, passage_length, encoding_dim)
encoded_span_end = self._dropout(
self._span_end_encoder(span_end_representation, passage_lstm_mask)
)
# Shape: (batch_size, passage_length, encoding_dim * 4 + span_end_encoding_dim)
span_end_input = self._dropout(torch.cat([final_merged_passage, encoded_span_end], dim=-1))
span_end_logits = self._span_end_predictor(span_end_input).squeeze(-1)
span_end_probs = util.masked_softmax(span_end_logits, passage_mask)
# Replace the masked values with a very negative constant.
span_start_logits = replace_masked_values_with_big_negative_number(
span_start_logits, passage_mask
)
span_end_logits = replace_masked_values_with_big_negative_number(
span_end_logits, passage_mask
)
best_span = get_best_span(span_start_logits, span_end_logits)
output_dict = {
"passage_question_attention": passage_question_attention,
"span_start_logits": span_start_logits,
"span_start_probs": span_start_probs,
"span_end_logits": span_end_logits,
"span_end_probs": span_end_probs,
"best_span": best_span,
}
# Compute the loss for training.
if span_start is not None:
loss = nll_loss(
util.masked_log_softmax(span_start_logits, passage_mask), span_start.squeeze(-1)
)
self._span_start_accuracy(span_start_logits, span_start.squeeze(-1))
loss += nll_loss(
util.masked_log_softmax(span_end_logits, passage_mask), span_end.squeeze(-1)
)
self._span_end_accuracy(span_end_logits, span_end.squeeze(-1))
self._span_accuracy(best_span, torch.cat([span_start, span_end], -1))
output_dict["loss"] = loss
# Compute the EM and F1 on SQuAD and add the tokenized input to the output.
if metadata is not None:
output_dict["best_span_str"] = []
question_tokens = []
passage_tokens = []
token_offsets = []
for i in range(batch_size):
question_tokens.append(metadata[i]["question_tokens"])
passage_tokens.append(metadata[i]["passage_tokens"])
token_offsets.append(metadata[i]["token_offsets"])
passage_str = metadata[i]["original_passage"]
offsets = metadata[i]["token_offsets"]
predicted_span = tuple(best_span[i].detach().cpu().numpy())
start_offset = offsets[predicted_span[0]][0]
end_offset = offsets[predicted_span[1]][1]
best_span_string = passage_str[start_offset:end_offset]
output_dict["best_span_str"].append(best_span_string)
answer_texts = metadata[i].get("answer_texts", [])
if answer_texts:
self._squad_metrics(best_span_string, answer_texts)
output_dict["question_tokens"] = question_tokens
output_dict["passage_tokens"] = passage_tokens
output_dict["token_offsets"] = token_offsets
return output_dict
def get_metrics(self, reset: bool = False) -> Dict[str, float]:
exact_match, f1_score = self._squad_metrics.get_metric(reset)
return {
"start_acc": self._span_start_accuracy.get_metric(reset),
"end_acc": self._span_end_accuracy.get_metric(reset),
"span_acc": self._span_accuracy.get_metric(reset),
"em": exact_match,
"f1": f1_score,
}
@staticmethod
def get_best_span(
span_start_logits: torch.Tensor, span_end_logits: torch.Tensor
) -> torch.Tensor:
# We call the inputs "logits" - they could either be unnormalized logits or normalized log
# probabilities. A log_softmax operation is a constant shifting of the entire logit
# vector, so taking an argmax over either one gives the same result.
if span_start_logits.dim() != 2 or span_end_logits.dim() != 2:
raise ValueError("Input shapes must be (batch_size, passage_length)")
batch_size, passage_length = span_start_logits.size()
device = span_start_logits.device
# (batch_size, passage_length, passage_length)
span_log_probs = span_start_logits.unsqueeze(2) + span_end_logits.unsqueeze(1)
# Only the upper triangle of the span matrix is valid; the lower triangle has entries where
# the span ends before it starts.
span_log_mask = (
torch.triu(torch.ones((passage_length, passage_length), device=device))
.log()
.unsqueeze(0)
)
valid_span_log_probs = span_log_probs + span_log_mask
# Here we take the span matrix and flatten it, then find the best span using argmax. We
# can recover the start and end indices from this flattened list using simple modular
# arithmetic.
# (batch_size, passage_length * passage_length)
best_spans = valid_span_log_probs.view(batch_size, -1).argmax(-1)
        span_start_indices = torch.div(best_spans, passage_length, rounding_mode="trunc")
span_end_indices = best_spans % passage_length
return torch.stack([span_start_indices, span_end_indices], dim=-1)
default_predictor = "reading_comprehension"
| allennlp-models-main | allennlp_models/rc/models/bidaf.py |
from typing import Dict, List, Any, Optional
import torch
from allennlp.common.checks import ConfigurationError
from allennlp.models.archival import load_archive
from allennlp.models.model import Model, remove_weights_related_keys_from_params
from allennlp.common import Params
from allennlp.data import Vocabulary
from allennlp_models.rc.models.bidaf import BidirectionalAttentionFlow
from allennlp_models.rc.models.utils import get_best_span
from allennlp_models.rc.metrics import SquadEmAndF1
@Model.register("bidaf-ensemble")
class BidafEnsemble(Model):
"""
This class ensembles the output from multiple BiDAF models.
It combines results from the submodels by averaging the start and end span probabilities.
"""
def __init__(self, submodels: List[BidirectionalAttentionFlow]) -> None:
vocab = submodels[0].vocab
for submodel in submodels:
if submodel.vocab != vocab:
raise ConfigurationError("Vocabularies in ensemble differ")
super().__init__(vocab, None)
# Using ModuleList propagates calls to .eval() so dropout is disabled on the submodels in evaluation
# and prediction.
self.submodels = torch.nn.ModuleList(submodels)
self._squad_metrics = SquadEmAndF1()
def forward(
self, # type: ignore
question: Dict[str, torch.LongTensor],
passage: Dict[str, torch.LongTensor],
span_start: torch.IntTensor = None,
span_end: torch.IntTensor = None,
metadata: List[Dict[str, Any]] = None,
) -> Dict[str, torch.Tensor]:
"""
The forward method runs each of the submodels, then selects the best span from the subresults.
The best span is determined by averaging the probabilities for the start and end of the spans.
Parameters
----------
question : Dict[str, torch.LongTensor]
From a ``TextField``.
passage : Dict[str, torch.LongTensor]
From a ``TextField``. The model assumes that this passage contains the answer to the
question, and predicts the beginning and ending positions of the answer within the
passage.
span_start : ``torch.IntTensor``, optional
From an ``IndexField``. This is one of the things we are trying to predict - the
            beginning position of the answer within the passage. This is an `inclusive` token index.
If this is given, we will compute a loss that gets included in the output dictionary.
span_end : ``torch.IntTensor``, optional
From an ``IndexField``. This is one of the things we are trying to predict - the
            ending position of the answer within the passage. This is an `inclusive` token index.
If this is given, we will compute a loss that gets included in the output dictionary.
metadata : ``List[Dict[str, Any]]``, optional
If present, this should contain the question ID, original passage text, and token
offsets into the passage for each instance in the batch. We use this for computing
official metrics using the official SQuAD evaluation script. The length of this list
should be the batch size, and each dictionary should have the keys ``id``,
``original_passage``, and ``token_offsets``. If you only want the best span string and
don't care about official metrics, you can omit the ``id`` key.
Returns
-------
An output dictionary consisting of:
best_span : torch.IntTensor
The result of a constrained inference over ``span_start_logits`` and
``span_end_logits`` to find the most probable span. Shape is ``(batch_size, 2)``
and each offset is a token index.
best_span_str : List[str]
If sufficient metadata was provided for the instances in the batch, we also return the
string from the original passage that the model thinks is the best answer to the
question.
"""
subresults = [
submodel(question, passage, span_start, span_end, metadata)
for submodel in self.submodels
]
batch_size = len(subresults[0]["best_span"])
best_span = ensemble(subresults)
output = {"best_span": best_span, "best_span_str": []}
for index in range(batch_size):
if metadata is not None:
passage_str = metadata[index]["original_passage"]
offsets = metadata[index]["token_offsets"]
predicted_span = tuple(best_span[index].detach().cpu().numpy())
start_offset = offsets[predicted_span[0]][0]
end_offset = offsets[predicted_span[1]][1]
best_span_string = passage_str[start_offset:end_offset]
output["best_span_str"].append(best_span_string)
answer_texts = metadata[index].get("answer_texts", [])
if answer_texts:
self._squad_metrics(best_span_string, answer_texts)
return output
def get_metrics(self, reset: bool = False) -> Dict[str, float]:
exact_match, f1_score = self._squad_metrics.get_metric(reset)
return {"em": exact_match, "f1": f1_score}
# The logic here requires a custom from_params.
@classmethod
def from_params(cls, vocab: Vocabulary, params: Params) -> "BidafEnsemble": # type: ignore
if vocab:
raise ConfigurationError("vocab should be None")
submodels = []
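        # ``submodels`` is a list of paths to trained BiDAF model archives; each one is loaded
        # and added to the ensemble.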
paths = params.pop("submodels")
for path in paths:
submodels.append(load_archive(path).model)
return cls(submodels=submodels)
@classmethod
def _load(
cls,
config: Params,
serialization_dir: str,
weights_file: Optional[str] = None,
cuda_device: int = -1,
opt_level: Optional[str] = None,
) -> Model:
"""
Ensembles don't have vocabularies or weights of their own, so they override _load.
"""
if opt_level is not None:
raise NotImplementedError(f"{cls.__name__} does not support AMP yet.")
model_params = config.get("model")
# The experiment config tells us how to _train_ a model, including where to get pre-trained
# embeddings from. We're now _loading_ the model, so those embeddings will already be
# stored in our weights. We don't need any pretrained weight file anymore, and we don't
# want the code to look for it, so we remove it from the parameters here.
remove_weights_related_keys_from_params(model_params)
model = Model.from_params(vocab=None, params=model_params)
# Force model to cpu or gpu, as appropriate, to make sure that the embeddings are
# in sync with the weights
if cuda_device >= 0:
model.cuda(cuda_device)
else:
model.cpu()
return model
default_predictor = "reading_comprehension"
def ensemble(subresults: List[Dict[str, torch.Tensor]]) -> torch.Tensor:
"""
Identifies the best prediction given the results from the submodels.
Parameters
----------
subresults : List[Dict[str, torch.Tensor]]
Results of each submodel.
    Returns
    -------
    The best span for each instance in the batch, as a ``(batch_size, 2)`` tensor of token indices.
"""
# Choose the highest average confidence span.
span_start_probs = sum(subresult["span_start_probs"] for subresult in subresults) / len(
subresults
)
span_end_probs = sum(subresult["span_end_probs"] for subresult in subresults) / len(subresults)
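    # ``get_best_span`` expects (log-)scores, so we pass the log of the averaged probabilities
    # to the constrained argmax.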
return get_best_span(span_start_probs.log(), span_end_probs.log()) # type: ignore
| allennlp-models-main | allennlp_models/rc/models/bidaf_ensemble.py |
import torch
from allennlp.nn.util import replace_masked_values, min_value_of_dtype
def get_best_span(span_start_logits: torch.Tensor, span_end_logits: torch.Tensor) -> torch.Tensor:
"""
This acts the same as the static method ``BidirectionalAttentionFlow.get_best_span()``
in ``allennlp/models/reading_comprehension/bidaf.py``. We keep it here so that users can
directly import this function without the class.
We call the inputs "logits" - they could either be unnormalized logits or normalized log
probabilities. A log_softmax operation is a constant shifting of the entire logit
vector, so taking an argmax over either one gives the same result.
"""
if span_start_logits.dim() != 2 or span_end_logits.dim() != 2:
raise ValueError("Input shapes must be (batch_size, passage_length)")
batch_size, passage_length = span_start_logits.size()
device = span_start_logits.device
# (batch_size, passage_length, passage_length)
span_log_probs = span_start_logits.unsqueeze(2) + span_end_logits.unsqueeze(1)
# Only the upper triangle of the span matrix is valid; the lower triangle has entries where
# the span ends before it starts.
span_log_mask = torch.triu(torch.ones((passage_length, passage_length), device=device)).log()
valid_span_log_probs = span_log_probs + span_log_mask
# Here we take the span matrix and flatten it, then find the best span using argmax. We
# can recover the start and end indices from this flattened list using simple modular
# arithmetic.
# (batch_size, passage_length * passage_length)
best_spans = valid_span_log_probs.view(batch_size, -1).argmax(-1)
span_start_indices = torch.div(best_spans, passage_length, rounding_mode="trunc")
span_end_indices = best_spans % passage_length
return torch.stack([span_start_indices, span_end_indices], dim=-1)
def replace_masked_values_with_big_negative_number(x: torch.Tensor, mask: torch.Tensor):
"""
    Replace the masked values in a tensor with something really negative so that they won't
affect a max operation.
"""
return replace_masked_values(x, mask, min_value_of_dtype(x.dtype))
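# A small, self-contained illustration of the two helpers above; the tensor values below are
# made up purely for demonstration.
if __name__ == "__main__":
    example_starts = torch.tensor([[0.1, 2.0, 0.3]])
    example_ends = torch.tensor([[0.2, 0.5, 3.0]])
    # The highest-scoring pair with start <= end is (1, 2), so this prints tensor([[1, 2]]).
    print(get_best_span(example_starts, example_ends))
    example_mask = torch.tensor([[True, True, False]])
    # The masked (False) position is replaced with a very large negative value, so any
    # subsequent max or argmax over this tensor will ignore it.
    print(replace_masked_values_with_big_negative_number(example_starts, example_mask))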
| allennlp-models-main | allennlp_models/rc/models/utils.py |
from typing import Any, Dict, List, Optional
import logging
import torch
from allennlp.data import Vocabulary
from allennlp.models.model import Model
from allennlp.modules import Highway
from allennlp.nn.activations import Activation
from allennlp.modules.feedforward import FeedForward
from allennlp.modules import Seq2SeqEncoder, TextFieldEmbedder
from allennlp.modules.matrix_attention.matrix_attention import MatrixAttention
from allennlp.nn import util, InitializerApplicator, RegularizerApplicator
from allennlp.nn.util import masked_softmax
from allennlp_models.rc.models.utils import (
get_best_span,
replace_masked_values_with_big_negative_number,
)
from allennlp_models.rc.metrics.drop_em_and_f1 import DropEmAndF1
logger = logging.getLogger(__name__)
@Model.register("naqanet")
class NumericallyAugmentedQaNet(Model):
"""
This class augments the QANet model with some rudimentary numerical reasoning abilities, as
published in the original DROP paper.
The main idea here is that instead of just predicting a passage span after doing all of the
QANet modeling stuff, we add several different "answer abilities": predicting a span from the
question, predicting a count, or predicting an arithmetic expression. Near the end of the
QANet model, we have a variable that predicts what kind of answer type we need, and each branch
has separate modeling logic to predict that answer type. We then marginalize over all possible
ways of getting to the right answer through each of these answer types.
"""
def __init__(
self,
vocab: Vocabulary,
text_field_embedder: TextFieldEmbedder,
num_highway_layers: int,
phrase_layer: Seq2SeqEncoder,
matrix_attention_layer: MatrixAttention,
modeling_layer: Seq2SeqEncoder,
dropout_prob: float = 0.1,
initializer: InitializerApplicator = InitializerApplicator(),
regularizer: Optional[RegularizerApplicator] = None,
answering_abilities: List[str] = None,
) -> None:
super().__init__(vocab, regularizer)
if answering_abilities is None:
self.answering_abilities = [
"passage_span_extraction",
"question_span_extraction",
"addition_subtraction",
"counting",
]
else:
self.answering_abilities = answering_abilities
text_embed_dim = text_field_embedder.get_output_dim()
encoding_in_dim = phrase_layer.get_input_dim()
encoding_out_dim = phrase_layer.get_output_dim()
modeling_in_dim = modeling_layer.get_input_dim()
modeling_out_dim = modeling_layer.get_output_dim()
self._text_field_embedder = text_field_embedder
self._embedding_proj_layer = torch.nn.Linear(text_embed_dim, encoding_in_dim)
self._highway_layer = Highway(encoding_in_dim, num_highway_layers)
self._encoding_proj_layer = torch.nn.Linear(encoding_in_dim, encoding_in_dim, bias=False)
self._phrase_layer = phrase_layer
self._matrix_attention = matrix_attention_layer
self._modeling_proj_layer = torch.nn.Linear(
encoding_out_dim * 4, modeling_in_dim, bias=False
)
self._modeling_layer = modeling_layer
self._passage_weights_predictor = torch.nn.Linear(modeling_out_dim, 1)
self._question_weights_predictor = torch.nn.Linear(encoding_out_dim, 1)
if len(self.answering_abilities) > 1:
self._answer_ability_predictor = FeedForward(
modeling_out_dim + encoding_out_dim,
activations=[Activation.by_name("relu")(), Activation.by_name("linear")()],
hidden_dims=[modeling_out_dim, len(self.answering_abilities)],
num_layers=2,
dropout=dropout_prob,
)
if "passage_span_extraction" in self.answering_abilities:
self._passage_span_extraction_index = self.answering_abilities.index(
"passage_span_extraction"
)
self._passage_span_start_predictor = FeedForward(
modeling_out_dim * 2,
activations=[Activation.by_name("relu")(), Activation.by_name("linear")()],
hidden_dims=[modeling_out_dim, 1],
num_layers=2,
)
self._passage_span_end_predictor = FeedForward(
modeling_out_dim * 2,
activations=[Activation.by_name("relu")(), Activation.by_name("linear")()],
hidden_dims=[modeling_out_dim, 1],
num_layers=2,
)
if "question_span_extraction" in self.answering_abilities:
self._question_span_extraction_index = self.answering_abilities.index(
"question_span_extraction"
)
self._question_span_start_predictor = FeedForward(
modeling_out_dim * 2,
activations=[Activation.by_name("relu")(), Activation.by_name("linear")()],
hidden_dims=[modeling_out_dim, 1],
num_layers=2,
)
self._question_span_end_predictor = FeedForward(
modeling_out_dim * 2,
activations=[Activation.by_name("relu")(), Activation.by_name("linear")()],
hidden_dims=[modeling_out_dim, 1],
num_layers=2,
)
if "addition_subtraction" in self.answering_abilities:
self._addition_subtraction_index = self.answering_abilities.index(
"addition_subtraction"
)
self._number_sign_predictor = FeedForward(
modeling_out_dim * 3,
activations=[Activation.by_name("relu")(), Activation.by_name("linear")()],
hidden_dims=[modeling_out_dim, 3],
num_layers=2,
)
if "counting" in self.answering_abilities:
self._counting_index = self.answering_abilities.index("counting")
self._count_number_predictor = FeedForward(
modeling_out_dim,
activations=[Activation.by_name("relu")(), Activation.by_name("linear")()],
hidden_dims=[modeling_out_dim, 10],
num_layers=2,
)
self._drop_metrics = DropEmAndF1()
self._dropout = torch.nn.Dropout(p=dropout_prob)
initializer(self)
def forward( # type: ignore
self,
question: Dict[str, torch.LongTensor],
passage: Dict[str, torch.LongTensor],
number_indices: torch.LongTensor,
answer_as_passage_spans: torch.LongTensor = None,
answer_as_question_spans: torch.LongTensor = None,
answer_as_add_sub_expressions: torch.LongTensor = None,
answer_as_counts: torch.LongTensor = None,
metadata: List[Dict[str, Any]] = None,
) -> Dict[str, torch.Tensor]:
question_mask = util.get_text_field_mask(question)
passage_mask = util.get_text_field_mask(passage)
embedded_question = self._dropout(self._text_field_embedder(question))
embedded_passage = self._dropout(self._text_field_embedder(passage))
embedded_question = self._highway_layer(self._embedding_proj_layer(embedded_question))
embedded_passage = self._highway_layer(self._embedding_proj_layer(embedded_passage))
batch_size = embedded_question.size(0)
projected_embedded_question = self._encoding_proj_layer(embedded_question)
projected_embedded_passage = self._encoding_proj_layer(embedded_passage)
encoded_question = self._dropout(
self._phrase_layer(projected_embedded_question, question_mask)
)
encoded_passage = self._dropout(
self._phrase_layer(projected_embedded_passage, passage_mask)
)
# Shape: (batch_size, passage_length, question_length)
passage_question_similarity = self._matrix_attention(encoded_passage, encoded_question)
# Shape: (batch_size, passage_length, question_length)
passage_question_attention = masked_softmax(
passage_question_similarity, question_mask, memory_efficient=True
)
# Shape: (batch_size, passage_length, encoding_dim)
passage_question_vectors = util.weighted_sum(encoded_question, passage_question_attention)
# Shape: (batch_size, question_length, passage_length)
question_passage_attention = masked_softmax(
passage_question_similarity.transpose(1, 2), passage_mask, memory_efficient=True
)
# Shape: (batch_size, passage_length, passage_length)
        passage_attention_over_attention = torch.bmm(
passage_question_attention, question_passage_attention
)
# Shape: (batch_size, passage_length, encoding_dim)
passage_passage_vectors = util.weighted_sum(
            encoded_passage, passage_attention_over_attention
)
# Shape: (batch_size, passage_length, encoding_dim * 4)
merged_passage_attention_vectors = self._dropout(
torch.cat(
[
encoded_passage,
passage_question_vectors,
encoded_passage * passage_question_vectors,
encoded_passage * passage_passage_vectors,
],
dim=-1,
)
)
# The recurrent modeling layers. Since these layers share the same parameters,
# we don't construct them conditioned on answering abilities.
modeled_passage_list = [self._modeling_proj_layer(merged_passage_attention_vectors)]
for _ in range(4):
modeled_passage = self._dropout(
self._modeling_layer(modeled_passage_list[-1], passage_mask)
)
modeled_passage_list.append(modeled_passage)
# Pop the first one, which is input
modeled_passage_list.pop(0)
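        # Of the four retained outputs, the first is reused everywhere below (passage vector,
        # span, and number predictions); the second is paired with it for span starts, the
        # third for span ends, and the fourth for number sign prediction.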
# The first modeling layer is used to calculate the vector representation of passage
passage_weights = self._passage_weights_predictor(modeled_passage_list[0]).squeeze(-1)
passage_weights = masked_softmax(passage_weights, passage_mask)
passage_vector = util.weighted_sum(modeled_passage_list[0], passage_weights)
# The vector representation of question is calculated based on the unmatched encoding,
# because we may want to infer the answer ability only based on the question words.
question_weights = self._question_weights_predictor(encoded_question).squeeze(-1)
question_weights = masked_softmax(question_weights, question_mask)
question_vector = util.weighted_sum(encoded_question, question_weights)
if len(self.answering_abilities) > 1:
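            # When multiple abilities are enabled, we also predict a distribution over them;
            # prediction commits to the argmax, while the training loss below marginalizes
            # over all abilities.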
# Shape: (batch_size, number_of_abilities)
answer_ability_logits = self._answer_ability_predictor(
torch.cat([passage_vector, question_vector], -1)
)
answer_ability_log_probs = torch.nn.functional.log_softmax(answer_ability_logits, -1)
best_answer_ability = torch.argmax(answer_ability_log_probs, 1)
if "counting" in self.answering_abilities:
# Shape: (batch_size, 10)
count_number_logits = self._count_number_predictor(passage_vector)
count_number_log_probs = torch.nn.functional.log_softmax(count_number_logits, -1)
# Info about the best count number prediction
# Shape: (batch_size,)
best_count_number = torch.argmax(count_number_log_probs, -1)
best_count_log_prob = torch.gather(
count_number_log_probs, 1, best_count_number.unsqueeze(-1)
).squeeze(-1)
if len(self.answering_abilities) > 1:
best_count_log_prob += answer_ability_log_probs[:, self._counting_index]
if "passage_span_extraction" in self.answering_abilities:
            # Shape: (batch_size, passage_length, modeling_dim * 2)
passage_for_span_start = torch.cat(
[modeled_passage_list[0], modeled_passage_list[1]], dim=-1
)
# Shape: (batch_size, passage_length)
passage_span_start_logits = self._passage_span_start_predictor(
passage_for_span_start
).squeeze(-1)
# Shape: (batch_size, passage_length, modeling_dim * 2)
passage_for_span_end = torch.cat(
[modeled_passage_list[0], modeled_passage_list[2]], dim=-1
)
# Shape: (batch_size, passage_length)
passage_span_end_logits = self._passage_span_end_predictor(
passage_for_span_end
).squeeze(-1)
# Shape: (batch_size, passage_length)
passage_span_start_log_probs = util.masked_log_softmax(
passage_span_start_logits, passage_mask
)
passage_span_end_log_probs = util.masked_log_softmax(
passage_span_end_logits, passage_mask
)
# Info about the best passage span prediction
passage_span_start_logits = replace_masked_values_with_big_negative_number(
passage_span_start_logits, passage_mask
)
passage_span_end_logits = replace_masked_values_with_big_negative_number(
passage_span_end_logits, passage_mask
)
# Shape: (batch_size, 2)
best_passage_span = get_best_span(passage_span_start_logits, passage_span_end_logits)
            # Shape: (batch_size,)
best_passage_start_log_probs = torch.gather(
passage_span_start_log_probs, 1, best_passage_span[:, 0].unsqueeze(-1)
).squeeze(-1)
best_passage_end_log_probs = torch.gather(
passage_span_end_log_probs, 1, best_passage_span[:, 1].unsqueeze(-1)
).squeeze(-1)
# Shape: (batch_size,)
best_passage_span_log_prob = best_passage_start_log_probs + best_passage_end_log_probs
if len(self.answering_abilities) > 1:
best_passage_span_log_prob += answer_ability_log_probs[
:, self._passage_span_extraction_index
]
if "question_span_extraction" in self.answering_abilities:
            # Shape: (batch_size, question_length, encoding_dim + modeling_dim)
encoded_question_for_span_prediction = torch.cat(
[
encoded_question,
passage_vector.unsqueeze(1).repeat(1, encoded_question.size(1), 1),
],
-1,
)
question_span_start_logits = self._question_span_start_predictor(
encoded_question_for_span_prediction
).squeeze(-1)
# Shape: (batch_size, question_length)
question_span_end_logits = self._question_span_end_predictor(
encoded_question_for_span_prediction
).squeeze(-1)
question_span_start_log_probs = util.masked_log_softmax(
question_span_start_logits, question_mask
)
question_span_end_log_probs = util.masked_log_softmax(
question_span_end_logits, question_mask
)
# Info about the best question span prediction
question_span_start_logits = replace_masked_values_with_big_negative_number(
question_span_start_logits, question_mask
)
question_span_end_logits = replace_masked_values_with_big_negative_number(
question_span_end_logits, question_mask
)
# Shape: (batch_size, 2)
best_question_span = get_best_span(question_span_start_logits, question_span_end_logits)
            # Shape: (batch_size,)
best_question_start_log_probs = torch.gather(
question_span_start_log_probs, 1, best_question_span[:, 0].unsqueeze(-1)
).squeeze(-1)
best_question_end_log_probs = torch.gather(
question_span_end_log_probs, 1, best_question_span[:, 1].unsqueeze(-1)
).squeeze(-1)
# Shape: (batch_size,)
best_question_span_log_prob = (
best_question_start_log_probs + best_question_end_log_probs
)
if len(self.answering_abilities) > 1:
best_question_span_log_prob += answer_ability_log_probs[
:, self._question_span_extraction_index
]
if "addition_subtraction" in self.answering_abilities:
# Shape: (batch_size, # of numbers in the passage)
number_indices = number_indices.squeeze(-1)
number_mask = number_indices != -1
clamped_number_indices = util.replace_masked_values(number_indices, number_mask, 0)
encoded_passage_for_numbers = torch.cat(
[modeled_passage_list[0], modeled_passage_list[3]], dim=-1
)
            # Shape: (batch_size, # of numbers in the passage, modeling_dim * 2)
encoded_numbers = torch.gather(
encoded_passage_for_numbers,
1,
clamped_number_indices.unsqueeze(-1).expand(
-1, -1, encoded_passage_for_numbers.size(-1)
),
)
            # Shape: (batch_size, # of numbers in the passage, modeling_dim * 3)
encoded_numbers = torch.cat(
[
encoded_numbers,
passage_vector.unsqueeze(1).repeat(1, encoded_numbers.size(1), 1),
],
-1,
)
# Shape: (batch_size, # of numbers in the passage, 3)
number_sign_logits = self._number_sign_predictor(encoded_numbers)
number_sign_log_probs = torch.nn.functional.log_softmax(number_sign_logits, -1)
# Shape: (batch_size, # of numbers in passage).
best_signs_for_numbers = torch.argmax(number_sign_log_probs, -1)
            # For padding numbers, the best sign is masked to 0 (i.e. the number is not used).
best_signs_for_numbers = util.replace_masked_values(
best_signs_for_numbers, number_mask, 0
)
# Shape: (batch_size, # of numbers in passage)
best_signs_log_probs = torch.gather(
number_sign_log_probs, 2, best_signs_for_numbers.unsqueeze(-1)
).squeeze(-1)
# the probs of the masked positions should be 1 so that it will not affect the joint probability
# TODO: this is not quite right, since if there are many numbers in the passage,
# TODO: the joint probability would be very small.
best_signs_log_probs = util.replace_masked_values(best_signs_log_probs, number_mask, 0)
# Shape: (batch_size,)
best_combination_log_prob = best_signs_log_probs.sum(-1)
if len(self.answering_abilities) > 1:
best_combination_log_prob += answer_ability_log_probs[
:, self._addition_subtraction_index
]
output_dict = {}
# If answer is given, compute the loss.
if (
answer_as_passage_spans is not None
or answer_as_question_spans is not None
or answer_as_add_sub_expressions is not None
or answer_as_counts is not None
):
log_marginal_likelihood_list = []
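            # For each enabled ability, marginalize over all gold annotations of that answer
            # type; the per-ability likelihoods are combined with the predicted ability
            # distribution below (when more than one ability is enabled).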
for answering_ability in self.answering_abilities:
if answering_ability == "passage_span_extraction":
# Shape: (batch_size, # of answer spans)
gold_passage_span_starts = answer_as_passage_spans[:, :, 0]
gold_passage_span_ends = answer_as_passage_spans[:, :, 1]
# Some spans are padded with index -1,
# so we clamp those paddings to 0 and then mask after `torch.gather()`.
gold_passage_span_mask = gold_passage_span_starts != -1
clamped_gold_passage_span_starts = util.replace_masked_values(
gold_passage_span_starts, gold_passage_span_mask, 0
)
clamped_gold_passage_span_ends = util.replace_masked_values(
gold_passage_span_ends, gold_passage_span_mask, 0
)
# Shape: (batch_size, # of answer spans)
log_likelihood_for_passage_span_starts = torch.gather(
passage_span_start_log_probs, 1, clamped_gold_passage_span_starts
)
log_likelihood_for_passage_span_ends = torch.gather(
passage_span_end_log_probs, 1, clamped_gold_passage_span_ends
)
# Shape: (batch_size, # of answer spans)
log_likelihood_for_passage_spans = (
log_likelihood_for_passage_span_starts
+ log_likelihood_for_passage_span_ends
)
# For those padded spans, we set their log probabilities to be very small negative value
log_likelihood_for_passage_spans = (
replace_masked_values_with_big_negative_number(
log_likelihood_for_passage_spans,
gold_passage_span_mask,
)
)
# Shape: (batch_size, )
log_marginal_likelihood_for_passage_span = util.logsumexp(
log_likelihood_for_passage_spans
)
log_marginal_likelihood_list.append(log_marginal_likelihood_for_passage_span)
elif answering_ability == "question_span_extraction":
# Shape: (batch_size, # of answer spans)
gold_question_span_starts = answer_as_question_spans[:, :, 0]
gold_question_span_ends = answer_as_question_spans[:, :, 1]
# Some spans are padded with index -1,
# so we clamp those paddings to 0 and then mask after `torch.gather()`.
gold_question_span_mask = gold_question_span_starts != -1
clamped_gold_question_span_starts = util.replace_masked_values(
gold_question_span_starts, gold_question_span_mask, 0
)
clamped_gold_question_span_ends = util.replace_masked_values(
gold_question_span_ends, gold_question_span_mask, 0
)
# Shape: (batch_size, # of answer spans)
log_likelihood_for_question_span_starts = torch.gather(
question_span_start_log_probs, 1, clamped_gold_question_span_starts
)
log_likelihood_for_question_span_ends = torch.gather(
question_span_end_log_probs, 1, clamped_gold_question_span_ends
)
# Shape: (batch_size, # of answer spans)
log_likelihood_for_question_spans = (
log_likelihood_for_question_span_starts
+ log_likelihood_for_question_span_ends
)
# For those padded spans, we set their log probabilities to be very small negative value
log_likelihood_for_question_spans = (
replace_masked_values_with_big_negative_number(
log_likelihood_for_question_spans,
gold_question_span_mask,
)
)
# Shape: (batch_size, )
log_marginal_likelihood_for_question_span = util.logsumexp(
log_likelihood_for_question_spans
)
log_marginal_likelihood_list.append(log_marginal_likelihood_for_question_span)
elif answering_ability == "addition_subtraction":
# The padded add-sub combinations use 0 as the signs for all numbers, and we mask them here.
# Shape: (batch_size, # of combinations)
gold_add_sub_mask = answer_as_add_sub_expressions.sum(-1) > 0
# Shape: (batch_size, # of numbers in the passage, # of combinations)
gold_add_sub_signs = answer_as_add_sub_expressions.transpose(1, 2)
# Shape: (batch_size, # of numbers in the passage, # of combinations)
log_likelihood_for_number_signs = torch.gather(
number_sign_log_probs, 2, gold_add_sub_signs
)
# the log likelihood of the masked positions should be 0
# so that it will not affect the joint probability
log_likelihood_for_number_signs = util.replace_masked_values(
log_likelihood_for_number_signs, number_mask.unsqueeze(-1), 0
)
# Shape: (batch_size, # of combinations)
log_likelihood_for_add_subs = log_likelihood_for_number_signs.sum(1)
# For those padded combinations, we set their log probabilities to be very small negative value
log_likelihood_for_add_subs = replace_masked_values_with_big_negative_number(
log_likelihood_for_add_subs, gold_add_sub_mask
)
# Shape: (batch_size, )
log_marginal_likelihood_for_add_sub = util.logsumexp(
log_likelihood_for_add_subs
)
log_marginal_likelihood_list.append(log_marginal_likelihood_for_add_sub)
elif answering_ability == "counting":
# Count answers are padded with label -1,
# so we clamp those paddings to 0 and then mask after `torch.gather()`.
# Shape: (batch_size, # of count answers)
gold_count_mask = answer_as_counts != -1
# Shape: (batch_size, # of count answers)
clamped_gold_counts = util.replace_masked_values(
answer_as_counts, gold_count_mask, 0
)
log_likelihood_for_counts = torch.gather(
count_number_log_probs, 1, clamped_gold_counts
)
# For those padded spans, we set their log probabilities to be very small negative value
log_likelihood_for_counts = replace_masked_values_with_big_negative_number(
log_likelihood_for_counts, gold_count_mask
)
# Shape: (batch_size, )
log_marginal_likelihood_for_count = util.logsumexp(log_likelihood_for_counts)
log_marginal_likelihood_list.append(log_marginal_likelihood_for_count)
else:
raise ValueError(f"Unsupported answering ability: {answering_ability}")
if len(self.answering_abilities) > 1:
# Add the ability probabilities if there are more than one abilities
all_log_marginal_likelihoods = torch.stack(log_marginal_likelihood_list, dim=-1)
all_log_marginal_likelihoods = (
all_log_marginal_likelihoods + answer_ability_log_probs
)
marginal_log_likelihood = util.logsumexp(all_log_marginal_likelihoods)
else:
marginal_log_likelihood = log_marginal_likelihood_list[0]
output_dict["loss"] = -marginal_log_likelihood.mean()
# Compute the metrics and add the tokenized input to the output.
if metadata is not None:
output_dict["question_id"] = []
output_dict["answer"] = []
question_tokens = []
passage_tokens = []
for i in range(batch_size):
question_tokens.append(metadata[i]["question_tokens"])
passage_tokens.append(metadata[i]["passage_tokens"])
if len(self.answering_abilities) > 1:
predicted_ability_str = self.answering_abilities[
best_answer_ability[i].detach().cpu().numpy()
]
else:
predicted_ability_str = self.answering_abilities[0]
answer_json: Dict[str, Any] = {}
# We did not consider multi-mention answers here
if predicted_ability_str == "passage_span_extraction":
answer_json["answer_type"] = "passage_span"
passage_str = metadata[i]["original_passage"]
offsets = metadata[i]["passage_token_offsets"]
predicted_span = tuple(best_passage_span[i].detach().cpu().numpy())
start_offset = offsets[predicted_span[0]][0]
end_offset = offsets[predicted_span[1]][1]
predicted_answer = passage_str[start_offset:end_offset]
answer_json["value"] = predicted_answer
answer_json["spans"] = [(start_offset, end_offset)]
elif predicted_ability_str == "question_span_extraction":
answer_json["answer_type"] = "question_span"
question_str = metadata[i]["original_question"]
offsets = metadata[i]["question_token_offsets"]
predicted_span = tuple(best_question_span[i].detach().cpu().numpy())
start_offset = offsets[predicted_span[0]][0]
end_offset = offsets[predicted_span[1]][1]
predicted_answer = question_str[start_offset:end_offset]
answer_json["value"] = predicted_answer
answer_json["spans"] = [(start_offset, end_offset)]
elif (
predicted_ability_str == "addition_subtraction"
): # plus_minus combination answer
answer_json["answer_type"] = "arithmetic"
original_numbers = metadata[i]["original_numbers"]
sign_remap = {0: 0, 1: 1, 2: -1}
predicted_signs = [
sign_remap[it] for it in best_signs_for_numbers[i].detach().cpu().numpy()
]
result = sum(
[sign * number for sign, number in zip(predicted_signs, original_numbers)]
)
predicted_answer = str(result)
offsets = metadata[i]["passage_token_offsets"]
number_indices = metadata[i]["number_indices"]
number_positions = [offsets[index] for index in number_indices]
answer_json["numbers"] = []
for offset, value, sign in zip(
number_positions, original_numbers, predicted_signs
):
answer_json["numbers"].append(
{"span": offset, "value": value, "sign": sign}
)
if number_indices[-1] == -1:
# There is a dummy 0 number at position -1 added in some cases; we are
# removing that here.
answer_json["numbers"].pop()
answer_json["value"] = result
elif predicted_ability_str == "counting":
answer_json["answer_type"] = "count"
predicted_count = best_count_number[i].detach().cpu().numpy()
predicted_answer = str(predicted_count)
answer_json["count"] = predicted_count
else:
raise ValueError(f"Unsupported answer ability: {predicted_ability_str}")
output_dict["question_id"].append(metadata[i]["question_id"])
output_dict["answer"].append(answer_json)
answer_annotations = metadata[i].get("answer_annotations", [])
if answer_annotations:
self._drop_metrics(predicted_answer, answer_annotations)
# This is used for the demo.
output_dict["passage_question_attention"] = passage_question_attention
output_dict["question_tokens"] = question_tokens
output_dict["passage_tokens"] = passage_tokens
return output_dict
def get_metrics(self, reset: bool = False) -> Dict[str, float]:
exact_match, f1_score = self._drop_metrics.get_metric(reset)
return {"em": exact_match, "f1": f1_score}
default_predictor = "reading_comprehension"
| allennlp-models-main | allennlp_models/rc/models/naqanet.py |
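The counting branch in `naqanet.py` above follows a pattern used for every answering ability: gold answers padded with -1 are clamped to a valid index before `torch.gather`, the padded entries are pushed to a very negative value, and `logsumexp` marginalizes over the remaining gold answers. Below is a self-contained sketch of that pattern with toy numbers; it uses plain PyTorch rather than the AllenNLP utility functions the model calls.

```python
import torch

# Toy log-distribution over counts 0..9 for a batch of 2.
count_number_log_probs = torch.log_softmax(torch.randn(2, 10), dim=-1)

# Gold counts, padded with -1 where an instance has fewer annotated answers.
answer_as_counts = torch.tensor([[3, -1], [7, 2]])

gold_count_mask = answer_as_counts != -1
# Clamp the -1 padding to a valid index so gather() does not fail.
clamped_gold_counts = answer_as_counts.clamp(min=0)
log_likelihood_for_counts = torch.gather(count_number_log_probs, 1, clamped_gold_counts)
# Mask the padded entries with a very negative number so they vanish in logsumexp.
log_likelihood_for_counts = log_likelihood_for_counts.masked_fill(~gold_count_mask, -1e7)
# Marginal log-likelihood over all gold counts for each instance.
log_marginal_likelihood_for_count = torch.logsumexp(log_likelihood_for_counts, dim=-1)
print(log_marginal_likelihood_for_count.shape)  # torch.Size([2])
```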
import logging
from typing import Any, Dict, List, Optional, Tuple
import numpy as np
import torch
from torch import nn
from torch.nn.functional import cross_entropy
from torch.nn.functional import softmax
from allennlp.common.util import sanitize_wordpiece
from allennlp.modules.text_field_embedders import BasicTextFieldEmbedder
from allennlp.modules.token_embedders import PretrainedTransformerEmbedder
from allennlp.nn.util import get_token_ids_from_text_field_tensors
from allennlp.data import Vocabulary
from allennlp.models.model import Model
from allennlp.training.metrics import BooleanAccuracy, CategoricalAccuracy
from allennlp_models.rc.models.utils import (
get_best_span,
replace_masked_values_with_big_negative_number,
)
from allennlp_models.rc.metrics import SquadEmAndF1
logger = logging.getLogger(__name__)
@Model.register("transformer_qa")
class TransformerQA(Model):
"""
Registered as `"transformer_qa"`, this class implements a reading comprehension model patterned
after the proposed model in [Devlin et al](https://arxiv.org/abs/1810.04805),
with improvements borrowed from the SQuAD model in the transformers project.
It predicts start tokens and end tokens with a linear layer on top of word piece embeddings.
If you want to use this model on SQuAD datasets, you can use it with the
[`TransformerSquadReader`](../../dataset_readers/transformer_squad#transformersquadreader)
dataset reader, registered as `"transformer_squad"`.
Note that the metrics that the model produces are calculated on a per-instance basis only. Since there could
be more than one instance per question, these metrics are not the official numbers on either SQuAD task.
To get official numbers for SQuAD v1.1, for example, you can run
```
python -m allennlp_models.rc.tools.transformer_qa_eval
```
# Parameters
vocab : `Vocabulary`
transformer_model_name : `str`, optional (default=`'bert-base-cased'`)
This model chooses the embedder according to this setting. You probably want to make sure this is set to
the same thing as the reader.
"""
def __init__(
self, vocab: Vocabulary, transformer_model_name: str = "bert-base-cased", **kwargs
) -> None:
super().__init__(vocab, **kwargs)
self._text_field_embedder = BasicTextFieldEmbedder(
{"tokens": PretrainedTransformerEmbedder(transformer_model_name)}
)
self._linear_layer = nn.Linear(self._text_field_embedder.get_output_dim(), 2)
self._span_start_accuracy = CategoricalAccuracy()
self._span_end_accuracy = CategoricalAccuracy()
self._span_accuracy = BooleanAccuracy()
self._per_instance_metrics = SquadEmAndF1()
def forward( # type: ignore
self,
question_with_context: Dict[str, Dict[str, torch.LongTensor]],
context_span: torch.IntTensor,
cls_index: torch.LongTensor = None,
answer_span: torch.IntTensor = None,
metadata: List[Dict[str, Any]] = None,
) -> Dict[str, torch.Tensor]:
"""
# Parameters
question_with_context : `Dict[str, torch.LongTensor]`
From a `TextField`. The model assumes that this text field contains the context followed by the
question. It further assumes that the tokens have type ids set such that any token that can be part of
the answer (i.e., tokens from the context) has type id 0, and any other token (including
`[CLS]` and `[SEP]`) has type id 1.
context_span : `torch.IntTensor`
From a `SpanField`. This marks the span of word pieces in `question_with_context` from which answers can come.
cls_index : `torch.LongTensor`, optional
A tensor of shape `(batch_size,)` that provides the index of the `[CLS]` token
in the `question_with_context` for each instance.
This is needed because the `[CLS]` token is used to indicate that the question
is impossible.
If this is `None`, it's assumed that the `[CLS]` token is at index 0 for each instance
in the batch.
answer_span : `torch.IntTensor`, optional
From a `SpanField`. This is the thing we are trying to predict - the span of text that marks the
answer. If given, we compute a loss that gets included in the output directory.
metadata : `List[Dict[str, Any]]`, optional
If present, this should contain the question id, and the original texts of context, question, tokenized
version of both, and a list of possible answers. The length of the `metadata` list should be the
batch size, and each dictionary should have the keys `id`, `question`, `context`,
`question_tokens`, `context_tokens`, and `answers`.
# Returns
`Dict[str, torch.Tensor]` :
An output dictionary with the following fields:
- span_start_logits (`torch.FloatTensor`) :
A tensor of shape `(batch_size, passage_length)` representing unnormalized log
probabilities of the span start position.
- span_end_logits (`torch.FloatTensor`) :
A tensor of shape `(batch_size, passage_length)` representing unnormalized log
probabilities of the span end position (inclusive).
- best_span_scores (`torch.FloatTensor`) :
The score for each of the best spans.
- loss (`torch.FloatTensor`, optional) :
A scalar loss to be optimised, evaluated against `answer_span`.
- best_span (`torch.IntTensor`, optional) :
Provided when not in train mode and sufficient metadata given for the instance.
The result of a constrained inference over `span_start_logits` and
`span_end_logits` to find the most probable span. Shape is `(batch_size, 2)`
and each offset is a token index, unless the best span for an instance
was predicted to be the `[CLS]` token, in which case the span will be (-1, -1).
- best_span_str (`List[str]`, optional) :
Provided when not in train mode and sufficient metadata given for the instance.
This is the string from the original passage that the model thinks is the best answer
to the question.
"""
embedded_question = self._text_field_embedder(question_with_context)
# shape: (batch_size, sequence_length, 2)
logits = self._linear_layer(embedded_question)
# shape: (batch_size, sequence_length, 1)
span_start_logits, span_end_logits = logits.split(1, dim=-1)
# shape: (batch_size, sequence_length)
span_start_logits = span_start_logits.squeeze(-1)
# shape: (batch_size, sequence_length)
span_end_logits = span_end_logits.squeeze(-1)
# Create a mask for `question_with_context` to mask out tokens that are not part
# of the context.
# shape: (batch_size, sequence_length)
possible_answer_mask = torch.zeros_like(
get_token_ids_from_text_field_tensors(question_with_context), dtype=torch.bool
)
for i, (start, end) in enumerate(context_span):
possible_answer_mask[i, start : end + 1] = True
# Also unmask the [CLS] token since that token is used to indicate that
# the question is impossible.
possible_answer_mask[i, 0 if cls_index is None else cls_index[i]] = True
# Calculate span start and end probabilities
# shape: (batch_size, sequence_length)
span_start_probs = softmax(span_start_logits, dim=-1)
# shape: (batch_size, sequence_length)
span_end_probs = softmax(span_end_logits, dim=-1)
# Replace the masked values with a very negative constant since we're in log-space.
# shape: (batch_size, sequence_length)
span_start_logits = replace_masked_values_with_big_negative_number(
span_start_logits, possible_answer_mask
)
# shape: (batch_size, sequence_length)
span_end_logits = replace_masked_values_with_big_negative_number(
span_end_logits, possible_answer_mask
)
# Now calculate the best span.
# shape: (batch_size, 2)
best_spans = get_best_span(span_start_logits, span_end_logits)
# Sum the span start score with the span end score to get an overall score for the span.
# shape: (batch_size,)
best_span_scores = torch.gather(
span_start_logits, 1, best_spans[:, 0].unsqueeze(1)
) + torch.gather(span_end_logits, 1, best_spans[:, 1].unsqueeze(1))
best_span_scores = best_span_scores.squeeze(1)
best_span_probs = torch.gather(
span_start_probs, 1, best_spans[:, 0].unsqueeze(1)
) * torch.gather(span_end_probs, 1, best_spans[:, 1].unsqueeze(1))
best_span_probs = best_span_probs.squeeze(1)
output_dict = {
"span_start_logits": span_start_logits,
"span_end_logits": span_end_logits,
"best_span_scores": best_span_scores,
"span_start_probs": span_start_probs,
"span_end_probs": span_end_probs,
"best_span_probs": best_span_probs,
}
# Compute the loss.
if answer_span is not None:
output_dict["loss"] = self._evaluate_span(
best_spans, span_start_logits, span_end_logits, answer_span
)
# Gather the string of the best span and compute the EM and F1 against the gold span,
# if given.
if not self.training and metadata is not None:
(
output_dict["best_span_str"],
output_dict["best_span"],
) = self._collect_best_span_strings(best_spans, context_span, metadata, cls_index)
return output_dict
def _evaluate_span(
self,
best_spans: torch.Tensor,
span_start_logits: torch.Tensor,
span_end_logits: torch.Tensor,
answer_span: torch.Tensor,
) -> torch.Tensor:
"""
Calculate the loss against the `answer_span` and also update the span metrics.
"""
span_start = answer_span[:, 0]
span_end = answer_span[:, 1]
self._span_accuracy(best_spans, answer_span)
start_loss = cross_entropy(span_start_logits, span_start, ignore_index=-1)
big_constant = min(torch.finfo(start_loss.dtype).max, 1e9)
assert not torch.any(start_loss > big_constant), "Start loss too high"
end_loss = cross_entropy(span_end_logits, span_end, ignore_index=-1)
assert not torch.any(end_loss > big_constant), "End loss too high"
self._span_start_accuracy(span_start_logits, span_start)
self._span_end_accuracy(span_end_logits, span_end)
return (start_loss + end_loss) / 2
def _collect_best_span_strings(
self,
best_spans: torch.Tensor,
context_span: torch.IntTensor,
metadata: List[Dict[str, Any]],
cls_index: Optional[torch.LongTensor],
) -> Tuple[List[str], torch.Tensor]:
"""
Collect the string of the best predicted span from the context metadata and
update `self._per_instance_metrics`, which in the case of SQuAD v1.1 / v2.0
includes the EM and F1 score.
This returns a `Tuple[List[str], torch.Tensor]`, where the `List[str]` is the
predicted answer for each instance in the batch, and the tensor is just the input
tensor `best_spans` after adjustments so that each answer span corresponds to the
context tokens only, and not the question tokens. Spans that correspond to the
`[CLS]` token, i.e. the question was predicted to be impossible, will be set
to `(-1, -1)`.
"""
_best_spans = best_spans.detach().cpu().numpy()
best_span_strings: List[str] = []
best_span_strings_for_metric: List[str] = []
answer_strings_for_metric: List[List[str]] = []
for (metadata_entry, best_span, cspan, cls_ind) in zip(
metadata,
_best_spans,
context_span,
cls_index if cls_index is not None else (0 for _ in range(len(metadata))),
):
context_tokens_for_question = metadata_entry["context_tokens"]
if best_span[0] == cls_ind:
# Predicting [CLS] is interpreted as predicting the question as unanswerable.
best_span_string = ""
# NOTE: even though we've "detached" 'best_spans' above, this still
# modifies the original tensor in-place.
best_span[0], best_span[1] = -1, -1
else:
best_span -= int(cspan[0])
assert np.all(best_span >= 0)
predicted_start, predicted_end = tuple(best_span)
while (
predicted_start >= 0
and context_tokens_for_question[predicted_start].idx is None
):
predicted_start -= 1
if predicted_start < 0:
logger.warning(
f"Could not map the token '{context_tokens_for_question[best_span[0]].text}' at index "
f"'{best_span[0]}' to an offset in the original text."
)
character_start = 0
else:
character_start = context_tokens_for_question[predicted_start].idx
while (
predicted_end < len(context_tokens_for_question)
and context_tokens_for_question[predicted_end].idx is None
):
predicted_end += 1
if predicted_end >= len(context_tokens_for_question):
logger.warning(
f"Could not map the token '{context_tokens_for_question[best_span[1]].text}' at index "
f"'{best_span[1]}' to an offset in the original text."
)
character_end = len(metadata_entry["context"])
else:
end_token = context_tokens_for_question[predicted_end]
character_end = end_token.idx + len(sanitize_wordpiece(end_token.text))
best_span_string = metadata_entry["context"][character_start:character_end]
best_span_strings.append(best_span_string)
answers = metadata_entry.get("answers")
if answers:
best_span_strings_for_metric.append(best_span_string)
answer_strings_for_metric.append(answers)
if answer_strings_for_metric:
self._per_instance_metrics(best_span_strings_for_metric, answer_strings_for_metric)
return best_span_strings, best_spans
def get_metrics(self, reset: bool = False) -> Dict[str, float]:
output = {
"start_acc": self._span_start_accuracy.get_metric(reset),
"end_acc": self._span_end_accuracy.get_metric(reset),
"span_acc": self._span_accuracy.get_metric(reset),
}
if not self.training:
exact_match, f1_score = self._per_instance_metrics.get_metric(reset)
output["per_instance_em"] = exact_match
output["per_instance_f1"] = f1_score
return output
default_predictor = "transformer_qa"
| allennlp-models-main | allennlp_models/rc/models/transformer_qa.py |
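`TransformerQA` delegates span decoding to `get_best_span` from `allennlp_models.rc.models.utils`. As a rough illustration of what such constrained decoding involves (a naive reference sketch, not the library's implementation), one can form the pairwise sum of start and end logits and take the argmax over the upper triangle so that the end index never precedes the start index:

```python
import torch

def naive_best_span(span_start_logits: torch.Tensor, span_end_logits: torch.Tensor) -> torch.Tensor:
    """Return (batch_size, 2) start/end indices maximizing start + end, with end >= start."""
    batch_size, length = span_start_logits.shape
    # score[b, i, j] = start_logit[b, i] + end_logit[b, j]
    scores = span_start_logits.unsqueeze(2) + span_end_logits.unsqueeze(1)
    # Disallow spans whose end comes before their start (keep the upper triangle only).
    valid = torch.ones(length, length).triu().bool()
    scores = scores.masked_fill(~valid, float("-inf"))
    flat_argmax = scores.view(batch_size, -1).argmax(dim=-1)
    starts = torch.div(flat_argmax, length, rounding_mode="floor")
    ends = flat_argmax % length
    return torch.stack([starts, ends], dim=-1)

span_start_logits = torch.randn(2, 8)
span_end_logits = torch.randn(2, 8)
print(naive_best_span(span_start_logits, span_end_logits))  # shape (2, 2)
```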
# flake8: noqa: F403
from allennlp_models.rc.modules.seq2seq_encoders import *
| allennlp-models-main | allennlp_models/rc/modules/__init__.py |
from typing import List
import torch
from torch.nn import Dropout
from allennlp.modules.feedforward import FeedForward
from allennlp.modules.layer_norm import LayerNorm
from allennlp.modules.seq2seq_encoders.seq2seq_encoder import Seq2SeqEncoder
from allennlp.nn.activations import Activation
from allennlp.nn.util import add_positional_features
from allennlp_models.rc.modules.seq2seq_encoders.multi_head_self_attention import (
MultiHeadSelfAttention,
)
@Seq2SeqEncoder.register("stacked_self_attention")
class StackedSelfAttentionEncoder(Seq2SeqEncoder):
"""
Implements a stacked self-attention encoder similar to, but different from, the Transformer architecture in
[Attention is all you Need]
(https://www.semanticscholar.org/paper/Attention-Is-All-You-Need-Vaswani-Shazeer/0737da0767d77606169cbf4187b83e1ab62f6077).
This encoder combines 3 layers in a 'block':
1. A 2 layer FeedForward network.
2. Multi-headed self attention, which uses 2 learnt linear projections
to perform a dot-product similarity between every pair of elements
scaled by the square root of the per-head dimension.
3. Layer Normalisation.
These are then stacked into `num_layers` layers.
# Parameters
input_dim : `int`, required.
The input dimension of the encoder.
hidden_dim : `int`, required.
The hidden dimension used for the _input_ to self attention layers
and the _output_ from the feedforward layers.
projection_dim : `int`, required.
The dimension of the linear projections for the self-attention layers.
feedforward_hidden_dim : `int`, required.
The middle dimension of the FeedForward network. The input and output
dimensions are fixed to ensure sizes match up for the self attention layers.
num_layers : `int`, required.
The number of stacked self attention -> feedforward -> layer normalisation blocks.
num_attention_heads : `int`, required.
The number of attention heads to use per layer.
use_positional_encoding : `bool`, optional, (default = `True`)
Whether to add sinusoidal frequencies to the input tensor. This is strongly recommended,
as without this feature, the self attention layers have no idea of absolute or relative
position (as they are just computing pairwise similarity between vectors of elements),
which can be important features for many tasks.
dropout_prob : `float`, optional, (default = `0.1`)
The dropout probability for the feedforward network.
residual_dropout_prob : `float`, optional, (default = `0.2`)
The dropout probability for the residual connections.
attention_dropout_prob : `float`, optional, (default = `0.1`)
The dropout probability for the attention distributions in each attention layer.
""" # noqa
def __init__(
self,
input_dim: int,
hidden_dim: int,
projection_dim: int,
feedforward_hidden_dim: int,
num_layers: int,
num_attention_heads: int,
use_positional_encoding: bool = True,
dropout_prob: float = 0.1,
residual_dropout_prob: float = 0.2,
attention_dropout_prob: float = 0.1,
) -> None:
super().__init__()
self._use_positional_encoding = use_positional_encoding
self._attention_layers: List[MultiHeadSelfAttention] = []
self._feedforward_layers: List[FeedForward] = []
self._layer_norm_layers: List[LayerNorm] = []
self._feed_forward_layer_norm_layers: List[LayerNorm] = []
feedforward_input_dim = input_dim
for i in range(num_layers):
feedforward = FeedForward(
feedforward_input_dim,
activations=[Activation.by_name("relu")(), Activation.by_name("linear")()],
hidden_dims=[feedforward_hidden_dim, hidden_dim],
num_layers=2,
dropout=dropout_prob,
)
# Note: Please use `ModuleList` in new code. It provides better
# support for running on multiple GPUs. We've kept `add_module` here
# solely for backwards compatibility with existing serialized models.
self.add_module(f"feedforward_{i}", feedforward)
self._feedforward_layers.append(feedforward)
feedforward_layer_norm = LayerNorm(feedforward.get_output_dim())
self.add_module(f"feedforward_layer_norm_{i}", feedforward_layer_norm)
self._feed_forward_layer_norm_layers.append(feedforward_layer_norm)
self_attention = MultiHeadSelfAttention(
num_heads=num_attention_heads,
input_dim=hidden_dim,
attention_dim=projection_dim,
values_dim=projection_dim,
attention_dropout_prob=attention_dropout_prob,
)
self.add_module(f"self_attention_{i}", self_attention)
self._attention_layers.append(self_attention)
layer_norm = LayerNorm(self_attention.get_output_dim())
self.add_module(f"layer_norm_{i}", layer_norm)
self._layer_norm_layers.append(layer_norm)
feedforward_input_dim = hidden_dim
self.dropout = Dropout(residual_dropout_prob)
self._input_dim = input_dim
self._output_dim = self._attention_layers[-1].get_output_dim()
def get_input_dim(self) -> int:
return self._input_dim
def get_output_dim(self) -> int:
return self._output_dim
def is_bidirectional(self):
return False
def forward(self, inputs: torch.Tensor, mask: torch.BoolTensor):
if self._use_positional_encoding:
output = add_positional_features(inputs)
else:
output = inputs
for i in range(len(self._attention_layers)):
# It's necessary to use `getattr` here because the elements stored
# in the lists are not replicated by torch.nn.parallel.replicate
# when running on multiple GPUs. Please use `ModuleList` in new
# code. It handles this issue transparently. We've kept `add_module`
# (in conjunction with `getattr`) solely for backwards compatibility
# with existing serialized models.
attention = getattr(self, f"self_attention_{i}")
feedforward = getattr(self, f"feedforward_{i}")
feedforward_layer_norm = getattr(self, f"feedforward_layer_norm_{i}")
layer_norm = getattr(self, f"layer_norm_{i}")
cached_input = output
# Project output of attention encoder through a feedforward
# network and back to the input size for the next layer.
# shape (batch_size, timesteps, input_size)
feedforward_output = feedforward(output)
feedforward_output = self.dropout(feedforward_output)
if feedforward_output.size() == cached_input.size():
# The first layer's feedforward output may not match the block input size,
# so we skip the residual connection there.
feedforward_output = feedforward_layer_norm(feedforward_output + cached_input)
# shape (batch_size, sequence_length, hidden_dim)
attention_output = attention(feedforward_output, mask)
output = layer_norm(self.dropout(attention_output) + feedforward_output)
return output
| allennlp-models-main | allennlp_models/rc/modules/seq2seq_encoders/stacked_self_attention.py |
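A minimal usage sketch for `StackedSelfAttentionEncoder` follows; the dimensions are arbitrary illustration values, with the only constraint being that `projection_dim` is divisible by `num_attention_heads`.

```python
import torch
from allennlp_models.rc.modules.seq2seq_encoders import StackedSelfAttentionEncoder

encoder = StackedSelfAttentionEncoder(
    input_dim=16,
    hidden_dim=16,
    projection_dim=16,
    feedforward_hidden_dim=32,
    num_layers=2,
    num_attention_heads=4,
)
inputs = torch.randn(3, 10, 16)              # (batch_size, timesteps, input_dim)
mask = torch.ones(3, 10, dtype=torch.bool)   # all positions are real tokens
output = encoder(inputs, mask)
print(output.shape)  # torch.Size([3, 10, 16])
```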
from allennlp_models.rc.modules.seq2seq_encoders.multi_head_self_attention import (
MultiHeadSelfAttention,
)
from allennlp_models.rc.modules.seq2seq_encoders.qanet_encoder import QaNetEncoder
from allennlp_models.rc.modules.seq2seq_encoders.stacked_self_attention import (
StackedSelfAttentionEncoder,
)
| allennlp-models-main | allennlp_models/rc/modules/seq2seq_encoders/__init__.py |
import torch
from torch.nn import Dropout, Linear
from allennlp.nn.util import masked_softmax, weighted_sum
from allennlp.modules.seq2seq_encoders.seq2seq_encoder import Seq2SeqEncoder
# exist_ok has to be true until we remove this from the core library
@Seq2SeqEncoder.register("multi_head_self_attention", exist_ok=True)
class MultiHeadSelfAttention(Seq2SeqEncoder):
"""
This class implements the key-value scaled dot product attention mechanism
detailed in the paper [Attention is all you Need]
(https://www.semanticscholar.org/paper/Attention-Is-All-You-Need-Vaswani-Shazeer/0737da0767d77606169cbf4187b83e1ab62f6077).
The attention mechanism is a weighted sum of a projection V of the inputs, with respect
to the scaled, normalised dot product of Q and K, which are also both linear projections
of the input. This procedure is repeated for each attention head, using different parameters.
# Parameters
num_heads : `int`, required.
The number of attention heads to use.
input_dim : `int`, required.
The size of the last dimension of the input tensor.
attention_dim `int`, required.
The total dimension of the query and key projections which comprise the
dot product attention function. Must be divisible by `num_heads`.
values_dim : `int`, required.
The total dimension which the input is projected to for representing the values,
which are combined using the attention. Must be divisible by `num_heads`.
output_projection_dim : `int`, optional (default = `None`)
The dimensionality of the final output projection. If this is not passed
explicitly, the projection has size `input_size`.
attention_dropout_prob : `float`, optional (default = `0.1`)
The dropout probability applied to the normalised attention
distributions.
""" # noqa
def __init__(
self,
num_heads: int,
input_dim: int,
attention_dim: int,
values_dim: int,
output_projection_dim: int = None,
attention_dropout_prob: float = 0.1,
) -> None:
super().__init__()
self._num_heads = num_heads
self._input_dim = input_dim
self._output_dim = output_projection_dim or input_dim
self._attention_dim = attention_dim
self._values_dim = values_dim
if attention_dim % num_heads != 0:
raise ValueError(
f"Key size ({attention_dim}) must be divisible by the number of "
f"attention heads ({num_heads})."
)
if values_dim % num_heads != 0:
raise ValueError(
f"Value size ({values_dim}) must be divisible by the number of "
f"attention heads ({num_heads})."
)
self._combined_projection = Linear(input_dim, 2 * attention_dim + values_dim)
self._scale = (input_dim // num_heads) ** 0.5
self._output_projection = Linear(values_dim, self._output_dim)
self._attention_dropout = Dropout(attention_dropout_prob)
def get_input_dim(self):
return self._input_dim
def get_output_dim(self):
return self._output_dim
def is_bidirectional(self):
return False
def forward(self, inputs: torch.Tensor, mask: torch.BoolTensor = None) -> torch.FloatTensor:
"""
# Parameters
inputs : `torch.FloatTensor`, required.
A tensor of shape (batch_size, timesteps, input_dim)
mask : `torch.BoolTensor`, optional (default = `None`).
A tensor of shape (batch_size, timesteps).
# Returns
A tensor of shape `(batch_size, timesteps, output_projection_dim)`,
where output_projection_dim = input_dim by default.
"""
num_heads = self._num_heads
batch_size, timesteps, _ = inputs.size()
if mask is None:
mask = inputs.new_ones(batch_size, timesteps).bool()
# Shape (batch_size, timesteps, 2 * attention_dim + values_dim)
combined_projection = self._combined_projection(inputs)
# split by attention dim - if values_dim > attention_dim, we will get more
# than 3 elements returned. All of the rest are the values vector, so we
# just concatenate them back together again below.
queries, keys, *values = combined_projection.split(self._attention_dim, -1)
queries = queries.contiguous()
keys = keys.contiguous()
values = torch.cat(values, -1).contiguous()
# Shape (num_heads * batch_size, timesteps, values_dim / num_heads)
values_per_head = values.view(
batch_size, timesteps, num_heads, int(self._values_dim / num_heads)
)
values_per_head = values_per_head.transpose(1, 2).contiguous()
values_per_head = values_per_head.view(
batch_size * num_heads, timesteps, int(self._values_dim / num_heads)
)
# Shape (num_heads * batch_size, timesteps, attention_dim / num_heads)
queries_per_head = queries.view(
batch_size, timesteps, num_heads, int(self._attention_dim / num_heads)
)
queries_per_head = queries_per_head.transpose(1, 2).contiguous()
queries_per_head = queries_per_head.view(
batch_size * num_heads, timesteps, int(self._attention_dim / num_heads)
)
# Shape (num_heads * batch_size, timesteps, attention_dim / num_heads)
keys_per_head = keys.view(
batch_size, timesteps, num_heads, int(self._attention_dim / num_heads)
)
keys_per_head = keys_per_head.transpose(1, 2).contiguous()
keys_per_head = keys_per_head.view(
batch_size * num_heads, timesteps, int(self._attention_dim / num_heads)
)
# shape (num_heads * batch_size, timesteps, timesteps)
scaled_similarities = torch.bmm(
queries_per_head / self._scale, keys_per_head.transpose(1, 2)
)
# shape (num_heads * batch_size, timesteps, timesteps)
# Normalise the distributions, using the same mask for all heads.
attention = masked_softmax(
scaled_similarities,
mask.repeat(1, num_heads).view(batch_size * num_heads, timesteps),
memory_efficient=True,
)
attention = self._attention_dropout(attention)
# Take a weighted sum of the values with respect to the attention
# distributions for each element in the num_heads * batch_size dimension.
# shape (num_heads * batch_size, timesteps, values_dim/num_heads)
outputs = weighted_sum(values_per_head, attention)
# Reshape back to original shape (batch_size, timesteps, values_dim)
# shape (batch_size, num_heads, timesteps, values_dim/num_heads)
outputs = outputs.view(batch_size, num_heads, timesteps, int(self._values_dim / num_heads))
# shape (batch_size, timesteps, num_heads, values_dim/num_heads)
outputs = outputs.transpose(1, 2).contiguous()
# shape (batch_size, timesteps, values_dim)
outputs = outputs.view(batch_size, timesteps, self._values_dim)
# Project back to original input size.
# shape (batch_size, timesteps, input_size)
outputs = self._output_projection(outputs)
return outputs
| allennlp-models-main | allennlp_models/rc/modules/seq2seq_encoders/multi_head_self_attention.py |
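The module above is a standard `Seq2SeqEncoder`, so a quick shape check looks like the following sketch. The dimensions are arbitrary; `attention_dim` and `values_dim` must each be divisible by `num_heads`, and the output is projected back to `input_dim` by default.

```python
import torch
from allennlp_models.rc.modules.seq2seq_encoders import MultiHeadSelfAttention

attention = MultiHeadSelfAttention(
    num_heads=4,
    input_dim=32,
    attention_dim=16,   # must be divisible by num_heads
    values_dim=16,      # must be divisible by num_heads
)
inputs = torch.randn(2, 7, 32)                # (batch_size, timesteps, input_dim)
mask = torch.ones(2, 7, dtype=torch.bool)
outputs = attention(inputs, mask)
print(outputs.shape)  # torch.Size([2, 7, 32]) -- projected back to input_dim by default
```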
import torch
from torch.nn import Dropout
from torch.nn import LayerNorm
from torch.nn import ModuleList
from allennlp.modules.feedforward import FeedForward
from allennlp.modules.residual_with_layer_dropout import ResidualWithLayerDropout
from allennlp.modules.seq2seq_encoders.seq2seq_encoder import Seq2SeqEncoder
from allennlp.nn.activations import Activation
from allennlp.nn.util import add_positional_features
from allennlp.common.checks import check_dimensions_match
from allennlp_models.rc.modules.seq2seq_encoders.multi_head_self_attention import (
MultiHeadSelfAttention,
)
# exist_ok has to be true until we remove this from the core library
@Seq2SeqEncoder.register("qanet_encoder", exist_ok=True)
class QaNetEncoder(Seq2SeqEncoder):
"""
Stack multiple QANetEncoderBlock into one sequence encoder.
# Parameters
input_dim : `int`, required.
The input dimension of the encoder.
hidden_dim : `int`, required.
The hidden dimension used for convolution output channels, multi-head attention output
and the final output of feedforward layer.
attention_projection_dim : `int`, required.
The dimension of the linear projections for the self-attention layers.
feedforward_hidden_dim : `int`, required.
The middle dimension of the FeedForward network. The input and output
dimensions are fixed to ensure sizes match up for the self attention layers.
num_blocks : `int`, required.
The number of stacked encoder blocks.
num_convs_per_block : `int`, required.
The number of convolutions in each block.
conv_kernel_size : `int`, required.
The kernel size for convolution.
num_attention_heads : `int`, required.
The number of attention heads to use per layer.
use_positional_encoding : `bool`, optional, (default = `True`)
Whether to add sinusoidal frequencies to the input tensor. This is strongly recommended,
as without this feature, the self attention layers have no idea of absolute or relative
position (as they are just computing pairwise similarity between vectors of elements),
which can be important features for many tasks.
dropout_prob : `float`, optional, (default = `0.1`)
The dropout probability for the feedforward network.
layer_dropout_undecayed_prob : `float`, optional, (default = `0.1`)
The initial dropout probability for layer dropout, and this might decay w.r.t the depth
of the layer. For each mini-batch, the convolution/attention/ffn sublayer is
stochastically dropped according to its layer dropout probability.
attention_dropout_prob : `float`, optional, (default = `0.0`)
The dropout probability for the attention distributions in the attention layer.
"""
def __init__(
self,
input_dim: int,
hidden_dim: int,
attention_projection_dim: int,
feedforward_hidden_dim: int,
num_blocks: int,
num_convs_per_block: int,
conv_kernel_size: int,
num_attention_heads: int,
use_positional_encoding: bool = True,
dropout_prob: float = 0.1,
layer_dropout_undecayed_prob: float = 0.1,
attention_dropout_prob: float = 0,
) -> None:
super().__init__()
self._input_projection_layer = None
if input_dim != hidden_dim:
self._input_projection_layer = torch.nn.Linear(input_dim, hidden_dim)
else:
self._input_projection_layer = lambda x: x
self._encoder_blocks = ModuleList([])
for _ in range(num_blocks):
encoder_block = QaNetEncoderBlock(
hidden_dim,
hidden_dim,
attention_projection_dim,
feedforward_hidden_dim,
num_convs_per_block,
conv_kernel_size,
num_attention_heads,
use_positional_encoding,
dropout_prob,
layer_dropout_undecayed_prob,
attention_dropout_prob,
)
self._encoder_blocks.append(encoder_block)
self._input_dim = input_dim
self._output_dim = hidden_dim
def get_input_dim(self) -> int:
return self._input_dim
def get_output_dim(self) -> int:
return self._output_dim
def is_bidirectional(self) -> bool:
return False
def forward(self, inputs: torch.Tensor, mask: torch.BoolTensor = None) -> torch.Tensor:
inputs = self._input_projection_layer(inputs)
output = inputs
for encoder_block in self._encoder_blocks:
output = encoder_block(output, mask)
return output
# exist_ok has to be true until we remove this from the core library
@Seq2SeqEncoder.register("qanet_encoder_block", exist_ok=True)
class QaNetEncoderBlock(Seq2SeqEncoder):
"""
Implements the encoder block described in [QANet: Combining Local Convolution with Global
Self-attention for Reading Comprehension](https://openreview.net/forum?id=B14TlG-RW).
One encoder block mainly contains 4 parts:
1. Add position embedding.
2. Several depthwise separable convolutions.
3. Multi-headed self attention, which uses 2 learnt linear projections
to perform a dot-product similarity between every pair of elements
scaled by the square root of the per-head dimension.
4. A two-layer FeedForward network.
# Parameters
input_dim : `int`, required.
The input dimension of the encoder.
hidden_dim : `int`, required.
The hidden dimension used for convolution output channels, multi-head attention output
and the final output of feedforward layer.
attention_projection_dim : `int`, required.
The dimension of the linear projections for the self-attention layers.
feedforward_hidden_dim : `int`, required.
The middle dimension of the FeedForward network. The input and output
dimensions are fixed to ensure sizes match up for the self attention layers.
num_convs : `int`, required.
The number of convolutions in each block.
conv_kernel_size : `int`, required.
The kernel size for convolution.
num_attention_heads : `int`, required.
The number of attention heads to use per layer.
use_positional_encoding : `bool`, optional, (default = `True`)
Whether to add sinusoidal frequencies to the input tensor. This is strongly recommended,
as without this feature, the self attention layers have no idea of absolute or relative
position (as they are just computing pairwise similarity between vectors of elements),
which can be important features for many tasks.
dropout_prob : `float`, optional, (default = `0.1`)
The dropout probability for the feedforward network.
layer_dropout_undecayed_prob : `float`, optional, (default = `0.1`)
The initial dropout probability for layer dropout, and this might decay w.r.t the depth
of the layer. For each mini-batch, the convolution/attention/ffn sublayer is randomly
dropped according to its layer dropout probability.
attention_dropout_prob : `float`, optional, (default = `0.0`)
The dropout probability for the attention distributions in the attention layer.
"""
def __init__(
self,
input_dim: int,
hidden_dim: int,
attention_projection_dim: int,
feedforward_hidden_dim: int,
num_convs: int,
conv_kernel_size: int,
num_attention_heads: int,
use_positional_encoding: bool = True,
dropout_prob: float = 0.1,
layer_dropout_undecayed_prob: float = 0.1,
attention_dropout_prob: float = 0,
) -> None:
super().__init__()
check_dimensions_match(input_dim, hidden_dim, "input_dim", "hidden_dim")
self._use_positional_encoding = use_positional_encoding
self._conv_norm_layers = torch.nn.ModuleList(
LayerNorm(hidden_dim) for _ in range(num_convs)
)
self._conv_layers = torch.nn.ModuleList()
for _ in range(num_convs):
padding = torch.nn.ConstantPad1d(
(conv_kernel_size // 2, (conv_kernel_size - 1) // 2), 0
)
depthwise_conv = torch.nn.Conv1d(
hidden_dim, hidden_dim, conv_kernel_size, groups=hidden_dim
)
pointwise_conv = torch.nn.Conv1d(hidden_dim, hidden_dim, 1)
self._conv_layers.append(
torch.nn.Sequential(
padding, depthwise_conv, pointwise_conv, Activation.by_name("relu")()
)
)
self.attention_norm_layer = LayerNorm(hidden_dim)
self.attention_layer = MultiHeadSelfAttention(
num_heads=num_attention_heads,
input_dim=hidden_dim,
attention_dim=attention_projection_dim,
values_dim=attention_projection_dim,
attention_dropout_prob=attention_dropout_prob,
)
self.feedforward_norm_layer = LayerNorm(hidden_dim)
self.feedforward = FeedForward(
hidden_dim,
activations=[Activation.by_name("relu")(), Activation.by_name("linear")()],
hidden_dims=[feedforward_hidden_dim, hidden_dim],
num_layers=2,
dropout=dropout_prob,
)
self.dropout = Dropout(dropout_prob)
self.residual_with_layer_dropout = ResidualWithLayerDropout(layer_dropout_undecayed_prob)
self._input_dim = input_dim
self._output_dim = hidden_dim
def get_input_dim(self) -> int:
return self._input_dim
def get_output_dim(self) -> int:
return self._output_dim
def is_bidirectional(self):
return False
def forward(self, inputs: torch.Tensor, mask: torch.BoolTensor = None) -> torch.Tensor:
if self._use_positional_encoding:
output = add_positional_features(inputs)
else:
output = inputs
total_sublayers = len(self._conv_layers) + 2
sublayer_count = 0
for conv_norm_layer, conv_layer in zip(self._conv_norm_layers, self._conv_layers):
conv_norm_out = self.dropout(conv_norm_layer(output))
conv_out = self.dropout(conv_layer(conv_norm_out.transpose(1, 2)).transpose(1, 2))
sublayer_count += 1
output = self.residual_with_layer_dropout(
output, conv_out, sublayer_count, total_sublayers
)
attention_norm_out = self.dropout(self.attention_norm_layer(output))
attention_out = self.dropout(self.attention_layer(attention_norm_out, mask))
sublayer_count += 1
output = self.residual_with_layer_dropout(
output, attention_out, sublayer_count, total_sublayers
)
feedforward_norm_out = self.dropout(self.feedforward_norm_layer(output))
feedforward_out = self.dropout(self.feedforward(feedforward_norm_out))
sublayer_count += 1
output = self.residual_with_layer_dropout(
output, feedforward_out, sublayer_count, total_sublayers
)
return output
| allennlp-models-main | allennlp_models/rc/modules/seq2seq_encoders/qanet_encoder.py |
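A usage sketch for `QaNetEncoder` with arbitrary illustrative dimensions; when `input_dim != hidden_dim`, the encoder first projects the input up to `hidden_dim`, so the output size is `hidden_dim`.

```python
import torch
from allennlp_models.rc.modules.seq2seq_encoders import QaNetEncoder

encoder = QaNetEncoder(
    input_dim=16,
    hidden_dim=32,
    attention_projection_dim=32,
    feedforward_hidden_dim=64,
    num_blocks=2,
    num_convs_per_block=2,
    conv_kernel_size=5,
    num_attention_heads=4,
)
inputs = torch.randn(2, 20, 16)               # (batch_size, timesteps, input_dim)
mask = torch.ones(2, 20, dtype=torch.bool)
output = encoder(inputs, mask)
print(output.shape)  # torch.Size([2, 20, 32]) -- hidden_dim is the output size
```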
# flake8: noqa: F403
from allennlp_models.generation.modules import *
from allennlp_models.generation.predictors import *
from allennlp_models.generation.models import *
from allennlp_models.generation.dataset_readers import *
| allennlp-models-main | allennlp_models/generation/__init__.py |
import logging
from typing import List, Dict
import warnings
import torch
from allennlp.common.file_utils import cached_path
from allennlp.common.util import START_SYMBOL, END_SYMBOL
from allennlp.data.dataset_readers.dataset_reader import DatasetReader
from allennlp.data.fields import TextField, TensorField, MetadataField, NamespaceSwappingField
from allennlp.data.instance import Instance
from allennlp.data.tokenizers import (
Token,
Tokenizer,
SpacyTokenizer,
PretrainedTransformerTokenizer,
)
from allennlp.data.token_indexers import TokenIndexer, SingleIdTokenIndexer
logger = logging.getLogger(__name__)
@DatasetReader.register("copynet_seq2seq")
class CopyNetDatasetReader(DatasetReader):
"""
Read a tsv file containing paired sequences, and create a dataset suitable for a
`CopyNet` model, or any model with a matching API.
The expected format for each input line is: <source_sequence_string><tab><target_sequence_string>.
An instance produced by `CopyNetDatasetReader` will contain at least the following fields:
- `source_tokens`: a `TextField` containing the tokenized source sentence.
This will result in a tensor of shape `(batch_size, source_length)`.
- `source_token_ids`: a `TensorField` of size `(batch_size, source_length)`
that contains an ID for each token in the source sentence. Tokens that
match at the lowercase level will share the same ID. If `target_tokens`
is passed as well, these IDs will also correspond to the `target_token_ids`
field, i.e. any tokens that match at the lowercase level in both
the source and target sentences will share the same ID. Note that these IDs
have no correlation with the token indices from the corresponding
vocabulary namespaces.
- `source_to_target`: a `NamespaceSwappingField` that keeps track of the index
of the target token that matches each token in the source sentence.
When there is no matching target token, the OOV index is used.
This will result in a tensor of shape `(batch_size, source_length)`.
- `metadata`: a `MetadataField` which contains the source tokens and
potentially target tokens as lists of strings.
When `target_string` is passed, the instance will also contain these fields:
- `target_tokens`: a `TextField` containing the tokenized target sentence,
including the `START_SYMBOL` and `END_SYMBOL`. This will result in
a tensor of shape `(batch_size, target_length)`.
- `target_token_ids`: a `TensorField` of size `(batch_size, target_length)`.
This is calculated in the same way as `source_token_ids`.
See the "Notes" section below for a description of how these fields are used.
# Parameters
target_namespace : `str`, required
The vocab namespace for the targets. This needs to be passed to the dataset reader
in order to construct the NamespaceSwappingField.
source_tokenizer : `Tokenizer`, optional
Tokenizer to use to split the input sequences into words or other kinds of tokens. Defaults
to `SpacyTokenizer()`.
target_tokenizer : `Tokenizer`, optional
Tokenizer to use to split the output sequences (during training) into words or other kinds
of tokens. Defaults to `source_tokenizer`.
source_token_indexers : `Dict[str, TokenIndexer]`, optional
Indexers used to define input (source side) token representations. Defaults to
`{"tokens": SingleIdTokenIndexer()}`.
# Notes
In regards to the fields in an `Instance` produced by this dataset reader,
`source_token_ids` and `target_token_ids` are primarily used during training
to determine whether a target token is copied from a source token (or multiple matching
source tokens), while `source_to_target` is primarily used during prediction
to combine the copy scores of source tokens with the generation scores for matching
tokens in the target namespace.
"""
def __init__(
self,
target_namespace: str,
source_tokenizer: Tokenizer = None,
target_tokenizer: Tokenizer = None,
source_token_indexers: Dict[str, TokenIndexer] = None,
**kwargs,
) -> None:
super().__init__(
manual_distributed_sharding=True, manual_multiprocess_sharding=True, **kwargs
)
self._target_namespace = target_namespace
self._source_tokenizer = source_tokenizer or SpacyTokenizer()
self._target_tokenizer = target_tokenizer or self._source_tokenizer
self._source_token_indexers = source_token_indexers or {"tokens": SingleIdTokenIndexer()}
self._target_token_indexers: Dict[str, TokenIndexer] = {
"tokens": SingleIdTokenIndexer(namespace=self._target_namespace)
}
if (
isinstance(self._target_tokenizer, PretrainedTransformerTokenizer)
and self._target_tokenizer._add_special_tokens
):
warnings.warn(
"'add_special_tokens' is True for target_tokenizer, which is a PretrainedTransformerTokenizer. "
"This means special tokens, such as '[CLS]' and '[SEP]', will probably end up in "
"your model's predicted target sequences. "
"If this is not what you intended, make sure to specify 'add_special_tokens: False' for "
"your target_tokenizer.",
UserWarning,
)
def _read(self, file_path):
with open(cached_path(file_path), "r") as data_file:
logger.info("Reading instances from lines in file at: %s", file_path)
for line_num, line in self.shard_iterable(enumerate(data_file)):
line = line.strip("\n")
if not line:
continue
line_parts = line.split("\t")
if len(line_parts) != 2:
raise RuntimeError(
"Invalid line format: %s (line number %d)" % (line, line_num + 1)
)
source_sequence, target_sequence = line_parts
if not source_sequence:
continue
yield self.text_to_instance(source_sequence, target_sequence)
@staticmethod
def _tokens_to_ids(tokens: List[Token]) -> List[int]:
ids: Dict[str, int] = {}
out: List[int] = []
for token in tokens:
out.append(ids.setdefault(token.text, len(ids)))
return out
def text_to_instance(
self,
source_string: str,
target_string: str = None,
weight: float = None,
) -> Instance: # type: ignore
"""
Turn raw source string and target string into an `Instance`.
# Parameters
source_string : `str`, required
target_string : `str`, optional (default = `None`)
weight : `float`, optional (default = `None`)
An optional weight to assign to this instance when calculating the loss in
[CopyNetSeq2Seq.forward()](../../models/copynet_seq2seq/#forward.parameters).
# Returns
`Instance`
See the above for a description of the fields that the instance will contain.
"""
tokenized_source = self._source_tokenizer.tokenize(source_string)
if not tokenized_source:
# If the tokenized source is empty, it will cause issues downstream.
raise ValueError(f"source tokenizer produced no tokens from source '{source_string}'")
source_field = TextField(tokenized_source)
# For each token in the source sentence, we keep track of the matching token
# in the target sentence (which will be the OOV symbol if there is no match).
source_to_target_field = NamespaceSwappingField(tokenized_source, self._target_namespace)
meta_fields = {"source_tokens": [x.text for x in tokenized_source]}
fields_dict = {"source_tokens": source_field, "source_to_target": source_to_target_field}
if target_string is not None:
tokenized_target = self._target_tokenizer.tokenize(target_string)
tokenized_target.insert(0, Token(START_SYMBOL))
tokenized_target.append(Token(END_SYMBOL))
target_field = TextField(tokenized_target)
fields_dict["target_tokens"] = target_field
meta_fields["target_tokens"] = [y.text for y in tokenized_target[1:-1]]
source_and_target_token_ids = self._tokens_to_ids(tokenized_source + tokenized_target)
source_token_ids = source_and_target_token_ids[: len(tokenized_source)]
fields_dict["source_token_ids"] = TensorField(torch.tensor(source_token_ids))
target_token_ids = source_and_target_token_ids[len(tokenized_source) :]
fields_dict["target_token_ids"] = TensorField(torch.tensor(target_token_ids))
else:
source_token_ids = self._tokens_to_ids(tokenized_source)
fields_dict["source_token_ids"] = TensorField(torch.tensor(source_token_ids))
fields_dict["metadata"] = MetadataField(meta_fields)
if weight is not None:
fields_dict["weight"] = TensorField(torch.tensor(float(weight), dtype=torch.float))
return Instance(fields_dict)
def apply_token_indexers(self, instance: Instance) -> None:
instance.fields["source_tokens"]._token_indexers = self._source_token_indexers # type: ignore
if "target_tokens" in instance.fields:
instance.fields["target_tokens"]._token_indexers = self._target_token_indexers # type: ignore
| allennlp-models-main | allennlp_models/generation/dataset_readers/copynet_seq2seq.py |
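`_tokens_to_ids` is what ties `source_token_ids` and `target_token_ids` together: each distinct token text gets a small integer in order of first appearance, shared across the concatenated source and target sequence. A standalone sketch of the same idea on toy token strings (the `@start@`/`@end@` strings mirror AllenNLP's default symbols but are just literals here):

```python
from typing import Dict, List

def tokens_to_ids(tokens: List[str]) -> List[int]:
    """Assign each distinct token string a small id, in order of first appearance."""
    ids: Dict[str, int] = {}
    return [ids.setdefault(token, len(ids)) for token in tokens]

source = ["the", "cat", "sat", "on", "the", "mat"]
target = ["@start@", "the", "mat", "@end@"]
combined = tokens_to_ids(source + target)
source_token_ids = combined[: len(source)]   # [0, 1, 2, 3, 0, 4]
target_token_ids = combined[len(source):]    # [5, 0, 4, 6] -- "the" and "mat" reuse source ids
print(source_token_ids, target_token_ids)
```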
from allennlp_models.generation.dataset_readers.copynet_seq2seq import CopyNetDatasetReader
from allennlp_models.generation.dataset_readers.seq2seq import Seq2SeqDatasetReader
from allennlp_models.generation.dataset_readers.cnn_dm import CNNDailyMailDatasetReader
| allennlp-models-main | allennlp_models/generation/dataset_readers/__init__.py |
from pathlib import Path
from typing import Dict, Optional, List
import logging
import os
import glob
import hashlib
import ftfy
from allennlp.common.checks import ConfigurationError
from allennlp.common.file_utils import cached_path
from allennlp.data.dataset_readers.dataset_reader import DatasetReader
from allennlp.data.fields import TextField
from allennlp.data.instance import Instance
from allennlp.data.tokenizers import Tokenizer, SpacyTokenizer
from allennlp.data.token_indexers import TokenIndexer, SingleIdTokenIndexer
logger = logging.getLogger(__name__)
@DatasetReader.register("cnn_dm")
class CNNDailyMailDatasetReader(DatasetReader):
"""
Reads the CNN/DailyMail dataset for text summarization.
The output of `read` is a list of `Instance` s with the fields:
source_tokens : `TextField` and
target_tokens : `TextField`
# Parameters
source_tokenizer : `Tokenizer`, optional
Tokenizer to use to split the input sequences into words or other kinds of tokens. Defaults
to `SpacyTokenizer()`.
target_tokenizer : `Tokenizer`, optional
Tokenizer to use to split the output sequences (during training) into words or other kinds
of tokens. Defaults to `source_tokenizer`.
source_token_indexers : `Dict[str, TokenIndexer]`, optional
Indexers used to define input (source side) token representations. Defaults to
`{"tokens": SingleIdTokenIndexer()}`.
target_token_indexers : `Dict[str, TokenIndexer]`, optional
Indexers used to define output (target side) token representations. Defaults to
`source_token_indexers`.
source_max_tokens : `int`, optional
Maximum number of tokens in source sequence.
target_max_tokens : `int`, optional
Maximum number of tokens in target sequence.
source_prefix : `str`, optional
An optional prefix to prepend to source strings. For example, with a T5 model,
you want to set the `source_prefix` to "summarize: ".
"""
def __init__(
self,
source_tokenizer: Tokenizer = None,
target_tokenizer: Tokenizer = None,
source_token_indexers: Dict[str, TokenIndexer] = None,
target_token_indexers: Dict[str, TokenIndexer] = None,
source_max_tokens: Optional[int] = None,
target_max_tokens: Optional[int] = None,
source_prefix: Optional[str] = None,
**kwargs,
) -> None:
super().__init__(
manual_distributed_sharding=True, manual_multiprocess_sharding=True, **kwargs
)
self._source_tokenizer = source_tokenizer or SpacyTokenizer()
self._target_tokenizer = target_tokenizer or self._source_tokenizer
self._source_token_indexers = source_token_indexers or {"tokens": SingleIdTokenIndexer()}
self._target_token_indexers = target_token_indexers or self._source_token_indexers
self._source_max_tokens = source_max_tokens
self._target_max_tokens = target_max_tokens
self._source_prefix = source_prefix
@staticmethod
def _hashhex(url):
h = hashlib.sha1()
h.update(url)
return h.hexdigest()
@staticmethod
def _sanitize_story_line(line):
line = ftfy.fix_encoding(line)
sentence_endings = [".", "!", "?", "...", "'", "`", '"', ")", "\u2019", "\u201d"]
# Highlight are essentially bullet points and don't have proper sentence endings
if line[-1] not in sentence_endings:
line += "."
return line
@staticmethod
def _read_story(story_path: str):
article: List[str] = []
summary: List[str] = []
highlight = False
with open(story_path, "r") as f:
for line in f:
line = line.strip()
# CNN stories always start with "(CNN)"
if line.startswith("(CNN)"):
line = line[len("(CNN)") :]
if line == "":
continue
if line == "@highlight":
highlight = True
continue
line = CNNDailyMailDatasetReader._sanitize_story_line(line)
(summary if highlight else article).append(line)
return " ".join(article), " ".join(summary)
@staticmethod
def _strip_extension(filename: str) -> str:
return os.path.splitext(filename)[0]
def _read(self, file_path: str):
# Reset exceeded counts
self._source_max_exceeded = 0
self._target_max_exceeded = 0
url_file_path = cached_path(file_path, extract_archive=True)
data_dir = os.path.join(os.path.dirname(url_file_path), "..")
cnn_stories_path = os.path.join(data_dir, "cnn_stories")
dm_stories_path = os.path.join(data_dir, "dm_stories")
cnn_stories = {Path(s).stem for s in glob.glob(os.path.join(cnn_stories_path, "*.story"))}
dm_stories = {Path(s).stem for s in glob.glob(os.path.join(dm_stories_path, "*.story"))}
with open(url_file_path, "r") as url_file:
for url in self.shard_iterable(url_file):
url = url.strip()
url_hash = self._hashhex(url.encode("utf-8"))
if url_hash in cnn_stories:
story_base_path = cnn_stories_path
elif url_hash in dm_stories:
story_base_path = dm_stories_path
else:
raise ConfigurationError(
"Story with url '%s' and hash '%s' not found" % (url, url_hash)
)
story_path = os.path.join(story_base_path, url_hash) + ".story"
article, summary = self._read_story(story_path)
if len(article) == 0 or len(summary) == 0 or len(article) < len(summary):
continue
yield self.text_to_instance(article, summary)
def text_to_instance(
self, source_sequence: str, target_sequence: str = None
) -> Instance: # type: ignore
if self._source_prefix is not None:
tokenized_source = self._source_tokenizer.tokenize(
self._source_prefix + source_sequence
)
else:
tokenized_source = self._source_tokenizer.tokenize(source_sequence)
if self._source_max_tokens is not None and len(tokenized_source) > self._source_max_tokens:
tokenized_source = tokenized_source[: self._source_max_tokens]
source_field = TextField(tokenized_source)
if target_sequence is not None:
tokenized_target = self._target_tokenizer.tokenize(target_sequence)
if (
self._target_max_tokens is not None
and len(tokenized_target) > self._target_max_tokens
):
tokenized_target = tokenized_target[: self._target_max_tokens]
target_field = TextField(tokenized_target)
return Instance({"source_tokens": source_field, "target_tokens": target_field})
else:
return Instance({"source_tokens": source_field})
def apply_token_indexers(self, instance: Instance) -> None:
instance.fields["source_tokens"]._token_indexers = self._source_token_indexers # type: ignore
if "target_tokens" in instance.fields:
instance.fields["target_tokens"]._token_indexers = self._target_token_indexers # type: ignore
| allennlp-models-main | allennlp_models/generation/dataset_readers/cnn_dm.py |
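`_read_story` splits a `.story` file into an article and a summary by treating every line after an `@highlight` marker as a summary bullet. Here is a toy reproduction of that split on an in-memory list of lines; the `(CNN)` prefix handling and sentence-ending sanitization are omitted for brevity.

```python
from typing import List, Tuple

def split_story(lines: List[str]) -> Tuple[str, str]:
    """Toy article/summary split: every line after the first '@highlight' goes to the summary."""
    article: List[str] = []
    summary: List[str] = []
    highlight = False
    for line in lines:
        line = line.strip()
        if not line:
            continue
        if line == "@highlight":
            highlight = True
            continue
        (summary if highlight else article).append(line)
    return " ".join(article), " ".join(summary)

story = [
    "Something happened in the city today.",
    "Officials said more details would follow.",
    "",
    "@highlight",
    "Something happened",
    "@highlight",
    "Details to follow",
]
article, summary = split_story(story)
print(article)   # the two article sentences joined with spaces
print(summary)   # the two highlight bullets joined with spaces
```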
import csv
from typing import Dict, Optional
import logging
import copy
from allennlp.common.checks import ConfigurationError
from allennlp.common.file_utils import cached_path
from allennlp.common.util import START_SYMBOL, END_SYMBOL
from allennlp.data.dataset_readers.dataset_reader import DatasetReader
from allennlp.data.fields import TextField
from allennlp.data.instance import Instance
from allennlp.data.tokenizers import Tokenizer, SpacyTokenizer, Token
from allennlp.data.token_indexers import TokenIndexer, SingleIdTokenIndexer
logger = logging.getLogger(__name__)
@DatasetReader.register("seq2seq")
class Seq2SeqDatasetReader(DatasetReader):
"""
Read a tsv file containing paired sequences, and create a dataset suitable for a
`ComposedSeq2Seq` model, or any model with a matching API.
Expected format for each input line: <source_sequence_string>\t<target_sequence_string>
The output of `read` is a list of `Instance` s with the fields:
source_tokens : `TextField` and
target_tokens : `TextField`
`START_SYMBOL` and `END_SYMBOL` tokens are added to the source and target sequences.
# Parameters
source_tokenizer : `Tokenizer`, optional
Tokenizer to use to split the input sequences into words or other kinds of tokens. Defaults
to `SpacyTokenizer()`.
target_tokenizer : `Tokenizer`, optional
Tokenizer to use to split the output sequences (during training) into words or other kinds
of tokens. Defaults to `source_tokenizer`.
source_token_indexers : `Dict[str, TokenIndexer]`, optional
Indexers used to define input (source side) token representations. Defaults to
`{"tokens": SingleIdTokenIndexer()}`.
target_token_indexers : `Dict[str, TokenIndexer]`, optional
Indexers used to define output (target side) token representations. Defaults to
`source_token_indexers`.
source_add_start_token : `bool`, (optional, default=`True`)
Whether or not to add `start_symbol` to the beginning of the source sequence.
source_add_end_token : `bool`, (optional, default=`True`)
Whether or not to add `end_symbol` to the end of the source sequence.
target_add_start_token : `bool`, (optional, default=`True`)
Whether or not to add `start_symbol` to the beginning of the target sequence.
target_add_end_token : `bool`, (optional, default=`True`)
Whether or not to add `end_symbol` to the end of the target sequence.
start_symbol : `str`, (optional, default=`START_SYMBOL`)
The special token to add to the beginning of the source or target sequence if
`source_add_start_token` or `target_add_start_token`, respectively.
end_symbol : `str`, (optional, default=`END_SYMBOL`)
The special token to add to the end of the source sequence or the target sequence if
`source_add_end_token` or `target_add_end_token` respectively.
delimiter : `str`, (optional, default=`"\t"`)
Set delimiter for tsv/csv file.
quoting : `int`, (optional, default=`csv.QUOTE_MINIMAL`)
Quoting to use for csv reader.
"""
def __init__(
self,
source_tokenizer: Tokenizer = None,
target_tokenizer: Tokenizer = None,
source_token_indexers: Dict[str, TokenIndexer] = None,
target_token_indexers: Dict[str, TokenIndexer] = None,
source_add_start_token: bool = True,
source_add_end_token: bool = True,
target_add_start_token: bool = True,
target_add_end_token: bool = True,
start_symbol: str = START_SYMBOL,
end_symbol: str = END_SYMBOL,
delimiter: str = "\t",
source_max_tokens: Optional[int] = None,
target_max_tokens: Optional[int] = None,
quoting: int = csv.QUOTE_MINIMAL,
**kwargs,
) -> None:
super().__init__(
manual_distributed_sharding=True, manual_multiprocess_sharding=True, **kwargs
)
self._source_tokenizer = source_tokenizer or SpacyTokenizer()
self._target_tokenizer = target_tokenizer or self._source_tokenizer
self._source_token_indexers = source_token_indexers or {"tokens": SingleIdTokenIndexer()}
self._target_token_indexers = target_token_indexers or self._source_token_indexers
self._source_add_start_token = source_add_start_token
self._source_add_end_token = source_add_end_token
self._target_add_start_token = target_add_start_token
self._target_add_end_token = target_add_end_token
self._start_token: Optional[Token] = None
self._end_token: Optional[Token] = None
if (
source_add_start_token
or source_add_end_token
or target_add_start_token
or target_add_end_token
):
if source_add_start_token or source_add_end_token:
self._check_start_end_tokens(start_symbol, end_symbol, self._source_tokenizer)
if (
target_add_start_token or target_add_end_token
) and self._target_tokenizer != self._source_tokenizer:
self._check_start_end_tokens(start_symbol, end_symbol, self._target_tokenizer)
self._start_token = Token(start_symbol)
self._end_token = Token(end_symbol)
self._delimiter = delimiter
self._source_max_tokens = source_max_tokens
self._target_max_tokens = target_max_tokens
self._source_max_exceeded = 0
self._target_max_exceeded = 0
self.quoting = quoting
def _read(self, file_path: str):
# Reset exceeded counts
self._source_max_exceeded = 0
self._target_max_exceeded = 0
with open(cached_path(file_path), "r") as data_file:
logger.info("Reading instances from lines in file at: %s", file_path)
for line_num, row in self.shard_iterable(
enumerate(csv.reader(data_file, delimiter=self._delimiter, quoting=self.quoting))
):
if len(row) != 2:
raise ConfigurationError(
"Invalid line format: %s (line number %d)" % (row, line_num + 1)
)
source_sequence, target_sequence = row
if len(source_sequence) == 0 or len(target_sequence) == 0:
continue
yield self.text_to_instance(source_sequence, target_sequence)
if self._source_max_tokens and self._source_max_exceeded:
logger.info(
"In %d instances, the source token length exceeded the max limit (%d) and were truncated.",
self._source_max_exceeded,
self._source_max_tokens,
)
if self._target_max_tokens and self._target_max_exceeded:
logger.info(
"In %d instances, the target token length exceeded the max limit (%d) and were truncated.",
self._target_max_exceeded,
self._target_max_tokens,
)
def text_to_instance(
self, source_string: str, target_string: str = None
) -> Instance: # type: ignore
tokenized_source = self._source_tokenizer.tokenize(source_string)
if self._source_max_tokens and len(tokenized_source) > self._source_max_tokens:
self._source_max_exceeded += 1
tokenized_source = tokenized_source[: self._source_max_tokens]
if self._source_add_start_token:
tokenized_source.insert(0, copy.deepcopy(self._start_token))
if self._source_add_end_token:
tokenized_source.append(copy.deepcopy(self._end_token))
source_field = TextField(tokenized_source)
if target_string is not None:
tokenized_target = self._target_tokenizer.tokenize(target_string)
if self._target_max_tokens and len(tokenized_target) > self._target_max_tokens:
self._target_max_exceeded += 1
tokenized_target = tokenized_target[: self._target_max_tokens]
if self._target_add_start_token:
tokenized_target.insert(0, copy.deepcopy(self._start_token))
if self._target_add_end_token:
tokenized_target.append(copy.deepcopy(self._end_token))
target_field = TextField(tokenized_target)
return Instance({"source_tokens": source_field, "target_tokens": target_field})
else:
return Instance({"source_tokens": source_field})
def apply_token_indexers(self, instance: Instance) -> None:
instance.fields["source_tokens"]._token_indexers = self._source_token_indexers # type: ignore
if "target_tokens" in instance.fields:
instance.fields["target_tokens"]._token_indexers = self._target_token_indexers # type: ignore
def _check_start_end_tokens(
self, start_symbol: str, end_symbol: str, tokenizer: Tokenizer
) -> None:
"""Check that `tokenizer` correctly appends `start_symbol` and `end_symbol` to the
sequence without splitting them. Raises a `ValueError` if this is not the case.
"""
tokens = tokenizer.tokenize(start_symbol + " " + end_symbol)
        err_msg = (
            f"Bad start or end symbol ('{start_symbol}', '{end_symbol}') "
            f"for tokenizer {tokenizer}"
        )
try:
start_token, end_token = tokens[0], tokens[-1]
except IndexError:
raise ValueError(err_msg)
if start_token.text != start_symbol or end_token.text != end_symbol:
raise ValueError(err_msg)
| allennlp-models-main | allennlp_models/generation/dataset_readers/seq2seq.py |
from allennlp_models.generation.predictors.seq2seq import Seq2SeqPredictor
| allennlp-models-main | allennlp_models/generation/predictors/__init__.py |
from allennlp.common.util import JsonDict
from allennlp.data import Instance
from allennlp.predictors.predictor import Predictor
@Predictor.register("seq2seq")
class Seq2SeqPredictor(Predictor):
"""
Predictor for sequence to sequence models, including
- [`ComposedSeq2Seq`](../models/composed_seq2seq.md),
- [`SimpleSeq2Seq`](../models/simple_seq2seq.md),
- [`CopyNetSeq2Seq`](../models/copynet_seq2seq.md),
- [`Bart`](../models/bart.md), and
- [`T5`](../models/t5.md).
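
    # Examples

    A minimal sketch of loading a trained archive (the path below is a placeholder, not a
    published model; importing `allennlp_models.generation` ensures the predictor is registered):

    ```python
    import allennlp_models.generation  # noqa: F401
    from allennlp.predictors import Predictor

    # "/path/to/model.tar.gz" is a placeholder for your own trained seq2seq archive.
    predictor = Predictor.from_path("/path/to/model.tar.gz", predictor_name="seq2seq")
    predictor.predict(source="The dog ate the apple")
    ```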
"""
def predict(self, source: str) -> JsonDict:
return self.predict_json({"source": source})
def _json_to_instance(self, json_dict: JsonDict) -> Instance:
"""
Expects JSON that looks like `{"source": "..."}`.
"""
source = json_dict["source"]
return self._dataset_reader.text_to_instance(source)
@classmethod
def pretrained_t5_for_generation(cls, model_name: str = "t5-base") -> "Seq2SeqPredictor":
"""
A helper method for creating a predictor for a pretrained T5 generation model.
# Examples
```python
from allennlp_models.generation.predictors import Seq2SeqPredictor
ARTICLE_TO_SUMMARIZE = '''
summarize: The tower is 324 metres (1,063 ft) tall, about the same height as an 81-storey building,
and the tallest structure in Paris. Its base is square, measuring 125 metres (410 ft) on each side.
During its construction, the Eiffel Tower surpassed the Washington Monument to become the tallest
man-made structure in the world, a title it held for 41 years until the Chrysler Building in
New York City was finished in 1930. It was the first structure to reach a height of 300 metres.
Due to the addition of a broadcasting aerial at the top of the tower in 1957, it is now taller
than the Chrysler Building by 5.2 metres (17 ft). Excluding transmitters, the Eiffel Tower is
the second tallest free-standing structure in France after the Millau Viaduct.
'''.strip().replace(
"\n", " "
)
predictor = Seq2SeqPredictor.pretrained_t5_for_generation("t5-small")
predictor.predict(ARTICLE_TO_SUMMARIZE)
```
"""
from allennlp.data import Vocabulary
from allennlp.data.tokenizers import PretrainedTransformerTokenizer
from allennlp.data.token_indexers import PretrainedTransformerIndexer
from allennlp_models.generation.dataset_readers import Seq2SeqDatasetReader
from allennlp_models.generation.models import T5
tokenizer, token_indexer = (
PretrainedTransformerTokenizer(model_name),
PretrainedTransformerIndexer(model_name),
)
reader = Seq2SeqDatasetReader(
source_tokenizer=tokenizer,
source_token_indexers={"tokens": token_indexer},
source_add_start_token=False,
source_add_end_token=False,
target_add_start_token=False,
target_add_end_token=False,
)
vocab = Vocabulary.from_pretrained_transformer(model_name)
model = T5(vocab, model_name)
return cls(model, reader)
| allennlp-models-main | allennlp_models/generation/predictors/seq2seq.py |
import warnings
from typing import Dict, List, Tuple, Iterable, Any
import numpy
import torch
import torch.nn.functional as F
from torch.nn.modules.linear import Linear
from torch.nn.modules.rnn import LSTMCell, LSTM
from allennlp.common.util import START_SYMBOL, END_SYMBOL
from allennlp.data import TextFieldTensors, Vocabulary
from allennlp.modules import Attention, TextFieldEmbedder, Seq2SeqEncoder
from allennlp.models.model import Model
from allennlp.modules.token_embedders import Embedding
from allennlp.nn import util
from allennlp.nn.beam_search import BeamSearch
from allennlp.common.lazy import Lazy
from allennlp.training.metrics import BLEU
@Model.register("simple_seq2seq")
class SimpleSeq2Seq(Model):
"""
This `SimpleSeq2Seq` class is a `Model` which takes a sequence, encodes it, and then
uses the encoded representations to decode another sequence. You can use this as the basis for
a neural machine translation system, an abstractive summarization system, or any other common
seq2seq problem. The model here is simple, but should be a decent starting place for
implementing recent models for these tasks.
# Parameters
vocab : `Vocabulary`, required
Vocabulary containing source and target vocabularies. They may be under the same namespace
(`tokens`) or the target tokens can have a different namespace, in which case it needs to
be specified as `target_namespace`.
source_embedder : `TextFieldEmbedder`, required
Embedder for source side sequences
encoder : `Seq2SeqEncoder`, required
The encoder of the "encoder/decoder" model
beam_search : `BeamSearch`, optional (default = `Lazy(BeamSearch)`)
        This is used during inference to select the tokens of the decoded output sequence.
target_namespace : `str`, optional (default = `'tokens'`)
If the target side vocabulary is different from the source side's, you need to specify the
target's namespace here. If not, we'll assume it is "tokens", which is also the default
choice for the source side, and this might cause them to share vocabularies.
target_embedding_dim : `int`, optional (default = `'source_embedding_dim'`)
You can specify an embedding dimensionality for the target side. If not, we'll use the same
value as the source embedder's.
    target_pretrain_file : `str`, optional (default = `None`)
        Path to a pretrained embedding file for the target vocabulary.
    target_decoder_layers : `int`, optional (default = `1`)
        The number of layers in the decoder.
attention : `Attention`, optional (default = `None`)
If you want to use attention to get a dynamic summary of the encoder outputs at each step
of decoding, this is the function used to compute similarity between the decoder hidden
state and encoder outputs.
scheduled_sampling_ratio : `float`, optional (default = `0.`)
At each timestep during training, we sample a random number between 0 and 1, and if it is
not less than this value, we use the ground truth labels for the whole batch. Else, we use
the predictions from the previous time step for the whole batch. If this value is 0.0
(default), this corresponds to teacher forcing, and if it is 1.0, it corresponds to not
using target side ground truth labels. See the following paper for more information:
[Scheduled Sampling for Sequence Prediction with Recurrent Neural Networks. Bengio et al.,
2015](https://arxiv.org/abs/1506.03099).
use_bleu : `bool`, optional (default = `True`)
If True, the BLEU metric will be calculated during validation.
    bleu_ngram_weights : `Iterable[float]`, optional (default = `(0.25, 0.25, 0.25, 0.25)`)
        Weights to assign to scores for each ngram size when computing BLEU.
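
    # Examples

    A minimal construction sketch with a toy vocabulary (the embedding and hidden sizes below
    are arbitrary; in practice the model is usually built from a training configuration):

    ```python
    from allennlp.common.util import START_SYMBOL, END_SYMBOL
    from allennlp.data import Vocabulary
    from allennlp.modules.seq2seq_encoders import LstmSeq2SeqEncoder
    from allennlp.modules.text_field_embedders import BasicTextFieldEmbedder
    from allennlp.modules.token_embedders import Embedding

    # A toy vocabulary; a real one would be built from the training data.
    vocab = Vocabulary()
    vocab.add_tokens_to_namespace([START_SYMBOL, END_SYMBOL, "hello", "world"], "tokens")
    source_embedder = BasicTextFieldEmbedder(
        {"tokens": Embedding(num_embeddings=vocab.get_vocab_size("tokens"), embedding_dim=16)}
    )
    encoder = LstmSeq2SeqEncoder(input_size=16, hidden_size=32)
    model = SimpleSeq2Seq(vocab, source_embedder, encoder)
    ```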
"""
def __init__(
self,
vocab: Vocabulary,
source_embedder: TextFieldEmbedder,
encoder: Seq2SeqEncoder,
beam_search: Lazy[BeamSearch] = Lazy(BeamSearch),
attention: Attention = None,
target_namespace: str = "tokens",
target_embedding_dim: int = None,
scheduled_sampling_ratio: float = 0.0,
use_bleu: bool = True,
bleu_ngram_weights: Iterable[float] = (0.25, 0.25, 0.25, 0.25),
target_pretrain_file: str = None,
target_decoder_layers: int = 1,
**kwargs
) -> None:
super().__init__(vocab)
self._target_namespace = target_namespace
self._target_decoder_layers = target_decoder_layers
self._scheduled_sampling_ratio = scheduled_sampling_ratio
# We need the start symbol to provide as the input at the first timestep of decoding, and
# end symbol as a way to indicate the end of the decoded sequence.
self._start_index = self.vocab.get_token_index(START_SYMBOL, self._target_namespace)
self._end_index = self.vocab.get_token_index(END_SYMBOL, self._target_namespace)
if use_bleu:
pad_index = self.vocab.get_token_index(
self.vocab._padding_token, self._target_namespace
)
self._bleu = BLEU(
bleu_ngram_weights,
exclude_indices={pad_index, self._end_index, self._start_index},
)
else:
self._bleu = None
# At prediction time, we'll use a beam search to find the best target sequence.
# For backwards compatibility, check if beam_size or max_decoding_steps were passed in as
# kwargs. If so, update the BeamSearch object before constructing and raise a DeprecationWarning
deprecation_warning = (
"The parameter {} has been deprecated."
" Provide this parameter as argument to beam_search instead."
)
beam_search_extras = {}
if "beam_size" in kwargs:
beam_search_extras["beam_size"] = kwargs["beam_size"]
warnings.warn(deprecation_warning.format("beam_size"), DeprecationWarning)
if "max_decoding_steps" in kwargs:
beam_search_extras["max_steps"] = kwargs["max_decoding_steps"]
warnings.warn(deprecation_warning.format("max_decoding_steps"), DeprecationWarning)
self._beam_search = beam_search.construct(
end_index=self._end_index, vocab=self.vocab, **beam_search_extras
)
# Dense embedding of source vocab tokens.
self._source_embedder = source_embedder
# Encodes the sequence of source embeddings into a sequence of hidden states.
self._encoder = encoder
num_classes = self.vocab.get_vocab_size(self._target_namespace)
# Attention mechanism applied to the encoder output for each step.
self._attention = attention
# Dense embedding of vocab words in the target space.
target_embedding_dim = target_embedding_dim or source_embedder.get_output_dim()
if not target_pretrain_file:
self._target_embedder = Embedding(
num_embeddings=num_classes, embedding_dim=target_embedding_dim
)
else:
self._target_embedder = Embedding(
embedding_dim=target_embedding_dim,
pretrained_file=target_pretrain_file,
vocab_namespace=self._target_namespace,
vocab=self.vocab,
)
# Decoder output dim needs to be the same as the encoder output dim since we initialize the
# hidden state of the decoder with the final hidden state of the encoder.
self._encoder_output_dim = self._encoder.get_output_dim()
self._decoder_output_dim = self._encoder_output_dim
if self._attention:
# If using attention, a weighted average over encoder outputs will be concatenated
# to the previous target embedding to form the input to the decoder at each
# time step.
self._decoder_input_dim = self._decoder_output_dim + target_embedding_dim
else:
# Otherwise, the input to the decoder is just the previous target embedding.
self._decoder_input_dim = target_embedding_dim
# We'll use an LSTM cell as the recurrent cell that produces a hidden state
# for the decoder at each time step.
# TODO (pradeep): Do not hardcode decoder cell type.
if self._target_decoder_layers > 1:
self._decoder_cell = LSTM(
self._decoder_input_dim,
self._decoder_output_dim,
self._target_decoder_layers,
)
else:
self._decoder_cell = LSTMCell(self._decoder_input_dim, self._decoder_output_dim)
# We project the hidden state from the decoder into the output vocabulary space
# in order to get log probabilities of each target token, at each time step.
self._output_projection_layer = Linear(self._decoder_output_dim, num_classes)
def take_step(
self, last_predictions: torch.Tensor, state: Dict[str, torch.Tensor], step: int
) -> Tuple[torch.Tensor, Dict[str, torch.Tensor]]:
"""
Take a decoding step. This is called by the beam search class.
# Parameters
last_predictions : `torch.Tensor`
A tensor of shape `(group_size,)`, which gives the indices of the predictions
during the last time step.
state : `Dict[str, torch.Tensor]`
A dictionary of tensors that contain the current state information
needed to predict the next step, which includes the encoder outputs,
the source mask, and the decoder hidden state and context. Each of these
tensors has shape `(group_size, *)`, where `*` can be any other number
of dimensions.
step : `int`
The time step in beam search decoding.
# Returns
Tuple[torch.Tensor, Dict[str, torch.Tensor]]
A tuple of `(log_probabilities, updated_state)`, where `log_probabilities`
is a tensor of shape `(group_size, num_classes)` containing the predicted
log probability of each class for the next step, for each item in the group,
while `updated_state` is a dictionary of tensors containing the encoder outputs,
source mask, and updated decoder hidden state and context.
Notes
-----
We treat the inputs as a batch, even though `group_size` is not necessarily
equal to `batch_size`, since the group may contain multiple states
for each source sentence in the batch.
"""
# shape: (group_size, num_classes)
output_projections, state = self._prepare_output_projections(last_predictions, state)
# shape: (group_size, num_classes)
class_log_probabilities = F.log_softmax(output_projections, dim=-1)
return class_log_probabilities, state
def forward(
self, # type: ignore
source_tokens: TextFieldTensors,
target_tokens: TextFieldTensors = None,
) -> Dict[str, torch.Tensor]:
"""
        Make a forward pass with decoder logic for producing the entire target sequence.
# Parameters
source_tokens : `TextFieldTensors`
The output of `TextField.as_array()` applied on the source `TextField`. This will be
passed through a `TextFieldEmbedder` and then through an encoder.
target_tokens : `TextFieldTensors`, optional (default = `None`)
            Output of `TextField.as_array()` applied on target `TextField`. We assume that the
target tokens are also represented as a `TextField`.
# Returns
`Dict[str, torch.Tensor]`
"""
state = self._encode(source_tokens)
if target_tokens:
state = self._init_decoder_state(state)
# The `_forward_loop` decodes the input sequence and computes the loss during training
# and validation.
output_dict = self._forward_loop(state, target_tokens)
else:
output_dict = {}
if not self.training:
state = self._init_decoder_state(state)
predictions = self._forward_beam_search(state)
output_dict.update(predictions)
if target_tokens and self._bleu:
# shape: (batch_size, beam_size, max_sequence_length)
top_k_predictions = output_dict["predictions"]
# shape: (batch_size, max_predicted_sequence_length)
best_predictions = top_k_predictions[:, 0, :]
self._bleu(best_predictions, target_tokens["tokens"]["tokens"])
return output_dict
def make_output_human_readable(self, output_dict: Dict[str, Any]) -> Dict[str, Any]:
"""
Finalize predictions.
This method overrides `Model.make_output_human_readable`, which gets called after `Model.forward`, at test
time, to finalize predictions. The logic for the decoder part of the encoder-decoder lives
within the `forward` method.
This method trims the output predictions to the first end symbol, replaces indices with
corresponding tokens, and adds a field called `predicted_tokens` to the `output_dict`.
"""
predicted_indices = output_dict["predictions"]
if not isinstance(predicted_indices, numpy.ndarray):
predicted_indices = predicted_indices.detach().cpu().numpy()
all_predicted_tokens = []
for top_k_predictions in predicted_indices:
            # Beam search gives us the top k results for each source sentence in the batch.
if len(top_k_predictions.shape) == 1:
top_k_predictions = [top_k_predictions]
batch_predicted_tokens = []
for indices in top_k_predictions:
indices = list(indices)
# Collect indices till the first end_symbol
if self._end_index in indices:
indices = indices[: indices.index(self._end_index)]
predicted_tokens = [
self.vocab.get_token_from_index(x, namespace=self._target_namespace)
for x in indices
]
batch_predicted_tokens.append(predicted_tokens)
all_predicted_tokens.append(batch_predicted_tokens)
output_dict["predicted_tokens"] = all_predicted_tokens
return output_dict
def _encode(self, source_tokens: Dict[str, torch.Tensor]) -> Dict[str, torch.Tensor]:
# shape: (batch_size, max_input_sequence_length, encoder_input_dim)
embedded_input = self._source_embedder(source_tokens)
# shape: (batch_size, max_input_sequence_length)
source_mask = util.get_text_field_mask(source_tokens)
# shape: (batch_size, max_input_sequence_length, encoder_output_dim)
encoder_outputs = self._encoder(embedded_input, source_mask)
return {"source_mask": source_mask, "encoder_outputs": encoder_outputs}
def _init_decoder_state(self, state: Dict[str, torch.Tensor]) -> Dict[str, torch.Tensor]:
batch_size = state["source_mask"].size(0)
# shape: (batch_size, encoder_output_dim)
final_encoder_output = util.get_final_encoder_states(
state["encoder_outputs"],
state["source_mask"],
self._encoder.is_bidirectional(),
)
# Initialize the decoder hidden state with the final output of the encoder.
# shape: (batch_size, decoder_output_dim)
state["decoder_hidden"] = final_encoder_output
# shape: (batch_size, decoder_output_dim)
state["decoder_context"] = state["encoder_outputs"].new_zeros(
batch_size, self._decoder_output_dim
)
if self._target_decoder_layers > 1:
# shape: (num_layers, batch_size, decoder_output_dim)
state["decoder_hidden"] = (
state["decoder_hidden"].unsqueeze(0).repeat(self._target_decoder_layers, 1, 1)
)
# shape: (num_layers, batch_size, decoder_output_dim)
state["decoder_context"] = (
state["decoder_context"].unsqueeze(0).repeat(self._target_decoder_layers, 1, 1)
)
return state
def _forward_loop(
self, state: Dict[str, torch.Tensor], target_tokens: TextFieldTensors = None
) -> Dict[str, torch.Tensor]:
"""
Make forward pass during training or do greedy search during prediction.
Notes
-----
We really only use the predictions from the method to test that beam search
with a beam size of 1 gives the same results.
"""
# shape: (batch_size, max_input_sequence_length)
source_mask = state["source_mask"]
batch_size = source_mask.size()[0]
if target_tokens:
# shape: (batch_size, max_target_sequence_length)
targets = target_tokens["tokens"]["tokens"]
_, target_sequence_length = targets.size()
# The last input from the target is either padding or the end symbol.
# Either way, we don't have to process it.
num_decoding_steps = target_sequence_length - 1
else:
num_decoding_steps = self._beam_search.max_steps
# Initialize target predictions with the start index.
# shape: (batch_size,)
last_predictions = source_mask.new_full(
(batch_size,), fill_value=self._start_index, dtype=torch.long
)
step_logits: List[torch.Tensor] = []
step_predictions: List[torch.Tensor] = []
for timestep in range(num_decoding_steps):
if (
self.training
and self._scheduled_sampling_ratio > 0.0
and torch.rand(1).item() < self._scheduled_sampling_ratio
):
                # During training, use the model's own predictions from the previous time step
                # at a rate of `self._scheduled_sampling_ratio` (scheduled sampling).
# shape: (batch_size,)
input_choices = last_predictions
elif not target_tokens:
# shape: (batch_size,)
input_choices = last_predictions
else:
# shape: (batch_size,)
input_choices = targets[:, timestep]
# shape: (batch_size, num_classes)
output_projections, state = self._prepare_output_projections(input_choices, state)
# list of tensors, shape: (batch_size, 1, num_classes)
step_logits.append(output_projections.unsqueeze(1))
# shape: (batch_size, num_classes)
class_probabilities = F.softmax(output_projections, dim=-1)
# shape (predicted_classes): (batch_size,)
_, predicted_classes = torch.max(class_probabilities, 1)
# shape (predicted_classes): (batch_size,)
last_predictions = predicted_classes
step_predictions.append(last_predictions.unsqueeze(1))
# shape: (batch_size, num_decoding_steps)
predictions = torch.cat(step_predictions, 1)
output_dict = {"predictions": predictions}
if target_tokens:
# shape: (batch_size, num_decoding_steps, num_classes)
logits = torch.cat(step_logits, 1)
# Compute loss.
target_mask = util.get_text_field_mask(target_tokens)
loss = self._get_loss(logits, targets, target_mask)
output_dict["loss"] = loss
return output_dict
def _forward_beam_search(self, state: Dict[str, torch.Tensor]) -> Dict[str, torch.Tensor]:
"""Make forward pass during prediction using a beam search."""
batch_size = state["source_mask"].size()[0]
start_predictions = state["source_mask"].new_full(
(batch_size,), fill_value=self._start_index, dtype=torch.long
)
# shape (all_top_k_predictions): (batch_size, beam_size, num_decoding_steps)
# shape (log_probabilities): (batch_size, beam_size)
all_top_k_predictions, log_probabilities = self._beam_search.search(
start_predictions, state, self.take_step
)
output_dict = {
"class_log_probabilities": log_probabilities,
"predictions": all_top_k_predictions,
}
return output_dict
def _prepare_output_projections(
self, last_predictions: torch.Tensor, state: Dict[str, torch.Tensor]
) -> Tuple[torch.Tensor, Dict[str, torch.Tensor]]:
"""
        Decode the current state and last prediction to produce projections
into the target space, which can then be used to get probabilities of
each target token for the next step.
Inputs are the same as for `take_step()`.
"""
# shape: (group_size, max_input_sequence_length, encoder_output_dim)
encoder_outputs = state["encoder_outputs"]
# shape: (group_size, max_input_sequence_length)
source_mask = state["source_mask"]
# shape: (num_layers, group_size, decoder_output_dim)
decoder_hidden = state["decoder_hidden"]
# shape: (num_layers, group_size, decoder_output_dim)
decoder_context = state["decoder_context"]
# shape: (group_size, target_embedding_dim)
embedded_input = self._target_embedder(last_predictions)
if self._attention:
# shape: (group_size, encoder_output_dim)
if self._target_decoder_layers > 1:
attended_input = self._prepare_attended_input(
decoder_hidden[0], encoder_outputs, source_mask
)
else:
attended_input = self._prepare_attended_input(
decoder_hidden, encoder_outputs, source_mask
)
# shape: (group_size, decoder_output_dim + target_embedding_dim)
decoder_input = torch.cat((attended_input, embedded_input), -1)
else:
# shape: (group_size, target_embedding_dim)
decoder_input = embedded_input
if self._target_decoder_layers > 1:
            # shape: (1, batch_size, decoder_input_dim)
decoder_input = decoder_input.unsqueeze(0)
# shape (decoder_hidden): (num_layers, batch_size, decoder_output_dim)
# shape (decoder_context): (num_layers, batch_size, decoder_output_dim)
_, (decoder_hidden, decoder_context) = self._decoder_cell(
decoder_input.float(), (decoder_hidden.float(), decoder_context.float())
)
else:
# shape (decoder_hidden): (batch_size, decoder_output_dim)
# shape (decoder_context): (batch_size, decoder_output_dim)
decoder_hidden, decoder_context = self._decoder_cell(
decoder_input.float(), (decoder_hidden.float(), decoder_context.float())
)
state["decoder_hidden"] = decoder_hidden
state["decoder_context"] = decoder_context
# shape: (group_size, num_classes)
if self._target_decoder_layers > 1:
output_projections = self._output_projection_layer(decoder_hidden[-1])
else:
output_projections = self._output_projection_layer(decoder_hidden)
return output_projections, state
def _prepare_attended_input(
self,
decoder_hidden_state: torch.LongTensor = None,
encoder_outputs: torch.LongTensor = None,
encoder_outputs_mask: torch.BoolTensor = None,
) -> torch.Tensor:
"""Apply attention over encoder outputs and decoder state."""
# shape: (batch_size, max_input_sequence_length)
input_weights = self._attention(decoder_hidden_state, encoder_outputs, encoder_outputs_mask)
# shape: (batch_size, encoder_output_dim)
attended_input = util.weighted_sum(encoder_outputs, input_weights)
return attended_input
@staticmethod
def _get_loss(
logits: torch.LongTensor,
targets: torch.LongTensor,
target_mask: torch.BoolTensor,
) -> torch.Tensor:
"""
Compute loss.
Takes logits (unnormalized outputs from the decoder) of size (batch_size,
num_decoding_steps, num_classes), target indices of size (batch_size, num_decoding_steps+1)
and corresponding masks of size (batch_size, num_decoding_steps+1) steps and computes cross
entropy loss while taking the mask into account.
The length of `targets` is expected to be greater than that of `logits` because the
decoder does not need to compute the output corresponding to the last timestep of
`targets`. This method aligns the inputs appropriately to compute the loss.
During training, we want the logit corresponding to timestep i to be similar to the target
token from timestep i + 1. That is, the targets should be shifted by one timestep for
appropriate comparison. Consider a single example where the target has 3 words, and
padding is to 7 tokens.
The complete sequence would correspond to <S> w1 w2 w3 <E> <P> <P>
and the mask would be 1 1 1 1 1 0 0
and let the logits be l1 l2 l3 l4 l5 l6
We actually need to compare:
the sequence w1 w2 w3 <E> <P> <P>
with masks 1 1 1 1 0 0
against l1 l2 l3 l4 l5 l6
(where the input was) <S> w1 w2 w3 <E> <P>
"""
# shape: (batch_size, num_decoding_steps)
relevant_targets = targets[:, 1:].contiguous()
# shape: (batch_size, num_decoding_steps)
relevant_mask = target_mask[:, 1:].contiguous()
return util.sequence_cross_entropy_with_logits(logits, relevant_targets, relevant_mask)
def get_metrics(self, reset: bool = False) -> Dict[str, float]:
all_metrics: Dict[str, float] = {}
if self._bleu and not self.training:
all_metrics.update(self._bleu.get_metric(reset=reset))
return all_metrics
default_predictor = "seq2seq"
| allennlp-models-main | allennlp_models/generation/models/simple_seq2seq.py |
from os import PathLike
from typing import Optional, Dict, Any, Union, List, Tuple
import torch
from allennlp.common.lazy import Lazy
from allennlp.data import TextFieldTensors, Vocabulary
from allennlp.data.tokenizers import PretrainedTransformerTokenizer
from allennlp.models.model import Model
from allennlp.modules.transformer.t5 import T5 as T5Module, T5Output, IntT, BoolT
from allennlp.nn.beam_search import BeamSearch
from allennlp.nn.checkpoint import CheckpointWrapper
from allennlp.training.metrics import ROUGE, BLEU
@Model.register("t5")
class T5(Model):
def __init__(
self,
vocab: Vocabulary,
model_name: str,
beam_search: Lazy[BeamSearch] = Lazy(BeamSearch, beam_size=3, max_steps=50),
checkpoint_wrapper: Optional[CheckpointWrapper] = None,
weights_path: Optional[Union[str, PathLike]] = None,
**kwargs
) -> None:
super().__init__(vocab, **kwargs)
self._model_name = model_name
# We only instantiate this when we need it.
self._tokenizer: Optional[PretrainedTransformerTokenizer] = None
self.t5 = T5Module.from_pretrained_module(
model_name,
beam_search=beam_search,
ddp_accelerator=self.ddp_accelerator,
checkpoint_wrapper=checkpoint_wrapper,
weights_path=weights_path,
)
exclude_indices = {
self.t5.pad_token_id,
self.t5.decoder_start_token_id,
self.t5.eos_token_id,
}
self._metrics = [
ROUGE(exclude_indices=exclude_indices),
BLEU(exclude_indices=exclude_indices),
]
def _post_load_state_dict(
self, missing_keys: List[str], unexpected_keys: List[str]
) -> Tuple[List[str], List[str]]:
missing_keys_to_ignore = [
"t5.encoder.token_embeddings.weight",
"t5.decoder.token_embeddings.weight",
]
if self.t5._tie_word_embeddings:
missing_keys_to_ignore.append("t5.lm_head.weight")
for key in missing_keys_to_ignore:
if key in missing_keys:
missing_keys.remove(key)
return missing_keys, unexpected_keys
@property
def tokenizer(self) -> PretrainedTransformerTokenizer:
if self._tokenizer is None:
self._tokenizer = PretrainedTransformerTokenizer(self._model_name)
return self._tokenizer
def forward( # type: ignore
self, source_tokens: TextFieldTensors, target_tokens: Optional[TextFieldTensors] = None
) -> Dict[str, torch.Tensor]:
"""
Performs the forward step of T5.
# Parameters
source_tokens : `TextFieldTensors`, required
The source tokens for the encoder. We assume they are stored under the `tokens` key/namespace.
        target_tokens : `TextFieldTensors`, optional (default = `None`)
            The target tokens for the decoder. We assume they are also stored under the `tokens` key/namespace.
            These are required during training. If they are not given at inference time, only
            predictions are returned and no loss is computed.
# Returns
`Dict[str, torch.Tensor]`
Contains the `loss` when `target_tokens` is provided.
And during prediction, includes `predictions` and `predicted_log_probs` from beam search.
"""
input_ids, attention_mask = (
source_tokens["tokens"]["token_ids"],
source_tokens["tokens"]["mask"],
)
labels: Optional[IntT] = None
decoder_attention_mask: Optional[BoolT] = None
if target_tokens is not None:
labels, decoder_attention_mask = (
target_tokens["tokens"]["token_ids"], # type: ignore[assignment]
target_tokens["tokens"]["mask"], # type: ignore[assignment]
)
elif self.training:
raise ValueError("'target_tokens' required during training")
output: T5Output = self.t5(
input_ids,
attention_mask=attention_mask,
labels=labels,
decoder_attention_mask=decoder_attention_mask,
)
output_dict: Dict[str, torch.Tensor] = {}
if self.training:
assert output.loss is not None
output_dict["loss"] = output.loss
else:
# Shape: (batch_size, beam_size, num_tokens)
assert output.predictions is not None
# Shape: (batch_size, beam_size)
assert output.predicted_log_probs is not None
# Shape: (batch_size, num_tokens)
output_dict["predictions"] = output.predictions[:, 0, :]
# Shape: (batch_size, )
output_dict["predicted_log_probs"] = output.predicted_log_probs[:, 0]
if labels is not None:
assert output.loss is not None
output_dict["loss"] = output.loss
for metric in self._metrics:
metric(output_dict["predictions"], labels) # type: ignore[call-arg]
return output_dict
def make_output_human_readable(self, output_dict: Dict[str, torch.Tensor]) -> Dict[str, Any]:
predictions = output_dict["predictions"]
output_dict["predicted_text"] = self.tokenizer.tokenizer.batch_decode(
predictions, skip_special_tokens=True # type: ignore[attr-defined]
)
return output_dict
def get_metrics(self, reset: bool = False) -> Dict[str, float]:
metrics: Dict[str, float] = {}
if not self.training:
for metric in self._metrics:
metrics.update(metric.get_metric(reset=reset))
return metrics
default_predictor = "seq2seq"
| allennlp-models-main | allennlp_models/generation/models/t5.py |
import warnings
from typing import Dict, Tuple, Any, cast
from allennlp.data import Vocabulary
from allennlp.data.fields.text_field import TextFieldTensors
from allennlp.data.token_indexers.pretrained_transformer_indexer import PretrainedTransformerIndexer
from allennlp.models.model import Model
from allennlp.modules.seq2seq_encoders.seq2seq_encoder import Seq2SeqEncoder
from allennlp.nn.beam_search import BeamSearch
from allennlp.nn.util import sequence_cross_entropy_with_logits
from allennlp.training.metrics import ROUGE, BLEU
from allennlp.common.lazy import Lazy
from transformers.models.bart.modeling_bart import BartModel, BartForConditionalGeneration
import torch
from torch import nn
import torch.nn.functional as F
DecoderCacheType = Tuple[Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor], ...]
@Seq2SeqEncoder.register("bart_encoder")
class BartEncoder(Seq2SeqEncoder):
"""
The BART encoder, implemented as a `Seq2SeqEncoder`, which assumes it operates on
already embedded inputs. This means that we remove the token and position embeddings
from BART in this module. For the typical use case of using BART to encode inputs to your
model (where we include the token and position embeddings from BART), you should use
`PretrainedTransformerEmbedder(bart_model_name, sub_module="encoder")` instead of this.
# Parameters
model_name : `str`, required
Name of the pre-trained BART model to use. Available options can be found in
`transformers.models.bart.modeling_bart.BART_PRETRAINED_MODEL_ARCHIVE_MAP`.
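
    # Examples

    A minimal sketch (`"facebook/bart-base"` is just an illustrative checkpoint name; note
    that `forward` expects already-embedded inputs rather than token ids):

    ```python
    # Downloads pretrained weights from the HuggingFace hub.
    encoder = BartEncoder("facebook/bart-base")
    assert encoder.get_input_dim() == encoder.get_output_dim()
    ```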
"""
def __init__(self, model_name):
super().__init__()
bart = BartModel.from_pretrained(model_name)
self.hidden_dim = bart.config.hidden_size
self.bart_encoder = bart.encoder
self.bart_encoder.embed_tokens = lambda x: x
self.bart_encoder.embed_positions = lambda x: torch.zeros(
(x.shape[0], x.shape[1], self.hidden_dim), dtype=torch.float32
)
def get_input_dim(self) -> int:
return self.hidden_dim
def get_output_dim(self) -> int:
return self.hidden_dim
def is_bidirectional(self) -> bool:
return False
def forward(self, inputs: torch.Tensor, mask: torch.BoolTensor):
# The first element is always the last encoder states for each input token.
# Depending on the config, the second output will contain a list of the encoder states
# after each transformer layer. Similarly, the third output can contain the attentions from each layer.
# We only care about the first element.
return self.bart_encoder(input_ids=inputs, attention_mask=mask)[0]
class _BartEncoderWrapper(nn.Module):
"""
A wrapper class for a `Seq2SeqEncoder` allowing it to replace the encoder in `Bart`.
This class is only used internally by `Bart`.
"""
def __init__(
self, encoder: Seq2SeqEncoder, embed_tokens: nn.Embedding, embed_positions: nn.Embedding
):
"""
# Parameters
encoder : `Seq2SeqEncoder`, required
Encoder to be used by `Bart`.
embed_tokens : `nn.Embedding`, required
The token embedding layer of the BART model.
embed_positions : `nn.Embedding`, required
The positional embedding layer of the BART model.
"""
super().__init__()
self.encoder = encoder
self.embed_tokens = embed_tokens
self.embed_positions = embed_positions
        # AllenNLP `Seq2SeqEncoder`s don't necessarily return attentions or hidden states, and the
        # encoder might not even use attention, so make sure those outputs are not expected.
# assert not bart_config.output_attentions
# assert not bart_config.output_hidden_states
def forward(
self,
input_ids,
attention_mask=None,
):
x = self.embed_tokens(input_ids) + self.embed_positions(input_ids)
encoder_states = self.encoder(x, attention_mask)
# The last two elements are attention and history of hidden states, respectively
return encoder_states, [], []
@Model.register("bart")
class Bart(Model):
"""
BART model from the paper "BART: Denoising Sequence-to-Sequence Pre-training for Natural Language Generation,
Translation, and Comprehension" (https://arxiv.org/abs/1910.13461). The Bart model here uses a language
modeling head and thus can be used for text generation.
# Parameters
model_name : `str`, required
Name of the pre-trained BART model to use. Available options can be found in
`transformers.models.bart.modeling_bart.BART_PRETRAINED_MODEL_ARCHIVE_MAP`.
vocab : `Vocabulary`, required
Vocabulary containing source and target vocabularies.
    beam_search : `Lazy[BeamSearch]`, optional (default = `Lazy(BeamSearch)`)
        This is used during inference to select the tokens of the decoded output sequence.
    indexer : `PretrainedTransformerIndexer`, optional (default = `None`)
        Indexer to be used for converting decoded sequences of ids to sequences of tokens.
    encoder : `Seq2SeqEncoder`, optional (default = `None`)
        Encoder to be used in BART. By default, the original BART encoder is used.
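
    # Examples

    A minimal construction sketch (`"facebook/bart-base"` is just an illustrative choice of
    pretrained weights, which are downloaded from the HuggingFace hub):

    ```python
    from allennlp.data import Vocabulary

    model_name = "facebook/bart-base"
    vocab = Vocabulary.from_pretrained_transformer(model_name)
    model = Bart(model_name, vocab)
    ```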
"""
def __init__(
self,
model_name: str,
vocab: Vocabulary,
beam_search: Lazy[BeamSearch] = Lazy(BeamSearch),
indexer: PretrainedTransformerIndexer = None,
encoder: Seq2SeqEncoder = None,
**kwargs,
):
super().__init__(vocab)
self.bart = BartForConditionalGeneration.from_pretrained(model_name)
self._indexer = indexer or PretrainedTransformerIndexer(model_name, namespace="tokens")
self._start_id = self.bart.config.bos_token_id # CLS
self._decoder_start_id = self.bart.config.decoder_start_token_id or self._start_id
self._end_id = self.bart.config.eos_token_id # SEP
self._pad_id = self.bart.config.pad_token_id # PAD
# At prediction time, we'll use a beam search to find the best target sequence.
# For backwards compatibility, check if beam_size or max_decoding_steps were passed in as
# kwargs. If so, update the BeamSearch object before constructing and raise a DeprecationWarning
deprecation_warning = (
"The parameter {} has been deprecated."
" Provide this parameter as argument to beam_search instead."
)
beam_search_extras = {}
if "beam_size" in kwargs:
beam_search_extras["beam_size"] = kwargs["beam_size"]
warnings.warn(deprecation_warning.format("beam_size"), DeprecationWarning)
if "max_decoding_steps" in kwargs:
beam_search_extras["max_steps"] = kwargs["max_decoding_steps"]
warnings.warn(deprecation_warning.format("max_decoding_steps"), DeprecationWarning)
self._beam_search = beam_search.construct(
end_index=self._end_id, vocab=self.vocab, **beam_search_extras
)
self._rouge = ROUGE(exclude_indices={self._start_id, self._pad_id, self._end_id})
self._bleu = BLEU(exclude_indices={self._start_id, self._pad_id, self._end_id})
# Replace bart encoder with given encoder. We need to extract the two embedding layers so that
# we can use them in the encoder wrapper
if encoder is not None:
assert (
encoder.get_input_dim() == encoder.get_output_dim() == self.bart.config.hidden_size
)
self.bart.model.encoder = _BartEncoderWrapper(
encoder,
self.bart.model.encoder.embed_tokens,
self.bart.model.encoder.embed_positions,
)
def forward(
self, source_tokens: TextFieldTensors, target_tokens: TextFieldTensors = None
) -> Dict[str, torch.Tensor]:
"""
Performs the forward step of Bart.
# Parameters
source_tokens : `TextFieldTensors`, required
The source tokens for the encoder. We assume they are stored under the `tokens` key.
target_tokens : `TextFieldTensors`, optional (default = `None`)
The target tokens for the decoder. We assume they are stored under the `tokens` key. If no target
tokens are given, the source tokens are shifted to the right by 1.
# Returns
`Dict[str, torch.Tensor]`
During training, this dictionary contains the `decoder_logits` of shape `(batch_size,
max_target_length, target_vocab_size)` and the `loss`. During inference, it contains `predictions`
of shape `(batch_size, max_decoding_steps)` and `log_probabilities` of shape `(batch_size,)`.
"""
inputs = source_tokens
targets = target_tokens
input_ids, input_mask = inputs["tokens"]["token_ids"], inputs["tokens"]["mask"]
outputs = {}
        # If no targets are provided, then shift the input to the right by 1. BART already does
        # this internally, but it does not use the shifted tokens for loss calculation.
if targets is not None:
target_ids, target_mask = targets["tokens"]["token_ids"], targets["tokens"]["mask"]
else:
target_ids = input_ids[:, 1:]
target_mask = input_mask[:, 1:]
if self.training:
bart_outputs = self.bart(
input_ids=input_ids,
attention_mask=input_mask,
decoder_input_ids=target_ids[:, :-1].contiguous(),
decoder_attention_mask=target_mask[:, :-1].contiguous(),
use_cache=False,
return_dict=True,
)
outputs["decoder_logits"] = bart_outputs.logits
# The BART paper mentions label smoothing of 0.1 for sequence generation tasks
outputs["loss"] = sequence_cross_entropy_with_logits(
bart_outputs.logits,
cast(torch.LongTensor, target_ids[:, 1:].contiguous()),
cast(torch.BoolTensor, target_mask[:, 1:].contiguous()),
label_smoothing=0.1,
average="token",
)
else:
# Use decoder start id and start of sentence to start decoder
initial_decoder_ids = torch.tensor(
[[self._decoder_start_id]],
dtype=input_ids.dtype,
device=input_ids.device,
).repeat(input_ids.shape[0], 1)
            initial_state = {
"input_ids": input_ids,
"input_mask": input_mask,
}
beam_result = self._beam_search.search(
                initial_decoder_ids, initial_state, self.take_step
)
predictions = beam_result[0]
max_pred_indices = (
beam_result[1].argmax(dim=-1).view(-1, 1, 1).expand(-1, -1, predictions.shape[-1])
)
predictions = predictions.gather(dim=1, index=max_pred_indices).squeeze(dim=1)
self._rouge(predictions, target_ids)
self._bleu(predictions, target_ids)
outputs["predictions"] = predictions
outputs["log_probabilities"] = (
beam_result[1].gather(dim=-1, index=max_pred_indices[..., 0]).squeeze(dim=-1)
)
self.make_output_human_readable(outputs)
return outputs
@staticmethod
def _decoder_cache_to_dict(decoder_cache: DecoderCacheType) -> Dict[str, torch.Tensor]:
cache_dict = {}
for layer_index, layer_cache in enumerate(decoder_cache):
# Each layer caches the key and value tensors for its self-attention and cross-attention.
# Hence the `layer_cache` tuple has 4 elements.
assert len(layer_cache) == 4
for tensor_index, tensor in enumerate(layer_cache):
key = f"decoder_cache_{layer_index}_{tensor_index}"
cache_dict[key] = tensor
return cache_dict
def _dict_to_decoder_cache(self, cache_dict: Dict[str, torch.Tensor]) -> DecoderCacheType:
decoder_cache = []
for layer_index in range(len(self.bart.model.decoder.layers)):
base_key = f"decoder_cache_{layer_index}_"
layer_cache = (
cache_dict[base_key + "0"],
cache_dict[base_key + "1"],
cache_dict[base_key + "2"],
cache_dict[base_key + "3"],
)
decoder_cache.append(layer_cache)
assert decoder_cache
return tuple(decoder_cache)
def take_step(
self, last_predictions: torch.Tensor, state: Dict[str, torch.Tensor], step: int
) -> Tuple[torch.Tensor, Dict[str, torch.Tensor]]:
"""
Take step during beam search.
# Parameters
last_predictions : `torch.Tensor`
The predicted token ids from the previous step. Shape: `(group_size,)`
state : `Dict[str, torch.Tensor]`
State required to generate next set of predictions
step : `int`
The time step in beam search decoding.
# Returns
`Tuple[torch.Tensor, Dict[str, torch.Tensor]]`
A tuple containing logits for the next tokens of shape `(group_size, target_vocab_size)` and
an updated state dictionary.
"""
if len(last_predictions.shape) == 1:
last_predictions = last_predictions.unsqueeze(-1)
decoder_cache = None
decoder_cache_dict = {
k: state[k].contiguous()
for k in state
if k not in {"input_ids", "input_mask", "encoder_states"}
}
if len(decoder_cache_dict) != 0:
decoder_cache = self._dict_to_decoder_cache(decoder_cache_dict)
encoder_outputs = (state["encoder_states"],) if "encoder_states" in state else None
outputs = self.bart(
input_ids=state["input_ids"] if encoder_outputs is None else None,
attention_mask=state["input_mask"],
encoder_outputs=encoder_outputs,
decoder_input_ids=last_predictions,
past_key_values=decoder_cache,
use_cache=True,
return_dict=True,
)
logits = outputs.logits[:, -1, :]
log_probabilities = F.log_softmax(logits, dim=-1)
decoder_cache = outputs.past_key_values
if decoder_cache is not None:
decoder_cache_dict = self._decoder_cache_to_dict(decoder_cache)
state.update(decoder_cache_dict)
state["encoder_states"] = outputs.encoder_last_hidden_state
return log_probabilities, state
def make_output_human_readable(self, output_dict: Dict[str, torch.Tensor]) -> Dict[str, Any]:
"""
# Parameters
output_dict : `Dict[str, torch.Tensor]`
A dictionary containing a batch of predictions with key `predictions`. The tensor should have
shape `(batch_size, max_sequence_length)`
# Returns
`Dict[str, Any]`
Original `output_dict` with an additional `predicted_tokens` key that maps to a list of lists of
tokens.
"""
predictions = output_dict["predictions"]
predicted_tokens = [None] * predictions.shape[0]
for i in range(predictions.shape[0]):
predicted_tokens[i] = self._indexer.indices_to_tokens(
{"token_ids": predictions[i].tolist()},
self.vocab,
)
output_dict["predicted_tokens"] = predicted_tokens # type: ignore
output_dict["predicted_text"] = self._indexer._tokenizer.batch_decode(
predictions.tolist(), skip_special_tokens=True
)
return output_dict
def get_metrics(self, reset: bool = False) -> Dict[str, float]:
metrics: Dict[str, float] = {}
if not self.training:
metrics.update(self._rouge.get_metric(reset=reset))
metrics.update(self._bleu.get_metric(reset=reset))
return metrics
default_predictor = "seq2seq"
| allennlp-models-main | allennlp_models/generation/models/bart.py |
import logging
import warnings
from typing import Any, Dict, List, Tuple, Union
import numpy
import torch
from allennlp.common.lazy import Lazy
from allennlp.common.util import END_SYMBOL, START_SYMBOL
from allennlp.data import TextFieldTensors, Vocabulary
from allennlp.models.model import Model
from allennlp.modules import Attention, Seq2SeqEncoder, TextFieldEmbedder
from allennlp.modules.token_embedders import Embedding
from allennlp.nn import InitializerApplicator, util
from allennlp.nn.beam_search import BeamSearch
from allennlp.training.metrics import BLEU, Metric
from torch.nn.modules.linear import Linear
from torch.nn.modules.rnn import LSTMCell
logger = logging.getLogger(__name__)
@Model.register("copynet_seq2seq")
class CopyNetSeq2Seq(Model):
"""
This is an implementation of [CopyNet](https://api.semanticscholar.org/CorpusID:8174613).
CopyNet is a sequence-to-sequence encoder-decoder model with a copying mechanism
that can copy tokens from the source sentence into the target sentence instead of
generating all target tokens only from the target vocabulary.
It is very similar to a typical seq2seq model used in neural machine translation
tasks, for example, except that in addition to providing a "generation" score at each timestep
for the tokens in the target vocabulary, it also provides a "copy" score for each
token that appears in the source sentence. In other words, you can think of CopyNet
as a seq2seq model with a dynamic target vocabulary that changes based on the tokens
in the source sentence, allowing it to predict tokens that are out-of-vocabulary (OOV)
with respect to the actual target vocab.
# Parameters
vocab : `Vocabulary`, required
Vocabulary containing source and target vocabularies.
source_embedder : `TextFieldEmbedder`, required
Embedder for source side sequences
encoder : `Seq2SeqEncoder`, required
The encoder of the "encoder/decoder" model
attention : `Attention`, required
This is used to get a dynamic summary of encoder outputs at each timestep
when producing the "generation" scores for the target vocab.
label_smoothing : `float`, optional (default = `None`)
Whether or not to apply label smoothing to the cross-entropy loss.
For example, with a label smoothing value of 0.2, a 4 class classification
target would look like `[0.05, 0.05, 0.85, 0.05]` if the 3rd class was
the correct label.
beam_search : `BeamSearch`, optional (default = `Lazy(BeamSearch)`)
        This is used during inference to select the tokens of the decoded output sequence.
target_embedding_dim : `int`, optional (default = `30`)
The size of the embeddings for the target vocabulary.
scheduled_sampling_ratio : `float`, optional (default = `0.`)
At each timestep during training, we sample a random number between 0 and 1, and if it is
not less than this value, we use the ground truth labels for the whole batch. Else, we use
the predictions from the previous time step for the whole batch. If this value is 0.0
(default), this corresponds to teacher forcing, and if it is 1.0, it corresponds to not
using target side ground truth labels. See the following paper for more information:
[Scheduled Sampling for Sequence Prediction with Recurrent Neural Networks. Bengio et al.,
2015](https://arxiv.org/abs/1506.03099).
copy_token : `str`, optional (default = `'@COPY@'`)
The token used to indicate that a target token was copied from the source.
If this token is not already in your target vocabulary, it will be added.
target_namespace : `str`, optional (default = `'target_tokens'`)
The namespace for the target vocabulary.
tensor_based_metric : `Metric`, optional (default = `'BLEU'`)
        A metric to track on validation data that takes raw tensors when it is called.
This metric must accept two arguments when called: a batched tensor
of predicted token indices, and a batched tensor of gold token indices.
token_based_metric : `Metric`, optional (default = `None`)
A metric to track on validation data that takes lists of lists of tokens
as input. This metric must accept two arguments when called, both
of type `List[List[str]]`. The first is a predicted sequence for each item
in the batch and the second is a gold sequence for each item in the batch.
initializer : `InitializerApplicator`, optional
An initialization strategy for the model weights.
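
    # Examples

    A minimal construction sketch with a toy vocabulary (the sizes and the choice of attention
    are arbitrary; real models are usually built from a training configuration):

    ```python
    from allennlp.common.util import START_SYMBOL, END_SYMBOL
    from allennlp.data import Vocabulary
    from allennlp.modules.attention import DotProductAttention
    from allennlp.modules.seq2seq_encoders import LstmSeq2SeqEncoder
    from allennlp.modules.text_field_embedders import BasicTextFieldEmbedder
    from allennlp.modules.token_embedders import Embedding

    # A toy vocabulary; a real one would be built from the training data.
    vocab = Vocabulary()
    vocab.add_tokens_to_namespace(["all", "these", "sentences"], "source_tokens")
    vocab.add_tokens_to_namespace([START_SYMBOL, END_SYMBOL, "copied"], "target_tokens")
    source_embedder = BasicTextFieldEmbedder(
        {"tokens": Embedding(num_embeddings=vocab.get_vocab_size("source_tokens"), embedding_dim=16)}
    )
    encoder = LstmSeq2SeqEncoder(input_size=16, hidden_size=32)
    model = CopyNetSeq2Seq(vocab, source_embedder, encoder, attention=DotProductAttention())
    ```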
"""
def __init__(
self,
vocab: Vocabulary,
source_embedder: TextFieldEmbedder,
encoder: Seq2SeqEncoder,
attention: Attention,
label_smoothing: float = None,
beam_search: Lazy[BeamSearch] = Lazy(BeamSearch),
target_embedding_dim: int = 30,
scheduled_sampling_ratio: float = 0.0,
copy_token: str = "@COPY@",
target_namespace: str = "target_tokens",
tensor_based_metric: Metric = None,
token_based_metric: Metric = None,
initializer: InitializerApplicator = InitializerApplicator(),
**kwargs
) -> None:
super().__init__(vocab)
self._target_namespace = target_namespace
self._scheduled_sampling_ratio = scheduled_sampling_ratio
self._start_index = self.vocab.get_token_index(START_SYMBOL, self._target_namespace)
self._end_index = self.vocab.get_token_index(END_SYMBOL, self._target_namespace)
self._oov_index = self.vocab.get_token_index(self.vocab._oov_token, self._target_namespace)
self._pad_index = self.vocab.get_token_index(
self.vocab._padding_token, self._target_namespace
)
self._copy_index = self.vocab.add_token_to_namespace(copy_token, self._target_namespace)
self._tensor_based_metric = tensor_based_metric or BLEU(
exclude_indices={self._pad_index, self._end_index, self._start_index}
)
self._token_based_metric = token_based_metric
self._target_vocab_size = self.vocab.get_vocab_size(self._target_namespace)
# Encoding modules.
self._source_embedder = source_embedder
self._encoder = encoder
# Decoder output dim needs to be the same as the encoder output dim since we initialize the
# hidden state of the decoder with the final hidden state of the encoder.
# We arbitrarily set the decoder's input dimension to be the same as the output dimension.
self.encoder_output_dim = self._encoder.get_output_dim()
self.decoder_output_dim = self.encoder_output_dim
self.decoder_input_dim = self.decoder_output_dim
# The decoder input will be a function of the embedding of the previous predicted token,
# an attended encoder hidden state called the "attentive read", and another
# weighted sum of the encoder hidden state called the "selective read".
# While the weights for the attentive read are calculated by an `Attention` module,
# the weights for the selective read are simply the predicted probabilities
# corresponding to each token in the source sentence that matches the target
# token from the previous timestep.
self._target_embedder = Embedding(
num_embeddings=self._target_vocab_size, embedding_dim=target_embedding_dim
)
self._attention = attention
self._label_smoothing = label_smoothing
self._input_projection_layer = Linear(
target_embedding_dim + self.encoder_output_dim * 2, self.decoder_input_dim
)
# We then run the projected decoder input through an LSTM cell to produce
# the next hidden state.
self._decoder_cell = LSTMCell(self.decoder_input_dim, self.decoder_output_dim)
# We create a "generation" score for each token in the target vocab
# with a linear projection of the decoder hidden state.
self._output_generation_layer = Linear(self.decoder_output_dim, self._target_vocab_size)
# We create a "copying" score for each source token by applying a non-linearity
# (tanh) to a linear projection of the encoded hidden state for that token,
# and then taking the dot product of the result with the decoder hidden state.
self._output_copying_layer = Linear(self.encoder_output_dim, self.decoder_output_dim)
# At prediction time, we'll use a beam search to find the best target sequence.
# For backwards compatibility, check if beam_size or max_decoding_steps were passed in as
# kwargs. If so, update the BeamSearch object before constructing and raise a DeprecationWarning
deprecation_warning = (
"The parameter {} has been deprecated."
" Provide this parameter as argument to beam_search instead."
)
beam_search_extras = {}
if "beam_size" in kwargs:
beam_search_extras["beam_size"] = kwargs["beam_size"]
warnings.warn(deprecation_warning.format("beam_size"), DeprecationWarning)
if "max_decoding_steps" in kwargs:
beam_search_extras["max_steps"] = kwargs["max_decoding_steps"]
warnings.warn(deprecation_warning.format("max_decoding_steps"), DeprecationWarning)
self._beam_search = beam_search.construct(
end_index=self._end_index, vocab=self.vocab, **beam_search_extras
)
initializer(self)
def forward(
self, # type: ignore
source_tokens: TextFieldTensors,
source_token_ids: torch.Tensor,
source_to_target: torch.Tensor,
metadata: List[Dict[str, Any]],
target_tokens: TextFieldTensors = None,
target_token_ids: torch.Tensor = None,
weight: torch.Tensor = None,
) -> Dict[str, torch.Tensor]:
"""
        Make a forward pass with decoder logic for producing the entire target sequence.
# Parameters
source_tokens : `TextFieldTensors`, required
The output of `TextField.as_array()` applied on the source `TextField`. This will be
passed through a `TextFieldEmbedder` and then through an encoder.
source_token_ids : `torch.Tensor`, required
Tensor containing IDs that indicate which source tokens match each other.
Has shape: `(batch_size, source_sequence_length)`.
source_to_target : `torch.Tensor`, required
Tensor containing vocab index of each source token with respect to the
target vocab namespace. Shape: `(batch_size, source_sequence_length)`.
metadata : `List[Dict[str, Any]]`, required
Metadata field that contains the original source tokens with key 'source_tokens'
and any other meta fields. When 'target_tokens' is also passed, the metadata
should also contain the original target tokens with key 'target_tokens'.
target_tokens : `TextFieldTensors`, optional (default = `None`)
            Output of `TextField.as_array()` applied on target `TextField`. We assume that the
target tokens are also represented as a `TextField` which must contain a "tokens"
key that uses single ids.
target_token_ids : `torch.Tensor`, optional (default = `None`)
A tensor of shape `(batch_size, target_sequence_length)` which indicates which
tokens in the target sequence match tokens in the source sequence.
        weight : `torch.Tensor`, optional (default = `None`)
            An optional tensor of shape `(batch_size,)` or `(batch_size, 1)` which determines
            how to weight each instance in the batch when calculating the loss.
            The default of `None` is equivalent to `weight = torch.tensor([1.0] * batch_size)`.
The final loss is calculated as `-(weight * log_likelihood).sum() / batch_size`,
where `log_likelihood` is a tensor of shape `(batch_size,)`, representing the overall
log likelihood of the gold target sequence.
# Returns
`Dict[str, torch.Tensor]`
"""
state = self._encode(source_tokens)
state["source_token_ids"] = source_token_ids
state["source_to_target"] = source_to_target
if target_tokens:
state = self._init_decoder_state(state)
output_dict = self._forward_loss(target_tokens, target_token_ids, state, weight=weight)
else:
output_dict = {}
output_dict["metadata"] = metadata
if not self.training:
state = self._init_decoder_state(state)
predictions = self._forward_beam_search(state)
output_dict.update(predictions)
if target_tokens:
if self._tensor_based_metric is not None:
# shape: (batch_size, beam_size, max_sequence_length)
top_k_predictions = output_dict["predictions"]
# shape: (batch_size, max_predicted_sequence_length)
best_predictions = top_k_predictions[:, 0, :]
# shape: (batch_size, target_sequence_length)
gold_tokens = self._gather_extended_gold_tokens(
target_tokens["tokens"]["tokens"], source_token_ids, target_token_ids
)
self._tensor_based_metric(best_predictions, gold_tokens) # type: ignore
if self._token_based_metric is not None:
predicted_tokens = self._get_predicted_tokens(
output_dict["predictions"], metadata, n_best=1
)
self._token_based_metric( # type: ignore
predicted_tokens, [x["target_tokens"] for x in metadata]
)
return output_dict
def _gather_extended_gold_tokens(
self,
target_tokens: torch.Tensor,
source_token_ids: torch.Tensor,
target_token_ids: torch.Tensor,
) -> torch.LongTensor:
"""
Modify the gold target tokens relative to the extended vocabulary.
For gold targets that are OOV but were copied from the source, the OOV index
        will be changed to the index of the first occurrence in the source sentence,
offset by the size of the target vocabulary.
# Parameters
target_tokens : `torch.Tensor`
Shape: `(batch_size, target_sequence_length)`.
source_token_ids : `torch.Tensor`
Shape: `(batch_size, source_sequence_length)`.
target_token_ids : `torch.Tensor`
Shape: `(batch_size, target_sequence_length)`.
# Returns
torch.Tensor
Modified `target_tokens` with OOV indices replaced by offset index
of first match in source sentence.
"""
batch_size, target_sequence_length = target_tokens.size()
source_sequence_length = source_token_ids.size(1)
# Only change indices for tokens that were OOV in target vocab but copied from source.
# shape: (batch_size, target_sequence_length)
oov = target_tokens == self._oov_index
# shape: (batch_size, target_sequence_length, source_sequence_length)
expanded_source_token_ids = source_token_ids.unsqueeze(1).expand(
batch_size, target_sequence_length, source_sequence_length
)
# shape: (batch_size, target_sequence_length, source_sequence_length)
expanded_target_token_ids = target_token_ids.unsqueeze(-1).expand(
batch_size, target_sequence_length, source_sequence_length
)
# shape: (batch_size, target_sequence_length, source_sequence_length)
matches = expanded_source_token_ids == expanded_target_token_ids
# shape: (batch_size, target_sequence_length)
copied = matches.sum(-1) > 0
# shape: (batch_size, target_sequence_length)
mask = oov & copied
# shape: (batch_size, target_sequence_length)
first_match = ((matches.cumsum(-1) == 1) & matches).to(torch.uint8).argmax(-1)
# shape: (batch_size, target_sequence_length)
new_target_tokens = (
target_tokens * ~mask + (first_match.long() + self._target_vocab_size) * mask
)
return new_target_tokens
def _init_decoder_state(self, state: Dict[str, torch.Tensor]) -> Dict[str, torch.Tensor]:
"""
Initialize the encoded state to be passed to the first decoding time step.
"""
batch_size, _ = state["source_mask"].size()
# Initialize the decoder hidden state with the final output of the encoder,
# and the decoder context with zeros.
# shape: (batch_size, encoder_output_dim)
final_encoder_output = util.get_final_encoder_states(
state["encoder_outputs"], state["source_mask"], self._encoder.is_bidirectional()
)
# shape: (batch_size, decoder_output_dim)
state["decoder_hidden"] = final_encoder_output
# shape: (batch_size, decoder_output_dim)
state["decoder_context"] = state["encoder_outputs"].new_zeros(
batch_size, self.decoder_output_dim
)
return state
def _encode(self, source_tokens: Dict[str, torch.Tensor]) -> Dict[str, torch.Tensor]:
"""
Encode source input sentences.
"""
# shape: (batch_size, source_sequence_length, encoder_input_dim)
embedded_input = self._source_embedder(source_tokens)
# shape: (batch_size, source_sequence_length)
source_mask = util.get_text_field_mask(source_tokens)
# shape: (batch_size, source_sequence_length, encoder_output_dim)
encoder_outputs = self._encoder(embedded_input, source_mask)
return {"source_mask": source_mask, "encoder_outputs": encoder_outputs}
def _decoder_step(
self,
last_predictions: torch.Tensor,
selective_weights: torch.Tensor,
state: Dict[str, torch.Tensor],
) -> Dict[str, torch.Tensor]:
# shape: (group_size, source_sequence_length, encoder_output_dim)
encoder_outputs_mask = state["source_mask"]
# shape: (group_size, target_embedding_dim)
embedded_input = self._target_embedder(last_predictions)
# shape: (group_size, source_sequence_length)
attentive_weights = self._attention(
state["decoder_hidden"], state["encoder_outputs"], encoder_outputs_mask
)
# shape: (group_size, encoder_output_dim)
attentive_read = util.weighted_sum(state["encoder_outputs"], attentive_weights)
# shape: (group_size, encoder_output_dim)
selective_read = util.weighted_sum(state["encoder_outputs"], selective_weights)
# shape: (group_size, target_embedding_dim + encoder_output_dim * 2)
decoder_input = torch.cat((embedded_input, attentive_read, selective_read), -1)
# shape: (group_size, decoder_input_dim)
projected_decoder_input = self._input_projection_layer(decoder_input)
state["decoder_hidden"], state["decoder_context"] = self._decoder_cell(
projected_decoder_input.float(),
(state["decoder_hidden"].float(), state["decoder_context"].float()),
)
return state
def _get_generation_scores(self, state: Dict[str, torch.Tensor]) -> torch.Tensor:
return self._output_generation_layer(state["decoder_hidden"])
def _get_copy_scores(self, state: Dict[str, torch.Tensor]) -> torch.Tensor:
# shape: (batch_size, source_sequence_length, encoder_output_dim)
encoder_outputs = state["encoder_outputs"]
# shape: (batch_size, source_sequence_length, decoder_output_dim)
copy_projection = self._output_copying_layer(encoder_outputs)
# shape: (batch_size, source_sequence_length, decoder_output_dim)
copy_projection = torch.tanh(copy_projection)
# shape: (batch_size, source_sequence_length)
copy_scores = copy_projection.bmm(state["decoder_hidden"].unsqueeze(-1)).squeeze(-1)
return copy_scores
def _get_ll_contrib(
self,
generation_scores: torch.Tensor,
generation_scores_mask: torch.BoolTensor,
copy_scores: torch.Tensor,
target_tokens: torch.Tensor,
target_to_source: torch.Tensor,
source_mask: torch.BoolTensor,
) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
"""
Get the log-likelihood contribution from a single timestep.
# Parameters
generation_scores : `torch.Tensor`
Shape: `(batch_size, target_vocab_size)`
generation_scores_mask : `torch.BoolTensor`
Shape: `(batch_size, target_vocab_size)`. This is just a tensor of 1's.
copy_scores : `torch.Tensor`
Shape: `(batch_size, source_sequence_length)`
target_tokens : `torch.Tensor`
Shape: `(batch_size,)`
target_to_source : `torch.Tensor`
Shape: `(batch_size, source_sequence_length)`
source_mask : `torch.BoolTensor`
Shape: `(batch_size, source_sequence_length)`
# Returns
        Tuple[torch.Tensor, torch.Tensor, torch.Tensor]
            Shapes: `(batch_size,)`, `(batch_size, source_sequence_length)`, and
            `(batch_size, target_vocab_size + source_sequence_length)`.
"""
_, target_size = generation_scores.size()
# The point of this mask is to just mask out all source token scores
# that just represent padding. We apply the mask to the concatenation
# of the generation scores and the copy scores to normalize the scores
# correctly during the softmax.
# shape: (batch_size, target_vocab_size + source_sequence_length)
mask = torch.cat((generation_scores_mask, source_mask), dim=-1)
# shape: (batch_size, target_vocab_size + source_sequence_length)
all_scores = torch.cat((generation_scores, copy_scores), dim=-1)
# Normalize generation and copy scores.
# shape: (batch_size, target_vocab_size + source_sequence_length)
log_probs = util.masked_log_softmax(all_scores, mask)
# Calculate the log probability (`copy_log_probs`) for each token in the source sentence
# that matches the current target token. We use the sum of these copy probabilities
# for matching tokens in the source sentence to get the total probability
# for the target token. We also need to normalize the individual copy probabilities
# to create `selective_weights`, which are used in the next timestep to create
# a selective read state.
# shape: (batch_size, source_sequence_length)
copy_log_probs = (
log_probs[:, target_size:]
+ (
target_to_source.to(log_probs.dtype) + util.tiny_value_of_dtype(log_probs.dtype)
).log()
)
        # Since `log_probs[:, target_size:]` gives us the raw copy log probabilities,
# we use a non-log softmax to get the normalized non-log copy probabilities.
selective_weights = util.masked_softmax(log_probs[:, target_size:], target_to_source)
        # This mask ensures that each item in the batch has a non-zero generation probability
# for this timestep only when the gold target token is not OOV or there are no
# matching tokens in the source sentence.
# shape: (batch_size, 1)
gen_mask = (target_tokens != self._oov_index) | (target_to_source.sum(-1) == 0)
log_gen_mask = (gen_mask + util.tiny_value_of_dtype(log_probs.dtype)).log().unsqueeze(-1)
# Now we get the generation score for the gold target token.
if self._label_smoothing is not None and self._label_smoothing > 0.0:
smoothing_value = self._label_smoothing / target_size
# Create the smoothed targets to be multiplied with `log_probs`
# shape: (batch_size, target_vocab_size + source_sequence_length)
smoothed_targets = torch.full_like(log_probs, smoothing_value).scatter_(
1, target_tokens.unsqueeze(1), 1.0 - self._label_smoothing + smoothing_value
)
generation_log_probs = log_probs * smoothed_targets
# shape: (batch_size, 1)
generation_log_probs = generation_log_probs.sum(-1, keepdim=True)
else:
# Contribution to the negative log likelihood only comes from the exact indices
# of the targets, as the target distributions are one-hot. Here we use torch.gather
# to extract the indices which contribute to the loss.
# shape: (batch_size, 1)
generation_log_probs = log_probs.gather(1, target_tokens.unsqueeze(1))
generation_log_probs = generation_log_probs + log_gen_mask
# ... and add the copy score to get the step log likelihood.
# shape: (batch_size, 1 + source_sequence_length)
combined_gen_and_copy = torch.cat((generation_log_probs, copy_log_probs), dim=-1)
# shape: (batch_size,)
step_log_likelihood = util.logsumexp(combined_gen_and_copy)
return step_log_likelihood, selective_weights, log_probs
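    # Restating the computation in `_get_ll_contrib` above: for the gold token y_t at
    # this timestep,
    #   step_log_likelihood = logsumexp( gen_log_prob(y_t) + log_gen_mask,
    #                                    copy_log_probs at source positions matching y_t )
    # where log_gen_mask suppresses the generation term when y_t is OOV in the target
    # vocab but has at least one match in the source sentence.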
def _forward_loss(
self,
target_tokens: TextFieldTensors,
target_token_ids: torch.Tensor,
state: Dict[str, torch.Tensor],
weight: torch.Tensor = None,
) -> Dict[str, torch.Tensor]:
"""
Calculate the loss against gold targets.
"""
batch_size, target_sequence_length = target_tokens["tokens"]["tokens"].size()
# shape: (batch_size, source_sequence_length)
source_mask = state["source_mask"]
# We have a decoding step for every target token except the "START" token.
num_decoding_steps = target_sequence_length - 1
# We use this to fill in the copy index when the previous input was copied.
# shape: (batch_size,)
copy_input_choices = source_mask.new_full(
(batch_size,), fill_value=self._copy_index, dtype=torch.long
)
# We need to keep track of the probabilities assigned to tokens in the source
# sentence that were copied during the previous timestep, since we use
# those probabilities as weights when calculating the "selective read".
# shape: (batch_size, source_sequence_length)
selective_weights = state["decoder_hidden"].new_zeros(source_mask.size())
# Indicates which tokens in the source sentence match the current target token.
# shape: (batch_size, source_sequence_length)
target_to_source = state["source_token_ids"].new_zeros(source_mask.size())
# This is just a tensor of ones which we use repeatedly in `self._get_ll_contrib`,
# so we create it once here to avoid doing it over-and-over.
generation_scores_mask = state["decoder_hidden"].new_full(
(batch_size, self._target_vocab_size), fill_value=1.0, dtype=torch.bool
)
# Initialize target predictions with the start index.
# shape: (batch_size,)
last_predictions = source_mask.new_full(
(batch_size,), fill_value=self._start_index, dtype=torch.long
)
step_log_likelihoods = []
for timestep in range(num_decoding_steps):
if (
self.training
and self._scheduled_sampling_ratio > 0.0
and torch.rand(1).item() < self._scheduled_sampling_ratio
):
                # Scheduled sampling: during training, with probability
                # `_scheduled_sampling_ratio`, feed the model its own prediction from
                # the previous timestep instead of the gold token.
# shape: (batch_size,)
input_choices = last_predictions
# Get mask tensor indicating which instances were copied.
# shape: (batch_size,)
copied = (input_choices >= self._target_vocab_size).long()
else:
# shape: (batch_size,)
input_choices = target_tokens["tokens"]["tokens"][:, timestep]
# shape: (batch_size,)
copied = (
(input_choices == self._oov_index) & (target_to_source.sum(-1) > 0)
).long()
# shape: (batch_size,)
input_choices = input_choices * (1 - copied) + copy_input_choices * copied
# shape: (batch_size, source_sequence_length)
target_to_source = state["source_token_ids"] == target_token_ids[
:, timestep + 1
].unsqueeze(-1)
# Update the decoder state by taking a step through the RNN.
state = self._decoder_step(input_choices, selective_weights, state)
# Get generation scores for each token in the target vocab.
# shape: (batch_size, target_vocab_size)
generation_scores = self._get_generation_scores(state)
# Get copy scores for each token in the source sentence, excluding the start
# and end tokens.
# shape: (batch_size, source_sequence_length)
copy_scores = self._get_copy_scores(state)
# shape: (batch_size,)
step_target_tokens = target_tokens["tokens"]["tokens"][:, timestep + 1]
step_log_likelihood, selective_weights, log_probs = self._get_ll_contrib(
generation_scores,
generation_scores_mask,
copy_scores,
step_target_tokens,
target_to_source,
source_mask,
)
step_log_likelihoods.append(step_log_likelihood.unsqueeze(1))
# shape (predicted_classes): (batch_size,)
_, last_predictions = torch.max(log_probs, 1)
# Gather step log-likelihoods.
# shape: (batch_size, num_decoding_steps = target_sequence_length - 1)
log_likelihoods = torch.cat(step_log_likelihoods, 1)
# Get target mask to exclude likelihood contributions from timesteps after
# the END token.
# shape: (batch_size, target_sequence_length)
target_mask = util.get_text_field_mask(target_tokens)
# The first timestep is just the START token, which is not included in the likelihoods.
# shape: (batch_size, num_decoding_steps)
target_mask = target_mask[:, 1:]
# Sum of step log-likelihoods.
# shape: (batch_size,)
log_likelihood = (log_likelihoods * target_mask).sum(dim=-1)
# The loss is the negative log-likelihood, averaged over the batch.
if weight is None:
loss = -log_likelihood.sum() / batch_size
else:
# shape: (batch_size,)
if len(weight.shape) > 1:
weight = weight.squeeze()
loss = -(weight * log_likelihood).sum() / batch_size
return {"loss": loss}
def _forward_beam_search(self, state: Dict[str, torch.Tensor]) -> Dict[str, torch.Tensor]:
batch_size, source_sequence_length = state["source_mask"].size()
# Initialize the copy scores to zero.
state["copy_log_probs"] = (
state["decoder_hidden"].new_zeros((batch_size, source_sequence_length))
+ util.tiny_value_of_dtype(state["decoder_hidden"].dtype)
).log()
# shape: (batch_size,)
start_predictions = state["source_mask"].new_full(
(batch_size,), fill_value=self._start_index, dtype=torch.long
)
# shape (all_top_k_predictions): (batch_size, beam_size, num_decoding_steps)
# shape (log_probabilities): (batch_size, beam_size)
all_top_k_predictions, log_probabilities = self._beam_search.search(
start_predictions, state, self.take_search_step
)
return {"predicted_log_probs": log_probabilities, "predictions": all_top_k_predictions}
def _get_input_and_selective_weights(
self, last_predictions: torch.LongTensor, state: Dict[str, torch.Tensor]
) -> Tuple[torch.LongTensor, torch.Tensor]:
"""
Get input choices for the decoder and the selective copy weights.
The decoder input choices are simply the `last_predictions`, except for
target OOV predictions that were copied from source tokens, in which case
the prediction will be changed to the COPY symbol in the target namespace.
The selective weights are just the probabilities assigned to source
tokens that were copied, normalized to sum to 1. If no source tokens were copied,
        the selective weights will be all zeros.
# Parameters
last_predictions : `torch.LongTensor`
Shape: `(group_size,)`
state : `Dict[str, torch.Tensor]`
# Returns
Tuple[torch.LongTensor, torch.Tensor]
`input_choices` (shape `(group_size,)`) and `selective_weights`
(shape `(group_size, source_sequence_length)`).
"""
group_size, source_sequence_length = state["source_to_target"].size()
# This is a mask indicating which last predictions were copied from the
        # source AND not in the target vocabulary (OOV).
# (group_size,)
only_copied_mask = last_predictions >= self._target_vocab_size
# If the last prediction was in the target vocab or OOV but not copied,
# we use that as input, otherwise we use the COPY token.
# shape: (group_size,)
copy_input_choices = only_copied_mask.new_full(
(group_size,), fill_value=self._copy_index, dtype=torch.long
)
input_choices = last_predictions * ~only_copied_mask + copy_input_choices * only_copied_mask
# In order to get the `selective_weights`, we need to find out which predictions
# were copied or copied AND generated, which is the case when a prediction appears
# in both the source sentence and the target vocab. But whenever a prediction
# is in the target vocab (even if it also appeared in the source sentence),
# its index will be the corresponding target vocab index, not its index in
# the source sentence offset by the target vocab size. So we first
# use `state["source_to_target"]` to get an indicator of every source token
# that matches the predicted target token.
# shape: (group_size, source_sequence_length)
expanded_last_predictions = last_predictions.unsqueeze(-1).expand(
group_size, source_sequence_length
)
# shape: (group_size, source_sequence_length)
source_copied_and_generated = state["source_to_target"] == expanded_last_predictions
# In order to get indicators for copied source tokens that are OOV with respect
# to the target vocab, we'll make use of `state["source_token_ids"]`.
# First we adjust predictions relative to the start of the source tokens.
# This makes sense because predictions for copied tokens are given by the index of the copied
# token in the source sentence, offset by the size of the target vocabulary.
# shape: (group_size,)
adjusted_predictions = last_predictions - self._target_vocab_size
# The adjusted indices for items that were not copied will be negative numbers,
# and therefore invalid. So we zero them out.
adjusted_predictions = adjusted_predictions * only_copied_mask
# shape: (group_size, source_sequence_length)
source_token_ids = state["source_token_ids"]
# shape: (group_size, source_sequence_length)
adjusted_prediction_ids = source_token_ids.gather(-1, adjusted_predictions.unsqueeze(-1))
# This mask will contain indicators for source tokens that were copied
# during the last timestep.
# shape: (group_size, source_sequence_length)
source_only_copied = source_token_ids == adjusted_prediction_ids
# Since we zero'd-out indices for predictions that were not copied,
# we need to zero out all entries of this mask corresponding to those predictions.
source_only_copied = source_only_copied & only_copied_mask.unsqueeze(-1)
# shape: (group_size, source_sequence_length)
mask = source_only_copied | source_copied_and_generated
# shape: (group_size, source_sequence_length)
selective_weights = util.masked_softmax(state["copy_log_probs"], mask)
return input_choices, selective_weights
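    # Illustrative note on the method above (made-up numbers): with target_vocab_size = 5,
    # a last prediction of 7 means "the source token at position 7 - 5 = 2 was copied".
    # The decoder is then fed the COPY symbol, and the selective weights are the
    # re-normalized copy probabilities of every source position whose token id matches
    # the copied token.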
def _gather_final_log_probs(
self,
generation_log_probs: torch.Tensor,
copy_log_probs: torch.Tensor,
state: Dict[str, torch.Tensor],
) -> torch.Tensor:
"""
Combine copy probabilities with generation probabilities for matching tokens.
# Parameters
generation_log_probs : `torch.Tensor`
Shape: `(group_size, target_vocab_size)`
copy_log_probs : `torch.Tensor`
Shape: `(group_size, source_sequence_length)`
state : `Dict[str, torch.Tensor]`
# Returns
torch.Tensor
Shape: `(group_size, target_vocab_size + source_sequence_length)`.
"""
_, source_sequence_length = state["source_to_target"].size()
source_token_ids = state["source_token_ids"]
# shape: [(batch_size, *)]
modified_log_probs_list: List[torch.Tensor] = []
for i in range(source_sequence_length):
# shape: (group_size,)
copy_log_probs_slice = copy_log_probs[:, i]
# `source_to_target` is a matrix of shape (group_size, source_sequence_length)
# where element (i, j) is the vocab index of the target token that matches the jth
# source token in the ith group, if there is one, or the index of the OOV symbol otherwise.
# We'll use this to add copy scores to corresponding generation scores.
# shape: (group_size,)
source_to_target_slice = state["source_to_target"][:, i]
# The OOV index in the source_to_target_slice indicates that the source
# token is not in the target vocab, so we don't want to add that copy score
# to the OOV token.
copy_log_probs_to_add_mask = source_to_target_slice != self._oov_index
copy_log_probs_to_add = (
copy_log_probs_slice
+ (
copy_log_probs_to_add_mask
+ util.tiny_value_of_dtype(copy_log_probs_slice.dtype)
).log()
)
# shape: (batch_size, 1)
copy_log_probs_to_add = copy_log_probs_to_add.unsqueeze(-1)
# shape: (batch_size, 1)
selected_generation_log_probs = generation_log_probs.gather(
1, source_to_target_slice.unsqueeze(-1)
)
combined_scores = util.logsumexp(
torch.cat((selected_generation_log_probs, copy_log_probs_to_add), dim=1)
)
generation_log_probs = generation_log_probs.scatter(
-1, source_to_target_slice.unsqueeze(-1), combined_scores.unsqueeze(-1)
)
# We have to combine copy scores for duplicate source tokens so that
# we can find the overall most likely source token. So, if this is the first
            # occurrence of this particular source token, we add the log_probs from all other
            # occurrences, otherwise we zero it out since it was already accounted for.
if i < (source_sequence_length - 1):
                # Sum copy scores from future occurrences of the source token.
# shape: (group_size, source_sequence_length - i)
source_future_occurences = source_token_ids[:, (i + 1) :] == source_token_ids[
:, i
].unsqueeze(-1)
# shape: (group_size, source_sequence_length - i)
future_copy_log_probs = (
copy_log_probs[:, (i + 1) :]
+ (
source_future_occurences + util.tiny_value_of_dtype(copy_log_probs.dtype)
).log()
)
# shape: (group_size, 1 + source_sequence_length - i)
combined = torch.cat(
(copy_log_probs_slice.unsqueeze(-1), future_copy_log_probs), dim=-1
)
# shape: (group_size,)
copy_log_probs_slice = util.logsumexp(combined)
if i > 0:
# Remove copy log_probs that we have already accounted for.
# shape: (group_size, i)
source_previous_occurences = source_token_ids[:, 0:i] == source_token_ids[
:, i
].unsqueeze(-1)
# shape: (group_size,)
duplicate_mask = source_previous_occurences.sum(dim=-1) == 0
copy_log_probs_slice = (
copy_log_probs_slice
+ (duplicate_mask + util.tiny_value_of_dtype(copy_log_probs_slice.dtype)).log()
)
# Finally, we zero-out copy scores that we added to the generation scores
# above so that we don't double-count them.
# shape: (group_size,)
left_over_copy_log_probs = (
copy_log_probs_slice
+ (
~copy_log_probs_to_add_mask
+ util.tiny_value_of_dtype(copy_log_probs_slice.dtype)
).log()
)
modified_log_probs_list.append(left_over_copy_log_probs.unsqueeze(-1))
modified_log_probs_list.insert(0, generation_log_probs)
# shape: (group_size, target_vocab_size + source_sequence_length)
modified_log_probs = torch.cat(modified_log_probs_list, dim=-1)
return modified_log_probs
def take_search_step(
self, last_predictions: torch.Tensor, state: Dict[str, torch.Tensor], step: int
) -> Tuple[torch.Tensor, Dict[str, torch.Tensor]]:
"""
Take step during beam search.
This function is what gets passed to the `BeamSearch.search` method. It takes
predictions from the last timestep and the current state and outputs
the log probabilities assigned to tokens for the next timestep, as well as the updated
state.
Since we are predicting tokens out of the extended vocab (target vocab + all unique
        tokens from the source sentence), this is a little more complicated than just
making a forward pass through the model. The output log probs will have
shape `(group_size, target_vocab_size + source_sequence_length)` so that each
        token in the target vocab and source sentence is assigned a probability.
Note that copy scores are assigned to each source token based on their position, not unique value.
So if a token appears more than once in the source sentence, it will have more than one score.
Further, if a source token is also part of the target vocab, its final score
will be the sum of the generation and copy scores. Therefore, in order to
get the score for all tokens in the extended vocab at this step,
        we have to combine copy scores for re-occurring source tokens and potentially
add them to the generation scores for the matching token in the target vocab, if
there is one.
So we can break down the final log probs output as the concatenation of two
matrices, A: `(group_size, target_vocab_size)`, and B: `(group_size, source_sequence_length)`.
Matrix A contains the sum of the generation score and copy scores (possibly 0)
for each target token. Matrix B contains left-over copy scores for source tokens
that do NOT appear in the target vocab, with zeros everywhere else. But since
a source token may appear more than once in the source sentence, we also have to
sum the scores for each appearance of each unique source token. So matrix B
        actually only has non-zero values at the first occurrence of each source token
that is not in the target vocab.
# Parameters
last_predictions : `torch.Tensor`
Shape: `(group_size,)`
state : `Dict[str, torch.Tensor]`
Contains all state tensors necessary to produce generation and copy scores
for next step.
step : `int`
The time step in beam search decoding.
# Notes
`group_size != batch_size`. In fact, `group_size = batch_size * beam_size`.
"""
_, source_sequence_length = state["source_to_target"].size()
# Get input to the decoder RNN and the selective weights. `input_choices`
# is the result of replacing target OOV tokens in `last_predictions` with the
# copy symbol. `selective_weights` consist of the normalized copy probabilities
# assigned to the source tokens that were copied. If no tokens were copied,
# there will be all zeros.
# shape: (group_size,), (group_size, source_sequence_length)
input_choices, selective_weights = self._get_input_and_selective_weights(
last_predictions, state
)
# Update the decoder state by taking a step through the RNN.
state = self._decoder_step(input_choices, selective_weights, state)
# Get the un-normalized generation scores for each token in the target vocab.
# shape: (group_size, target_vocab_size)
generation_scores = self._get_generation_scores(state)
# Get the un-normalized copy scores for each token in the source sentence,
# excluding the start and end tokens.
# shape: (group_size, source_sequence_length)
copy_scores = self._get_copy_scores(state)
# Concat un-normalized generation and copy scores.
# shape: (batch_size, target_vocab_size + source_sequence_length)
all_scores = torch.cat((generation_scores, copy_scores), dim=-1)
# shape: (group_size, source_sequence_length)
source_mask = state["source_mask"]
# shape: (batch_size, target_vocab_size + source_sequence_length)
mask = torch.cat(
(
generation_scores.new_full(generation_scores.size(), True, dtype=torch.bool),
source_mask,
),
dim=-1,
)
# Normalize generation and copy scores.
# shape: (batch_size, target_vocab_size + source_sequence_length)
log_probs = util.masked_log_softmax(all_scores, mask)
# shape: (group_size, target_vocab_size), (group_size, source_sequence_length)
generation_log_probs, copy_log_probs = log_probs.split(
[self._target_vocab_size, source_sequence_length], dim=-1
)
# Update copy_probs needed for getting the `selective_weights` at the next timestep.
state["copy_log_probs"] = copy_log_probs
# We now have normalized generation and copy scores, but to produce the final
# score for each token in the extended vocab, we have to go through and add
# the copy scores to the generation scores of matching target tokens, and sum
# the copy scores of duplicate source tokens.
# shape: (group_size, target_vocab_size + source_sequence_length)
final_log_probs = self._gather_final_log_probs(generation_log_probs, copy_log_probs, state)
return final_log_probs, state
def _get_predicted_tokens(
self,
predicted_indices: Union[torch.Tensor, numpy.ndarray],
batch_metadata: List[Any],
n_best: int = None,
) -> List[Union[List[List[str]], List[str]]]:
"""
Convert predicted indices into tokens.
If `n_best = 1`, the result type will be `List[List[str]]`. Otherwise the result
type will be `List[List[List[str]]]`.
"""
if not isinstance(predicted_indices, numpy.ndarray):
predicted_indices = predicted_indices.detach().cpu().numpy()
predicted_tokens: List[Union[List[List[str]], List[str]]] = []
for top_k_predictions, metadata in zip(predicted_indices, batch_metadata):
batch_predicted_tokens: List[List[str]] = []
for indices in top_k_predictions[:n_best]:
tokens: List[str] = []
indices = list(indices)
if self._end_index in indices:
indices = indices[: indices.index(self._end_index)]
for index in indices:
if index >= self._target_vocab_size:
adjusted_index = index - self._target_vocab_size
token = metadata["source_tokens"][adjusted_index]
else:
token = self.vocab.get_token_from_index(index, self._target_namespace)
tokens.append(token)
batch_predicted_tokens.append(tokens)
if n_best == 1:
predicted_tokens.append(batch_predicted_tokens[0])
else:
predicted_tokens.append(batch_predicted_tokens)
return predicted_tokens
def make_output_human_readable(self, output_dict: Dict[str, torch.Tensor]) -> Dict[str, Any]:
"""
Finalize predictions.
After a beam search, the predicted indices correspond to tokens in the target vocabulary
OR tokens in source sentence. Here we gather the actual tokens corresponding to
the indices.
"""
predicted_tokens = self._get_predicted_tokens(
output_dict["predictions"], output_dict["metadata"]
)
output_dict["predicted_tokens"] = predicted_tokens
return output_dict
def get_metrics(self, reset: bool = False) -> Dict[str, float]:
all_metrics: Dict[str, float] = {}
if not self.training:
if self._tensor_based_metric is not None:
all_metrics.update(
self._tensor_based_metric.get_metric(reset=reset) # type: ignore
)
if self._token_based_metric is not None:
all_metrics.update(self._token_based_metric.get_metric(reset=reset)) # type: ignore
return all_metrics
default_predictor = "seq2seq"
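# Standalone sketch (not part of the original model code): a minimal, self-contained
# reproduction of the gold-token remapping done by
# `CopyNetSeq2Seq._gather_extended_gold_tokens`, using plain PyTorch and made-up values.
if __name__ == "__main__":
    import torch

    target_vocab_size, oov_index = 5, 0
    source_token_ids = torch.tensor([[7, 8, 7]])  # (batch, source_len)
    target_tokens = torch.tensor([[3, 0, 2]])     # gold target indices; 0 == OOV
    target_token_ids = torch.tensor([[9, 7, 9]])  # (batch, target_len)

    oov = target_tokens == oov_index
    # (batch, target_len, source_len): which source positions match each gold token.
    matches = source_token_ids.unsqueeze(1) == target_token_ids.unsqueeze(-1)
    copied = matches.sum(-1) > 0
    mask = oov & copied
    # Index of the first matching source position for each gold token.
    first_match = ((matches.cumsum(-1) == 1) & matches).to(torch.uint8).argmax(-1)
    new_target_tokens = target_tokens * ~mask + (first_match + target_vocab_size) * mask
    # The OOV gold token matches source positions 0 and 2, so it is rewritten to 5 + 0 = 5.
    print(new_target_tokens)  # tensor([[3, 5, 2]])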
| allennlp-models-main | allennlp_models/generation/models/copynet_seq2seq.py |
from allennlp_models.generation.models.composed_seq2seq import ComposedSeq2Seq
from allennlp_models.generation.models.copynet_seq2seq import CopyNetSeq2Seq
from allennlp_models.generation.models.simple_seq2seq import SimpleSeq2Seq
from allennlp_models.generation.models.bart import Bart
from allennlp_models.generation.models.t5 import T5
| allennlp-models-main | allennlp_models/generation/models/__init__.py |
from typing import Dict, Optional
import torch
from allennlp.common.checks import ConfigurationError
from allennlp.data import TextFieldTensors, Vocabulary
from allennlp.models.model import Model
from allennlp.modules import TextFieldEmbedder, Seq2SeqEncoder, Embedding
from allennlp.modules.text_field_embedders import BasicTextFieldEmbedder
from allennlp.nn import util, InitializerApplicator
from allennlp_models.generation.modules.seq_decoders.seq_decoder import SeqDecoder
@Model.register("composed_seq2seq")
class ComposedSeq2Seq(Model):
"""
This `ComposedSeq2Seq` class is a `Model` which takes a sequence, encodes it, and then
uses the encoded representations to decode another sequence. You can use this as the basis for
a neural machine translation system, an abstractive summarization system, or any other common
seq2seq problem. The model here is simple, but should be a decent starting place for
implementing recent models for these tasks.
The `ComposedSeq2Seq` class composes separate `Seq2SeqEncoder` and `SeqDecoder` classes.
These parts are customizable and are independent from each other.
# Parameters
vocab : `Vocabulary`, required
Vocabulary containing source and target vocabularies. They may be under the same namespace
(`tokens`) or the target tokens can have a different namespace, in which case it needs to
be specified as `target_namespace`.
    source_text_embedder : `TextFieldEmbedder`, required
        Embedder for the source side sequences.
encoder : `Seq2SeqEncoder`, required
The encoder of the "encoder/decoder" model
decoder : `SeqDecoder`, required
The decoder of the "encoder/decoder" model
tied_source_embedder_key : `str`, optional (default=`None`)
If specified, this key is used to obtain token_embedder in `source_text_embedder` and
the weights are shared/tied with the decoder's target embedding weights.
initializer : `InitializerApplicator`, optional (default=`InitializerApplicator()`)
Used to initialize the model parameters.
"""
def __init__(
self,
vocab: Vocabulary,
source_text_embedder: TextFieldEmbedder,
encoder: Seq2SeqEncoder,
decoder: SeqDecoder,
tied_source_embedder_key: Optional[str] = None,
initializer: InitializerApplicator = InitializerApplicator(),
**kwargs,
) -> None:
super().__init__(vocab, **kwargs)
self._source_text_embedder = source_text_embedder
self._encoder = encoder
self._decoder = decoder
if self._encoder.get_output_dim() != self._decoder.get_output_dim():
raise ConfigurationError(
f"Encoder output dimension {self._encoder.get_output_dim()} should be"
f" equal to decoder dimension {self._decoder.get_output_dim()}."
)
if tied_source_embedder_key:
            # A bit of an ugly hack to tie embeddings. This works only for
            # `BasicTextFieldEmbedder`; since it can have multiple embedders while
            # `SeqDecoder` contains only a single embedder, we need the key to select
            # which source embedder to replace with the target embedder from the decoder.
if not isinstance(self._source_text_embedder, BasicTextFieldEmbedder):
raise ConfigurationError(
"Unable to tie embeddings,"
"Source text embedder is not an instance of `BasicTextFieldEmbedder`."
)
source_embedder = self._source_text_embedder._token_embedders[tied_source_embedder_key]
if not isinstance(source_embedder, Embedding):
raise ConfigurationError(
"Unable to tie embeddings,"
"Selected source embedder is not an instance of `Embedding`."
)
if source_embedder.get_output_dim() != self._decoder.target_embedder.get_output_dim():
raise ConfigurationError(
"Output Dimensions mismatch between source embedder and target embedder."
)
self._source_text_embedder._token_embedders[
tied_source_embedder_key
] = self._decoder.target_embedder
initializer(self)
def forward(
self, # type: ignore
source_tokens: TextFieldTensors,
target_tokens: TextFieldTensors = None,
) -> Dict[str, torch.Tensor]:
"""
Make forward pass on the encoder and decoder for producing the entire target sequence.
# Parameters
source_tokens : `TextFieldTensors`
The output of `TextField.as_array()` applied on the source `TextField`. This will be
passed through a `TextFieldEmbedder` and then through an encoder.
target_tokens : `TextFieldTensors`, optional (default = `None`)
            Output of `TextField.as_array()` applied on target `TextField`. We assume that the
target tokens are also represented as a `TextField`.
# Returns
`Dict[str, torch.Tensor]`
The output tensors from the decoder.
"""
state = self._encode(source_tokens)
return self._decoder(state, target_tokens)
def make_output_human_readable(
self, output_dict: Dict[str, torch.Tensor]
) -> Dict[str, torch.Tensor]:
"""
Finalize predictions.
"""
return self._decoder.post_process(output_dict)
def _encode(self, source_tokens: Dict[str, torch.Tensor]) -> Dict[str, torch.Tensor]:
"""
        Make forward pass on the encoder.
# Parameters
source_tokens : `TextFieldTensors`
The output of `TextField.as_array()` applied on the source `TextField`. This will be
passed through a `TextFieldEmbedder` and then through an encoder.
# Returns
Dict[str, torch.Tensor]
Map consisting of the key `source_mask` with the mask over the
`source_tokens` text field,
and the key `encoder_outputs` with the output tensor from
forward pass on the encoder.
"""
# shape: (batch_size, max_input_sequence_length, encoder_input_dim)
embedded_input = self._source_text_embedder(source_tokens)
# shape: (batch_size, max_input_sequence_length)
source_mask = util.get_text_field_mask(source_tokens)
# shape: (batch_size, max_input_sequence_length, encoder_output_dim)
encoder_outputs = self._encoder(embedded_input, source_mask)
return {"source_mask": source_mask, "encoder_outputs": encoder_outputs}
def get_metrics(self, reset: bool = False) -> Dict[str, float]:
return self._decoder.get_metrics(reset)
default_predictor = "seq2seq"
| allennlp-models-main | allennlp_models/generation/models/composed_seq2seq.py |
# flake8: noqa: F403
from allennlp_models.generation.modules.decoder_nets import *
from allennlp_models.generation.modules.seq_decoders import *
| allennlp-models-main | allennlp_models/generation/modules/__init__.py |
from allennlp_models.generation.modules.seq_decoders.auto_regressive import AutoRegressiveSeqDecoder
from allennlp_models.generation.modules.seq_decoders.seq_decoder import SeqDecoder
| allennlp-models-main | allennlp_models/generation/modules/seq_decoders/__init__.py |
import warnings
from typing import Dict, List, Tuple, Optional
import numpy
import torch
import torch.nn.functional as F
from torch.nn import Linear
from allennlp.common.checks import ConfigurationError
from allennlp.common.util import END_SYMBOL, START_SYMBOL
from allennlp.data import TextFieldTensors, Vocabulary
from allennlp.modules import Embedding
from allennlp.nn import util
from allennlp.nn.beam_search import BeamSearch
from allennlp.training.metrics import Metric
from allennlp.common import Lazy
from allennlp_models.generation.modules.decoder_nets.decoder_net import DecoderNet
from .seq_decoder import SeqDecoder
@SeqDecoder.register("auto_regressive_seq_decoder")
class AutoRegressiveSeqDecoder(SeqDecoder):
"""
An autoregressive decoder that can be used for most seq2seq tasks.
# Parameters
vocab : `Vocabulary`, required
Vocabulary containing source and target vocabularies. They may be under the same namespace
(`tokens`) or the target tokens can have a different namespace, in which case it needs to
be specified as `target_namespace`.
decoder_net : `DecoderNet`, required
Module that contains implementation of neural network for decoding output elements
target_embedder : `Embedding`
Embedder for target tokens.
target_namespace : `str`, optional (default = `'tokens'`)
If the target side vocabulary is different from the source side's, you need to specify the
target's namespace here. If not, we'll assume it is "tokens", which is also the default
choice for the source side, and this might cause them to share vocabularies.
beam_search : `BeamSearch`, optional (default = `Lazy(BeamSearch)`)
        This is used during inference to select the tokens of the decoded output sequence.
tensor_based_metric : `Metric`, optional (default = `None`)
        A metric to track on validation data that takes raw tensors when it is called.
This metric must accept two arguments when called: a batched tensor
of predicted token indices, and a batched tensor of gold token indices.
token_based_metric : `Metric`, optional (default = `None`)
A metric to track on validation data that takes lists of lists of tokens
as input. This metric must accept two arguments when called, both
of type `List[List[str]]`. The first is a predicted sequence for each item
in the batch and the second is a gold sequence for each item in the batch.
    scheduled_sampling_ratio : `float`, optional (default = `0.0`)
        Defines the ratio between teacher-forced training and using the model's own output. If it is zero
        (teacher forcing only) and `decoder_net` supports parallel decoding, we get the output
        predictions in a single forward pass of the `decoder_net`.
"""
def __init__(
self,
vocab: Vocabulary,
decoder_net: DecoderNet,
target_embedder: Embedding,
target_namespace: str = "tokens",
beam_search: Lazy[BeamSearch] = Lazy(BeamSearch),
tie_output_embedding: bool = False,
scheduled_sampling_ratio: float = 0,
label_smoothing_ratio: Optional[float] = None,
tensor_based_metric: Metric = None,
token_based_metric: Metric = None,
**kwargs
) -> None:
super().__init__(target_embedder)
self._vocab = vocab
        # Decodes the sequence of encoded hidden states into a new sequence of hidden states.
self._decoder_net = decoder_net
self._target_namespace = target_namespace
self._label_smoothing_ratio = label_smoothing_ratio
# At prediction time, we use a beam search to find the most likely sequence of target tokens.
# We need the start symbol to provide as the input at the first timestep of decoding, and
# end symbol as a way to indicate the end of the decoded sequence.
self._start_index = self._vocab.get_token_index(START_SYMBOL, self._target_namespace)
self._end_index = self._vocab.get_token_index(END_SYMBOL, self._target_namespace)
# For backwards compatibility, check if beam_size or max_decoding_steps were passed in as
# kwargs. If so, update the BeamSearch object before constructing and raise a DeprecationWarning
deprecation_warning = (
"The parameter {} has been deprecated."
" Provide this parameter as argument to beam_search instead."
)
beam_search_extras = {}
if "beam_size" in kwargs:
beam_search_extras["beam_size"] = kwargs["beam_size"]
warnings.warn(deprecation_warning.format("beam_size"), DeprecationWarning)
if "max_decoding_steps" in kwargs:
beam_search_extras["max_steps"] = kwargs["max_decoding_steps"]
warnings.warn(deprecation_warning.format("max_decoding_steps"), DeprecationWarning)
self._beam_search = beam_search.construct(
end_index=self._end_index, vocab=self._vocab, **beam_search_extras
)
target_vocab_size = self._vocab.get_vocab_size(self._target_namespace)
if self.target_embedder.get_output_dim() != self._decoder_net.target_embedding_dim:
raise ConfigurationError(
"Target Embedder output_dim doesn't match decoder module's input."
)
# We project the hidden state from the decoder into the output vocabulary space
# in order to get log probabilities of each target token, at each time step.
self._output_projection_layer = Linear(
self._decoder_net.get_output_dim(), target_vocab_size
)
if tie_output_embedding:
if self._output_projection_layer.weight.shape != self.target_embedder.weight.shape:
raise ConfigurationError(
"Can't tie embeddings with output linear layer, due to shape mismatch"
)
self._output_projection_layer.weight = self.target_embedder.weight
# These metrics will be updated during training and validation
self._tensor_based_metric = tensor_based_metric
self._token_based_metric = token_based_metric
self._scheduled_sampling_ratio = scheduled_sampling_ratio
def _forward_beam_search(self, state: Dict[str, torch.Tensor]) -> Dict[str, torch.Tensor]:
"""
        Prepares inputs for the beam search, runs the search, and returns the beam search results.
"""
batch_size = state["source_mask"].size()[0]
start_predictions = state["source_mask"].new_full(
(batch_size,), fill_value=self._start_index, dtype=torch.long
)
# shape (all_top_k_predictions): (batch_size, beam_size, num_decoding_steps)
# shape (log_probabilities): (batch_size, beam_size)
all_top_k_predictions, log_probabilities = self._beam_search.search(
start_predictions, state, self.take_step
)
output_dict = {
"class_log_probabilities": log_probabilities,
"predictions": all_top_k_predictions,
}
return output_dict
def _forward_loss(
self, state: Dict[str, torch.Tensor], target_tokens: TextFieldTensors
) -> Dict[str, torch.Tensor]:
"""
Make forward pass during training or do greedy search during prediction.
Notes
-----
We really only use the predictions from the method to test that beam search
with a beam size of 1 gives the same results.
"""
# shape: (batch_size, max_input_sequence_length, encoder_output_dim)
encoder_outputs = state["encoder_outputs"]
# shape: (batch_size, max_input_sequence_length)
source_mask = state["source_mask"]
# shape: (batch_size, max_target_sequence_length)
targets = util.get_token_ids_from_text_field_tensors(target_tokens)
# Prepare embeddings for targets. They will be used as gold embeddings during decoder training
# shape: (batch_size, max_target_sequence_length, embedding_dim)
target_embedding = self.target_embedder(targets)
# shape: (batch_size, max_target_batch_sequence_length)
target_mask = util.get_text_field_mask(target_tokens)
if self._scheduled_sampling_ratio == 0 and self._decoder_net.decodes_parallel:
_, decoder_output = self._decoder_net(
previous_state=state,
previous_steps_predictions=target_embedding[:, :-1, :],
encoder_outputs=encoder_outputs,
source_mask=source_mask,
previous_steps_mask=target_mask[:, :-1],
)
# shape: (group_size, max_target_sequence_length, num_classes)
logits = self._output_projection_layer(decoder_output)
else:
batch_size = source_mask.size()[0]
_, target_sequence_length = targets.size()
# The last input from the target is either padding or the end symbol.
# Either way, we don't have to process it.
num_decoding_steps = target_sequence_length - 1
# Initialize target predictions with the start index.
# shape: (batch_size,)
last_predictions = source_mask.new_full(
(batch_size,), fill_value=self._start_index, dtype=torch.long
)
# shape: (steps, batch_size, target_embedding_dim)
steps_embeddings = torch.Tensor([])
step_logits: List[torch.Tensor] = []
for timestep in range(num_decoding_steps):
if self.training and torch.rand(1).item() < self._scheduled_sampling_ratio:
                    # Scheduled sampling: during training, with probability
                    # `_scheduled_sampling_ratio`, feed the model its own prediction from
                    # the previous timestep instead of the gold token.
# shape: (batch_size, steps, target_embedding_dim)
state["previous_steps_predictions"] = steps_embeddings
# shape: (batch_size, )
effective_last_prediction = last_predictions
else:
# shape: (batch_size, )
effective_last_prediction = targets[:, timestep]
if timestep == 0:
state["previous_steps_predictions"] = torch.Tensor([])
else:
# shape: (batch_size, steps, target_embedding_dim)
state["previous_steps_predictions"] = target_embedding[:, :timestep]
# shape: (batch_size, num_classes)
output_projections, state = self._prepare_output_projections(
effective_last_prediction, state
)
# list of tensors, shape: (batch_size, 1, num_classes)
step_logits.append(output_projections.unsqueeze(1))
# shape (predicted_classes): (batch_size,)
_, predicted_classes = torch.max(output_projections, 1)
# shape (predicted_classes): (batch_size,)
last_predictions = predicted_classes
# shape: (batch_size, 1, target_embedding_dim)
last_predictions_embeddings = self.target_embedder(last_predictions).unsqueeze(1)
                # This step is required since we want to keep two different prediction histories: gold and real.
if steps_embeddings.shape[-1] == 0:
                    # There are no previous steps, except for the start vectors in `last_predictions`.
# shape: (group_size, 1, target_embedding_dim)
steps_embeddings = last_predictions_embeddings
else:
# shape: (group_size, steps_count, target_embedding_dim)
steps_embeddings = torch.cat([steps_embeddings, last_predictions_embeddings], 1)
# shape: (batch_size, num_decoding_steps, num_classes)
logits = torch.cat(step_logits, 1)
# Compute loss.
target_mask = util.get_text_field_mask(target_tokens)
loss = self._get_loss(logits, targets, target_mask)
        # TODO: We will be using beam search to get predictions for validation, but if beam size is 1
        # we could consider taking the last_predictions here and building step_predictions,
        # and using that instead of running beam search again, if validation performance is taking a hit.
output_dict = {"loss": loss}
return output_dict
def _prepare_output_projections(
self, last_predictions: torch.Tensor, state: Dict[str, torch.Tensor]
) -> Tuple[torch.Tensor, Dict[str, torch.Tensor]]:
"""
        Decode current state and last prediction to produce projections
into the target space, which can then be used to get probabilities of
each target token for the next step.
Inputs are the same as for `take_step()`.
"""
# shape: (group_size, max_input_sequence_length, encoder_output_dim)
encoder_outputs = state["encoder_outputs"]
# shape: (group_size, max_input_sequence_length)
source_mask = state["source_mask"]
# shape: (group_size, steps_count, decoder_output_dim)
previous_steps_predictions = state.get("previous_steps_predictions")
# shape: (batch_size, 1, target_embedding_dim)
last_predictions_embeddings = self.target_embedder(last_predictions).unsqueeze(1)
if previous_steps_predictions is None or previous_steps_predictions.shape[-1] == 0:
            # There are no previous steps, except for the start vectors in `last_predictions`.
# shape: (group_size, 1, target_embedding_dim)
previous_steps_predictions = last_predictions_embeddings
else:
# shape: (group_size, steps_count, target_embedding_dim)
previous_steps_predictions = torch.cat(
[previous_steps_predictions, last_predictions_embeddings], 1
)
decoder_state, decoder_output = self._decoder_net(
previous_state=state,
encoder_outputs=encoder_outputs,
source_mask=source_mask,
previous_steps_predictions=previous_steps_predictions,
)
state["previous_steps_predictions"] = previous_steps_predictions
# Update state with new decoder state, override previous state
state.update(decoder_state)
if self._decoder_net.decodes_parallel:
decoder_output = decoder_output[:, -1, :]
# shape: (group_size, num_classes)
output_projections = self._output_projection_layer(decoder_output)
return output_projections, state
def _get_loss(
self, logits: torch.LongTensor, targets: torch.LongTensor, target_mask: torch.BoolTensor
) -> torch.Tensor:
"""
Compute loss.
Takes logits (unnormalized outputs from the decoder) of size (batch_size,
num_decoding_steps, num_classes), target indices of size (batch_size, num_decoding_steps+1)
and corresponding masks of size (batch_size, num_decoding_steps+1) steps and computes cross
entropy loss while taking the mask into account.
The length of `targets` is expected to be greater than that of `logits` because the
decoder does not need to compute the output corresponding to the last timestep of
`targets`. This method aligns the inputs appropriately to compute the loss.
During training, we want the logit corresponding to timestep i to be similar to the target
token from timestep i + 1. That is, the targets should be shifted by one timestep for
appropriate comparison. Consider a single example where the target has 3 words, and
padding is to 7 tokens.
The complete sequence would correspond to <S> w1 w2 w3 <E> <P> <P>
and the mask would be 1 1 1 1 1 0 0
and let the logits be l1 l2 l3 l4 l5 l6
We actually need to compare:
the sequence w1 w2 w3 <E> <P> <P>
with masks 1 1 1 1 0 0
against l1 l2 l3 l4 l5 l6
(where the input was) <S> w1 w2 w3 <E> <P>
"""
# shape: (batch_size, num_decoding_steps)
relevant_targets = targets[:, 1:].contiguous()
# shape: (batch_size, num_decoding_steps)
relevant_mask = target_mask[:, 1:].contiguous()
return util.sequence_cross_entropy_with_logits(
logits, relevant_targets, relevant_mask, label_smoothing=self._label_smoothing_ratio
)
def get_output_dim(self):
return self._decoder_net.get_output_dim()
def take_step(
self, last_predictions: torch.Tensor, state: Dict[str, torch.Tensor], step: int
) -> Tuple[torch.Tensor, Dict[str, torch.Tensor]]:
"""
Take a decoding step. This is called by the beam search class.
# Parameters
last_predictions : `torch.Tensor`
A tensor of shape `(group_size,)`, which gives the indices of the predictions
during the last time step.
state : `Dict[str, torch.Tensor]`
A dictionary of tensors that contain the current state information
needed to predict the next step, which includes the encoder outputs,
the source mask, and the decoder hidden state and context. Each of these
tensors has shape `(group_size, *)`, where `*` can be any other number
of dimensions.
step : `int`
The time step in beam search decoding.
# Returns
Tuple[torch.Tensor, Dict[str, torch.Tensor]]
A tuple of `(log_probabilities, updated_state)`, where `log_probabilities`
is a tensor of shape `(group_size, num_classes)` containing the predicted
log probability of each class for the next step, for each item in the group,
while `updated_state` is a dictionary of tensors containing the encoder outputs,
source mask, and updated decoder hidden state and context.
Notes
-----
We treat the inputs as a batch, even though `group_size` is not necessarily
equal to `batch_size`, since the group may contain multiple states
for each source sentence in the batch.
"""
# shape: (group_size, num_classes)
output_projections, state = self._prepare_output_projections(last_predictions, state)
# shape: (group_size, num_classes)
class_log_probabilities = F.log_softmax(output_projections, dim=-1)
return class_log_probabilities, state
def get_metrics(self, reset: bool = False) -> Dict[str, float]:
all_metrics: Dict[str, float] = {}
if not self.training:
if self._tensor_based_metric is not None:
all_metrics.update(
self._tensor_based_metric.get_metric(reset=reset) # type: ignore
)
if self._token_based_metric is not None:
all_metrics.update(self._token_based_metric.get_metric(reset=reset)) # type: ignore
return all_metrics
def forward(
self,
encoder_out: Dict[str, torch.LongTensor],
target_tokens: TextFieldTensors = None,
) -> Dict[str, torch.Tensor]:
state = encoder_out
decoder_init_state = self._decoder_net.init_decoder_state(state)
state.update(decoder_init_state)
if target_tokens:
state_forward_loss = (
state if self.training else {k: v.clone() for k, v in state.items()}
)
output_dict = self._forward_loss(state_forward_loss, target_tokens)
else:
output_dict = {}
if not self.training:
predictions = self._forward_beam_search(state)
output_dict.update(predictions)
if target_tokens:
targets = util.get_token_ids_from_text_field_tensors(target_tokens)
if self._tensor_based_metric is not None:
# shape: (batch_size, beam_size, max_sequence_length)
top_k_predictions = output_dict["predictions"]
# shape: (batch_size, max_predicted_sequence_length)
best_predictions = top_k_predictions[:, 0, :]
self._tensor_based_metric(best_predictions, targets) # type: ignore
if self._token_based_metric is not None:
output_dict = self.post_process(output_dict)
predicted_tokens = output_dict["predicted_tokens"]
self._token_based_metric( # type: ignore
predicted_tokens,
self.indices_to_tokens(targets[:, 1:]),
)
return output_dict
def post_process(self, output_dict: Dict[str, torch.Tensor]) -> Dict[str, torch.Tensor]:
"""
This method trims the output predictions to the first end symbol, replaces indices with
corresponding tokens, and adds a field called `predicted_tokens` to the `output_dict`.
"""
predicted_indices = output_dict["predictions"]
all_predicted_tokens = self.indices_to_tokens(predicted_indices)
output_dict["predicted_tokens"] = all_predicted_tokens
return output_dict
def indices_to_tokens(self, batch_indeces: numpy.ndarray) -> List[List[str]]:
if not isinstance(batch_indeces, numpy.ndarray):
batch_indeces = batch_indeces.detach().cpu().numpy()
all_tokens = []
for indices in batch_indeces:
# Beam search gives us the top k results for each source sentence in the batch
# but we just want the single best.
if len(indices.shape) > 1:
indices = indices[0]
indices = list(indices)
# Collect indices till the first end_symbol
if self._end_index in indices:
indices = indices[: indices.index(self._end_index)]
tokens = [
self._vocab.get_token_from_index(x, namespace=self._target_namespace)
for x in indices
]
all_tokens.append(tokens)
return all_tokens
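# Standalone sketch (not part of the original model code): the shift-by-one alignment
# described in `_get_loss` above, written with plain PyTorch on made-up values. The
# real implementation delegates to allennlp's `sequence_cross_entropy_with_logits`.
if __name__ == "__main__":
    import torch
    import torch.nn.functional as F

    # targets: <S> w1 w2 <E> <P>  (index 0 is padding; other indices are made up)
    targets = torch.tensor([[1, 4, 5, 2, 0]])
    target_mask = targets != 0
    num_decoding_steps = targets.size(1) - 1          # no logits for the last input
    logits = torch.randn(1, num_decoding_steps, 7)    # (batch, steps, num_classes)

    relevant_targets = targets[:, 1:]                 # logits at step i predict token i + 1
    relevant_mask = target_mask[:, 1:]
    per_token_loss = F.cross_entropy(
        logits.transpose(1, 2), relevant_targets, reduction="none"
    )
    loss = (per_token_loss * relevant_mask).sum() / relevant_mask.sum()
    print(loss)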
| allennlp-models-main | allennlp_models/generation/modules/seq_decoders/auto_regressive.py |
from typing import Dict, Optional
import torch
from torch.nn import Module
from allennlp.common import Registrable
from allennlp.modules import Embedding
class SeqDecoder(Module, Registrable):
"""
A `SeqDecoder` abstract class representing the entire decoder (embedding and neural network) of
a Seq2Seq architecture.
This is meant to be used with `allennlp.models.encoder_decoder.composed_seq2seq.ComposedSeq2Seq`.
The implementation of this abstract class ideally uses a
decoder neural net `allennlp.modules.seq2seq_decoders.decoder_net.DecoderNet` for decoding.
The `default_implementation`
`allennlp.modules.seq2seq_decoders.seq_decoder.auto_regressive_seq_decoder.AutoRegressiveSeqDecoder`
    covers most use cases, so it is more likely that you will use the default implementation than create
    a new one.
# Parameters
target_embedder : `Embedding`, required
Embedder for target tokens. Needed in the base class to enable weight tying.
"""
default_implementation = "auto_regressive_seq_decoder"
def __init__(self, target_embedder: Embedding) -> None:
super().__init__()
self.target_embedder = target_embedder
def get_output_dim(self) -> int:
"""
The dimension of each timestep of the hidden state in the layer before final softmax.
Needed to check whether the model is compatible for embedding-final layer weight tying.
"""
raise NotImplementedError()
def get_metrics(self, reset: bool = False) -> Dict[str, float]:
"""
The decoder is responsible for computing metrics using the target tokens.
"""
raise NotImplementedError()
def forward(
self,
encoder_out: Dict[str, torch.LongTensor],
target_tokens: Optional[Dict[str, torch.LongTensor]] = None,
) -> Dict[str, torch.Tensor]:
"""
        Decodes from encoded states to a sequence of outputs, and also computes the loss
        if `target_tokens` are given.
# Parameters
encoder_out : `Dict[str, torch.LongTensor]`, required
Dictionary with encoded state, ideally containing the encoded vectors and the
source mask.
target_tokens : `Dict[str, torch.LongTensor]`, optional
The output of `TextField.as_array()` applied on the target `TextField`.
"""
raise NotImplementedError()
def post_process(self, output_dict: Dict[str, torch.Tensor]) -> Dict[str, torch.Tensor]:
"""
        Post-processing for converting raw outputs to predictions during inference.
        Composing models such as `allennlp.models.encoder_decoders.composed_seq2seq.ComposedSeq2Seq`
        can call this method when `decode` is called.
"""
raise NotImplementedError()
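# A minimal illustrative sketch (not part of the library) of how a concrete decoder could
# plug into this abstraction; the class name, registered name, and `decoder_net` argument
# below are hypothetical:
#
#   @SeqDecoder.register("my_seq_decoder")
#   class MySeqDecoder(SeqDecoder):
#       def __init__(self, target_embedder: Embedding, decoder_net: "DecoderNet") -> None:
#           super().__init__(target_embedder)
#           self._decoder_net = decoder_net
#
#       def get_output_dim(self) -> int:
#           return self._decoder_net.get_output_dim()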
| allennlp-models-main | allennlp_models/generation/modules/seq_decoders/seq_decoder.py |
from typing import Tuple, Dict, Optional
import torch
from allennlp.common import Registrable
class DecoderNet(torch.nn.Module, Registrable):
"""
This class abstracts the neural architectures for decoding the encoded states and
embedded previous step prediction vectors into a new sequence of output vectors.
    Implementations of `DecoderNet` are used by implementations of
    `allennlp.modules.seq2seq_decoders.seq_decoder.SeqDecoder` such as
    `allennlp.modules.seq2seq_decoders.seq_decoder.auto_regressive_seq_decoder.AutoRegressiveSeqDecoder`.
    The outputs of this module are typically used by `allennlp.modules.seq2seq_decoders.seq_decoder.SeqDecoder`
    to apply the final output feedforward layer and softmax.
# Parameters
decoding_dim : `int`, required
Defines dimensionality of output vectors.
target_embedding_dim : `int`, required
        Defines dimensionality of target embeddings. Since this model takes its output from the previous step
        as input to the following step, this is also the input dimensionality.
    decodes_parallel : `bool`, required
        Defines whether the decoder generates multiple next-step predictions in a single `forward` pass.
"""
def __init__(
self, decoding_dim: int, target_embedding_dim: int, decodes_parallel: bool
) -> None:
super().__init__()
self.target_embedding_dim = target_embedding_dim
self.decoding_dim = decoding_dim
self.decodes_parallel = decodes_parallel
def get_output_dim(self) -> int:
"""
Returns the dimension of each vector in the sequence output by this `DecoderNet`.
This is `not` the shape of the returned tensor, but the last element of that shape.
"""
return self.decoding_dim
def init_decoder_state(
self, encoder_out: Dict[str, torch.LongTensor]
) -> Dict[str, torch.Tensor]:
"""
Initialize the encoded state to be passed to the first decoding time step.
        # Parameters
        encoder_out : `Dict[str, torch.LongTensor]`
            Dictionary with the encoded state, typically containing the encoder outputs
            and the source mask.
        # Returns
        `Dict[str, torch.Tensor]`
            Initial state
"""
raise NotImplementedError()
def forward(
self,
previous_state: Dict[str, torch.Tensor],
encoder_outputs: torch.Tensor,
source_mask: torch.BoolTensor,
previous_steps_predictions: torch.Tensor,
previous_steps_mask: Optional[torch.BoolTensor] = None,
) -> Tuple[Dict[str, torch.Tensor], torch.Tensor]:
"""
        Performs a decoding step, and returns a dictionary with the decoder hidden state or cache and the decoder output.
The decoder output is a 3d tensor (group_size, steps_count, decoder_output_dim)
if `self.decodes_parallel` is True, else it is a 2d tensor with (group_size, decoder_output_dim).
# Parameters
previous_steps_predictions : `torch.Tensor`, required
Embeddings of predictions on previous step.
Shape: (group_size, steps_count, decoder_output_dim)
encoder_outputs : `torch.Tensor`, required
Vectors of all encoder outputs.
Shape: (group_size, max_input_sequence_length, encoder_output_dim)
source_mask : `torch.BoolTensor`, required
This tensor contains mask for each input sequence.
Shape: (group_size, max_input_sequence_length)
previous_state : `Dict[str, torch.Tensor]`, required
previous state of decoder
# Returns
Tuple[Dict[str, torch.Tensor], torch.Tensor]
            Tuple of new decoder state and decoder output. The output should be used to generate output sequence elements.
"""
raise NotImplementedError()
| allennlp-models-main | allennlp_models/generation/modules/decoder_nets/decoder_net.py |
import math
from copy import deepcopy
from typing import Dict, Optional, Tuple
import torch
import torch.nn.functional as F
from torch import nn
from torch.autograd import Variable
from allennlp.modules.layer_norm import LayerNorm
from allennlp.nn import util as nn_util
from allennlp_models.lm.modules.seq2seq_encoders.bidirectional_lm_transformer import (
SublayerConnection,
subsequent_mask,
PositionwiseFeedForward,
PositionalEncoding,
MultiHeadedAttention,
)
from .decoder_net import DecoderNet
@DecoderNet.register("stacked_self_attention")
class StackedSelfAttentionDecoderNet(DecoderNet):
"""
A Stacked self-attention decoder implementation.
# Parameters
decoding_dim : `int`, required
Defines dimensionality of output vectors.
target_embedding_dim : `int`, required
        Defines dimensionality of input target embeddings. Since this model takes its output from the previous step
        as input to the following step, this is also the input dimensionality.
feedforward_hidden_dim : `int`, required.
The middle dimension of the FeedForward network. The input and output
dimensions are fixed to ensure sizes match up for the self attention layers.
num_layers : `int`, required.
        The number of stacked self attention -> feedforward -> layer normalisation blocks.
num_attention_heads : `int`, required.
The number of attention heads to use per layer.
use_positional_encoding : `bool`, optional, (default = `True`)
Whether to add sinusoidal frequencies to the input tensor. This is strongly recommended,
as without this feature, the self attention layers have no idea of absolute or relative
position (as they are just computing pairwise similarity between vectors of elements),
        which can be an important feature for many tasks.
dropout_prob : `float`, optional, (default = `0.1`)
The dropout probability for the feedforward network.
residual_dropout_prob : `float`, optional, (default = `0.2`)
The dropout probability for the residual connections.
attention_dropout_prob : `float`, optional, (default = `0.1`)
The dropout probability for the attention distributions in each attention layer.
"""
def __init__(
self,
decoding_dim: int,
target_embedding_dim: int,
feedforward_hidden_dim: int,
num_layers: int,
num_attention_heads: int,
use_positional_encoding: bool = True,
positional_encoding_max_steps: int = 5000,
dropout_prob: float = 0.1,
residual_dropout_prob: float = 0.2,
attention_dropout_prob: float = 0.1,
) -> None:
super().__init__(
decoding_dim=decoding_dim,
target_embedding_dim=target_embedding_dim,
decodes_parallel=True,
)
attn = MultiHeadedAttention(num_attention_heads, decoding_dim, attention_dropout_prob)
feed_forward = PositionwiseFeedForward(decoding_dim, feedforward_hidden_dim, dropout_prob)
self._embed_scale = math.sqrt(decoding_dim)
self._positional_embedder = (
PositionalEncoding(decoding_dim, positional_encoding_max_steps)
if use_positional_encoding
else None
)
self._dropout = nn.Dropout(dropout_prob)
self._self_attention = Decoder(
DecoderLayer(
decoding_dim, deepcopy(attn), deepcopy(attn), feed_forward, residual_dropout_prob
),
num_layers,
)
def init_decoder_state(
self, encoder_out: Dict[str, torch.LongTensor]
) -> Dict[str, torch.Tensor]:
return {}
def forward(
self,
previous_state: Dict[str, torch.Tensor],
encoder_outputs: torch.Tensor,
source_mask: torch.BoolTensor,
previous_steps_predictions: torch.Tensor,
previous_steps_mask: Optional[torch.BoolTensor] = None,
) -> Tuple[Dict[str, torch.Tensor], torch.Tensor]:
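        # Add a broadcast dimension to the source mask and build a "subsequent" mask so
        # that each target position can only attend to itself and earlier positions.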
source_mask = source_mask.unsqueeze(-2)
future_mask = Variable(
subsequent_mask(previous_steps_predictions.size(-2), device=source_mask.device).type_as(
source_mask.data
)
)
if previous_steps_mask is None:
previous_steps_mask = future_mask
else:
previous_steps_mask = previous_steps_mask.unsqueeze(-2) & future_mask
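        # Scale the target embeddings by sqrt(decoding_dim) before adding positional
        # encodings, as in the original Transformer.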
previous_steps_predictions = previous_steps_predictions * self._embed_scale
if self._positional_embedder:
previous_steps_predictions = self._positional_embedder(previous_steps_predictions)
previous_steps_predictions = self._dropout(previous_steps_predictions)
decoded = self._self_attention(
previous_steps_predictions, encoder_outputs, source_mask, previous_steps_mask
)
return {}, decoded
class Decoder(nn.Module):
"""
Transformer N layer decoder with masking.
Code taken from http://nlp.seas.harvard.edu/2018/04/03/attention.html
"""
def __init__(self, layer: nn.Module, num_layers: int) -> None:
super().__init__()
self.layers = nn_util.clone(layer, num_layers)
self.norm = LayerNorm(layer.size)
def forward(
self,
x: torch.Tensor,
memory: torch.Tensor,
src_mask: torch.BoolTensor,
tgt_mask: torch.BoolTensor,
) -> torch.Tensor:
for layer in self.layers:
x = layer(x, memory, src_mask, tgt_mask)
return self.norm(x)
class DecoderLayer(nn.Module):
"""
A single layer of transformer decoder.
Code taken from http://nlp.seas.harvard.edu/2018/04/03/attention.html
"""
def __init__(
self,
size: int,
self_attn: MultiHeadedAttention,
src_attn: MultiHeadedAttention,
feed_forward: F,
dropout: float,
) -> None:
super().__init__()
self.size = size
self.self_attn = self_attn
self.src_attn = src_attn
self.feed_forward = feed_forward
self.sublayer = nn_util.clone(SublayerConnection(size, dropout), 3)
def forward(
self,
x: torch.Tensor,
memory: torch.Tensor,
src_mask: torch.BoolTensor,
tgt_mask: torch.BoolTensor,
) -> torch.Tensor:
# Follow Figure 1 (right) for connections.
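        # Sublayer 0: masked self-attention over the target prefix; sublayer 1: attention
        # over the encoder memory; sublayer 2: position-wise feed-forward.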
x = self.sublayer[0](x, lambda y: self.self_attn(y, y, y, tgt_mask))
x = self.sublayer[1](x, lambda y: self.src_attn(y, memory, memory, src_mask))
return self.sublayer[2](x, self.feed_forward)
| allennlp-models-main | allennlp_models/generation/modules/decoder_nets/stacked_self_attention.py |
from allennlp_models.generation.modules.decoder_nets.lstm_cell import LstmCellDecoderNet
from allennlp_models.generation.modules.decoder_nets.decoder_net import DecoderNet
from allennlp_models.generation.modules.decoder_nets.stacked_self_attention import (
StackedSelfAttentionDecoderNet,
)
| allennlp-models-main | allennlp_models/generation/modules/decoder_nets/__init__.py |
from typing import Tuple, Dict, Optional
import torch
from torch.nn import LSTMCell
from allennlp.modules import Attention
from allennlp.nn import util
from .decoder_net import DecoderNet
@DecoderNet.register("lstm_cell")
class LstmCellDecoderNet(DecoderNet):
"""
    This decoder net implements a simple decoding network with an LSTMCell and attention.
# Parameters
decoding_dim : `int`, required
Defines dimensionality of output vectors.
target_embedding_dim : `int`, required
        Defines dimensionality of input target embeddings. Since this model takes its output from the previous step
        as input to the following step, this is also the input dimensionality.
attention : `Attention`, optional (default = `None`)
If you want to use attention to get a dynamic summary of the encoder outputs at each step
of decoding, this is the function used to compute similarity between the decoder hidden
state and encoder outputs.
"""
def __init__(
self,
decoding_dim: int,
target_embedding_dim: int,
attention: Optional[Attention] = None,
bidirectional_input: bool = False,
) -> None:
super().__init__(
decoding_dim=decoding_dim,
target_embedding_dim=target_embedding_dim,
decodes_parallel=False,
)
        # In this particular type of decoder, the output of the previous step passes directly
        # to the input of the current step. We also assume that the decoder output dimensionality
        # is equal to the encoder output dimensionality.
decoder_input_dim = self.target_embedding_dim
# Attention mechanism applied to the encoder output for each step.
self._attention = attention
if self._attention:
            # If using attention, a weighted average over encoder outputs will be concatenated
            # to the previous target embedding to form the input to the decoder at each
            # time step. The encoder output dim will be the same as `decoding_dim`.
decoder_input_dim += decoding_dim
# We'll use an LSTM cell as the recurrent cell that produces a hidden state
# for the decoder at each time step.
self._decoder_cell = LSTMCell(decoder_input_dim, self.decoding_dim)
self._bidirectional_input = bidirectional_input
def _prepare_attended_input(
self,
decoder_hidden_state: torch.Tensor = None,
encoder_outputs: torch.Tensor = None,
encoder_outputs_mask: torch.BoolTensor = None,
) -> torch.Tensor:
"""Apply attention over encoder outputs and decoder state."""
# shape: (batch_size, max_input_sequence_length)
input_weights = self._attention(decoder_hidden_state, encoder_outputs, encoder_outputs_mask)
# shape: (batch_size, encoder_output_dim)
attended_input = util.weighted_sum(encoder_outputs, input_weights)
return attended_input
def init_decoder_state(
self, encoder_out: Dict[str, torch.LongTensor]
) -> Dict[str, torch.Tensor]:
batch_size, _ = encoder_out["source_mask"].size()
# Initialize the decoder hidden state with the final output of the encoder,
# and the decoder context with zeros.
# shape: (batch_size, encoder_output_dim)
final_encoder_output = util.get_final_encoder_states(
encoder_out["encoder_outputs"],
encoder_out["source_mask"],
bidirectional=self._bidirectional_input,
)
return {
"decoder_hidden": final_encoder_output, # shape: (batch_size, decoder_output_dim)
"decoder_context": final_encoder_output.new_zeros(batch_size, self.decoding_dim)
# shape: (batch_size, decoder_output_dim)
}
def forward(
self,
previous_state: Dict[str, torch.Tensor],
encoder_outputs: torch.Tensor,
source_mask: torch.BoolTensor,
previous_steps_predictions: torch.Tensor,
previous_steps_mask: Optional[torch.BoolTensor] = None,
) -> Tuple[Dict[str, torch.Tensor], torch.Tensor]:
decoder_hidden = previous_state["decoder_hidden"]
decoder_context = previous_state["decoder_context"]
# shape: (group_size, output_dim)
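        # This decoder does not decode in parallel, so only the embedding of the most
        # recent prediction is fed to the LSTM cell at this step.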
last_predictions_embedding = previous_steps_predictions[:, -1]
if self._attention:
# shape: (group_size, encoder_output_dim)
attended_input = self._prepare_attended_input(
decoder_hidden, encoder_outputs, source_mask
)
# shape: (group_size, decoder_output_dim + target_embedding_dim)
decoder_input = torch.cat((attended_input, last_predictions_embedding), -1)
else:
# shape: (group_size, target_embedding_dim)
decoder_input = last_predictions_embedding
# shape (decoder_hidden): (batch_size, decoder_output_dim)
# shape (decoder_context): (batch_size, decoder_output_dim)
decoder_hidden, decoder_context = self._decoder_cell(
decoder_input.float(), (decoder_hidden.float(), decoder_context.float())
)
return (
{"decoder_hidden": decoder_hidden, "decoder_context": decoder_context},
decoder_hidden,
)
| allennlp-models-main | allennlp_models/generation/modules/decoder_nets/lstm_cell.py |
from typing import Any, Dict, List, Optional, Tuple, Set
from allennlp.data.fields import (
Field,
ListField,
TextField,
SpanField,
MetadataField,
SequenceLabelField,
)
from allennlp.data.instance import Instance
from allennlp.data.tokenizers import Token, PretrainedTransformerTokenizer
from allennlp.data.token_indexers import TokenIndexer
from allennlp.data.dataset_readers.dataset_utils import enumerate_spans
def make_coref_instance(
sentences: List[List[str]],
token_indexers: Dict[str, TokenIndexer],
max_span_width: int,
gold_clusters: Optional[List[List[Tuple[int, int]]]] = None,
wordpiece_modeling_tokenizer: PretrainedTransformerTokenizer = None,
max_sentences: int = None,
remove_singleton_clusters: bool = True,
) -> Instance:
"""
# Parameters
sentences : `List[List[str]]`, required.
A list of lists representing the tokenised words and sentences in the document.
token_indexers : `Dict[str, TokenIndexer]`
This is used to index the words in the document. See :class:`TokenIndexer`.
max_span_width : `int`, required.
The maximum width of candidate spans to consider.
gold_clusters : `Optional[List[List[Tuple[int, int]]]]`, optional (default = `None`)
A list of all clusters in the document, represented as word spans with absolute indices
in the entire document. Each cluster contains some number of spans, which can be nested
and overlap. If there are exact matches between clusters, they will be resolved
using `_canonicalize_clusters`.
wordpiece_modeling_tokenizer: `PretrainedTransformerTokenizer`, optional (default = `None`)
        If not None, subword tokenization is performed using the supplied tokenizer
        and the labels are distributed to the resulting wordpieces. All the modeling will be based on
        wordpieces. If this is `None` (the default), the user is expected to use
`PretrainedTransformerMismatchedIndexer` and `PretrainedTransformerMismatchedEmbedder`,
and the modeling will be on the word-level.
max_sentences: `int`, optional (default = `None`)
The maximum number of sentences in each document to keep. By default keeps all sentences.
remove_singleton_clusters : `bool`, optional (default = `True`)
        Some datasets contain clusters that are singletons (i.e. no coreferents). This option
        allows such clusters to be removed.
# Returns
An `Instance` containing the following `Fields`:
text : `TextField`
The text of the full document.
spans : `ListField[SpanField]`
A ListField containing the spans represented as `SpanFields`
with respect to the document text.
span_labels : `SequenceLabelField`, optional
The id of the cluster which each possible span belongs to, or -1 if it does
not belong to a cluster. As these labels have variable length (it depends on
        how many spans we are considering), we represent this as a `SequenceLabelField`
with respect to the spans `ListField`.
"""
if max_sentences is not None and len(sentences) > max_sentences:
sentences = sentences[:max_sentences]
total_length = sum(len(sentence) for sentence in sentences)
if gold_clusters is not None:
new_gold_clusters = []
for cluster in gold_clusters:
new_cluster = []
for mention in cluster:
if mention[1] < total_length:
new_cluster.append(mention)
if new_cluster:
new_gold_clusters.append(new_cluster)
gold_clusters = new_gold_clusters
flattened_sentences = [_normalize_word(word) for sentence in sentences for word in sentence]
if wordpiece_modeling_tokenizer is not None:
flat_sentences_tokens, offsets = wordpiece_modeling_tokenizer.intra_word_tokenize(
flattened_sentences
)
flattened_sentences = [t.text for t in flat_sentences_tokens]
else:
flat_sentences_tokens = [Token(word) for word in flattened_sentences]
text_field = TextField(flat_sentences_tokens, token_indexers)
cluster_dict = {}
if gold_clusters is not None:
gold_clusters = _canonicalize_clusters(gold_clusters)
if remove_singleton_clusters:
gold_clusters = [cluster for cluster in gold_clusters if len(cluster) > 1]
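        # When wordpiece modeling is used, `offsets` (from `intra_word_tokenize` above) maps
        # each word index to the inclusive wordpiece span it became, so gold mention
        # boundaries are remapped onto wordpieces here.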
if wordpiece_modeling_tokenizer is not None:
for cluster in gold_clusters:
for mention_id, mention in enumerate(cluster):
start = offsets[mention[0]][0]
end = offsets[mention[1]][1]
cluster[mention_id] = (start, end)
for cluster_id, cluster in enumerate(gold_clusters):
for mention in cluster:
cluster_dict[tuple(mention)] = cluster_id
spans: List[Field] = []
span_labels: Optional[List[int]] = [] if gold_clusters is not None else None
sentence_offset = 0
for sentence in sentences:
for start, end in enumerate_spans(
sentence, offset=sentence_offset, max_span_width=max_span_width
):
if wordpiece_modeling_tokenizer is not None:
start = offsets[start][0]
end = offsets[end][1]
# `enumerate_spans` uses word-level width limit; here we apply it to wordpieces
# We have to do this check here because we use a span width embedding that has
# only `max_span_width` entries, and since we are doing wordpiece
# modeling, the span width embedding operates on wordpiece lengths. So a check
# here is necessary or else we wouldn't know how many entries there would be.
if end - start + 1 > max_span_width:
continue
# We also don't generate spans that contain special tokens
if start < len(wordpiece_modeling_tokenizer.single_sequence_start_tokens):
continue
if end >= len(flat_sentences_tokens) - len(
wordpiece_modeling_tokenizer.single_sequence_end_tokens
):
continue
if span_labels is not None:
if (start, end) in cluster_dict:
span_labels.append(cluster_dict[(start, end)])
else:
span_labels.append(-1)
spans.append(SpanField(start, end, text_field))
sentence_offset += len(sentence)
span_field = ListField(spans)
metadata: Dict[str, Any] = {"original_text": flattened_sentences}
if gold_clusters is not None:
metadata["clusters"] = gold_clusters
metadata_field = MetadataField(metadata)
fields: Dict[str, Field] = {
"text": text_field,
"spans": span_field,
"metadata": metadata_field,
}
if span_labels is not None:
fields["span_labels"] = SequenceLabelField(span_labels, span_field)
return Instance(fields)
def _normalize_word(word):
if word in ("/.", "/?"):
return word[1:]
else:
return word
def _canonicalize_clusters(clusters: List[List[Tuple[int, int]]]) -> List[List[Tuple[int, int]]]:
"""
The data might include 2 annotated spans which are identical,
but have different ids. This checks all clusters for spans which are
identical, and if it finds any, merges the clusters containing the
identical spans.
"""
merged_clusters: List[Set[Tuple[int, int]]] = []
for cluster in clusters:
cluster_with_overlapping_mention = None
for mention in cluster:
# Look at clusters we have already processed to
# see if they contain a mention in the current
# cluster for comparison.
for cluster2 in merged_clusters:
if mention in cluster2:
# first cluster in merged clusters
# which contains this mention.
cluster_with_overlapping_mention = cluster2
break
# Already encountered overlap - no need to keep looking.
if cluster_with_overlapping_mention is not None:
break
if cluster_with_overlapping_mention is not None:
# Merge cluster we are currently processing into
# the cluster in the processed list.
cluster_with_overlapping_mention.update(cluster)
else:
merged_clusters.append(set(cluster))
return [list(c) for c in merged_clusters]
| allennlp-models-main | allennlp_models/coref/util.py |
"""
Coreference resolution is defined as follows: given a document, find and cluster entity mentions.
"""
from allennlp_models.coref.dataset_readers.conll import ConllCorefReader
from allennlp_models.coref.dataset_readers.preco import PrecoReader
from allennlp_models.coref.dataset_readers.winobias import WinobiasReader
from allennlp_models.coref.models.coref import CoreferenceResolver
from allennlp_models.coref.predictors.coref import CorefPredictor
| allennlp-models-main | allennlp_models/coref/__init__.py |
from typing import Any, Dict, List, Set, Tuple
import torch
from allennlp.nn.util import dist_reduce_sum
from allennlp.training.metrics.metric import Metric
@Metric.register("mention_recall")
class MentionRecall(Metric):
def __init__(self) -> None:
self._num_gold_mentions = 0
self._num_recalled_mentions = 0
def __call__(
self, # type: ignore
batched_top_spans: torch.Tensor,
batched_metadata: List[Dict[str, Any]],
):
num_gold_mentions = 0
num_recalled_mentions = 0
for top_spans, metadata in zip(batched_top_spans.tolist(), batched_metadata):
gold_mentions: Set[Tuple[int, int]] = {
mention for cluster in metadata["clusters"] for mention in cluster
}
predicted_spans: Set[Tuple[int, int]] = {(span[0], span[1]) for span in top_spans}
num_gold_mentions += len(gold_mentions)
num_recalled_mentions += len(gold_mentions & predicted_spans)
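        # Sum the per-batch counts across distributed workers before updating the totals.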
self._num_gold_mentions += dist_reduce_sum(num_gold_mentions)
self._num_recalled_mentions += dist_reduce_sum(num_recalled_mentions)
def get_metric(self, reset: bool = False) -> float:
if self._num_gold_mentions == 0:
recall = 0.0
else:
recall = self._num_recalled_mentions / self._num_gold_mentions
if reset:
self.reset()
return recall
def reset(self):
self._num_gold_mentions = 0
self._num_recalled_mentions = 0
| allennlp-models-main | allennlp_models/coref/metrics/mention_recall.py |
| allennlp-models-main | allennlp_models/coref/metrics/__init__.py |
from typing import Any, Dict, List, Tuple
from collections import Counter
from scipy.optimize import linear_sum_assignment
import numpy as np
import torch
from allennlp.nn.util import dist_reduce_sum
from allennlp.training.metrics.metric import Metric
@Metric.register("conll_coref_scores")
class ConllCorefScores(Metric):
supports_distributed = True
def __init__(self, allow_singletons=False) -> None:
self.scorers = [Scorer(m) for m in (Scorer.muc, Scorer.b_cubed, Scorer.ceafe)]
self.allow_singletons = allow_singletons
def __call__(
self, # type: ignore
top_spans: torch.Tensor,
antecedent_indices: torch.Tensor,
predicted_antecedents: torch.Tensor,
metadata_list: List[Dict[str, Any]],
):
"""
# Parameters
top_spans : `torch.Tensor`
(start, end) indices for all spans kept after span pruning in the model.
Expected shape: (batch_size, num_spans, 2)
antecedent_indices : `torch.Tensor`
For each span, the indices of all allowed antecedents for that span.
Expected shape: (batch_size, num_spans, num_antecedents)
predicted_antecedents : `torch.Tensor`
For each span, this contains the index (into antecedent_indices) of the most likely
antecedent for that span.
Expected shape: (batch_size, num_spans)
metadata_list : `List[Dict[str, Any]]`
A metadata dictionary for each instance in the batch. We use the "clusters" key from
this dictionary, which has the annotated gold coreference clusters for that instance.
"""
top_spans, antecedent_indices, predicted_antecedents = self.detach_tensors(
top_spans, antecedent_indices, predicted_antecedents
)
# They need to be in CPU because Scorer.ceafe uses a SciPy function.
top_spans = top_spans.cpu()
antecedent_indices = antecedent_indices.cpu()
predicted_antecedents = predicted_antecedents.cpu()
for i, metadata in enumerate(metadata_list):
gold_clusters, mention_to_gold = self.get_gold_clusters(metadata["clusters"])
predicted_clusters, mention_to_predicted = self.get_predicted_clusters(
top_spans[i], antecedent_indices[i], predicted_antecedents[i], self.allow_singletons
)
for scorer in self.scorers:
scorer.update(
predicted_clusters, gold_clusters, mention_to_predicted, mention_to_gold
)
def get_metric(self, reset: bool = False) -> Tuple[float, float, float]:
metrics = (lambda e: e.get_precision(), lambda e: e.get_recall(), lambda e: e.get_f1())
precision, recall, f1_score = tuple(
sum(metric(e) for e in self.scorers) / len(self.scorers) for metric in metrics
)
if reset:
self.reset()
return precision, recall, f1_score
def reset(self):
self.scorers = [Scorer(metric) for metric in (Scorer.muc, Scorer.b_cubed, Scorer.ceafe)]
@staticmethod
def get_gold_clusters(gold_clusters):
gold_clusters = [tuple(tuple(m) for m in gc) for gc in gold_clusters]
mention_to_gold = {}
for gold_cluster in gold_clusters:
for mention in gold_cluster:
mention_to_gold[mention] = gold_cluster
return gold_clusters, mention_to_gold
@staticmethod
def get_predicted_clusters(
top_spans: torch.Tensor, # (num_spans, 2)
antecedent_indices: torch.Tensor, # (num_spans, num_antecedents)
predicted_antecedents: torch.Tensor, # (num_spans,)
allow_singletons: bool,
) -> Tuple[
List[Tuple[Tuple[int, int], ...]], Dict[Tuple[int, int], Tuple[Tuple[int, int], ...]]
]:
predicted_clusters_to_ids: Dict[Tuple[int, int], int] = {}
clusters: List[List[Tuple[int, int]]] = []
for i, predicted_antecedent in enumerate(predicted_antecedents):
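            # A predicted antecedent < 0 means the model chose the dummy antecedent,
            # i.e. this span does not link back to any previous span.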
if predicted_antecedent < 0:
continue
# Find predicted index in the antecedent spans.
predicted_index = antecedent_indices[i, predicted_antecedent]
# Must be a previous span.
if allow_singletons:
assert i >= predicted_index
else:
assert i > predicted_index
antecedent_span: Tuple[int, int] = tuple( # type: ignore
top_spans[predicted_index].tolist()
)
# Check if we've seen the span before.
if antecedent_span in predicted_clusters_to_ids.keys():
predicted_cluster_id: int = predicted_clusters_to_ids[antecedent_span]
else:
# We start a new cluster.
predicted_cluster_id = len(clusters)
clusters.append([antecedent_span])
predicted_clusters_to_ids[antecedent_span] = predicted_cluster_id
mention: Tuple[int, int] = tuple(top_spans[i].tolist()) # type: ignore
clusters[predicted_cluster_id].append(mention)
predicted_clusters_to_ids[mention] = predicted_cluster_id
# finalise the spans and clusters.
final_clusters = [tuple(cluster) for cluster in clusters]
# Return a mapping of each mention to the cluster containing it.
mention_to_cluster: Dict[Tuple[int, int], Tuple[Tuple[int, int], ...]] = {
mention: final_clusters[cluster_id]
for mention, cluster_id in predicted_clusters_to_ids.items()
}
return final_clusters, mention_to_cluster
class Scorer:
"""
Mostly borrowed from <https://github.com/clarkkev/deep-coref/blob/master/evaluation.py>
"""
def __init__(self, metric):
self.precision_numerator = 0
self.precision_denominator = 0
self.recall_numerator = 0
self.recall_denominator = 0
self.metric = metric
def update(self, predicted, gold, mention_to_predicted, mention_to_gold):
if self.metric == self.ceafe:
p_num, p_den, r_num, r_den = self.metric(predicted, gold)
else:
p_num, p_den = self.metric(predicted, mention_to_gold)
r_num, r_den = self.metric(gold, mention_to_predicted)
self.precision_numerator += dist_reduce_sum(p_num)
self.precision_denominator += dist_reduce_sum(p_den)
self.recall_numerator += dist_reduce_sum(r_num)
self.recall_denominator += dist_reduce_sum(r_den)
def get_f1(self):
precision = self.get_precision()
recall = self.get_recall()
return 0 if precision + recall == 0 else 2 * precision * recall / (precision + recall)
def get_recall(self):
if self.recall_denominator == 0:
return 0
else:
return self.recall_numerator / self.recall_denominator
def get_precision(self):
if self.precision_denominator == 0:
return 0
else:
return self.precision_numerator / self.precision_denominator
def get_prf(self):
return self.get_precision(), self.get_recall(), self.get_f1()
@staticmethod
def b_cubed(clusters, mention_to_gold):
"""
Averaged per-mention precision and recall.
<https://pdfs.semanticscholar.org/cfe3/c24695f1c14b78a5b8e95bcbd1c666140fd1.pdf>
"""
numerator, denominator = 0, 0
for cluster in clusters:
if len(cluster) == 1:
continue
gold_counts = Counter()
correct = 0
for mention in cluster:
if mention in mention_to_gold:
gold_counts[tuple(mention_to_gold[mention])] += 1
for cluster2, count in gold_counts.items():
if len(cluster2) != 1:
correct += count * count
numerator += correct / float(len(cluster))
denominator += len(cluster)
return numerator, denominator
@staticmethod
def muc(clusters, mention_to_gold):
"""
Counts the mentions in each predicted cluster which need to be re-allocated in
order for each predicted cluster to be contained by the respective gold cluster.
<https://aclweb.org/anthology/M/M95/M95-1005.pdf>
"""
true_p, all_p = 0, 0
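        # For each predicted cluster: the number of links is |cluster| - 1, and the number of
        # correct links is |cluster| minus the unmatched mentions and the number of distinct
        # gold clusters the cluster touches.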
for cluster in clusters:
all_p += len(cluster) - 1
true_p += len(cluster)
linked = set()
for mention in cluster:
if mention in mention_to_gold:
linked.add(mention_to_gold[mention])
else:
true_p -= 1
true_p -= len(linked)
return true_p, all_p
@staticmethod
def phi4(gold_clustering, predicted_clustering):
"""
Subroutine for ceafe. Computes the mention F measure between gold and
predicted mentions in a cluster.
"""
return (
2
* len([mention for mention in gold_clustering if mention in predicted_clustering])
/ (len(gold_clustering) + len(predicted_clustering))
)
@staticmethod
def ceafe(clusters, gold_clusters):
"""
Computes the Constrained Entity-Alignment F-Measure (CEAF) for evaluating coreference.
Gold and predicted mentions are aligned into clusterings which maximise a metric - in
this case, the F measure between gold and predicted clusters.
<https://www.semanticscholar.org/paper/On-Coreference-Resolution-Performance-Metrics-Luo/de133c1f22d0dfe12539e25dda70f28672459b99>
"""
clusters = [cluster for cluster in clusters if len(cluster) != 1]
scores = np.zeros((len(gold_clusters), len(clusters)))
for i, gold_cluster in enumerate(gold_clusters):
for j, cluster in enumerate(clusters):
scores[i, j] = Scorer.phi4(gold_cluster, cluster)
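        # `linear_sum_assignment` minimizes total cost, so the similarity matrix is negated
        # to find the one-to-one alignment of gold and predicted clusters that maximizes the
        # summed phi4 similarity (the Hungarian / Kuhn-Munkres algorithm).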
row, col = linear_sum_assignment(-scores)
similarity = sum(scores[row, col])
return similarity, len(clusters), similarity, len(gold_clusters)
| allennlp-models-main | allennlp_models/coref/metrics/conll_coref_scores.py |
import logging
import collections
from typing import Dict, List, Optional, Tuple, DefaultDict
from allennlp.common.file_utils import cached_path
from allennlp.data.dataset_readers.dataset_reader import DatasetReader
from allennlp.data.instance import Instance
from allennlp.data.tokenizers import PretrainedTransformerTokenizer
from allennlp.data.token_indexers import SingleIdTokenIndexer, TokenIndexer
from allennlp_models.common.ontonotes import Ontonotes
from allennlp_models.coref.util import make_coref_instance
logger = logging.getLogger(__name__)
@DatasetReader.register("coref")
class ConllCorefReader(DatasetReader):
"""
Reads a single CoNLL-formatted file. This is the same file format as used in the
:class:`~allennlp.data.dataset_readers.semantic_role_labelling.SrlReader`, but is preprocessed
to dump all documents into a single file per train, dev and test split. See
scripts/compile_coref_data.sh for more details of how to pre-process the Ontonotes 5.0 data
into the correct format.
Returns a `Dataset` where the `Instances` have four fields : `text`, a `TextField`
containing the full document text, `spans`, a `ListField[SpanField]` of inclusive start and
end indices for span candidates, and `metadata`, a `MetadataField` that stores the instance's
original text. For data with gold cluster labels, we also include the original `clusters`
(a list of list of index pairs) and a `SequenceLabelField` of cluster ids for every span
candidate.
# Parameters
max_span_width : `int`, required.
The maximum width of candidate spans to consider.
token_indexers : `Dict[str, TokenIndexer]`, optional
This is used to index the words in the document. See :class:`TokenIndexer`.
Default is `{"tokens": SingleIdTokenIndexer()}`.
wordpiece_modeling_tokenizer: `PretrainedTransformerTokenizer`, optional (default = `None`)
        If not None, this dataset reader does subword tokenization using the supplied tokenizer
        and distributes the labels to the resulting wordpieces. All the modeling will be based on
        wordpieces. If this is `None` (the default), the user is expected to use
`PretrainedTransformerMismatchedIndexer` and `PretrainedTransformerMismatchedEmbedder`,
and the modeling will be on the word-level.
max_sentences: `int`, optional (default = `None`)
The maximum number of sentences in each document to keep. By default keeps all sentences.
remove_singleton_clusters : `bool`, optional (default = `False`)
        Some datasets contain clusters that are singletons (i.e. no coreferents). This option
        allows such clusters to be removed. Ontonotes shouldn't have these, so this option
        should be used for testing only.
"""
def __init__(
self,
max_span_width: int,
token_indexers: Dict[str, TokenIndexer] = None,
wordpiece_modeling_tokenizer: Optional[PretrainedTransformerTokenizer] = None,
max_sentences: int = None,
remove_singleton_clusters: bool = False,
**kwargs,
) -> None:
super().__init__(**kwargs)
self._max_span_width = max_span_width
self._token_indexers = token_indexers or {"tokens": SingleIdTokenIndexer()}
self._wordpiece_modeling_tokenizer = wordpiece_modeling_tokenizer
self._max_sentences = max_sentences
self._remove_singleton_clusters = remove_singleton_clusters
def _read(self, file_path: str):
# if `file_path` is a URL, redirect to the cache
file_path = cached_path(file_path)
ontonotes_reader = Ontonotes()
for sentences in ontonotes_reader.dataset_document_iterator(file_path):
clusters: DefaultDict[int, List[Tuple[int, int]]] = collections.defaultdict(list)
total_tokens = 0
for sentence in sentences:
for typed_span in sentence.coref_spans:
# Coref annotations are on a _per sentence_
# basis, so we need to adjust them to be relative
# to the length of the document.
span_id, (start, end) = typed_span
clusters[span_id].append((start + total_tokens, end + total_tokens))
total_tokens += len(sentence.words)
yield self.text_to_instance([s.words for s in sentences], list(clusters.values()))
def text_to_instance(
self, # type: ignore
sentences: List[List[str]],
gold_clusters: Optional[List[List[Tuple[int, int]]]] = None,
) -> Instance:
return make_coref_instance(
sentences,
self._token_indexers,
self._max_span_width,
gold_clusters,
self._wordpiece_modeling_tokenizer,
self._max_sentences,
self._remove_singleton_clusters,
)
| allennlp-models-main | allennlp_models/coref/dataset_readers/conll.py |
| allennlp-models-main | allennlp_models/coref/dataset_readers/__init__.py |
import json
import logging
from typing import Dict, List, Optional, Tuple
from allennlp.common.file_utils import cached_path
from allennlp.data.dataset_readers.dataset_reader import DatasetReader
from allennlp.data.instance import Instance
from allennlp.data.tokenizers import PretrainedTransformerTokenizer
from allennlp.data.token_indexers import SingleIdTokenIndexer, TokenIndexer
from allennlp_models.coref.util import make_coref_instance
logger = logging.getLogger(__name__)
@DatasetReader.register("preco")
class PrecoReader(DatasetReader):
"""
Reads a single JSON-lines file for [the PreCo dataset](https://www.aclweb.org/anthology/D18-1016.pdf).
Each line contains a "sentences" key for a list of sentences and a "mention_clusters" key
for the clusters.
Returns a `Dataset` where the `Instances` have four fields : `text`, a `TextField`
containing the full document text, `spans`, a `ListField[SpanField]` of inclusive start and
end indices for span candidates, and `metadata`, a `MetadataField` that stores the instance's
original text. For data with gold cluster labels, we also include the original `clusters`
(a list of list of index pairs) and a `SequenceLabelField` of cluster ids for every span
candidate.
# Parameters
max_span_width : `int`, required.
The maximum width of candidate spans to consider.
token_indexers : `Dict[str, TokenIndexer]`, optional
This is used to index the words in the document. See :class:`TokenIndexer`.
Default is `{"tokens": SingleIdTokenIndexer()}`.
wordpiece_modeling_tokenizer: `PretrainedTransformerTokenizer`, optional (default = `None`)
        If not None, this dataset reader does subword tokenization using the supplied tokenizer
        and distributes the labels to the resulting wordpieces. All the modeling will be based on
        wordpieces. If this is `None` (the default), the user is expected to use
`PretrainedTransformerMismatchedIndexer` and `PretrainedTransformerMismatchedEmbedder`,
and the modeling will be on the word-level.
max_sentences: `int`, optional (default = `None`)
The maximum number of sentences in each document to keep. By default keeps all sentences.
    remove_singleton_clusters : `bool`, optional (default = `True`)
        Some datasets contain clusters that are singletons (i.e. no coreferents). This option
        allows such clusters to be removed.
"""
def __init__(
self,
max_span_width: int,
token_indexers: Dict[str, TokenIndexer] = None,
wordpiece_modeling_tokenizer: Optional[PretrainedTransformerTokenizer] = None,
max_sentences: int = None,
remove_singleton_clusters: bool = True,
**kwargs,
) -> None:
super().__init__(**kwargs)
self._max_span_width = max_span_width
self._token_indexers = token_indexers or {"tokens": SingleIdTokenIndexer()}
self._wordpiece_modeling_tokenizer = wordpiece_modeling_tokenizer
self._max_sentences = max_sentences
self._remove_singleton_clusters = remove_singleton_clusters
def _read(self, file_path: str):
# if `file_path` is a URL, redirect to the cache
file_path = cached_path(file_path)
with open(file_path, "r") as preco_file:
for line in preco_file:
example = json.loads(line)
yield self.text_to_instance(example["sentences"], example["mention_clusters"])
def text_to_instance(
self, # type: ignore
sentences: List[List[str]],
gold_clusters: Optional[List[List[Tuple[int, int, int]]]] = None,
) -> Instance:
sentence_offsets = [0]
for sentence in sentences[:-1]:
sent_length = len(sentence)
if sentence == [" "]: # paragraph separator
sent_length = 0 # we ignore them
sentence_offsets.append(sentence_offsets[-1] + sent_length)
sentences = [sentence for sentence in sentences if sentence != [" "]]
# Convert (sent_idx, rel_start, rel_exclusive_end) to (abs_start, abs_inclusive_end)
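        # e.g. (illustrative) a mention (1, 0, 2) in a document whose first sentence has
        # 5 tokens becomes (5, 6): absolute start 0 + 5, inclusive end 2 + 5 - 1.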
if gold_clusters:
for cluster in gold_clusters:
for mention_id, (sent_idx, start, end) in enumerate(cluster):
start = start + sentence_offsets[sent_idx]
end = end + sentence_offsets[sent_idx] - 1 # exclusive -> inclusive
cluster[mention_id] = (start, end) # type: ignore
return make_coref_instance(
sentences,
self._token_indexers,
self._max_span_width,
gold_clusters, # type: ignore
self._wordpiece_modeling_tokenizer,
self._max_sentences,
self._remove_singleton_clusters,
)
| allennlp-models-main | allennlp_models/coref/dataset_readers/preco.py |
import logging
import collections
from typing import Any, Dict, List, Optional, Tuple, DefaultDict
from allennlp.common.file_utils import cached_path
from allennlp.data.dataset_readers.dataset_reader import DatasetReader
from allennlp.data.fields import (
Field,
ListField,
TextField,
SpanField,
MetadataField,
SequenceLabelField,
)
from allennlp.data.instance import Instance
from allennlp.data.tokenizers import Token
from allennlp.data.token_indexers import SingleIdTokenIndexer, TokenIndexer
from allennlp.data.dataset_readers.dataset_utils import enumerate_spans
logger = logging.getLogger(__name__)
@DatasetReader.register("winobias")
class WinobiasReader(DatasetReader):
"""
A dataset reader for the dataset described in
[Gender Bias in Coreference Resolution: Evaluation and Debiasing Methods](https://arxiv.org/abs/1804.06876)
Winobias is a dataset to analyse the issue of gender bias in co-reference
resolution. It contains simple sentences with pro/anti stereotypical gender
associations with which to measure the bias of a coreference system trained
on another corpus. It is effectively a toy dataset and as such, uses very
simplistic language; it has little use outside of evaluating a model for bias.
The dataset is formatted with a single sentence per line, with a maximum of 2
non-nested coreference clusters annotated using either square or round brackets.
For example:
> [The salesperson] sold (some books) to the librarian because [she] was trying to sell (them).
Returns a list of `Instances` which have four fields : `text`, a `TextField`
containing the full sentence text, `spans`, a `ListField[SpanField]` of inclusive start and
end indices for span candidates, and `metadata`, a `MetadataField` that stores the instance's
    original text. For data with gold cluster labels, we also include the original `clusters`
    (a list of list of index pairs) in the `metadata`, and a `SequenceLabelField` of cluster
    ids for every span candidate.
# Parameters
max_span_width : `int`, required.
The maximum width of candidate spans to consider.
token_indexers : `Dict[str, TokenIndexer]`, optional
This is used to index the words in the sentence. See :class:`TokenIndexer`.
Default is `{"tokens": SingleIdTokenIndexer()}`.
"""
def __init__(
self,
max_span_width: int,
token_indexers: Dict[str, TokenIndexer] = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self._max_span_width = max_span_width
self._token_indexers = token_indexers or {"tokens": SingleIdTokenIndexer()}
def _read(self, file_path: str):
for sentence in open(cached_path(file_path), "r"):
tokens = sentence.strip().split(" ")
clusters: DefaultDict[int, List[Tuple[int, int]]] = collections.defaultdict(list)
words = []
for index, token in enumerate(tokens):
# Coreference is annotated using [square brackets]
# or (round brackets) around coreferent phrases.
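                # e.g. "[The" opens a cluster-0 span at this index, "salesperson]" later
                # closes it, and a token like "[she]" forms a single-token span.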
if "[" in token and "]" in token:
clusters[0].append((index, index))
elif "[" in token:
clusters[0].append((index, index))
elif "]" in token:
old_span = clusters[0][-1]
clusters[0][-1] = (old_span[0], index)
if "(" in token and ")" in token:
clusters[1].append((index, index))
elif "(" in token:
clusters[1].append((index, index))
elif ")" in token:
old_span = clusters[1][-1]
clusters[1][-1] = (old_span[0], index)
if token.endswith("."):
# Winobias is tokenised, but not for full stops.
# We'll just special case them here.
token = token[:-1]
words.append(token.strip("[]()"))
words.append(".")
else:
words.append(token.strip("[]()"))
yield self.text_to_instance([Token(x) for x in words], [x for x in clusters.values()])
def text_to_instance(
self, # type: ignore
sentence: List[Token],
gold_clusters: Optional[List[List[Tuple[int, int]]]] = None,
) -> Instance:
"""
# Parameters
sentence : `List[Token]`, required.
The already tokenised sentence to analyse.
gold_clusters : `Optional[List[List[Tuple[int, int]]]]`, optional (default = `None`)
A list of all clusters in the sentence, represented as word spans. Each cluster
contains some number of spans, which can be nested and overlap, but will never
exactly match between clusters.
# Returns
An `Instance` containing the following `Fields`:
text : `TextField`
The text of the full sentence.
spans : `ListField[SpanField]`
A ListField containing the spans represented as `SpanFields`
with respect to the sentence text.
span_labels : `SequenceLabelField`, optional
The id of the cluster which each possible span belongs to, or -1 if it does
not belong to a cluster. As these labels have variable length (it depends on
            how many spans we are considering), we represent this as a `SequenceLabelField`
with respect to the spans `ListField`.
"""
metadata: Dict[str, Any] = {"original_text": sentence}
if gold_clusters is not None:
metadata["clusters"] = gold_clusters
text_field = TextField(sentence, self._token_indexers)
cluster_dict = {}
if gold_clusters is not None:
for cluster_id, cluster in enumerate(gold_clusters):
for mention in cluster:
cluster_dict[tuple(mention)] = cluster_id
spans: List[Field] = []
span_labels: Optional[List[int]] = [] if gold_clusters is not None else None
for start, end in enumerate_spans(sentence, max_span_width=self._max_span_width):
if span_labels is not None:
if (start, end) in cluster_dict:
span_labels.append(cluster_dict[(start, end)])
else:
span_labels.append(-1)
spans.append(SpanField(start, end, text_field))
span_field = ListField(spans)
metadata_field = MetadataField(metadata)
fields: Dict[str, Field] = {
"text": text_field,
"spans": span_field,
"metadata": metadata_field,
}
if span_labels is not None:
fields["span_labels"] = SequenceLabelField(span_labels, span_field)
return Instance(fields)
| allennlp-models-main | allennlp_models/coref/dataset_readers/winobias.py |
from typing import List, Dict
from spacy.tokens import Doc
import numpy
from allennlp.common.util import JsonDict
from allennlp.common.util import get_spacy_model
from allennlp.data import DatasetReader, Instance
from allennlp.data.fields import ListField, SequenceLabelField
from allennlp.models import Model
from allennlp.predictors.predictor import Predictor
@Predictor.register("coreference_resolution")
class CorefPredictor(Predictor):
"""
Predictor for the [`CoreferenceResolver`](../models/coreference_resolution/coref.md) model.
Registered as a `Predictor` with name "coreference_resolution".
"""
def __init__(
self, model: Model, dataset_reader: DatasetReader, language: str = "en_core_web_sm"
) -> None:
super().__init__(model, dataset_reader)
# We have to use spacy to tokenize our document here, because we need
# to also know sentence boundaries to propose valid mentions.
self._spacy = get_spacy_model(language, pos_tags=True, parse=True, ner=False)
def predict(self, document: str) -> JsonDict:
"""
Predict the coreference clusters in the given document.
```
{
"document": [tokenized document text]
"clusters":
[
[
[start_index, end_index],
[start_index, end_index]
],
[
[start_index, end_index],
[start_index, end_index],
[start_index, end_index],
],
....
]
}
```
# Parameters
document : `str`
A string representation of a document.
# Returns
A dictionary representation of the predicted coreference clusters.
"""
return self.predict_json({"document": document})
def predict_tokenized(self, tokenized_document: List[str]) -> JsonDict:
"""
Predict the coreference clusters in the given document.
# Parameters
tokenized_document : `List[str]`
A list of words representation of a tokenized document.
# Returns
A dictionary representation of the predicted coreference clusters.
"""
instance = self._words_list_to_instance(tokenized_document)
return self.predict_instance(instance)
def predictions_to_labeled_instances(
self, instance: Instance, outputs: Dict[str, numpy.ndarray]
) -> List[Instance]:
"""
Takes each predicted cluster and makes it into a labeled `Instance` with only that
cluster labeled, so we can compute gradients of the loss `on the model's prediction of that
cluster`. This lets us run interpretation methods using those gradients. See superclass
docstring for more info.
"""
# Digging into an Instance makes mypy go crazy, because we have all kinds of things where
# the type has been lost. So there are lots of `type: ignore`s here...
predicted_clusters = outputs["clusters"]
span_field: ListField = instance["spans"] # type: ignore
instances = []
for cluster in predicted_clusters:
new_instance = instance.duplicate()
span_labels = [
0 if (span.span_start, span.span_end) in cluster else -1 # type: ignore
for span in span_field
] # type: ignore
new_instance.add_field(
"span_labels", SequenceLabelField(span_labels, span_field), self._model.vocab
)
new_instance["metadata"].metadata["clusters"] = [cluster] # type: ignore
instances.append(new_instance)
if not instances:
# No predicted clusters; we just give an empty coref prediction.
new_instance = instance.duplicate()
span_labels = [-1] * len(span_field) # type: ignore
new_instance.add_field(
"span_labels", SequenceLabelField(span_labels, span_field), self._model.vocab
)
new_instance["metadata"].metadata["clusters"] = [] # type: ignore
instances.append(new_instance)
return instances
@staticmethod
def replace_corefs(document: Doc, clusters: List[List[List[int]]]) -> str:
"""
Uses a list of coreference clusters to convert a spacy document into a
string, where each coreference is replaced by its main mention.
"""
# Original tokens with correct whitespace
resolved = list(tok.text_with_ws for tok in document)
for cluster in clusters:
# The main mention is the first item in the cluster
mention_start, mention_end = cluster[0][0], cluster[0][1] + 1
mention_span = document[mention_start:mention_end]
# The coreferences are all items following the first in the cluster
for coref in cluster[1:]:
final_token = document[coref[1]]
# In both of the following cases, the first token in the coreference
# is replaced with the main mention, while all subsequent tokens
# are masked out with "", so that they can be eliminated from
# the returned document during "".join(resolved).
# The first case attempts to correctly handle possessive coreferences
# by inserting "'s" between the mention and the final whitespace
# These include my, his, her, their, our, etc.
                # Disclaimer: Grammar errors can occur when the main mention is plural,
                #             e.g. "zebras" becomes "zebras's" because this case isn't
                #             being explicitly checked and handled.
if final_token.tag_ in ["PRP$", "POS"]:
resolved[coref[0]] = mention_span.text + "'s" + final_token.whitespace_
else:
# If not possessive, then replace first token with main mention directly
resolved[coref[0]] = mention_span.text + final_token.whitespace_
# Mask out remaining tokens
for i in range(coref[0] + 1, coref[1] + 1):
resolved[i] = ""
return "".join(resolved)
def coref_resolved(self, document: str) -> str:
"""
Produce a document where each coreference is replaced by its main mention
# Parameters
document : `str`
A string representation of a document.
# Returns
A string with each coreference replaced by its main mention
"""
spacy_document = self._spacy(document)
clusters = self.predict(document).get("clusters")
# Passing a document with no coreferences returns its original form
if not clusters:
return document
return self.replace_corefs(spacy_document, clusters)
def _words_list_to_instance(self, words: List[str]) -> Instance:
"""
        Create an instance from a list of words representing an already tokenized document,
        skipping tokenization when that information already exists for the user.
"""
spacy_document = Doc(self._spacy.vocab, words=words)
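        # Run the remaining spaCy pipeline components over the pre-built Doc so that
        # sentence boundaries are available when proposing mention spans.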
for pipe in filter(None, self._spacy.pipeline):
pipe[1](spacy_document)
sentences = [[token.text for token in sentence] for sentence in spacy_document.sents]
instance = self._dataset_reader.text_to_instance(sentences)
return instance
def _json_to_instance(self, json_dict: JsonDict) -> Instance:
"""
Expects JSON that looks like `{"document": "string of document text"}`
"""
document = json_dict["document"]
spacy_document = self._spacy(document)
sentences = [[token.text for token in sentence] for sentence in spacy_document.sents]
instance = self._dataset_reader.text_to_instance(sentences)
return instance
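# Illustrative usage sketch (not executed here); the archive path below is hypothetical:
#
#   from allennlp.predictors.predictor import Predictor
#   predictor = Predictor.from_path("path/to/coref-model.tar.gz", "coreference_resolution")
#   predictor.coref_resolved("The woman reading a newspaper sat on the bench with her dog.")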
| allennlp-models-main | allennlp_models/coref/predictors/coref.py |
| allennlp-models-main | allennlp_models/coref/predictors/__init__.py |
import logging
import math
from typing import Any, Dict, List, Tuple
import torch
import torch.nn.functional as F
from allennlp.data import TextFieldTensors, Vocabulary
from allennlp.models.model import Model
from allennlp.modules.token_embedders import Embedding
from allennlp.modules import FeedForward, GatedSum
from allennlp.modules import Seq2SeqEncoder, TimeDistributed, TextFieldEmbedder
from allennlp.modules.span_extractors import SelfAttentiveSpanExtractor, EndpointSpanExtractor
from allennlp.nn import util, InitializerApplicator
from allennlp_models.coref.metrics.conll_coref_scores import ConllCorefScores
from allennlp_models.coref.metrics.mention_recall import MentionRecall
logger = logging.getLogger(__name__)
@Model.register("coref")
class CoreferenceResolver(Model):
"""
This `Model` implements the coreference resolution model described in
[Higher-order Coreference Resolution with Coarse-to-fine Inference](https://arxiv.org/pdf/1804.05392.pdf)
by Lee et al., 2018.
The basic outline of this model is to get an embedded representation of each span in the
document. These span representations are scored and used to prune away spans that are unlikely
to occur in a coreference cluster. For the remaining spans, the model decides which antecedent
span (if any) they are coreferent with. The resulting coreference links, after applying
transitivity, imply a clustering of the spans in the document.
# Parameters
vocab : `Vocabulary`
text_field_embedder : `TextFieldEmbedder`
Used to embed the `text` `TextField` we get as input to the model.
context_layer : `Seq2SeqEncoder`
This layer incorporates contextual information for each word in the document.
mention_feedforward : `FeedForward`
This feedforward network is applied to the span representations which is then scored
by a linear layer.
antecedent_feedforward : `FeedForward`
This feedforward network is applied to pairs of span representation, along with any
pairwise features, which is then scored by a linear layer.
feature_size : `int`
The embedding size for all the embedded features, such as distances or span widths.
max_span_width : `int`
The maximum width of candidate spans.
spans_per_word: `float`, required.
A multiplier between zero and one which controls what percentage of candidate mention
spans we retain with respect to the number of words in the document.
max_antecedents: `int`, required.
For each mention which survives the pruning stage, we consider this many antecedents.
coarse_to_fine: `bool`, optional (default = `False`)
Whether or not to apply the coarse-to-fine filtering.
inference_order: `int`, optional (default = `1`)
The number of inference orders. When greater than 1, the span representations are
updated and coreference scores re-computed.
    lexical_dropout : `float`
The probability of dropping out dimensions of the embedded text.
initializer : `InitializerApplicator`, optional (default=`InitializerApplicator()`)
Used to initialize the model parameters.
"""
def __init__(
self,
vocab: Vocabulary,
text_field_embedder: TextFieldEmbedder,
context_layer: Seq2SeqEncoder,
mention_feedforward: FeedForward,
antecedent_feedforward: FeedForward,
feature_size: int,
max_span_width: int,
spans_per_word: float,
max_antecedents: int,
coarse_to_fine: bool = False,
inference_order: int = 1,
lexical_dropout: float = 0.2,
initializer: InitializerApplicator = InitializerApplicator(),
**kwargs
) -> None:
super().__init__(vocab, **kwargs)
self._text_field_embedder = text_field_embedder
self._context_layer = context_layer
self._mention_feedforward = TimeDistributed(mention_feedforward)
self._mention_scorer = TimeDistributed(
torch.nn.Linear(mention_feedforward.get_output_dim(), 1)
)
self._antecedent_feedforward = TimeDistributed(antecedent_feedforward)
self._antecedent_scorer = TimeDistributed(
torch.nn.Linear(antecedent_feedforward.get_output_dim(), 1)
)
self._endpoint_span_extractor = EndpointSpanExtractor(
context_layer.get_output_dim(),
combination="x,y",
num_width_embeddings=max_span_width,
span_width_embedding_dim=feature_size,
bucket_widths=False,
)
self._attentive_span_extractor = SelfAttentiveSpanExtractor(
input_dim=text_field_embedder.get_output_dim()
)
# 10 possible distance buckets.
self._num_distance_buckets = 10
self._distance_embedding = Embedding(
embedding_dim=feature_size, num_embeddings=self._num_distance_buckets
)
self._max_span_width = max_span_width
self._spans_per_word = spans_per_word
self._max_antecedents = max_antecedents
self._coarse_to_fine = coarse_to_fine
if self._coarse_to_fine:
self._coarse2fine_scorer = torch.nn.Linear(
mention_feedforward.get_input_dim(), mention_feedforward.get_input_dim()
)
self._inference_order = inference_order
if self._inference_order > 1:
self._span_updating_gated_sum = GatedSum(mention_feedforward.get_input_dim())
self._mention_recall = MentionRecall()
self._conll_coref_scores = ConllCorefScores()
if lexical_dropout > 0:
self._lexical_dropout = torch.nn.Dropout(p=lexical_dropout)
else:
self._lexical_dropout = lambda x: x
initializer(self)
def forward(
self, # type: ignore
text: TextFieldTensors,
spans: torch.IntTensor,
span_labels: torch.IntTensor = None,
metadata: List[Dict[str, Any]] = None,
) -> Dict[str, torch.Tensor]:
"""
# Parameters
text : `TextFieldTensors`, required.
The output of a `TextField` representing the text of
the document.
spans : `torch.IntTensor`, required.
A tensor of shape (batch_size, num_spans, 2), representing the inclusive start and end
indices of candidate spans for mentions. Comes from a `ListField[SpanField]` of
indices into the text of the document.
span_labels : `torch.IntTensor`, optional (default = `None`).
A tensor of shape (batch_size, num_spans), representing the cluster ids
of each span, or -1 for those which do not appear in any clusters.
metadata : `List[Dict[str, Any]]`, optional (default = `None`).
A metadata dictionary for each instance in the batch. We use the "original_text" and "clusters" keys
from this dictionary, which respectively have the original text and the annotated gold coreference
clusters for that instance.
# Returns
An output dictionary consisting of:
top_spans : `torch.IntTensor`
A tensor of shape `(batch_size, num_spans_to_keep, 2)` representing
the start and end word indices of the top spans that survived the pruning stage.
antecedent_indices : `torch.IntTensor`
A tensor of shape `(num_spans_to_keep, max_antecedents)` representing for each top span
the index (with respect to top_spans) of the possible antecedents the model considered.
predicted_antecedents : `torch.IntTensor`
A tensor of shape `(batch_size, num_spans_to_keep)` representing, for each top span, the
index (with respect to antecedent_indices) of the most likely antecedent. -1 means there
was no predicted link.
loss : `torch.FloatTensor`, optional
A scalar loss to be optimised.
"""
# Shape: (batch_size, document_length, embedding_size)
text_embeddings = self._lexical_dropout(self._text_field_embedder(text))
batch_size = spans.size(0)
document_length = text_embeddings.size(1)
num_spans = spans.size(1)
# Shape: (batch_size, document_length)
text_mask = util.get_text_field_mask(text)
# Shape: (batch_size, num_spans)
span_mask = (spans[:, :, 0] >= 0).squeeze(-1)
# SpanFields return -1 when they are used as padding. As we do
# some comparisons based on span widths when we attend over the
# span representations that we generate from these indices, we
        # need them to be >= 0. This is only relevant in edge cases where
# the number of spans we consider after the pruning stage is >= the
# total number of spans, because in this case, it is possible we might
# consider a masked span.
# Shape: (batch_size, num_spans, 2)
spans = F.relu(spans.float()).long()
# Shape: (batch_size, document_length, encoding_dim)
contextualized_embeddings = self._context_layer(text_embeddings, text_mask)
# Shape: (batch_size, num_spans, 2 * encoding_dim + feature_size)
endpoint_span_embeddings = self._endpoint_span_extractor(contextualized_embeddings, spans)
        # Shape: (batch_size, num_spans, embedding_size)
attended_span_embeddings = self._attentive_span_extractor(text_embeddings, spans)
        # Shape: (batch_size, num_spans, embedding_size + 2 * encoding_dim + feature_size)
span_embeddings = torch.cat([endpoint_span_embeddings, attended_span_embeddings], -1)
# Prune based on mention scores.
num_spans_to_keep = int(math.floor(self._spans_per_word * document_length))
num_spans_to_keep = min(num_spans_to_keep, num_spans)
# Shape: (batch_size, num_spans)
span_mention_scores = self._mention_scorer(
self._mention_feedforward(span_embeddings)
).squeeze(-1)
        # Shape: (batch_size, num_spans_to_keep) for all 3 tensors
top_span_mention_scores, top_span_mask, top_span_indices = util.masked_topk(
span_mention_scores, span_mask, num_spans_to_keep
)
# Shape: (batch_size * num_spans_to_keep)
# torch.index_select only accepts 1D indices, but here
# we need to select spans for each element in the batch.
# This reformats the indices to take into account their
# index into the batch. We precompute this here to make
# the multiple calls to util.batched_index_select below more efficient.
flat_top_span_indices = util.flatten_and_batch_shift_indices(top_span_indices, num_spans)
# Compute final predictions for which spans to consider as mentions.
# Shape: (batch_size, num_spans_to_keep, 2)
top_spans = util.batched_index_select(spans, top_span_indices, flat_top_span_indices)
# Shape: (batch_size, num_spans_to_keep, embedding_size)
top_span_embeddings = util.batched_index_select(
span_embeddings, top_span_indices, flat_top_span_indices
)
# Compute indices for antecedent spans to consider.
max_antecedents = min(self._max_antecedents, num_spans_to_keep)
# Now that we have our variables in terms of num_spans_to_keep, we need to
# compare span pairs to decide each span's antecedent. Each span can only
# have prior spans as antecedents, and we only consider up to max_antecedents
# prior spans. So the first thing we do is construct a matrix mapping a span's
# index to the indices of its allowed antecedents.
# Once we have this matrix, we reformat our variables again to get embeddings
# for all valid antecedents for each span. This gives us variables with shapes
# like (batch_size, num_spans_to_keep, max_antecedents, embedding_size), which
# we can use to make coreference decisions between valid span pairs.
if self._coarse_to_fine:
pruned_antecedents = self._coarse_to_fine_pruning(
top_span_embeddings, top_span_mention_scores, top_span_mask, max_antecedents
)
else:
pruned_antecedents = self._distance_pruning(
top_span_embeddings, top_span_mention_scores, max_antecedents
)
# Shape: (batch_size, num_spans_to_keep, max_antecedents) for all 4 tensors
(
top_partial_coreference_scores,
top_antecedent_mask,
top_antecedent_offsets,
top_antecedent_indices,
) = pruned_antecedents
flat_top_antecedent_indices = util.flatten_and_batch_shift_indices(
top_antecedent_indices, num_spans_to_keep
)
# Shape: (batch_size, num_spans_to_keep, max_antecedents, embedding_size)
top_antecedent_embeddings = util.batched_index_select(
top_span_embeddings, top_antecedent_indices, flat_top_antecedent_indices
)
# Shape: (batch_size, num_spans_to_keep, 1 + max_antecedents)
coreference_scores = self._compute_coreference_scores(
top_span_embeddings,
top_antecedent_embeddings,
top_partial_coreference_scores,
top_antecedent_mask,
top_antecedent_offsets,
)
for _ in range(self._inference_order - 1):
dummy_mask = top_antecedent_mask.new_ones(batch_size, num_spans_to_keep, 1)
# Shape: (batch_size, num_spans_to_keep, 1 + max_antecedents,)
top_antecedent_with_dummy_mask = torch.cat([dummy_mask, top_antecedent_mask], -1)
# Shape: (batch_size, num_spans_to_keep, 1 + max_antecedents)
attention_weight = util.masked_softmax(
coreference_scores, top_antecedent_with_dummy_mask, memory_efficient=True
)
# Shape: (batch_size, num_spans_to_keep, 1 + max_antecedents, embedding_size)
top_antecedent_with_dummy_embeddings = torch.cat(
[top_span_embeddings.unsqueeze(2), top_antecedent_embeddings], 2
)
# Shape: (batch_size, num_spans_to_keep, embedding_size)
attended_embeddings = util.weighted_sum(
top_antecedent_with_dummy_embeddings, attention_weight
)
# Shape: (batch_size, num_spans_to_keep, embedding_size)
top_span_embeddings = self._span_updating_gated_sum(
top_span_embeddings, attended_embeddings
)
# Shape: (batch_size, num_spans_to_keep, max_antecedents, embedding_size)
top_antecedent_embeddings = util.batched_index_select(
top_span_embeddings, top_antecedent_indices, flat_top_antecedent_indices
)
# Shape: (batch_size, num_spans_to_keep, 1 + max_antecedents)
coreference_scores = self._compute_coreference_scores(
top_span_embeddings,
top_antecedent_embeddings,
top_partial_coreference_scores,
top_antecedent_mask,
top_antecedent_offsets,
)
# We now have, for each span which survived the pruning stage,
# a predicted antecedent. This implies a clustering if we group
# mentions which refer to each other in a chain.
# Shape: (batch_size, num_spans_to_keep)
_, predicted_antecedents = coreference_scores.max(2)
# Subtract one here because index 0 is the "no antecedent" class,
# so this makes the indices line up with actual spans if the prediction
# is greater than -1.
predicted_antecedents -= 1
output_dict = {
"top_spans": top_spans,
"antecedent_indices": top_antecedent_indices,
"predicted_antecedents": predicted_antecedents,
}
if span_labels is not None:
# Find the gold labels for the spans which we kept.
# Shape: (batch_size, num_spans_to_keep, 1)
pruned_gold_labels = util.batched_index_select(
span_labels.unsqueeze(-1), top_span_indices, flat_top_span_indices
)
# Shape: (batch_size, num_spans_to_keep, max_antecedents)
antecedent_labels = util.batched_index_select(
pruned_gold_labels, top_antecedent_indices, flat_top_antecedent_indices
).squeeze(-1)
antecedent_labels = util.replace_masked_values(
antecedent_labels, top_antecedent_mask, -100
)
# Compute labels.
# Shape: (batch_size, num_spans_to_keep, max_antecedents + 1)
gold_antecedent_labels = self._compute_antecedent_gold_labels(
pruned_gold_labels, antecedent_labels
)
# Now, compute the loss using the negative marginal log-likelihood.
# This is equal to the log of the sum of the probabilities of all antecedent predictions
# that would be consistent with the data, in the sense that we are minimising, for a
# given span, the negative marginal log likelihood of all antecedents which are in the
# same gold cluster as the span we are currently considering. Each span i predicts a
# single antecedent j, but there might be several prior mentions k in the same
# coreference cluster that would be valid antecedents. Our loss is the sum of the
# probability assigned to all valid antecedents. This is a valid objective for
# clustering as we don't mind which antecedent is predicted, so long as they are in
# the same coreference cluster.
coreference_log_probs = util.masked_log_softmax(
coreference_scores, top_span_mask.unsqueeze(-1)
)
correct_antecedent_log_probs = coreference_log_probs + gold_antecedent_labels.log()
negative_marginal_log_likelihood = -util.logsumexp(correct_antecedent_log_probs).sum()
self._mention_recall(top_spans, metadata)
self._conll_coref_scores(
top_spans, top_antecedent_indices, predicted_antecedents, metadata
)
output_dict["loss"] = negative_marginal_log_likelihood
if metadata is not None:
output_dict["document"] = [x["original_text"] for x in metadata]
return output_dict
def make_output_human_readable(self, output_dict: Dict[str, torch.Tensor]):
"""
Converts the list of spans and predicted antecedent indices into clusters
of spans for each element in the batch.
# Parameters
output_dict : `Dict[str, torch.Tensor]`, required.
The result of calling :func:`forward` on an instance or batch of instances.
# Returns
The same output dictionary, but with an additional `clusters` key:
clusters : `List[List[List[Tuple[int, int]]]]`
A nested list, representing, for each instance in the batch, the list of clusters,
which are in turn comprised of a list of (start, end) inclusive spans into the
original document.
"""
# A tensor of shape (batch_size, num_spans_to_keep, 2), representing
# the start and end indices of each span.
batch_top_spans = output_dict["top_spans"].detach().cpu()
# A tensor of shape (batch_size, num_spans_to_keep) representing, for each span,
# the index into `antecedent_indices` which specifies the antecedent span. Additionally,
# the index can be -1, specifying that the span has no predicted antecedent.
batch_predicted_antecedents = output_dict["predicted_antecedents"].detach().cpu()
# A tensor of shape (num_spans_to_keep, max_antecedents), representing the indices
# of the predicted antecedents with respect to the 2nd dimension of `batch_top_spans`
# for each antecedent we considered.
batch_antecedent_indices = output_dict["antecedent_indices"].detach().cpu()
batch_clusters: List[List[List[Tuple[int, int]]]] = []
# Calling zip() on two tensors results in an iterator over their
# first dimension. This is iterating over instances in the batch.
for top_spans, predicted_antecedents, antecedent_indices in zip(
batch_top_spans, batch_predicted_antecedents, batch_antecedent_indices
):
spans_to_cluster_ids: Dict[Tuple[int, int], int] = {}
clusters: List[List[Tuple[int, int]]] = []
for i, (span, predicted_antecedent) in enumerate(zip(top_spans, predicted_antecedents)):
if predicted_antecedent < 0:
# We don't care about spans which are
# not co-referent with anything.
continue
# Find the right cluster to update with this span.
# To do this, we find the row in `antecedent_indices`
# corresponding to this span we are considering.
# The predicted antecedent is then an index into this list
# of indices, denoting the span from `top_spans` which is the
# most likely antecedent.
predicted_index = antecedent_indices[i, predicted_antecedent]
antecedent_span = (
top_spans[predicted_index, 0].item(),
top_spans[predicted_index, 1].item(),
)
# Check if we've seen the span before.
if antecedent_span in spans_to_cluster_ids:
predicted_cluster_id: int = spans_to_cluster_ids[antecedent_span]
else:
# We start a new cluster.
predicted_cluster_id = len(clusters)
# Append a new cluster containing only this span.
clusters.append([antecedent_span])
# Record the new id of this span.
spans_to_cluster_ids[antecedent_span] = predicted_cluster_id
# Now add the span we are currently considering.
span_start, span_end = span[0].item(), span[1].item()
clusters[predicted_cluster_id].append((span_start, span_end))
spans_to_cluster_ids[(span_start, span_end)] = predicted_cluster_id
batch_clusters.append(clusters)
output_dict["clusters"] = batch_clusters
return output_dict
def get_metrics(self, reset: bool = False) -> Dict[str, float]:
mention_recall = self._mention_recall.get_metric(reset)
coref_precision, coref_recall, coref_f1 = self._conll_coref_scores.get_metric(reset)
return {
"coref_precision": coref_precision,
"coref_recall": coref_recall,
"coref_f1": coref_f1,
"mention_recall": mention_recall,
}
@staticmethod
def _generate_valid_antecedents(
num_spans_to_keep: int, max_antecedents: int, device: int
) -> Tuple[torch.IntTensor, torch.IntTensor, torch.BoolTensor]:
"""
This method generates possible antecedents per span which survived the pruning
stage. This procedure is `generic across the batch`. The reason this is the case is
that each span in a batch can be coreferent with any previous span, but here we
are computing the possible `indices` of these spans. So, regardless of the batch,
the 1st span _cannot_ have any antecedents, because there are none to select from.
Similarly, each element can only predict previous spans, so this returns a matrix
of shape (num_spans_to_keep, max_antecedents), where the (i,j)-th index is equal to
        (i - 1) - j if j < i, or zero otherwise.
# Parameters
num_spans_to_keep : `int`, required.
The number of spans that were kept while pruning.
max_antecedents : `int`, required.
The maximum number of antecedent spans to consider for every span.
device : `int`, required.
The CUDA device to use.
# Returns
valid_antecedent_indices : `torch.LongTensor`
The indices of every antecedent to consider with respect to the top k spans.
Has shape `(num_spans_to_keep, max_antecedents)`.
valid_antecedent_offsets : `torch.LongTensor`
The distance between the span and each of its antecedents in terms of the number
            of considered spans (i.e. not the word distance between the spans).
Has shape `(1, max_antecedents)`.
valid_antecedent_mask : `torch.BoolTensor`
The mask representing whether each antecedent span is valid. Required since
different spans have different numbers of valid antecedents. For example, the first
span in the document should have no valid antecedents.
Has shape `(1, num_spans_to_keep, max_antecedents)`.
"""
# Shape: (num_spans_to_keep, 1)
target_indices = util.get_range_vector(num_spans_to_keep, device).unsqueeze(1)
# Shape: (1, max_antecedents)
valid_antecedent_offsets = (util.get_range_vector(max_antecedents, device) + 1).unsqueeze(0)
# This is a broadcasted subtraction.
# Shape: (num_spans_to_keep, max_antecedents)
raw_antecedent_indices = target_indices - valid_antecedent_offsets
# In our matrix of indices, the upper triangular part will be negative
# because the offsets will be > the target indices. We want to mask these,
# because these are exactly the indices which we don't want to predict, per span.
# Shape: (1, num_spans_to_keep, max_antecedents)
valid_antecedent_mask = (raw_antecedent_indices >= 0).unsqueeze(0)
# Shape: (num_spans_to_keep, max_antecedents)
valid_antecedent_indices = F.relu(raw_antecedent_indices.float()).long()
return valid_antecedent_indices, valid_antecedent_offsets, valid_antecedent_mask
def _distance_pruning(
self,
top_span_embeddings: torch.FloatTensor,
top_span_mention_scores: torch.FloatTensor,
max_antecedents: int,
) -> Tuple[torch.FloatTensor, torch.BoolTensor, torch.LongTensor, torch.LongTensor]:
"""
Generates antecedents for each span and prunes down to `max_antecedents`. This method
prunes antecedents only based on distance (i.e. number of intervening spans). The closest
antecedents are kept.
# Parameters
top_span_embeddings: `torch.FloatTensor`, required.
The embeddings of the top spans.
(batch_size, num_spans_to_keep, embedding_size).
top_span_mention_scores: `torch.FloatTensor`, required.
The mention scores of the top spans.
(batch_size, num_spans_to_keep).
max_antecedents: `int`, required.
The maximum number of antecedents to keep for each span.
# Returns
top_partial_coreference_scores: `torch.FloatTensor`
The partial antecedent scores for each span-antecedent pair. Computed by summing
            the span mention scores of the span and the antecedent. This score is partial because
compared to the full coreference scores, it lacks the interaction term
w * FFNN([g_i, g_j, g_i * g_j, features]).
(batch_size, num_spans_to_keep, max_antecedents)
top_antecedent_mask: `torch.BoolTensor`
The mask representing whether each antecedent span is valid. Required since
different spans have different numbers of valid antecedents. For example, the first
span in the document should have no valid antecedents.
(batch_size, num_spans_to_keep, max_antecedents)
top_antecedent_offsets: `torch.LongTensor`
The distance between the span and each of its antecedents in terms of the number
            of considered spans (i.e. not the word distance between the spans).
(batch_size, num_spans_to_keep, max_antecedents)
top_antecedent_indices: `torch.LongTensor`
The indices of every antecedent to consider with respect to the top k spans.
(batch_size, num_spans_to_keep, max_antecedents)
"""
# These antecedent matrices are independent of the batch dimension - they're just a function
# of the span's position in top_spans.
# The spans are in document order, so we can just use the relative
# index of the spans to know which other spans are allowed antecedents.
num_spans_to_keep = top_span_embeddings.size(1)
device = util.get_device_of(top_span_embeddings)
# Shapes:
# (num_spans_to_keep, max_antecedents),
# (1, max_antecedents),
# (1, num_spans_to_keep, max_antecedents)
(
top_antecedent_indices,
top_antecedent_offsets,
top_antecedent_mask,
) = self._generate_valid_antecedents( # noqa
num_spans_to_keep, max_antecedents, device
)
# Shape: (batch_size, num_spans_to_keep, max_antecedents)
top_antecedent_mention_scores = util.flattened_index_select(
top_span_mention_scores.unsqueeze(-1), top_antecedent_indices
).squeeze(-1)
# Shape: (batch_size, num_spans_to_keep, max_antecedents) * 4
top_partial_coreference_scores = (
top_span_mention_scores.unsqueeze(-1) + top_antecedent_mention_scores
)
top_antecedent_indices = top_antecedent_indices.unsqueeze(0).expand_as(
top_partial_coreference_scores
)
top_antecedent_offsets = top_antecedent_offsets.unsqueeze(0).expand_as(
top_partial_coreference_scores
)
top_antecedent_mask = top_antecedent_mask.expand_as(top_partial_coreference_scores)
return (
top_partial_coreference_scores,
top_antecedent_mask,
top_antecedent_offsets,
top_antecedent_indices,
)
def _coarse_to_fine_pruning(
self,
top_span_embeddings: torch.FloatTensor,
top_span_mention_scores: torch.FloatTensor,
top_span_mask: torch.BoolTensor,
max_antecedents: int,
) -> Tuple[torch.FloatTensor, torch.BoolTensor, torch.LongTensor, torch.LongTensor]:
"""
Generates antecedents for each span and prunes down to `max_antecedents`. This method
        prunes antecedents using a fast bilinear interaction score between a span and a candidate
antecedent, and the highest-scoring antecedents are kept.
# Parameters
top_span_embeddings: `torch.FloatTensor`, required.
The embeddings of the top spans.
(batch_size, num_spans_to_keep, embedding_size).
top_span_mention_scores: `torch.FloatTensor`, required.
The mention scores of the top spans.
(batch_size, num_spans_to_keep).
top_span_mask: `torch.BoolTensor`, required.
The mask for the top spans.
(batch_size, num_spans_to_keep).
max_antecedents: `int`, required.
The maximum number of antecedents to keep for each span.
# Returns
top_partial_coreference_scores: `torch.FloatTensor`
The partial antecedent scores for each span-antecedent pair. Computed by summing
            the span mention scores of the span and the antecedent, as well as a bilinear
interaction term. This score is partial because compared to the full coreference scores,
it lacks the interaction term
`w * FFNN([g_i, g_j, g_i * g_j, features])`.
`(batch_size, num_spans_to_keep, max_antecedents)`
top_antecedent_mask: `torch.BoolTensor`
The mask representing whether each antecedent span is valid. Required since
different spans have different numbers of valid antecedents. For example, the first
span in the document should have no valid antecedents.
`(batch_size, num_spans_to_keep, max_antecedents)`
top_antecedent_offsets: `torch.LongTensor`
The distance between the span and each of its antecedents in terms of the number
            of considered spans (i.e. not the word distance between the spans).
`(batch_size, num_spans_to_keep, max_antecedents)`
top_antecedent_indices: `torch.LongTensor`
The indices of every antecedent to consider with respect to the top k spans.
`(batch_size, num_spans_to_keep, max_antecedents)`
"""
batch_size, num_spans_to_keep = top_span_embeddings.size()[:2]
device = util.get_device_of(top_span_embeddings)
# Shape: (1, num_spans_to_keep, num_spans_to_keep)
_, _, valid_antecedent_mask = self._generate_valid_antecedents(
num_spans_to_keep, num_spans_to_keep, device
)
mention_one_score = top_span_mention_scores.unsqueeze(1)
mention_two_score = top_span_mention_scores.unsqueeze(2)
bilinear_weights = self._coarse2fine_scorer(top_span_embeddings).transpose(1, 2)
bilinear_score = torch.matmul(top_span_embeddings, bilinear_weights)
# Shape: (batch_size, num_spans_to_keep, num_spans_to_keep); broadcast op
partial_antecedent_scores = mention_one_score + mention_two_score + bilinear_score
# Shape: (batch_size, num_spans_to_keep, num_spans_to_keep); broadcast op
span_pair_mask = top_span_mask.unsqueeze(-1) & valid_antecedent_mask
# Shape:
# (batch_size, num_spans_to_keep, max_antecedents) * 3
(
top_partial_coreference_scores,
top_antecedent_mask,
top_antecedent_indices,
) = util.masked_topk(partial_antecedent_scores, span_pair_mask, max_antecedents)
top_span_range = util.get_range_vector(num_spans_to_keep, device)
# Shape: (num_spans_to_keep, num_spans_to_keep); broadcast op
valid_antecedent_offsets = top_span_range.unsqueeze(-1) - top_span_range.unsqueeze(0)
# TODO: we need to make `batched_index_select` more general to make this less awkward.
top_antecedent_offsets = util.batched_index_select(
valid_antecedent_offsets.unsqueeze(0)
.expand(batch_size, num_spans_to_keep, num_spans_to_keep)
.reshape(batch_size * num_spans_to_keep, num_spans_to_keep, 1),
top_antecedent_indices.view(-1, max_antecedents),
).reshape(batch_size, num_spans_to_keep, max_antecedents)
return (
top_partial_coreference_scores,
top_antecedent_mask,
top_antecedent_offsets,
top_antecedent_indices,
)
def _compute_span_pair_embeddings(
self,
top_span_embeddings: torch.FloatTensor,
antecedent_embeddings: torch.FloatTensor,
antecedent_offsets: torch.FloatTensor,
):
"""
Computes an embedding representation of pairs of spans for the pairwise scoring function
to consider. This includes both the original span representations, the element-wise
similarity of the span representations, and an embedding representation of the distance
between the two spans.
# Parameters
top_span_embeddings : `torch.FloatTensor`, required.
Embedding representations of the top spans. Has shape
(batch_size, num_spans_to_keep, embedding_size).
antecedent_embeddings : `torch.FloatTensor`, required.
Embedding representations of the antecedent spans we are considering
for each top span. Has shape
(batch_size, num_spans_to_keep, max_antecedents, embedding_size).
antecedent_offsets : `torch.IntTensor`, required.
The offsets between each top span and its antecedent spans in terms
of spans we are considering. Has shape (batch_size, num_spans_to_keep, max_antecedents).
# Returns
span_pair_embeddings : `torch.FloatTensor`
Embedding representation of the pair of spans to consider. Has shape
(batch_size, num_spans_to_keep, max_antecedents, embedding_size)
"""
# Shape: (batch_size, num_spans_to_keep, max_antecedents, embedding_size)
target_embeddings = top_span_embeddings.unsqueeze(2).expand_as(antecedent_embeddings)
# Shape: (batch_size, num_spans_to_keep, max_antecedents, embedding_size)
antecedent_distance_embeddings = self._distance_embedding(
util.bucket_values(antecedent_offsets, num_total_buckets=self._num_distance_buckets)
)
# Shape: (batch_size, num_spans_to_keep, max_antecedents, embedding_size)
span_pair_embeddings = torch.cat(
[
target_embeddings,
antecedent_embeddings,
antecedent_embeddings * target_embeddings,
antecedent_distance_embeddings,
],
-1,
)
return span_pair_embeddings
@staticmethod
def _compute_antecedent_gold_labels(
top_span_labels: torch.IntTensor, antecedent_labels: torch.IntTensor
):
"""
Generates a binary indicator for every pair of spans. This label is one if and
only if the pair of spans belong to the same cluster. The labels are augmented
with a dummy antecedent at the zeroth position, which represents the prediction
that a span does not have any antecedent.
# Parameters
top_span_labels : `torch.IntTensor`, required.
The cluster id label for every span. The id is arbitrary,
as we just care about the clustering. Has shape (batch_size, num_spans_to_keep).
antecedent_labels : `torch.IntTensor`, required.
The cluster id label for every antecedent span. The id is arbitrary,
as we just care about the clustering. Has shape
(batch_size, num_spans_to_keep, max_antecedents).
# Returns
pairwise_labels_with_dummy_label : `torch.FloatTensor`
A binary tensor representing whether a given pair of spans belong to
the same cluster in the gold clustering.
Has shape (batch_size, num_spans_to_keep, max_antecedents + 1).
"""
# Shape: (batch_size, num_spans_to_keep, max_antecedents)
target_labels = top_span_labels.expand_as(antecedent_labels)
same_cluster_indicator = (target_labels == antecedent_labels).float()
non_dummy_indicator = (target_labels >= 0).float()
pairwise_labels = same_cluster_indicator * non_dummy_indicator
# Shape: (batch_size, num_spans_to_keep, 1)
dummy_labels = (1 - pairwise_labels).prod(-1, keepdim=True)
# Shape: (batch_size, num_spans_to_keep, max_antecedents + 1)
pairwise_labels_with_dummy_label = torch.cat([dummy_labels, pairwise_labels], -1)
return pairwise_labels_with_dummy_label
def _compute_coreference_scores(
self,
top_span_embeddings: torch.FloatTensor,
top_antecedent_embeddings: torch.FloatTensor,
top_partial_coreference_scores: torch.FloatTensor,
top_antecedent_mask: torch.BoolTensor,
top_antecedent_offsets: torch.FloatTensor,
) -> torch.FloatTensor:
"""
Computes scores for every pair of spans. Additionally, a dummy label is included,
representing the decision that the span is not coreferent with anything. For the dummy
label, the score is always zero. For the true antecedent spans, the score consists of
the pairwise antecedent score and the unary mention scores for the span and its
antecedent. The factoring allows the model to blame many of the absent links on bad
spans, enabling the pruning strategy used in the forward pass.
# Parameters
top_span_embeddings : `torch.FloatTensor`, required.
Embedding representations of the kept spans. Has shape
(batch_size, num_spans_to_keep, embedding_size)
top_antecedent_embeddings: `torch.FloatTensor`, required.
The embeddings of antecedents for each span candidate. Has shape
(batch_size, num_spans_to_keep, max_antecedents, embedding_size)
top_partial_coreference_scores : `torch.FloatTensor`, required.
            Sum of span mention score and antecedent mention score. The coarse-to-fine setting
            has an additional term, which is the coarse bilinear score.
(batch_size, num_spans_to_keep, max_antecedents).
top_antecedent_mask : `torch.BoolTensor`, required.
The mask for valid antecedents.
(batch_size, num_spans_to_keep, max_antecedents).
top_antecedent_offsets : `torch.FloatTensor`, required.
The distance between the span and each of its antecedents in terms of the number
            of considered spans (i.e. not the word distance between the spans).
(batch_size, num_spans_to_keep, max_antecedents).
# Returns
coreference_scores : `torch.FloatTensor`
A tensor of shape (batch_size, num_spans_to_keep, max_antecedents + 1),
            representing the unnormalised score for each (span, antecedent) pair
we considered.
"""
# Shape: (batch_size, num_spans_to_keep, max_antecedents, embedding_size)
span_pair_embeddings = self._compute_span_pair_embeddings(
top_span_embeddings, top_antecedent_embeddings, top_antecedent_offsets
)
# Shape: (batch_size, num_spans_to_keep, max_antecedents)
antecedent_scores = self._antecedent_scorer(
self._antecedent_feedforward(span_pair_embeddings)
).squeeze(-1)
antecedent_scores += top_partial_coreference_scores
antecedent_scores = util.replace_masked_values(
antecedent_scores, top_antecedent_mask, util.min_value_of_dtype(antecedent_scores.dtype)
)
# Shape: (batch_size, num_spans_to_keep, 1)
shape = [antecedent_scores.size(0), antecedent_scores.size(1), 1]
dummy_scores = antecedent_scores.new_zeros(*shape)
# Shape: (batch_size, num_spans_to_keep, max_antecedents + 1)
coreference_scores = torch.cat([dummy_scores, antecedent_scores], -1)
return coreference_scores
default_predictor = "coreference_resolution"
| allennlp-models-main | allennlp_models/coref/models/coref.py |
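The comments and docstrings in the model above describe two pieces of arithmetic in prose: the antecedent index construction in `_generate_valid_antecedents` and the marginal log-likelihood objective computed at the end of `forward` via `_compute_antecedent_gold_labels`. The following is a minimal, self-contained PyTorch sketch of both on toy tensors. It deliberately avoids the AllenNLP `util` helpers, so the variable names mirror the model code but the sizes, gold cluster ids, and random scores are made up purely for illustration.

import torch
import torch.nn.functional as F

# Toy sizes; in the model these come from pruning (num_spans_to_keep) and config (max_antecedents).
num_spans_to_keep, max_antecedents = 5, 3

# Antecedent index math, mirroring `_generate_valid_antecedents`.
target_indices = torch.arange(num_spans_to_keep).unsqueeze(1)            # (k, 1)
offsets = torch.arange(1, max_antecedents + 1).unsqueeze(0)              # (1, a): distances 1..a in span order
raw_indices = target_indices - offsets                                   # (k, a); negative => before the document start
antecedent_mask = raw_indices >= 0                                       # the first span has no valid antecedents
antecedent_indices = raw_indices.clamp(min=0)                            # same effect as F.relu(...).long()

# Gold labels and the loss, mirroring `_compute_antecedent_gold_labels` and the
# marginal log-likelihood at the end of `forward`, for a batch of size 1.
top_span_labels = torch.tensor([[0, -1, 0, 1, 1]]).unsqueeze(-1)         # (1, k, 1); -1 = not in any gold cluster
antecedent_labels = top_span_labels.squeeze(-1)[:, antecedent_indices]   # (1, k, a)
antecedent_labels = antecedent_labels.masked_fill(~antecedent_mask, -100)

same_cluster = (top_span_labels == antecedent_labels).float()
non_dummy = (top_span_labels >= 0).float()
pairwise_labels = same_cluster * non_dummy                               # 1 iff this candidate is a true antecedent
dummy_labels = (1 - pairwise_labels).prod(-1, keepdim=True)              # 1 iff the span has no true antecedent
gold_labels = torch.cat([dummy_labels, pairwise_labels], -1)             # (1, k, a + 1)

# Random stand-in scores with a leading all-zero "no antecedent" column, as in the model.
antecedent_scores = torch.randn(1, num_spans_to_keep, max_antecedents)
antecedent_scores = antecedent_scores.masked_fill(~antecedent_mask, -1e32)
coreference_scores = torch.cat([torch.zeros(1, num_spans_to_keep, 1), antecedent_scores], -1)

# Negative marginal log-likelihood: sum the probability mass on *all* gold-consistent
# antecedents for each span (including the dummy when none exist), then take -log.
log_probs = F.log_softmax(coreference_scores, dim=-1)
loss = -torch.logsumexp(log_probs + gold_labels.log(), dim=-1).sum()
print(antecedent_indices, antecedent_mask, loss, sep="\n")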
from allennlp_models.coref.models.coref import CoreferenceResolver
| allennlp-models-main | allennlp_models/coref/models/__init__.py |
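`make_output_human_readable` above turns per-span antecedent predictions into clusters by greedily following each predicted link. Below is a pure-Python sketch of that decoding on a hand-written toy prediction; the spans, antecedent indices, and predictions are invented for illustration and are not real model output.

from typing import Dict, List, Tuple

# Toy "model output" for one document: three kept spans, given as inclusive (start, end) word offsets.
top_spans = [(0, 1), (4, 4), (7, 9)]
antecedent_indices = [[0, 0], [0, 0], [1, 0]]   # per span: candidate antecedent positions within top_spans
predicted_antecedents = [-1, 0, 0]              # -1 = no antecedent; otherwise an index into antecedent_indices[i]

spans_to_cluster_ids: Dict[Tuple[int, int], int] = {}
clusters: List[List[Tuple[int, int]]] = []
for i, (span, predicted) in enumerate(zip(top_spans, predicted_antecedents)):
    if predicted < 0:
        continue  # this span does not link back to anything
    antecedent_span = top_spans[antecedent_indices[i][predicted]]
    if antecedent_span in spans_to_cluster_ids:
        cluster_id = spans_to_cluster_ids[antecedent_span]
    else:
        cluster_id = len(clusters)           # start a new cluster containing the antecedent
        clusters.append([antecedent_span])
        spans_to_cluster_ids[antecedent_span] = cluster_id
    clusters[cluster_id].append(span)
    spans_to_cluster_ids[span] = cluster_id

print(clusters)  # [[(0, 1), (4, 4), (7, 9)]]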
from pathlib import Path
from glob import glob
import os
from typing import Dict, Tuple, Set
import pytest
from tests import FIXTURES_ROOT
from allennlp.commands.train import TrainModel
from allennlp.common.testing import AllenNlpTestCase
from allennlp.common.params import Params
from allennlp.common.plugins import import_plugins
CONFIGS_TO_IGNORE = {
# TODO (epwalsh): once the new data loading API is merged, try to get this model working.
# Requires some bi-directional LM archive path.
"constituency_parser_transformer_elmo.jsonnet",
}
FOLDERS_TO_IGNORE: Set[str] = set()
def find_configs():
for item in os.walk("training_config/"):
if os.path.basename(item[0]) in FOLDERS_TO_IGNORE:
continue
for pattern in ("*.json", "*.jsonnet"):
for path in glob(os.path.join(item[0], pattern)):
if os.path.basename(path) == "common.jsonnet":
continue
yield pytest.param(
path,
marks=pytest.mark.skipif(
any(x in path for x in CONFIGS_TO_IGNORE), reason="ignoring"
),
)
GLOVE_PATCHES = {
FIXTURES_ROOT
/ "glove.6B.100d.sample.txt.gz": (
"https://allennlp.s3.amazonaws.com/datasets/glove/glove.6B.100d.txt.gz",
),
FIXTURES_ROOT
/ "glove.6B.300d.sample.txt.gz": (
"https://allennlp.s3.amazonaws.com/datasets/glove/glove.6B.300d.txt.gz",
"https://allennlp.s3.amazonaws.com/datasets/glove/glove.840B.300d.txt.gz",
"https://allennlp.s3.amazonaws.com/datasets/glove/glove.840B.300d.lower.converted.zip",
),
}
def patch_glove(params):
for key, value in params.items():
if isinstance(value, str):
for patch, patch_targets in GLOVE_PATCHES.items():
if value in patch_targets:
params[key] = str(patch)
elif isinstance(value, Params):
patch_glove(value)
def patch_image_dir(params):
for key, value in params.items():
if key == "image_dir" and isinstance(value, str):
params[key] = FIXTURES_ROOT / "vision" / "images"
elif key == "feature_cache_dir" and isinstance(value, str):
params[key] = FIXTURES_ROOT / "vision" / "images" / "feature_cache"
elif isinstance(value, Params):
patch_image_dir(value)
def patch_dataset_reader(params):
if params["type"] == "multitask":
for reader_params in params["readers"].values():
reader_params["max_instances"] = 4
elif params["type"] == "flickr30k":
params["max_instances"] = 6
else:
params["max_instances"] = 4
# fmt: off
DATASET_PATCHES: Dict[Path, Tuple[str, ...]] = {
FIXTURES_ROOT / "structured_prediction" / "srl" / "conll_2012": ("SRL_TRAIN_DATA_PATH", "SRL_VALIDATION_DATA_PATH"),
FIXTURES_ROOT / "structured_prediction" / "example_ptb.trees": ("PTB_TRAIN_PATH", "PTB_DEV_PATH", "PTB_TEST_PATH"),
FIXTURES_ROOT / "structured_prediction" / "dependencies.conllu": ("PTB_DEPENDENCIES_TRAIN", "PTB_DEPENDENCIES_VAL"),
FIXTURES_ROOT / "structured_prediction" / "semantic_dependencies" / "dm.sdp": (
"SEMEVAL_TRAIN",
"SEMEVAL_DEV",
"SEMEVAL_TEST"
),
FIXTURES_ROOT / "tagging" / "conll2003.txt": ("NER_TRAIN_DATA_PATH", "NER_TEST_DATA_PATH"),
FIXTURES_ROOT / "lm" / "language_model" / "sentences.txt": ("BIDIRECTIONAL_LM_TRAIN_PATH",),
FIXTURES_ROOT / "coref" / "coref.gold_conll": (
"COREF_TRAIN_DATA_PATH",
"COREF_DEV_DATA_PATH",
"COREF_TEST_DATA_PATH",
),
FIXTURES_ROOT / "structured_prediction" / "srl" / "conll_2012" / "subdomain": (
"CONLL_TRAIN_DATA_PATH",
"CONLL_DEV_DATA_PATH"
),
FIXTURES_ROOT / "tagging" / "conll2003.txt": (
"NER_TRAIN_DATA_PATH",
"NER_TEST_DATA_PATH",
"NER_TEST_A_PATH",
"NER_TEST_B_PATH",
),
FIXTURES_ROOT / "lm" / "bidirectional_language_model" / "vocab": ("BIDIRECTIONAL_LM_VOCAB_PATH",),
FIXTURES_ROOT / "lm" / "bidirectional_language_model" / "training_data" / "*": ("BIDIRECTIONAL_LM_TRAIN_PATH",),
}
# fmt: on
@pytest.mark.pretrained_config_test
class TestAllenNlpPretrainedModelConfigs(AllenNlpTestCase):
@classmethod
def setup_class(cls):
# Make sure all the classes we need are registered.
import_plugins()
# Patch dataset paths.
for dataset_patch, patch_targets in DATASET_PATCHES.items():
for patch_target in patch_targets:
os.environ[patch_target] = str(dataset_patch)
@pytest.mark.filterwarnings("ignore::DeprecationWarning")
@pytest.mark.parametrize("path", find_configs())
def test_pretrained_configs(self, path):
params = Params.from_file(
path,
params_overrides="{"
"'trainer.cuda_device': -1, "
"'trainer.use_amp': false, "
"'trainer.num_epochs': 2, "
"}",
)
# Patch max_instances in the multitask case
patch_dataset_reader(params["dataset_reader"])
if "validation_dataset_reader" in params:
# Unclear why this doesn't work for biattentive_classification_network
if "biattentive_classification_network" not in path:
patch_dataset_reader(params["validation_dataset_reader"])
# Patch any pretrained glove files with smaller fixtures.
patch_glove(params)
# Patch image_dir and feature_cache_dir keys so they point at our test fixtures instead.
patch_image_dir(params)
# Remove unnecessary keys.
for key in ("random_seed", "numpy_seed", "pytorch_seed", "distributed"):
if key in params:
del params[key]
# Just make sure the train loop can be instantiated.
TrainModel.from_params(params=params, serialization_dir=self.TEST_DIR, local_rank=0)
| allennlp-models-main | tests/training_config_test.py |
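The `patch_glove` and `patch_image_dir` helpers above walk a nested `Params` tree and rewrite matching values in place so the configs train against small fixtures. The sketch below shows the same recursive-rewrite pattern on plain nested dicts; the function name, URL, and fixture path are made up for illustration, and plain dicts stand in for AllenNLP's `Params`.

from typing import Any, Dict

def patch_values(params: Dict[str, Any], replacements: Dict[str, str]) -> None:
    """Recursively replace string values in place wherever they match a known target."""
    for key, value in params.items():
        if isinstance(value, str) and value in replacements:
            params[key] = replacements[value]
        elif isinstance(value, dict):
            patch_values(value, replacements)

config = {
    "model": {"embedder": {"pretrained_file": "https://example.com/glove.840B.300d.txt.gz"}},
    "trainer": {"cuda_device": 0},
}
patch_values(config, {"https://example.com/glove.840B.300d.txt.gz": "test_fixtures/glove.sample.txt.gz"})
print(config["model"]["embedder"]["pretrained_file"])  # test_fixtures/glove.sample.txt.gz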
import pathlib
PROJECT_ROOT = (pathlib.Path(__file__).parent / "..").resolve() # pylint: disable=no-member
TESTS_ROOT = PROJECT_ROOT / "tests"
FIXTURES_ROOT = PROJECT_ROOT / "test_fixtures"
| allennlp-models-main | tests/__init__.py |
from allennlp_models import version
class TestVersion:
def test_version_exists(self):
assert version.VERSION.startswith(version.VERSION_SHORT)
| allennlp-models-main | tests/version_test.py |
from typing import Dict, List
import pytest
import spacy
from allennlp.common.testing import AllenNlpTestCase
from allennlp_models.pretrained import get_pretrained_models, get_tasks, load_predictor
# By default we don't run these tests
@pytest.mark.pretrained_model_test
class TestAllenNlpPretrainedModels(AllenNlpTestCase):
def test_machine_comprehension(self):
predictor = load_predictor("rc-bidaf")
passage = """The Matrix is a 1999 science fiction action film written and directed by The Wachowskis, starring Keanu Reeves, Laurence Fishburne, Carrie-Anne Moss, Hugo Weaving, and Joe Pantoliano. It depicts a dystopian future in which reality as perceived by most humans is actually a simulated reality called "the Matrix", created by sentient machines to subdue the human population, while their bodies' heat and electrical activity are used as an energy source. Computer programmer Neo" learns this truth and is drawn into a rebellion against the machines, which involves other people who have been freed from the "dream world". """
question = "Who stars in The Matrix?"
result = predictor.predict_json({"passage": passage, "question": question})
correct = (
"Keanu Reeves, Laurence Fishburne, Carrie-Anne Moss, Hugo Weaving, and Joe Pantoliano"
)
assert correct == result["best_span_str"]
def test_semantic_role_labeling(self):
predictor = load_predictor("structured-prediction-srl-bert")
sentence = "If you liked the music we were playing last night, you will absolutely love what we're playing tomorrow!"
result = predictor.predict_json({"sentence": sentence})
assert result["words"] == [
"If",
"you",
"liked",
"the",
"music",
"we",
"were",
"playing",
"last",
"night",
",",
"you",
"will",
"absolutely",
"love",
"what",
"we",
"'re",
"playing",
"tomorrow",
"!",
]
assert result["verbs"] == [
{
"verb": "liked",
"description": "If [ARG0: you] [V: liked] [ARG1: the music we were playing last night] , you will absolutely love what we 're playing tomorrow !",
"tags": [
"O",
"B-ARG0",
"B-V",
"B-ARG1",
"I-ARG1",
"I-ARG1",
"I-ARG1",
"I-ARG1",
"I-ARG1",
"I-ARG1",
"O",
"O",
"O",
"O",
"O",
"O",
"O",
"O",
"O",
"O",
"O",
],
},
{
"verb": "were",
"description": "If you liked the music we [V: were] playing last night , you will absolutely love what we 're playing tomorrow !",
"tags": [
"O",
"O",
"O",
"O",
"O",
"O",
"B-V",
"O",
"O",
"O",
"O",
"O",
"O",
"O",
"O",
"O",
"O",
"O",
"O",
"O",
"O",
],
},
{
"verb": "playing",
"description": "If you liked [ARG1: the music] [ARG0: we] were [V: playing] [ARGM-TMP: last night] , you will absolutely love what we 're playing tomorrow !",
"tags": [
"O",
"O",
"O",
"B-ARG1",
"I-ARG1",
"B-ARG0",
"O",
"B-V",
"B-ARGM-TMP",
"I-ARGM-TMP",
"O",
"O",
"O",
"O",
"O",
"O",
"O",
"O",
"O",
"O",
"O",
],
},
{
"verb": "will",
"description": "If you liked the music we were playing last night , you [V: will] absolutely love what we 're playing tomorrow !",
"tags": [
"O",
"O",
"O",
"O",
"O",
"O",
"O",
"O",
"O",
"O",
"O",
"O",
"B-V",
"O",
"O",
"O",
"O",
"O",
"O",
"O",
"O",
],
},
{
"verb": "love",
"description": "[ARGM-ADV: If you liked the music we were playing last night] , [ARG0: you] [ARGM-MOD: will] [ARGM-ADV: absolutely] [V: love] [ARG1: what we 're playing tomorrow] !",
"tags": [
"B-ARGM-ADV",
"I-ARGM-ADV",
"I-ARGM-ADV",
"I-ARGM-ADV",
"I-ARGM-ADV",
"I-ARGM-ADV",
"I-ARGM-ADV",
"I-ARGM-ADV",
"I-ARGM-ADV",
"I-ARGM-ADV",
"O",
"B-ARG0",
"B-ARGM-MOD",
"B-ARGM-ADV",
"B-V",
"B-ARG1",
"I-ARG1",
"I-ARG1",
"I-ARG1",
"I-ARG1",
"O",
],
},
{
"verb": "'re",
"description": "If you liked the music we were playing last night , you will absolutely love what we [V: 're] playing tomorrow !",
"tags": [
"O",
"O",
"O",
"O",
"O",
"O",
"O",
"O",
"O",
"O",
"O",
"O",
"O",
"O",
"O",
"O",
"O",
"B-V",
"O",
"O",
"O",
],
},
{
"verb": "playing",
"description": "If you liked the music we were playing last night , you will absolutely love [ARG1: what] [ARG0: we] 're [V: playing] [ARGM-TMP: tomorrow] !",
"tags": [
"O",
"O",
"O",
"O",
"O",
"O",
"O",
"O",
"O",
"O",
"O",
"O",
"O",
"O",
"O",
"B-ARG1",
"B-ARG0",
"O",
"B-V",
"B-ARGM-TMP",
"O",
],
},
]
def test_textual_entailment(self):
predictor = load_predictor("pair-classification-decomposable-attention-elmo")
result = predictor.predict_json(
{
"premise": "An interplanetary spacecraft is in orbit around a gas giant's icy moon.",
"hypothesis": "The spacecraft has the ability to travel between planets.",
}
)
assert result["label_probs"][0] > 0.7 # entailment
result = predictor.predict_json(
{
"premise": "Two women are wandering along the shore drinking iced tea.",
"hypothesis": "Two women are sitting on a blanket near some rocks talking about politics.",
}
)
assert result["label_probs"][1] > 0.8 # contradiction
result = predictor.predict_json(
{
"premise": "A large, gray elephant walked beside a herd of zebras.",
"hypothesis": "The elephant was lost.",
}
)
assert result["label_probs"][2] > 0.6 # neutral
def test_coreference_resolution(self):
predictor = load_predictor("coref-spanbert")
document = "We 're not going to skimp on quality , but we are very focused to make next year . The only problem is that some of the fabrics are wearing out - since I was a newbie I skimped on some of the fabric and the poor quality ones are developing holes ."
result = predictor.predict_json({"document": document})
print(result)
assert result["clusters"] == [
[[0, 0], [10, 10]],
[[33, 33], [37, 37]],
# [[26, 27], [42, 43]], # Unfortunately the model misses this one.
]
assert result["document"] == [
"We",
"'re",
"not",
"going",
"to",
"skimp",
"on",
"quality",
",",
"but",
"we", # 10
"are",
"very",
"focused",
"to",
"make",
"next",
"year",
".",
"The",
"only", # 20
"problem",
"is",
"that",
"some",
"of",
"the",
"fabrics",
"are",
"wearing",
"out", # 30
"-",
"since",
"I",
"was",
"a",
"newbie",
"I",
"skimped",
"on",
"some", # 40
"of",
"the",
"fabric",
"and",
"the",
"poor",
"quality",
"ones",
"are",
"developing", # 50
"holes",
".",
]
@pytest.mark.filterwarnings("ignore::DeprecationWarning")
def test_ner(self):
predictor = load_predictor("tagging-elmo-crf-tagger")
sentence = """Michael Jordan is a professor at Berkeley."""
result = predictor.predict_json({"sentence": sentence})
assert result["words"] == [
"Michael",
"Jordan",
"is",
"a",
"professor",
"at",
"Berkeley",
".",
]
assert result["tags"] == ["B-PER", "L-PER", "O", "O", "O", "O", "U-LOC", "O"]
@pytest.mark.skipif(
not ("2.1" <= spacy.__version__ < "2.3"),
reason="this model changed before and after 2.1 and 2.2",
)
def test_constituency_parsing(self):
predictor = load_predictor("structured-prediction-constituency-parser")
sentence = """Pierre Vinken died aged 81; immortalised aged 61."""
result = predictor.predict_json({"sentence": sentence})
assert result["tokens"] == [
"Pierre",
"Vinken",
"died",
"aged",
"81",
";",
"immortalised",
"aged",
"61",
".",
]
assert (
result["trees"]
== "(S (NP (NNP Pierre) (NNP Vinken)) (VP (VP (VBD died) (NP (JJ aged) (CD 81))) (, ;) (VP (VBN immortalised) (S (ADJP (JJ aged) (CD 61))))) (. .))"
)
def test_dependency_parsing(self):
predictor = load_predictor("structured-prediction-biaffine-parser")
sentence = """He ate spaghetti with chopsticks."""
result = predictor.predict_json({"sentence": sentence})
# Note that this tree is incorrect. We are checking here that the decoded
# tree is _actually a tree_ - in greedy decoding versions of the dependency
# parser, this sentence has multiple heads. This test shouldn't really live here,
# but it's very difficult to re-create a concrete example of this behaviour without
# a trained dependency parser.
assert result["words"] == ["He", "ate", "spaghetti", "with", "chopsticks", "."]
assert result["pos"] == ["PRON", "VERB", "NOUN", "ADP", "NOUN", "PUNCT"]
assert result["predicted_dependencies"] == [
"nsubj",
"root",
"dobj",
"prep",
"pobj",
"punct",
]
assert result["predicted_heads"] == [2, 0, 2, 2, 4, 2]
def test_sentiment_analysis(self):
predictor = load_predictor("roberta-sst")
result = predictor.predict_json({"sentence": "This is a positive review."})
assert result["label"] == "1"
def test_openie(self):
predictor = load_predictor("structured-prediction-srl")
result = predictor.predict_json(
{"sentence": "I'm against picketing, but I don't know how to show it."}
)
assert "verbs" in result
assert "words" in result
@pytest.mark.parametrize(
"get_model_arg",
["tagging-fine-grained-crf-tagger", "tagging-fine-grained-transformer-crf-tagger"],
)
def test_fine_grained_ner(self, get_model_arg):
predictor = load_predictor(get_model_arg)
text = """Dwayne Haskins passed for 251 yards and three touchdowns, and Urban Meyer finished his coaching career at Ohio State with a 28-23 victory after the Buckeyes held off Washingtonβs thrilling fourth-quarter comeback in the 105th Rose Bowl on Tuesday. Parris Campbell, Johnnie Dixon and Rashod Berry caught TD passes in the first half for the fifth-ranked Buckeyes (13-1), who took a 25-point lead into the fourth. But Myles Gaskin threw a touchdown pass and rushed for two more scores for the No. 9 Huskies (10-4), scoring from 2 yards out with 42 seconds left. The Buckeyes intercepted Jake Browningβs pass on the 2-point conversion attempt and then recovered the Huskiesβ onside kick to wrap up the final game of Meyerβs seven-year tenure. βIβm a very blessed man,β Meyer said. βIβm blessed because of my family, [but] this team, this year, I love this group as much as any Iβve ever had.β"""
result = predictor.predict_json({"sentence": text})
# Just assert that we predicted something better than all-O.
assert len(frozenset(result["tags"])) > 1
def test_transformer_qa(self):
predictor = load_predictor("rc-transformer-qa")
passage = (
"The Normans (Norman: Nourmands; French: Normands; Latin: Normanni) were "
"the people who in the 10th and 11th centuries gave their name to Normandy, a region in France. "
'They were descended from Norse ("Norman" comes from "Norseman") raiders and pirates from Denmark, '
"Iceland and Norway who, under their leader Rollo, agreed to swear fealty to King Charles III of West Francia. "
"Through generations of assimilation and mixing with the native Frankish and Roman-Gaulish populations, "
"their descendants would gradually merge with the Carolingian-based cultures of West Francia. "
"The distinct cultural and ethnic identity of the Normans emerged initially in the first half "
"of the 10th century, and it continued to evolve over the succeeding centuries."
)
question = "In what country is Normandy located?"
result = predictor.predict(question, passage)
assert result["best_span_str"] == "France"
question = "Who gave their name to Normandy in the 1000's and 1100's"
result = predictor.predict(question, passage)
assert result["best_span_str"] == ""
def test_vilbert_vqa(self):
predictor = load_predictor("vqa-vilbert")
result = predictor.predict(
question="What game are they playing?",
image="https://storage.googleapis.com/allennlp-public-data/vqav2/baseball.jpg",
)
max_answer = max((prob, answer) for answer, prob in result["tokens"].items())[1]
assert max_answer == "baseball"
@pytest.mark.parametrize("model_id, model_card", get_pretrained_models().items())
def test_pretrained_models(self, model_id, model_card):
assert model_card.model_usage.archive_file is not None
models_without_training_configs = {"rc-nmn", "lm-next-token-lm-gpt2", "evaluate_rc-lerc"}
if model_id not in models_without_training_configs:
assert model_card.model_usage.training_config is not None
assert model_card.display_name is not None
assert model_card.model_details.description is not None
assert model_card.model_details.short_description is not None
if model_id not in ["rc-nmn"]:
assert model_card.evaluation_data.dataset is not None
assert model_card.training_data.dataset is not None
assert model_card.metrics.model_performance_measures is not None
if not model_id.startswith("lm") and not model_id.startswith("generation"):
assert (
model_card.evaluation_data.dataset.processed_url is not None
or model_card.evaluation_data.dataset.notes is not None
)
@pytest.mark.parametrize("task_id, task_card", get_tasks().items())
def test_tasks(self, task_id, task_card):
if task_card.examples is not None:
assert isinstance(task_card.examples, List) or isinstance(task_card.examples, Dict)
| allennlp-models-main | tests/pretrained_test.py |
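The same pretrained-model API exercised by these tests can be used directly outside pytest. A short usage sketch, relying only on calls already shown above (`get_pretrained_models`, `load_predictor`, `predict_json`); the example document is invented, and any model id listed by `get_pretrained_models()` can be substituted.

from allennlp_models.pretrained import get_pretrained_models, load_predictor

# List every model that ships with a model card.
for model_id, card in get_pretrained_models().items():
    print(model_id, "-", card.display_name)

# Load one predictor by id and run it, exactly as the tests above do.
predictor = load_predictor("coref-spanbert")
result = predictor.predict_json({"document": "Paul told his sister that he would call her tonight."})
print(result["clusters"])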
allennlp-models-main | tests/classification/__init__.py |
|
allennlp-models-main | tests/classification/dataset_readers/__init__.py |