python_code
stringlengths 0
187k
| repo_name
stringlengths 8
46
| file_path
stringlengths 6
135
|
---|---|---|
import copy
import json
from os import PathLike
import random
from typing import Any, Dict, Iterable, Set, Union
import torch
import numpy
from numpy.testing import assert_allclose
from allennlp.commands.train import train_model_from_file
from allennlp.common import Params
from allennlp.common.testing.test_case import AllenNlpTestCase
from allennlp.data import DatasetReader, Vocabulary
from allennlp.data import DataLoader
from allennlp.data.batch import Batch
from allennlp.models import load_archive, Model
from allennlp.training import GradientDescentTrainer
class ModelTestCase(AllenNlpTestCase):
"""
A subclass of [`AllenNlpTestCase`](./test_case.md)
with added methods for testing [`Model`](../../models/model.md) subclasses.
"""
def set_up_model(
self,
param_file: PathLike,
dataset_file: PathLike,
serialization_dir: PathLike = None,
seed: int = None,
):
if seed is not None:
random.seed(seed)
numpy.random.seed(seed)
torch.manual_seed(seed)
self.param_file = str(param_file)
params = Params.from_file(self.param_file)
reader = DatasetReader.from_params(
params["dataset_reader"], serialization_dir=serialization_dir
)
# The dataset reader might be lazy, but a lazy list here breaks some of our tests.
instances = reader.read(str(dataset_file))
# Use parameters for vocabulary if they are present in the config file, so that choices like
# "non_padded_namespaces", "min_count" etc. can be set if needed.
if "vocabulary" in params:
vocab_params = params["vocabulary"]
vocab = Vocabulary.from_params(params=vocab_params, instances=instances)
else:
vocab = Vocabulary.from_instances(instances)
self.vocab = vocab
self.instances = instances
self.instances.index_with(vocab)
self.model = Model.from_params(
vocab=self.vocab, params=params["model"], serialization_dir=serialization_dir
)
# TODO(joelgrus) get rid of these
# (a lot of the model tests use them, so they'll have to be changed)
self.dataset = Batch(list(self.instances))
self.dataset.index_instances(self.vocab)
def ensure_model_can_train_save_and_load(
self,
param_file: Union[PathLike, str],
tolerance: float = 1e-4,
cuda_device: int = -1,
gradients_to_ignore: Set[str] = None,
overrides: str = "",
metric_to_check: str = None,
metric_terminal_value: float = None,
metric_tolerance: float = 1e-4,
disable_dropout: bool = True,
):
"""
# Parameters
param_file : `str`
Path to a training configuration file that we will use to train the model for this
test.
tolerance : `float`, optional (default=`1e-4`)
When comparing model predictions between the originally-trained model and the model
after saving and loading, we will use this tolerance value (passed as `rtol` to
`numpy.testing.assert_allclose`).
cuda_device : `int`, optional (default=`-1`)
The device to run the test on.
gradients_to_ignore : `Set[str]`, optional (default=`None`)
This test runs a gradient check to make sure that we're actually computing gradients
for all of the parameters in the model. If you really want to ignore certain
parameters when doing that check, you can pass their names here. This is not
recommended unless you're `really` sure you don't need to have non-zero gradients for
those parameters (e.g., some of the beam search / state machine models have
infrequently-used parameters that are hard to force the model to use in a small test).
overrides : `str`, optional (default = `""`)
A JSON string that we will use to override values in the input parameter file.
metric_to_check: `str`, optional (default = `None`)
We may want to automatically perform a check that model reaches given metric when
training (on validation set, if it is specified). It may be useful in CI, for example.
You can pass any metric that is in your model returned metrics.
metric_terminal_value: `str`, optional (default = `None`)
When you set `metric_to_check`, you need to set the value this metric must converge to
metric_tolerance: `float`, optional (default=`1e-4`)
Tolerance to check you model metric against metric terminal value. One can expect some
variance in model metrics when the training process is highly stochastic.
disable_dropout : `bool`, optional (default = `True`)
If True we will set all dropout to 0 before checking gradients. (Otherwise, with small
datasets, you may get zero gradients because of unlucky dropout.)
"""
save_dir = self.TEST_DIR / "save_and_load_test"
archive_file = save_dir / "model.tar.gz"
model = train_model_from_file(param_file, save_dir, overrides=overrides)
assert model is not None
metrics_file = save_dir / "metrics.json"
if metric_to_check is not None:
metrics = json.loads(metrics_file.read_text())
metric_value = metrics.get(f"best_validation_{metric_to_check}") or metrics.get(
f"training_{metric_to_check}"
)
assert metric_value is not None, f"Cannot find {metric_to_check} in metrics.json file"
assert metric_terminal_value is not None, "Please specify metric terminal value"
assert abs(metric_value - metric_terminal_value) < metric_tolerance
archive = load_archive(archive_file, cuda_device=cuda_device)
loaded_model = archive.model
state_keys = model.state_dict().keys()
loaded_state_keys = loaded_model.state_dict().keys()
assert state_keys == loaded_state_keys
# First we make sure that the state dict (the parameters) are the same for both models.
for key in state_keys:
assert_allclose(
model.state_dict()[key].cpu().numpy(),
loaded_model.state_dict()[key].cpu().numpy(),
err_msg=key,
)
reader = archive.dataset_reader
params = Params.from_file(param_file, params_overrides=overrides)
print("Reading with original model")
model_dataset = reader.read(params["validation_data_path"])
model_dataset.index_with(model.vocab)
print("Reading with loaded model")
loaded_dataset = reader.read(params["validation_data_path"])
loaded_dataset.index_with(loaded_model.vocab)
# Need to duplicate params because DataLoader.from_params will consume.
data_loader_params = params["data_loader"]
data_loader_params["shuffle"] = False
data_loader_params2 = Params(copy.deepcopy(data_loader_params.as_dict()))
data_loader = DataLoader.from_params(dataset=model_dataset, params=data_loader_params)
data_loader2 = DataLoader.from_params(dataset=loaded_dataset, params=data_loader_params2)
# We'll check that even if we index the dataset with each model separately, we still get
# the same result out.
model_batch = next(iter(data_loader))
loaded_batch = next(iter(data_loader2))
# Check gradients are None for non-trainable parameters and check that
# trainable parameters receive some gradient if they are trainable.
self.check_model_computes_gradients_correctly(
model, model_batch, gradients_to_ignore, disable_dropout
)
# The datasets themselves should be identical.
assert model_batch.keys() == loaded_batch.keys()
for key in model_batch.keys():
self.assert_fields_equal(model_batch[key], loaded_batch[key], key, 1e-6)
# Set eval mode, to turn off things like dropout, then get predictions.
model.eval()
loaded_model.eval()
# Models with stateful RNNs need their states reset to have consistent
# behavior after loading.
for model_ in [model, loaded_model]:
for module in model_.modules():
if hasattr(module, "stateful") and module.stateful:
module.reset_states()
print("Predicting with original model")
model_predictions = model(**model_batch)
print("Predicting with loaded model")
loaded_model_predictions = loaded_model(**loaded_batch)
# Both outputs should have the same keys and the values for these keys should be close.
for key in model_predictions.keys():
self.assert_fields_equal(
model_predictions[key], loaded_model_predictions[key], name=key, tolerance=tolerance
)
# Check loaded model's loss exists and we can compute gradients, for continuing training.
loaded_model.train()
loaded_model_predictions = loaded_model(**loaded_batch)
loaded_model_loss = loaded_model_predictions["loss"]
assert loaded_model_loss is not None
loaded_model_loss.backward()
return model, loaded_model
def ensure_model_can_train(
self,
trainer: GradientDescentTrainer,
gradients_to_ignore: Set[str] = None,
metric_to_check: str = None,
metric_terminal_value: float = None,
metric_tolerance: float = 1e-4,
disable_dropout: bool = True,
):
"""
A simple test for model training behavior when you are not using configuration files. In
this case, we don't have a story around saving and loading models (you need to handle that
yourself), so we don't have tests for that. We just test that the model can train, and that
it computes gradients for all parameters.
Because the `Trainer` already has a reference to a model and to a data loader, we just take
the `Trainer` object itself, and grab the `Model` and other necessary objects from there.
# Parameters
trainer: `GradientDescentTrainer`
The `Trainer` to use for the test, which already has references to a `Model` and a
`DataLoader`, which we will use in the test.
gradients_to_ignore : `Set[str]`, optional (default=`None`)
This test runs a gradient check to make sure that we're actually computing gradients
for all of the parameters in the model. If you really want to ignore certain
parameters when doing that check, you can pass their names here. This is not
recommended unless you're `really` sure you don't need to have non-zero gradients for
those parameters (e.g., some of the beam search / state machine models have
infrequently-used parameters that are hard to force the model to use in a small test).
metric_to_check: `str`, optional (default = `None`)
We may want to automatically perform a check that model reaches given metric when
training (on validation set, if it is specified). It may be useful in CI, for example.
You can pass any metric that is in your model returned metrics.
metric_terminal_value: `str`, optional (default = `None`)
When you set `metric_to_check`, you need to set the value this metric must converge to
metric_tolerance: `float`, optional (default=`1e-4`)
Tolerance to check you model metric against metric terminal value. One can expect some
variance in model metrics when the training process is highly stochastic.
disable_dropout : `bool`, optional (default = `True`)
If True we will set all dropout to 0 before checking gradients. (Otherwise, with small
datasets, you may get zero gradients because of unlucky dropout.)
"""
metrics = trainer.train()
if metric_to_check is not None:
metric_value = metrics.get(f"best_validation_{metric_to_check}") or metrics.get(
f"training_{metric_to_check}"
)
assert metric_value is not None, f"Cannot find {metric_to_check} in metrics.json file"
assert metric_terminal_value is not None, "Please specify metric terminal value"
assert abs(metric_value - metric_terminal_value) < metric_tolerance
model_batch = next(iter(trainer.data_loader))
# Check gradients are None for non-trainable parameters and check that
# trainable parameters receive some gradient if they are trainable.
self.check_model_computes_gradients_correctly(
trainer.model, model_batch, gradients_to_ignore, disable_dropout
)
def assert_fields_equal(self, field1, field2, name: str, tolerance: float = 1e-6) -> None:
if isinstance(field1, torch.Tensor):
assert_allclose(
field1.detach().cpu().numpy(),
field2.detach().cpu().numpy(),
rtol=tolerance,
err_msg=name,
)
elif isinstance(field1, dict):
assert field1.keys() == field2.keys()
for key in field1:
self.assert_fields_equal(
field1[key], field2[key], tolerance=tolerance, name=name + "." + str(key)
)
elif isinstance(field1, (list, tuple)):
assert len(field1) == len(field2)
for i, (subfield1, subfield2) in enumerate(zip(field1, field2)):
self.assert_fields_equal(
subfield1, subfield2, tolerance=tolerance, name=name + f"[{i}]"
)
elif isinstance(field1, (float, int)):
assert_allclose([field1], [field2], rtol=tolerance, err_msg=name)
else:
if field1 != field2:
for key in field1.__dict__:
print(key, getattr(field1, key) == getattr(field2, key))
assert field1 == field2, f"{name}, {type(field1)}, {type(field2)}"
@staticmethod
def check_model_computes_gradients_correctly(
model: Model,
model_batch: Dict[str, Union[Any, Dict[str, Any]]],
params_to_ignore: Set[str] = None,
disable_dropout: bool = True,
):
print("Checking gradients")
for p in model.parameters():
p.grad = None
model.train()
original_dropouts: Dict[str, float] = {}
if disable_dropout:
# Remember original dropouts so we can restore them.
for name, module in model.named_modules():
if isinstance(module, torch.nn.Dropout):
original_dropouts[name] = getattr(module, "p")
setattr(module, "p", 0)
result = model(**model_batch)
result["loss"].backward()
has_zero_or_none_grads = {}
for name, parameter in model.named_parameters():
zeros = torch.zeros(parameter.size())
if params_to_ignore and name in params_to_ignore:
continue
if parameter.requires_grad:
if parameter.grad is None:
has_zero_or_none_grads[
name
] = "No gradient computed (i.e parameter.grad is None)"
elif parameter.grad.is_sparse or parameter.grad.data.is_sparse:
pass
# Some parameters will only be partially updated,
# like embeddings, so we just check that any gradient is non-zero.
elif (parameter.grad.cpu() == zeros).all():
has_zero_or_none_grads[
name
] = f"zeros with shape ({tuple(parameter.grad.size())})"
else:
assert parameter.grad is None
if has_zero_or_none_grads:
for name, grad in has_zero_or_none_grads.items():
print(f"Parameter: {name} had incorrect gradient: {grad}")
raise Exception("Incorrect gradients found. See stdout for more info.")
# Now restore dropouts if we disabled them.
if disable_dropout:
for name, module in model.named_modules():
if name in original_dropouts:
setattr(module, "p", original_dropouts[name])
def ensure_batch_predictions_are_consistent(self, keys_to_ignore: Iterable[str] = ()):
"""
Ensures that the model performs the same on a batch of instances as on individual instances.
Ignores metrics matching the regexp .*loss.* and those specified explicitly.
# Parameters
keys_to_ignore : `Iterable[str]`, optional (default=`()`)
Names of metrics that should not be taken into account, e.g. "batch_weight".
"""
self.model.eval()
single_predictions = []
for i, instance in enumerate(self.instances):
dataset = Batch([instance])
tensors = dataset.as_tensor_dict(dataset.get_padding_lengths())
result = self.model(**tensors)
single_predictions.append(result)
full_dataset = Batch(self.instances)
batch_tensors = full_dataset.as_tensor_dict(full_dataset.get_padding_lengths())
batch_predictions = self.model(**batch_tensors)
for i, instance_predictions in enumerate(single_predictions):
for key, single_predicted in instance_predictions.items():
tolerance = 1e-6
if "loss" in key:
# Loss is particularly unstable; we'll just be satisfied if everything else is
# close.
continue
if key in keys_to_ignore:
continue
single_predicted = single_predicted[0]
batch_predicted = batch_predictions[key][i]
if isinstance(single_predicted, torch.Tensor):
if single_predicted.size() != batch_predicted.size():
slices = tuple(slice(0, size) for size in single_predicted.size())
batch_predicted = batch_predicted[slices]
assert_allclose(
single_predicted.data.numpy(),
batch_predicted.data.numpy(),
atol=tolerance,
err_msg=key,
)
else:
assert single_predicted == batch_predicted, key
| allennlp-master | allennlp/common/testing/model_test_case.py |
"""
Utilities and helpers for writing tests.
"""
from typing import Dict, Any, Optional, Union, Tuple, List
import torch
from torch.testing import assert_allclose
import pytest
from allennlp.common.testing.test_case import AllenNlpTestCase
from allennlp.common.testing.model_test_case import ModelTestCase
from allennlp.common.testing.distributed_test import run_distributed_test
from allennlp.training.metrics import Metric
_available_devices = ["cpu"] + (["cuda"] if torch.cuda.is_available() else [])
def multi_device(test_method):
"""
Decorator that provides an argument `device` of type `str` for each available PyTorch device.
"""
return pytest.mark.parametrize("device", _available_devices)(pytest.mark.gpu(test_method))
def requires_gpu(test_method):
"""
Decorator to indicate that a test requires a GPU device.
"""
return pytest.mark.gpu(
pytest.mark.skipif(not torch.cuda.is_available(), reason="No CUDA device registered.")(
test_method
)
)
def requires_multi_gpu(test_method):
"""
Decorator to indicate that a test requires multiple GPU devices.
"""
return pytest.mark.gpu(
pytest.mark.skipif(torch.cuda.device_count() < 2, reason="2 or more GPUs required.")(
test_method
)
)
def cpu_or_gpu(test_method):
"""
Decorator to indicate that a test should run on both CPU and GPU
"""
return pytest.mark.gpu(test_method)
# Helpers for testing distributed metrics
def assert_metrics_values(
metrics: Dict[str, Any],
desired_values: Dict[str, Any],
rtol: float = 0.0001,
atol: float = 1e-05,
):
for key in metrics:
assert_allclose(metrics[key], desired_values[key], rtol=rtol, atol=atol)
def global_distributed_metric(
global_rank: int,
world_size: int,
gpu_id: Union[int, torch.device],
metric: Metric,
metric_kwargs: Dict[str, List[Any]],
desired_values: Dict[str, Any],
exact: Union[bool, Tuple[float, float]] = True,
):
kwargs = {}
# Use the arguments meant for the process with rank `global_rank`.
for argname in metric_kwargs:
kwargs[argname] = metric_kwargs[argname][global_rank]
metric(**kwargs)
metrics = metric.get_metric(False)
if not isinstance(metrics, Dict) and not isinstance(desired_values, Dict):
metrics = {"metric_value": metrics}
desired_values = {"metric_value": desired_values}
# Call `assertion_metrics_values` to check if the metrics have the desired values.
if isinstance(exact, bool):
if exact:
rtol = 0.0
atol = 0.0
else:
rtol = 0.0001
atol = 1e-05
else:
rtol = exact[0]
atol = exact[1]
assert_metrics_values(metrics, desired_values, rtol, atol) # type: ignore
| allennlp-master | allennlp/common/testing/__init__.py |
import logging
import os
import pathlib
import shutil
import tempfile
from allennlp.common.checks import log_pytorch_version_info
TEST_DIR = tempfile.mkdtemp(prefix="allennlp_tests")
class AllenNlpTestCase:
"""
A custom testing class that disables some of the more verbose AllenNLP
logging and that creates and destroys a temp directory as a test fixture.
"""
PROJECT_ROOT = (pathlib.Path(__file__).parent / ".." / ".." / "..").resolve()
MODULE_ROOT = PROJECT_ROOT / "allennlp"
TOOLS_ROOT = MODULE_ROOT / "tools"
TESTS_ROOT = PROJECT_ROOT / "tests"
FIXTURES_ROOT = PROJECT_ROOT / "test_fixtures"
def setup_method(self):
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", level=logging.DEBUG
)
# Disabling some of the more verbose logging statements that typically aren't very helpful
# in tests.
logging.getLogger("allennlp.common.params").disabled = True
logging.getLogger("allennlp.nn.initializers").disabled = True
logging.getLogger("allennlp.modules.token_embedders.embedding").setLevel(logging.INFO)
logging.getLogger("urllib3.connectionpool").disabled = True
log_pytorch_version_info()
self.TEST_DIR = pathlib.Path(TEST_DIR)
os.makedirs(self.TEST_DIR, exist_ok=True)
def teardown_method(self):
shutil.rmtree(self.TEST_DIR)
| allennlp-master | allennlp/common/testing/test_case.py |
from allennlp.predictors import TextClassifierPredictor
from allennlp.models.model import Model
import torch
class FakeModelForTestingInterpret(Model):
def __init__(self, vocab, max_tokens=7, num_labels=2):
super().__init__(vocab)
self._max_tokens = max_tokens
self.embedder = torch.nn.Embedding(vocab.get_vocab_size(), 16)
self.linear = torch.nn.Linear(max_tokens * 16, num_labels)
self._loss = torch.nn.CrossEntropyLoss()
def forward(self, tokens, label=None):
tokens = tokens["tokens"]["tokens"][:, 0 : self._max_tokens]
embedded = self.embedder(tokens)
logits = self.linear(torch.flatten(embedded).unsqueeze(0))
probs = torch.nn.functional.softmax(logits, dim=-1)
output_dict = {"logits": logits, "probs": probs}
if label is not None:
output_dict["loss"] = self._loss(logits, label.long().view(-1))
return output_dict
def make_output_human_readable(self, output_dict):
preds = output_dict["probs"]
if len(preds.shape) == 1:
output_dict["probs"] = preds.unsqueeze(0)
output_dict["logits"] = output_dict["logits"].unsqueeze(0)
classes = []
for prediction in output_dict["probs"]:
label_idx = prediction.argmax(dim=-1).item()
output_dict["loss"] = self._loss(output_dict["logits"], torch.LongTensor([label_idx]))
label_str = str(label_idx)
classes.append(label_str)
output_dict["label"] = classes
return output_dict
class FakePredictorForTestingInterpret(TextClassifierPredictor):
def get_interpretable_layer(self):
return self._model.embedder
def get_interpretable_text_field_embedder(self):
return self._model.embedder
| allennlp-master | allennlp/common/testing/interpret_test.py |
import datetime
from typing import List, Dict, Any, Tuple, Callable
import torch
import torch.distributed as dist
import torch.multiprocessing as mp
from allennlp.common.checks import check_for_gpu
def init_process(
process_rank: int,
world_size: int,
distributed_device_ids: List[int],
func: Callable,
func_args: Tuple = None,
func_kwargs: Dict[str, Any] = None,
master_addr: str = "127.0.0.1",
master_port: int = 29500,
):
assert world_size > 1
global_rank = process_rank
gpu_id = distributed_device_ids[process_rank] # type: ignore
if gpu_id >= 0:
torch.cuda.set_device(int(gpu_id))
dist.init_process_group(
backend="nccl",
init_method=f"tcp://{master_addr}:{master_port}",
world_size=world_size,
rank=global_rank,
)
else:
dist.init_process_group(
backend="gloo",
init_method=f"tcp://{master_addr}:{master_port}",
world_size=world_size,
rank=global_rank,
timeout=datetime.timedelta(seconds=120),
)
func(global_rank, world_size, gpu_id, *(func_args or []), **(func_kwargs or {}))
dist.barrier()
def run_distributed_test(
device_ids: List[int] = None,
func: Callable = None,
*args,
**kwargs,
):
"""
This runs the `func` in a simulated distributed environment.
# Parameters
device_ids: `List[int]`
List of devices. There need to be at least 2 devices. Default is [-1, -1].
func: `Callable`
`func` needs to be global for spawning the processes, so that it can be pickled.
"""
device_ids = device_ids or [-1, -1]
check_for_gpu(device_ids)
# "fork" start method is the default and should be preferred, except when we're
# running the tests on GPU, in which case we need to use "spawn".
start_method = "spawn" if any(x >= 0 for x in device_ids) else "fork"
nprocs = world_size = len(device_ids)
mp.start_processes(
init_process,
args=(world_size, device_ids, func, args, kwargs),
nprocs=nprocs,
start_method=start_method,
)
| allennlp-master | allennlp/common/testing/distributed_test.py |
from allennlp.interpret.attackers.attacker import Attacker
from allennlp.interpret.saliency_interpreters.saliency_interpreter import SaliencyInterpreter
| allennlp-master | allennlp/interpret/__init__.py |
import math
from typing import List, Dict, Any
import numpy
import torch
from allennlp.common.util import JsonDict, sanitize
from allennlp.data import Instance
from allennlp.interpret.saliency_interpreters.saliency_interpreter import SaliencyInterpreter
from allennlp.nn import util
@SaliencyInterpreter.register("integrated-gradient")
class IntegratedGradient(SaliencyInterpreter):
"""
Interprets the prediction using Integrated Gradients (https://arxiv.org/abs/1703.01365)
Registered as a `SaliencyInterpreter` with name "integrated-gradient".
"""
def saliency_interpret_from_json(self, inputs: JsonDict) -> JsonDict:
# Convert inputs to labeled instances
labeled_instances = self.predictor.json_to_labeled_instances(inputs)
instances_with_grads = dict()
for idx, instance in enumerate(labeled_instances):
# Run integrated gradients
grads = self._integrate_gradients(instance)
# Normalize results
for key, grad in grads.items():
# The [0] here is undo-ing the batching that happens in get_gradients.
embedding_grad = numpy.sum(grad[0], axis=1)
norm = numpy.linalg.norm(embedding_grad, ord=1)
normalized_grad = [math.fabs(e) / norm for e in embedding_grad]
grads[key] = normalized_grad
instances_with_grads["instance_" + str(idx + 1)] = grads
return sanitize(instances_with_grads)
def _register_hooks(self, alpha: int, embeddings_list: List, token_offsets: List):
"""
Register a forward hook on the embedding layer which scales the embeddings by alpha. Used
for one term in the Integrated Gradients sum.
We store the embedding output into the embeddings_list when alpha is zero. This is used
later to element-wise multiply the input by the averaged gradients.
"""
def forward_hook(module, inputs, output):
# Save the input for later use. Only do so on first call.
if alpha == 0:
embeddings_list.append(output.squeeze(0).clone().detach())
# Scale the embedding by alpha
output.mul_(alpha)
def get_token_offsets(module, inputs, outputs):
offsets = util.get_token_offsets_from_text_field_inputs(inputs)
if offsets is not None:
token_offsets.append(offsets)
# Register the hooks
handles = []
embedding_layer = self.predictor.get_interpretable_layer()
handles.append(embedding_layer.register_forward_hook(forward_hook))
text_field_embedder = self.predictor.get_interpretable_text_field_embedder()
handles.append(text_field_embedder.register_forward_hook(get_token_offsets))
return handles
def _integrate_gradients(self, instance: Instance) -> Dict[str, numpy.ndarray]:
"""
Returns integrated gradients for the given [`Instance`](../../data/instance.md)
"""
ig_grads: Dict[str, Any] = {}
# List of Embedding inputs
embeddings_list: List[torch.Tensor] = []
token_offsets: List[torch.Tensor] = []
# Use 10 terms in the summation approximation of the integral in integrated grad
steps = 10
# Exclude the endpoint because we do a left point integral approximation
for alpha in numpy.linspace(0, 1.0, num=steps, endpoint=False):
handles = []
# Hook for modifying embedding value
handles = self._register_hooks(alpha, embeddings_list, token_offsets)
try:
grads = self.predictor.get_gradients([instance])[0]
finally:
for handle in handles:
handle.remove()
# Running sum of gradients
if ig_grads == {}:
ig_grads = grads
else:
for key in grads.keys():
ig_grads[key] += grads[key]
# Average of each gradient term
for key in ig_grads.keys():
ig_grads[key] /= steps
# Gradients come back in the reverse order that they were sent into the network
embeddings_list.reverse()
token_offsets.reverse()
embeddings_list = self._aggregate_token_embeddings(embeddings_list, token_offsets)
# Element-wise multiply average gradient by the input
for idx, input_embedding in enumerate(embeddings_list):
key = "grad_input_" + str(idx + 1)
ig_grads[key] *= input_embedding
return ig_grads
| allennlp-master | allennlp/interpret/saliency_interpreters/integrated_gradient.py |
from typing import List
import numpy
import torch
from allennlp.common import Registrable
from allennlp.common.util import JsonDict
from allennlp.nn import util
from allennlp.predictors import Predictor
class SaliencyInterpreter(Registrable):
"""
A `SaliencyInterpreter` interprets an AllenNLP Predictor's outputs by assigning a saliency
score to each input token.
"""
def __init__(self, predictor: Predictor) -> None:
self.predictor = predictor
def saliency_interpret_from_json(self, inputs: JsonDict) -> JsonDict:
"""
This function finds saliency values for each input token.
# Parameters
inputs : `JsonDict`
The input you want to interpret (the same as the argument to a Predictor, e.g., predict_json()).
# Returns
interpretation : `JsonDict`
Contains the normalized saliency values for each input token. The dict has entries for
each instance in the inputs JsonDict, e.g., `{instance_1: ..., instance_2:, ... }`.
Each one of those entries has entries for the saliency of the inputs, e.g.,
`{grad_input_1: ..., grad_input_2: ... }`.
"""
raise NotImplementedError("Implement this for saliency interpretations")
@staticmethod
def _aggregate_token_embeddings(
embeddings_list: List[torch.Tensor], token_offsets: List[torch.Tensor]
) -> List[numpy.ndarray]:
if len(token_offsets) == 0:
return [embeddings.numpy() for embeddings in embeddings_list]
aggregated_embeddings = []
# NOTE: This is assuming that embeddings and offsets come in the same order, which may not
# be true. But, the intersection of using multiple TextFields with mismatched indexers is
# currently zero, so we'll delay handling this corner case until it actually causes a
# problem. In practice, both of these lists will always be of size one at the moment.
for embeddings, offsets in zip(embeddings_list, token_offsets):
span_embeddings, span_mask = util.batched_span_select(embeddings.contiguous(), offsets)
span_mask = span_mask.unsqueeze(-1)
span_embeddings *= span_mask # zero out paddings
span_embeddings_sum = span_embeddings.sum(2)
span_embeddings_len = span_mask.sum(2)
# Shape: (batch_size, num_orig_tokens, embedding_size)
embeddings = span_embeddings_sum / torch.clamp_min(span_embeddings_len, 1)
# All the places where the span length is zero, write in zeros.
embeddings[(span_embeddings_len == 0).expand(embeddings.shape)] = 0
aggregated_embeddings.append(embeddings.numpy())
return aggregated_embeddings
| allennlp-master | allennlp/interpret/saliency_interpreters/saliency_interpreter.py |
import math
from typing import Dict, Any
import numpy
import torch
from allennlp.common.util import JsonDict, sanitize
from allennlp.data import Instance
from allennlp.interpret.saliency_interpreters.saliency_interpreter import SaliencyInterpreter
from allennlp.predictors import Predictor
@SaliencyInterpreter.register("smooth-gradient")
class SmoothGradient(SaliencyInterpreter):
"""
Interprets the prediction using SmoothGrad (https://arxiv.org/abs/1706.03825)
Registered as a `SaliencyInterpreter` with name "smooth-gradient".
"""
def __init__(self, predictor: Predictor) -> None:
super().__init__(predictor)
# Hyperparameters
self.stdev = 0.01
self.num_samples = 10
def saliency_interpret_from_json(self, inputs: JsonDict) -> JsonDict:
# Convert inputs to labeled instances
labeled_instances = self.predictor.json_to_labeled_instances(inputs)
instances_with_grads = dict()
for idx, instance in enumerate(labeled_instances):
# Run smoothgrad
grads = self._smooth_grads(instance)
# Normalize results
for key, grad in grads.items():
# TODO (@Eric-Wallace), SmoothGrad is not using times input normalization.
# Fine for now, but should fix for consistency.
# The [0] here is undo-ing the batching that happens in get_gradients.
embedding_grad = numpy.sum(grad[0], axis=1)
norm = numpy.linalg.norm(embedding_grad, ord=1)
normalized_grad = [math.fabs(e) / norm for e in embedding_grad]
grads[key] = normalized_grad
instances_with_grads["instance_" + str(idx + 1)] = grads
return sanitize(instances_with_grads)
def _register_forward_hook(self, stdev: float):
"""
Register a forward hook on the embedding layer which adds random noise to every embedding.
Used for one term in the SmoothGrad sum.
"""
def forward_hook(module, inputs, output):
# Random noise = N(0, stdev * (max-min))
scale = output.detach().max() - output.detach().min()
noise = torch.randn(output.shape, device=output.device) * stdev * scale
# Add the random noise
output.add_(noise)
# Register the hook
embedding_layer = self.predictor.get_interpretable_layer()
handle = embedding_layer.register_forward_hook(forward_hook)
return handle
def _smooth_grads(self, instance: Instance) -> Dict[str, numpy.ndarray]:
total_gradients: Dict[str, Any] = {}
for _ in range(self.num_samples):
handle = self._register_forward_hook(self.stdev)
try:
grads = self.predictor.get_gradients([instance])[0]
finally:
handle.remove()
# Sum gradients
if total_gradients == {}:
total_gradients = grads
else:
for key in grads.keys():
total_gradients[key] += grads[key]
# Average the gradients
for key in total_gradients.keys():
total_gradients[key] /= self.num_samples
return total_gradients
| allennlp-master | allennlp/interpret/saliency_interpreters/smooth_gradient.py |
import math
from typing import List
import numpy
import torch
from allennlp.common.util import JsonDict, sanitize
from allennlp.interpret.saliency_interpreters.saliency_interpreter import SaliencyInterpreter
from allennlp.nn import util
@SaliencyInterpreter.register("simple-gradient")
class SimpleGradient(SaliencyInterpreter):
"""
Registered as a `SaliencyInterpreter` with name "simple-gradient".
"""
def saliency_interpret_from_json(self, inputs: JsonDict) -> JsonDict:
"""
Interprets the model's prediction for inputs. Gets the gradients of the logits with respect
to the input and returns those gradients normalized and sanitized.
"""
labeled_instances = self.predictor.json_to_labeled_instances(inputs)
instances_with_grads = dict()
for idx, instance in enumerate(labeled_instances):
# List of embedding inputs, used for multiplying gradient by the input for normalization
embeddings_list: List[torch.Tensor] = []
token_offsets: List[torch.Tensor] = []
# Hook used for saving embeddings
handles = self._register_hooks(embeddings_list, token_offsets)
try:
grads = self.predictor.get_gradients([instance])[0]
finally:
for handle in handles:
handle.remove()
# Gradients come back in the reverse order that they were sent into the network
embeddings_list.reverse()
token_offsets.reverse()
embeddings_list = self._aggregate_token_embeddings(embeddings_list, token_offsets)
for key, grad in grads.items():
# Get number at the end of every gradient key (they look like grad_input_[int],
# we're getting this [int] part and subtracting 1 for zero-based indexing).
# This is then used as an index into the reversed input array to match up the
# gradient and its respective embedding.
input_idx = int(key[-1]) - 1
# The [0] here is undo-ing the batching that happens in get_gradients.
emb_grad = numpy.sum(grad[0] * embeddings_list[input_idx][0], axis=1)
norm = numpy.linalg.norm(emb_grad, ord=1)
normalized_grad = [math.fabs(e) / norm for e in emb_grad]
grads[key] = normalized_grad
instances_with_grads["instance_" + str(idx + 1)] = grads
return sanitize(instances_with_grads)
def _register_hooks(self, embeddings_list: List, token_offsets: List):
"""
Finds all of the TextFieldEmbedders, and registers a forward hook onto them. When forward()
is called, embeddings_list is filled with the embedding values. This is necessary because
our normalization scheme multiplies the gradient by the embedding value.
"""
def forward_hook(module, inputs, output):
embeddings_list.append(output.squeeze(0).clone().detach())
def get_token_offsets(module, inputs, outputs):
offsets = util.get_token_offsets_from_text_field_inputs(inputs)
if offsets is not None:
token_offsets.append(offsets)
# Register the hooks
handles = []
embedding_layer = self.predictor.get_interpretable_layer()
handles.append(embedding_layer.register_forward_hook(forward_hook))
text_field_embedder = self.predictor.get_interpretable_text_field_embedder()
handles.append(text_field_embedder.register_forward_hook(get_token_offsets))
return handles
| allennlp-master | allennlp/interpret/saliency_interpreters/simple_gradient.py |
from allennlp.interpret.saliency_interpreters.saliency_interpreter import SaliencyInterpreter
from allennlp.interpret.saliency_interpreters.simple_gradient import SimpleGradient
from allennlp.interpret.saliency_interpreters.integrated_gradient import IntegratedGradient
from allennlp.interpret.saliency_interpreters.smooth_gradient import SmoothGradient
| allennlp-master | allennlp/interpret/saliency_interpreters/__init__.py |
from allennlp.interpret.attackers.attacker import Attacker
from allennlp.interpret.attackers.input_reduction import InputReduction
from allennlp.interpret.attackers.hotflip import Hotflip
| allennlp-master | allennlp/interpret/attackers/__init__.py |
from allennlp.common.util import JsonDict
from allennlp.data import Instance
def get_fields_to_compare(
inputs: JsonDict, instance: Instance, input_field_to_attack: str
) -> JsonDict:
"""
Gets a list of the fields that should be checked for equality after an attack is performed.
# Parameters
inputs : `JsonDict`
The input you want to attack, similar to the argument to a Predictor, e.g., predict_json().
instance : `Instance`
A labeled instance that is output from json_to_labeled_instances().
input_field_to_attack : `str`
The key in the inputs JsonDict you want to attack, e.g., tokens.
# Returns
fields : `JsonDict`
The fields that must be compared for equality.
"""
# TODO(mattg): this really should live on the Predictor. We have some messy stuff for, e.g.,
# reading comprehension models, and the interpret code can't really know about the internals of
# that (or at least it shouldn't now, and once we split out the reading comprehension repo, it
# really *can't*).
fields_to_compare = {
key: instance[key]
for key in instance.fields
if key not in inputs
and key != input_field_to_attack
and key != "metadata"
and key != "output"
}
return fields_to_compare
def instance_has_changed(instance: Instance, fields_to_compare: JsonDict):
if "clusters" in fields_to_compare:
# Coref needs a special case here, apparently. I (mattg) am not sure why the check below
# doesn't catch this case; TODO: look into this.
original_clusters = set(tuple(x) for x in fields_to_compare["clusters"])
new_clusters = set(tuple(x) for x in instance["clusters"]) # type: ignore
return original_clusters != new_clusters
if any(instance[field] != fields_to_compare[field] for field in fields_to_compare):
return True
return False
| allennlp-master | allennlp/interpret/attackers/utils.py |
from copy import deepcopy
from typing import Dict, List, Tuple
import numpy
import torch
from allennlp.common.util import JsonDict, sanitize
from allennlp.data import Instance, Token
from allennlp.data.fields import TextField
from allennlp.data.token_indexers import (
ELMoTokenCharactersIndexer,
TokenCharactersIndexer,
SingleIdTokenIndexer,
)
from allennlp.interpret.attackers import utils
from allennlp.interpret.attackers.attacker import Attacker
from allennlp.modules.token_embedders import Embedding
from allennlp.nn import util
from allennlp.predictors.predictor import Predictor
DEFAULT_IGNORE_TOKENS = ["@@NULL@@", ".", ",", ";", "!", "?", "[MASK]", "[SEP]", "[CLS]"]
@Attacker.register("hotflip")
class Hotflip(Attacker):
"""
Runs the HotFlip style attack at the word-level https://arxiv.org/abs/1712.06751. We use the
first-order taylor approximation described in https://arxiv.org/abs/1903.06620, in the function
`_first_order_taylor()`.
We try to re-use the embedding matrix from the model when deciding what other words to flip a
token to. For a large class of models, this is straightforward. When there is a
character-level encoder, however (e.g., with ELMo, any char-CNN, etc.), or a combination of
encoders (e.g., ELMo + glove), we need to construct a fake embedding matrix that we can use in
`_first_order_taylor()`. We do this by getting a list of words from the model's vocabulary
and embedding them using the encoder. This can be expensive, both in terms of time and memory
usage, so we take a `max_tokens` parameter to limit the size of this fake embedding matrix.
This also requires a model to `have` a token vocabulary in the first place, which can be
problematic for models that only have character vocabularies.
Registered as an `Attacker` with name "hotflip".
# Parameters
predictor : `Predictor`
The model (inside a Predictor) that we're attacking. We use this to get gradients and
predictions.
vocab_namespace : `str`, optional (default=`'tokens'`)
We use this to know three things: (1) which tokens we should ignore when producing flips
(we don't consider non-alphanumeric tokens); (2) what the string value is of the token that
we produced, so we can show something human-readable to the user; and (3) if we need to
construct a fake embedding matrix, we use the tokens in the vocabulary as flip candidates.
max_tokens : `int`, optional (default=`5000`)
This is only used when we need to construct a fake embedding matrix. That matrix can take
a lot of memory when the vocab size is large. This parameter puts a cap on the number of
tokens to use, so the fake embedding matrix doesn't take as much memory.
"""
def __init__(
self, predictor: Predictor, vocab_namespace: str = "tokens", max_tokens: int = 5000
) -> None:
super().__init__(predictor)
self.vocab = self.predictor._model.vocab
self.namespace = vocab_namespace
# Force new tokens to be alphanumeric
self.max_tokens = max_tokens
self.invalid_replacement_indices: List[int] = []
for i in self.vocab._index_to_token[self.namespace]:
if not self.vocab._index_to_token[self.namespace][i].isalnum():
self.invalid_replacement_indices.append(i)
self.embedding_matrix: torch.Tensor = None
self.embedding_layer: torch.nn.Module = None
# get device number
self.cuda_device = predictor.cuda_device
def initialize(self):
"""
Call this function before running attack_from_json(). We put the call to
`_construct_embedding_matrix()` in this function to prevent a large amount of compute
being done when __init__() is called.
"""
if self.embedding_matrix is None:
self.embedding_matrix = self._construct_embedding_matrix()
def _construct_embedding_matrix(self) -> Embedding:
"""
For HotFlip, we need a word embedding matrix to search over. The below is necessary for
models such as ELMo, character-level models, or for models that use a projection layer
after their word embeddings.
We run all of the tokens from the vocabulary through the TextFieldEmbedder, and save the
final output embedding. We then group all of those output embeddings into an "embedding
matrix".
"""
embedding_layer = self.predictor.get_interpretable_layer()
self.embedding_layer = embedding_layer
if isinstance(embedding_layer, (Embedding, torch.nn.modules.sparse.Embedding)):
# If we're using something that already has an only embedding matrix, we can just use
# that and bypass this method.
return embedding_layer.weight
# We take the top `self.max_tokens` as candidates for hotflip. Because we have to
# construct a new vector for each of these, we can't always afford to use the whole vocab,
# for both runtime and memory considerations.
all_tokens = list(self.vocab._token_to_index[self.namespace])[: self.max_tokens]
max_index = self.vocab.get_token_index(all_tokens[-1], self.namespace)
self.invalid_replacement_indices = [
i for i in self.invalid_replacement_indices if i < max_index
]
inputs = self._make_embedder_input(all_tokens)
# pass all tokens through the fake matrix and create an embedding out of it.
embedding_matrix = embedding_layer(inputs).squeeze()
return embedding_matrix
def _make_embedder_input(self, all_tokens: List[str]) -> Dict[str, torch.Tensor]:
inputs = {}
# A bit of a hack; this will only work with some dataset readers, but it'll do for now.
indexers = self.predictor._dataset_reader._token_indexers # type: ignore
for indexer_name, token_indexer in indexers.items():
if isinstance(token_indexer, SingleIdTokenIndexer):
all_indices = [
self.vocab._token_to_index[self.namespace][token] for token in all_tokens
]
inputs[indexer_name] = {"tokens": torch.LongTensor(all_indices).unsqueeze(0)}
elif isinstance(token_indexer, TokenCharactersIndexer):
tokens = [Token(x) for x in all_tokens]
max_token_length = max(len(x) for x in all_tokens)
# sometime max_token_length is too short for cnn encoder
max_token_length = max(max_token_length, token_indexer._min_padding_length)
indexed_tokens = token_indexer.tokens_to_indices(tokens, self.vocab)
padding_lengths = token_indexer.get_padding_lengths(indexed_tokens)
padded_tokens = token_indexer.as_padded_tensor_dict(indexed_tokens, padding_lengths)
inputs[indexer_name] = {
"token_characters": torch.LongTensor(
padded_tokens["token_characters"]
).unsqueeze(0)
}
elif isinstance(token_indexer, ELMoTokenCharactersIndexer):
elmo_tokens = []
for token in all_tokens:
elmo_indexed_token = token_indexer.tokens_to_indices(
[Token(text=token)], self.vocab
)["elmo_tokens"]
elmo_tokens.append(elmo_indexed_token[0])
inputs[indexer_name] = {"elmo_tokens": torch.LongTensor(elmo_tokens).unsqueeze(0)}
else:
raise RuntimeError("Unsupported token indexer:", token_indexer)
return util.move_to_device(inputs, self.cuda_device)
def attack_from_json(
self,
inputs: JsonDict,
input_field_to_attack: str = "tokens",
grad_input_field: str = "grad_input_1",
ignore_tokens: List[str] = None,
target: JsonDict = None,
) -> JsonDict:
"""
Replaces one token at a time from the input until the model's prediction changes.
`input_field_to_attack` is for example `tokens`, it says what the input field is
called. `grad_input_field` is for example `grad_input_1`, which is a key into a grads
dictionary.
The method computes the gradient w.r.t. the tokens, finds the token with the maximum
gradient (by L2 norm), and replaces it with another token based on the first-order Taylor
approximation of the loss. This process is iteratively repeated until the prediction
changes. Once a token is replaced, it is not flipped again.
# Parameters
inputs : `JsonDict`
The model inputs, the same as what is passed to a `Predictor`.
input_field_to_attack : `str`, optional (default=`'tokens'`)
The field that has the tokens that we're going to be flipping. This must be a
`TextField`.
grad_input_field : `str`, optional (default=`'grad_input_1'`)
If there is more than one field that gets embedded in your model (e.g., a question and
a passage, or a premise and a hypothesis), this tells us the key to use to get the
correct gradients. This selects from the output of :func:`Predictor.get_gradients`.
ignore_tokens : `List[str]`, optional (default=`DEFAULT_IGNORE_TOKENS`)
These tokens will not be flipped. The default list includes some simple punctuation,
OOV and padding tokens, and common control tokens for BERT, etc.
target : `JsonDict`, optional (default=`None`)
If given, this will be a `targeted` hotflip attack, where instead of just trying to
change a model's prediction from what it current is predicting, we try to change it to
a `specific` target value. This is a `JsonDict` because it needs to specify the
field name and target value. For example, for a masked LM, this would be something
like `{"words": ["she"]}`, because `"words"` is the field name, there is one mask
token (hence the list of length one), and we want to change the prediction from
whatever it was to `"she"`.
"""
instance = self.predictor._json_to_instance(inputs)
if target is None:
output_dict = self.predictor._model.forward_on_instance(instance)
else:
output_dict = target
# This now holds the predictions that we want to change (either away from or towards,
# depending on whether `target` was passed). We'll use this in the loop below to check for
# when we've met our stopping criterion.
original_instances = self.predictor.predictions_to_labeled_instances(instance, output_dict)
# This is just for ease of access in the UI, so we know the original tokens. It's not used
# in the logic below.
original_text_field: TextField = original_instances[0][ # type: ignore
input_field_to_attack
]
original_tokens = deepcopy(original_text_field.tokens)
final_tokens = []
final_outputs = []
# `original_instances` is a list because there might be several different predictions that
# we're trying to attack (e.g., all of the NER tags for an input sentence). We attack them
# one at a time.
for instance in original_instances:
tokens, outputs = self.attack_instance(
instance=instance,
inputs=inputs,
input_field_to_attack=input_field_to_attack,
grad_input_field=grad_input_field,
ignore_tokens=ignore_tokens,
target=target,
)
final_tokens.append(tokens)
final_outputs.append(outputs)
return sanitize(
{"final": final_tokens, "original": original_tokens, "outputs": final_outputs}
)
def attack_instance(
self,
instance: Instance,
inputs: JsonDict,
input_field_to_attack: str = "tokens",
grad_input_field: str = "grad_input_1",
ignore_tokens: List[str] = None,
target: JsonDict = None,
) -> Tuple[List[Token], JsonDict]:
if self.embedding_matrix is None:
self.initialize()
ignore_tokens = DEFAULT_IGNORE_TOKENS if ignore_tokens is None else ignore_tokens
# If `target` is `None`, we move away from the current prediction, otherwise we move
# _towards_ the target.
sign = -1 if target is None else 1
# Gets a list of the fields that we want to check to see if they change.
fields_to_compare = utils.get_fields_to_compare(inputs, instance, input_field_to_attack)
# We'll be modifying the tokens in this text field below, and grabbing the modified
# list after the `while` loop.
text_field: TextField = instance[input_field_to_attack] # type: ignore
# Because we can save computation by getting grads and outputs at the same time, we do
# them together at the end of the loop, even though we use grads at the beginning and
# outputs at the end. This is our initial gradient for the beginning of the loop. The
# output can be ignored here.
grads, outputs = self.predictor.get_gradients([instance])
# Ignore any token that is in the ignore_tokens list by setting the token to already
# flipped.
flipped: List[int] = []
for index, token in enumerate(text_field.tokens):
if token.text in ignore_tokens:
flipped.append(index)
if "clusters" in outputs:
# Coref unfortunately needs a special case here. We don't want to flip words in
# the same predicted coref cluster, but we can't really specify a list of tokens,
# because, e.g., "he" could show up in several different clusters.
# TODO(mattg): perhaps there's a way to get `predictions_to_labeled_instances` to
# return the set of tokens that shouldn't be changed for each instance? E.g., you
# could imagine setting a field on the `Token` object, that we could then read
# here...
for cluster in outputs["clusters"]:
for mention in cluster:
for index in range(mention[0], mention[1] + 1):
flipped.append(index)
while True:
# Compute L2 norm of all grads.
grad = grads[grad_input_field][0]
grads_magnitude = [g.dot(g) for g in grad]
# only flip a token once
for index in flipped:
grads_magnitude[index] = -1
# We flip the token with highest gradient norm.
index_of_token_to_flip = numpy.argmax(grads_magnitude)
if grads_magnitude[index_of_token_to_flip] == -1:
# If we've already flipped all of the tokens, we give up.
break
flipped.append(index_of_token_to_flip)
text_field_tensors = text_field.as_tensor(text_field.get_padding_lengths())
input_tokens = util.get_token_ids_from_text_field_tensors(text_field_tensors)
original_id_of_token_to_flip = input_tokens[index_of_token_to_flip]
# Get new token using taylor approximation.
new_id = self._first_order_taylor(
grad[index_of_token_to_flip], original_id_of_token_to_flip, sign
)
# Flip token. We need to tell the instance to re-index itself, so the text field
# will actually update.
new_token = Token(self.vocab._index_to_token[self.namespace][new_id]) # type: ignore
text_field.tokens[index_of_token_to_flip] = new_token
instance.indexed = False
# Get model predictions on instance, and then label the instances
grads, outputs = self.predictor.get_gradients([instance]) # predictions
for key, output in outputs.items():
if isinstance(output, torch.Tensor):
outputs[key] = output.detach().cpu().numpy().squeeze()
elif isinstance(output, list):
outputs[key] = output[0]
# TODO(mattg): taking the first result here seems brittle, if we're in a case where
# there are multiple predictions.
labeled_instance = self.predictor.predictions_to_labeled_instances(instance, outputs)[0]
# If we've met our stopping criterion, we stop.
has_changed = utils.instance_has_changed(labeled_instance, fields_to_compare)
if target is None and has_changed:
# With no target, we just want to change the prediction.
break
if target is not None and not has_changed:
# With a given target, we want to *match* the target, which we check by
# `not has_changed`.
break
return text_field.tokens, outputs
def _first_order_taylor(self, grad: numpy.ndarray, token_idx: torch.Tensor, sign: int) -> int:
"""
The below code is based on
https://github.com/pmichel31415/translate/blob/paul/pytorch_translate/
research/adversarial/adversaries/brute_force_adversary.py
Replaces the current token_idx with another token_idx to increase the loss. In particular, this
function uses the grad, alongside the embedding_matrix to select the token that maximizes the
first-order taylor approximation of the loss.
"""
grad = util.move_to_device(torch.from_numpy(grad), self.cuda_device)
if token_idx.size() != ():
# We've got an encoder that only has character ids as input. We don't curently handle
# this case, and it's not clear it's worth it to implement it. We'll at least give a
# nicer error than some pytorch dimension mismatch.
raise NotImplementedError(
"You are using a character-level indexer with no other indexers. This case is not "
"currently supported for hotflip. If you would really like to see us support "
"this, please open an issue on github."
)
if token_idx >= self.embedding_matrix.size(0):
# This happens when we've truncated our fake embedding matrix. We need to do a dot
# product with the word vector of the current token; if that token is out of
# vocabulary for our truncated matrix, we need to run it through the embedding layer.
inputs = self._make_embedder_input([self.vocab.get_token_from_index(token_idx.item())])
word_embedding = self.embedding_layer(inputs)[0]
else:
word_embedding = torch.nn.functional.embedding(
util.move_to_device(torch.LongTensor([token_idx]), self.cuda_device),
self.embedding_matrix,
)
word_embedding = word_embedding.detach().unsqueeze(0)
grad = grad.unsqueeze(0).unsqueeze(0)
# solves equation (3) here https://arxiv.org/abs/1903.06620
new_embed_dot_grad = torch.einsum("bij,kj->bik", (grad, self.embedding_matrix))
prev_embed_dot_grad = torch.einsum("bij,bij->bi", (grad, word_embedding)).unsqueeze(-1)
neg_dir_dot_grad = sign * (prev_embed_dot_grad - new_embed_dot_grad)
neg_dir_dot_grad = neg_dir_dot_grad.detach().cpu().numpy()
# Do not replace with non-alphanumeric tokens
neg_dir_dot_grad[:, :, self.invalid_replacement_indices] = -numpy.inf
best_at_each_step = neg_dir_dot_grad.argmax(2)
return best_at_each_step[0].data[0]
| allennlp-master | allennlp/interpret/attackers/hotflip.py |
from copy import deepcopy
from typing import List, Tuple
import heapq
import numpy as np
import torch
from allennlp.common.util import JsonDict, sanitize
from allennlp.data import Instance
from allennlp.data.fields import TextField, SequenceLabelField
from allennlp.interpret.attackers import utils
from allennlp.interpret.attackers.attacker import Attacker
from allennlp.predictors import Predictor
@Attacker.register("input-reduction")
class InputReduction(Attacker):
"""
Runs the input reduction method from [Pathologies of Neural Models Make Interpretations
Difficult](https://arxiv.org/abs/1804.07781), which removes as many words as possible from
the input without changing the model's prediction.
The functions on this class handle a special case for NER by looking for a field called "tags"
This check is brittle, i.e., the code could break if the name of this field has changed, or if
a non-NER model has a field called "tags".
Registered as an `Attacker` with name "input-reduction".
"""
def __init__(self, predictor: Predictor, beam_size: int = 3) -> None:
super().__init__(predictor)
self.beam_size = beam_size
def attack_from_json(
self,
inputs: JsonDict,
input_field_to_attack: str = "tokens",
grad_input_field: str = "grad_input_1",
ignore_tokens: List[str] = None,
target: JsonDict = None,
):
if target is not None:
raise ValueError("Input reduction does not implement targeted attacks")
ignore_tokens = ["@@NULL@@"] if ignore_tokens is None else ignore_tokens
original_instances = self.predictor.json_to_labeled_instances(inputs)
original_text_field: TextField = original_instances[0][ # type: ignore
input_field_to_attack
]
original_tokens = deepcopy(original_text_field.tokens)
final_tokens = []
for instance in original_instances:
final_tokens.append(
self._attack_instance(
inputs, instance, input_field_to_attack, grad_input_field, ignore_tokens
)
)
return sanitize({"final": final_tokens, "original": original_tokens})
def _attack_instance(
self,
inputs: JsonDict,
instance: Instance,
input_field_to_attack: str,
grad_input_field: str,
ignore_tokens: List[str],
):
# Save fields that must be checked for equality
fields_to_compare = utils.get_fields_to_compare(inputs, instance, input_field_to_attack)
# Set num_ignore_tokens, which tells input reduction when to stop
# We keep at least one token for input reduction on classification/entailment/etc.
if "tags" not in instance:
num_ignore_tokens = 1
tag_mask = None
# Set num_ignore_tokens for NER and build token mask
else:
num_ignore_tokens, tag_mask, original_tags = _get_ner_tags_and_mask(
instance, input_field_to_attack, ignore_tokens
)
text_field: TextField = instance[input_field_to_attack] # type: ignore
current_tokens = deepcopy(text_field.tokens)
candidates = [(instance, -1, tag_mask)]
# keep removing tokens until prediction is about to change
while len(current_tokens) > num_ignore_tokens and candidates:
# sort current candidates by smallest length (we want to remove as many tokens as possible)
def get_length(input_instance: Instance):
input_text_field: TextField = input_instance[input_field_to_attack] # type: ignore
return len(input_text_field.tokens)
candidates = heapq.nsmallest(self.beam_size, candidates, key=lambda x: get_length(x[0]))
beam_candidates = deepcopy(candidates)
candidates = []
for beam_instance, smallest_idx, tag_mask in beam_candidates:
# get gradients and predictions
beam_tag_mask = deepcopy(tag_mask)
grads, outputs = self.predictor.get_gradients([beam_instance])
for output in outputs:
if isinstance(outputs[output], torch.Tensor):
outputs[output] = outputs[output].detach().cpu().numpy().squeeze().squeeze()
elif isinstance(outputs[output], list):
outputs[output] = outputs[output][0]
# Check if any fields have changed, if so, next beam
if "tags" not in instance:
# relabel beam_instance since last iteration removed an input token
beam_instance = self.predictor.predictions_to_labeled_instances(
beam_instance, outputs
)[0]
if utils.instance_has_changed(beam_instance, fields_to_compare):
continue
# special case for sentence tagging (we have tested NER)
else:
# remove the mask where you remove the input token from.
if smallest_idx != -1: # Don't delete on the very first iteration
del beam_tag_mask[smallest_idx] # type: ignore
cur_tags = [
outputs["tags"][x] for x in range(len(outputs["tags"])) if beam_tag_mask[x] # type: ignore
]
if cur_tags != original_tags:
continue
# remove a token from the input
text_field: TextField = beam_instance[input_field_to_attack] # type: ignore
current_tokens = deepcopy(text_field.tokens)
reduced_instances_and_smallest = _remove_one_token(
beam_instance,
input_field_to_attack,
grads[grad_input_field][0],
ignore_tokens,
self.beam_size,
beam_tag_mask, # type: ignore
)
candidates.extend(reduced_instances_and_smallest)
return current_tokens
def _remove_one_token(
instance: Instance,
input_field_to_attack: str,
grads: np.ndarray,
ignore_tokens: List[str],
beam_size: int,
tag_mask: List[int],
) -> List[Tuple[Instance, int, List[int]]]:
"""
Finds the token with the smallest gradient and removes it.
"""
# Compute L2 norm of all grads.
grads_mag = [np.sqrt(grad.dot(grad)) for grad in grads]
# Skip all ignore_tokens by setting grad to infinity
text_field: TextField = instance[input_field_to_attack] # type: ignore
for token_idx, token in enumerate(text_field.tokens):
if token in ignore_tokens:
grads_mag[token_idx] = float("inf")
# For NER, skip all tokens that are not in outside
if "tags" in instance:
tag_field: SequenceLabelField = instance["tags"] # type: ignore
labels: List[str] = tag_field.labels # type: ignore
for idx, label in enumerate(labels):
if label != "O":
grads_mag[idx] = float("inf")
reduced_instances_and_smallest: List[Tuple[Instance, int, List[int]]] = []
for _ in range(beam_size):
# copy instance and edit later
copied_instance = deepcopy(instance)
copied_text_field: TextField = copied_instance[input_field_to_attack] # type: ignore
# find smallest
smallest = np.argmin(grads_mag)
if grads_mag[smallest] == float("inf"): # if all are ignored tokens, return.
break
grads_mag[smallest] = float("inf") # so the other beams don't use this token
# remove smallest
inputs_before_smallest = copied_text_field.tokens[0:smallest]
inputs_after_smallest = copied_text_field.tokens[smallest + 1 :]
copied_text_field.tokens = inputs_before_smallest + inputs_after_smallest
if "tags" in instance:
tag_field: SequenceLabelField = copied_instance["tags"] # type: ignore
tag_field_before_smallest = tag_field.labels[0:smallest]
tag_field_after_smallest = tag_field.labels[smallest + 1 :]
tag_field.labels = tag_field_before_smallest + tag_field_after_smallest # type: ignore
tag_field.sequence_field = copied_text_field
copied_instance.indexed = False
reduced_instances_and_smallest.append((copied_instance, smallest, tag_mask))
return reduced_instances_and_smallest
def _get_ner_tags_and_mask(
instance: Instance, input_field_to_attack: str, ignore_tokens: List[str]
):
"""
Used for the NER task. Sets the num_ignore tokens, saves the original predicted tag and a 0/1
mask in the position of the tags
"""
# Set num_ignore_tokens
num_ignore_tokens = 0
input_field: TextField = instance[input_field_to_attack] # type: ignore
for token in input_field.tokens:
if str(token) in ignore_tokens:
num_ignore_tokens += 1
# save the original tags and a 0/1 mask where the tags are
tag_mask = []
original_tags = []
tag_field: SequenceLabelField = instance["tags"] # type: ignore
for label in tag_field.labels:
if label != "O":
tag_mask.append(1)
original_tags.append(label)
num_ignore_tokens += 1
else:
tag_mask.append(0)
return num_ignore_tokens, tag_mask, original_tags
| allennlp-master | allennlp/interpret/attackers/input_reduction.py |
from typing import List
from allennlp.common import Registrable
from allennlp.common.util import JsonDict
from allennlp.predictors import Predictor
class Attacker(Registrable):
"""
An `Attacker` will modify an input (e.g., add or delete tokens) to try to change an AllenNLP
Predictor's output in a desired manner (e.g., make it incorrect).
"""
def __init__(self, predictor: Predictor) -> None:
self.predictor = predictor
def initialize(self):
"""
Initializes any components of the Attacker that are expensive to compute, so that they are
not created on __init__(). Default implementation is `pass`.
"""
pass
def attack_from_json(
self,
inputs: JsonDict,
input_field_to_attack: str,
grad_input_field: str,
ignore_tokens: List[str],
target: JsonDict,
) -> JsonDict:
"""
This function finds a modification to the input text that would change the model's
prediction in some desired manner (e.g., an adversarial attack).
# Parameters
inputs : `JsonDict`
The input you want to attack (the same as the argument to a Predictor, e.g.,
predict_json()).
input_field_to_attack : `str`
The key in the inputs JsonDict you want to attack, e.g., `tokens`.
grad_input_field : `str`
The field in the gradients dictionary that contains the input gradients. For example,
`grad_input_1` will be the field for single input tasks. See get_gradients() in
`Predictor` for more information on field names.
target : `JsonDict`
If given, this is a `targeted` attack, trying to change the prediction to a particular
value, instead of just changing it from its original prediction. Subclasses are not
required to accept this argument, as not all attacks make sense as targeted attacks.
Perhaps that means we should make the API more crisp, but adding another class is not
worth it.
# Returns
reduced_input : `JsonDict`
Contains the final, sanitized input after adversarial modification.
"""
raise NotImplementedError()
| allennlp-master | allennlp/interpret/attackers/attacker.py |
"""
Subcommand for building a vocabulary from a training config.
"""
import argparse
import json
import logging
import os
import tarfile
import tempfile
from overrides import overrides
from allennlp.commands.subcommand import Subcommand
from allennlp.common.file_utils import CacheFile
from allennlp.common.params import Params
from allennlp.training.util import make_vocab_from_params
logger = logging.getLogger(__name__)
@Subcommand.register("build-vocab")
class BuildVocab(Subcommand):
@overrides
def add_subparser(self, parser: argparse._SubParsersAction) -> argparse.ArgumentParser:
description = """Build a vocabulary from an experiment config file."""
subparser = parser.add_parser(self.name, description=description, help=description)
subparser.add_argument("param_path", type=str, help="path to an experiment config file")
subparser.add_argument(
"output_path", type=str, help="path to save the vocab tar.gz file to"
)
subparser.add_argument(
"-f",
"--force",
action="store_true",
help="force write if the output_path already exists",
)
subparser.add_argument(
"-o",
"--overrides",
type=str,
default="",
help=(
"a json(net) structure used to override the experiment configuration, e.g., "
"'{\"vocabulary.min_count.labels\": 10}'. Nested parameters can be specified either"
" with nested dictionaries or with dot syntax."
),
)
subparser.set_defaults(func=build_vocab_from_args)
return subparser
def build_vocab_from_args(args: argparse.Namespace):
if not args.output_path.endswith(".tar.gz"):
raise ValueError("param 'output_path' should end with '.tar.gz'")
if os.path.exists(args.output_path) and not args.force:
raise RuntimeError(f"{args.output_path} already exists. Use --force to overwrite.")
output_directory = os.path.dirname(args.output_path)
os.makedirs(output_directory, exist_ok=True)
params = Params.from_file(args.param_path)
with tempfile.TemporaryDirectory() as temp_dir:
# Serializes the vocab to 'tempdir/vocabulary'.
make_vocab_from_params(params, temp_dir)
# The CacheFile context manager gives us a temporary file to write to.
# On a successful exit from the context, it will rename the temp file to
# the target `output_path`.
with CacheFile(args.output_path, suffix=".tar.gz") as temp_archive:
logger.info("Archiving vocabulary to %s", args.output_path)
with tarfile.open(temp_archive.name, "w:gz") as archive:
vocab_dir = os.path.join(temp_dir, "vocabulary")
for fname in os.listdir(vocab_dir):
if fname.endswith(".lock"):
continue
archive.add(os.path.join(vocab_dir, fname), arcname=fname)
print(f"Success! Vocab saved to {args.output_path}")
print('You can now set the "vocabulary" entry of your training config to:')
print(json.dumps({"type": "from_files", "directory": os.path.abspath(args.output_path)}))
| allennlp-master | allennlp/commands/build_vocab.py |
"""
The `predict` subcommand allows you to make bulk JSON-to-JSON
or dataset to JSON predictions using a trained model and its
[`Predictor`](../predictors/predictor.md#predictor) wrapper.
"""
from typing import List, Iterator, Optional
import argparse
import sys
import json
from overrides import overrides
from allennlp.commands.subcommand import Subcommand
from allennlp.common import logging as common_logging
from allennlp.common.checks import check_for_gpu, ConfigurationError
from allennlp.common.file_utils import cached_path
from allennlp.common.util import lazy_groups_of
from allennlp.models.archival import load_archive
from allennlp.predictors.predictor import Predictor, JsonDict
from allennlp.data import Instance
@Subcommand.register("predict")
class Predict(Subcommand):
@overrides
def add_subparser(self, parser: argparse._SubParsersAction) -> argparse.ArgumentParser:
description = """Run the specified model against a JSON-lines input file."""
subparser = parser.add_parser(
self.name, description=description, help="Use a trained model to make predictions."
)
subparser.add_argument(
"archive_file", type=str, help="the archived model to make predictions with"
)
subparser.add_argument("input_file", type=str, help="path to or url of the input file")
subparser.add_argument("--output-file", type=str, help="path to output file")
subparser.add_argument(
"--weights-file", type=str, help="a path that overrides which weights file to use"
)
batch_size = subparser.add_mutually_exclusive_group(required=False)
batch_size.add_argument(
"--batch-size", type=int, default=1, help="The batch size to use for processing"
)
subparser.add_argument(
"--silent", action="store_true", help="do not print output to stdout"
)
cuda_device = subparser.add_mutually_exclusive_group(required=False)
cuda_device.add_argument(
"--cuda-device", type=int, default=-1, help="id of GPU to use (if any)"
)
subparser.add_argument(
"--use-dataset-reader",
action="store_true",
help="Whether to use the dataset reader of the original model to load Instances. "
"The validation dataset reader will be used if it exists, otherwise it will "
"fall back to the train dataset reader. This behavior can be overridden "
"with the --dataset-reader-choice flag.",
)
subparser.add_argument(
"--dataset-reader-choice",
type=str,
choices=["train", "validation"],
default="validation",
help="Indicates which model dataset reader to use if the --use-dataset-reader "
"flag is set.",
)
subparser.add_argument(
"-o",
"--overrides",
type=str,
default="",
help=(
"a json(net) structure used to override the experiment configuration, e.g., "
"'{\"iterator.batch_size\": 16}'. Nested parameters can be specified either"
" with nested dictionaries or with dot syntax."
),
)
subparser.add_argument(
"--predictor", type=str, help="optionally specify a specific predictor to use"
)
subparser.add_argument(
"--file-friendly-logging",
action="store_true",
default=False,
help="outputs tqdm status on separate lines and slows tqdm refresh rate",
)
subparser.set_defaults(func=_predict)
return subparser
def _get_predictor(args: argparse.Namespace) -> Predictor:
check_for_gpu(args.cuda_device)
archive = load_archive(
args.archive_file,
weights_file=args.weights_file,
cuda_device=args.cuda_device,
overrides=args.overrides,
)
return Predictor.from_archive(
archive, args.predictor, dataset_reader_to_load=args.dataset_reader_choice
)
class _PredictManager:
def __init__(
self,
predictor: Predictor,
input_file: str,
output_file: Optional[str],
batch_size: int,
print_to_console: bool,
has_dataset_reader: bool,
) -> None:
self._predictor = predictor
self._input_file = input_file
self._output_file = None if output_file is None else open(output_file, "w")
self._batch_size = batch_size
self._print_to_console = print_to_console
self._dataset_reader = None if not has_dataset_reader else predictor._dataset_reader
def _predict_json(self, batch_data: List[JsonDict]) -> Iterator[str]:
if len(batch_data) == 1:
results = [self._predictor.predict_json(batch_data[0])]
else:
results = self._predictor.predict_batch_json(batch_data)
for output in results:
yield self._predictor.dump_line(output)
def _predict_instances(self, batch_data: List[Instance]) -> Iterator[str]:
if len(batch_data) == 1:
results = [self._predictor.predict_instance(batch_data[0])]
else:
results = self._predictor.predict_batch_instance(batch_data)
for output in results:
yield self._predictor.dump_line(output)
def _maybe_print_to_console_and_file(
self, index: int, prediction: str, model_input: str = None
) -> None:
if self._print_to_console:
if model_input is not None:
print(f"input {index}: ", model_input)
print("prediction: ", prediction)
if self._output_file is not None:
self._output_file.write(prediction)
def _get_json_data(self) -> Iterator[JsonDict]:
if self._input_file == "-":
for line in sys.stdin:
if not line.isspace():
yield self._predictor.load_line(line)
else:
input_file = cached_path(self._input_file)
with open(input_file, "r") as file_input:
for line in file_input:
if not line.isspace():
yield self._predictor.load_line(line)
def _get_instance_data(self) -> Iterator[Instance]:
if self._input_file == "-":
raise ConfigurationError("stdin is not an option when using a DatasetReader.")
elif self._dataset_reader is None:
raise ConfigurationError("To generate instances directly, pass a DatasetReader.")
else:
yield from self._dataset_reader.read(self._input_file)
def run(self) -> None:
has_reader = self._dataset_reader is not None
index = 0
if has_reader:
for batch in lazy_groups_of(self._get_instance_data(), self._batch_size):
for model_input_instance, result in zip(batch, self._predict_instances(batch)):
self._maybe_print_to_console_and_file(index, result, str(model_input_instance))
index = index + 1
else:
for batch_json in lazy_groups_of(self._get_json_data(), self._batch_size):
for model_input_json, result in zip(batch_json, self._predict_json(batch_json)):
self._maybe_print_to_console_and_file(
index, result, json.dumps(model_input_json)
)
index = index + 1
if self._output_file is not None:
self._output_file.close()
def _predict(args: argparse.Namespace) -> None:
common_logging.FILE_FRIENDLY_LOGGING = args.file_friendly_logging
predictor = _get_predictor(args)
if args.silent and not args.output_file:
print("--silent specified without --output-file.")
print("Exiting early because no output will be created.")
sys.exit(0)
manager = _PredictManager(
predictor,
args.input_file,
args.output_file,
args.batch_size,
not args.silent,
args.use_dataset_reader,
)
manager.run()
| allennlp-master | allennlp/commands/predict.py |
import argparse
import logging
import sys
from typing import Any, Optional, Tuple, Set
from overrides import overrides
from allennlp import __version__
from allennlp.commands.build_vocab import BuildVocab
from allennlp.commands.cached_path import CachedPath
from allennlp.commands.evaluate import Evaluate
from allennlp.commands.find_learning_rate import FindLearningRate
from allennlp.commands.predict import Predict
from allennlp.commands.print_results import PrintResults
from allennlp.commands.subcommand import Subcommand
from allennlp.commands.test_install import TestInstall
from allennlp.commands.train import Train
from allennlp.common.plugins import import_plugins
from allennlp.common.util import import_module_and_submodules
logger = logging.getLogger(__name__)
class ArgumentParserWithDefaults(argparse.ArgumentParser):
"""
Custom argument parser that will display the default value for an argument
in the help message.
"""
_action_defaults_to_ignore = {"help", "store_true", "store_false", "store_const"}
@staticmethod
def _is_empty_default(default: Any) -> bool:
if default is None:
return True
if isinstance(default, (str, list, tuple, set)):
return not bool(default)
return False
@overrides
def add_argument(self, *args, **kwargs):
# Add default value to the help message when the default is meaningful.
default = kwargs.get("default")
if kwargs.get(
"action"
) not in self._action_defaults_to_ignore and not self._is_empty_default(default):
description = kwargs.get("help", "")
kwargs["help"] = f"{description} (default = {default})"
super().add_argument(*args, **kwargs)
def parse_args(prog: Optional[str] = None) -> Tuple[argparse.ArgumentParser, argparse.Namespace]:
"""
Creates the argument parser for the main program and uses it to parse the args.
"""
parser = ArgumentParserWithDefaults(description="Run AllenNLP", prog=prog)
parser.add_argument("--version", action="version", version=f"%(prog)s {__version__}")
subparsers = parser.add_subparsers(title="Commands", metavar="")
subcommands: Set[str] = set()
def add_subcommands():
for subcommand_name in sorted(Subcommand.list_available()):
if subcommand_name in subcommands:
continue
subcommands.add(subcommand_name)
subcommand_class = Subcommand.by_name(subcommand_name)
subcommand = subcommand_class()
subparser = subcommand.add_subparser(subparsers)
if subcommand_class.requires_plugins:
subparser.add_argument(
"--include-package",
type=str,
action="append",
default=[],
help="additional packages to include",
)
# Add all default registered subcommands first.
add_subcommands()
# If we need to print the usage/help, or the subcommand is unknown,
# we'll call `import_plugins()` to register any plugin subcommands first.
argv = sys.argv[1:]
plugins_imported: bool = False
if not argv or argv == ["--help"] or argv[0] not in subcommands:
import_plugins()
plugins_imported = True
# Add subcommands again in case one of the plugins has a registered subcommand.
add_subcommands()
# Now we can parse the arguments.
args = parser.parse_args()
if not plugins_imported and Subcommand.by_name(argv[0]).requires_plugins: # type: ignore
import_plugins()
return parser, args
def main(prog: Optional[str] = None) -> None:
"""
The [`run`](./train.md#run) command only knows about the registered classes in the ``allennlp``
codebase. In particular, once you start creating your own `Model` s and so forth, it won't
work for them, unless you use the ``--include-package`` flag or you make your code available
as a plugin (see [`plugins`](./plugins.md)).
"""
parser, args = parse_args(prog)
# If a subparser is triggered, it adds its work as `args.func`.
# So if no such attribute has been added, no subparser was triggered,
# so give the user some help.
if "func" in dir(args):
# Import any additional modules needed (to register custom classes).
for package_name in getattr(args, "include_package", []):
import_module_and_submodules(package_name)
args.func(args)
else:
parser.print_help()
| allennlp-master | allennlp/commands/__init__.py |
"""
CLI to the the caching mechanism in `common.file_utils`.
"""
import argparse
import logging
from overrides import overrides
from allennlp.commands.subcommand import Subcommand
from allennlp.common.file_utils import (
cached_path,
CACHE_DIRECTORY,
inspect_cache,
remove_cache_entries,
)
logger = logging.getLogger(__name__)
@Subcommand.register("cached-path")
class CachedPath(Subcommand):
requires_plugins: bool = False
@overrides
def add_subparser(self, parser: argparse._SubParsersAction) -> argparse.ArgumentParser:
description = """Cache remote files to the AllenNLP cache."""
subparser = parser.add_parser(
self.name,
description=description,
help=description,
)
subparser.set_defaults(func=_cached_path)
subparser.add_argument(
"resources",
type=str,
help="""The URLs or paths to the resources.
If using the --inspect or --remove flag, this can also contain glob patterns.""",
nargs="*",
)
subparser.add_argument(
"-d",
"--cache-dir",
type=str,
help="""Use a custom cache directory.""",
default=CACHE_DIRECTORY,
)
subparser.add_argument(
"-x",
"--extract-archive",
action="store_true",
help="""Automatically extract zip or tar.gz archive files.""",
)
subparser.add_argument(
"-f",
"--force-extract",
action="store_true",
help="""Extract archives regardless of whether or not they already exist.""",
)
subparser.add_argument(
"--inspect",
action="store_true",
help="""Print some useful information about the cache.""",
)
subparser.add_argument(
"--remove",
action="store_true",
help="""Remove any cache entries matching the given resource patterns.""",
)
return subparser
def _cached_path(args: argparse.Namespace):
logger.info("Cache directory: %s", args.cache_dir)
if args.inspect:
if args.extract_archive or args.force_extract or args.remove:
raise RuntimeError(
"cached-path cannot accept --extract-archive, --force-extract, or --remove "
"options when --inspect flag is used."
)
inspect_cache(patterns=args.resources, cache_dir=args.cache_dir)
elif args.remove:
from allennlp.common.util import format_size
if args.extract_archive or args.force_extract or args.inspect:
raise RuntimeError(
"cached-path cannot accept --extract-archive, --force-extract, or --inspect "
"options when --remove flag is used."
)
if not args.resources:
raise RuntimeError(
"Missing positional argument(s) 'resources'. 'resources' is required when using "
"the --remove option. If you really want to remove everything, pass '*' for 'resources'."
)
reclaimed_space = remove_cache_entries(args.resources, cache_dir=args.cache_dir)
print(f"Reclaimed {format_size(reclaimed_space)} of space")
else:
for resource in args.resources:
print(
cached_path(
resource,
cache_dir=args.cache_dir,
extract_archive=args.extract_archive,
force_extract=args.force_extract,
)
)
| allennlp-master | allennlp/commands/cached_path.py |
"""
The `print-results` subcommand allows you to print results from multiple
allennlp serialization directories to the console in a helpful csv format.
"""
import argparse
import json
import logging
import os
from overrides import overrides
from allennlp.commands.subcommand import Subcommand
logger = logging.getLogger(__name__)
@Subcommand.register("print-results")
class PrintResults(Subcommand):
requires_plugins: bool = False
@overrides
def add_subparser(self, parser: argparse._SubParsersAction) -> argparse.ArgumentParser:
description = """Print results from allennlp training runs in a helpful CSV format."""
subparser = parser.add_parser(
self.name,
description=description,
help="Print results from allennlp serialization directories to the console.",
)
subparser.add_argument(
"path",
type=str,
help="Path to recursively search for allennlp serialization directories.",
)
subparser.add_argument(
"-k",
"--keys",
type=str,
nargs="+",
help="Keys to print from metrics.json."
'Keys not present in all metrics.json will result in "N/A"',
default=None,
required=False,
)
subparser.add_argument(
"-m",
"--metrics-filename",
type=str,
help="Name of the metrics file to inspect.",
default="metrics.json",
required=False,
)
subparser.set_defaults(func=print_results_from_args)
return subparser
def print_results_from_args(args: argparse.Namespace):
"""
Prints results from an `argparse.Namespace` object.
"""
path = args.path
metrics_name = args.metrics_filename
keys = args.keys
results_dict = {}
for root, _, files in os.walk(path):
if metrics_name in files:
full_name = os.path.join(root, metrics_name)
with open(full_name) as file_:
metrics = json.load(file_)
results_dict[full_name] = metrics
sorted_keys = sorted(list(results_dict.keys()))
print(f"model_run, {', '.join(keys)}")
for name in sorted_keys:
results = results_dict[name]
keys_to_print = (str(results.get(key, "N/A")) for key in keys)
print(f"{name}, {', '.join(keys_to_print)}")
| allennlp-master | allennlp/commands/print_results.py |
"""
The `train` subcommand can be used to train a model.
It requires a configuration file and a directory in
which to write the results.
"""
import argparse
import logging
import os
from os import PathLike
from typing import Any, Dict, List, Optional, Union
import warnings
import torch
import torch.distributed as dist
import torch.multiprocessing as mp
from overrides import overrides
from allennlp.commands.subcommand import Subcommand
from allennlp.common import Params, Registrable, Lazy
from allennlp.common.checks import check_for_gpu, ConfigurationError
from allennlp.common import logging as common_logging
from allennlp.common import util as common_util
from allennlp.common.plugins import import_plugins
from allennlp.data import DatasetReader, Vocabulary
from allennlp.data import DataLoader
from allennlp.models.archival import archive_model, CONFIG_NAME, verify_include_in_archive
from allennlp.models.model import _DEFAULT_WEIGHTS, Model
from allennlp.training.trainer import Trainer
from allennlp.training import util as training_util
logger = logging.getLogger(__name__)
@Subcommand.register("train")
class Train(Subcommand):
@overrides
def add_subparser(self, parser: argparse._SubParsersAction) -> argparse.ArgumentParser:
description = """Train the specified model on the specified dataset."""
subparser = parser.add_parser(self.name, description=description, help="Train a model.")
subparser.add_argument(
"param_path", type=str, help="path to parameter file describing the model to be trained"
)
subparser.add_argument(
"-s",
"--serialization-dir",
required=True,
type=str,
help="directory in which to save the model and its logs",
)
subparser.add_argument(
"-r",
"--recover",
action="store_true",
default=False,
help="recover training from the state in serialization_dir",
)
subparser.add_argument(
"-f",
"--force",
action="store_true",
required=False,
help="overwrite the output directory if it exists",
)
subparser.add_argument(
"-o",
"--overrides",
type=str,
default="",
help=(
"a json(net) structure used to override the experiment configuration, e.g., "
"'{\"iterator.batch_size\": 16}'. Nested parameters can be specified either"
" with nested dictionaries or with dot syntax."
),
)
subparser.add_argument(
"--node-rank", type=int, default=0, help="rank of this node in the distributed setup"
)
subparser.add_argument(
"--dry-run",
action="store_true",
help=(
"do not train a model, but create a vocabulary, show dataset statistics and "
"other training information"
),
)
subparser.add_argument(
"--file-friendly-logging",
action="store_true",
default=False,
help="outputs tqdm status on separate lines and slows tqdm refresh rate",
)
subparser.set_defaults(func=train_model_from_args)
return subparser
def train_model_from_args(args: argparse.Namespace):
"""
Just converts from an `argparse.Namespace` object to string paths.
"""
train_model_from_file(
parameter_filename=args.param_path,
serialization_dir=args.serialization_dir,
overrides=args.overrides,
recover=args.recover,
force=args.force,
node_rank=args.node_rank,
include_package=args.include_package,
dry_run=args.dry_run,
file_friendly_logging=args.file_friendly_logging,
)
def train_model_from_file(
parameter_filename: Union[str, PathLike],
serialization_dir: Union[str, PathLike],
overrides: Union[str, Dict[str, Any]] = "",
recover: bool = False,
force: bool = False,
node_rank: int = 0,
include_package: List[str] = None,
dry_run: bool = False,
file_friendly_logging: bool = False,
) -> Optional[Model]:
"""
A wrapper around [`train_model`](#train_model) which loads the params from a file.
# Parameters
parameter_filename : `str`
A json parameter file specifying an AllenNLP experiment.
serialization_dir : `str`
The directory in which to save results and logs. We just pass this along to
[`train_model`](#train_model).
overrides : `Union[str, Dict[str, Any]]`, optional (default = `""`)
A JSON string or a dict that we will use to override values in the input parameter file.
recover : `bool`, optional (default=`False`)
If `True`, we will try to recover a training run from an existing serialization
directory. This is only intended for use when something actually crashed during the middle
of a run. For continuing training a model on new data, see `Model.from_archive`.
force : `bool`, optional (default=`False`)
If `True`, we will overwrite the serialization directory if it already exists.
node_rank : `int`, optional
Rank of the current node in distributed training
include_package : `str`, optional
In distributed mode, extra packages mentioned will be imported in trainer workers.
dry_run : `bool`, optional (default=`False`)
Do not train a model, but create a vocabulary, show dataset statistics and other training
information.
file_friendly_logging : `bool`, optional (default=`False`)
If `True`, we add newlines to tqdm output, even on an interactive terminal, and we slow
down tqdm's output to only once every 10 seconds.
# Returns
best_model : `Optional[Model]`
The model with the best epoch weights or `None` if in dry run.
"""
# Load the experiment config from a file and pass it to `train_model`.
params = Params.from_file(parameter_filename, overrides)
return train_model(
params=params,
serialization_dir=serialization_dir,
recover=recover,
force=force,
node_rank=node_rank,
include_package=include_package,
dry_run=dry_run,
file_friendly_logging=file_friendly_logging,
)
def train_model(
params: Params,
serialization_dir: Union[str, PathLike],
recover: bool = False,
force: bool = False,
node_rank: int = 0,
include_package: List[str] = None,
dry_run: bool = False,
file_friendly_logging: bool = False,
) -> Optional[Model]:
"""
Trains the model specified in the given [`Params`](../common/params.md#params) object, using the data
and training parameters also specified in that object, and saves the results in `serialization_dir`.
# Parameters
params : `Params`
A parameter object specifying an AllenNLP Experiment.
serialization_dir : `str`
The directory in which to save results and logs.
recover : `bool`, optional (default=`False`)
If `True`, we will try to recover a training run from an existing serialization
directory. This is only intended for use when something actually crashed during the middle
of a run. For continuing training a model on new data, see `Model.from_archive`.
force : `bool`, optional (default=`False`)
If `True`, we will overwrite the serialization directory if it already exists.
node_rank : `int`, optional
Rank of the current node in distributed training
include_package : `List[str]`, optional
In distributed mode, extra packages mentioned will be imported in trainer workers.
dry_run : `bool`, optional (default=`False`)
Do not train a model, but create a vocabulary, show dataset statistics and other training
information.
file_friendly_logging : `bool`, optional (default=`False`)
If `True`, we add newlines to tqdm output, even on an interactive terminal, and we slow
down tqdm's output to only once every 10 seconds.
# Returns
best_model : `Optional[Model]`
The model with the best epoch weights or `None` if in dry run.
"""
common_logging.FILE_FRIENDLY_LOGGING = file_friendly_logging
training_util.create_serialization_dir(params, serialization_dir, recover, force)
params.to_file(os.path.join(serialization_dir, CONFIG_NAME))
include_in_archive = params.pop("include_in_archive", None)
verify_include_in_archive(include_in_archive)
distributed_params = params.params.pop("distributed", None)
# If distributed isn't in the config and the config contains strictly
# one cuda device, we just run a single training process.
if distributed_params is None:
model = _train_worker(
process_rank=0,
params=params,
serialization_dir=serialization_dir,
include_package=include_package,
dry_run=dry_run,
file_friendly_logging=file_friendly_logging,
)
if not dry_run:
archive_model(serialization_dir, include_in_archive=include_in_archive)
return model
# Otherwise, we are running multiple processes for training.
else:
common_logging.prepare_global_logging(
serialization_dir,
rank=0,
world_size=1,
)
# We are careful here so that we can raise a good error if someone
# passed the wrong thing - cuda_devices are required.
device_ids = distributed_params.pop("cuda_devices", None)
multi_device = isinstance(device_ids, list) and len(device_ids) > 1
num_nodes = distributed_params.pop("num_nodes", 1)
if not (multi_device or num_nodes > 1):
raise ConfigurationError(
"Multiple cuda devices/nodes need to be configured to run distributed training."
)
check_for_gpu(device_ids)
master_addr = distributed_params.pop("master_address", "127.0.0.1")
if master_addr in ("127.0.0.1", "0.0.0.0", "localhost"):
# If running locally, we can automatically find an open port if one is not specified.
master_port = (
distributed_params.pop("master_port", None) or common_util.find_open_port()
)
else:
# Otherwise we require that the port be specified.
master_port = distributed_params.pop("master_port")
num_procs = len(device_ids)
world_size = num_nodes * num_procs
# Creating `Vocabulary` objects from workers could be problematic since
# the data loaders in each worker will yield only `rank` specific
# instances. Hence it is safe to construct the vocabulary and write it
# to disk before initializing the distributed context. The workers will
# load the vocabulary from the path specified.
vocab_dir = os.path.join(serialization_dir, "vocabulary")
if recover:
vocab = Vocabulary.from_files(vocab_dir)
else:
vocab = training_util.make_vocab_from_params(
params.duplicate(), serialization_dir, print_statistics=dry_run
)
params["vocabulary"] = {
"type": "from_files",
"directory": vocab_dir,
"padding_token": vocab._padding_token,
"oov_token": vocab._oov_token,
}
logging.info(
"Switching to distributed training mode since multiple GPUs are configured | "
f"Master is at: {master_addr}:{master_port} | Rank of this node: {node_rank} | "
f"Number of workers in this node: {num_procs} | Number of nodes: {num_nodes} | "
f"World size: {world_size}"
)
mp.spawn(
_train_worker,
args=(
params.duplicate(),
serialization_dir,
include_package,
dry_run,
node_rank,
master_addr,
master_port,
world_size,
device_ids,
file_friendly_logging,
include_in_archive,
),
nprocs=num_procs,
)
if dry_run:
return None
else:
archive_model(serialization_dir, include_in_archive=include_in_archive)
model = Model.load(params, serialization_dir)
return model
def _train_worker(
process_rank: int,
params: Params,
serialization_dir: Union[str, PathLike],
include_package: List[str] = None,
dry_run: bool = False,
node_rank: int = 0,
master_addr: str = "127.0.0.1",
master_port: int = 29500,
world_size: int = 1,
distributed_device_ids: List[int] = None,
file_friendly_logging: bool = False,
include_in_archive: List[str] = None,
) -> Optional[Model]:
"""
Helper to train the configured model/experiment. In distributed mode, this is spawned as a
worker process. In a single GPU experiment, this returns the `Model` object and in distributed
training, nothing is returned.
# Parameters
process_rank : `int`
The process index that is initialized using the GPU device id.
params : `Params`
A parameter object specifying an AllenNLP Experiment.
serialization_dir : `str`
The directory in which to save results and logs.
include_package : `List[str]`, optional
In distributed mode, since this function would have been spawned as a separate process,
the extra imports need to be done again. NOTE: This does not have any effect in single
GPU training.
dry_run : `bool`, optional (default=`False`)
Do not train a model, but create a vocabulary, show dataset statistics and other training
information.
node_rank : `int`, optional
Rank of the node.
master_addr : `str`, optional (default=`"127.0.0.1"`)
Address of the master node for distributed training.
master_port : `str`, optional (default=`"29500"`)
Port of the master node for distributed training.
world_size : `int`, optional
The number of processes involved in distributed training.
distributed_device_ids: `List[str]`, optional
IDs of the devices used involved in distributed training.
file_friendly_logging : `bool`, optional (default=`False`)
If `True`, we add newlines to tqdm output, even on an interactive terminal, and we slow
down tqdm's output to only once every 10 seconds.
include_in_archive : `List[str]`, optional
Paths relative to `serialization_dir` that should be archived in addition to the default ones.
# Returns
best_model : `Optional[Model]`
The model with the best epoch weights or `None` if in distributed training or in dry run.
"""
common_logging.FILE_FRIENDLY_LOGGING = file_friendly_logging
common_logging.prepare_global_logging(
serialization_dir,
rank=process_rank,
world_size=world_size,
)
common_util.prepare_environment(params)
distributed = world_size > 1
master = process_rank == 0
include_package = include_package or []
if distributed:
assert distributed_device_ids is not None
# Since the worker is spawned and not forked, the extra imports need to be done again.
# Both the ones from the plugins and the ones from `include_package`.
import_plugins()
for package_name in include_package:
common_util.import_module_and_submodules(package_name)
num_procs_per_node = len(distributed_device_ids)
# The Unique identifier of the worker process among all the processes in the
# distributed training group is computed here. This is used while initializing
# the process group using `init_process_group`
global_rank = node_rank * num_procs_per_node + process_rank
# Number of processes per node is useful to know if a process
# is a master in the local node(node in which it is running)
os.environ["ALLENNLP_PROCS_PER_NODE"] = str(num_procs_per_node)
# In distributed training, the configured device is always going to be a list.
# The corresponding gpu id for the particular worker is obtained by picking the id
# from the device list with the rank as index
gpu_id = distributed_device_ids[process_rank] # type: ignore
# Till now, "cuda_device" might not be set in the trainer params.
# But a worker trainer needs to only know about its specific GPU id.
params["trainer"]["cuda_device"] = gpu_id
params["trainer"]["world_size"] = world_size
params["trainer"]["distributed"] = True
if gpu_id >= 0:
torch.cuda.set_device(int(gpu_id))
dist.init_process_group(
backend="nccl",
init_method=f"tcp://{master_addr}:{master_port}",
world_size=world_size,
rank=global_rank,
)
else:
dist.init_process_group(
backend="gloo",
init_method=f"tcp://{master_addr}:{master_port}",
world_size=world_size,
rank=global_rank,
)
logging.info(
f"Process group of world size {world_size} initialized "
f"for distributed training in worker {global_rank}"
)
train_loop = TrainModel.from_params(
params=params,
serialization_dir=serialization_dir,
local_rank=process_rank,
)
if dry_run:
return None
try:
if distributed: # let the setup get ready for all the workers
dist.barrier()
metrics = train_loop.run()
except KeyboardInterrupt:
# if we have completed an epoch, try to create a model archive.
if master and os.path.exists(os.path.join(serialization_dir, _DEFAULT_WEIGHTS)):
logging.info(
"Training interrupted by the user. Attempting to create "
"a model archive using the current best epoch weights."
)
archive_model(serialization_dir, include_in_archive=include_in_archive)
raise
if master:
train_loop.finish(metrics)
if not distributed:
return train_loop.model
return None
class TrainModel(Registrable):
"""
This class exists so that we can easily read a configuration file with the `allennlp train`
command. The basic logic is that we call `train_loop =
TrainModel.from_params(params_from_config_file)`, then `train_loop.run()`. This class performs
very little logic, pushing most of it to the `Trainer` that has a `train()` method. The
point here is to construct all of the dependencies for the `Trainer` in a way that we can do
it using `from_params()`, while having all of those dependencies transparently documented and
not hidden in calls to `params.pop()`. If you are writing your own training loop, you almost
certainly should not use this class, but you might look at the code for this class to see what
we do, to make writing your training loop easier.
In particular, if you are tempted to call the `__init__` method of this class, you are probably
doing something unnecessary. Literally all we do after `__init__` is call `trainer.train()`. You
can do that yourself, if you've constructed a `Trainer` already. What this class gives you is a
way to construct the `Trainer` by means of a config file. The actual constructor that we use
with `from_params` in this class is `from_partial_objects`. See that method for a description
of all of the allowed top-level keys in a configuration file used with `allennlp train`.
"""
default_implementation = "default"
"""
The default implementation is registered as 'default'.
"""
def __init__(
self,
serialization_dir: str,
model: Model,
trainer: Trainer,
evaluation_data_loader: DataLoader = None,
evaluate_on_test: bool = False,
batch_weight_key: str = "",
) -> None:
self.serialization_dir = serialization_dir
self.model = model
self.trainer = trainer
self.evaluation_data_loader = evaluation_data_loader
self.evaluate_on_test = evaluate_on_test
self.batch_weight_key = batch_weight_key
def run(self) -> Dict[str, Any]:
return self.trainer.train()
def finish(self, metrics: Dict[str, Any]):
if self.evaluation_data_loader is not None and self.evaluate_on_test:
logger.info("The model will be evaluated using the best epoch weights.")
test_metrics = training_util.evaluate(
self.model,
self.evaluation_data_loader,
cuda_device=self.trainer.cuda_device,
batch_weight_key=self.batch_weight_key,
)
for key, value in test_metrics.items():
metrics["test_" + key] = value
elif self.evaluation_data_loader is not None:
logger.info(
"To evaluate on the test set after training, pass the "
"'evaluate_on_test' flag, or use the 'allennlp evaluate' command."
)
common_util.dump_metrics(
os.path.join(self.serialization_dir, "metrics.json"), metrics, log=True
)
@classmethod
def from_partial_objects(
cls,
serialization_dir: str,
local_rank: int,
dataset_reader: DatasetReader,
train_data_path: str,
model: Lazy[Model],
data_loader: Lazy[DataLoader],
trainer: Lazy[Trainer],
vocabulary: Lazy[Vocabulary] = Lazy(Vocabulary),
datasets_for_vocab_creation: List[str] = None,
validation_dataset_reader: DatasetReader = None,
validation_data_path: str = None,
validation_data_loader: Lazy[DataLoader] = None,
test_data_path: str = None,
evaluate_on_test: bool = False,
batch_weight_key: str = "",
) -> "TrainModel":
"""
This method is intended for use with our `FromParams` logic, to construct a `TrainModel`
object from a config file passed to the `allennlp train` command. The arguments to this
method are the allowed top-level keys in a configuration file (except for the first three,
which are obtained separately).
You *could* use this outside of our `FromParams` logic if you really want to, but there
might be easier ways to accomplish your goal than instantiating `Lazy` objects. If you are
writing your own training loop, we recommend that you look at the implementation of this
method for inspiration and possibly some utility functions you can call, but you very likely
should not use this method directly.
The `Lazy` type annotations here are a mechanism for building dependencies to an object
sequentially - the `TrainModel` object needs data, a model, and a trainer, but the model
needs to see the data before it's constructed (to create a vocabulary) and the trainer needs
the data and the model before it's constructed. Objects that have sequential dependencies
like this are labeled as `Lazy` in their type annotations, and we pass the missing
dependencies when we call their `construct()` method, which you can see in the code below.
# Parameters
serialization_dir: `str`
The directory where logs and model archives will be saved.
In a typical AllenNLP configuration file, this parameter does not get an entry as a
top-level key, it gets passed in separately.
local_rank: `int`
The process index that is initialized using the GPU device id.
In a typical AllenNLP configuration file, this parameter does not get an entry as a
top-level key, it gets passed in separately.
dataset_reader: `DatasetReader`
The `DatasetReader` that will be used for training and (by default) for validation.
train_data_path: `str`
The file (or directory) that will be passed to `dataset_reader.read()` to construct the
training data.
model: `Lazy[Model]`
The model that we will train. This is lazy because it depends on the `Vocabulary`;
after constructing the vocabulary we call `model.construct(vocab=vocabulary)`.
data_loader: `Lazy[DataLoader]`
The data_loader we use to batch instances from the dataset reader at training and (by
default) validation time. This is lazy because it takes a dataset in it's constructor.
trainer: `Lazy[Trainer]`
The `Trainer` that actually implements the training loop. This is a lazy object because
it depends on the model that's going to be trained.
vocabulary: `Lazy[Vocabulary]`, optional (default=`Lazy(Vocabulary)`)
The `Vocabulary` that we will use to convert strings in the data to integer ids (and
possibly set sizes of embedding matrices in the `Model`). By default we construct the
vocabulary from the instances that we read.
datasets_for_vocab_creation: `List[str]`, optional (default=`None`)
If you pass in more than one dataset but don't want to use all of them to construct a
vocabulary, you can pass in this key to limit it. Valid entries in the list are
"train", "validation" and "test".
validation_dataset_reader: `DatasetReader`, optional (default=`None`)
If given, we will use this dataset reader for the validation data instead of
`dataset_reader`.
validation_data_path: `str`, optional (default=`None`)
If given, we will use this data for computing validation metrics and early stopping.
validation_data_loader: `Lazy[DataLoader]`, optional (default=`None`)
If given, the data_loader we use to batch instances from the dataset reader at
validation and test time. This is lazy because it takes a dataset in it's constructor.
test_data_path: `str`, optional (default=`None`)
If given, we will use this as test data. This makes it available for vocab creation by
default, but nothing else.
evaluate_on_test: `bool`, optional (default=`False`)
If given, we will evaluate the final model on this data at the end of training. Note
that we do not recommend using this for actual test data in every-day experimentation;
you should only very rarely evaluate your model on actual test data.
batch_weight_key: `str`, optional (default=`""`)
The name of metric used to weight the loss on a per-batch basis. This is only used
during evaluation on final test data, if you've specified `evaluate_on_test=True`.
"""
datasets = training_util.read_all_datasets(
train_data_path=train_data_path,
dataset_reader=dataset_reader,
validation_dataset_reader=validation_dataset_reader,
validation_data_path=validation_data_path,
test_data_path=test_data_path,
)
if datasets_for_vocab_creation:
for key in datasets_for_vocab_creation:
if key not in datasets:
raise ConfigurationError(f"invalid 'dataset_for_vocab_creation' {key}")
logger.info(
"From dataset instances, %s will be considered for vocabulary creation.",
", ".join(datasets_for_vocab_creation),
)
instance_generator = (
instance
for key, dataset in datasets.items()
if datasets_for_vocab_creation is None or key in datasets_for_vocab_creation
for instance in dataset
)
vocabulary_ = vocabulary.construct(instances=instance_generator)
model_ = model.construct(vocab=vocabulary_, serialization_dir=serialization_dir)
# Initializing the model can have side effect of expanding the vocabulary.
# Save the vocab only in the master. In the degenerate non-distributed
# case, we're trivially the master. In the distributed case this is safe
# to do without worrying about race conditions since saving and loading
# the vocab involves acquiring a file lock.
if local_rank == 0:
vocabulary_path = os.path.join(serialization_dir, "vocabulary")
vocabulary_.save_to_files(vocabulary_path)
for dataset in datasets.values():
dataset.index_with(model_.vocab)
data_loader_ = data_loader.construct(dataset=datasets["train"])
validation_data = datasets.get("validation")
validation_data_loader_: Optional[DataLoader] = None
if validation_data is not None:
if validation_data_loader is None:
validation_data_loader_ = data_loader.construct(dataset=validation_data)
if getattr(validation_data_loader_, "_batches_per_epoch", None) is not None:
warnings.warn(
"Using 'data_loader' params to construct validation data loader since "
"'validation_data_loader' params not specified, but you have "
"'data_loader.batches_per_epoch' set which may result in different "
"validation datasets for each epoch.",
UserWarning,
)
else:
validation_data_loader_ = validation_data_loader.construct(dataset=validation_data)
test_data = datasets.get("test")
test_data_loader: Optional[DataLoader] = None
if test_data is not None:
if validation_data_loader is None:
test_data_loader = data_loader.construct(dataset=test_data)
else:
test_data_loader = validation_data_loader.construct(dataset=test_data)
# We don't need to pass serialization_dir and local_rank here, because they will have been
# passed through the trainer by from_params already, because they were keyword arguments to
# construct this class in the first place.
trainer_ = trainer.construct(
model=model_,
data_loader=data_loader_,
validation_data_loader=validation_data_loader_,
)
assert trainer_ is not None
return cls(
serialization_dir=serialization_dir,
model=model_,
trainer=trainer_,
evaluation_data_loader=test_data_loader,
evaluate_on_test=evaluate_on_test,
batch_weight_key=batch_weight_key,
)
TrainModel.register("default", constructor="from_partial_objects")(TrainModel)
| allennlp-master | allennlp/commands/train.py |
"""
The `evaluate` subcommand can be used to
evaluate a trained model against a dataset
and report any metrics calculated by the model.
"""
import argparse
import json
import logging
from typing import Any, Dict
from overrides import overrides
from allennlp.commands.subcommand import Subcommand
from allennlp.common import logging as common_logging
from allennlp.common.util import prepare_environment
from allennlp.data import DataLoader
from allennlp.models.archival import load_archive
from allennlp.training.util import evaluate
logger = logging.getLogger(__name__)
@Subcommand.register("evaluate")
class Evaluate(Subcommand):
@overrides
def add_subparser(self, parser: argparse._SubParsersAction) -> argparse.ArgumentParser:
description = """Evaluate the specified model + dataset"""
subparser = parser.add_parser(
self.name, description=description, help="Evaluate the specified model + dataset."
)
subparser.add_argument("archive_file", type=str, help="path to an archived trained model")
subparser.add_argument(
"input_file", type=str, help="path to the file containing the evaluation data"
)
subparser.add_argument(
"--output-file", type=str, help="optional path to write the metrics to as JSON"
)
subparser.add_argument(
"--predictions-output-file",
type=str,
help="optional path to write the predictions to as JSON lines",
)
subparser.add_argument(
"--weights-file", type=str, help="a path that overrides which weights file to use"
)
cuda_device = subparser.add_mutually_exclusive_group(required=False)
cuda_device.add_argument(
"--cuda-device", type=int, default=-1, help="id of GPU to use (if any)"
)
subparser.add_argument(
"-o",
"--overrides",
type=str,
default="",
help=(
"a json(net) structure used to override the experiment configuration, e.g., "
"'{\"iterator.batch_size\": 16}'. Nested parameters can be specified either"
" with nested dictionaries or with dot syntax."
),
)
subparser.add_argument(
"--batch-size", type=int, help="If non-empty, the batch size to use during evaluation."
)
subparser.add_argument(
"--batch-weight-key",
type=str,
default="",
help="If non-empty, name of metric used to weight the loss on a per-batch basis.",
)
subparser.add_argument(
"--extend-vocab",
action="store_true",
default=False,
help="if specified, we will use the instances in your new dataset to "
"extend your vocabulary. If pretrained-file was used to initialize "
"embedding layers, you may also need to pass --embedding-sources-mapping.",
)
subparser.add_argument(
"--embedding-sources-mapping",
type=str,
default="",
help="a JSON dict defining mapping from embedding module path to embedding "
"pretrained-file used during training. If not passed, and embedding needs to be "
"extended, we will try to use the original file paths used during training. If "
"they are not available we will use random vectors for embedding extension.",
)
subparser.add_argument(
"--file-friendly-logging",
action="store_true",
default=False,
help="outputs tqdm status on separate lines and slows tqdm refresh rate",
)
subparser.set_defaults(func=evaluate_from_args)
return subparser
def evaluate_from_args(args: argparse.Namespace) -> Dict[str, Any]:
common_logging.FILE_FRIENDLY_LOGGING = args.file_friendly_logging
# Disable some of the more verbose logging statements
logging.getLogger("allennlp.common.params").disabled = True
logging.getLogger("allennlp.nn.initializers").disabled = True
logging.getLogger("allennlp.modules.token_embedders.embedding").setLevel(logging.INFO)
# Load from archive
archive = load_archive(
args.archive_file,
weights_file=args.weights_file,
cuda_device=args.cuda_device,
overrides=args.overrides,
)
config = archive.config
prepare_environment(config)
model = archive.model
model.eval()
# Load the evaluation data
dataset_reader = archive.validation_dataset_reader
evaluation_data_path = args.input_file
logger.info("Reading evaluation data from %s", evaluation_data_path)
instances = dataset_reader.read(evaluation_data_path)
embedding_sources = (
json.loads(args.embedding_sources_mapping) if args.embedding_sources_mapping else {}
)
if args.extend_vocab:
logger.info("Vocabulary is being extended with test instances.")
model.vocab.extend_from_instances(instances=instances)
model.extend_embedder_vocab(embedding_sources)
instances.index_with(model.vocab)
data_loader_params = config.pop("validation_data_loader", None)
if data_loader_params is None:
data_loader_params = config.pop("data_loader")
if args.batch_size:
data_loader_params["batch_size"] = args.batch_size
data_loader = DataLoader.from_params(dataset=instances, params=data_loader_params)
metrics = evaluate(
model,
data_loader,
args.cuda_device,
args.batch_weight_key,
output_file=args.output_file,
predictions_output_file=args.predictions_output_file,
)
logger.info("Finished evaluating.")
return metrics
| allennlp-master | allennlp/commands/evaluate.py |
"""
Base class for subcommands under `allennlp.run`.
"""
import argparse
from typing import Callable, Dict, Optional, Type, TypeVar
from overrides import overrides
from allennlp.common import Registrable
T = TypeVar("T", bound="Subcommand")
class Subcommand(Registrable):
"""
An abstract class representing subcommands for allennlp.run.
If you wanted to (for example) create your own custom `special-evaluate` command to use like
`allennlp special-evaluate ...`
you would create a `Subcommand` subclass and then pass it as an override to
[`main`](#main).
"""
requires_plugins: bool = True
"""
If `True`, the sub-command will trigger a call to `import_plugins()` (except for custom
subcommands which come from plugins, since plugins will already have been imported by the
time the subcommand is discovered), and will also have an additional `--include-package` flag.
"""
_reverse_registry: Dict[Type, str] = {}
def add_subparser(self, parser: argparse._SubParsersAction) -> argparse.ArgumentParser:
raise NotImplementedError
@classmethod
@overrides
def register(
cls: Type[T], name: str, constructor: Optional[str] = None, exist_ok: bool = False
) -> Callable[[Type[T]], Type[T]]:
super_register_fn = super().register(name, constructor=constructor, exist_ok=exist_ok)
def add_name_to_reverse_registry(subclass: Type[T]) -> Type[T]:
subclass = super_register_fn(subclass)
# Don't need to check `exist_ok`, as it's done by super.
# Also, don't need to delete previous entries if overridden, they can just stay there.
cls._reverse_registry[subclass] = name
return subclass
return add_name_to_reverse_registry
@property
def name(self) -> str:
return self._reverse_registry[self.__class__]
| allennlp-master | allennlp/commands/subcommand.py |
"""
The `test-install` subcommand provides a programmatic way to verify
that AllenNLP has been successfully installed.
"""
import argparse
import logging
import pathlib
from overrides import overrides
import torch
import allennlp
from allennlp.common.util import import_module_and_submodules
from allennlp.commands.subcommand import Subcommand
from allennlp.version import VERSION
logger = logging.getLogger(__name__)
@Subcommand.register("test-install")
class TestInstall(Subcommand):
@overrides
def add_subparser(self, parser: argparse._SubParsersAction) -> argparse.ArgumentParser:
description = """Test that AllenNLP is installed correctly."""
subparser = parser.add_parser(
self.name, description=description, help="Test AllenNLP installation."
)
subparser.set_defaults(func=_run_test)
return subparser
def _get_module_root():
return pathlib.Path(allennlp.__file__).parent
def _run_test(args: argparse.Namespace):
# Make sure we can actually import the main modules without errors.
import_module_and_submodules("allennlp.common")
import_module_and_submodules("allennlp.data")
import_module_and_submodules("allennlp.interpret")
import_module_and_submodules("allennlp.models")
import_module_and_submodules("allennlp.modules")
import_module_and_submodules("allennlp.nn")
import_module_and_submodules("allennlp.predictors")
import_module_and_submodules("allennlp.training")
logger.info("AllenNLP version %s installed to %s", VERSION, _get_module_root())
logger.info("Cuda devices available: %s", torch.cuda.device_count())
| allennlp-master | allennlp/commands/test_install.py |
"""
The `find-lr` subcommand can be used to find a good learning rate for a model.
It requires a configuration file and a directory in
which to write the results.
"""
import argparse
import logging
import math
import os
import re
from typing import List, Tuple
import itertools
from overrides import overrides
from allennlp.commands.subcommand import Subcommand
from allennlp.common import Params, Tqdm
from allennlp.common import logging as common_logging
from allennlp.common.checks import check_for_gpu, ConfigurationError
from allennlp.common.util import prepare_environment
from allennlp.data import Vocabulary
from allennlp.data import DataLoader
from allennlp.models import Model
from allennlp.training import GradientDescentTrainer, Trainer
from allennlp.training.util import create_serialization_dir, datasets_from_params
logger = logging.getLogger(__name__)
@Subcommand.register("find-lr")
class FindLearningRate(Subcommand):
@overrides
def add_subparser(self, parser: argparse._SubParsersAction) -> argparse.ArgumentParser:
description = """Find a learning rate range where loss decreases quickly
for the specified model and dataset."""
subparser = parser.add_parser(
self.name, description=description, help="Find a learning rate range."
)
subparser.add_argument(
"param_path", type=str, help="path to parameter file describing the model to be trained"
)
subparser.add_argument(
"-s",
"--serialization-dir",
required=True,
type=str,
help="The directory in which to save results.",
)
subparser.add_argument(
"-o",
"--overrides",
type=str,
default="",
help=(
"a json(net) structure used to override the experiment configuration, e.g., "
"'{\"iterator.batch_size\": 16}'. Nested parameters can be specified either"
" with nested dictionaries or with dot syntax."
),
)
subparser.add_argument(
"--start-lr", type=float, default=1e-5, help="learning rate to start the search"
)
subparser.add_argument(
"--end-lr", type=float, default=10, help="learning rate up to which search is done"
)
subparser.add_argument(
"--num-batches",
type=int,
default=100,
help="number of mini-batches to run learning rate finder",
)
subparser.add_argument(
"--stopping-factor",
type=float,
default=None,
help="stop the search when the current loss exceeds the best loss recorded by "
"multiple of stopping factor",
)
subparser.add_argument(
"--linear",
action="store_true",
help="increase learning rate linearly instead of exponential increase",
)
subparser.add_argument(
"-f",
"--force",
action="store_true",
required=False,
help="overwrite the output directory if it exists",
)
subparser.add_argument(
"--file-friendly-logging",
action="store_true",
default=False,
help="outputs tqdm status on separate lines and slows tqdm refresh rate",
)
subparser.set_defaults(func=find_learning_rate_from_args)
return subparser
def find_learning_rate_from_args(args: argparse.Namespace) -> None:
"""
Start learning rate finder for given args
"""
common_logging.FILE_FRIENDLY_LOGGING = args.file_friendly_logging
params = Params.from_file(args.param_path, args.overrides)
find_learning_rate_model(
params,
args.serialization_dir,
start_lr=args.start_lr,
end_lr=args.end_lr,
num_batches=args.num_batches,
linear_steps=args.linear,
stopping_factor=args.stopping_factor,
force=args.force,
)
def find_learning_rate_model(
params: Params,
serialization_dir: str,
start_lr: float = 1e-5,
end_lr: float = 10,
num_batches: int = 100,
linear_steps: bool = False,
stopping_factor: float = None,
force: bool = False,
) -> None:
"""
Runs learning rate search for given `num_batches` and saves the results in ``serialization_dir``
# Parameters
params : `Params`
A parameter object specifying an AllenNLP Experiment.
serialization_dir : `str`
The directory in which to save results.
start_lr : `float`
Learning rate to start the search.
end_lr : `float`
Learning rate upto which search is done.
num_batches : `int`
Number of mini-batches to run Learning rate finder.
linear_steps : `bool`
Increase learning rate linearly if False exponentially.
stopping_factor : `float`
Stop the search when the current loss exceeds the best loss recorded by
multiple of stopping factor. If `None` search proceeds till the `end_lr`
force : `bool`
If True and the serialization directory already exists, everything in it will
be removed prior to finding the learning rate.
"""
create_serialization_dir(params, serialization_dir, recover=False, force=force)
prepare_environment(params)
cuda_device = params.params.get("trainer").get("cuda_device", -1)
check_for_gpu(cuda_device)
distributed_params = params.params.get("distributed")
# See https://github.com/allenai/allennlp/issues/3658
assert not distributed_params, "find-lr is not compatible with DistributedDataParallel."
all_datasets = datasets_from_params(params, serialization_dir=serialization_dir)
datasets_for_vocab_creation = set(params.pop("datasets_for_vocab_creation", all_datasets))
for dataset in datasets_for_vocab_creation:
if dataset not in all_datasets:
raise ConfigurationError(f"invalid 'dataset_for_vocab_creation' {dataset}")
logger.info(
"From dataset instances, %s will be considered for vocabulary creation.",
", ".join(datasets_for_vocab_creation),
)
vocab = Vocabulary.from_params(
params.pop("vocabulary", {}),
instances=(
instance
for key, dataset in all_datasets.items()
for instance in dataset
if key in datasets_for_vocab_creation
),
)
train_data = all_datasets["train"]
train_data.index_with(vocab)
model = Model.from_params(
vocab=vocab, params=params.pop("model"), serialization_dir=serialization_dir
)
data_loader = DataLoader.from_params(dataset=train_data, params=params.pop("data_loader"))
trainer_params = params.pop("trainer")
no_grad_regexes = trainer_params.pop("no_grad", ())
for name, parameter in model.named_parameters():
if any(re.search(regex, name) for regex in no_grad_regexes):
parameter.requires_grad_(False)
trainer_choice = trainer_params.pop("type", "gradient_descent")
if trainer_choice != "gradient_descent":
raise ConfigurationError(
"currently find-learning-rate only works with the GradientDescentTrainer"
)
trainer: GradientDescentTrainer = Trainer.from_params( # type: ignore
model=model,
serialization_dir=serialization_dir,
data_loader=data_loader,
params=trainer_params,
)
logger.info(
f"Starting learning rate search from {start_lr} to {end_lr} in {num_batches} iterations."
)
learning_rates, losses = search_learning_rate(
trainer,
start_lr=start_lr,
end_lr=end_lr,
num_batches=num_batches,
linear_steps=linear_steps,
stopping_factor=stopping_factor,
)
logger.info("Finished learning rate search.")
losses = _smooth(losses, 0.98)
_save_plot(learning_rates, losses, os.path.join(serialization_dir, "lr-losses.png"))
def search_learning_rate(
trainer: GradientDescentTrainer,
start_lr: float = 1e-5,
end_lr: float = 10,
num_batches: int = 100,
linear_steps: bool = False,
stopping_factor: float = None,
) -> Tuple[List[float], List[float]]:
"""
Runs training loop on the model using [`GradientDescentTrainer`](../training/trainer.md#gradientdescenttrainer)
increasing learning rate from `start_lr` to `end_lr` recording the losses.
# Parameters
trainer: `GradientDescentTrainer`
start_lr : `float`
The learning rate to start the search.
end_lr : `float`
The learning rate upto which search is done.
num_batches : `int`
Number of batches to run the learning rate finder.
linear_steps : `bool`
Increase learning rate linearly if False exponentially.
stopping_factor : `float`
Stop the search when the current loss exceeds the best loss recorded by
multiple of stopping factor. If `None` search proceeds till the `end_lr`
# Returns
(learning_rates, losses) : `Tuple[List[float], List[float]]`
Returns list of learning rates and corresponding losses.
Note: The losses are recorded before applying the corresponding learning rate
"""
if num_batches <= 10:
raise ConfigurationError(
"The number of iterations for learning rate finder should be greater than 10."
)
trainer.model.train()
infinite_generator = itertools.cycle(trainer.data_loader)
train_generator_tqdm = Tqdm.tqdm(infinite_generator, total=num_batches)
learning_rates = []
losses = []
best = 1e9
if linear_steps:
lr_update_factor = (end_lr - start_lr) / num_batches
else:
lr_update_factor = (end_lr / start_lr) ** (1.0 / num_batches)
for i, batch in enumerate(train_generator_tqdm):
if linear_steps:
current_lr = start_lr + (lr_update_factor * i)
else:
current_lr = start_lr * (lr_update_factor ** i)
for param_group in trainer.optimizer.param_groups:
param_group["lr"] = current_lr
# Zero gradients.
# NOTE: this is actually more efficient than calling `self.optimizer.zero_grad()`
# because it avoids a read op when the gradients are first updated below.
for p in param_group["params"]:
p.grad = None
loss = trainer.batch_outputs(batch, for_training=True)["loss"]
loss.backward()
loss = loss.detach().cpu().item()
if stopping_factor is not None and (math.isnan(loss) or loss > stopping_factor * best):
logger.info(f"Loss ({loss}) exceeds stopping_factor * lowest recorded loss.")
break
trainer.rescale_gradients()
trainer.optimizer.step()
learning_rates.append(current_lr)
losses.append(loss)
if loss < best and i > 10:
best = loss
if i == num_batches:
break
return learning_rates, losses
def _smooth(values: List[float], beta: float) -> List[float]:
""" Exponential smoothing of values """
avg_value = 0.0
smoothed = []
for i, value in enumerate(values):
avg_value = beta * avg_value + (1 - beta) * value
smoothed.append(avg_value / (1 - beta ** (i + 1)))
return smoothed
def _save_plot(learning_rates: List[float], losses: List[float], save_path: str):
try:
import matplotlib
matplotlib.use("Agg") # noqa
import matplotlib.pyplot as plt
except ModuleNotFoundError as error:
logger.warn(
"To use allennlp find-learning-rate, please install matplotlib: pip install matplotlib>=2.2.3 ."
)
raise error
plt.ylabel("loss")
plt.xlabel("learning rate (log10 scale)")
plt.xscale("log")
plt.plot(learning_rates, losses)
logger.info(f"Saving learning_rate vs loss plot to {save_path}.")
plt.savefig(save_path)
| allennlp-master | allennlp/commands/find_learning_rate.py |
from typing import List
import torch
from torch.nn import ParameterList, Parameter
from allennlp.common.checks import ConfigurationError
from allennlp.nn import util
class ScalarMix(torch.nn.Module):
"""
Computes a parameterised scalar mixture of N tensors, `mixture = gamma * sum(s_k * tensor_k)`
where `s = softmax(w)`, with `w` and `gamma` scalar parameters.
In addition, if `do_layer_norm=True` then apply layer normalization to each tensor
before weighting.
"""
def __init__(
self,
mixture_size: int,
do_layer_norm: bool = False,
initial_scalar_parameters: List[float] = None,
trainable: bool = True,
) -> None:
super().__init__()
self.mixture_size = mixture_size
self.do_layer_norm = do_layer_norm
if initial_scalar_parameters is None:
initial_scalar_parameters = [0.0] * mixture_size
elif len(initial_scalar_parameters) != mixture_size:
raise ConfigurationError(
"Length of initial_scalar_parameters {} differs "
"from mixture_size {}".format(initial_scalar_parameters, mixture_size)
)
self.scalar_parameters = ParameterList(
[
Parameter(
torch.FloatTensor([initial_scalar_parameters[i]]), requires_grad=trainable
)
for i in range(mixture_size)
]
)
self.gamma = Parameter(torch.FloatTensor([1.0]), requires_grad=trainable)
def forward(self, tensors: List[torch.Tensor], mask: torch.BoolTensor = None) -> torch.Tensor:
"""
Compute a weighted average of the `tensors`. The input tensors an be any shape
with at least two dimensions, but must all be the same shape.
When `do_layer_norm=True`, the `mask` is required input. If the `tensors` are
dimensioned `(dim_0, ..., dim_{n-1}, dim_n)`, then the `mask` is dimensioned
`(dim_0, ..., dim_{n-1})`, as in the typical case with `tensors` of shape
`(batch_size, timesteps, dim)` and `mask` of shape `(batch_size, timesteps)`.
When `do_layer_norm=False` the `mask` is ignored.
"""
if len(tensors) != self.mixture_size:
raise ConfigurationError(
"{} tensors were passed, but the module was initialized to "
"mix {} tensors.".format(len(tensors), self.mixture_size)
)
def _do_layer_norm(tensor, broadcast_mask, num_elements_not_masked):
tensor_masked = tensor * broadcast_mask
mean = torch.sum(tensor_masked) / num_elements_not_masked
variance = (
torch.sum(((tensor_masked - mean) * broadcast_mask) ** 2) / num_elements_not_masked
)
return (tensor - mean) / torch.sqrt(variance + util.tiny_value_of_dtype(variance.dtype))
normed_weights = torch.nn.functional.softmax(
torch.cat([parameter for parameter in self.scalar_parameters]), dim=0
)
normed_weights = torch.split(normed_weights, split_size_or_sections=1)
if not self.do_layer_norm:
pieces = []
for weight, tensor in zip(normed_weights, tensors):
pieces.append(weight * tensor)
return self.gamma * sum(pieces)
else:
assert mask is not None
broadcast_mask = mask.unsqueeze(-1)
input_dim = tensors[0].size(-1)
num_elements_not_masked = torch.sum(mask) * input_dim
pieces = []
for weight, tensor in zip(normed_weights, tensors):
pieces.append(
weight * _do_layer_norm(tensor, broadcast_mask, num_elements_not_masked)
)
return self.gamma * sum(pieces)
| allennlp-master | allennlp/modules/scalar_mix.py |
"""
A stacked LSTM with LSTM layers which alternate between going forwards over
the sequence and going backwards.
"""
from typing import Optional, Tuple, Union, List
import torch
from torch.nn.utils.rnn import PackedSequence
from allennlp.modules.augmented_lstm import AugmentedLstm
from allennlp.common.checks import ConfigurationError
TensorPair = Tuple[torch.Tensor, torch.Tensor]
class StackedAlternatingLstm(torch.nn.Module):
"""
A stacked LSTM with LSTM layers which alternate between going forwards over
the sequence and going backwards. This implementation is based on the
description in [Deep Semantic Role Labelling - What works and what's next][0].
[0]: https://www.aclweb.org/anthology/P17-1044.pdf
[1]: https://arxiv.org/abs/1512.05287
# Parameters
input_size : `int`, required
The dimension of the inputs to the LSTM.
hidden_size : `int`, required
The dimension of the outputs of the LSTM.
num_layers : `int`, required
The number of stacked LSTMs to use.
recurrent_dropout_probability : `float`, optional (default = `0.0`)
The dropout probability to be used in a dropout scheme as stated in
[A Theoretically Grounded Application of Dropout in Recurrent Neural Networks][1].
use_input_projection_bias : `bool`, optional (default = `True`)
Whether or not to use a bias on the input projection layer. This is mainly here
for backwards compatibility reasons and will be removed (and set to False)
in future releases.
# Returns
output_accumulator : `PackedSequence`
The outputs of the interleaved LSTMs per timestep. A tensor of shape
(batch_size, max_timesteps, hidden_size) where for a given batch
element, all outputs past the sequence length for that batch are
zero tensors.
"""
def __init__(
self,
input_size: int,
hidden_size: int,
num_layers: int,
recurrent_dropout_probability: float = 0.0,
use_highway: bool = True,
use_input_projection_bias: bool = True,
) -> None:
super().__init__()
# Required to be wrapped with a `PytorchSeq2SeqWrapper`.
self.input_size = input_size
self.hidden_size = hidden_size
self.num_layers = num_layers
layers = []
lstm_input_size = input_size
for layer_index in range(num_layers):
go_forward = layer_index % 2 == 0
layer = AugmentedLstm(
lstm_input_size,
hidden_size,
go_forward,
recurrent_dropout_probability=recurrent_dropout_probability,
use_highway=use_highway,
use_input_projection_bias=use_input_projection_bias,
)
lstm_input_size = hidden_size
self.add_module("layer_{}".format(layer_index), layer)
layers.append(layer)
self.lstm_layers = layers
def forward(
self, inputs: PackedSequence, initial_state: Optional[TensorPair] = None
) -> Tuple[Union[torch.Tensor, PackedSequence], TensorPair]:
"""
# Parameters
inputs : `PackedSequence`, required.
A batch first `PackedSequence` to run the stacked LSTM over.
initial_state : `Tuple[torch.Tensor, torch.Tensor]`, optional, (default = `None`)
A tuple (state, memory) representing the initial hidden state and memory
of the LSTM. Each tensor has shape (1, batch_size, output_dimension).
# Returns
output_sequence : `PackedSequence`
The encoded sequence of shape (batch_size, sequence_length, hidden_size)
final_states: `Tuple[torch.Tensor, torch.Tensor]`
The per-layer final (state, memory) states of the LSTM, each with shape
(num_layers, batch_size, hidden_size).
"""
if not initial_state:
hidden_states: List[Optional[TensorPair]] = [None] * len(self.lstm_layers)
elif initial_state[0].size()[0] != len(self.lstm_layers):
raise ConfigurationError(
"Initial states were passed to forward() but the number of "
"initial states does not match the number of layers."
)
else:
hidden_states = list(zip(initial_state[0].split(1, 0), initial_state[1].split(1, 0)))
output_sequence = inputs
final_states = []
for i, state in enumerate(hidden_states):
layer = getattr(self, "layer_{}".format(i))
# The state is duplicated to mirror the Pytorch API for LSTMs.
output_sequence, final_state = layer(output_sequence, state)
final_states.append(final_state)
final_hidden_state, final_cell_state = tuple(
torch.cat(state_list, 0) for state_list in zip(*final_states)
)
return output_sequence, (final_hidden_state, final_cell_state)
| allennlp-master | allennlp/modules/stacked_alternating_lstm.py |
from typing import Tuple, Union, Optional, Callable, Any
import torch
from torch.nn.utils.rnn import pack_padded_sequence, PackedSequence
from allennlp.nn.util import get_lengths_from_binary_sequence_mask, sort_batch_by_length
# We have two types here for the state, because storing the state in something
# which is Iterable (like a tuple, below), is helpful for internal manipulation
# - however, the states are consumed as either Tensors or a Tuple of Tensors, so
# returning them in this format is unhelpful.
RnnState = Union[torch.Tensor, Tuple[torch.Tensor, torch.Tensor]]
RnnStateStorage = Tuple[torch.Tensor, ...]
class _EncoderBase(torch.nn.Module):
"""
This abstract class serves as a base for the 3 `Encoder` abstractions in AllenNLP.
- [`Seq2SeqEncoders`](./seq2seq_encoders/seq2seq_encoder.md)
- [`Seq2VecEncoders`](./seq2vec_encoders/seq2vec_encoder.md)
Additionally, this class provides functionality for sorting sequences by length
so they can be consumed by Pytorch RNN classes, which require their inputs to be
sorted by length. Finally, it also provides optional statefulness to all of it's
subclasses by allowing the caching and retrieving of the hidden states of RNNs.
"""
def __init__(self, stateful: bool = False) -> None:
super().__init__()
self.stateful = stateful
self._states: Optional[RnnStateStorage] = None
def sort_and_run_forward(
self,
module: Callable[
[PackedSequence, Optional[RnnState]],
Tuple[Union[PackedSequence, torch.Tensor], RnnState],
],
inputs: torch.Tensor,
mask: torch.BoolTensor,
hidden_state: Optional[RnnState] = None,
):
"""
This function exists because Pytorch RNNs require that their inputs be sorted
before being passed as input. As all of our Seq2xxxEncoders use this functionality,
it is provided in a base class. This method can be called on any module which
takes as input a `PackedSequence` and some `hidden_state`, which can either be a
tuple of tensors or a tensor.
As all of our Seq2xxxEncoders have different return types, we return `sorted`
outputs from the module, which is called directly. Additionally, we return the
indices into the batch dimension required to restore the tensor to it's correct,
unsorted order and the number of valid batch elements (i.e the number of elements
in the batch which are not completely masked). This un-sorting and re-padding
of the module outputs is left to the subclasses because their outputs have different
types and handling them smoothly here is difficult.
# Parameters
module : `Callable[RnnInputs, RnnOutputs]`
A function to run on the inputs, where
`RnnInputs: [PackedSequence, Optional[RnnState]]` and
`RnnOutputs: Tuple[Union[PackedSequence, torch.Tensor], RnnState]`.
In most cases, this is a `torch.nn.Module`.
inputs : `torch.Tensor`, required.
A tensor of shape `(batch_size, sequence_length, embedding_size)` representing
the inputs to the Encoder.
mask : `torch.BoolTensor`, required.
A tensor of shape `(batch_size, sequence_length)`, representing masked and
non-masked elements of the sequence for each element in the batch.
hidden_state : `Optional[RnnState]`, (default = `None`).
A single tensor of shape (num_layers, batch_size, hidden_size) representing the
state of an RNN with or a tuple of
tensors of shapes (num_layers, batch_size, hidden_size) and
(num_layers, batch_size, memory_size), representing the hidden state and memory
state of an LSTM-like RNN.
# Returns
module_output : `Union[torch.Tensor, PackedSequence]`.
A Tensor or PackedSequence representing the output of the Pytorch Module.
The batch size dimension will be equal to `num_valid`, as sequences of zero
length are clipped off before the module is called, as Pytorch cannot handle
zero length sequences.
final_states : `Optional[RnnState]`
A Tensor representing the hidden state of the Pytorch Module. This can either
be a single tensor of shape (num_layers, num_valid, hidden_size), for instance in
the case of a GRU, or a tuple of tensors, such as those required for an LSTM.
restoration_indices : `torch.LongTensor`
A tensor of shape `(batch_size,)`, describing the re-indexing required to transform
the outputs back to their original batch order.
"""
# In some circumstances you may have sequences of zero length. `pack_padded_sequence`
# requires all sequence lengths to be > 0, so remove sequences of zero length before
# calling self._module, then fill with zeros.
# First count how many sequences are empty.
batch_size = mask.size(0)
num_valid = torch.sum(mask[:, 0]).int().item()
sequence_lengths = get_lengths_from_binary_sequence_mask(mask)
(
sorted_inputs,
sorted_sequence_lengths,
restoration_indices,
sorting_indices,
) = sort_batch_by_length(inputs, sequence_lengths)
# Now create a PackedSequence with only the non-empty, sorted sequences.
packed_sequence_input = pack_padded_sequence(
sorted_inputs[:num_valid, :, :],
sorted_sequence_lengths[:num_valid].data.tolist(),
batch_first=True,
)
# Prepare the initial states.
if not self.stateful:
if hidden_state is None:
initial_states: Any = hidden_state
elif isinstance(hidden_state, tuple):
initial_states = [
state.index_select(1, sorting_indices)[:, :num_valid, :].contiguous()
for state in hidden_state
]
else:
initial_states = hidden_state.index_select(1, sorting_indices)[
:, :num_valid, :
].contiguous()
else:
initial_states = self._get_initial_states(batch_size, num_valid, sorting_indices)
# Actually call the module on the sorted PackedSequence.
module_output, final_states = module(packed_sequence_input, initial_states)
return module_output, final_states, restoration_indices
def _get_initial_states(
self, batch_size: int, num_valid: int, sorting_indices: torch.LongTensor
) -> Optional[RnnState]:
"""
Returns an initial state for use in an RNN. Additionally, this method handles
the batch size changing across calls by mutating the state to append initial states
for new elements in the batch. Finally, it also handles sorting the states
with respect to the sequence lengths of elements in the batch and removing rows
which are completely padded. Importantly, this `mutates` the state if the
current batch size is larger than when it was previously called.
# Parameters
batch_size : `int`, required.
The batch size can change size across calls to stateful RNNs, so we need
to know if we need to expand or shrink the states before returning them.
Expanded states will be set to zero.
num_valid : `int`, required.
The batch may contain completely padded sequences which get removed before
the sequence is passed through the encoder. We also need to clip these off
of the state too.
sorting_indices `torch.LongTensor`, required.
Pytorch RNNs take sequences sorted by length. When we return the states to be
used for a given call to `module.forward`, we need the states to match up to
the sorted sequences, so before returning them, we sort the states using the
same indices used to sort the sequences.
# Returns
This method has a complex return type because it has to deal with the first time it
is called, when it has no state, and the fact that types of RNN have heterogeneous
states.
If it is the first time the module has been called, it returns `None`, regardless
of the type of the `Module`.
Otherwise, for LSTMs, it returns a tuple of `torch.Tensors` with shape
`(num_layers, num_valid, state_size)` and `(num_layers, num_valid, memory_size)`
respectively, or for GRUs, it returns a single `torch.Tensor` of shape
`(num_layers, num_valid, state_size)`.
"""
# We don't know the state sizes the first time calling forward,
# so we let the module define what it's initial hidden state looks like.
if self._states is None:
return None
# Otherwise, we have some previous states.
if batch_size > self._states[0].size(1):
# This batch is larger than the all previous states.
# If so, resize the states.
num_states_to_concat = batch_size - self._states[0].size(1)
resized_states = []
# state has shape (num_layers, batch_size, hidden_size)
for state in self._states:
# This _must_ be inside the loop because some
# RNNs have states with different last dimension sizes.
zeros = state.new_zeros(state.size(0), num_states_to_concat, state.size(2))
resized_states.append(torch.cat([state, zeros], 1))
self._states = tuple(resized_states)
correctly_shaped_states = self._states
elif batch_size < self._states[0].size(1):
# This batch is smaller than the previous one.
correctly_shaped_states = tuple(state[:, :batch_size, :] for state in self._states)
else:
correctly_shaped_states = self._states
# At this point, our states are of shape (num_layers, batch_size, hidden_size).
# However, the encoder uses sorted sequences and additionally removes elements
# of the batch which are fully padded. We need the states to match up to these
# sorted and filtered sequences, so we do that in the next two blocks before
# returning the state/s.
if len(self._states) == 1:
# GRUs only have a single state. This `unpacks` it from the
# tuple and returns the tensor directly.
correctly_shaped_state = correctly_shaped_states[0]
sorted_state = correctly_shaped_state.index_select(1, sorting_indices)
return sorted_state[:, :num_valid, :].contiguous()
else:
# LSTMs have a state tuple of (state, memory).
sorted_states = [
state.index_select(1, sorting_indices) for state in correctly_shaped_states
]
return tuple(state[:, :num_valid, :].contiguous() for state in sorted_states)
def _update_states(
self, final_states: RnnStateStorage, restoration_indices: torch.LongTensor
) -> None:
"""
After the RNN has run forward, the states need to be updated.
This method just sets the state to the updated new state, performing
several pieces of book-keeping along the way - namely, unsorting the
states and ensuring that the states of completely padded sequences are
not updated. Finally, it also detaches the state variable from the
computational graph, such that the graph can be garbage collected after
each batch iteration.
# Parameters
final_states : `RnnStateStorage`, required.
The hidden states returned as output from the RNN.
restoration_indices : `torch.LongTensor`, required.
The indices that invert the sorting used in `sort_and_run_forward`
to order the states with respect to the lengths of the sequences in
the batch.
"""
# TODO(Mark): seems weird to sort here, but append zeros in the subclasses.
# which way around is best?
new_unsorted_states = [state.index_select(1, restoration_indices) for state in final_states]
if self._states is None:
# We don't already have states, so just set the
# ones we receive to be the current state.
self._states = tuple(state.data for state in new_unsorted_states)
else:
# Now we've sorted the states back so that they correspond to the original
# indices, we need to figure out what states we need to update, because if we
# didn't use a state for a particular row, we want to preserve its state.
# Thankfully, the rows which are all zero in the state correspond exactly
# to those which aren't used, so we create masks of shape (new_batch_size,),
# denoting which states were used in the RNN computation.
current_state_batch_size = self._states[0].size(1)
new_state_batch_size = final_states[0].size(1)
# Masks for the unused states of shape (1, new_batch_size, 1)
used_new_rows_mask = [
(state[0, :, :].sum(-1) != 0.0).float().view(1, new_state_batch_size, 1)
for state in new_unsorted_states
]
new_states = []
if current_state_batch_size > new_state_batch_size:
# The new state is smaller than the old one,
# so just update the indices which we used.
for old_state, new_state, used_mask in zip(
self._states, new_unsorted_states, used_new_rows_mask
):
# zero out all rows in the previous state
# which _were_ used in the current state.
masked_old_state = old_state[:, :new_state_batch_size, :] * (1 - used_mask)
# The old state is larger, so update the relevant parts of it.
old_state[:, :new_state_batch_size, :] = new_state + masked_old_state
new_states.append(old_state.detach())
else:
# The states are the same size, so we just have to
# deal with the possibility that some rows weren't used.
new_states = []
for old_state, new_state, used_mask in zip(
self._states, new_unsorted_states, used_new_rows_mask
):
# zero out all rows which _were_ used in the current state.
masked_old_state = old_state * (1 - used_mask)
# The old state is larger, so update the relevant parts of it.
new_state += masked_old_state
new_states.append(new_state.detach())
# It looks like there should be another case handled here - when
# the current_state_batch_size < new_state_batch_size. However,
# this never happens, because the states themeselves are mutated
# by appending zeros when calling _get_inital_states, meaning that
# the new states are either of equal size, or smaller, in the case
# that there are some unused elements (zero-length) for the RNN computation.
self._states = tuple(new_states)
def reset_states(self, mask: torch.BoolTensor = None) -> None:
"""
Resets the internal states of a stateful encoder.
# Parameters
mask : `torch.BoolTensor`, optional.
A tensor of shape `(batch_size,)` indicating which states should
be reset. If not provided, all states will be reset.
"""
if mask is None:
self._states = None
else:
# state has shape (num_layers, batch_size, hidden_size). We reshape
# mask to have shape (1, batch_size, 1) so that operations
# broadcast properly.
mask_batch_size = mask.size(0)
mask = mask.view(1, mask_batch_size, 1)
new_states = []
assert self._states is not None
for old_state in self._states:
old_state_batch_size = old_state.size(1)
if old_state_batch_size != mask_batch_size:
raise ValueError(
f"Trying to reset states using mask with incorrect batch size. "
f"Expected batch size: {old_state_batch_size}. "
f"Provided batch size: {mask_batch_size}."
)
new_state = ~mask * old_state
new_states.append(new_state.detach())
self._states = tuple(new_states)
| allennlp-master | allennlp/modules/encoder_base.py |
"""
A maxout neural network.
"""
from typing import Sequence, Union
import torch
from allennlp.common.checks import ConfigurationError
from allennlp.common.registrable import FromParams
class Maxout(torch.nn.Module, FromParams):
"""
This `Module` is a maxout neural network.
# Parameters
input_dim : `int`, required
The dimensionality of the input. We assume the input has shape `(batch_size, input_dim)`.
num_layers : `int`, required
The number of maxout layers to apply to the input.
output_dims : `Union[int, Sequence[int]]`, required
The output dimension of each of the maxout layers. If this is a single `int`, we use
it for all maxout layers. If it is a `Sequence[int]`, `len(output_dims)` must be
`num_layers`.
pool_sizes : `Union[int, Sequence[int]]`, required
The size of max-pools. If this is a single `int`, we use
it for all maxout layers. If it is a `Sequence[int]`, `len(pool_sizes)` must be
`num_layers`.
dropout : `Union[float, Sequence[float]]`, optional (default = `0.0`)
If given, we will apply this amount of dropout after each layer. Semantics of `float`
versus `Sequence[float]` is the same as with other parameters.
"""
def __init__(
self,
input_dim: int,
num_layers: int,
output_dims: Union[int, Sequence[int]],
pool_sizes: Union[int, Sequence[int]],
dropout: Union[float, Sequence[float]] = 0.0,
) -> None:
super().__init__()
if not isinstance(output_dims, list):
output_dims = [output_dims] * num_layers # type: ignore
if not isinstance(pool_sizes, list):
pool_sizes = [pool_sizes] * num_layers # type: ignore
if not isinstance(dropout, list):
dropout = [dropout] * num_layers # type: ignore
if len(output_dims) != num_layers:
raise ConfigurationError(
"len(output_dims) (%d) != num_layers (%d)" % (len(output_dims), num_layers)
)
if len(pool_sizes) != num_layers:
raise ConfigurationError(
"len(pool_sizes) (%d) != num_layers (%d)" % (len(pool_sizes), num_layers)
)
if len(dropout) != num_layers:
raise ConfigurationError(
"len(dropout) (%d) != num_layers (%d)" % (len(dropout), num_layers)
)
self._pool_sizes = pool_sizes
input_dims = [input_dim] + output_dims[:-1]
linear_layers = []
for layer_input_dim, layer_output_dim, pool_size in zip(
input_dims, output_dims, pool_sizes
):
linear_layers.append(torch.nn.Linear(layer_input_dim, layer_output_dim * pool_size))
self._linear_layers = torch.nn.ModuleList(linear_layers)
dropout_layers = [torch.nn.Dropout(p=value) for value in dropout]
self._dropout = torch.nn.ModuleList(dropout_layers)
self._output_dims = output_dims
self._output_dim = output_dims[-1]
self._input_dim = input_dim
def get_output_dim(self):
return self._output_dim
def get_input_dim(self):
return self._input_dim
def forward(self, inputs: torch.Tensor) -> torch.Tensor:
output = inputs
for layer, layer_output_dim, dropout, pool_size in zip(
self._linear_layers, self._output_dims, self._dropout, self._pool_sizes
):
affine_output = layer(output)
# Compute and apply the proper shape for the max.
shape = list(inputs.size())
shape[-1] = layer_output_dim
shape.append(pool_size)
maxed_output = torch.max(affine_output.view(*shape), dim=-1)[0]
dropped_output = dropout(maxed_output)
output = dropped_output
return output
| allennlp-master | allennlp/modules/maxout.py |
import json
import logging
import warnings
from typing import Any, Dict, List, Union
import numpy
import torch
from overrides import overrides
from torch.nn.modules import Dropout
from allennlp.common import FromParams
from allennlp.common.checks import ConfigurationError
from allennlp.common.file_utils import cached_path
from allennlp.common.util import lazy_groups_of
from allennlp.data import Instance, Token, Vocabulary
from allennlp.data.batch import Batch
from allennlp.data.fields import TextField
from allennlp.data.token_indexers.elmo_indexer import (
ELMoCharacterMapper,
ELMoTokenCharactersIndexer,
)
from allennlp.modules.elmo_lstm import ElmoLstm
from allennlp.modules.highway import Highway
from allennlp.modules.scalar_mix import ScalarMix
from allennlp.nn.util import (
add_sentence_boundary_token_ids,
get_device_of,
remove_sentence_boundaries,
)
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=FutureWarning)
import h5py
logger = logging.getLogger(__name__)
class Elmo(torch.nn.Module, FromParams):
"""
Compute ELMo representations using a pre-trained bidirectional language model.
See "Deep contextualized word representations", Peters et al. for details.
This module takes character id input and computes `num_output_representations` different layers
of ELMo representations. Typically `num_output_representations` is 1 or 2. For example, in
the case of the SRL model in the above paper, `num_output_representations=1` where ELMo was included at
the input token representation layer. In the case of the SQuAD model, `num_output_representations=2`
as ELMo was also included at the GRU output layer.
In the implementation below, we learn separate scalar weights for each output layer,
but only run the biLM once on each input sequence for efficiency.
# Parameters
options_file : `str`, required.
ELMo JSON options file
weight_file : `str`, required.
ELMo hdf5 weight file
num_output_representations : `int`, required.
The number of ELMo representation to output with
different linear weighted combination of the 3 layers (i.e.,
character-convnet output, 1st lstm output, 2nd lstm output).
requires_grad : `bool`, optional
If True, compute gradient of ELMo parameters for fine tuning.
do_layer_norm : `bool`, optional, (default = `False`).
Should we apply layer normalization (passed to `ScalarMix`)?
dropout : `float`, optional, (default = `0.5`).
The dropout to be applied to the ELMo representations.
vocab_to_cache : `List[str]`, optional, (default = `None`).
A list of words to pre-compute and cache character convolutions
for. If you use this option, Elmo expects that you pass word
indices of shape (batch_size, timesteps) to forward, instead
of character indices. If you use this option and pass a word which
wasn't pre-cached, this will break.
keep_sentence_boundaries : `bool`, optional, (default = `False`)
If True, the representation of the sentence boundary tokens are
not removed.
scalar_mix_parameters : `List[float]`, optional, (default = `None`)
If not `None`, use these scalar mix parameters to weight the representations
produced by different layers. These mixing weights are not updated during
training. The mixing weights here should be the unnormalized (i.e., pre-softmax)
weights. So, if you wanted to use only the 1st layer of a 2-layer ELMo,
you can set this to [-9e10, 1, -9e10 ].
module : `torch.nn.Module`, optional, (default = `None`).
If provided, then use this module instead of the pre-trained ELMo biLM.
If using this option, then pass `None` for both `options_file`
and `weight_file`. The module must provide a public attribute
`num_layers` with the number of internal layers and its `forward`
method must return a `dict` with `activations` and `mask` keys
(see `_ElmoBilm` for an example). Note that `requires_grad` is also
ignored with this option.
"""
def __init__(
self,
options_file: str,
weight_file: str,
num_output_representations: int,
requires_grad: bool = False,
do_layer_norm: bool = False,
dropout: float = 0.5,
vocab_to_cache: List[str] = None,
keep_sentence_boundaries: bool = False,
scalar_mix_parameters: List[float] = None,
module: torch.nn.Module = None,
) -> None:
super().__init__()
logger.info("Initializing ELMo")
if module is not None:
if options_file is not None or weight_file is not None:
raise ConfigurationError("Don't provide options_file or weight_file with module")
self._elmo_lstm = module
else:
self._elmo_lstm = _ElmoBiLm( # type: ignore
options_file,
weight_file,
requires_grad=requires_grad,
vocab_to_cache=vocab_to_cache,
)
self._has_cached_vocab = vocab_to_cache is not None
self._keep_sentence_boundaries = keep_sentence_boundaries
self._dropout = Dropout(p=dropout)
self._scalar_mixes: Any = []
for k in range(num_output_representations):
scalar_mix = ScalarMix(
self._elmo_lstm.num_layers, # type: ignore
do_layer_norm=do_layer_norm,
initial_scalar_parameters=scalar_mix_parameters,
trainable=scalar_mix_parameters is None,
)
self.add_module("scalar_mix_{}".format(k), scalar_mix)
self._scalar_mixes.append(scalar_mix)
def get_output_dim(self):
return self._elmo_lstm.get_output_dim()
def forward(
self, inputs: torch.Tensor, word_inputs: torch.Tensor = None
) -> Dict[str, Union[torch.Tensor, List[torch.Tensor]]]:
"""
# Parameters
inputs : `torch.Tensor`, required.
Shape `(batch_size, timesteps, 50)` of character ids representing the current batch.
word_inputs : `torch.Tensor`, required.
If you passed a cached vocab, you can in addition pass a tensor of shape
`(batch_size, timesteps)`, which represent word ids which have been pre-cached.
# Returns
`Dict[str, Union[torch.Tensor, List[torch.Tensor]]]`
A dict with the following keys:
- `'elmo_representations'` (`List[torch.Tensor]`) :
A `num_output_representations` list of ELMo representations for the input sequence.
Each representation is shape `(batch_size, timesteps, embedding_dim)`
- `'mask'` (`torch.BoolTensor`) :
Shape `(batch_size, timesteps)` long tensor with sequence mask.
"""
# reshape the input if needed
original_shape = inputs.size()
if len(original_shape) > 3:
timesteps, num_characters = original_shape[-2:]
reshaped_inputs = inputs.view(-1, timesteps, num_characters)
else:
reshaped_inputs = inputs
if word_inputs is not None:
original_word_size = word_inputs.size()
if self._has_cached_vocab and len(original_word_size) > 2:
reshaped_word_inputs = word_inputs.view(-1, original_word_size[-1])
elif not self._has_cached_vocab:
logger.warning(
"Word inputs were passed to ELMo but it does not have a cached vocab."
)
reshaped_word_inputs = None
else:
reshaped_word_inputs = word_inputs
else:
reshaped_word_inputs = word_inputs
# run the biLM
bilm_output = self._elmo_lstm(reshaped_inputs, reshaped_word_inputs) # type: ignore
layer_activations = bilm_output["activations"]
mask_with_bos_eos = bilm_output["mask"]
# compute the elmo representations
representations = []
for i in range(len(self._scalar_mixes)):
scalar_mix = getattr(self, "scalar_mix_{}".format(i))
representation_with_bos_eos = scalar_mix(layer_activations, mask_with_bos_eos)
if self._keep_sentence_boundaries:
processed_representation = representation_with_bos_eos
processed_mask = mask_with_bos_eos
else:
representation_without_bos_eos, mask_without_bos_eos = remove_sentence_boundaries(
representation_with_bos_eos, mask_with_bos_eos
)
processed_representation = representation_without_bos_eos
processed_mask = mask_without_bos_eos
representations.append(self._dropout(processed_representation))
# reshape if necessary
if word_inputs is not None and len(original_word_size) > 2:
mask = processed_mask.view(original_word_size)
elmo_representations = [
representation.view(original_word_size + (-1,))
for representation in representations
]
elif len(original_shape) > 3:
mask = processed_mask.view(original_shape[:-1])
elmo_representations = [
representation.view(original_shape[:-1] + (-1,))
for representation in representations
]
else:
mask = processed_mask
elmo_representations = representations
return {"elmo_representations": elmo_representations, "mask": mask}
def batch_to_ids(batch: List[List[str]]) -> torch.Tensor:
"""
Converts a batch of tokenized sentences to a tensor representing the sentences with encoded characters
(len(batch), max sentence length, max word length).
# Parameters
batch : `List[List[str]]`, required
A list of tokenized sentences.
# Returns
A tensor of padded character ids.
"""
instances = []
indexer = ELMoTokenCharactersIndexer()
for sentence in batch:
tokens = [Token(token) for token in sentence]
field = TextField(tokens, {"character_ids": indexer})
instance = Instance({"elmo": field})
instances.append(instance)
dataset = Batch(instances)
vocab = Vocabulary()
dataset.index_instances(vocab)
return dataset.as_tensor_dict()["elmo"]["character_ids"]["elmo_tokens"]
class _ElmoCharacterEncoder(torch.nn.Module):
"""
Compute context insensitive token representation using pretrained biLM.
This embedder has input character ids of size (batch_size, sequence_length, 50)
and returns (batch_size, sequence_length + 2, embedding_dim), where embedding_dim
is specified in the options file (typically 512).
We add special entries at the beginning and end of each sequence corresponding
to <S> and </S>, the beginning and end of sentence tokens.
Note: this is a lower level class useful for advanced usage. Most users should
use `ElmoTokenEmbedder` or `allennlp.modules.Elmo` instead.
# Parameters
options_file : `str`
ELMo JSON options file
weight_file : `str`
ELMo hdf5 weight file
requires_grad : `bool`, optional, (default = `False`).
If True, compute gradient of ELMo parameters for fine tuning.
The relevant section of the options file is something like:
```
{'char_cnn': {
'activation': 'relu',
'embedding': {'dim': 4},
'filters': [[1, 4], [2, 8], [3, 16], [4, 32], [5, 64]],
'max_characters_per_token': 50,
'n_characters': 262,
'n_highway': 2
}
}
```
"""
def __init__(self, options_file: str, weight_file: str, requires_grad: bool = False) -> None:
super().__init__()
with open(cached_path(options_file), "r") as fin:
self._options = json.load(fin)
self._weight_file = weight_file
self.output_dim = self._options["lstm"]["projection_dim"]
self.requires_grad = requires_grad
self._load_weights()
# Cache the arrays for use in forward -- +1 due to masking.
self._beginning_of_sentence_characters = torch.from_numpy(
numpy.array(ELMoCharacterMapper.beginning_of_sentence_characters) + 1
)
self._end_of_sentence_characters = torch.from_numpy(
numpy.array(ELMoCharacterMapper.end_of_sentence_characters) + 1
)
def get_output_dim(self):
return self.output_dim
@overrides
def forward(self, inputs: torch.Tensor) -> Dict[str, torch.Tensor]:
"""
Compute context insensitive token embeddings for ELMo representations.
# Parameters
inputs : `torch.Tensor`
Shape `(batch_size, sequence_length, 50)` of character ids representing the
current batch.
# Returns
Dict with keys:
`'token_embedding'` : `torch.Tensor`
Shape `(batch_size, sequence_length + 2, embedding_dim)` tensor with context
insensitive token representations.
`'mask'`: `torch.BoolTensor`
Shape `(batch_size, sequence_length + 2)` long tensor with sequence mask.
"""
# Add BOS/EOS
mask = (inputs > 0).sum(dim=-1) > 0
character_ids_with_bos_eos, mask_with_bos_eos = add_sentence_boundary_token_ids(
inputs, mask, self._beginning_of_sentence_characters, self._end_of_sentence_characters
)
# the character id embedding
max_chars_per_token = self._options["char_cnn"]["max_characters_per_token"]
# (batch_size * sequence_length, max_chars_per_token, embed_dim)
character_embedding = torch.nn.functional.embedding(
character_ids_with_bos_eos.view(-1, max_chars_per_token), self._char_embedding_weights
)
# run convolutions
cnn_options = self._options["char_cnn"]
if cnn_options["activation"] == "tanh":
activation = torch.tanh
elif cnn_options["activation"] == "relu":
activation = torch.nn.functional.relu
else:
raise ConfigurationError("Unknown activation")
# (batch_size * sequence_length, embed_dim, max_chars_per_token)
character_embedding = torch.transpose(character_embedding, 1, 2)
convs = []
for i in range(len(self._convolutions)):
conv = getattr(self, "char_conv_{}".format(i))
convolved = conv(character_embedding)
# (batch_size * sequence_length, n_filters for this width)
convolved, _ = torch.max(convolved, dim=-1)
convolved = activation(convolved)
convs.append(convolved)
# (batch_size * sequence_length, n_filters)
token_embedding = torch.cat(convs, dim=-1)
# apply the highway layers (batch_size * sequence_length, n_filters)
token_embedding = self._highways(token_embedding)
# final projection (batch_size * sequence_length, embedding_dim)
token_embedding = self._projection(token_embedding)
# reshape to (batch_size, sequence_length, embedding_dim)
batch_size, sequence_length, _ = character_ids_with_bos_eos.size()
return {
"mask": mask_with_bos_eos,
"token_embedding": token_embedding.view(batch_size, sequence_length, -1),
}
def _load_weights(self):
self._load_char_embedding()
self._load_cnn_weights()
self._load_highway()
self._load_projection()
def _load_char_embedding(self):
with h5py.File(cached_path(self._weight_file), "r") as fin:
char_embed_weights = fin["char_embed"][...]
weights = numpy.zeros(
(char_embed_weights.shape[0] + 1, char_embed_weights.shape[1]), dtype="float32"
)
weights[1:, :] = char_embed_weights
self._char_embedding_weights = torch.nn.Parameter(
torch.FloatTensor(weights), requires_grad=self.requires_grad
)
def _load_cnn_weights(self):
cnn_options = self._options["char_cnn"]
filters = cnn_options["filters"]
char_embed_dim = cnn_options["embedding"]["dim"]
convolutions = []
for i, (width, num) in enumerate(filters):
conv = torch.nn.Conv1d(
in_channels=char_embed_dim, out_channels=num, kernel_size=width, bias=True
)
# load the weights
with h5py.File(cached_path(self._weight_file), "r") as fin:
weight = fin["CNN"]["W_cnn_{}".format(i)][...]
bias = fin["CNN"]["b_cnn_{}".format(i)][...]
w_reshaped = numpy.transpose(weight.squeeze(axis=0), axes=(2, 1, 0))
if w_reshaped.shape != tuple(conv.weight.data.shape):
raise ValueError("Invalid weight file")
conv.weight.data.copy_(torch.FloatTensor(w_reshaped))
conv.bias.data.copy_(torch.FloatTensor(bias))
conv.weight.requires_grad = self.requires_grad
conv.bias.requires_grad = self.requires_grad
convolutions.append(conv)
self.add_module("char_conv_{}".format(i), conv)
self._convolutions = convolutions
def _load_highway(self):
# the highway layers have same dimensionality as the number of cnn filters
cnn_options = self._options["char_cnn"]
filters = cnn_options["filters"]
n_filters = sum(f[1] for f in filters)
n_highway = cnn_options["n_highway"]
# create the layers, and load the weights
self._highways = Highway(n_filters, n_highway, activation=torch.nn.functional.relu)
for k in range(n_highway):
# The AllenNLP highway is one matrix multplication with concatenation of
# transform and carry weights.
with h5py.File(cached_path(self._weight_file), "r") as fin:
# The weights are transposed due to multiplication order assumptions in tf
# vs pytorch (tf.matmul(X, W) vs pytorch.matmul(W, X))
w_transform = numpy.transpose(fin["CNN_high_{}".format(k)]["W_transform"][...])
# -1.0 since AllenNLP is g * x + (1 - g) * f(x) but tf is (1 - g) * x + g * f(x)
w_carry = -1.0 * numpy.transpose(fin["CNN_high_{}".format(k)]["W_carry"][...])
weight = numpy.concatenate([w_transform, w_carry], axis=0)
self._highways._layers[k].weight.data.copy_(torch.FloatTensor(weight))
self._highways._layers[k].weight.requires_grad = self.requires_grad
b_transform = fin["CNN_high_{}".format(k)]["b_transform"][...]
b_carry = -1.0 * fin["CNN_high_{}".format(k)]["b_carry"][...]
bias = numpy.concatenate([b_transform, b_carry], axis=0)
self._highways._layers[k].bias.data.copy_(torch.FloatTensor(bias))
self._highways._layers[k].bias.requires_grad = self.requires_grad
def _load_projection(self):
cnn_options = self._options["char_cnn"]
filters = cnn_options["filters"]
n_filters = sum(f[1] for f in filters)
self._projection = torch.nn.Linear(n_filters, self.output_dim, bias=True)
with h5py.File(cached_path(self._weight_file), "r") as fin:
weight = fin["CNN_proj"]["W_proj"][...]
bias = fin["CNN_proj"]["b_proj"][...]
self._projection.weight.data.copy_(torch.FloatTensor(numpy.transpose(weight)))
self._projection.bias.data.copy_(torch.FloatTensor(bias))
self._projection.weight.requires_grad = self.requires_grad
self._projection.bias.requires_grad = self.requires_grad
class _ElmoBiLm(torch.nn.Module):
"""
Run a pre-trained bidirectional language model, outputting the activations at each
layer for weighting together into an ELMo representation (with
`allennlp.modules.seq2seq_encoders.Elmo`). This is a lower level class, useful
for advanced uses, but most users should use `allennlp.modules.Elmo` directly.
# Parameters
options_file : `str`
ELMo JSON options file
weight_file : `str`
ELMo hdf5 weight file
requires_grad : `bool`, optional, (default = `False`).
If True, compute gradient of ELMo parameters for fine tuning.
vocab_to_cache : `List[str]`, optional, (default = `None`).
A list of words to pre-compute and cache character convolutions
for. If you use this option, _ElmoBiLm expects that you pass word
indices of shape (batch_size, timesteps) to forward, instead
of character indices. If you use this option and pass a word which
wasn't pre-cached, this will break.
"""
def __init__(
self,
options_file: str,
weight_file: str,
requires_grad: bool = False,
vocab_to_cache: List[str] = None,
) -> None:
super().__init__()
self._token_embedder = _ElmoCharacterEncoder(
options_file, weight_file, requires_grad=requires_grad
)
self._requires_grad = requires_grad
if requires_grad and vocab_to_cache:
logging.warning(
"You are fine tuning ELMo and caching char CNN word vectors. "
"This behaviour is not guaranteed to be well defined, particularly. "
"if not all of your inputs will occur in the vocabulary cache."
)
# This is an embedding, used to look up cached
# word vectors built from character level cnn embeddings.
self._word_embedding = None
self._bos_embedding: torch.Tensor = None
self._eos_embedding: torch.Tensor = None
if vocab_to_cache:
logging.info("Caching character cnn layers for words in vocabulary.")
# This sets 3 attributes, _word_embedding, _bos_embedding and _eos_embedding.
# They are set in the method so they can be accessed from outside the
# constructor.
self.create_cached_cnn_embeddings(vocab_to_cache)
with open(cached_path(options_file), "r") as fin:
options = json.load(fin)
if not options["lstm"].get("use_skip_connections"):
raise ConfigurationError("We only support pretrained biLMs with residual connections")
self._elmo_lstm = ElmoLstm(
input_size=options["lstm"]["projection_dim"],
hidden_size=options["lstm"]["projection_dim"],
cell_size=options["lstm"]["dim"],
num_layers=options["lstm"]["n_layers"],
memory_cell_clip_value=options["lstm"]["cell_clip"],
state_projection_clip_value=options["lstm"]["proj_clip"],
requires_grad=requires_grad,
)
self._elmo_lstm.load_weights(weight_file)
# Number of representation layers including context independent layer
self.num_layers = options["lstm"]["n_layers"] + 1
def get_output_dim(self):
return 2 * self._token_embedder.get_output_dim()
def forward(
self, inputs: torch.Tensor, word_inputs: torch.Tensor = None
) -> Dict[str, Union[torch.Tensor, List[torch.Tensor]]]:
"""
# Parameters
inputs : `torch.Tensor`, required.
Shape `(batch_size, timesteps, 50)` of character ids representing the current batch.
word_inputs : `torch.Tensor`, required.
If you passed a cached vocab, you can in addition pass a tensor of shape `(batch_size, timesteps)`,
which represent word ids which have been pre-cached.
# Returns
Dict with keys:
`'activations'` : `List[torch.Tensor]`
A list of activations at each layer of the network, each of shape
`(batch_size, timesteps + 2, embedding_dim)`
`'mask'`: `torch.BoolTensor`
Shape `(batch_size, timesteps + 2)` long tensor with sequence mask.
Note that the output tensors all include additional special begin and end of sequence
markers.
"""
if self._word_embedding is not None and word_inputs is not None:
try:
mask_without_bos_eos = word_inputs > 0
# The character cnn part is cached - just look it up.
embedded_inputs = self._word_embedding(word_inputs) # type: ignore
# shape (batch_size, timesteps + 2, embedding_dim)
type_representation, mask = add_sentence_boundary_token_ids(
embedded_inputs, mask_without_bos_eos, self._bos_embedding, self._eos_embedding
)
except (RuntimeError, IndexError):
# Back off to running the character convolutions,
# as we might not have the words in the cache.
token_embedding = self._token_embedder(inputs)
mask = token_embedding["mask"]
type_representation = token_embedding["token_embedding"]
else:
token_embedding = self._token_embedder(inputs)
mask = token_embedding["mask"]
type_representation = token_embedding["token_embedding"]
lstm_outputs = self._elmo_lstm(type_representation, mask)
# Prepare the output. The first layer is duplicated.
# Because of minor differences in how masking is applied depending
# on whether the char cnn layers are cached, we'll be defensive and
# multiply by the mask here. It's not strictly necessary, as the
# mask passed on is correct, but the values in the padded areas
# of the char cnn representations can change.
output_tensors = [
torch.cat([type_representation, type_representation], dim=-1) * mask.unsqueeze(-1)
]
for layer_activations in torch.chunk(lstm_outputs, lstm_outputs.size(0), dim=0):
output_tensors.append(layer_activations.squeeze(0))
return {"activations": output_tensors, "mask": mask}
def create_cached_cnn_embeddings(self, tokens: List[str]) -> None:
"""
Given a list of tokens, this method precomputes word representations
by running just the character convolutions and highway layers of elmo,
essentially creating uncontextual word vectors. On subsequent forward passes,
the word ids are looked up from an embedding, rather than being computed on
the fly via the CNN encoder.
This function sets 3 attributes:
_word_embedding : `torch.Tensor`
The word embedding for each word in the tokens passed to this method.
_bos_embedding : `torch.Tensor`
The embedding for the BOS token.
_eos_embedding : `torch.Tensor`
The embedding for the EOS token.
# Parameters
tokens : `List[str]`, required.
A list of tokens to precompute character convolutions for.
"""
tokens = [ELMoCharacterMapper.bos_token, ELMoCharacterMapper.eos_token] + tokens
timesteps = 32
batch_size = 32
chunked_tokens = lazy_groups_of(iter(tokens), timesteps)
all_embeddings = []
device = get_device_of(next(self.parameters()))
for batch in lazy_groups_of(chunked_tokens, batch_size):
# Shape (batch_size, timesteps, 50)
batched_tensor = batch_to_ids(batch)
# NOTE: This device check is for when a user calls this method having
# already placed the model on a device. If this is called in the
# constructor, it will probably happen on the CPU. This isn't too bad,
# because it's only a few convolutions and will likely be very fast.
if device >= 0:
batched_tensor = batched_tensor.cuda(device)
output = self._token_embedder(batched_tensor)
token_embedding = output["token_embedding"]
mask = output["mask"]
token_embedding, _ = remove_sentence_boundaries(token_embedding, mask)
all_embeddings.append(token_embedding.view(-1, token_embedding.size(-1)))
full_embedding = torch.cat(all_embeddings, 0)
# We might have some trailing embeddings from padding in the batch, so
# we clip the embedding and lookup to the right size.
full_embedding = full_embedding[: len(tokens), :]
embedding = full_embedding[2 : len(tokens), :]
vocab_size, embedding_dim = list(embedding.size())
from allennlp.modules.token_embedders import Embedding # type: ignore
self._bos_embedding = full_embedding[0, :]
self._eos_embedding = full_embedding[1, :]
self._word_embedding = Embedding( # type: ignore
num_embeddings=vocab_size,
embedding_dim=embedding_dim,
weight=embedding.data,
trainable=self._requires_grad,
padding_index=0,
)
| allennlp-master | allennlp/modules/elmo.py |
"""
A wrapper that unrolls the second (time) dimension of a tensor
into the first (batch) dimension, applies some other `Module`,
and then rolls the time dimension back up.
"""
from typing import List
from overrides import overrides
import torch
class TimeDistributed(torch.nn.Module):
"""
Given an input shaped like `(batch_size, time_steps, [rest])` and a `Module` that takes
inputs like `(batch_size, [rest])`, `TimeDistributed` reshapes the input to be
`(batch_size * time_steps, [rest])`, applies the contained `Module`, then reshapes it back.
Note that while the above gives shapes with `batch_size` first, this `Module` also works if
`batch_size` is second - we always just combine the first two dimensions, then split them.
It also reshapes keyword arguments unless they are not tensors or their name is specified in
the optional `pass_through` iterable.
"""
def __init__(self, module):
super().__init__()
self._module = module
@overrides
def forward(self, *inputs, pass_through: List[str] = None, **kwargs):
pass_through = pass_through or []
reshaped_inputs = [self._reshape_tensor(input_tensor) for input_tensor in inputs]
# Need some input to then get the batch_size and time_steps.
some_input = None
if inputs:
some_input = inputs[-1]
reshaped_kwargs = {}
for key, value in kwargs.items():
if isinstance(value, torch.Tensor) and key not in pass_through:
if some_input is None:
some_input = value
value = self._reshape_tensor(value)
reshaped_kwargs[key] = value
reshaped_outputs = self._module(*reshaped_inputs, **reshaped_kwargs)
if some_input is None:
raise RuntimeError("No input tensor to time-distribute")
# Now get the output back into the right shape.
# (batch_size, time_steps, **output_size)
new_size = some_input.size()[:2] + reshaped_outputs.size()[1:]
outputs = reshaped_outputs.contiguous().view(new_size)
return outputs
@staticmethod
def _reshape_tensor(input_tensor):
input_size = input_tensor.size()
if len(input_size) <= 2:
raise RuntimeError(f"No dimension to distribute: {input_size}")
# Squash batch_size and time_steps into a single axis; result has shape
# (batch_size * time_steps, **input_size).
squashed_shape = [-1] + list(input_size[2:])
return input_tensor.contiguous().view(*squashed_shape)
| allennlp-master | allennlp/modules/time_distributed.py |
import torch
import numpy as np
class SoftmaxLoss(torch.nn.Module):
"""
Given some embeddings and some targets, applies a linear layer
to create logits over possible words and then returns the
negative log likelihood.
"""
def __init__(self, num_words: int, embedding_dim: int) -> None:
super().__init__()
# TODO(joelgrus): implement tie_embeddings (maybe)
self.tie_embeddings = False
self.softmax_w = torch.nn.Parameter(
torch.randn(embedding_dim, num_words) / np.sqrt(embedding_dim)
)
self.softmax_b = torch.nn.Parameter(torch.zeros(num_words))
def forward(self, embeddings: torch.Tensor, targets: torch.Tensor) -> torch.Tensor:
# embeddings is size (n, embedding_dim)
# targets is (batch_size, ) with the correct class id
# Does not do any count normalization / divide by batch size
probs = torch.nn.functional.log_softmax(
torch.matmul(embeddings, self.softmax_w) + self.softmax_b, dim=-1
)
return torch.nn.functional.nll_loss(probs, targets.long(), reduction="sum")
| allennlp-master | allennlp/modules/softmax_loss.py |
"""
A feed-forward neural network.
"""
from typing import List, Union
import torch
from allennlp.common import FromParams
from allennlp.common.checks import ConfigurationError
from allennlp.nn import Activation
class FeedForward(torch.nn.Module, FromParams):
"""
This `Module` is a feed-forward neural network, just a sequence of `Linear` layers with
activation functions in between.
# Parameters
input_dim : `int`, required
The dimensionality of the input. We assume the input has shape `(batch_size, input_dim)`.
num_layers : `int`, required
The number of `Linear` layers to apply to the input.
hidden_dims : `Union[int, List[int]]`, required
The output dimension of each of the `Linear` layers. If this is a single `int`, we use
it for all `Linear` layers. If it is a `List[int]`, `len(hidden_dims)` must be
`num_layers`.
activations : `Union[Activation, List[Activation]]`, required
The activation function to use after each `Linear` layer. If this is a single function,
we use it after all `Linear` layers. If it is a `List[Activation]`,
`len(activations)` must be `num_layers`. Activation must have torch.nn.Module type.
dropout : `Union[float, List[float]]`, optional (default = `0.0`)
If given, we will apply this amount of dropout after each layer. Semantics of `float`
versus `List[float]` is the same as with other parameters.
# Examples
```python
FeedForward(124, 2, [64, 32], torch.nn.ReLU(), 0.2)
#> FeedForward(
#> (_activations): ModuleList(
#> (0): ReLU()
#> (1): ReLU()
#> )
#> (_linear_layers): ModuleList(
#> (0): Linear(in_features=124, out_features=64, bias=True)
#> (1): Linear(in_features=64, out_features=32, bias=True)
#> )
#> (_dropout): ModuleList(
#> (0): Dropout(p=0.2, inplace=False)
#> (1): Dropout(p=0.2, inplace=False)
#> )
#> )
```
"""
def __init__(
self,
input_dim: int,
num_layers: int,
hidden_dims: Union[int, List[int]],
activations: Union[Activation, List[Activation]],
dropout: Union[float, List[float]] = 0.0,
) -> None:
super().__init__()
if not isinstance(hidden_dims, list):
hidden_dims = [hidden_dims] * num_layers # type: ignore
if not isinstance(activations, list):
activations = [activations] * num_layers # type: ignore
if not isinstance(dropout, list):
dropout = [dropout] * num_layers # type: ignore
if len(hidden_dims) != num_layers:
raise ConfigurationError(
"len(hidden_dims) (%d) != num_layers (%d)" % (len(hidden_dims), num_layers)
)
if len(activations) != num_layers:
raise ConfigurationError(
"len(activations) (%d) != num_layers (%d)" % (len(activations), num_layers)
)
if len(dropout) != num_layers:
raise ConfigurationError(
"len(dropout) (%d) != num_layers (%d)" % (len(dropout), num_layers)
)
self._activations = torch.nn.ModuleList(activations)
input_dims = [input_dim] + hidden_dims[:-1]
linear_layers = []
for layer_input_dim, layer_output_dim in zip(input_dims, hidden_dims):
linear_layers.append(torch.nn.Linear(layer_input_dim, layer_output_dim))
self._linear_layers = torch.nn.ModuleList(linear_layers)
dropout_layers = [torch.nn.Dropout(p=value) for value in dropout]
self._dropout = torch.nn.ModuleList(dropout_layers)
self._output_dim = hidden_dims[-1]
self.input_dim = input_dim
def get_output_dim(self):
return self._output_dim
def get_input_dim(self):
return self.input_dim
def forward(self, inputs: torch.Tensor) -> torch.Tensor:
output = inputs
for layer, activation, dropout in zip(
self._linear_layers, self._activations, self._dropout
):
output = dropout(activation(layer(output)))
return output
| allennlp-master | allennlp/modules/feedforward.py |
"""
Custom PyTorch
`Module <https://pytorch.org/docs/master/nn.html#torch.nn.Module>`_ s
that are used as components in AllenNLP `Model` s.
"""
from allennlp.modules.attention import Attention
from allennlp.modules.bimpm_matching import BiMpmMatching
from allennlp.modules.conditional_random_field import ConditionalRandomField
from allennlp.modules.elmo import Elmo
from allennlp.modules.feedforward import FeedForward
from allennlp.modules.gated_sum import GatedSum
from allennlp.modules.highway import Highway
from allennlp.modules.input_variational_dropout import InputVariationalDropout
from allennlp.modules.layer_norm import LayerNorm
from allennlp.modules.matrix_attention import MatrixAttention
from allennlp.modules.maxout import Maxout
from allennlp.modules.residual_with_layer_dropout import ResidualWithLayerDropout
from allennlp.modules.scalar_mix import ScalarMix
from allennlp.modules.seq2seq_encoders import Seq2SeqEncoder
from allennlp.modules.seq2vec_encoders import Seq2VecEncoder
from allennlp.modules.text_field_embedders import TextFieldEmbedder
from allennlp.modules.time_distributed import TimeDistributed
from allennlp.modules.token_embedders import TokenEmbedder, Embedding
from allennlp.modules.softmax_loss import SoftmaxLoss
| allennlp-master | allennlp/modules/__init__.py |
# https://github.com/tensorflow/tensorflow/blob/r1.4/tensorflow/python/ops/nn_impl.py#L885
from typing import Set, Tuple
import numpy as np
import torch
from allennlp.common.checks import ConfigurationError
from allennlp.nn import util
def _choice(num_words: int, num_samples: int) -> Tuple[np.ndarray, int]:
"""
Chooses `num_samples` samples without replacement from [0, ..., num_words).
Returns a tuple (samples, num_tries).
"""
num_tries = 0
num_chosen = 0
def get_buffer() -> np.ndarray:
log_samples = np.random.rand(num_samples) * np.log(num_words + 1)
samples = np.exp(log_samples).astype("int64") - 1
return np.clip(samples, a_min=0, a_max=num_words - 1)
sample_buffer = get_buffer()
buffer_index = 0
samples: Set[int] = set()
while num_chosen < num_samples:
num_tries += 1
# choose sample
sample_id = sample_buffer[buffer_index]
if sample_id not in samples:
samples.add(sample_id)
num_chosen += 1
buffer_index += 1
if buffer_index == num_samples:
# Reset the buffer
sample_buffer = get_buffer()
buffer_index = 0
return np.array(list(samples)), num_tries
class SampledSoftmaxLoss(torch.nn.Module):
"""
Based on the default log_uniform_candidate_sampler in tensorflow.
!!! NOTE
num_words DOES NOT include padding id.
!!! NOTE
In all cases except (tie_embeddings=True and use_character_inputs=False)
the weights are dimensioned as num_words and do not include an entry for the padding (0) id.
For the (tie_embeddings=True and use_character_inputs=False) case,
then the embeddings DO include the extra 0 padding, to be consistent with the word embedding layer.
# Parameters
num_words, `int`, required
The number of words in the vocabulary
embedding_dim, `int`, required
The dimension to softmax over
num_samples, `int`, required
During training take this many samples. Must be less than num_words.
sparse, `bool`, optional (default = `False`)
If this is true, we use a sparse embedding matrix.
unk_id, `int`, optional (default = `None`)
If provided, the id that represents unknown characters.
use_character_inputs, `bool`, optional (default = `True`)
Whether to use character inputs
use_fast_sampler, `bool`, optional (default = `False`)
Whether to use the fast cython sampler.
"""
def __init__(
self,
num_words: int,
embedding_dim: int,
num_samples: int,
sparse: bool = False,
unk_id: int = None,
use_character_inputs: bool = True,
use_fast_sampler: bool = False,
) -> None:
super().__init__()
# TODO(joelgrus): implement tie_embeddings (maybe)
self.tie_embeddings = False
assert num_samples < num_words
if use_fast_sampler:
raise ConfigurationError("fast sampler is not implemented")
else:
self.choice_func = _choice
# Glorit init (std=(1.0 / sqrt(fan_in))
if sparse:
# create our own sparse embedding
self.softmax_w = torch.nn.Embedding(
num_embeddings=num_words, embedding_dim=embedding_dim, sparse=True
)
self.softmax_w.weight.data.normal_(mean=0.0, std=1.0 / np.sqrt(embedding_dim))
self.softmax_b = torch.nn.Embedding(
num_embeddings=num_words, embedding_dim=1, sparse=True
)
self.softmax_b.weight.data.fill_(0.0)
else:
# just create tensors to use as the embeddings
# Glorit init (std=(1.0 / sqrt(fan_in))
self.softmax_w = torch.nn.Parameter(
torch.randn(num_words, embedding_dim) / np.sqrt(embedding_dim)
)
self.softmax_b = torch.nn.Parameter(torch.zeros(num_words))
self.sparse = sparse
self.use_character_inputs = use_character_inputs
if use_character_inputs:
self._unk_id = unk_id
self._num_samples = num_samples
self._embedding_dim = embedding_dim
self._num_words = num_words
self.initialize_num_words()
def initialize_num_words(self):
if self.sparse:
num_words = self.softmax_w.weight.size(0)
else:
num_words = self.softmax_w.size(0)
self._num_words = num_words
self._log_num_words_p1 = np.log(num_words + 1)
# compute the probability of each sampled id
self._probs = (
np.log(np.arange(num_words) + 2) - np.log(np.arange(num_words) + 1)
) / self._log_num_words_p1
def forward(
self,
embeddings: torch.Tensor,
targets: torch.Tensor,
target_token_embedding: torch.Tensor = None,
) -> torch.Tensor:
# embeddings is size (n, embedding_dim)
# targets is (n_words, ) with the index of the actual target
# when tieing weights, target_token_embedding is required.
# it is size (n_words, embedding_dim)
# returns log likelihood loss (batch_size, )
# Does not do any count normalization / divide by batch size
if embeddings.shape[0] == 0:
# empty batch
return torch.tensor(0.0, device=embeddings.device)
if not self.training:
return self._forward_eval(embeddings, targets)
else:
return self._forward_train(embeddings, targets, target_token_embedding)
def _forward_train(
self, embeddings: torch.Tensor, targets: torch.Tensor, target_token_embedding: torch.Tensor
) -> torch.Tensor:
# (target_token_embedding is only used in the tie_embeddings case,
# which is not implemented)
# want to compute (n, n_samples + 1) array with the log
# probabilities where the first index is the true target
# and the remaining ones are the the negative samples.
# then we can just select the first column
# NOTE: targets input has padding removed (so 0 == the first id, NOT the padding id)
(
sampled_ids,
target_expected_count,
sampled_expected_count,
) = self.log_uniform_candidate_sampler(targets, choice_func=self.choice_func)
long_targets = targets.long()
long_targets.requires_grad_(False)
# Get the softmax weights (so we can compute logits)
# shape (batch_size * max_sequence_length + num_samples)
all_ids = torch.cat([long_targets, sampled_ids], dim=0)
if self.sparse:
all_ids_1 = all_ids.unsqueeze(1)
all_w = self.softmax_w(all_ids_1).squeeze(1)
all_b = self.softmax_b(all_ids_1).squeeze(2).squeeze(1)
else:
all_w = torch.nn.functional.embedding(all_ids, self.softmax_w)
# the unsqueeze / squeeze works around an issue with 1 dim
# embeddings
all_b = torch.nn.functional.embedding(all_ids, self.softmax_b.unsqueeze(1)).squeeze(1)
batch_size = long_targets.size(0)
true_w = all_w[:batch_size, :]
sampled_w = all_w[batch_size:, :]
true_b = all_b[:batch_size]
sampled_b = all_b[batch_size:]
# compute the logits and remove log expected counts
# [batch_size, ]
true_logits = (
(true_w * embeddings).sum(dim=1)
+ true_b
- torch.log(
target_expected_count + util.tiny_value_of_dtype(target_expected_count.dtype)
)
)
# [batch_size, n_samples]
sampled_logits = (
torch.matmul(embeddings, sampled_w.t())
+ sampled_b
- torch.log(
sampled_expected_count + util.tiny_value_of_dtype(sampled_expected_count.dtype)
)
)
# remove true labels -- we will take
# softmax, so set the sampled logits of true values to a large
# negative number
# [batch_size, n_samples]
true_in_sample_mask = sampled_ids == long_targets.unsqueeze(1)
masked_sampled_logits = sampled_logits.masked_fill(true_in_sample_mask, -10000.0)
# now concat the true logits as index 0
# [batch_size, n_samples + 1]
logits = torch.cat([true_logits.unsqueeze(1), masked_sampled_logits], dim=1)
# finally take log_softmax
log_softmax = torch.nn.functional.log_softmax(logits, dim=1)
# true log likelihood is index 0, loss = -1.0 * sum over batch
# the likelihood loss can become very large if the corresponding
# true logit is very small, so we apply a per-target cap here
# so that a single logit for a very rare word won't dominate the batch.
nll_loss = -1.0 * log_softmax[:, 0].sum()
return nll_loss
def _forward_eval(self, embeddings: torch.Tensor, targets: torch.Tensor) -> torch.Tensor:
# evaluation mode, use full softmax
if self.sparse:
w = self.softmax_w.weight
b = self.softmax_b.weight.squeeze(1)
else:
w = self.softmax_w
b = self.softmax_b
log_softmax = torch.nn.functional.log_softmax(torch.matmul(embeddings, w.t()) + b, dim=-1)
if self.tie_embeddings and not self.use_character_inputs:
targets_ = targets + 1
else:
targets_ = targets
return torch.nn.functional.nll_loss(log_softmax, targets_.long(), reduction="sum")
def log_uniform_candidate_sampler(self, targets, choice_func=_choice):
# returns sampled, true_expected_count, sampled_expected_count
# targets = (batch_size, )
#
# samples = (n_samples, )
# true_expected_count = (batch_size, )
# sampled_expected_count = (n_samples, )
# see: https://github.com/tensorflow/tensorflow/blob/master/tensorflow/core/kernels/range_sampler.h
# https://github.com/tensorflow/tensorflow/blob/master/tensorflow/core/kernels/range_sampler.cc
# algorithm: keep track of number of tries when doing sampling,
# then expected count is
# -expm1(num_tries * log1p(-p))
# = (1 - (1-p)^num_tries) where p is self._probs[id]
np_sampled_ids, num_tries = choice_func(self._num_words, self._num_samples)
sampled_ids = torch.from_numpy(np_sampled_ids).to(targets.device)
# Compute expected count = (1 - (1-p)^num_tries) = -expm1(num_tries * log1p(-p))
# P(class) = (log(class + 2) - log(class + 1)) / log(range_max + 1)
target_probs = (
torch.log((targets.float() + 2.0) / (targets.float() + 1.0)) / self._log_num_words_p1
)
target_expected_count = -1.0 * (torch.exp(num_tries * torch.log1p(-target_probs)) - 1.0)
sampled_probs = (
torch.log((sampled_ids.float() + 2.0) / (sampled_ids.float() + 1.0))
/ self._log_num_words_p1
)
sampled_expected_count = -1.0 * (torch.exp(num_tries * torch.log1p(-sampled_probs)) - 1.0)
sampled_ids.requires_grad_(False)
target_expected_count.requires_grad_(False)
sampled_expected_count.requires_grad_(False)
return sampled_ids, target_expected_count, sampled_expected_count
| allennlp-master | allennlp/modules/sampled_softmax_loss.py |
import torch
class ResidualWithLayerDropout(torch.nn.Module):
"""
A residual connection with the layer dropout technique [Deep Networks with Stochastic
Depth](https://arxiv.org/pdf/1603.09382.pdf).
This module accepts the input and output of a layer, decides whether this layer should
be stochastically dropped, returns either the input or output + input. During testing,
it will re-calibrate the outputs of this layer by the expected number of times it
participates in training.
"""
def __init__(self, undecayed_dropout_prob: float = 0.5) -> None:
super().__init__()
if undecayed_dropout_prob < 0 or undecayed_dropout_prob > 1:
raise ValueError(
f"undecayed dropout probability has to be between 0 and 1, "
f"but got {undecayed_dropout_prob}"
)
self.undecayed_dropout_prob = undecayed_dropout_prob
def forward(
self, # type: ignore
layer_input: torch.Tensor,
layer_output: torch.Tensor,
layer_index: int = None,
total_layers: int = None,
) -> torch.Tensor:
"""
Apply dropout to this layer, for this whole mini-batch.
dropout_prob = layer_index / total_layers * undecayed_dropout_prob if layer_idx and
total_layers is specified, else it will use the undecayed_dropout_prob directly.
# Parameters
layer_input `torch.FloatTensor` required
The input tensor of this layer.
layer_output `torch.FloatTensor` required
The output tensor of this layer, with the same shape as the layer_input.
layer_index `int`
The layer index, starting from 1. This is used to calcuate the dropout prob
together with the `total_layers` parameter.
total_layers `int`
The total number of layers.
# Returns
output : `torch.FloatTensor`
A tensor with the same shape as `layer_input` and `layer_output`.
"""
if layer_index is not None and total_layers is not None:
dropout_prob = 1.0 * self.undecayed_dropout_prob * layer_index / total_layers
else:
dropout_prob = 1.0 * self.undecayed_dropout_prob
if self.training:
if torch.rand(1) < dropout_prob:
return layer_input
else:
return layer_output + layer_input
else:
return (1 - dropout_prob) * layer_output + layer_input
| allennlp-master | allennlp/modules/residual_with_layer_dropout.py |
"""
Multi-perspective matching layer
"""
from typing import Tuple, List
import torch
import torch.nn as nn
import torch.nn.functional as F
from allennlp.common.checks import ConfigurationError
from allennlp.common.registrable import FromParams
from allennlp.nn.util import (
get_lengths_from_binary_sequence_mask,
masked_max,
masked_mean,
masked_softmax,
tiny_value_of_dtype,
)
def multi_perspective_match(
vector1: torch.Tensor, vector2: torch.Tensor, weight: torch.Tensor
) -> Tuple[torch.Tensor, torch.Tensor]:
"""
Calculate multi-perspective cosine matching between time-steps of vectors
of the same length.
# Parameters
vector1 : `torch.Tensor`
A tensor of shape `(batch, seq_len, hidden_size)`
vector2 : `torch.Tensor`
A tensor of shape `(batch, seq_len or 1, hidden_size)`
weight : `torch.Tensor`
A tensor of shape `(num_perspectives, hidden_size)`
# Returns
`torch.Tensor` :
Shape `(batch, seq_len, 1)`.
`torch.Tensor` :
Shape `(batch, seq_len, num_perspectives)`.
"""
assert vector1.size(0) == vector2.size(0)
assert weight.size(1) == vector1.size(2) == vector1.size(2)
# (batch, seq_len, 1)
similarity_single = F.cosine_similarity(vector1, vector2, 2).unsqueeze(2)
# (1, 1, num_perspectives, hidden_size)
weight = weight.unsqueeze(0).unsqueeze(0)
# (batch, seq_len, num_perspectives, hidden_size)
vector1 = weight * vector1.unsqueeze(2)
vector2 = weight * vector2.unsqueeze(2)
similarity_multi = F.cosine_similarity(vector1, vector2, dim=3)
return similarity_single, similarity_multi
def multi_perspective_match_pairwise(
vector1: torch.Tensor, vector2: torch.Tensor, weight: torch.Tensor
) -> torch.Tensor:
"""
Calculate multi-perspective cosine matching between each time step of
one vector and each time step of another vector.
# Parameters
vector1 : `torch.Tensor`
A tensor of shape `(batch, seq_len1, hidden_size)`
vector2 : `torch.Tensor`
A tensor of shape `(batch, seq_len2, hidden_size)`
weight : `torch.Tensor`
A tensor of shape `(num_perspectives, hidden_size)`
# Returns
`torch.Tensor` :
A tensor of shape `(batch, seq_len1, seq_len2, num_perspectives)` consisting
multi-perspective matching results
"""
num_perspectives = weight.size(0)
# (1, num_perspectives, 1, hidden_size)
weight = weight.unsqueeze(0).unsqueeze(2)
# (batch, num_perspectives, seq_len*, hidden_size)
vector1 = weight * vector1.unsqueeze(1).expand(-1, num_perspectives, -1, -1)
vector2 = weight * vector2.unsqueeze(1).expand(-1, num_perspectives, -1, -1)
# (batch, num_perspectives, seq_len*, 1)
vector1_norm = vector1.norm(p=2, dim=3, keepdim=True)
vector2_norm = vector2.norm(p=2, dim=3, keepdim=True)
# (batch, num_perspectives, seq_len1, seq_len2)
mul_result = torch.matmul(vector1, vector2.transpose(2, 3))
norm_value = vector1_norm * vector2_norm.transpose(2, 3)
# (batch, seq_len1, seq_len2, num_perspectives)
return (mul_result / norm_value.clamp(min=tiny_value_of_dtype(norm_value.dtype))).permute(
0, 2, 3, 1
)
class BiMpmMatching(nn.Module, FromParams):
"""
This `Module` implements the matching layer of BiMPM model described in [Bilateral
Multi-Perspective Matching for Natural Language Sentences](https://arxiv.org/abs/1702.03814)
by Zhiguo Wang et al., 2017.
Also please refer to the [TensorFlow implementation](https://github.com/zhiguowang/BiMPM/) and
[PyTorch implementation](https://github.com/galsang/BIMPM-pytorch).
# Parameters
hidden_dim : `int`, optional (default = `100`)
The hidden dimension of the representations
num_perspectives : `int`, optional (default = `20`)
The number of perspectives for matching
share_weights_between_directions : `bool`, optional (default = `True`)
If True, share weight between matching from sentence1 to sentence2 and from sentence2
to sentence1, useful for non-symmetric tasks
is_forward : `bool`, optional (default = `None`)
Whether the matching is for forward sequence or backward sequence, useful in finding last
token in full matching. It can not be None if with_full_match is True.
with_full_match : `bool`, optional (default = `True`)
If True, include full match
with_maxpool_match : `bool`, optional (default = `True`)
If True, include max pool match
with_attentive_match : `bool`, optional (default = `True`)
If True, include attentive match
with_max_attentive_match : `bool`, optional (default = `True`)
If True, include max attentive match
"""
def __init__(
self,
hidden_dim: int = 100,
num_perspectives: int = 20,
share_weights_between_directions: bool = True,
is_forward: bool = None,
with_full_match: bool = True,
with_maxpool_match: bool = True,
with_attentive_match: bool = True,
with_max_attentive_match: bool = True,
) -> None:
super().__init__()
self.hidden_dim = hidden_dim
self.num_perspectives = num_perspectives
self.is_forward = is_forward
self.with_full_match = with_full_match
self.with_maxpool_match = with_maxpool_match
self.with_attentive_match = with_attentive_match
self.with_max_attentive_match = with_max_attentive_match
if not (
with_full_match
or with_maxpool_match
or with_attentive_match
or with_max_attentive_match
):
raise ConfigurationError("At least one of the matching method should be enabled")
def create_parameter(): # utility function to create and initialize a parameter
param = nn.Parameter(torch.zeros(num_perspectives, hidden_dim))
torch.nn.init.kaiming_normal_(param)
return param
def share_or_create(weights_to_share): # utility function to create or share the weights
return weights_to_share if share_weights_between_directions else create_parameter()
output_dim = (
2 # used to calculate total output dimension, 2 is for cosine max and cosine min
)
if with_full_match:
if is_forward is None:
raise ConfigurationError("Must specify is_forward to enable full matching")
self.full_match_weights = create_parameter()
self.full_match_weights_reversed = share_or_create(self.full_match_weights)
output_dim += num_perspectives + 1
if with_maxpool_match:
self.maxpool_match_weights = create_parameter()
output_dim += num_perspectives * 2
if with_attentive_match:
self.attentive_match_weights = create_parameter()
self.attentive_match_weights_reversed = share_or_create(self.attentive_match_weights)
output_dim += num_perspectives + 1
if with_max_attentive_match:
self.max_attentive_match_weights = create_parameter()
self.max_attentive_match_weights_reversed = share_or_create(
self.max_attentive_match_weights
)
output_dim += num_perspectives + 1
self.output_dim = output_dim
def get_output_dim(self) -> int:
return self.output_dim
def forward(
self,
context_1: torch.Tensor,
mask_1: torch.BoolTensor,
context_2: torch.Tensor,
mask_2: torch.BoolTensor,
) -> Tuple[List[torch.Tensor], List[torch.Tensor]]:
"""
Given the forward (or backward) representations of sentence1 and sentence2, apply four bilateral
matching functions between them in one direction.
# Parameters
context_1 : `torch.Tensor`
Tensor of shape (batch_size, seq_len1, hidden_dim) representing the encoding of the first sentence.
mask_1 : `torch.BoolTensor`
Boolean Tensor of shape (batch_size, seq_len1), indicating which
positions in the first sentence are padding (0) and which are not (1).
context_2 : `torch.Tensor`
Tensor of shape (batch_size, seq_len2, hidden_dim) representing the encoding of the second sentence.
mask_2 : `torch.BoolTensor`
Boolean Tensor of shape (batch_size, seq_len2), indicating which
positions in the second sentence are padding (0) and which are not (1).
# Returns
`Tuple[List[torch.Tensor], List[torch.Tensor]]` :
A tuple of matching vectors for the two sentences. Each of which is a list of
matching vectors of shape (batch, seq_len, num_perspectives or 1)
"""
assert (not mask_2.requires_grad) and (not mask_1.requires_grad)
assert context_1.size(-1) == context_2.size(-1) == self.hidden_dim
# (batch,)
len_1 = get_lengths_from_binary_sequence_mask(mask_1)
len_2 = get_lengths_from_binary_sequence_mask(mask_2)
# explicitly set masked weights to zero
# (batch_size, seq_len*, hidden_dim)
context_1 = context_1 * mask_1.unsqueeze(-1)
context_2 = context_2 * mask_2.unsqueeze(-1)
# array to keep the matching vectors for the two sentences
matching_vector_1: List[torch.Tensor] = []
matching_vector_2: List[torch.Tensor] = []
# Step 0. unweighted cosine
# First calculate the cosine similarities between each forward
# (or backward) contextual embedding and every forward (or backward)
# contextual embedding of the other sentence.
# (batch, seq_len1, seq_len2)
cosine_sim = F.cosine_similarity(context_1.unsqueeze(-2), context_2.unsqueeze(-3), dim=3)
# (batch, seq_len*, 1)
cosine_max_1 = masked_max(cosine_sim, mask_2.unsqueeze(-2), dim=2, keepdim=True)
cosine_mean_1 = masked_mean(cosine_sim, mask_2.unsqueeze(-2), dim=2, keepdim=True)
cosine_max_2 = masked_max(
cosine_sim.permute(0, 2, 1), mask_1.unsqueeze(-2), dim=2, keepdim=True
)
cosine_mean_2 = masked_mean(
cosine_sim.permute(0, 2, 1), mask_1.unsqueeze(-2), dim=2, keepdim=True
)
matching_vector_1.extend([cosine_max_1, cosine_mean_1])
matching_vector_2.extend([cosine_max_2, cosine_mean_2])
# Step 1. Full-Matching
# Each time step of forward (or backward) contextual embedding of one sentence
# is compared with the last time step of the forward (or backward)
# contextual embedding of the other sentence
if self.with_full_match:
# (batch, 1, hidden_dim)
if self.is_forward:
# (batch, 1, hidden_dim)
last_position_1 = (len_1 - 1).clamp(min=0)
last_position_1 = last_position_1.view(-1, 1, 1).expand(-1, 1, self.hidden_dim)
last_position_2 = (len_2 - 1).clamp(min=0)
last_position_2 = last_position_2.view(-1, 1, 1).expand(-1, 1, self.hidden_dim)
context_1_last = context_1.gather(1, last_position_1)
context_2_last = context_2.gather(1, last_position_2)
else:
context_1_last = context_1[:, 0:1, :]
context_2_last = context_2[:, 0:1, :]
# (batch, seq_len*, num_perspectives)
matching_vector_1_full = multi_perspective_match(
context_1, context_2_last, self.full_match_weights
)
matching_vector_2_full = multi_perspective_match(
context_2, context_1_last, self.full_match_weights_reversed
)
matching_vector_1.extend(matching_vector_1_full)
matching_vector_2.extend(matching_vector_2_full)
# Step 2. Maxpooling-Matching
# Each time step of forward (or backward) contextual embedding of one sentence
# is compared with every time step of the forward (or backward)
# contextual embedding of the other sentence, and only the max value of each
# dimension is retained.
if self.with_maxpool_match:
# (batch, seq_len1, seq_len2, num_perspectives)
matching_vector_max = multi_perspective_match_pairwise(
context_1, context_2, self.maxpool_match_weights
)
# (batch, seq_len*, num_perspectives)
matching_vector_1_max = masked_max(
matching_vector_max, mask_2.unsqueeze(-2).unsqueeze(-1), dim=2
)
matching_vector_1_mean = masked_mean(
matching_vector_max, mask_2.unsqueeze(-2).unsqueeze(-1), dim=2
)
matching_vector_2_max = masked_max(
matching_vector_max.permute(0, 2, 1, 3), mask_1.unsqueeze(-2).unsqueeze(-1), dim=2
)
matching_vector_2_mean = masked_mean(
matching_vector_max.permute(0, 2, 1, 3), mask_1.unsqueeze(-2).unsqueeze(-1), dim=2
)
matching_vector_1.extend([matching_vector_1_max, matching_vector_1_mean])
matching_vector_2.extend([matching_vector_2_max, matching_vector_2_mean])
# Step 3. Attentive-Matching
# Each forward (or backward) similarity is taken as the weight
# of the forward (or backward) contextual embedding, and calculate an
# attentive vector for the sentence by weighted summing all its
# contextual embeddings.
# Finally match each forward (or backward) contextual embedding
# with its corresponding attentive vector.
# (batch, seq_len1, seq_len2, hidden_dim)
att_2 = context_2.unsqueeze(-3) * cosine_sim.unsqueeze(-1)
# (batch, seq_len1, seq_len2, hidden_dim)
att_1 = context_1.unsqueeze(-2) * cosine_sim.unsqueeze(-1)
if self.with_attentive_match:
# (batch, seq_len*, hidden_dim)
att_mean_2 = masked_softmax(att_2.sum(dim=2), mask_1.unsqueeze(-1))
att_mean_1 = masked_softmax(att_1.sum(dim=1), mask_2.unsqueeze(-1))
# (batch, seq_len*, num_perspectives)
matching_vector_1_att_mean = multi_perspective_match(
context_1, att_mean_2, self.attentive_match_weights
)
matching_vector_2_att_mean = multi_perspective_match(
context_2, att_mean_1, self.attentive_match_weights_reversed
)
matching_vector_1.extend(matching_vector_1_att_mean)
matching_vector_2.extend(matching_vector_2_att_mean)
# Step 4. Max-Attentive-Matching
# Pick the contextual embeddings with the highest cosine similarity as the attentive
# vector, and match each forward (or backward) contextual embedding with its
# corresponding attentive vector.
if self.with_max_attentive_match:
# (batch, seq_len*, hidden_dim)
att_max_2 = masked_max(att_2, mask_2.unsqueeze(-2).unsqueeze(-1), dim=2)
att_max_1 = masked_max(
att_1.permute(0, 2, 1, 3), mask_1.unsqueeze(-2).unsqueeze(-1), dim=2
)
# (batch, seq_len*, num_perspectives)
matching_vector_1_att_max = multi_perspective_match(
context_1, att_max_2, self.max_attentive_match_weights
)
matching_vector_2_att_max = multi_perspective_match(
context_2, att_max_1, self.max_attentive_match_weights_reversed
)
matching_vector_1.extend(matching_vector_1_att_max)
matching_vector_2.extend(matching_vector_2_att_max)
return matching_vector_1, matching_vector_2
| allennlp-master | allennlp/modules/bimpm_matching.py |
"""
Conditional random field
"""
from typing import List, Tuple, Dict, Union
import torch
from allennlp.common.checks import ConfigurationError
import allennlp.nn.util as util
VITERBI_DECODING = Tuple[List[int], float] # a list of tags, and a viterbi score
def allowed_transitions(constraint_type: str, labels: Dict[int, str]) -> List[Tuple[int, int]]:
"""
Given labels and a constraint type, returns the allowed transitions. It will
additionally include transitions for the start and end states, which are used
by the conditional random field.
# Parameters
constraint_type : `str`, required
Indicates which constraint to apply. Current choices are
"BIO", "IOB1", "BIOUL", and "BMES".
labels : `Dict[int, str]`, required
A mapping {label_id -> label}. Most commonly this would be the value from
Vocabulary.get_index_to_token_vocabulary()
# Returns
`List[Tuple[int, int]]`
The allowed transitions (from_label_id, to_label_id).
"""
num_labels = len(labels)
start_tag = num_labels
end_tag = num_labels + 1
labels_with_boundaries = list(labels.items()) + [(start_tag, "START"), (end_tag, "END")]
allowed = []
for from_label_index, from_label in labels_with_boundaries:
if from_label in ("START", "END"):
from_tag = from_label
from_entity = ""
else:
from_tag = from_label[0]
from_entity = from_label[1:]
for to_label_index, to_label in labels_with_boundaries:
if to_label in ("START", "END"):
to_tag = to_label
to_entity = ""
else:
to_tag = to_label[0]
to_entity = to_label[1:]
if is_transition_allowed(constraint_type, from_tag, from_entity, to_tag, to_entity):
allowed.append((from_label_index, to_label_index))
return allowed
def is_transition_allowed(
constraint_type: str, from_tag: str, from_entity: str, to_tag: str, to_entity: str
):
"""
Given a constraint type and strings `from_tag` and `to_tag` that
represent the origin and destination of the transition, return whether
the transition is allowed under the given constraint type.
# Parameters
constraint_type : `str`, required
Indicates which constraint to apply. Current choices are
"BIO", "IOB1", "BIOUL", and "BMES".
from_tag : `str`, required
The tag that the transition originates from. For example, if the
label is `I-PER`, the `from_tag` is `I`.
from_entity : `str`, required
The entity corresponding to the `from_tag`. For example, if the
label is `I-PER`, the `from_entity` is `PER`.
to_tag : `str`, required
The tag that the transition leads to. For example, if the
label is `I-PER`, the `to_tag` is `I`.
to_entity : `str`, required
The entity corresponding to the `to_tag`. For example, if the
label is `I-PER`, the `to_entity` is `PER`.
# Returns
`bool`
Whether the transition is allowed under the given `constraint_type`.
"""
if to_tag == "START" or from_tag == "END":
# Cannot transition into START or from END
return False
if constraint_type == "BIOUL":
if from_tag == "START":
return to_tag in ("O", "B", "U")
if to_tag == "END":
return from_tag in ("O", "L", "U")
return any(
[
# O can transition to O, B-* or U-*
# L-x can transition to O, B-*, or U-*
# U-x can transition to O, B-*, or U-*
from_tag in ("O", "L", "U") and to_tag in ("O", "B", "U"),
# B-x can only transition to I-x or L-x
# I-x can only transition to I-x or L-x
from_tag in ("B", "I") and to_tag in ("I", "L") and from_entity == to_entity,
]
)
elif constraint_type == "BIO":
if from_tag == "START":
return to_tag in ("O", "B")
if to_tag == "END":
return from_tag in ("O", "B", "I")
return any(
[
# Can always transition to O or B-x
to_tag in ("O", "B"),
# Can only transition to I-x from B-x or I-x
to_tag == "I" and from_tag in ("B", "I") and from_entity == to_entity,
]
)
elif constraint_type == "IOB1":
if from_tag == "START":
return to_tag in ("O", "I")
if to_tag == "END":
return from_tag in ("O", "B", "I")
return any(
[
# Can always transition to O or I-x
to_tag in ("O", "I"),
# Can only transition to B-x from B-x or I-x, where
# x is the same tag.
to_tag == "B" and from_tag in ("B", "I") and from_entity == to_entity,
]
)
elif constraint_type == "BMES":
if from_tag == "START":
return to_tag in ("B", "S")
if to_tag == "END":
return from_tag in ("E", "S")
return any(
[
# Can only transition to B or S from E or S.
to_tag in ("B", "S") and from_tag in ("E", "S"),
# Can only transition to M-x from B-x, where
# x is the same tag.
to_tag == "M" and from_tag in ("B", "M") and from_entity == to_entity,
# Can only transition to E-x from B-x or M-x, where
# x is the same tag.
to_tag == "E" and from_tag in ("B", "M") and from_entity == to_entity,
]
)
else:
raise ConfigurationError(f"Unknown constraint type: {constraint_type}")
class ConditionalRandomField(torch.nn.Module):
"""
This module uses the "forward-backward" algorithm to compute
the log-likelihood of its inputs assuming a conditional random field model.
See, e.g. http://www.cs.columbia.edu/~mcollins/fb.pdf
# Parameters
num_tags : `int`, required
The number of tags.
constraints : `List[Tuple[int, int]]`, optional (default = `None`)
An optional list of allowed transitions (from_tag_id, to_tag_id).
These are applied to `viterbi_tags()` but do not affect `forward()`.
These should be derived from `allowed_transitions` so that the
start and end transitions are handled correctly for your tag type.
include_start_end_transitions : `bool`, optional (default = `True`)
Whether to include the start and end transition parameters.
"""
def __init__(
self,
num_tags: int,
constraints: List[Tuple[int, int]] = None,
include_start_end_transitions: bool = True,
) -> None:
super().__init__()
self.num_tags = num_tags
# transitions[i, j] is the logit for transitioning from state i to state j.
self.transitions = torch.nn.Parameter(torch.Tensor(num_tags, num_tags))
# _constraint_mask indicates valid transitions (based on supplied constraints).
# Include special start of sequence (num_tags + 1) and end of sequence tags (num_tags + 2)
if constraints is None:
# All transitions are valid.
constraint_mask = torch.Tensor(num_tags + 2, num_tags + 2).fill_(1.0)
else:
constraint_mask = torch.Tensor(num_tags + 2, num_tags + 2).fill_(0.0)
for i, j in constraints:
constraint_mask[i, j] = 1.0
self._constraint_mask = torch.nn.Parameter(constraint_mask, requires_grad=False)
# Also need logits for transitioning from "start" state and to "end" state.
self.include_start_end_transitions = include_start_end_transitions
if include_start_end_transitions:
self.start_transitions = torch.nn.Parameter(torch.Tensor(num_tags))
self.end_transitions = torch.nn.Parameter(torch.Tensor(num_tags))
self.reset_parameters()
def reset_parameters(self):
torch.nn.init.xavier_normal_(self.transitions)
if self.include_start_end_transitions:
torch.nn.init.normal_(self.start_transitions)
torch.nn.init.normal_(self.end_transitions)
def _input_likelihood(self, logits: torch.Tensor, mask: torch.BoolTensor) -> torch.Tensor:
"""
Computes the (batch_size,) denominator term for the log-likelihood, which is the
sum of the likelihoods across all possible state sequences.
"""
batch_size, sequence_length, num_tags = logits.size()
# Transpose batch size and sequence dimensions
mask = mask.transpose(0, 1).contiguous()
logits = logits.transpose(0, 1).contiguous()
# Initial alpha is the (batch_size, num_tags) tensor of likelihoods combining the
# transitions to the initial states and the logits for the first timestep.
if self.include_start_end_transitions:
alpha = self.start_transitions.view(1, num_tags) + logits[0]
else:
alpha = logits[0]
# For each i we compute logits for the transitions from timestep i-1 to timestep i.
# We do so in a (batch_size, num_tags, num_tags) tensor where the axes are
# (instance, current_tag, next_tag)
for i in range(1, sequence_length):
# The emit scores are for time i ("next_tag") so we broadcast along the current_tag axis.
emit_scores = logits[i].view(batch_size, 1, num_tags)
# Transition scores are (current_tag, next_tag) so we broadcast along the instance axis.
transition_scores = self.transitions.view(1, num_tags, num_tags)
# Alpha is for the current_tag, so we broadcast along the next_tag axis.
broadcast_alpha = alpha.view(batch_size, num_tags, 1)
# Add all the scores together and logexp over the current_tag axis.
inner = broadcast_alpha + emit_scores + transition_scores
# In valid positions (mask == True) we want to take the logsumexp over the current_tag dimension
# of `inner`. Otherwise (mask == False) we want to retain the previous alpha.
alpha = util.logsumexp(inner, 1) * mask[i].view(batch_size, 1) + alpha * (
~mask[i]
).view(batch_size, 1)
# Every sequence needs to end with a transition to the stop_tag.
if self.include_start_end_transitions:
stops = alpha + self.end_transitions.view(1, num_tags)
else:
stops = alpha
# Finally we log_sum_exp along the num_tags dim, result is (batch_size,)
return util.logsumexp(stops)
def _joint_likelihood(
self, logits: torch.Tensor, tags: torch.Tensor, mask: torch.BoolTensor
) -> torch.Tensor:
"""
Computes the numerator term for the log-likelihood, which is just score(inputs, tags)
"""
batch_size, sequence_length, _ = logits.data.shape
# Transpose batch size and sequence dimensions:
logits = logits.transpose(0, 1).contiguous()
mask = mask.transpose(0, 1).contiguous()
tags = tags.transpose(0, 1).contiguous()
# Start with the transition scores from start_tag to the first tag in each input
if self.include_start_end_transitions:
score = self.start_transitions.index_select(0, tags[0])
else:
score = 0.0
# Add up the scores for the observed transitions and all the inputs but the last
for i in range(sequence_length - 1):
# Each is shape (batch_size,)
current_tag, next_tag = tags[i], tags[i + 1]
# The scores for transitioning from current_tag to next_tag
transition_score = self.transitions[current_tag.view(-1), next_tag.view(-1)]
# The score for using current_tag
emit_score = logits[i].gather(1, current_tag.view(batch_size, 1)).squeeze(1)
# Include transition score if next element is unmasked,
# input_score if this element is unmasked.
score = score + transition_score * mask[i + 1] + emit_score * mask[i]
# Transition from last state to "stop" state. To start with, we need to find the last tag
# for each instance.
last_tag_index = mask.sum(0).long() - 1
last_tags = tags.gather(0, last_tag_index.view(1, batch_size)).squeeze(0)
# Compute score of transitioning to `stop_tag` from each "last tag".
if self.include_start_end_transitions:
last_transition_score = self.end_transitions.index_select(0, last_tags)
else:
last_transition_score = 0.0
# Add the last input if it's not masked.
last_inputs = logits[-1] # (batch_size, num_tags)
last_input_score = last_inputs.gather(1, last_tags.view(-1, 1)) # (batch_size, 1)
last_input_score = last_input_score.squeeze() # (batch_size,)
score = score + last_transition_score + last_input_score * mask[-1]
return score
def forward(
self, inputs: torch.Tensor, tags: torch.Tensor, mask: torch.BoolTensor = None
) -> torch.Tensor:
"""
Computes the log likelihood.
"""
if mask is None:
mask = torch.ones(*tags.size(), dtype=torch.bool)
else:
# The code below fails in weird ways if this isn't a bool tensor, so we make sure.
mask = mask.to(torch.bool)
log_denominator = self._input_likelihood(inputs, mask)
log_numerator = self._joint_likelihood(inputs, tags, mask)
return torch.sum(log_numerator - log_denominator)
def viterbi_tags(
self, logits: torch.Tensor, mask: torch.BoolTensor = None, top_k: int = None
) -> Union[List[VITERBI_DECODING], List[List[VITERBI_DECODING]]]:
"""
Uses viterbi algorithm to find most likely tags for the given inputs.
If constraints are applied, disallows all other transitions.
Returns a list of results, of the same size as the batch (one result per batch member)
Each result is a List of length top_k, containing the top K viterbi decodings
Each decoding is a tuple (tag_sequence, viterbi_score)
For backwards compatibility, if top_k is None, then instead returns a flat list of
tag sequences (the top tag sequence for each batch item).
"""
if mask is None:
mask = torch.ones(*logits.shape[:2], dtype=torch.bool, device=logits.device)
if top_k is None:
top_k = 1
flatten_output = True
else:
flatten_output = False
_, max_seq_length, num_tags = logits.size()
# Get the tensors out of the variables
logits, mask = logits.data, mask.data
# Augment transitions matrix with start and end transitions
start_tag = num_tags
end_tag = num_tags + 1
transitions = torch.Tensor(num_tags + 2, num_tags + 2).fill_(-10000.0)
# Apply transition constraints
constrained_transitions = self.transitions * self._constraint_mask[
:num_tags, :num_tags
] + -10000.0 * (1 - self._constraint_mask[:num_tags, :num_tags])
transitions[:num_tags, :num_tags] = constrained_transitions.data
if self.include_start_end_transitions:
transitions[
start_tag, :num_tags
] = self.start_transitions.detach() * self._constraint_mask[
start_tag, :num_tags
].data + -10000.0 * (
1 - self._constraint_mask[start_tag, :num_tags].detach()
)
transitions[:num_tags, end_tag] = self.end_transitions.detach() * self._constraint_mask[
:num_tags, end_tag
].data + -10000.0 * (1 - self._constraint_mask[:num_tags, end_tag].detach())
else:
transitions[start_tag, :num_tags] = -10000.0 * (
1 - self._constraint_mask[start_tag, :num_tags].detach()
)
transitions[:num_tags, end_tag] = -10000.0 * (
1 - self._constraint_mask[:num_tags, end_tag].detach()
)
best_paths = []
# Pad the max sequence length by 2 to account for start_tag + end_tag.
tag_sequence = torch.Tensor(max_seq_length + 2, num_tags + 2)
for prediction, prediction_mask in zip(logits, mask):
mask_indices = prediction_mask.nonzero(as_tuple=False).squeeze()
masked_prediction = torch.index_select(prediction, 0, mask_indices)
sequence_length = masked_prediction.shape[0]
# Start with everything totally unlikely
tag_sequence.fill_(-10000.0)
# At timestep 0 we must have the START_TAG
tag_sequence[0, start_tag] = 0.0
# At steps 1, ..., sequence_length we just use the incoming prediction
tag_sequence[1 : (sequence_length + 1), :num_tags] = masked_prediction
# And at the last timestep we must have the END_TAG
tag_sequence[sequence_length + 1, end_tag] = 0.0
# We pass the tags and the transitions to `viterbi_decode`.
viterbi_paths, viterbi_scores = util.viterbi_decode(
tag_sequence=tag_sequence[: (sequence_length + 2)],
transition_matrix=transitions,
top_k=top_k,
)
top_k_paths = []
for viterbi_path, viterbi_score in zip(viterbi_paths, viterbi_scores):
# Get rid of START and END sentinels and append.
viterbi_path = viterbi_path[1:-1]
top_k_paths.append((viterbi_path, viterbi_score.item()))
best_paths.append(top_k_paths)
if flatten_output:
return [top_k_paths[0] for top_k_paths in best_paths]
return best_paths
| allennlp-master | allennlp/modules/conditional_random_field.py |
from typing import Optional, Tuple, List
import torch
from torch.nn.utils.rnn import PackedSequence, pack_padded_sequence, pad_packed_sequence
from allennlp.modules.augmented_lstm import AugmentedLstm
from allennlp.modules.input_variational_dropout import InputVariationalDropout
from allennlp.common.checks import ConfigurationError
TensorPair = Tuple[torch.Tensor, torch.Tensor]
class StackedBidirectionalLstm(torch.nn.Module):
"""
A standard stacked Bidirectional LSTM where the LSTM layers
are concatenated between each layer. The only difference between
this and a regular bidirectional LSTM is the application of
variational dropout to the hidden states and outputs of each layer apart
from the last layer of the LSTM. Note that this will be slower, as it
doesn't use CUDNN.
[0]: https://arxiv.org/abs/1512.05287
# Parameters
input_size : `int`, required
The dimension of the inputs to the LSTM.
hidden_size : `int`, required
The dimension of the outputs of the LSTM.
num_layers : `int`, required
The number of stacked Bidirectional LSTMs to use.
recurrent_dropout_probability : `float`, optional (default = `0.0`)
The recurrent dropout probability to be used in a dropout scheme as
stated in [A Theoretically Grounded Application of Dropout in Recurrent
Neural Networks][0].
layer_dropout_probability : `float`, optional (default = `0.0`)
The layer wise dropout probability to be used in a dropout scheme as
stated in [A Theoretically Grounded Application of Dropout in Recurrent
Neural Networks][0].
use_highway : `bool`, optional (default = `True`)
Whether or not to use highway connections between layers. This effectively involves
reparameterising the normal output of an LSTM as::
gate = sigmoid(W_x1 * x_t + W_h * h_t)
output = gate * h_t + (1 - gate) * (W_x2 * x_t)
"""
def __init__(
self,
input_size: int,
hidden_size: int,
num_layers: int,
recurrent_dropout_probability: float = 0.0,
layer_dropout_probability: float = 0.0,
use_highway: bool = True,
) -> None:
super().__init__()
# Required to be wrapped with a `PytorchSeq2SeqWrapper`.
self.input_size = input_size
self.hidden_size = hidden_size
self.num_layers = num_layers
self.bidirectional = True
layers = []
lstm_input_size = input_size
for layer_index in range(num_layers):
forward_layer = AugmentedLstm(
lstm_input_size,
hidden_size,
go_forward=True,
recurrent_dropout_probability=recurrent_dropout_probability,
use_highway=use_highway,
use_input_projection_bias=False,
)
backward_layer = AugmentedLstm(
lstm_input_size,
hidden_size,
go_forward=False,
recurrent_dropout_probability=recurrent_dropout_probability,
use_highway=use_highway,
use_input_projection_bias=False,
)
lstm_input_size = hidden_size * 2
self.add_module("forward_layer_{}".format(layer_index), forward_layer)
self.add_module("backward_layer_{}".format(layer_index), backward_layer)
layers.append([forward_layer, backward_layer])
self.lstm_layers = layers
self.layer_dropout = InputVariationalDropout(layer_dropout_probability)
def forward(
self, inputs: PackedSequence, initial_state: Optional[TensorPair] = None
) -> Tuple[PackedSequence, TensorPair]:
"""
# Parameters
inputs : `PackedSequence`, required.
A batch first `PackedSequence` to run the stacked LSTM over.
initial_state : `Tuple[torch.Tensor, torch.Tensor]`, optional, (default = `None`)
A tuple (state, memory) representing the initial hidden state and memory
of the LSTM. Each tensor has shape (num_layers, batch_size, output_dimension * 2).
# Returns
output_sequence : `PackedSequence`
The encoded sequence of shape (batch_size, sequence_length, hidden_size * 2)
final_states: `torch.Tensor`
The per-layer final (state, memory) states of the LSTM, each with shape
(num_layers * 2, batch_size, hidden_size * 2).
"""
if initial_state is None:
hidden_states: List[Optional[TensorPair]] = [None] * len(self.lstm_layers)
elif initial_state[0].size()[0] != len(self.lstm_layers):
raise ConfigurationError(
"Initial states were passed to forward() but the number of "
"initial states does not match the number of layers."
)
else:
hidden_states = list(zip(initial_state[0].split(1, 0), initial_state[1].split(1, 0)))
output_sequence = inputs
final_h = []
final_c = []
for i, state in enumerate(hidden_states):
forward_layer = getattr(self, "forward_layer_{}".format(i))
backward_layer = getattr(self, "backward_layer_{}".format(i))
# The state is duplicated to mirror the Pytorch API for LSTMs.
forward_output, final_forward_state = forward_layer(output_sequence, state)
backward_output, final_backward_state = backward_layer(output_sequence, state)
forward_output, lengths = pad_packed_sequence(forward_output, batch_first=True)
backward_output, _ = pad_packed_sequence(backward_output, batch_first=True)
output_sequence = torch.cat([forward_output, backward_output], -1)
# Apply layer wise dropout on each output sequence apart from the
# first (input) and last
if i < (self.num_layers - 1):
output_sequence = self.layer_dropout(output_sequence)
output_sequence = pack_padded_sequence(output_sequence, lengths, batch_first=True)
final_h.extend([final_forward_state[0], final_backward_state[0]])
final_c.extend([final_forward_state[1], final_backward_state[1]])
final_h = torch.cat(final_h, dim=0)
final_c = torch.cat(final_c, dim=0)
final_state_tuple = (final_h, final_c)
return output_sequence, final_state_tuple
| allennlp-master | allennlp/modules/stacked_bidirectional_lstm.py |
"""
A [Highway layer](https://arxiv.org/abs/1505.00387) that does a gated combination of a linear
transformation and a non-linear transformation of its input.
"""
from typing import Callable
import torch
from overrides import overrides
class Highway(torch.nn.Module):
"""
A [Highway layer](https://arxiv.org/abs/1505.00387) does a gated combination of a linear
transformation and a non-linear transformation of its input. :math:`y = g * x + (1 - g) *
f(A(x))`, where :math:`A` is a linear transformation, :math:`f` is an element-wise
non-linearity, and :math:`g` is an element-wise gate, computed as :math:`sigmoid(B(x))`.
This module will apply a fixed number of highway layers to its input, returning the final
result.
# Parameters
input_dim : `int`, required
The dimensionality of :math:`x`. We assume the input has shape `(batch_size, ...,
input_dim)`.
num_layers : `int`, optional (default=`1`)
The number of highway layers to apply to the input.
activation : `Callable[[torch.Tensor], torch.Tensor]`, optional (default=`torch.nn.functional.relu`)
The non-linearity to use in the highway layers.
"""
def __init__(
self,
input_dim: int,
num_layers: int = 1,
activation: Callable[[torch.Tensor], torch.Tensor] = torch.nn.functional.relu,
) -> None:
super().__init__()
self._input_dim = input_dim
self._layers = torch.nn.ModuleList(
[torch.nn.Linear(input_dim, input_dim * 2) for _ in range(num_layers)]
)
self._activation = activation
for layer in self._layers:
# We should bias the highway layer to just carry its input forward. We do that by
# setting the bias on `B(x)` to be positive, because that means `g` will be biased to
# be high, so we will carry the input forward. The bias on `B(x)` is the second half
# of the bias vector in each Linear layer.
layer.bias[input_dim:].data.fill_(1)
@overrides
def forward(self, inputs: torch.Tensor) -> torch.Tensor:
current_input = inputs
for layer in self._layers:
projected_input = layer(current_input)
linear_part = current_input
# NOTE: if you modify this, think about whether you should modify the initialization
# above, too.
nonlinear_part, gate = projected_input.chunk(2, dim=-1)
nonlinear_part = self._activation(nonlinear_part)
gate = torch.sigmoid(gate)
current_input = gate * linear_part + (1 - gate) * nonlinear_part
return current_input
| allennlp-master | allennlp/modules/highway.py |
import torch
class InputVariationalDropout(torch.nn.Dropout):
"""
Apply the dropout technique in Gal and Ghahramani, [Dropout as a Bayesian Approximation:
Representing Model Uncertainty in Deep Learning](https://arxiv.org/abs/1506.02142) to a
3D tensor.
This module accepts a 3D tensor of shape `(batch_size, num_timesteps, embedding_dim)`
and samples a single dropout mask of shape `(batch_size, embedding_dim)` and applies
it to every time step.
"""
def forward(self, input_tensor):
"""
Apply dropout to input tensor.
# Parameters
input_tensor : `torch.FloatTensor`
A tensor of shape `(batch_size, num_timesteps, embedding_dim)`
# Returns
output : `torch.FloatTensor`
A tensor of shape `(batch_size, num_timesteps, embedding_dim)` with dropout applied.
"""
ones = input_tensor.data.new_ones(input_tensor.shape[0], input_tensor.shape[-1])
dropout_mask = torch.nn.functional.dropout(ones, self.p, self.training, inplace=False)
if self.inplace:
input_tensor *= dropout_mask.unsqueeze(1)
return None
else:
return dropout_mask.unsqueeze(1) * input_tensor
| allennlp-master | allennlp/modules/input_variational_dropout.py |
import torch
from allennlp.nn import util
class MaskedLayerNorm(torch.nn.Module):
"""
See LayerNorm for details.
Note, however, that unlike LayerNorm this norm includes a batch component.
"""
def __init__(self, size: int, gamma0: float = 0.1) -> None:
super().__init__()
self.gamma = torch.nn.Parameter(torch.ones(1, 1, size) * gamma0)
self.beta = torch.nn.Parameter(torch.zeros(1, 1, size))
self.size = size
def forward(self, tensor: torch.Tensor, mask: torch.BoolTensor) -> torch.Tensor:
broadcast_mask = mask.unsqueeze(-1)
num_elements = broadcast_mask.sum() * self.size
mean = (tensor * broadcast_mask).sum() / num_elements
masked_centered = (tensor - mean) * broadcast_mask
std = torch.sqrt(
(masked_centered * masked_centered).sum() / num_elements
+ util.tiny_value_of_dtype(tensor.dtype)
)
return (
self.gamma * (tensor - mean) / (std + util.tiny_value_of_dtype(tensor.dtype))
+ self.beta
)
| allennlp-master | allennlp/modules/masked_layer_norm.py |
"""
A stacked bidirectional LSTM with skip connections between layers.
"""
import warnings
from typing import List, Optional, Tuple, Any
import numpy
import torch
from torch.nn.utils.rnn import PackedSequence, pad_packed_sequence
from allennlp.common.checks import ConfigurationError
from allennlp.common.file_utils import cached_path
from allennlp.modules.encoder_base import _EncoderBase
from allennlp.modules.lstm_cell_with_projection import LstmCellWithProjection
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=FutureWarning)
import h5py
class ElmoLstm(_EncoderBase):
"""
A stacked, bidirectional LSTM which uses
[`LstmCellWithProjection`'s](./lstm_cell_with_projection.md)
with highway layers between the inputs to layers.
The inputs to the forward and backward directions are independent - forward and backward
states are not concatenated between layers.
Additionally, this LSTM maintains its `own` state, which is updated every time
`forward` is called. It is dynamically resized for different batch sizes and is
designed for use with non-continuous inputs (i.e inputs which aren't formatted as a stream,
such as text used for a language modeling task, which is how stateful RNNs are typically used).
This is non-standard, but can be thought of as having an "end of sentence" state, which is
carried across different sentences.
[0]: https://arxiv.org/abs/1512.05287
# Parameters
input_size : `int`, required
The dimension of the inputs to the LSTM.
hidden_size : `int`, required
The dimension of the outputs of the LSTM.
cell_size : `int`, required.
The dimension of the memory cell of the `LstmCellWithProjection`.
num_layers : `int`, required
The number of bidirectional LSTMs to use.
requires_grad : `bool`, optional
If True, compute gradient of ELMo parameters for fine tuning.
recurrent_dropout_probability : `float`, optional (default = `0.0`)
The dropout probability to be used in a dropout scheme as stated in
[A Theoretically Grounded Application of Dropout in Recurrent Neural Networks][0].
state_projection_clip_value : `float`, optional, (default = `None`)
The magnitude with which to clip the hidden_state after projecting it.
memory_cell_clip_value : `float`, optional, (default = `None`)
The magnitude with which to clip the memory cell.
"""
def __init__(
self,
input_size: int,
hidden_size: int,
cell_size: int,
num_layers: int,
requires_grad: bool = False,
recurrent_dropout_probability: float = 0.0,
memory_cell_clip_value: Optional[float] = None,
state_projection_clip_value: Optional[float] = None,
) -> None:
super().__init__(stateful=True)
# Required to be wrapped with a `PytorchSeq2SeqWrapper`.
self.input_size = input_size
self.hidden_size = hidden_size
self.num_layers = num_layers
self.cell_size = cell_size
self.requires_grad = requires_grad
forward_layers = []
backward_layers = []
lstm_input_size = input_size
go_forward = True
for layer_index in range(num_layers):
forward_layer = LstmCellWithProjection(
lstm_input_size,
hidden_size,
cell_size,
go_forward,
recurrent_dropout_probability,
memory_cell_clip_value,
state_projection_clip_value,
)
backward_layer = LstmCellWithProjection(
lstm_input_size,
hidden_size,
cell_size,
not go_forward,
recurrent_dropout_probability,
memory_cell_clip_value,
state_projection_clip_value,
)
lstm_input_size = hidden_size
self.add_module("forward_layer_{}".format(layer_index), forward_layer)
self.add_module("backward_layer_{}".format(layer_index), backward_layer)
forward_layers.append(forward_layer)
backward_layers.append(backward_layer)
self.forward_layers = forward_layers
self.backward_layers = backward_layers
def forward(self, inputs: torch.Tensor, mask: torch.BoolTensor) -> torch.Tensor:
"""
# Parameters
inputs : `torch.Tensor`, required.
A Tensor of shape `(batch_size, sequence_length, hidden_size)`.
mask : `torch.BoolTensor`, required.
A binary mask of shape `(batch_size, sequence_length)` representing the
non-padded elements in each sequence in the batch.
# Returns
`torch.Tensor`
A `torch.Tensor` of shape (num_layers, batch_size, sequence_length, hidden_size),
where the num_layers dimension represents the LSTM output from that layer.
"""
batch_size, total_sequence_length = mask.size()
stacked_sequence_output, final_states, restoration_indices = self.sort_and_run_forward(
self._lstm_forward, inputs, mask
)
num_layers, num_valid, returned_timesteps, encoder_dim = stacked_sequence_output.size()
# Add back invalid rows which were removed in the call to sort_and_run_forward.
if num_valid < batch_size:
zeros = stacked_sequence_output.new_zeros(
num_layers, batch_size - num_valid, returned_timesteps, encoder_dim
)
stacked_sequence_output = torch.cat([stacked_sequence_output, zeros], 1)
# The states also need to have invalid rows added back.
new_states = []
for state in final_states:
state_dim = state.size(-1)
zeros = state.new_zeros(num_layers, batch_size - num_valid, state_dim)
new_states.append(torch.cat([state, zeros], 1))
final_states = new_states
# It's possible to need to pass sequences which are padded to longer than the
# max length of the sequence to a Seq2StackEncoder. However, packing and unpacking
# the sequences mean that the returned tensor won't include these dimensions, because
# the RNN did not need to process them. We add them back on in the form of zeros here.
sequence_length_difference = total_sequence_length - returned_timesteps
if sequence_length_difference > 0:
zeros = stacked_sequence_output.new_zeros(
num_layers,
batch_size,
sequence_length_difference,
stacked_sequence_output[0].size(-1),
)
stacked_sequence_output = torch.cat([stacked_sequence_output, zeros], 2)
self._update_states(final_states, restoration_indices)
# Restore the original indices and return the sequence.
# Has shape (num_layers, batch_size, sequence_length, hidden_size)
return stacked_sequence_output.index_select(1, restoration_indices)
def _lstm_forward(
self,
inputs: PackedSequence,
initial_state: Optional[Tuple[torch.Tensor, torch.Tensor]] = None,
) -> Tuple[torch.Tensor, Tuple[torch.Tensor, torch.Tensor]]:
"""
# Parameters
inputs : `PackedSequence`, required.
A batch first `PackedSequence` to run the stacked LSTM over.
initial_state : `Tuple[torch.Tensor, torch.Tensor]`, optional, (default = `None`)
A tuple (state, memory) representing the initial hidden state and memory
of the LSTM, with shape (num_layers, batch_size, 2 * hidden_size) and
(num_layers, batch_size, 2 * cell_size) respectively.
# Returns
output_sequence : `torch.FloatTensor`
The encoded sequence of shape (num_layers, batch_size, sequence_length, hidden_size)
final_states : `Tuple[torch.FloatTensor, torch.FloatTensor]`
The per-layer final (state, memory) states of the LSTM, with shape
(num_layers, batch_size, 2 * hidden_size) and (num_layers, batch_size, 2 * cell_size)
respectively. The last dimension is duplicated because it contains the state/memory
for both the forward and backward layers.
"""
if initial_state is None:
hidden_states: List[Optional[Tuple[torch.Tensor, torch.Tensor]]] = [None] * len(
self.forward_layers
)
elif initial_state[0].size()[0] != len(self.forward_layers):
raise ConfigurationError(
"Initial states were passed to forward() but the number of "
"initial states does not match the number of layers."
)
else:
hidden_states = list(zip(initial_state[0].split(1, 0), initial_state[1].split(1, 0)))
inputs, batch_lengths = pad_packed_sequence(inputs, batch_first=True)
forward_output_sequence = inputs
backward_output_sequence = inputs
final_states = []
sequence_outputs = []
for layer_index, state in enumerate(hidden_states):
forward_layer = getattr(self, "forward_layer_{}".format(layer_index))
backward_layer = getattr(self, "backward_layer_{}".format(layer_index))
forward_cache = forward_output_sequence
backward_cache = backward_output_sequence
forward_state: Optional[Tuple[Any, Any]] = None
backward_state: Optional[Tuple[Any, Any]] = None
if state is not None:
forward_hidden_state, backward_hidden_state = state[0].split(self.hidden_size, 2)
forward_memory_state, backward_memory_state = state[1].split(self.cell_size, 2)
forward_state = (forward_hidden_state, forward_memory_state)
backward_state = (backward_hidden_state, backward_memory_state)
forward_output_sequence, forward_state = forward_layer(
forward_output_sequence, batch_lengths, forward_state
)
backward_output_sequence, backward_state = backward_layer(
backward_output_sequence, batch_lengths, backward_state
)
# Skip connections, just adding the input to the output.
if layer_index != 0:
forward_output_sequence += forward_cache
backward_output_sequence += backward_cache
sequence_outputs.append(
torch.cat([forward_output_sequence, backward_output_sequence], -1)
)
# Append the state tuples in a list, so that we can return
# the final states for all the layers.
final_states.append(
(
torch.cat([forward_state[0], backward_state[0]], -1), # type: ignore
torch.cat([forward_state[1], backward_state[1]], -1), # type: ignore
)
)
stacked_sequence_outputs: torch.FloatTensor = torch.stack(sequence_outputs)
# Stack the hidden state and memory for each layer into 2 tensors of shape
# (num_layers, batch_size, hidden_size) and (num_layers, batch_size, cell_size)
# respectively.
final_hidden_states, final_memory_states = zip(*final_states)
final_state_tuple: Tuple[torch.FloatTensor, torch.FloatTensor] = (
torch.cat(final_hidden_states, 0),
torch.cat(final_memory_states, 0),
)
return stacked_sequence_outputs, final_state_tuple
def load_weights(self, weight_file: str) -> None:
"""
Load the pre-trained weights from the file.
"""
requires_grad = self.requires_grad
with h5py.File(cached_path(weight_file), "r") as fin:
for i_layer, lstms in enumerate(zip(self.forward_layers, self.backward_layers)):
for j_direction, lstm in enumerate(lstms):
# lstm is an instance of LSTMCellWithProjection
cell_size = lstm.cell_size
dataset = fin["RNN_%s" % j_direction]["RNN"]["MultiRNNCell"][
"Cell%s" % i_layer
]["LSTMCell"]
# tensorflow packs together both W and U matrices into one matrix,
# but pytorch maintains individual matrices. In addition, tensorflow
# packs the gates as input, memory, forget, output but pytorch
# uses input, forget, memory, output. So we need to modify the weights.
tf_weights = numpy.transpose(dataset["W_0"][...])
torch_weights = tf_weights.copy()
# split the W from U matrices
input_size = lstm.input_size
input_weights = torch_weights[:, :input_size]
recurrent_weights = torch_weights[:, input_size:]
tf_input_weights = tf_weights[:, :input_size]
tf_recurrent_weights = tf_weights[:, input_size:]
# handle the different gate order convention
for torch_w, tf_w in [
[input_weights, tf_input_weights],
[recurrent_weights, tf_recurrent_weights],
]:
torch_w[(1 * cell_size) : (2 * cell_size), :] = tf_w[
(2 * cell_size) : (3 * cell_size), :
]
torch_w[(2 * cell_size) : (3 * cell_size), :] = tf_w[
(1 * cell_size) : (2 * cell_size), :
]
lstm.input_linearity.weight.data.copy_(torch.FloatTensor(input_weights))
lstm.state_linearity.weight.data.copy_(torch.FloatTensor(recurrent_weights))
lstm.input_linearity.weight.requires_grad = requires_grad
lstm.state_linearity.weight.requires_grad = requires_grad
# the bias weights
tf_bias = dataset["B"][...]
# tensorflow adds 1.0 to forget gate bias instead of modifying the
# parameters...
tf_bias[(2 * cell_size) : (3 * cell_size)] += 1
torch_bias = tf_bias.copy()
torch_bias[(1 * cell_size) : (2 * cell_size)] = tf_bias[
(2 * cell_size) : (3 * cell_size)
]
torch_bias[(2 * cell_size) : (3 * cell_size)] = tf_bias[
(1 * cell_size) : (2 * cell_size)
]
lstm.state_linearity.bias.data.copy_(torch.FloatTensor(torch_bias))
lstm.state_linearity.bias.requires_grad = requires_grad
# the projection weights
proj_weights = numpy.transpose(dataset["W_P_0"][...])
lstm.state_projection.weight.data.copy_(torch.FloatTensor(proj_weights))
lstm.state_projection.weight.requires_grad = requires_grad
| allennlp-master | allennlp/modules/elmo_lstm.py |
"""
An LSTM with Recurrent Dropout, a hidden_state which is projected and
clipping on both the hidden state and the memory state of the LSTM.
"""
from typing import Optional, Tuple, List
import torch
from allennlp.nn.util import get_dropout_mask
from allennlp.nn.initializers import block_orthogonal
class LstmCellWithProjection(torch.nn.Module):
"""
An LSTM with Recurrent Dropout and a projected and clipped hidden state and
memory. Note: this implementation is slower than the native Pytorch LSTM because
it cannot make use of CUDNN optimizations for stacked RNNs due to and
variational dropout and the custom nature of the cell state.
[0]: https://arxiv.org/abs/1512.05287
# Parameters
input_size : `int`, required.
The dimension of the inputs to the LSTM.
hidden_size : `int`, required.
The dimension of the outputs of the LSTM.
cell_size : `int`, required.
The dimension of the memory cell used for the LSTM.
go_forward : `bool`, optional (default = `True`)
The direction in which the LSTM is applied to the sequence.
Forwards by default, or backwards if False.
recurrent_dropout_probability : `float`, optional (default = `0.0`)
The dropout probability to be used in a dropout scheme as stated in
[A Theoretically Grounded Application of Dropout in Recurrent Neural Networks]
[0]. Implementation wise, this simply
applies a fixed dropout mask per sequence to the recurrent connection of the
LSTM.
state_projection_clip_value : `float`, optional, (default = `None`)
The magnitude with which to clip the hidden_state after projecting it.
memory_cell_clip_value : `float`, optional, (default = `None`)
The magnitude with which to clip the memory cell.
# Returns
output_accumulator : `torch.FloatTensor`
The outputs of the LSTM for each timestep. A tensor of shape
(batch_size, max_timesteps, hidden_size) where for a given batch
element, all outputs past the sequence length for that batch are
zero tensors.
final_state : `Tuple[torch.FloatTensor, torch.FloatTensor]`
The final (state, memory) states of the LSTM, with shape
(1, batch_size, hidden_size) and (1, batch_size, cell_size)
respectively. The first dimension is 1 in order to match the Pytorch
API for returning stacked LSTM states.
"""
def __init__(
self,
input_size: int,
hidden_size: int,
cell_size: int,
go_forward: bool = True,
recurrent_dropout_probability: float = 0.0,
memory_cell_clip_value: Optional[float] = None,
state_projection_clip_value: Optional[float] = None,
) -> None:
super().__init__()
# Required to be wrapped with a `PytorchSeq2SeqWrapper`.
self.input_size = input_size
self.hidden_size = hidden_size
self.cell_size = cell_size
self.go_forward = go_forward
self.state_projection_clip_value = state_projection_clip_value
self.memory_cell_clip_value = memory_cell_clip_value
self.recurrent_dropout_probability = recurrent_dropout_probability
# We do the projections for all the gates all at once.
self.input_linearity = torch.nn.Linear(input_size, 4 * cell_size, bias=False)
self.state_linearity = torch.nn.Linear(hidden_size, 4 * cell_size, bias=True)
# Additional projection matrix for making the hidden state smaller.
self.state_projection = torch.nn.Linear(cell_size, hidden_size, bias=False)
self.reset_parameters()
def reset_parameters(self):
# Use sensible default initializations for parameters.
block_orthogonal(self.input_linearity.weight.data, [self.cell_size, self.input_size])
block_orthogonal(self.state_linearity.weight.data, [self.cell_size, self.hidden_size])
self.state_linearity.bias.data.fill_(0.0)
# Initialize forget gate biases to 1.0 as per An Empirical
# Exploration of Recurrent Network Architectures, (Jozefowicz, 2015).
self.state_linearity.bias.data[self.cell_size : 2 * self.cell_size].fill_(1.0)
def forward(
self,
inputs: torch.FloatTensor,
batch_lengths: List[int],
initial_state: Optional[Tuple[torch.Tensor, torch.Tensor]] = None,
):
"""
# Parameters
inputs : `torch.FloatTensor`, required.
A tensor of shape (batch_size, num_timesteps, input_size)
to apply the LSTM over.
batch_lengths : `List[int]`, required.
A list of length batch_size containing the lengths of the sequences in batch.
initial_state : `Tuple[torch.Tensor, torch.Tensor]`, optional, (default = `None`)
A tuple (state, memory) representing the initial hidden state and memory
of the LSTM. The `state` has shape (1, batch_size, hidden_size) and the
`memory` has shape (1, batch_size, cell_size).
# Returns
output_accumulator : `torch.FloatTensor`
The outputs of the LSTM for each timestep. A tensor of shape
(batch_size, max_timesteps, hidden_size) where for a given batch
element, all outputs past the sequence length for that batch are
zero tensors.
final_state : `Tuple[torch.FloatTensor, torch.FloatTensor]`
A tuple (state, memory) representing the initial hidden state and memory
of the LSTM. The `state` has shape (1, batch_size, hidden_size) and the
`memory` has shape (1, batch_size, cell_size).
"""
batch_size = inputs.size()[0]
total_timesteps = inputs.size()[1]
output_accumulator = inputs.new_zeros(batch_size, total_timesteps, self.hidden_size)
if initial_state is None:
full_batch_previous_memory = inputs.new_zeros(batch_size, self.cell_size)
full_batch_previous_state = inputs.new_zeros(batch_size, self.hidden_size)
else:
full_batch_previous_state = initial_state[0].squeeze(0)
full_batch_previous_memory = initial_state[1].squeeze(0)
current_length_index = batch_size - 1 if self.go_forward else 0
if self.recurrent_dropout_probability > 0.0 and self.training:
dropout_mask = get_dropout_mask(
self.recurrent_dropout_probability, full_batch_previous_state
)
else:
dropout_mask = None
for timestep in range(total_timesteps):
# The index depends on which end we start.
index = timestep if self.go_forward else total_timesteps - timestep - 1
# What we are doing here is finding the index into the batch dimension
# which we need to use for this timestep, because the sequences have
# variable length, so once the index is greater than the length of this
# particular batch sequence, we no longer need to do the computation for
# this sequence. The key thing to recognise here is that the batch inputs
# must be _ordered_ by length from longest (first in batch) to shortest
# (last) so initially, we are going forwards with every sequence and as we
# pass the index at which the shortest elements of the batch finish,
# we stop picking them up for the computation.
if self.go_forward:
while batch_lengths[current_length_index] <= index:
current_length_index -= 1
# If we're going backwards, we are _picking up_ more indices.
else:
# First conditional: Are we already at the maximum number of elements in the batch?
# Second conditional: Does the next shortest sequence beyond the current batch
# index require computation use this timestep?
while (
current_length_index < (len(batch_lengths) - 1)
and batch_lengths[current_length_index + 1] > index
):
current_length_index += 1
# Actually get the slices of the batch which we
# need for the computation at this timestep.
# shape (batch_size, cell_size)
previous_memory = full_batch_previous_memory[0 : current_length_index + 1].clone()
# Shape (batch_size, hidden_size)
previous_state = full_batch_previous_state[0 : current_length_index + 1].clone()
# Shape (batch_size, input_size)
timestep_input = inputs[0 : current_length_index + 1, index]
# Do the projections for all the gates all at once.
# Both have shape (batch_size, 4 * cell_size)
projected_input = self.input_linearity(timestep_input)
projected_state = self.state_linearity(previous_state)
# Main LSTM equations using relevant chunks of the big linear
# projections of the hidden state and inputs.
input_gate = torch.sigmoid(
projected_input[:, (0 * self.cell_size) : (1 * self.cell_size)]
+ projected_state[:, (0 * self.cell_size) : (1 * self.cell_size)]
)
forget_gate = torch.sigmoid(
projected_input[:, (1 * self.cell_size) : (2 * self.cell_size)]
+ projected_state[:, (1 * self.cell_size) : (2 * self.cell_size)]
)
memory_init = torch.tanh(
projected_input[:, (2 * self.cell_size) : (3 * self.cell_size)]
+ projected_state[:, (2 * self.cell_size) : (3 * self.cell_size)]
)
output_gate = torch.sigmoid(
projected_input[:, (3 * self.cell_size) : (4 * self.cell_size)]
+ projected_state[:, (3 * self.cell_size) : (4 * self.cell_size)]
)
memory = input_gate * memory_init + forget_gate * previous_memory
# Here is the non-standard part of this LSTM cell; first, we clip the
# memory cell, then we project the output of the timestep to a smaller size
# and again clip it.
if self.memory_cell_clip_value:
memory = torch.clamp(
memory, -self.memory_cell_clip_value, self.memory_cell_clip_value
)
# shape (current_length_index, cell_size)
pre_projection_timestep_output = output_gate * torch.tanh(memory)
# shape (current_length_index, hidden_size)
timestep_output = self.state_projection(pre_projection_timestep_output)
if self.state_projection_clip_value:
timestep_output = torch.clamp(
timestep_output,
-self.state_projection_clip_value,
self.state_projection_clip_value,
)
# Only do dropout if the dropout prob is > 0.0 and we are in training mode.
if dropout_mask is not None:
timestep_output = timestep_output * dropout_mask[0 : current_length_index + 1]
# We've been doing computation with less than the full batch, so here we create a new
# variable for the the whole batch at this timestep and insert the result for the
# relevant elements of the batch into it.
full_batch_previous_memory = full_batch_previous_memory.clone()
full_batch_previous_state = full_batch_previous_state.clone()
full_batch_previous_memory[0 : current_length_index + 1] = memory
full_batch_previous_state[0 : current_length_index + 1] = timestep_output
output_accumulator[0 : current_length_index + 1, index] = timestep_output
# Mimic the pytorch API by returning state in the following shape:
# (num_layers * num_directions, batch_size, ...). As this
# LSTM cell cannot be stacked, the first dimension here is just 1.
final_state = (
full_batch_previous_state.unsqueeze(0),
full_batch_previous_memory.unsqueeze(0),
)
return output_accumulator, final_state
| allennlp-master | allennlp/modules/lstm_cell_with_projection.py |
"""
An LSTM with Recurrent Dropout and the option to use highway
connections between layers.
Based on PyText version (that was based on a previous AllenNLP version)
"""
from typing import Optional, Tuple
import torch
from allennlp.common.checks import ConfigurationError
from torch.nn.utils.rnn import PackedSequence, pack_padded_sequence, pad_packed_sequence
from allennlp.nn.initializers import block_orthogonal
from allennlp.nn.util import get_dropout_mask
class AugmentedLSTMCell(torch.nn.Module):
"""
`AugmentedLSTMCell` implements a AugmentedLSTM cell.
# Parameters
embed_dim : `int`
The number of expected features in the input.
lstm_dim : `int`
Number of features in the hidden state of the LSTM.
use_highway : `bool`, optional (default = `True`)
If `True` we append a highway network to the outputs of the LSTM.
use_bias : `bool`, optional (default = `True`)
If `True` we use a bias in our LSTM calculations, otherwise we don't.
# Attributes
input_linearity : `nn.Module`
Fused weight matrix which computes a linear function over the input.
state_linearity : `nn.Module`
Fused weight matrix which computes a linear function over the states.
"""
def __init__(
self, embed_dim: int, lstm_dim: int, use_highway: bool = True, use_bias: bool = True
):
super().__init__()
self.embed_dim = embed_dim
self.lstm_dim = lstm_dim
self.use_highway = use_highway
self.use_bias = use_bias
if use_highway:
self._highway_inp_proj_start = 5 * self.lstm_dim
self._highway_inp_proj_end = 6 * self.lstm_dim
# fused linearity of input to input_gate,
# forget_gate, memory_init, output_gate, highway_gate,
# and the actual highway value
self.input_linearity = torch.nn.Linear(
self.embed_dim, self._highway_inp_proj_end, bias=self.use_bias
)
# fused linearity of input to input_gate,
# forget_gate, memory_init, output_gate, highway_gate
self.state_linearity = torch.nn.Linear(
self.lstm_dim, self._highway_inp_proj_start, bias=True
)
else:
# If there's no highway layer then we have a standard
# LSTM. The 4 comes from fusing input, forget, memory, output
# gates/inputs.
self.input_linearity = torch.nn.Linear(
self.embed_dim, 4 * self.lstm_dim, bias=self.use_bias
)
self.state_linearity = torch.nn.Linear(self.lstm_dim, 4 * self.lstm_dim, bias=True)
self.reset_parameters()
def reset_parameters(self):
# Use sensible default initializations for parameters.
block_orthogonal(self.input_linearity.weight.data, [self.lstm_dim, self.embed_dim])
block_orthogonal(self.state_linearity.weight.data, [self.lstm_dim, self.lstm_dim])
self.state_linearity.bias.data.fill_(0.0)
# Initialize forget gate biases to 1.0 as per An Empirical
# Exploration of Recurrent Network Architectures, (Jozefowicz, 2015).
self.state_linearity.bias.data[self.lstm_dim : 2 * self.lstm_dim].fill_(1.0)
def forward(
self,
x: torch.Tensor,
states=Tuple[torch.Tensor, torch.Tensor],
variational_dropout_mask: Optional[torch.BoolTensor] = None,
) -> Tuple[torch.Tensor, torch.Tensor]:
"""
!!! Warning
DO NOT USE THIS LAYER DIRECTLY, instead use the AugmentedLSTM class
# Parameters
x : `torch.Tensor`
Input tensor of shape (bsize x input_dim).
states : `Tuple[torch.Tensor, torch.Tensor]`
Tuple of tensors containing
the hidden state and the cell state of each element in
the batch. Each of these tensors have a dimension of
(bsize x nhid). Defaults to `None`.
# Returns
`Tuple[torch.Tensor, torch.Tensor]`
Returned states. Shape of each state is (bsize x nhid).
"""
hidden_state, memory_state = states
# In Pytext this was done as the last step of the cell.
# But the original AugmentedLSTM from AllenNLP this was done before the processing
if variational_dropout_mask is not None and self.training:
hidden_state = hidden_state * variational_dropout_mask
projected_input = self.input_linearity(x)
projected_state = self.state_linearity(hidden_state)
input_gate = forget_gate = memory_init = output_gate = highway_gate = None
if self.use_highway:
fused_op = projected_input[:, : 5 * self.lstm_dim] + projected_state
fused_chunked = torch.chunk(fused_op, 5, 1)
(input_gate, forget_gate, memory_init, output_gate, highway_gate) = fused_chunked
highway_gate = torch.sigmoid(highway_gate)
else:
fused_op = projected_input + projected_state
input_gate, forget_gate, memory_init, output_gate = torch.chunk(fused_op, 4, 1)
input_gate = torch.sigmoid(input_gate)
forget_gate = torch.sigmoid(forget_gate)
memory_init = torch.tanh(memory_init)
output_gate = torch.sigmoid(output_gate)
memory = input_gate * memory_init + forget_gate * memory_state
timestep_output: torch.Tensor = output_gate * torch.tanh(memory)
if self.use_highway:
highway_input_projection = projected_input[
:, self._highway_inp_proj_start : self._highway_inp_proj_end
]
timestep_output = (
highway_gate * timestep_output
+ (1 - highway_gate) * highway_input_projection # type: ignore
)
return timestep_output, memory
class AugmentedLstm(torch.nn.Module):
"""
`AugmentedLstm` implements a one-layer single directional
AugmentedLSTM layer. AugmentedLSTM is an LSTM which optionally
appends an optional highway network to the output layer. Furthermore the
dropout controls the level of variational dropout done.
# Parameters
input_size : `int`
The number of expected features in the input.
hidden_size : `int`
Number of features in the hidden state of the LSTM.
Defaults to 32.
go_forward : `bool`
Whether to compute features left to right (forward)
or right to left (backward).
recurrent_dropout_probability : `float`
Variational dropout probability to use. Defaults to 0.0.
use_highway : `bool`
If `True` we append a highway network to the outputs of the LSTM.
use_input_projection_bias : `bool`
If `True` we use a bias in our LSTM calculations, otherwise we don't.
# Attributes
cell : `AugmentedLSTMCell`
`AugmentedLSTMCell` that is applied at every timestep.
"""
def __init__(
self,
input_size: int,
hidden_size: int,
go_forward: bool = True,
recurrent_dropout_probability: float = 0.0,
use_highway: bool = True,
use_input_projection_bias: bool = True,
):
super().__init__()
self.embed_dim = input_size
self.lstm_dim = hidden_size
self.go_forward = go_forward
self.use_highway = use_highway
self.recurrent_dropout_probability = recurrent_dropout_probability
self.cell = AugmentedLSTMCell(
self.embed_dim, self.lstm_dim, self.use_highway, use_input_projection_bias
)
def forward(
self, inputs: PackedSequence, states: Optional[Tuple[torch.Tensor, torch.Tensor]] = None
) -> Tuple[PackedSequence, Tuple[torch.Tensor, torch.Tensor]]:
"""
Warning: Would be better to use the BiAugmentedLstm class in a regular model
Given an input batch of sequential data such as word embeddings, produces a single layer unidirectional
AugmentedLSTM representation of the sequential input and new state tensors.
# Parameters
inputs : `PackedSequence`
`bsize` sequences of shape `(len, input_dim)` each, in PackedSequence format
states : `Tuple[torch.Tensor, torch.Tensor]`
Tuple of tensors containing the initial hidden state and
the cell state of each element in the batch. Each of these tensors have a dimension of
(1 x bsize x nhid). Defaults to `None`.
# Returns
`Tuple[PackedSequence, Tuple[torch.Tensor, torch.Tensor]]`
AugmentedLSTM representation of input and the state of the LSTM `t = seq_len`.
Shape of representation is (bsize x seq_len x representation_dim).
Shape of each state is (1 x bsize x nhid).
"""
if not isinstance(inputs, PackedSequence):
raise ConfigurationError("inputs must be PackedSequence but got %s" % (type(inputs)))
sequence_tensor, batch_lengths = pad_packed_sequence(inputs, batch_first=True)
batch_size = sequence_tensor.size()[0]
total_timesteps = sequence_tensor.size()[1]
output_accumulator = sequence_tensor.new_zeros(batch_size, total_timesteps, self.lstm_dim)
if states is None:
full_batch_previous_memory = sequence_tensor.new_zeros(batch_size, self.lstm_dim)
full_batch_previous_state = sequence_tensor.data.new_zeros(batch_size, self.lstm_dim)
else:
full_batch_previous_state = states[0].squeeze(0)
full_batch_previous_memory = states[1].squeeze(0)
current_length_index = batch_size - 1 if self.go_forward else 0
if self.recurrent_dropout_probability > 0.0:
dropout_mask = get_dropout_mask(
self.recurrent_dropout_probability, full_batch_previous_memory
)
else:
dropout_mask = None
for timestep in range(total_timesteps):
index = timestep if self.go_forward else total_timesteps - timestep - 1
if self.go_forward:
while batch_lengths[current_length_index] <= index:
current_length_index -= 1
# If we're going backwards, we are _picking up_ more indices.
else:
# First conditional: Are we already at the maximum
# number of elements in the batch?
# Second conditional: Does the next shortest
# sequence beyond the current batch
# index require computation use this timestep?
while (
current_length_index < (len(batch_lengths) - 1)
and batch_lengths[current_length_index + 1] > index
):
current_length_index += 1
previous_memory = full_batch_previous_memory[0 : current_length_index + 1].clone()
previous_state = full_batch_previous_state[0 : current_length_index + 1].clone()
timestep_input = sequence_tensor[0 : current_length_index + 1, index]
timestep_output, memory = self.cell(
timestep_input,
(previous_state, previous_memory),
dropout_mask[0 : current_length_index + 1] if dropout_mask is not None else None,
)
full_batch_previous_memory = full_batch_previous_memory.data.clone()
full_batch_previous_state = full_batch_previous_state.data.clone()
full_batch_previous_memory[0 : current_length_index + 1] = memory
full_batch_previous_state[0 : current_length_index + 1] = timestep_output
output_accumulator[0 : current_length_index + 1, index, :] = timestep_output
output_accumulator = pack_padded_sequence(
output_accumulator, batch_lengths, batch_first=True
)
# Mimic the pytorch API by returning state in the following shape:
# (num_layers * num_directions, batch_size, lstm_dim). As this
# LSTM cannot be stacked, the first dimension here is just 1.
final_state = (
full_batch_previous_state.unsqueeze(0),
full_batch_previous_memory.unsqueeze(0),
)
return output_accumulator, final_state
class BiAugmentedLstm(torch.nn.Module):
"""
`BiAugmentedLstm` implements a generic AugmentedLSTM representation layer.
BiAugmentedLstm is an LSTM which optionally appends an optional highway network to the output layer.
Furthermore the dropout controls the level of variational dropout done.
# Parameters
input_size : `int`, required
The dimension of the inputs to the LSTM.
hidden_size : `int`, required.
The dimension of the outputs of the LSTM.
num_layers : `int`
Number of recurrent layers. Eg. setting `num_layers=2`
would mean stacking two LSTMs together to form a stacked LSTM,
with the second LSTM taking in the outputs of the first LSTM and
computing the final result. Defaults to 1.
bias : `bool`
If `True` we use a bias in our LSTM calculations, otherwise we don't.
recurrent_dropout_probability : `float`, optional (default = `0.0`)
Variational dropout probability to use.
bidirectional : `bool`
If `True`, becomes a bidirectional LSTM. Defaults to `True`.
padding_value : `float`, optional (default = `0.0`)
Value for the padded elements. Defaults to 0.0.
use_highway : `bool`, optional (default = `True`)
Whether or not to use highway connections between layers. This effectively involves
reparameterising the normal output of an LSTM as::
gate = sigmoid(W_x1 * x_t + W_h * h_t)
output = gate * h_t + (1 - gate) * (W_x2 * x_t)
# Returns
output_accumulator : `PackedSequence`
The outputs of the LSTM for each timestep. A tensor of shape (batch_size, max_timesteps, hidden_size) where
for a given batch element, all outputs past the sequence length for that batch are zero tensors.
"""
def __init__(
self,
input_size: int,
hidden_size: int,
num_layers: int = 1,
bias: bool = True,
recurrent_dropout_probability: float = 0.0,
bidirectional: bool = False,
padding_value: float = 0.0,
use_highway: bool = True,
) -> None:
super().__init__()
self.input_size = input_size
self.padding_value = padding_value
self.hidden_size = hidden_size
self.num_layers = num_layers
self.bidirectional = bidirectional
self.recurrent_dropout_probability = recurrent_dropout_probability
self.use_highway = use_highway
self.use_bias = bias
num_directions = int(self.bidirectional) + 1
self.forward_layers = torch.nn.ModuleList()
if self.bidirectional:
self.backward_layers = torch.nn.ModuleList()
lstm_embed_dim = self.input_size
for _ in range(self.num_layers):
self.forward_layers.append(
AugmentedLstm(
lstm_embed_dim,
self.hidden_size,
go_forward=True,
recurrent_dropout_probability=self.recurrent_dropout_probability,
use_highway=self.use_highway,
use_input_projection_bias=self.use_bias,
)
)
if self.bidirectional:
self.backward_layers.append(
AugmentedLstm(
lstm_embed_dim,
self.hidden_size,
go_forward=False,
recurrent_dropout_probability=self.recurrent_dropout_probability,
use_highway=self.use_highway,
use_input_projection_bias=self.use_bias,
)
)
lstm_embed_dim = self.hidden_size * num_directions
self.representation_dim = lstm_embed_dim
def forward(
self, inputs: torch.Tensor, states: Optional[Tuple[torch.Tensor, torch.Tensor]] = None
) -> Tuple[torch.Tensor, Tuple[torch.Tensor, torch.Tensor]]:
"""
Given an input batch of sequential data such as word embeddings, produces
a AugmentedLSTM representation of the sequential input and new state
tensors.
# Parameters
inputs : `PackedSequence`, required.
A tensor of shape (batch_size, num_timesteps, input_size)
to apply the LSTM over.
states : `Tuple[torch.Tensor, torch.Tensor]`
Tuple of tensors containing
the initial hidden state and the cell state of each element in
the batch. Each of these tensors have a dimension of
(bsize x num_layers x num_directions * nhid). Defaults to `None`.
# Returns
`Tuple[torch.Tensor, Tuple[torch.Tensor, torch.Tensor]]`
AgumentedLSTM representation of input and
the state of the LSTM `t = seq_len`.
Shape of representation is (bsize x seq_len x representation_dim).
Shape of each state is (bsize x num_layers * num_directions x nhid).
"""
if not isinstance(inputs, PackedSequence):
raise ConfigurationError("inputs must be PackedSequence but got %s" % (type(inputs)))
# if states is not None:
# states = (states[0].transpose(0, 1), states[1].transpose(0, 1))
if self.bidirectional:
return self._forward_bidirectional(inputs, states)
return self._forward_unidirectional(inputs, states)
def _forward_bidirectional(
self, inputs: PackedSequence, states: Optional[Tuple[torch.Tensor, torch.Tensor]]
):
output_sequence = inputs
final_h = []
final_c = []
if not states:
hidden_states = [None] * self.num_layers
elif states[0].size()[0] != self.num_layers:
raise RuntimeError(
"Initial states were passed to forward() but the number of "
"initial states does not match the number of layers."
)
else:
hidden_states = list(
zip( # type: ignore
states[0].chunk(self.num_layers, 0), states[1].chunk(self.num_layers, 0)
)
)
for i, state in enumerate(hidden_states):
if state:
forward_state = state[0].chunk(2, -1)
backward_state = state[1].chunk(2, -1)
else:
forward_state = backward_state = None
forward_layer = self.forward_layers[i]
backward_layer = self.backward_layers[i]
# The state is duplicated to mirror the Pytorch API for LSTMs.
forward_output, final_forward_state = forward_layer(output_sequence, forward_state)
backward_output, final_backward_state = backward_layer(output_sequence, backward_state)
forward_output, lengths = pad_packed_sequence(forward_output, batch_first=True)
backward_output, _ = pad_packed_sequence(backward_output, batch_first=True)
output_sequence = torch.cat([forward_output, backward_output], -1)
output_sequence = pack_padded_sequence(output_sequence, lengths, batch_first=True)
final_h.extend([final_forward_state[0], final_backward_state[0]])
final_c.extend([final_forward_state[1], final_backward_state[1]])
final_h = torch.cat(final_h, dim=0)
final_c = torch.cat(final_c, dim=0)
final_state_tuple = (final_h, final_c)
output_sequence, batch_lengths = pad_packed_sequence(
output_sequence, padding_value=self.padding_value, batch_first=True
)
output_sequence = pack_padded_sequence(output_sequence, batch_lengths, batch_first=True)
return output_sequence, final_state_tuple
def _forward_unidirectional(
self, inputs: PackedSequence, states: Optional[Tuple[torch.Tensor, torch.Tensor]]
):
output_sequence = inputs
final_h = []
final_c = []
if not states:
hidden_states = [None] * self.num_layers
elif states[0].size()[0] != self.num_layers:
raise RuntimeError(
"Initial states were passed to forward() but the number of "
"initial states does not match the number of layers."
)
else:
hidden_states = list(
zip( # type: ignore
states[0].chunk(self.num_layers, 0), states[1].chunk(self.num_layers, 0)
) # type: ignore
)
for i, state in enumerate(hidden_states):
forward_layer = self.forward_layers[i]
# The state is duplicated to mirror the Pytorch API for LSTMs.
forward_output, final_forward_state = forward_layer(output_sequence, state)
output_sequence = forward_output
final_h.append(final_forward_state[0])
final_c.append(final_forward_state[1])
final_h = torch.cat(final_h, dim=0)
final_c = torch.cat(final_c, dim=0)
final_state_tuple = (final_h, final_c)
output_sequence, batch_lengths = pad_packed_sequence(
output_sequence, padding_value=self.padding_value, batch_first=True
)
output_sequence = pack_padded_sequence(output_sequence, batch_lengths, batch_first=True)
return output_sequence, final_state_tuple
| allennlp-master | allennlp/modules/augmented_lstm.py |
import torch
from allennlp.nn import util
class LayerNorm(torch.nn.Module):
"""
An implementation of [Layer Normalization](
https://www.semanticscholar.org/paper/Layer-Normalization-Ba-Kiros/97fb4e3d45bb098e27e0071448b6152217bd35a5).
Layer Normalization stabilises the training of deep neural networks by
normalising the outputs of neurons from a particular layer. It computes:
output = (gamma * (tensor - mean) / (std + eps)) + beta
# Parameters
dimension : `int`, required.
The dimension of the layer output to normalize.
# Returns
The normalized layer output.
""" # noqa
def __init__(self, dimension: int) -> None:
super().__init__()
self.gamma = torch.nn.Parameter(torch.ones(dimension))
self.beta = torch.nn.Parameter(torch.zeros(dimension))
def forward(self, tensor: torch.Tensor):
mean = tensor.mean(-1, keepdim=True)
std = tensor.std(-1, unbiased=False, keepdim=True)
return (
self.gamma * (tensor - mean) / (std + util.tiny_value_of_dtype(std.dtype)) + self.beta
)
| allennlp-master | allennlp/modules/layer_norm.py |
import torch
from allennlp.nn import Activation
class GatedSum(torch.nn.Module):
"""
This `Module` represents a gated sum of two tensors `a` and `b`. Specifically:
```
f = activation(W [a; b])
out = f * a + (1 - f) * b
```
# Parameters
input_dim : `int`, required
The dimensionality of the input. We assume the input have shape `(..., input_dim)`.
activation : `Activation`, optional (default = `torch.nn.Sigmoid()`)
The activation function to use.
"""
def __init__(self, input_dim: int, activation: Activation = torch.nn.Sigmoid()) -> None:
super().__init__()
self.input_dim = input_dim
self._gate = torch.nn.Linear(input_dim * 2, 1)
self._activation = activation
def get_input_dim(self):
return self.input_dim
def get_output_dim(self):
return self.input_dim
def forward(self, input_a: torch.Tensor, input_b: torch.Tensor) -> torch.Tensor:
if input_a.size() != input_b.size():
raise ValueError("The input must have the same size.")
if input_a.size(-1) != self.input_dim:
raise ValueError("Input size must match `input_dim`.")
gate_value = self._activation(self._gate(torch.cat([input_a, input_b], -1)))
return gate_value * input_a + (1 - gate_value) * input_b
| allennlp-master | allennlp/modules/gated_sum.py |
from overrides import overrides
import torch
from typing import List
from allennlp.modules.seq2seq_encoders.seq2seq_encoder import Seq2SeqEncoder
@Seq2SeqEncoder.register("compose")
class ComposeEncoder(Seq2SeqEncoder):
"""This class can be used to compose several encoders in sequence.
Among other things, this can be used to add a "pre-contextualizer" before a Seq2SeqEncoder.
Registered as a `Seq2SeqEncoder` with name "compose".
# Parameters
encoders : `List[Seq2SeqEncoder]`, required.
A non-empty list of encoders to compose. The encoders must match in bidirectionality.
"""
def __init__(self, encoders: List[Seq2SeqEncoder]):
super().__init__()
self.encoders = encoders
for idx, encoder in enumerate(encoders):
self.add_module("encoder%d" % idx, encoder)
# Compute bidirectionality.
all_bidirectional = all(encoder.is_bidirectional() for encoder in encoders)
any_bidirectional = any(encoder.is_bidirectional() for encoder in encoders)
self.bidirectional = all_bidirectional
if all_bidirectional != any_bidirectional:
raise ValueError("All encoders need to match in bidirectionality.")
if len(self.encoders) < 1:
raise ValueError("Need at least one encoder.")
last_enc = None
for enc in encoders:
if last_enc is not None and last_enc.get_output_dim() != enc.get_input_dim():
raise ValueError("Encoder input and output dimensions don't match.")
last_enc = enc
@overrides
def forward(self, inputs: torch.Tensor, mask: torch.BoolTensor = None) -> torch.Tensor:
"""
# Parameters
inputs : `torch.Tensor`, required.
A tensor of shape (batch_size, timesteps, input_dim)
mask : `torch.BoolTensor`, optional (default = `None`).
A tensor of shape (batch_size, timesteps).
# Returns
A tensor computed by composing the sequence of encoders.
"""
for encoder in self.encoders:
inputs = encoder(inputs, mask)
return inputs
@overrides
def get_input_dim(self) -> int:
return self.encoders[0].get_input_dim()
@overrides
def get_output_dim(self) -> int:
return self.encoders[-1].get_output_dim()
@overrides
def is_bidirectional(self) -> bool:
return self.bidirectional
| allennlp-master | allennlp/modules/seq2seq_encoders/compose_encoder.py |
from allennlp.modules.encoder_base import _EncoderBase
from allennlp.common import Registrable
class Seq2SeqEncoder(_EncoderBase, Registrable):
"""
A `Seq2SeqEncoder` is a `Module` that takes as input a sequence of vectors and returns a
modified sequence of vectors. Input shape : `(batch_size, sequence_length, input_dim)`; output
shape : `(batch_size, sequence_length, output_dim)`.
We add two methods to the basic `Module` API: `get_input_dim()` and `get_output_dim()`.
You might need this if you want to construct a `Linear` layer using the output of this encoder,
or to raise sensible errors for mis-matching input dimensions.
"""
def get_input_dim(self) -> int:
"""
Returns the dimension of the vector input for each element in the sequence input
to a `Seq2SeqEncoder`. This is `not` the shape of the input tensor, but the
last element of that shape.
"""
raise NotImplementedError
def get_output_dim(self) -> int:
"""
Returns the dimension of each vector in the sequence output by this `Seq2SeqEncoder`.
This is `not` the shape of the returned tensor, but the last element of that shape.
"""
raise NotImplementedError
def is_bidirectional(self) -> bool:
"""
Returns `True` if this encoder is bidirectional. If so, we assume the forward direction
of the encoder is the first half of the final dimension, and the backward direction is the
second half.
"""
raise NotImplementedError
| allennlp-master | allennlp/modules/seq2seq_encoders/seq2seq_encoder.py |
from overrides import overrides
import torch
from torch.nn.utils.rnn import pad_packed_sequence
from allennlp.common.checks import ConfigurationError
from allennlp.modules.augmented_lstm import AugmentedLstm
from allennlp.modules.seq2seq_encoders.seq2seq_encoder import Seq2SeqEncoder
from allennlp.modules.stacked_alternating_lstm import StackedAlternatingLstm
from allennlp.modules.stacked_bidirectional_lstm import StackedBidirectionalLstm
class PytorchSeq2SeqWrapper(Seq2SeqEncoder):
"""
Pytorch's RNNs have two outputs: the hidden state for every time step, and the hidden state at
the last time step for every layer. We just want the first one as a single output. This
wrapper pulls out that output, and adds a `get_output_dim` method, which is useful if you
want to, e.g., define a linear + softmax layer on top of this to get some distribution over a
set of labels. The linear layer needs to know its input dimension before it is called, and you
can get that from `get_output_dim`.
In order to be wrapped with this wrapper, a class must have the following members:
- `self.input_size: int`
- `self.hidden_size: int`
- `def forward(inputs: PackedSequence, hidden_state: torch.Tensor) ->
Tuple[PackedSequence, torch.Tensor]`.
- `self.bidirectional: bool` (optional)
This is what pytorch's RNN's look like - just make sure your class looks like those, and it
should work.
Note that we *require* you to pass a binary mask of shape (batch_size, sequence_length)
when you call this module, to avoid subtle bugs around masking. If you already have a
`PackedSequence` you can pass `None` as the second parameter.
We support stateful RNNs where the final state from each batch is used as the initial
state for the subsequent batch by passing `stateful=True` to the constructor.
"""
def __init__(self, module: torch.nn.Module, stateful: bool = False) -> None:
super().__init__(stateful)
self._module = module
try:
if not self._module.batch_first:
raise ConfigurationError("Our encoder semantics assumes batch is always first!")
except AttributeError:
pass
try:
self._is_bidirectional = self._module.bidirectional
except AttributeError:
self._is_bidirectional = False
if self._is_bidirectional:
self._num_directions = 2
else:
self._num_directions = 1
@overrides
def get_input_dim(self) -> int:
return self._module.input_size
@overrides
def get_output_dim(self) -> int:
return self._module.hidden_size * self._num_directions
@overrides
def is_bidirectional(self) -> bool:
return self._is_bidirectional
@overrides
def forward(
self, inputs: torch.Tensor, mask: torch.BoolTensor, hidden_state: torch.Tensor = None
) -> torch.Tensor:
if self.stateful and mask is None:
raise ValueError("Always pass a mask with stateful RNNs.")
if self.stateful and hidden_state is not None:
raise ValueError("Stateful RNNs provide their own initial hidden_state.")
if mask is None:
return self._module(inputs, hidden_state)[0]
batch_size, total_sequence_length = mask.size()
packed_sequence_output, final_states, restoration_indices = self.sort_and_run_forward(
self._module, inputs, mask, hidden_state
)
unpacked_sequence_tensor, _ = pad_packed_sequence(packed_sequence_output, batch_first=True)
num_valid = unpacked_sequence_tensor.size(0)
# Some RNNs (GRUs) only return one state as a Tensor. Others (LSTMs) return two.
# If one state, use a single element list to handle in a consistent manner below.
if not isinstance(final_states, (list, tuple)) and self.stateful:
final_states = [final_states]
# Add back invalid rows.
if num_valid < batch_size:
_, length, output_dim = unpacked_sequence_tensor.size()
zeros = unpacked_sequence_tensor.new_zeros(batch_size - num_valid, length, output_dim)
unpacked_sequence_tensor = torch.cat([unpacked_sequence_tensor, zeros], 0)
# The states also need to have invalid rows added back.
if self.stateful:
new_states = []
for state in final_states:
num_layers, _, state_dim = state.size()
zeros = state.new_zeros(num_layers, batch_size - num_valid, state_dim)
new_states.append(torch.cat([state, zeros], 1))
final_states = new_states
# It's possible to need to pass sequences which are padded to longer than the
# max length of the sequence to a Seq2SeqEncoder. However, packing and unpacking
# the sequences mean that the returned tensor won't include these dimensions, because
# the RNN did not need to process them. We add them back on in the form of zeros here.
sequence_length_difference = total_sequence_length - unpacked_sequence_tensor.size(1)
if sequence_length_difference > 0:
zeros = unpacked_sequence_tensor.new_zeros(
batch_size, sequence_length_difference, unpacked_sequence_tensor.size(-1)
)
unpacked_sequence_tensor = torch.cat([unpacked_sequence_tensor, zeros], 1)
if self.stateful:
self._update_states(final_states, restoration_indices)
# Restore the original indices and return the sequence.
return unpacked_sequence_tensor.index_select(0, restoration_indices)
@Seq2SeqEncoder.register("gru")
class GruSeq2SeqEncoder(PytorchSeq2SeqWrapper):
"""
Registered as a `Seq2SeqEncoder` with name "gru".
"""
def __init__(
self,
input_size: int,
hidden_size: int,
num_layers: int = 1,
bias: bool = True,
dropout: float = 0.0,
bidirectional: bool = False,
stateful: bool = False,
):
module = torch.nn.GRU(
input_size=input_size,
hidden_size=hidden_size,
num_layers=num_layers,
bias=bias,
batch_first=True,
dropout=dropout,
bidirectional=bidirectional,
)
super().__init__(module=module, stateful=stateful)
@Seq2SeqEncoder.register("lstm")
class LstmSeq2SeqEncoder(PytorchSeq2SeqWrapper):
"""
Registered as a `Seq2SeqEncoder` with name "lstm".
"""
def __init__(
self,
input_size: int,
hidden_size: int,
num_layers: int = 1,
bias: bool = True,
dropout: float = 0.0,
bidirectional: bool = False,
stateful: bool = False,
):
module = torch.nn.LSTM(
input_size=input_size,
hidden_size=hidden_size,
num_layers=num_layers,
bias=bias,
batch_first=True,
dropout=dropout,
bidirectional=bidirectional,
)
super().__init__(module=module, stateful=stateful)
@Seq2SeqEncoder.register("rnn")
class RnnSeq2SeqEncoder(PytorchSeq2SeqWrapper):
"""
Registered as a `Seq2SeqEncoder` with name "rnn".
"""
def __init__(
self,
input_size: int,
hidden_size: int,
num_layers: int = 1,
nonlinearity: str = "tanh",
bias: bool = True,
dropout: float = 0.0,
bidirectional: bool = False,
stateful: bool = False,
):
module = torch.nn.RNN(
input_size=input_size,
hidden_size=hidden_size,
num_layers=num_layers,
nonlinearity=nonlinearity,
bias=bias,
batch_first=True,
dropout=dropout,
bidirectional=bidirectional,
)
super().__init__(module=module, stateful=stateful)
@Seq2SeqEncoder.register("augmented_lstm")
class AugmentedLstmSeq2SeqEncoder(PytorchSeq2SeqWrapper):
"""
Registered as a `Seq2SeqEncoder` with name "augmented_lstm".
"""
def __init__(
self,
input_size: int,
hidden_size: int,
go_forward: bool = True,
recurrent_dropout_probability: float = 0.0,
use_highway: bool = True,
use_input_projection_bias: bool = True,
stateful: bool = False,
) -> None:
module = AugmentedLstm(
input_size=input_size,
hidden_size=hidden_size,
go_forward=go_forward,
recurrent_dropout_probability=recurrent_dropout_probability,
use_highway=use_highway,
use_input_projection_bias=use_input_projection_bias,
)
super().__init__(module=module, stateful=stateful)
@Seq2SeqEncoder.register("alternating_lstm")
class StackedAlternatingLstmSeq2SeqEncoder(PytorchSeq2SeqWrapper):
"""
Registered as a `Seq2SeqEncoder` with name "alternating_lstm".
"""
def __init__(
self,
input_size: int,
hidden_size: int,
num_layers: int,
recurrent_dropout_probability: float = 0.0,
use_highway: bool = True,
use_input_projection_bias: bool = True,
stateful: bool = False,
) -> None:
module = StackedAlternatingLstm(
input_size=input_size,
hidden_size=hidden_size,
num_layers=num_layers,
recurrent_dropout_probability=recurrent_dropout_probability,
use_highway=use_highway,
use_input_projection_bias=use_input_projection_bias,
)
super().__init__(module=module, stateful=stateful)
@Seq2SeqEncoder.register("stacked_bidirectional_lstm")
class StackedBidirectionalLstmSeq2SeqEncoder(PytorchSeq2SeqWrapper):
"""
Registered as a `Seq2SeqEncoder` with name "stacked_bidirectional_lstm".
"""
def __init__(
self,
input_size: int,
hidden_size: int,
num_layers: int,
recurrent_dropout_probability: float = 0.0,
layer_dropout_probability: float = 0.0,
use_highway: bool = True,
stateful: bool = False,
) -> None:
module = StackedBidirectionalLstm(
input_size=input_size,
hidden_size=hidden_size,
num_layers=num_layers,
recurrent_dropout_probability=recurrent_dropout_probability,
layer_dropout_probability=layer_dropout_probability,
use_highway=use_highway,
)
super().__init__(module=module, stateful=stateful)
| allennlp-master | allennlp/modules/seq2seq_encoders/pytorch_seq2seq_wrapper.py |
import torch
from overrides import overrides
from allennlp.modules.feedforward import FeedForward
from allennlp.modules.seq2seq_encoders.seq2seq_encoder import Seq2SeqEncoder
@Seq2SeqEncoder.register("feedforward")
class FeedForwardEncoder(Seq2SeqEncoder):
"""
This class applies the `FeedForward` to each item in sequences.
Registered as a `Seq2SeqEncoder` with name "feedforward".
"""
def __init__(self, feedforward: FeedForward) -> None:
super().__init__()
self._feedforward = feedforward
@overrides
def get_input_dim(self) -> int:
return self._feedforward.get_input_dim()
@overrides
def get_output_dim(self) -> int:
return self._feedforward.get_output_dim()
@overrides
def is_bidirectional(self) -> bool:
return False
@overrides
def forward(self, inputs: torch.Tensor, mask: torch.BoolTensor = None) -> torch.Tensor:
"""
# Parameters
inputs : `torch.Tensor`, required.
A tensor of shape (batch_size, timesteps, input_dim)
mask : `torch.BoolTensor`, optional (default = `None`).
A tensor of shape (batch_size, timesteps).
# Returns
A tensor of shape (batch_size, timesteps, output_dim).
"""
if mask is None:
return self._feedforward(inputs)
else:
outputs = self._feedforward(inputs)
return outputs * mask.unsqueeze(dim=-1)
| allennlp-master | allennlp/modules/seq2seq_encoders/feedforward_encoder.py |
from overrides import overrides
import torch
from allennlp.modules.seq2seq_encoders.seq2seq_encoder import Seq2SeqEncoder
@Seq2SeqEncoder.register("pass_through")
class PassThroughEncoder(Seq2SeqEncoder):
"""
This class allows you to specify skipping a `Seq2SeqEncoder` just
by changing a configuration file. This is useful for ablations and
measuring the impact of different elements of your model.
Registered as a `Seq2SeqEncoder` with name "pass_through".
"""
def __init__(self, input_dim: int) -> None:
super().__init__()
self._input_dim = input_dim
@overrides
def get_input_dim(self) -> int:
return self._input_dim
@overrides
def get_output_dim(self) -> int:
return self._input_dim
@overrides
def is_bidirectional(self):
return False
@overrides
def forward(self, inputs: torch.Tensor, mask: torch.BoolTensor = None) -> torch.Tensor:
"""
# Parameters
inputs : `torch.Tensor`, required.
A tensor of shape (batch_size, timesteps, input_dim)
mask : `torch.BoolTensor`, optional (default = `None`).
A tensor of shape (batch_size, timesteps).
# Returns
A tensor of shape (batch_size, timesteps, output_dim),
where output_dim = input_dim.
"""
if mask is None:
return inputs
else:
# We should mask out the output instead of the input.
# But here, output = input, so we directly mask out the input.
return inputs * mask.unsqueeze(dim=-1)
| allennlp-master | allennlp/modules/seq2seq_encoders/pass_through_encoder.py |
"""
Modules that transform a sequence of input vectors
into a sequence of output vectors.
Some are just basic wrappers around existing PyTorch modules,
others are AllenNLP modules.
The available Seq2Seq encoders are
- `"gru"` : allennlp.modules.seq2seq_encoders.GruSeq2SeqEncoder
- `"lstm"` : allennlp.modules.seq2seq_encoders.LstmSeq2SeqEncoder
- `"rnn"` : allennlp.modules.seq2seq_encoders.RnnSeq2SeqEncoder
- `"augmented_lstm"` : allennlp.modules.seq2seq_encoders.AugmentedLstmSeq2SeqEncoder
- `"alternating_lstm"` : allennlp.modules.seq2seq_encoders.StackedAlternatingLstmSeq2SeqEncoder
- `"pass_through"` : allennlp.modules.seq2seq_encoders.PassThroughEncoder
- `"feedforward"` : allennlp.modules.seq2seq_encoders.FeedForwardEncoder
- `"pytorch_transformer"` : allennlp.modules.seq2seq_encoders.PytorchTransformer
- `"compose"` : allennlp.modules.seq2seq_encoders.ComposeEncoder
- `"gated-cnn-encoder"` : allennlp.momdules.seq2seq_encoders.GatedCnnEncoder
- `"stacked_bidirectional_lstm"`: allennlp.modules.seq2seq_encoders.StackedBidirectionalLstmSeq2SeqEncoder
"""
from allennlp.modules.seq2seq_encoders.compose_encoder import ComposeEncoder
from allennlp.modules.seq2seq_encoders.feedforward_encoder import FeedForwardEncoder
from allennlp.modules.seq2seq_encoders.gated_cnn_encoder import GatedCnnEncoder
from allennlp.modules.seq2seq_encoders.pass_through_encoder import PassThroughEncoder
from allennlp.modules.seq2seq_encoders.pytorch_seq2seq_wrapper import (
AugmentedLstmSeq2SeqEncoder,
GruSeq2SeqEncoder,
LstmSeq2SeqEncoder,
PytorchSeq2SeqWrapper,
RnnSeq2SeqEncoder,
StackedAlternatingLstmSeq2SeqEncoder,
StackedBidirectionalLstmSeq2SeqEncoder,
)
from allennlp.modules.seq2seq_encoders.seq2seq_encoder import Seq2SeqEncoder
from allennlp.modules.seq2seq_encoders.pytorch_transformer_wrapper import PytorchTransformer
| allennlp-master | allennlp/modules/seq2seq_encoders/__init__.py |
from typing import Optional
from overrides import overrides
import torch
from torch import nn
from allennlp.modules.seq2seq_encoders.seq2seq_encoder import Seq2SeqEncoder
from allennlp.nn.util import add_positional_features
@Seq2SeqEncoder.register("pytorch_transformer")
class PytorchTransformer(Seq2SeqEncoder):
"""
Implements a stacked self-attention encoder similar to the Transformer
architecture in [Attention is all you Need]
(https://www.semanticscholar.org/paper/Attention-Is-All-You-Need-Vaswani-Shazeer/0737da0767d77606169cbf4187b83e1ab62f6077).
This class adapts the Transformer from torch.nn for use in AllenNLP. Optionally, it adds positional encodings.
Registered as a `Seq2SeqEncoder` with name "pytorch_transformer".
# Parameters
input_dim : `int`, required.
The input dimension of the encoder.
feedforward_hidden_dim : `int`, required.
The middle dimension of the FeedForward network. The input and output
dimensions are fixed to ensure sizes match up for the self attention layers.
num_layers : `int`, required.
The number of stacked self attention -> feedforward -> layer normalisation blocks.
num_attention_heads : `int`, required.
The number of attention heads to use per layer.
use_positional_encoding : `bool`, optional, (default = `True`)
Whether to add sinusoidal frequencies to the input tensor. This is strongly recommended,
as without this feature, the self attention layers have no idea of absolute or relative
position (as they are just computing pairwise similarity between vectors of elements),
which can be important features for many tasks.
dropout_prob : `float`, optional, (default = `0.1`)
The dropout probability for the feedforward network.
""" # noqa
def __init__(
self,
input_dim: int,
num_layers: int,
feedforward_hidden_dim: int = 2048,
num_attention_heads: int = 8,
positional_encoding: Optional[str] = None,
positional_embedding_size: int = 512,
dropout_prob: float = 0.1,
activation: str = "relu",
) -> None:
super().__init__()
layer = nn.TransformerEncoderLayer(
d_model=input_dim,
nhead=num_attention_heads,
dim_feedforward=feedforward_hidden_dim,
dropout=dropout_prob,
activation=activation,
)
self._transformer = nn.TransformerEncoder(layer, num_layers)
self._input_dim = input_dim
# initialize parameters
# We do this before the embeddings are initialized so we get the default initialization for the embeddings.
for p in self.parameters():
if p.dim() > 1:
nn.init.xavier_uniform_(p)
if positional_encoding is None:
self._sinusoidal_positional_encoding = False
self._positional_embedding = None
elif positional_encoding == "sinusoidal":
self._sinusoidal_positional_encoding = True
self._positional_embedding = None
elif positional_encoding == "embedding":
self._sinusoidal_positional_encoding = False
self._positional_embedding = nn.Embedding(positional_embedding_size, input_dim)
else:
raise ValueError(
"positional_encoding must be one of None, 'sinusoidal', or 'embedding'"
)
@overrides
def get_input_dim(self) -> int:
return self._input_dim
@overrides
def get_output_dim(self) -> int:
return self._input_dim
@overrides
def is_bidirectional(self):
return False
@overrides
def forward(self, inputs: torch.Tensor, mask: torch.BoolTensor):
output = inputs
if self._sinusoidal_positional_encoding:
output = add_positional_features(output)
if self._positional_embedding is not None:
position_ids = torch.arange(inputs.size(1), dtype=torch.long, device=output.device)
position_ids = position_ids.unsqueeze(0).expand(inputs.shape[:-1])
output = output + self._positional_embedding(position_ids)
# For some reason the torch transformer expects the shape (sequence, batch, features), not the more
# familiar (batch, sequence, features), so we have to fix it.
output = output.permute(1, 0, 2)
# For some other reason, the torch transformer takes the mask backwards.
mask = ~mask
output = self._transformer(output, src_key_padding_mask=mask)
output = output.permute(1, 0, 2)
return output
| allennlp-master | allennlp/modules/seq2seq_encoders/pytorch_transformer_wrapper.py |
from typing import Sequence, List
import math
import torch
from allennlp.common.checks import ConfigurationError
from allennlp.modules.seq2seq_encoders.seq2seq_encoder import Seq2SeqEncoder
class ResidualBlock(torch.nn.Module):
def __init__(
self,
input_dim: int,
layers: Sequence[Sequence[int]],
direction: str,
do_weight_norm: bool = True,
dropout: float = 0.0,
) -> None:
super().__init__()
self.dropout = dropout
self._convolutions = torch.nn.ModuleList()
last_dim = input_dim
for k, layer in enumerate(layers):
# We run two convolutions for each block -- one for the
# output and one for the gates -- do them at once, and
# we'll worry about slicing them in forward
if len(layer) == 2:
# no dilation
conv = torch.nn.Conv1d(
last_dim, layer[1] * 2, layer[0], stride=1, padding=layer[0] - 1, bias=True
)
elif len(layer) == 3:
# a dilation
assert layer[0] == 2, "only support kernel = 2 for now"
conv = torch.nn.Conv1d(
last_dim,
layer[1] * 2,
layer[0],
stride=1,
padding=layer[2],
dilation=layer[2],
bias=True,
)
else:
raise ValueError("each layer must have length 2 or 3")
# from Convolutional Sequence to Sequence Learning
if k == 0:
conv_dropout = dropout
else:
# no dropout
conv_dropout = 0.0
std = math.sqrt((4 * (1.0 - conv_dropout)) / (layer[0] * last_dim))
conv.weight.data.normal_(0, std=std)
conv.bias.data.zero_()
if do_weight_norm:
# conv.weight.shape == (out_channels, in_channels, kernel width)
# in fairseq, conv.weight.shape == ([width, in, out])
# for ConvTBC. In ConvTBC, weight norm is applied as
# nn.utils.weight_norm(m, dim=2) over the output dimension.
# so for regular 1D convs we need to apply over dimension=0
conv = torch.nn.utils.weight_norm(conv, name="weight", dim=0)
self._convolutions.append(conv)
last_dim = layer[1]
assert last_dim == input_dim
if direction not in ("forward", "backward"):
raise ConfigurationError(f"invalid direction: {direction}")
self._direction = direction
def forward(self, x: torch.Tensor) -> torch.Tensor:
# x = (batch_size, dim, timesteps)
# outputs: (batch_size, dim, timesteps) = f(x) + x
out = x
timesteps = x.size(2)
for k, convolution in enumerate(self._convolutions):
if k == 0 and self.dropout > 0:
# apply dropout to the input
out = torch.nn.functional.dropout(out, self.dropout, self.training)
conv_out = convolution(out)
# remove the padding indices
# x is padded by convolution width - 1 in each direction
dims_to_remove = conv_out.size(2) - timesteps
if dims_to_remove > 0:
if self._direction == "forward":
# remove from the end of the sequence
conv_out = conv_out.narrow(2, 0, timesteps)
else:
# remove from the beginning of the sequence
conv_out = conv_out.narrow(2, dims_to_remove, timesteps)
out = torch.nn.functional.glu(conv_out, dim=1)
# see Convolutional Sequence to Sequence Learning
return (out + x) * math.sqrt(0.5)
@Seq2SeqEncoder.register("gated-cnn-encoder")
class GatedCnnEncoder(Seq2SeqEncoder):
"""
**This is work-in-progress and has not been fully tested yet. Use at your own risk!**
A `Seq2SeqEncoder` that uses a Gated CNN.
see
Language Modeling with Gated Convolutional Networks, Yann N. Dauphin et al, ICML 2017
https://arxiv.org/abs/1612.08083
Convolutional Sequence to Sequence Learning, Jonas Gehring et al, ICML 2017
https://arxiv.org/abs/1705.03122
Some possibilities:
Each element of the list is wrapped in a residual block:
input_dim = 512
layers = [ [[4, 512]], [[4, 512], [4, 512]], [[4, 512], [4, 512]], [[4, 512], [4, 512]]
dropout = 0.05
A "bottleneck architecture"
input_dim = 512
layers = [ [[4, 512]], [[1, 128], [5, 128], [1, 512]], ... ]
An architecture with dilated convolutions
input_dim = 512
layers = [
[[2, 512, 1]], [[2, 512, 2]], [[2, 512, 4]], [[2, 512, 8]], # receptive field == 16
[[2, 512, 1]], [[2, 512, 2]], [[2, 512, 4]], [[2, 512, 8]], # receptive field == 31
[[2, 512, 1]], [[2, 512, 2]], [[2, 512, 4]], [[2, 512, 8]], # receptive field == 46
[[2, 512, 1]], [[2, 512, 2]], [[2, 512, 4]], [[2, 512, 8]], # receptive field == 57
]
Registered as a `Seq2SeqEncoder` with name "gated-cnn-encoder".
# Parameters
input_dim : `int`, required
The dimension of the inputs.
layers : `Sequence[Sequence[Sequence[int]]]`, required
The layer dimensions for each `ResidualBlock`.
dropout : `float`, optional (default = `0.0`)
The dropout for each `ResidualBlock`.
return_all_layers : `bool`, optional (default = `False`)
Whether to return all layers or just the last layer.
"""
def __init__(
self,
input_dim: int,
layers: Sequence[Sequence[Sequence[int]]],
dropout: float = 0.0,
return_all_layers: bool = False,
) -> None:
super().__init__()
self._forward_residual_blocks = torch.nn.ModuleList()
self._backward_residual_blocks = torch.nn.ModuleList()
self._input_dim = input_dim
self._output_dim = input_dim * 2
for layer in layers:
self._forward_residual_blocks.append(
ResidualBlock(input_dim, layer, "forward", dropout=dropout)
)
self._backward_residual_blocks.append(
ResidualBlock(input_dim, layer, "backward", dropout=dropout)
)
self._return_all_layers = return_all_layers
def forward(self, token_embeddings: torch.Tensor, mask: torch.BoolTensor):
# Convolutions need transposed input
transposed_embeddings = torch.transpose(token_embeddings, 1, 2)
# We need to broadcast the mask to feature dimension,
# and to use masked_fill_ we need the inverse of the mask.
mask_for_fill = ~mask.unsqueeze(1)
if self._return_all_layers:
# outputs will be [[all forward layers], [all backward layers]]
layer_outputs: List[List[torch.Tensor]] = [[], []]
else:
# outputs will be [forward final layer, backward final layer]
outputs: List[torch.Tensor] = []
for k, blocks in enumerate([self._forward_residual_blocks, self._backward_residual_blocks]):
out = transposed_embeddings
# Due to zero padding for backward sequences, we need
# to ensure that the input has zeros everywhere where
# there isn't a mask.
for block in blocks:
out = block(out.masked_fill(mask_for_fill, 0.0))
if self._return_all_layers:
layer_outputs[k].append(out)
if not self._return_all_layers:
outputs.append(out)
if self._return_all_layers:
return [
torch.cat([fwd, bwd], dim=1).transpose(1, 2) for fwd, bwd in zip(*layer_outputs)
]
else:
# Concatenate forward and backward, then transpose back
return torch.cat(outputs, dim=1).transpose(1, 2)
def get_input_dim(self) -> int:
return self._input_dim
def get_output_dim(self) -> int:
return self._output_dim
def is_bidirectional(self) -> bool:
return True
| allennlp-master | allennlp/modules/seq2seq_encoders/gated_cnn_encoder.py |
"""
An *attention* module that computes the similarity between
an input vector and the rows of a matrix.
"""
import torch
from overrides import overrides
from allennlp.common.registrable import Registrable
from allennlp.nn.util import masked_softmax
class Attention(torch.nn.Module, Registrable):
"""
An `Attention` takes two inputs: a (batched) vector and a matrix, plus an optional mask on the
rows of the matrix. We compute the similarity between the vector and each row in the matrix,
and then (optionally) perform a softmax over rows using those computed similarities.
Inputs:
- vector: shape `(batch_size, embedding_dim)`
- matrix: shape `(batch_size, num_rows, embedding_dim)`
- matrix_mask: shape `(batch_size, num_rows)`, specifying which rows are just padding.
Output:
- attention: shape `(batch_size, num_rows)`.
# Parameters
normalize : `bool`, optional (default = `True`)
If true, we normalize the computed similarities with a softmax, to return a probability
distribution for your attention. If false, this is just computing a similarity score.
"""
def __init__(self, normalize: bool = True) -> None:
super().__init__()
self._normalize = normalize
@overrides
def forward(
self, vector: torch.Tensor, matrix: torch.Tensor, matrix_mask: torch.BoolTensor = None
) -> torch.Tensor:
similarities = self._forward_internal(vector, matrix)
if self._normalize:
return masked_softmax(similarities, matrix_mask)
else:
return similarities
def _forward_internal(self, vector: torch.Tensor, matrix: torch.Tensor) -> torch.Tensor:
raise NotImplementedError
| allennlp-master | allennlp/modules/attention/attention.py |
from overrides import overrides
import torch
from torch.nn.parameter import Parameter
from allennlp.modules.attention.attention import Attention
from allennlp.nn import Activation
@Attention.register("bilinear")
class BilinearAttention(Attention):
"""
Computes attention between a vector and a matrix using a bilinear attention function. This
function has a matrix of weights `W` and a bias `b`, and the similarity between the vector
`x` and the matrix `y` is computed as `x^T W y + b`.
Registered as an `Attention` with name "bilinear".
# Parameters
vector_dim : `int`, required
The dimension of the vector, `x`, described above. This is `x.size()[-1]` - the length
of the vector that will go into the similarity computation. We need this so we can build
the weight matrix correctly.
matrix_dim : `int`, required
The dimension of the matrix, `y`, described above. This is `y.size()[-1]` - the length
of the vector that will go into the similarity computation. We need this so we can build
the weight matrix correctly.
activation : `Activation`, optional (default=`linear`)
An activation function applied after the `x^T W y + b` calculation. Default is
linear, i.e. no activation.
normalize : `bool`, optional (default=`True`)
If true, we normalize the computed similarities with a softmax, to return a probability
distribution for your attention. If false, this is just computing a similarity score.
"""
def __init__(
self,
vector_dim: int,
matrix_dim: int,
activation: Activation = None,
normalize: bool = True,
) -> None:
super().__init__(normalize)
self._weight_matrix = Parameter(torch.Tensor(vector_dim, matrix_dim))
self._bias = Parameter(torch.Tensor(1))
self._activation = activation or Activation.by_name("linear")()
self.reset_parameters()
def reset_parameters(self):
torch.nn.init.xavier_uniform_(self._weight_matrix)
self._bias.data.fill_(0)
@overrides
def _forward_internal(self, vector: torch.Tensor, matrix: torch.Tensor) -> torch.Tensor:
intermediate = vector.mm(self._weight_matrix).unsqueeze(1)
return self._activation(intermediate.bmm(matrix.transpose(1, 2)).squeeze(1) + self._bias)
| allennlp-master | allennlp/modules/attention/bilinear_attention.py |
from allennlp.modules.attention.attention import Attention
from allennlp.modules.attention.bilinear_attention import BilinearAttention
from allennlp.modules.attention.additive_attention import AdditiveAttention
from allennlp.modules.attention.cosine_attention import CosineAttention
from allennlp.modules.attention.dot_product_attention import DotProductAttention
from allennlp.modules.attention.linear_attention import LinearAttention
| allennlp-master | allennlp/modules/attention/__init__.py |
import torch
from overrides import overrides
from allennlp.modules.attention.attention import Attention
@Attention.register("dot_product")
class DotProductAttention(Attention):
"""
Computes attention between a vector and a matrix using dot product.
Registered as an `Attention` with name "dot_product".
"""
@overrides
def _forward_internal(self, vector: torch.Tensor, matrix: torch.Tensor) -> torch.Tensor:
return matrix.bmm(vector.unsqueeze(-1)).squeeze(-1)
| allennlp-master | allennlp/modules/attention/dot_product_attention.py |
import torch
from overrides import overrides
from allennlp.modules.attention.attention import Attention
from allennlp.nn import util
@Attention.register("cosine")
class CosineAttention(Attention):
"""
Computes attention between a vector and a matrix using cosine similarity.
Registered as an `Attention` with name "cosine".
"""
@overrides
def _forward_internal(self, vector: torch.Tensor, matrix: torch.Tensor) -> torch.Tensor:
a_norm = vector / (
vector.norm(p=2, dim=-1, keepdim=True) + util.tiny_value_of_dtype(vector.dtype)
)
b_norm = matrix / (
matrix.norm(p=2, dim=-1, keepdim=True) + util.tiny_value_of_dtype(matrix.dtype)
)
return torch.bmm(a_norm.unsqueeze(dim=1), b_norm.transpose(-1, -2)).squeeze(1)
| allennlp-master | allennlp/modules/attention/cosine_attention.py |
import math
import torch
from torch.nn import Parameter
from overrides import overrides
from allennlp.modules.attention.attention import Attention
from allennlp.nn import util
from allennlp.nn.activations import Activation
@Attention.register("linear")
class LinearAttention(Attention):
"""
This `Attention` module performs a dot product between a vector of weights and some
combination of the two input vectors, followed by an (optional) activation function. The
combination used is configurable.
If the two vectors are `x` and `y`, we allow the following kinds of combinations : `x`,
`y`, `x*y`, `x+y`, `x-y`, `x/y`, where each of those binary operations is performed
elementwise. You can list as many combinations as you want, comma separated. For example, you
might give `x,y,x*y` as the `combination` parameter to this class. The computed similarity
function would then be `w^T [x; y; x*y] + b`, where `w` is a vector of weights, `b` is a
bias parameter, and `[;]` is vector concatenation.
Note that if you want a bilinear similarity function with a diagonal weight matrix W, where the
similarity function is computed as `x * w * y + b` (with `w` the diagonal of `W`), you can
accomplish that with this class by using "x*y" for `combination`.
Registered as an `Attention` with name "linear".
# Parameters
tensor_1_dim : `int`, required
The dimension of the first tensor, `x`, described above. This is `x.size()[-1]` - the
length of the vector that will go into the similarity computation. We need this so we can
build weight vectors correctly.
tensor_2_dim : `int`, required
The dimension of the second tensor, `y`, described above. This is `y.size()[-1]` - the
length of the vector that will go into the similarity computation. We need this so we can
build weight vectors correctly.
combination : `str`, optional (default=`"x,y"`)
Described above.
activation : `Activation`, optional (default=`linear`)
An activation function applied after the `w^T * [x;y] + b` calculation. Default is
linear, i.e. no activation.
normalize : `bool`, optional (default=`True`)
"""
def __init__(
self,
tensor_1_dim: int,
tensor_2_dim: int,
combination: str = "x,y",
activation: Activation = None,
normalize: bool = True,
) -> None:
super().__init__(normalize)
self._combination = combination
combined_dim = util.get_combined_dim(combination, [tensor_1_dim, tensor_2_dim])
self._weight_vector = Parameter(torch.Tensor(combined_dim))
self._bias = Parameter(torch.Tensor(1))
self._activation = activation or Activation.by_name("linear")()
self.reset_parameters()
def reset_parameters(self):
std = math.sqrt(6 / (self._weight_vector.size(0) + 1))
self._weight_vector.data.uniform_(-std, std)
self._bias.data.fill_(0)
@overrides
def _forward_internal(self, vector: torch.Tensor, matrix: torch.Tensor) -> torch.Tensor:
combined_tensors = util.combine_tensors_and_multiply(
self._combination, [vector.unsqueeze(1), matrix], self._weight_vector
)
return self._activation(combined_tensors.squeeze(1) + self._bias)
| allennlp-master | allennlp/modules/attention/linear_attention.py |
from overrides import overrides
import torch
from torch.nn.parameter import Parameter
from allennlp.modules.attention.attention import Attention
@Attention.register("additive")
class AdditiveAttention(Attention):
"""
Computes attention between a vector and a matrix using an additive attention function. This
function has two matrices `W`, `U` and a vector `V`. The similarity between the vector
`x` and the matrix `y` is computed as `V tanh(Wx + Uy)`.
This attention is often referred as concat or additive attention. It was introduced in
<https://arxiv.org/abs/1409.0473> by Bahdanau et al.
Registered as an `Attention` with name "additive".
# Parameters
vector_dim : `int`, required
The dimension of the vector, `x`, described above. This is `x.size()[-1]` - the length
of the vector that will go into the similarity computation. We need this so we can build
the weight matrix correctly.
matrix_dim : `int`, required
The dimension of the matrix, `y`, described above. This is `y.size()[-1]` - the length
of the vector that will go into the similarity computation. We need this so we can build
the weight matrix correctly.
normalize : `bool`, optional (default = `True`)
If true, we normalize the computed similarities with a softmax, to return a probability
distribution for your attention. If false, this is just computing a similarity score.
"""
def __init__(self, vector_dim: int, matrix_dim: int, normalize: bool = True) -> None:
super().__init__(normalize)
self._w_matrix = Parameter(torch.Tensor(vector_dim, vector_dim))
self._u_matrix = Parameter(torch.Tensor(matrix_dim, vector_dim))
self._v_vector = Parameter(torch.Tensor(vector_dim, 1))
self.reset_parameters()
def reset_parameters(self):
torch.nn.init.xavier_uniform_(self._w_matrix)
torch.nn.init.xavier_uniform_(self._u_matrix)
torch.nn.init.xavier_uniform_(self._v_vector)
@overrides
def _forward_internal(self, vector: torch.Tensor, matrix: torch.Tensor) -> torch.Tensor:
intermediate = vector.matmul(self._w_matrix).unsqueeze(1) + matrix.matmul(self._u_matrix)
intermediate = torch.tanh(intermediate)
return intermediate.matmul(self._v_vector).squeeze(2)
| allennlp-master | allennlp/modules/attention/additive_attention.py |
from typing import Dict
import inspect
import torch
from overrides import overrides
from allennlp.common.checks import ConfigurationError
from allennlp.data import TextFieldTensors
from allennlp.modules.text_field_embedders.text_field_embedder import TextFieldEmbedder
from allennlp.modules.time_distributed import TimeDistributed
from allennlp.modules.token_embedders.token_embedder import TokenEmbedder
from allennlp.modules.token_embedders import EmptyEmbedder
@TextFieldEmbedder.register("basic")
class BasicTextFieldEmbedder(TextFieldEmbedder):
"""
This is a `TextFieldEmbedder` that wraps a collection of
[`TokenEmbedder`](../token_embedders/token_embedder.md) objects. Each
`TokenEmbedder` embeds or encodes the representation output from one
[`allennlp.data.TokenIndexer`](../../data/token_indexers/token_indexer.md). As the data produced by a
[`allennlp.data.fields.TextField`](../../data/fields/text_field.md) is a dictionary mapping names to these
representations, we take `TokenEmbedders` with corresponding names. Each `TokenEmbedders`
embeds its input, and the result is concatenated in an arbitrary (but consistent) order.
Registered as a `TextFieldEmbedder` with name "basic", which is also the default.
# Parameters
token_embedders : `Dict[str, TokenEmbedder]`, required.
A dictionary mapping token embedder names to implementations.
These names should match the corresponding indexer used to generate
the tensor passed to the TokenEmbedder.
"""
def __init__(self, token_embedders: Dict[str, TokenEmbedder]) -> None:
super().__init__()
# NOTE(mattg): I'd prefer to just use ModuleDict(token_embedders) here, but that changes
# weight locations in torch state dictionaries and invalidates all prior models, just for a
# cosmetic change in the code.
self._token_embedders = token_embedders
for key, embedder in token_embedders.items():
name = "token_embedder_%s" % key
self.add_module(name, embedder)
self._ordered_embedder_keys = sorted(self._token_embedders.keys())
@overrides
def get_output_dim(self) -> int:
output_dim = 0
for embedder in self._token_embedders.values():
output_dim += embedder.get_output_dim()
return output_dim
def forward(
self, text_field_input: TextFieldTensors, num_wrapping_dims: int = 0, **kwargs
) -> torch.Tensor:
if sorted(self._token_embedders.keys()) != sorted(text_field_input.keys()):
message = "Mismatched token keys: %s and %s" % (
str(self._token_embedders.keys()),
str(text_field_input.keys()),
)
embedder_keys = set(self._token_embedders.keys())
input_keys = set(text_field_input.keys())
if embedder_keys > input_keys and all(
isinstance(embedder, EmptyEmbedder)
for name, embedder in self._token_embedders.items()
if name in embedder_keys - input_keys
):
# Allow extra embedders that are only in the token embedders (but not input) and are empty to pass
# config check
pass
else:
raise ConfigurationError(message)
embedded_representations = []
for key in self._ordered_embedder_keys:
# Note: need to use getattr here so that the pytorch voodoo
# with submodules works with multiple GPUs.
embedder = getattr(self, "token_embedder_{}".format(key))
if isinstance(embedder, EmptyEmbedder):
# Skip empty embedders
continue
forward_params = inspect.signature(embedder.forward).parameters
forward_params_values = {}
missing_tensor_args = set()
for param in forward_params.keys():
if param in kwargs:
forward_params_values[param] = kwargs[param]
else:
missing_tensor_args.add(param)
for _ in range(num_wrapping_dims):
embedder = TimeDistributed(embedder)
tensors: Dict[str, torch.Tensor] = text_field_input[key]
if len(tensors) == 1 and len(missing_tensor_args) == 1:
# If there's only one tensor argument to the embedder, and we just have one tensor to
# embed, we can just pass in that tensor, without requiring a name match.
token_vectors = embedder(list(tensors.values())[0], **forward_params_values)
else:
# If there are multiple tensor arguments, we have to require matching names from the
# TokenIndexer. I don't think there's an easy way around that.
token_vectors = embedder(**tensors, **forward_params_values)
if token_vectors is not None:
# To handle some very rare use cases, we allow the return value of the embedder to
# be None; we just skip it in that case.
embedded_representations.append(token_vectors)
return torch.cat(embedded_representations, dim=-1)
| allennlp-master | allennlp/modules/text_field_embedders/basic_text_field_embedder.py |
"""
A `TextFieldEmbedder` is a `Module` that takes as input the `dict` of NumPy arrays
produced by a `TextField` and returns as output an embedded representation of the tokens in that field.
"""
from allennlp.modules.text_field_embedders.text_field_embedder import TextFieldEmbedder
from allennlp.modules.text_field_embedders.basic_text_field_embedder import BasicTextFieldEmbedder
| allennlp-master | allennlp/modules/text_field_embedders/__init__.py |
import torch
from allennlp.common import Registrable
from allennlp.data import TextFieldTensors
class TextFieldEmbedder(torch.nn.Module, Registrable):
"""
A `TextFieldEmbedder` is a `Module` that takes as input the
[`DataArray`](../../data/fields/text_field.md) produced by a [`TextField`](../../data/fields/text_field.md) and
returns as output an embedded representation of the tokens in that field.
The `DataArrays` produced by `TextFields` are _dictionaries_ with named representations, like
"words" and "characters". When you create a `TextField`, you pass in a dictionary of
[`TokenIndexer`](../../data/token_indexers/token_indexer.md) objects, telling the field how exactly the
tokens in the field should be represented. This class changes the type signature of `Module.forward`,
restricting `TextFieldEmbedders` to take inputs corresponding to a single `TextField`, which is
a dictionary of tensors with the same names as were passed to the `TextField`.
We also add a method to the basic `Module` API: `get_output_dim()`. You might need this
if you want to construct a `Linear` layer using the output of this embedder, for instance.
"""
default_implementation = "basic"
def forward(
self, text_field_input: TextFieldTensors, num_wrapping_dims: int = 0, **kwargs
) -> torch.Tensor:
"""
# Parameters
text_field_input : `TextFieldTensors`
A dictionary that was the output of a call to `TextField.as_tensor`. Each tensor in
here is assumed to have a shape roughly similar to `(batch_size, sequence_length)`
(perhaps with an extra trailing dimension for the characters in each token).
num_wrapping_dims : `int`, optional (default=`0`)
If you have a `ListField[TextField]` that created the `text_field_input`, you'll
end up with tensors of shape `(batch_size, wrapping_dim1, wrapping_dim2, ...,
sequence_length)`. This parameter tells us how many wrapping dimensions there are, so
that we can correctly `TimeDistribute` the embedding of each named representation.
"""
raise NotImplementedError
def get_output_dim(self) -> int:
"""
Returns the dimension of the vector representing each token in the output of this
`TextFieldEmbedder`. This is _not_ the shape of the returned tensor, but the last element
of that shape.
"""
raise NotImplementedError
| allennlp-master | allennlp/modules/text_field_embedders/text_field_embedder.py |
from overrides import overrides
import torch
from allennlp.modules.seq2vec_encoders.seq2vec_encoder import Seq2VecEncoder
from allennlp.nn.util import get_lengths_from_binary_sequence_mask
@Seq2VecEncoder.register("boe")
@Seq2VecEncoder.register("bag_of_embeddings")
class BagOfEmbeddingsEncoder(Seq2VecEncoder):
"""
A `BagOfEmbeddingsEncoder` is a simple [`Seq2VecEncoder`](./seq2vec_encoder.md) which simply sums
the embeddings of a sequence across the time dimension. The input to this module is of shape
`(batch_size, num_tokens, embedding_dim)`, and the output is of shape `(batch_size, embedding_dim)`.
Registered as a `Seq2VecEncoder` with name "bag_of_embeddings" and "boe".
# Parameters
embedding_dim : `int`, required
This is the input dimension to the encoder.
averaged : `bool`, optional (default=`False`)
If `True`, this module will average the embeddings across time, rather than simply summing
(ie. we will divide the summed embeddings by the length of the sentence).
"""
def __init__(self, embedding_dim: int, averaged: bool = False) -> None:
super().__init__()
self._embedding_dim = embedding_dim
self._averaged = averaged
@overrides
def get_input_dim(self) -> int:
return self._embedding_dim
@overrides
def get_output_dim(self) -> int:
return self._embedding_dim
def forward(self, tokens: torch.Tensor, mask: torch.BoolTensor = None):
if mask is not None:
tokens = tokens * mask.unsqueeze(-1)
# Our input has shape `(batch_size, num_tokens, embedding_dim)`, so we sum out the `num_tokens`
# dimension.
summed = tokens.sum(1)
if self._averaged:
if mask is not None:
lengths = get_lengths_from_binary_sequence_mask(mask)
length_mask = lengths > 0
# Set any length 0 to 1, to avoid dividing by zero.
lengths = torch.max(lengths, lengths.new_ones(1))
else:
lengths = tokens.new_full((1,), fill_value=tokens.size(1))
length_mask = None
summed = summed / lengths.unsqueeze(-1).float()
if length_mask is not None:
summed = summed * (length_mask > 0).unsqueeze(-1)
return summed
| allennlp-master | allennlp/modules/seq2vec_encoders/boe_encoder.py |
from typing import Sequence, Dict, List, Callable
import torch
import numpy as np
from allennlp.common.checks import ConfigurationError
from allennlp.modules.layer_norm import LayerNorm
from allennlp.modules.highway import Highway
from allennlp.modules.seq2vec_encoders.seq2vec_encoder import Seq2VecEncoder
_VALID_PROJECTION_LOCATIONS = {"after_cnn", "after_highway", None}
@Seq2VecEncoder.register("cnn-highway")
class CnnHighwayEncoder(Seq2VecEncoder):
"""
The character CNN + highway encoder from
[Kim et al "Character aware neural language models"](https://arxiv.org/abs/1508.06615)
with an optional projection.
Registered as a `Seq2VecEncoder` with name "cnn-highway".
# Parameters
embedding_dim : `int`, required
The dimension of the initial character embedding.
filters : `Sequence[Sequence[int]]`, required
A sequence of pairs (filter_width, num_filters).
num_highway : `int`, required
The number of highway layers.
projection_dim : `int`, required
The output dimension of the projection layer.
activation : `str`, optional (default = `'relu'`)
The activation function for the convolutional layers.
projection_location : `str`, optional (default = `'after_highway'`)
Where to apply the projection layer. Valid values are
'after_highway', 'after_cnn', and None.
"""
def __init__(
self,
embedding_dim: int,
filters: Sequence[Sequence[int]],
num_highway: int,
projection_dim: int,
activation: str = "relu",
projection_location: str = "after_highway",
do_layer_norm: bool = False,
) -> None:
super().__init__()
if projection_location not in _VALID_PROJECTION_LOCATIONS:
raise ConfigurationError(f"unknown projection location: {projection_location}")
self.input_dim = embedding_dim
self.output_dim = projection_dim
self._projection_location = projection_location
if activation == "tanh":
self._activation = torch.nn.functional.tanh
elif activation == "relu":
self._activation = torch.nn.functional.relu
else:
raise ConfigurationError(f"unknown activation {activation}")
# Create the convolutions
self._convolutions: List[torch.nn.Module] = []
for i, (width, num) in enumerate(filters):
conv = torch.nn.Conv1d(
in_channels=embedding_dim, out_channels=num, kernel_size=width, bias=True
)
conv.weight.data.uniform_(-0.05, 0.05)
conv.bias.data.fill_(0.0)
self.add_module(f"char_conv_{i}", conv) # needs to match the old ELMo name
self._convolutions.append(conv)
# Create the highway layers
num_filters = sum(num for _, num in filters)
if projection_location == "after_cnn":
highway_dim = projection_dim
else:
# highway_dim is the number of cnn filters
highway_dim = num_filters
self._highways = Highway(highway_dim, num_highway, activation=torch.nn.functional.relu)
for highway_layer in self._highways._layers:
# highway is a linear layer for each highway layer
# with fused W and b weights
highway_layer.weight.data.normal_(mean=0.0, std=np.sqrt(1.0 / highway_dim))
highway_layer.bias[:highway_dim].data.fill_(0.0)
highway_layer.bias[highway_dim:].data.fill_(2.0)
# Projection layer: always num_filters -> projection_dim
self._projection = torch.nn.Linear(num_filters, projection_dim, bias=True)
self._projection.weight.data.normal_(mean=0.0, std=np.sqrt(1.0 / num_filters))
self._projection.bias.data.fill_(0.0)
# And add a layer norm
if do_layer_norm:
self._layer_norm: Callable = LayerNorm(self.output_dim)
else:
self._layer_norm = lambda tensor: tensor
def forward(self, inputs: torch.Tensor, mask: torch.BoolTensor) -> Dict[str, torch.Tensor]:
"""
Compute context insensitive token embeddings for ELMo representations.
# Parameters
inputs: `torch.Tensor`
Shape `(batch_size, num_characters, embedding_dim)`
Character embeddings representing the current batch.
mask: `torch.BoolTensor`
Shape `(batch_size, num_characters)`
Currently unused. The mask for characters is implicit. See TokenCharactersEncoder.forward.
# Returns
`encoding`:
Shape `(batch_size, projection_dim)` tensor with context-insensitive token representations.
"""
# convolutions want (batch_size, embedding_dim, num_characters)
inputs = inputs.transpose(1, 2)
convolutions = []
for i in range(len(self._convolutions)):
char_conv_i = getattr(self, f"char_conv_{i}")
convolved = char_conv_i(inputs)
# (batch_size, n_filters for this width)
convolved, _ = torch.max(convolved, dim=-1)
convolved = self._activation(convolved)
convolutions.append(convolved)
# (batch_size, n_filters)
token_embedding = torch.cat(convolutions, dim=-1)
if self._projection_location == "after_cnn":
token_embedding = self._projection(token_embedding)
# apply the highway layers (batch_size, highway_dim)
token_embedding = self._highways(token_embedding)
if self._projection_location == "after_highway":
# final projection (batch_size, projection_dim)
token_embedding = self._projection(token_embedding)
# Apply layer norm if appropriate
token_embedding = self._layer_norm(token_embedding)
return token_embedding
def get_input_dim(self) -> int:
return self.input_dim
def get_output_dim(self) -> int:
return self.output_dim
| allennlp-master | allennlp/modules/seq2vec_encoders/cnn_highway_encoder.py |
from overrides import overrides
import torch.nn
from allennlp.modules.seq2vec_encoders.seq2vec_encoder import Seq2VecEncoder
from allennlp.nn.util import get_final_encoder_states
@Seq2VecEncoder.register("cls_pooler")
class ClsPooler(Seq2VecEncoder):
"""
Just takes the first vector from a list of vectors (which in a transformer is typically the
[CLS] token) and returns it. For BERT, it's recommended to use `BertPooler` instead.
Registered as a `Seq2VecEncoder` with name "cls_pooler".
# Parameters
embedding_dim: `int`
This isn't needed for any computation that we do, but we sometimes rely on `get_input_dim`
and `get_output_dim` to check parameter settings, or to instantiate final linear layers. In
order to give the right values there, we need to know the embedding dimension. If you're
using this with a transformer from the `transformers` library, this can often be found with
`model.config.hidden_size`, if you're not sure.
cls_is_last_token: `bool`, optional
The [CLS] token is the first token for most of the pretrained transformer models.
For some models such as XLNet, however, it is the last token, and we therefore need to
select at the end.
"""
def __init__(self, embedding_dim: int, cls_is_last_token: bool = False):
super().__init__()
self._embedding_dim = embedding_dim
self._cls_is_last_token = cls_is_last_token
@overrides
def get_input_dim(self) -> int:
return self._embedding_dim
@overrides
def get_output_dim(self) -> int:
return self._embedding_dim
@overrides
def forward(self, tokens: torch.Tensor, mask: torch.BoolTensor = None):
# tokens is assumed to have shape (batch_size, sequence_length, embedding_dim).
# mask is assumed to have shape (batch_size, sequence_length) with all 1s preceding all 0s.
if not self._cls_is_last_token:
return tokens[:, 0, :]
else: # [CLS] at the end
if mask is None:
raise ValueError("Must provide mask for transformer models with [CLS] at the end.")
return get_final_encoder_states(tokens, mask)
| allennlp-master | allennlp/modules/seq2vec_encoders/cls_pooler.py |
from allennlp.modules.encoder_base import _EncoderBase
from allennlp.common import Registrable
class Seq2VecEncoder(_EncoderBase, Registrable):
"""
A `Seq2VecEncoder` is a `Module` that takes as input a sequence of vectors and returns a
single vector. Input shape : `(batch_size, sequence_length, input_dim)`; output shape:
`(batch_size, output_dim)`.
We add two methods to the basic `Module` API: `get_input_dim()` and `get_output_dim()`.
You might need this if you want to construct a `Linear` layer using the output of this encoder,
or to raise sensible errors for mis-matching input dimensions.
"""
def get_input_dim(self) -> int:
"""
Returns the dimension of the vector input for each element in the sequence input
to a `Seq2VecEncoder`. This is `not` the shape of the input tensor, but the
last element of that shape.
"""
raise NotImplementedError
def get_output_dim(self) -> int:
"""
Returns the dimension of the final vector output by this `Seq2VecEncoder`. This is `not`
the shape of the returned tensor, but the last element of that shape.
"""
raise NotImplementedError
| allennlp-master | allennlp/modules/seq2vec_encoders/seq2vec_encoder.py |
from typing import Optional, Tuple
from overrides import overrides
import torch
from torch.nn import Conv1d, Linear
from allennlp.modules.seq2vec_encoders.seq2vec_encoder import Seq2VecEncoder
from allennlp.nn import Activation
from allennlp.nn.util import min_value_of_dtype
@Seq2VecEncoder.register("cnn")
class CnnEncoder(Seq2VecEncoder):
"""
A `CnnEncoder` is a combination of multiple convolution layers and max pooling layers. As a
[`Seq2VecEncoder`](./seq2vec_encoder.md), the input to this module is of shape `(batch_size, num_tokens,
input_dim)`, and the output is of shape `(batch_size, output_dim)`.
The CNN has one convolution layer for each ngram filter size. Each convolution operation gives
out a vector of size num_filters. The number of times a convolution layer will be used
is `num_tokens - ngram_size + 1`. The corresponding maxpooling layer aggregates all these
outputs from the convolution layer and outputs the max.
This operation is repeated for every ngram size passed, and consequently the dimensionality of
the output after maxpooling is `len(ngram_filter_sizes) * num_filters`. This then gets
(optionally) projected down to a lower dimensional output, specified by `output_dim`.
We then use a fully connected layer to project in back to the desired output_dim. For more
details, refer to "A Sensitivity Analysis of (and Practitioners’ Guide to) Convolutional Neural
Networks for Sentence Classification", Zhang and Wallace 2016, particularly Figure 1.
Registered as a `Seq2VecEncoder` with name "cnn".
# Parameters
embedding_dim : `int`, required
This is the input dimension to the encoder. We need this because we can't do shape
inference in pytorch, and we need to know what size filters to construct in the CNN.
num_filters : `int`, required
This is the output dim for each convolutional layer, which is the number of "filters"
learned by that layer.
ngram_filter_sizes : `Tuple[int]`, optional (default=`(2, 3, 4, 5)`)
This specifies both the number of convolutional layers we will create and their sizes. The
default of `(2, 3, 4, 5)` will have four convolutional layers, corresponding to encoding
ngrams of size 2 to 5 with some number of filters.
conv_layer_activation : `Activation`, optional (default=`torch.nn.ReLU`)
Activation to use after the convolution layers.
output_dim : `Optional[int]`, optional (default=`None`)
After doing convolutions and pooling, we'll project the collected features into a vector of
this size. If this value is `None`, we will just return the result of the max pooling,
giving an output of shape `len(ngram_filter_sizes) * num_filters`.
"""
def __init__(
self,
embedding_dim: int,
num_filters: int,
ngram_filter_sizes: Tuple[int, ...] = (2, 3, 4, 5),
conv_layer_activation: Activation = None,
output_dim: Optional[int] = None,
) -> None:
super().__init__()
self._embedding_dim = embedding_dim
self._num_filters = num_filters
self._ngram_filter_sizes = ngram_filter_sizes
self._activation = conv_layer_activation or Activation.by_name("relu")()
self._convolution_layers = [
Conv1d(
in_channels=self._embedding_dim,
out_channels=self._num_filters,
kernel_size=ngram_size,
)
for ngram_size in self._ngram_filter_sizes
]
for i, conv_layer in enumerate(self._convolution_layers):
self.add_module("conv_layer_%d" % i, conv_layer)
maxpool_output_dim = self._num_filters * len(self._ngram_filter_sizes)
if output_dim:
self.projection_layer = Linear(maxpool_output_dim, output_dim)
self._output_dim = output_dim
else:
self.projection_layer = None
self._output_dim = maxpool_output_dim
@overrides
def get_input_dim(self) -> int:
return self._embedding_dim
@overrides
def get_output_dim(self) -> int:
return self._output_dim
def forward(self, tokens: torch.Tensor, mask: torch.BoolTensor):
if mask is not None:
tokens = tokens * mask.unsqueeze(-1)
else:
# If mask doesn't exist create one of shape (batch_size, num_tokens)
mask = torch.ones(tokens.shape[0], tokens.shape[1], device=tokens.device).bool()
# Our input is expected to have shape `(batch_size, num_tokens, embedding_dim)`. The
# convolution layers expect input of shape `(batch_size, in_channels, sequence_length)`,
# where the conv layer `in_channels` is our `embedding_dim`. We thus need to transpose the
# tensor first.
tokens = torch.transpose(tokens, 1, 2)
# Each convolution layer returns output of size `(batch_size, num_filters, pool_length)`,
# where `pool_length = num_tokens - ngram_size + 1`. We then do an activation function,
# masking, then do max pooling over each filter for the whole input sequence.
# Because our max pooling is simple, we just use `torch.max`. The resultant tensor has shape
# `(batch_size, num_conv_layers * num_filters)`, which then gets projected using the
# projection layer, if requested.
# To ensure the cnn_encoder respects masking we add a large negative value to
# the activations of all filters that convolved over a masked token. We do this by
# first enumerating all filters for a given convolution size (torch.arange())
# then by comparing it to an index of the last filter that does not involve a masked
# token (.ge()) and finally adjusting dimensions to allow for addition and multiplying
# by a large negative value (.unsqueeze())
filter_outputs = []
batch_size = tokens.shape[0]
# shape: (batch_size, 1)
last_unmasked_tokens = mask.sum(dim=1).unsqueeze(dim=-1)
for i in range(len(self._convolution_layers)):
convolution_layer = getattr(self, "conv_layer_{}".format(i))
pool_length = tokens.shape[2] - convolution_layer.kernel_size[0] + 1
# Forward pass of the convolutions.
# shape: (batch_size, num_filters, pool_length)
activations = self._activation(convolution_layer(tokens))
# Create activation mask.
# shape: (batch_size, pool_length)
indices = (
torch.arange(pool_length, device=activations.device)
.unsqueeze(0)
.expand(batch_size, pool_length)
)
# shape: (batch_size, pool_length)
activations_mask = indices.ge(
last_unmasked_tokens - convolution_layer.kernel_size[0] + 1
)
# shape: (batch_size, num_filters, pool_length)
activations_mask = activations_mask.unsqueeze(1).expand_as(activations)
# Replace masked out values with smallest possible value of the dtype so
# that max pooling will ignore these activations.
# shape: (batch_size, pool_length)
activations = activations + (activations_mask * min_value_of_dtype(activations.dtype))
# Pick out the max filters
filter_outputs.append(activations.max(dim=2)[0])
# Now we have a list of `num_conv_layers` tensors of shape `(batch_size, num_filters)`.
# Concatenating them gives us a tensor of shape `(batch_size, num_filters * num_conv_layers)`.
maxpool_output = (
torch.cat(filter_outputs, dim=1) if len(filter_outputs) > 1 else filter_outputs[0]
)
# Replace the maxpool activations that picked up the masks with 0s
maxpool_output[maxpool_output == min_value_of_dtype(maxpool_output.dtype)] = 0.0
if self.projection_layer:
result = self.projection_layer(maxpool_output)
else:
result = maxpool_output
return result
| allennlp-master | allennlp/modules/seq2vec_encoders/cnn_encoder.py |
from typing import Optional, Dict, Any
from overrides import overrides
import torch
import torch.nn
from allennlp.modules.seq2vec_encoders.seq2vec_encoder import Seq2VecEncoder
@Seq2VecEncoder.register("bert_pooler")
class BertPooler(Seq2VecEncoder):
"""
The pooling layer at the end of the BERT model. This returns an embedding for the
[CLS] token, after passing it through a non-linear tanh activation; the non-linear layer
is also part of the BERT model. If you want to use the pretrained BERT model
to build a classifier and you want to use the AllenNLP token-indexer ->
token-embedder -> seq2vec encoder setup, this is the Seq2VecEncoder to use.
(For example, if you want to experiment with other embedding / encoding combinations.)
Registered as a `Seq2VecEncoder` with name "bert_pooler".
# Parameters
pretrained_model : `Union[str, BertModel]`, required
The pretrained BERT model to use. If this is a string,
we will call `transformers.AutoModel.from_pretrained(pretrained_model)`
and use that.
requires_grad : `bool`, optional, (default = `True`)
If True, the weights of the pooler will be updated during training.
Otherwise they will not.
dropout : `float`, optional, (default = `0.0`)
Amount of dropout to apply after pooling
transformer_kwargs: `Dict[str, Any]`, optional (default = `None`)
Dictionary with
[additional arguments](https://github.com/huggingface/transformers/blob/155c782a2ccd103cf63ad48a2becd7c76a7d2115/transformers/modeling_utils.py#L253)
for `AutoModel.from_pretrained`.
""" # noqa: E501
def __init__(
self,
pretrained_model: str,
*,
override_weights_file: Optional[str] = None,
override_weights_strip_prefix: Optional[str] = None,
requires_grad: bool = True,
dropout: float = 0.0,
transformer_kwargs: Optional[Dict[str, Any]] = None,
) -> None:
super().__init__()
from allennlp.common import cached_transformers
model = cached_transformers.get(
pretrained_model,
False,
override_weights_file,
override_weights_strip_prefix,
**(transformer_kwargs or {}),
)
self._dropout = torch.nn.Dropout(p=dropout)
import copy
self.pooler = copy.deepcopy(model.pooler)
for param in self.pooler.parameters():
param.requires_grad = requires_grad
self._embedding_dim = model.config.hidden_size
@overrides
def get_input_dim(self) -> int:
return self._embedding_dim
@overrides
def get_output_dim(self) -> int:
return self._embedding_dim
def forward(
self, tokens: torch.Tensor, mask: torch.BoolTensor = None, num_wrapping_dims: int = 0
):
pooler = self.pooler
for _ in range(num_wrapping_dims):
from allennlp.modules import TimeDistributed
pooler = TimeDistributed(pooler)
pooled = pooler(tokens)
pooled = self._dropout(pooled)
return pooled
| allennlp-master | allennlp/modules/seq2vec_encoders/bert_pooler.py |
"""
Modules that transform a sequence of input vectors
into a single output vector.
Some are just basic wrappers around existing PyTorch modules,
others are AllenNLP modules.
The available Seq2Vec encoders are
* `"gru"` https://pytorch.org/docs/master/nn.html#torch.nn.GRU
* `"lstm"` https://pytorch.org/docs/master/nn.html#torch.nn.LSTM
* `"rnn"` https://pytorch.org/docs/master/nn.html#torch.nn.RNN
* `"cnn"` allennlp.modules.seq2vec_encoders.cnn_encoder.CnnEncoder
* `"augmented_lstm"` allennlp.modules.augmented_lstm.AugmentedLstm
* `"alternating_lstm"` allennlp.modules.stacked_alternating_lstm.StackedAlternatingLstm
* `"stacked_bidirectional_lstm"` allennlp.modules.stacked_bidirectional_lstm.StackedBidirectionalLstm
"""
from allennlp.modules.seq2vec_encoders.bert_pooler import BertPooler
from allennlp.modules.seq2vec_encoders.boe_encoder import BagOfEmbeddingsEncoder
from allennlp.modules.seq2vec_encoders.cls_pooler import ClsPooler
from allennlp.modules.seq2vec_encoders.cnn_encoder import CnnEncoder
from allennlp.modules.seq2vec_encoders.cnn_highway_encoder import CnnHighwayEncoder
from allennlp.modules.seq2vec_encoders.pytorch_seq2vec_wrapper import (
AugmentedLstmSeq2VecEncoder,
GruSeq2VecEncoder,
LstmSeq2VecEncoder,
PytorchSeq2VecWrapper,
RnnSeq2VecEncoder,
StackedAlternatingLstmSeq2VecEncoder,
StackedBidirectionalLstmSeq2VecEncoder,
)
from allennlp.modules.seq2vec_encoders.seq2vec_encoder import Seq2VecEncoder
| allennlp-master | allennlp/modules/seq2vec_encoders/__init__.py |
import torch
from allennlp.common.checks import ConfigurationError
from allennlp.modules.augmented_lstm import AugmentedLstm
from allennlp.modules.seq2vec_encoders.seq2vec_encoder import Seq2VecEncoder
from allennlp.modules.stacked_alternating_lstm import StackedAlternatingLstm
from allennlp.modules.stacked_bidirectional_lstm import StackedBidirectionalLstm
class PytorchSeq2VecWrapper(Seq2VecEncoder):
"""
Pytorch's RNNs have two outputs: the final hidden state for every time step,
and the hidden state at the last time step for every layer.
We just want the final hidden state of the last time step.
This wrapper pulls out that output, and adds a `get_output_dim` method, which is useful if you
want to, e.g., define a linear + softmax layer on top of this to get some distribution over a
set of labels. The linear layer needs to know its input dimension before it is called, and you
can get that from `get_output_dim`.
Also, there are lots of ways you could imagine going from an RNN hidden state at every
timestep to a single vector - you could take the last vector at all layers in the stack, do
some kind of pooling, take the last vector of the top layer in a stack, or many other options.
We just take the final hidden state vector, or in the case of a bidirectional RNN cell, we
concatenate the forward and backward final states together. TODO(mattg): allow for other ways
of wrapping RNNs.
In order to be wrapped with this wrapper, a class must have the following members:
- `self.input_size: int`
- `self.hidden_size: int`
- `def forward(inputs: PackedSequence, hidden_state: torch.tensor) ->
Tuple[PackedSequence, torch.Tensor]`.
- `self.bidirectional: bool` (optional)
This is what pytorch's RNN's look like - just make sure your class looks like those, and it
should work.
Note that we *require* you to pass sequence lengths when you call this module, to avoid subtle
bugs around masking. If you already have a `PackedSequence` you can pass `None` as the
second parameter.
"""
def __init__(self, module: torch.nn.modules.RNNBase) -> None:
# Seq2VecEncoders cannot be stateful.
super().__init__(stateful=False)
self._module = module
try:
if not self._module.batch_first:
raise ConfigurationError("Our encoder semantics assumes batch is always first!")
except AttributeError:
pass
def get_input_dim(self) -> int:
return self._module.input_size
def get_output_dim(self) -> int:
try:
is_bidirectional = self._module.bidirectional
except AttributeError:
is_bidirectional = False
return self._module.hidden_size * (2 if is_bidirectional else 1)
def forward(
self, inputs: torch.Tensor, mask: torch.BoolTensor, hidden_state: torch.Tensor = None
) -> torch.Tensor:
if mask is None:
# If a mask isn't passed, there is no padding in the batch of instances, so we can just
# return the last sequence output as the state. This doesn't work in the case of
# variable length sequences, as the last state for each element of the batch won't be
# at the end of the max sequence length, so we have to use the state of the RNN below.
return self._module(inputs, hidden_state)[0][:, -1, :]
batch_size = mask.size(0)
(
_,
state,
restoration_indices,
) = self.sort_and_run_forward(self._module, inputs, mask, hidden_state)
# Deal with the fact the LSTM state is a tuple of (state, memory).
if isinstance(state, tuple):
state = state[0]
num_layers_times_directions, num_valid, encoding_dim = state.size()
# Add back invalid rows.
if num_valid < batch_size:
# batch size is the second dimension here, because pytorch
# returns RNN state as a tensor of shape (num_layers * num_directions,
# batch_size, hidden_size)
zeros = state.new_zeros(
num_layers_times_directions, batch_size - num_valid, encoding_dim
)
state = torch.cat([state, zeros], 1)
# Restore the original indices and return the final state of the
# top layer. Pytorch's recurrent layers return state in the form
# (num_layers * num_directions, batch_size, hidden_size) regardless
# of the 'batch_first' flag, so we transpose, extract the relevant
# layer state (both forward and backward if using bidirectional layers)
# and return them as a single (batch_size, self.get_output_dim()) tensor.
# now of shape: (batch_size, num_layers * num_directions, hidden_size).
unsorted_state = state.transpose(0, 1).index_select(0, restoration_indices)
# Extract the last hidden vector, including both forward and backward states
# if the cell is bidirectional. Then reshape by concatenation (in the case
# we have bidirectional states) or just squash the 1st dimension in the non-
# bidirectional case. Return tensor has shape (batch_size, hidden_size * num_directions).
try:
last_state_index = 2 if self._module.bidirectional else 1
except AttributeError:
last_state_index = 1
last_layer_state = unsorted_state[:, -last_state_index:, :]
return last_layer_state.contiguous().view([-1, self.get_output_dim()])
@Seq2VecEncoder.register("gru")
class GruSeq2VecEncoder(PytorchSeq2VecWrapper):
"""
Registered as a `Seq2VecEncoder` with name "gru".
"""
def __init__(
self,
input_size: int,
hidden_size: int,
num_layers: int = 1,
bias: bool = True,
dropout: float = 0.0,
bidirectional: bool = False,
):
module = torch.nn.GRU(
input_size=input_size,
hidden_size=hidden_size,
num_layers=num_layers,
bias=bias,
batch_first=True,
dropout=dropout,
bidirectional=bidirectional,
)
super().__init__(module=module)
@Seq2VecEncoder.register("lstm")
class LstmSeq2VecEncoder(PytorchSeq2VecWrapper):
"""
Registered as a `Seq2VecEncoder` with name "lstm".
"""
def __init__(
self,
input_size: int,
hidden_size: int,
num_layers: int = 1,
bias: bool = True,
dropout: float = 0.0,
bidirectional: bool = False,
):
module = torch.nn.LSTM(
input_size=input_size,
hidden_size=hidden_size,
num_layers=num_layers,
bias=bias,
batch_first=True,
dropout=dropout,
bidirectional=bidirectional,
)
super().__init__(module=module)
@Seq2VecEncoder.register("rnn")
class RnnSeq2VecEncoder(PytorchSeq2VecWrapper):
"""
Registered as a `Seq2VecEncoder` with name "rnn".
"""
def __init__(
self,
input_size: int,
hidden_size: int,
num_layers: int = 1,
nonlinearity: str = "tanh",
bias: bool = True,
dropout: float = 0.0,
bidirectional: bool = False,
):
module = torch.nn.RNN(
input_size=input_size,
hidden_size=hidden_size,
num_layers=num_layers,
nonlinearity=nonlinearity,
bias=bias,
batch_first=True,
dropout=dropout,
bidirectional=bidirectional,
)
super().__init__(module=module)
@Seq2VecEncoder.register("augmented_lstm")
class AugmentedLstmSeq2VecEncoder(PytorchSeq2VecWrapper):
"""
Registered as a `Seq2VecEncoder` with name "augmented_lstm".
"""
def __init__(
self,
input_size: int,
hidden_size: int,
go_forward: bool = True,
recurrent_dropout_probability: float = 0.0,
use_highway: bool = True,
use_input_projection_bias: bool = True,
) -> None:
module = AugmentedLstm(
input_size=input_size,
hidden_size=hidden_size,
go_forward=go_forward,
recurrent_dropout_probability=recurrent_dropout_probability,
use_highway=use_highway,
use_input_projection_bias=use_input_projection_bias,
)
super().__init__(module=module)
@Seq2VecEncoder.register("alternating_lstm")
class StackedAlternatingLstmSeq2VecEncoder(PytorchSeq2VecWrapper):
"""
Registered as a `Seq2VecEncoder` with name "alternating_lstm".
"""
def __init__(
self,
input_size: int,
hidden_size: int,
num_layers: int,
recurrent_dropout_probability: float = 0.0,
use_highway: bool = True,
use_input_projection_bias: bool = True,
) -> None:
module = StackedAlternatingLstm(
input_size=input_size,
hidden_size=hidden_size,
num_layers=num_layers,
recurrent_dropout_probability=recurrent_dropout_probability,
use_highway=use_highway,
use_input_projection_bias=use_input_projection_bias,
)
super().__init__(module=module)
@Seq2VecEncoder.register("stacked_bidirectional_lstm")
class StackedBidirectionalLstmSeq2VecEncoder(PytorchSeq2VecWrapper):
"""
Registered as a `Seq2VecEncoder` with name "stacked_bidirectional_lstm".
"""
def __init__(
self,
input_size: int,
hidden_size: int,
num_layers: int,
recurrent_dropout_probability: float = 0.0,
layer_dropout_probability: float = 0.0,
use_highway: bool = True,
) -> None:
module = StackedBidirectionalLstm(
input_size=input_size,
hidden_size=hidden_size,
num_layers=num_layers,
recurrent_dropout_probability=recurrent_dropout_probability,
layer_dropout_probability=layer_dropout_probability,
use_highway=use_highway,
)
super().__init__(module=module)
| allennlp-master | allennlp/modules/seq2vec_encoders/pytorch_seq2vec_wrapper.py |
import torch
from overrides import overrides
from allennlp.modules.span_extractors.span_extractor import SpanExtractor
from allennlp.modules.time_distributed import TimeDistributed
from allennlp.nn import util
@SpanExtractor.register("self_attentive")
class SelfAttentiveSpanExtractor(SpanExtractor):
"""
Computes span representations by generating an unnormalized attention score for each
word in the document. Spans representations are computed with respect to these
scores by normalising the attention scores for words inside the span.
Given these attention distributions over every span, this module weights the
corresponding vector representations of the words in the span by this distribution,
returning a weighted representation of each span.
Registered as a `SpanExtractor` with name "self_attentive".
# Parameters
input_dim : `int`, required.
The final dimension of the `sequence_tensor`.
# Returns
attended_text_embeddings : `torch.FloatTensor`.
A tensor of shape (batch_size, num_spans, input_dim), which each span representation
is formed by locally normalising a global attention over the sequence. The only way
in which the attention distribution differs over different spans is in the set of words
over which they are normalized.
"""
def __init__(self, input_dim: int) -> None:
super().__init__()
self._input_dim = input_dim
self._global_attention = TimeDistributed(torch.nn.Linear(input_dim, 1))
def get_input_dim(self) -> int:
return self._input_dim
def get_output_dim(self) -> int:
return self._input_dim
@overrides
def forward(
self,
sequence_tensor: torch.FloatTensor,
span_indices: torch.LongTensor,
span_indices_mask: torch.BoolTensor = None,
) -> torch.FloatTensor:
# shape (batch_size, sequence_length, 1)
global_attention_logits = self._global_attention(sequence_tensor)
# shape (batch_size, sequence_length, embedding_dim + 1)
concat_tensor = torch.cat([sequence_tensor, global_attention_logits], -1)
concat_output, span_mask = util.batched_span_select(concat_tensor, span_indices)
# Shape: (batch_size, num_spans, max_batch_span_width, embedding_dim)
span_embeddings = concat_output[:, :, :, :-1]
# Shape: (batch_size, num_spans, max_batch_span_width)
span_attention_logits = concat_output[:, :, :, -1]
# Shape: (batch_size, num_spans, max_batch_span_width)
span_attention_weights = util.masked_softmax(span_attention_logits, span_mask)
# Do a weighted sum of the embedded spans with
# respect to the normalised attention distributions.
# Shape: (batch_size, num_spans, embedding_dim)
attended_text_embeddings = util.weighted_sum(span_embeddings, span_attention_weights)
if span_indices_mask is not None:
# Above we were masking the widths of spans with respect to the max
# span width in the batch. Here we are masking the spans which were
# originally passed in as padding.
return attended_text_embeddings * span_indices_mask.unsqueeze(-1)
return attended_text_embeddings
| allennlp-master | allennlp/modules/span_extractors/self_attentive_span_extractor.py |
from typing import Optional
import torch
from overrides import overrides
from torch.nn.parameter import Parameter
from allennlp.common.checks import ConfigurationError
from allennlp.modules.span_extractors.span_extractor import SpanExtractor
from allennlp.modules.token_embedders.embedding import Embedding
from allennlp.nn import util
@SpanExtractor.register("bidirectional_endpoint")
class BidirectionalEndpointSpanExtractor(SpanExtractor):
"""
Represents spans from a bidirectional encoder as a concatenation of two different
representations of the span endpoints, one for the forward direction of the encoder
and one from the backward direction. This type of representation encodes some subtlety,
because when you consider the forward and backward directions separately, the end index
of the span for the backward direction's representation is actually the start index.
By default, this `SpanExtractor` represents spans as
`sequence_tensor[inclusive_span_end] - sequence_tensor[exclusive_span_start]`
meaning that the representation is the difference between the the last word in the span
and the word `before` the span started. Note that the start and end indices are with
respect to the direction that the RNN is going in, so for the backward direction, the
start/end indices are reversed.
Additionally, the width of the spans can be embedded and concatenated on to the
final combination.
The following other types of representation are supported for both the forward and backward
directions, assuming that `x = span_start_embeddings` and `y = span_end_embeddings`.
`x`, `y`, `x*y`, `x+y`, `x-y`, `x/y`, where each of those binary operations
is performed elementwise. You can list as many combinations as you want, comma separated.
For example, you might give `x,y,x*y` as the `combination` parameter to this class.
The computed similarity function would then be `[x; y; x*y]`, which can then be optionally
concatenated with an embedded representation of the width of the span.
Registered as a `SpanExtractor` with name "bidirectional_endpoint".
# Parameters
input_dim : `int`, required
The final dimension of the `sequence_tensor`.
forward_combination : `str`, optional (default = `"y-x"`).
The method used to combine the `forward_start_embeddings` and `forward_end_embeddings`
for the forward direction of the bidirectional representation.
See above for a full description.
backward_combination : `str`, optional (default = `"x-y"`).
The method used to combine the `backward_start_embeddings` and `backward_end_embeddings`
for the backward direction of the bidirectional representation.
See above for a full description.
num_width_embeddings : `int`, optional (default = `None`).
Specifies the number of buckets to use when representing
span width features.
span_width_embedding_dim : `int`, optional (default = `None`).
The embedding size for the span_width features.
bucket_widths : `bool`, optional (default = `False`).
Whether to bucket the span widths into log-space buckets. If `False`,
the raw span widths are used.
use_sentinels : `bool`, optional (default = `True`).
If `True`, sentinels are used to represent exclusive span indices for the elements
in the first and last positions in the sequence (as the exclusive indices for these
elements are outside of the the sequence boundary). This is not strictly necessary,
as you may know that your exclusive start and end indices are always within your sequence
representation, such as if you have appended/prepended <START> and <END> tokens to your
sequence.
"""
def __init__(
self,
input_dim: int,
forward_combination: str = "y-x",
backward_combination: str = "x-y",
num_width_embeddings: int = None,
span_width_embedding_dim: int = None,
bucket_widths: bool = False,
use_sentinels: bool = True,
) -> None:
super().__init__()
self._input_dim = input_dim
self._forward_combination = forward_combination
self._backward_combination = backward_combination
self._num_width_embeddings = num_width_embeddings
self._bucket_widths = bucket_widths
if self._input_dim % 2 != 0:
raise ConfigurationError(
"The input dimension is not divisible by 2, but the "
"BidirectionalEndpointSpanExtractor assumes the embedded representation "
"is bidirectional (and hence divisible by 2)."
)
self._span_width_embedding: Optional[Embedding] = None
if num_width_embeddings is not None and span_width_embedding_dim is not None:
self._span_width_embedding = Embedding(
num_embeddings=num_width_embeddings, embedding_dim=span_width_embedding_dim
)
elif num_width_embeddings is not None or span_width_embedding_dim is not None:
raise ConfigurationError(
"To use a span width embedding representation, you must"
"specify both num_width_buckets and span_width_embedding_dim."
)
self._use_sentinels = use_sentinels
if use_sentinels:
self._start_sentinel = Parameter(torch.randn([1, 1, int(input_dim / 2)]))
self._end_sentinel = Parameter(torch.randn([1, 1, int(input_dim / 2)]))
def get_input_dim(self) -> int:
return self._input_dim
def get_output_dim(self) -> int:
unidirectional_dim = int(self._input_dim / 2)
forward_combined_dim = util.get_combined_dim(
self._forward_combination, [unidirectional_dim, unidirectional_dim]
)
backward_combined_dim = util.get_combined_dim(
self._backward_combination, [unidirectional_dim, unidirectional_dim]
)
if self._span_width_embedding is not None:
return (
forward_combined_dim
+ backward_combined_dim
+ self._span_width_embedding.get_output_dim()
)
return forward_combined_dim + backward_combined_dim
@overrides
def forward(
self,
sequence_tensor: torch.FloatTensor,
span_indices: torch.LongTensor,
sequence_mask: torch.BoolTensor = None,
span_indices_mask: torch.BoolTensor = None,
) -> torch.FloatTensor:
# Both of shape (batch_size, sequence_length, embedding_size / 2)
forward_sequence, backward_sequence = sequence_tensor.split(
int(self._input_dim / 2), dim=-1
)
forward_sequence = forward_sequence.contiguous()
backward_sequence = backward_sequence.contiguous()
# shape (batch_size, num_spans)
span_starts, span_ends = [index.squeeze(-1) for index in span_indices.split(1, dim=-1)]
if span_indices_mask is not None:
span_starts = span_starts * span_indices_mask
span_ends = span_ends * span_indices_mask
# We want `exclusive` span starts, so we remove 1 from the forward span starts
# as the AllenNLP `SpanField` is inclusive.
# shape (batch_size, num_spans)
exclusive_span_starts = span_starts - 1
# shape (batch_size, num_spans, 1)
start_sentinel_mask = (exclusive_span_starts == -1).unsqueeze(-1)
# We want `exclusive` span ends for the backward direction
# (so that the `start` of the span in that direction is exlusive), so
# we add 1 to the span ends as the AllenNLP `SpanField` is inclusive.
exclusive_span_ends = span_ends + 1
if sequence_mask is not None:
# shape (batch_size)
sequence_lengths = util.get_lengths_from_binary_sequence_mask(sequence_mask)
else:
# shape (batch_size), filled with the sequence length size of the sequence_tensor.
sequence_lengths = torch.ones_like(
sequence_tensor[:, 0, 0], dtype=torch.long
) * sequence_tensor.size(1)
# shape (batch_size, num_spans, 1)
end_sentinel_mask = (exclusive_span_ends >= sequence_lengths.unsqueeze(-1)).unsqueeze(-1)
# As we added 1 to the span_ends to make them exclusive, which might have caused indices
# equal to the sequence_length to become out of bounds, we multiply by the inverse of the
# end_sentinel mask to erase these indices (as we will replace them anyway in the block below).
# The same argument follows for the exclusive span start indices.
exclusive_span_ends = exclusive_span_ends * ~end_sentinel_mask.squeeze(-1)
exclusive_span_starts = exclusive_span_starts * ~start_sentinel_mask.squeeze(-1)
# We'll check the indices here at runtime, because it's difficult to debug
# if this goes wrong and it's tricky to get right.
if (exclusive_span_starts < 0).any() or (
exclusive_span_ends > sequence_lengths.unsqueeze(-1)
).any():
raise ValueError(
f"Adjusted span indices must lie inside the length of the sequence tensor, "
f"but found: exclusive_span_starts: {exclusive_span_starts}, "
f"exclusive_span_ends: {exclusive_span_ends} for a sequence tensor with lengths "
f"{sequence_lengths}."
)
# Forward Direction: start indices are exclusive. Shape (batch_size, num_spans, input_size / 2)
forward_start_embeddings = util.batched_index_select(
forward_sequence, exclusive_span_starts
)
# Forward Direction: end indices are inclusive, so we can just use span_ends.
# Shape (batch_size, num_spans, input_size / 2)
forward_end_embeddings = util.batched_index_select(forward_sequence, span_ends)
# Backward Direction: The backward start embeddings use the `forward` end
# indices, because we are going backwards.
# Shape (batch_size, num_spans, input_size / 2)
backward_start_embeddings = util.batched_index_select(
backward_sequence, exclusive_span_ends
)
# Backward Direction: The backward end embeddings use the `forward` start
# indices, because we are going backwards.
# Shape (batch_size, num_spans, input_size / 2)
backward_end_embeddings = util.batched_index_select(backward_sequence, span_starts)
if self._use_sentinels:
# If we're using sentinels, we need to replace all the elements which were
# outside the dimensions of the sequence_tensor with either the start sentinel,
# or the end sentinel.
forward_start_embeddings = (
forward_start_embeddings * ~start_sentinel_mask
+ start_sentinel_mask * self._start_sentinel
)
backward_start_embeddings = (
backward_start_embeddings * ~end_sentinel_mask
+ end_sentinel_mask * self._end_sentinel
)
# Now we combine the forward and backward spans in the manner specified by the
# respective combinations and concatenate these representations.
# Shape (batch_size, num_spans, forward_combination_dim)
forward_spans = util.combine_tensors(
self._forward_combination, [forward_start_embeddings, forward_end_embeddings]
)
# Shape (batch_size, num_spans, backward_combination_dim)
backward_spans = util.combine_tensors(
self._backward_combination, [backward_start_embeddings, backward_end_embeddings]
)
# Shape (batch_size, num_spans, forward_combination_dim + backward_combination_dim)
span_embeddings = torch.cat([forward_spans, backward_spans], -1)
if self._span_width_embedding is not None:
# Embed the span widths and concatenate to the rest of the representations.
if self._bucket_widths:
span_widths = util.bucket_values(
span_ends - span_starts, num_total_buckets=self._num_width_embeddings # type: ignore
)
else:
span_widths = span_ends - span_starts
span_width_embeddings = self._span_width_embedding(span_widths)
return torch.cat([span_embeddings, span_width_embeddings], -1)
if span_indices_mask is not None:
return span_embeddings * span_indices_mask.unsqueeze(-1)
return span_embeddings
| allennlp-master | allennlp/modules/span_extractors/bidirectional_endpoint_span_extractor.py |
from typing import Optional
import torch
from torch.nn.parameter import Parameter
from overrides import overrides
from allennlp.modules.span_extractors.span_extractor import SpanExtractor
from allennlp.modules.token_embedders.embedding import Embedding
from allennlp.nn import util
from allennlp.common.checks import ConfigurationError
@SpanExtractor.register("endpoint")
class EndpointSpanExtractor(SpanExtractor):
"""
Represents spans as a combination of the embeddings of their endpoints. Additionally,
the width of the spans can be embedded and concatenated on to the final combination.
The following types of representation are supported, assuming that
`x = span_start_embeddings` and `y = span_end_embeddings`.
`x`, `y`, `x*y`, `x+y`, `x-y`, `x/y`, where each of those binary operations
is performed elementwise. You can list as many combinations as you want, comma separated.
For example, you might give `x,y,x*y` as the `combination` parameter to this class.
The computed similarity function would then be `[x; y; x*y]`, which can then be optionally
concatenated with an embedded representation of the width of the span.
Registered as a `SpanExtractor` with name "endpoint".
# Parameters
input_dim : `int`, required.
The final dimension of the `sequence_tensor`.
combination : `str`, optional (default = `"x,y"`).
The method used to combine the `start_embedding` and `end_embedding`
representations. See above for a full description.
num_width_embeddings : `int`, optional (default = `None`).
Specifies the number of buckets to use when representing
span width features.
span_width_embedding_dim : `int`, optional (default = `None`).
The embedding size for the span_width features.
bucket_widths : `bool`, optional (default = `False`).
Whether to bucket the span widths into log-space buckets. If `False`,
the raw span widths are used.
use_exclusive_start_indices : `bool`, optional (default = `False`).
If `True`, the start indices extracted are converted to exclusive indices. Sentinels
are used to represent exclusive span indices for the elements in the first
position in the sequence (as the exclusive indices for these elements are outside
of the the sequence boundary) so that start indices can be exclusive.
NOTE: This option can be helpful to avoid the pathological case in which you
want span differences for length 1 spans - if you use inclusive indices, you
will end up with an `x - x` operation for length 1 spans, which is not good.
"""
def __init__(
self,
input_dim: int,
combination: str = "x,y",
num_width_embeddings: int = None,
span_width_embedding_dim: int = None,
bucket_widths: bool = False,
use_exclusive_start_indices: bool = False,
) -> None:
super().__init__()
self._input_dim = input_dim
self._combination = combination
self._num_width_embeddings = num_width_embeddings
self._bucket_widths = bucket_widths
self._use_exclusive_start_indices = use_exclusive_start_indices
if use_exclusive_start_indices:
self._start_sentinel = Parameter(torch.randn([1, 1, int(input_dim)]))
self._span_width_embedding: Optional[Embedding] = None
if num_width_embeddings is not None and span_width_embedding_dim is not None:
self._span_width_embedding = Embedding(
num_embeddings=num_width_embeddings, embedding_dim=span_width_embedding_dim
)
elif num_width_embeddings is not None or span_width_embedding_dim is not None:
raise ConfigurationError(
"To use a span width embedding representation, you must"
"specify both num_width_buckets and span_width_embedding_dim."
)
def get_input_dim(self) -> int:
return self._input_dim
def get_output_dim(self) -> int:
combined_dim = util.get_combined_dim(self._combination, [self._input_dim, self._input_dim])
if self._span_width_embedding is not None:
return combined_dim + self._span_width_embedding.get_output_dim()
return combined_dim
@overrides
def forward(
self,
sequence_tensor: torch.FloatTensor,
span_indices: torch.LongTensor,
sequence_mask: torch.BoolTensor = None,
span_indices_mask: torch.BoolTensor = None,
) -> None:
# shape (batch_size, num_spans)
span_starts, span_ends = [index.squeeze(-1) for index in span_indices.split(1, dim=-1)]
if span_indices_mask is not None:
# It's not strictly necessary to multiply the span indices by the mask here,
# but it's possible that the span representation was padded with something other
# than 0 (such as -1, which would be an invalid index), so we do so anyway to
# be safe.
span_starts = span_starts * span_indices_mask
span_ends = span_ends * span_indices_mask
if not self._use_exclusive_start_indices:
if sequence_tensor.size(-1) != self._input_dim:
raise ValueError(
f"Dimension mismatch expected ({sequence_tensor.size(-1)}) "
f"received ({self._input_dim})."
)
start_embeddings = util.batched_index_select(sequence_tensor, span_starts)
end_embeddings = util.batched_index_select(sequence_tensor, span_ends)
else:
# We want `exclusive` span starts, so we remove 1 from the forward span starts
# as the AllenNLP `SpanField` is inclusive.
# shape (batch_size, num_spans)
exclusive_span_starts = span_starts - 1
# shape (batch_size, num_spans, 1)
start_sentinel_mask = (exclusive_span_starts == -1).unsqueeze(-1)
exclusive_span_starts = exclusive_span_starts * ~start_sentinel_mask.squeeze(-1)
# We'll check the indices here at runtime, because it's difficult to debug
# if this goes wrong and it's tricky to get right.
if (exclusive_span_starts < 0).any():
raise ValueError(
f"Adjusted span indices must lie inside the the sequence tensor, "
f"but found: exclusive_span_starts: {exclusive_span_starts}."
)
start_embeddings = util.batched_index_select(sequence_tensor, exclusive_span_starts)
end_embeddings = util.batched_index_select(sequence_tensor, span_ends)
# We're using sentinels, so we need to replace all the elements which were
# outside the dimensions of the sequence_tensor with the start sentinel.
start_embeddings = (
start_embeddings * ~start_sentinel_mask + start_sentinel_mask * self._start_sentinel
)
combined_tensors = util.combine_tensors(
self._combination, [start_embeddings, end_embeddings]
)
if self._span_width_embedding is not None:
# Embed the span widths and concatenate to the rest of the representations.
if self._bucket_widths:
span_widths = util.bucket_values(
span_ends - span_starts, num_total_buckets=self._num_width_embeddings # type: ignore
)
else:
span_widths = span_ends - span_starts
span_width_embeddings = self._span_width_embedding(span_widths)
combined_tensors = torch.cat([combined_tensors, span_width_embeddings], -1)
if span_indices_mask is not None:
return combined_tensors * span_indices_mask.unsqueeze(-1)
return combined_tensors
| allennlp-master | allennlp/modules/span_extractors/endpoint_span_extractor.py |
from allennlp.modules.span_extractors.span_extractor import SpanExtractor
from allennlp.modules.span_extractors.endpoint_span_extractor import EndpointSpanExtractor
from allennlp.modules.span_extractors.self_attentive_span_extractor import (
SelfAttentiveSpanExtractor,
)
from allennlp.modules.span_extractors.bidirectional_endpoint_span_extractor import (
BidirectionalEndpointSpanExtractor,
)
| allennlp-master | allennlp/modules/span_extractors/__init__.py |
import torch
from overrides import overrides
from allennlp.common.registrable import Registrable
class SpanExtractor(torch.nn.Module, Registrable):
"""
Many NLP models deal with representations of spans inside a sentence.
SpanExtractors define methods for extracting and representing spans
from a sentence.
SpanExtractors take a sequence tensor of shape (batch_size, timesteps, embedding_dim)
and indices of shape (batch_size, num_spans, 2) and return a tensor of
shape (batch_size, num_spans, ...), forming some representation of the
spans.
"""
@overrides
def forward(
self,
sequence_tensor: torch.FloatTensor,
span_indices: torch.LongTensor,
sequence_mask: torch.BoolTensor = None,
span_indices_mask: torch.BoolTensor = None,
):
"""
Given a sequence tensor, extract spans and return representations of
them. Span representation can be computed in many different ways,
such as concatenation of the start and end spans, attention over the
vectors contained inside the span, etc.
# Parameters
sequence_tensor : `torch.FloatTensor`, required.
A tensor of shape (batch_size, sequence_length, embedding_size)
representing an embedded sequence of words.
span_indices : `torch.LongTensor`, required.
A tensor of shape `(batch_size, num_spans, 2)`, where the last
dimension represents the inclusive start and end indices of the
span to be extracted from the `sequence_tensor`.
sequence_mask : `torch.BoolTensor`, optional (default = `None`).
A tensor of shape (batch_size, sequence_length) representing padded
elements of the sequence.
span_indices_mask : `torch.BoolTensor`, optional (default = `None`).
A tensor of shape (batch_size, num_spans) representing the valid
spans in the `indices` tensor. This mask is optional because
sometimes it's easier to worry about masking after calling this
function, rather than passing a mask directly.
# Returns
A tensor of shape `(batch_size, num_spans, embedded_span_size)`,
where `embedded_span_size` depends on the way spans are represented.
"""
raise NotImplementedError
def get_input_dim(self) -> int:
"""
Returns the expected final dimension of the `sequence_tensor`.
"""
raise NotImplementedError
def get_output_dim(self) -> int:
"""
Returns the expected final dimension of the returned span representation.
"""
raise NotImplementedError
| allennlp-master | allennlp/modules/span_extractors/span_extractor.py |
import math
import torch
from torch.nn import Parameter
from overrides import overrides
from allennlp.nn import util
from allennlp.nn.activations import Activation
from allennlp.modules.matrix_attention.matrix_attention import MatrixAttention
@MatrixAttention.register("linear")
class LinearMatrixAttention(MatrixAttention):
"""
This `MatrixAttention` takes two matrices as input and returns a matrix of attentions
by performing a dot product between a vector of weights and some
combination of the two input matrices, followed by an (optional) activation function. The
combination used is configurable.
If the two vectors are `x` and `y`, we allow the following kinds of combinations : `x`,
`y`, `x*y`, `x+y`, `x-y`, `x/y`, where each of those binary operations is performed
elementwise. You can list as many combinations as you want, comma separated. For example, you
might give `x,y,x*y` as the `combination` parameter to this class. The computed similarity
function would then be `w^T [x; y; x*y] + b`, where `w` is a vector of weights, `b` is a
bias parameter, and `[;]` is vector concatenation.
Note that if you want a bilinear similarity function with a diagonal weight matrix W, where the
similarity function is computed as `x * w * y + b` (with `w` the diagonal of `W`), you can
accomplish that with this class by using "x*y" for `combination`.
Registered as a `MatrixAttention` with name "linear".
# Parameters
tensor_1_dim : `int`, required
The dimension of the first tensor, `x`, described above. This is `x.size()[-1]` - the
length of the vector that will go into the similarity computation. We need this so we can
build weight vectors correctly.
tensor_2_dim : `int`, required
The dimension of the second tensor, `y`, described above. This is `y.size()[-1]` - the
length of the vector that will go into the similarity computation. We need this so we can
build weight vectors correctly.
combination : `str`, optional (default=`"x,y"`)
Described above.
activation : `Activation`, optional (default=`linear`)
An activation function applied after the `w^T * [x;y] + b` calculation. Default is
linear, i.e. no activation.
"""
def __init__(
self,
tensor_1_dim: int,
tensor_2_dim: int,
combination: str = "x,y",
activation: Activation = None,
) -> None:
super().__init__()
self._combination = combination
combined_dim = util.get_combined_dim(combination, [tensor_1_dim, tensor_2_dim])
self._weight_vector = Parameter(torch.Tensor(combined_dim))
self._bias = Parameter(torch.Tensor(1))
self._activation = activation or Activation.by_name("linear")()
self.reset_parameters()
def reset_parameters(self):
std = math.sqrt(6 / (self._weight_vector.size(0) + 1))
self._weight_vector.data.uniform_(-std, std)
self._bias.data.fill_(0)
@overrides
def forward(self, matrix_1: torch.Tensor, matrix_2: torch.Tensor) -> torch.Tensor:
combined_tensors = util.combine_tensors_and_multiply(
self._combination, [matrix_1.unsqueeze(2), matrix_2.unsqueeze(1)], self._weight_vector
)
return self._activation(combined_tensors + self._bias)
| allennlp-master | allennlp/modules/matrix_attention/linear_matrix_attention.py |
from overrides import overrides
import torch
from torch.nn.parameter import Parameter
from allennlp.modules.matrix_attention.matrix_attention import MatrixAttention
from allennlp.nn import Activation
@MatrixAttention.register("bilinear")
class BilinearMatrixAttention(MatrixAttention):
"""
Computes attention between two matrices using a bilinear attention function. This function has
a matrix of weights `W` and a bias `b`, and the similarity between the two matrices `X`
and `Y` is computed as `X W Y^T + b`.
Registered as a `MatrixAttention` with name "bilinear".
# Parameters
matrix_1_dim : `int`, required
The dimension of the matrix `X`, described above. This is `X.size()[-1]` - the length
of the vector that will go into the similarity computation. We need this so we can build
the weight matrix correctly.
matrix_2_dim : `int`, required
The dimension of the matrix `Y`, described above. This is `Y.size()[-1]` - the length
of the vector that will go into the similarity computation. We need this so we can build
the weight matrix correctly.
activation : `Activation`, optional (default=`linear`)
An activation function applied after the `X W Y^T + b` calculation. Default is
linear, i.e. no activation.
use_input_biases : `bool`, optional (default = `False`)
If True, we add biases to the inputs such that the final computation
is equivalent to the original bilinear matrix multiplication plus a
projection of both inputs.
label_dim : `int`, optional (default = `1`)
The number of output classes. Typically in an attention setting this will be one,
but this parameter allows this class to function as an equivalent to `torch.nn.Bilinear`
for matrices, rather than vectors.
"""
def __init__(
self,
matrix_1_dim: int,
matrix_2_dim: int,
activation: Activation = None,
use_input_biases: bool = False,
label_dim: int = 1,
) -> None:
super().__init__()
if use_input_biases:
matrix_1_dim += 1
matrix_2_dim += 1
if label_dim == 1:
self._weight_matrix = Parameter(torch.Tensor(matrix_1_dim, matrix_2_dim))
else:
self._weight_matrix = Parameter(torch.Tensor(label_dim, matrix_1_dim, matrix_2_dim))
self._bias = Parameter(torch.Tensor(1))
self._activation = activation or Activation.by_name("linear")()
self._use_input_biases = use_input_biases
self.reset_parameters()
def reset_parameters(self):
torch.nn.init.xavier_uniform_(self._weight_matrix)
self._bias.data.fill_(0)
@overrides
def forward(self, matrix_1: torch.Tensor, matrix_2: torch.Tensor) -> torch.Tensor:
if self._use_input_biases:
bias1 = matrix_1.new_ones(matrix_1.size()[:-1] + (1,))
bias2 = matrix_2.new_ones(matrix_2.size()[:-1] + (1,))
matrix_1 = torch.cat([matrix_1, bias1], -1)
matrix_2 = torch.cat([matrix_2, bias2], -1)
weight = self._weight_matrix
if weight.dim() == 2:
weight = weight.unsqueeze(0)
intermediate = torch.matmul(matrix_1.unsqueeze(1), weight)
final = torch.matmul(intermediate, matrix_2.unsqueeze(1).transpose(2, 3))
return self._activation(final.squeeze(1) + self._bias)
| allennlp-master | allennlp/modules/matrix_attention/bilinear_matrix_attention.py |
import torch
from overrides import overrides
from allennlp.modules.matrix_attention.matrix_attention import MatrixAttention
@MatrixAttention.register("dot_product")
class DotProductMatrixAttention(MatrixAttention):
"""
Computes attention between every entry in matrix_1 with every entry in matrix_2 using a dot
product.
Registered as a `MatrixAttention` with name "dot_product".
"""
@overrides
def forward(self, matrix_1: torch.Tensor, matrix_2: torch.Tensor) -> torch.Tensor:
return matrix_1.bmm(matrix_2.transpose(2, 1))
| allennlp-master | allennlp/modules/matrix_attention/dot_product_matrix_attention.py |
import torch
from allennlp.common.registrable import Registrable
class MatrixAttention(torch.nn.Module, Registrable):
"""
`MatrixAttention` takes two matrices as input and returns a matrix of attentions.
We compute the similarity between each row in each matrix and return unnormalized similarity
scores. Because these scores are unnormalized, we don't take a mask as input; it's up to the
caller to deal with masking properly when this output is used.
Input:
- matrix_1 : `(batch_size, num_rows_1, embedding_dim_1)`
- matrix_2 : `(batch_size, num_rows_2, embedding_dim_2)`
Output:
- `(batch_size, num_rows_1, num_rows_2)`
"""
def forward(self, matrix_1: torch.Tensor, matrix_2: torch.Tensor) -> torch.Tensor:
raise NotImplementedError
| allennlp-master | allennlp/modules/matrix_attention/matrix_attention.py |
from allennlp.modules.matrix_attention.matrix_attention import MatrixAttention
from allennlp.modules.matrix_attention.bilinear_matrix_attention import BilinearMatrixAttention
from allennlp.modules.matrix_attention.cosine_matrix_attention import CosineMatrixAttention
from allennlp.modules.matrix_attention.dot_product_matrix_attention import DotProductMatrixAttention
from allennlp.modules.matrix_attention.linear_matrix_attention import LinearMatrixAttention
| allennlp-master | allennlp/modules/matrix_attention/__init__.py |
import torch
from overrides import overrides
from allennlp.modules.matrix_attention.matrix_attention import MatrixAttention
from allennlp.nn import util
@MatrixAttention.register("cosine")
class CosineMatrixAttention(MatrixAttention):
"""
Computes attention between every entry in matrix_1 with every entry in matrix_2 using cosine
similarity.
Registered as a `MatrixAttention` with name "cosine".
"""
@overrides
def forward(self, matrix_1: torch.Tensor, matrix_2: torch.Tensor) -> torch.Tensor:
a_norm = matrix_1 / (
matrix_1.norm(p=2, dim=-1, keepdim=True) + util.tiny_value_of_dtype(matrix_1.dtype)
)
b_norm = matrix_2 / (
matrix_2.norm(p=2, dim=-1, keepdim=True) + util.tiny_value_of_dtype(matrix_2.dtype)
)
return torch.bmm(a_norm, b_norm.transpose(-1, -2))
| allennlp-master | allennlp/modules/matrix_attention/cosine_matrix_attention.py |
import math
from typing import Optional, Tuple, Dict, Any
from overrides import overrides
import torch
import torch.nn.functional as F
from transformers import XLNetConfig
from allennlp.data.tokenizers import PretrainedTransformerTokenizer
from allennlp.modules.scalar_mix import ScalarMix
from allennlp.modules.token_embedders.token_embedder import TokenEmbedder
from allennlp.nn.util import batched_index_select
@TokenEmbedder.register("pretrained_transformer")
class PretrainedTransformerEmbedder(TokenEmbedder):
"""
Uses a pretrained model from `transformers` as a `TokenEmbedder`.
Registered as a `TokenEmbedder` with name "pretrained_transformer".
# Parameters
model_name : `str`
The name of the `transformers` model to use. Should be the same as the corresponding
`PretrainedTransformerIndexer`.
max_length : `int`, optional (default = `None`)
If positive, folds input token IDs into multiple segments of this length, pass them
through the transformer model independently, and concatenate the final representations.
Should be set to the same value as the `max_length` option on the
`PretrainedTransformerIndexer`.
sub_module: `str`, optional (default = `None`)
The name of a submodule of the transformer to be used as the embedder. Some transformers naturally act
as embedders such as BERT. However, other models consist of encoder and decoder, in which case we just
want to use the encoder.
train_parameters: `bool`, optional (default = `True`)
If this is `True`, the transformer weights get updated during training.
last_layer_only: `bool`, optional (default = `True`)
When `True` (the default), only the final layer of the pretrained transformer is taken
for the embeddings. But if set to `False`, a scalar mix of all of the layers
is used.
gradient_checkpointing: `bool`, optional (default = `None`)
Enable or disable gradient checkpointing.
tokenizer_kwargs: `Dict[str, Any]`, optional (default = `None`)
Dictionary with
[additional arguments](https://github.com/huggingface/transformers/blob/155c782a2ccd103cf63ad48a2becd7c76a7d2115/transformers/tokenization_utils.py#L691)
for `AutoTokenizer.from_pretrained`.
transformer_kwargs: `Dict[str, Any]`, optional (default = `None`)
Dictionary with
[additional arguments](https://github.com/huggingface/transformers/blob/155c782a2ccd103cf63ad48a2becd7c76a7d2115/transformers/modeling_utils.py#L253)
for `AutoModel.from_pretrained`.
""" # noqa: E501
authorized_missing_keys = [r"position_ids$"]
def __init__(
self,
model_name: str,
*,
max_length: int = None,
sub_module: str = None,
train_parameters: bool = True,
last_layer_only: bool = True,
override_weights_file: Optional[str] = None,
override_weights_strip_prefix: Optional[str] = None,
gradient_checkpointing: Optional[bool] = None,
tokenizer_kwargs: Optional[Dict[str, Any]] = None,
transformer_kwargs: Optional[Dict[str, Any]] = None,
) -> None:
super().__init__()
from allennlp.common import cached_transformers
self.transformer_model = cached_transformers.get(
model_name,
True,
override_weights_file=override_weights_file,
override_weights_strip_prefix=override_weights_strip_prefix,
**(transformer_kwargs or {}),
)
if gradient_checkpointing is not None:
self.transformer_model.config.update({"gradient_checkpointing": gradient_checkpointing})
self.config = self.transformer_model.config
if sub_module:
assert hasattr(self.transformer_model, sub_module)
self.transformer_model = getattr(self.transformer_model, sub_module)
self._max_length = max_length
# I'm not sure if this works for all models; open an issue on github if you find a case
# where it doesn't work.
self.output_dim = self.config.hidden_size
self._scalar_mix: Optional[ScalarMix] = None
if not last_layer_only:
self._scalar_mix = ScalarMix(self.config.num_hidden_layers)
self.config.output_hidden_states = True
tokenizer = PretrainedTransformerTokenizer(
model_name,
tokenizer_kwargs=tokenizer_kwargs,
)
self._num_added_start_tokens = len(tokenizer.single_sequence_start_tokens)
self._num_added_end_tokens = len(tokenizer.single_sequence_end_tokens)
self._num_added_tokens = self._num_added_start_tokens + self._num_added_end_tokens
if not train_parameters:
for param in self.transformer_model.parameters():
param.requires_grad = False
@overrides
def get_output_dim(self):
return self.output_dim
def _number_of_token_type_embeddings(self):
if isinstance(self.config, XLNetConfig):
return 3 # XLNet has 3 type ids
elif hasattr(self.config, "type_vocab_size"):
return self.config.type_vocab_size
else:
return 0
@overrides
def forward(
self,
token_ids: torch.LongTensor,
mask: torch.BoolTensor,
type_ids: Optional[torch.LongTensor] = None,
segment_concat_mask: Optional[torch.BoolTensor] = None,
) -> torch.Tensor: # type: ignore
"""
# Parameters
token_ids: `torch.LongTensor`
Shape: `[batch_size, num_wordpieces if max_length is None else num_segment_concat_wordpieces]`.
num_segment_concat_wordpieces is num_wordpieces plus special tokens inserted in the
middle, e.g. the length of: "[CLS] A B C [SEP] [CLS] D E F [SEP]" (see indexer logic).
mask: `torch.BoolTensor`
Shape: [batch_size, num_wordpieces].
type_ids: `Optional[torch.LongTensor]`
Shape: `[batch_size, num_wordpieces if max_length is None else num_segment_concat_wordpieces]`.
segment_concat_mask: `Optional[torch.BoolTensor]`
Shape: `[batch_size, num_segment_concat_wordpieces]`.
# Returns
`torch.Tensor`
Shape: `[batch_size, num_wordpieces, embedding_size]`.
"""
# Some of the huggingface transformers don't support type ids at all and crash when you supply
# them. For others, you can supply a tensor of zeros, and if you don't, they act as if you did.
# There is no practical difference to the caller, so here we pretend that one case is the same
# as another case.
if type_ids is not None:
max_type_id = type_ids.max()
if max_type_id == 0:
type_ids = None
else:
if max_type_id >= self._number_of_token_type_embeddings():
raise ValueError("Found type ids too large for the chosen transformer model.")
assert token_ids.shape == type_ids.shape
fold_long_sequences = self._max_length is not None and token_ids.size(1) > self._max_length
if fold_long_sequences:
batch_size, num_segment_concat_wordpieces = token_ids.size()
token_ids, segment_concat_mask, type_ids = self._fold_long_sequences(
token_ids, segment_concat_mask, type_ids
)
transformer_mask = segment_concat_mask if self._max_length is not None else mask
assert transformer_mask is not None
# Shape: [batch_size, num_wordpieces, embedding_size],
# or if self._max_length is not None:
# [batch_size * num_segments, self._max_length, embedding_size]
# We call this with kwargs because some of the huggingface models don't have the
# token_type_ids parameter and fail even when it's given as None.
# Also, as of transformers v2.5.1, they are taking FloatTensor masks.
parameters = {"input_ids": token_ids, "attention_mask": transformer_mask.float()}
if type_ids is not None:
parameters["token_type_ids"] = type_ids
transformer_output = self.transformer_model(**parameters)
if self._scalar_mix is not None:
# As far as I can tell, the hidden states will always be the last element
# in the output tuple as long as the model is not also configured to return
# attention scores.
# See, for example, the return value description for BERT:
# https://huggingface.co/transformers/model_doc/bert.html#transformers.BertModel.forward
# These hidden states will also include the embedding layer, which we don't
# include in the scalar mix. Hence the `[1:]` slicing.
hidden_states = transformer_output[-1][1:]
embeddings = self._scalar_mix(hidden_states)
else:
embeddings = transformer_output[0]
if fold_long_sequences:
embeddings = self._unfold_long_sequences(
embeddings, segment_concat_mask, batch_size, num_segment_concat_wordpieces
)
return embeddings
def _fold_long_sequences(
self,
token_ids: torch.LongTensor,
mask: torch.BoolTensor,
type_ids: Optional[torch.LongTensor] = None,
) -> Tuple[torch.LongTensor, torch.LongTensor, Optional[torch.LongTensor]]:
"""
We fold 1D sequences (for each element in batch), returned by `PretrainedTransformerIndexer`
that are in reality multiple segments concatenated together, to 2D tensors, e.g.
[ [CLS] A B C [SEP] [CLS] D E [SEP] ]
-> [ [ [CLS] A B C [SEP] ], [ [CLS] D E [SEP] [PAD] ] ]
The [PAD] positions can be found in the returned `mask`.
# Parameters
token_ids: `torch.LongTensor`
Shape: `[batch_size, num_segment_concat_wordpieces]`.
num_segment_concat_wordpieces is num_wordpieces plus special tokens inserted in the
middle, i.e. the length of: "[CLS] A B C [SEP] [CLS] D E F [SEP]" (see indexer logic).
mask: `torch.BoolTensor`
Shape: `[batch_size, num_segment_concat_wordpieces]`.
The mask for the concatenated segments of wordpieces. The same as `segment_concat_mask`
in `forward()`.
type_ids: `Optional[torch.LongTensor]`
Shape: [batch_size, num_segment_concat_wordpieces].
# Returns:
token_ids: `torch.LongTensor`
Shape: [batch_size * num_segments, self._max_length].
mask: `torch.BoolTensor`
Shape: [batch_size * num_segments, self._max_length].
"""
num_segment_concat_wordpieces = token_ids.size(1)
num_segments = math.ceil(num_segment_concat_wordpieces / self._max_length) # type: ignore
padded_length = num_segments * self._max_length # type: ignore
length_to_pad = padded_length - num_segment_concat_wordpieces
def fold(tensor): # Shape: [batch_size, num_segment_concat_wordpieces]
# Shape: [batch_size, num_segments * self._max_length]
tensor = F.pad(tensor, [0, length_to_pad], value=0)
# Shape: [batch_size * num_segments, self._max_length]
return tensor.reshape(-1, self._max_length)
return fold(token_ids), fold(mask), fold(type_ids) if type_ids is not None else None
def _unfold_long_sequences(
self,
embeddings: torch.FloatTensor,
mask: torch.BoolTensor,
batch_size: int,
num_segment_concat_wordpieces: int,
) -> torch.FloatTensor:
"""
We take 2D segments of a long sequence and flatten them out to get the whole sequence
representation while remove unnecessary special tokens.
[ [ [CLS]_emb A_emb B_emb C_emb [SEP]_emb ], [ [CLS]_emb D_emb E_emb [SEP]_emb [PAD]_emb ] ]
-> [ [CLS]_emb A_emb B_emb C_emb D_emb E_emb [SEP]_emb ]
We truncate the start and end tokens for all segments, recombine the segments,
and manually add back the start and end tokens.
# Parameters
embeddings: `torch.FloatTensor`
Shape: [batch_size * num_segments, self._max_length, embedding_size].
mask: `torch.BoolTensor`
Shape: [batch_size * num_segments, self._max_length].
The mask for the concatenated segments of wordpieces. The same as `segment_concat_mask`
in `forward()`.
batch_size: `int`
num_segment_concat_wordpieces: `int`
The length of the original "[ [CLS] A B C [SEP] [CLS] D E F [SEP] ]", i.e.
the original `token_ids.size(1)`.
# Returns:
embeddings: `torch.FloatTensor`
Shape: [batch_size, self._num_wordpieces, embedding_size].
"""
def lengths_to_mask(lengths, max_len, device):
return torch.arange(max_len, device=device).expand(
lengths.size(0), max_len
) < lengths.unsqueeze(1)
device = embeddings.device
num_segments = int(embeddings.size(0) / batch_size)
embedding_size = embeddings.size(2)
# We want to remove all segment-level special tokens but maintain sequence-level ones
num_wordpieces = num_segment_concat_wordpieces - (num_segments - 1) * self._num_added_tokens
embeddings = embeddings.reshape(
batch_size, num_segments * self._max_length, embedding_size # type: ignore
)
mask = mask.reshape(batch_size, num_segments * self._max_length) # type: ignore
# We assume that all 1s in the mask precede all 0s, and add an assert for that.
# Open an issue on GitHub if this breaks for you.
# Shape: (batch_size,)
seq_lengths = mask.sum(-1)
if not (lengths_to_mask(seq_lengths, mask.size(1), device) == mask).all():
raise ValueError(
"Long sequence splitting only supports masks with all 1s preceding all 0s."
)
# Shape: (batch_size, self._num_added_end_tokens); this is a broadcast op
end_token_indices = (
seq_lengths.unsqueeze(-1) - torch.arange(self._num_added_end_tokens, device=device) - 1
)
# Shape: (batch_size, self._num_added_start_tokens, embedding_size)
start_token_embeddings = embeddings[:, : self._num_added_start_tokens, :]
# Shape: (batch_size, self._num_added_end_tokens, embedding_size)
end_token_embeddings = batched_index_select(embeddings, end_token_indices)
embeddings = embeddings.reshape(batch_size, num_segments, self._max_length, embedding_size)
embeddings = embeddings[
:, :, self._num_added_start_tokens : embeddings.size(2) - self._num_added_end_tokens, :
] # truncate segment-level start/end tokens
embeddings = embeddings.reshape(batch_size, -1, embedding_size) # flatten
# Now try to put end token embeddings back which is a little tricky.
# The number of segment each sequence spans, excluding padding. Mimicking ceiling operation.
# Shape: (batch_size,)
num_effective_segments = (seq_lengths + self._max_length - 1) // self._max_length
# The number of indices that end tokens should shift back.
num_removed_non_end_tokens = (
num_effective_segments * self._num_added_tokens - self._num_added_end_tokens
)
# Shape: (batch_size, self._num_added_end_tokens)
end_token_indices -= num_removed_non_end_tokens.unsqueeze(-1)
assert (end_token_indices >= self._num_added_start_tokens).all()
# Add space for end embeddings
embeddings = torch.cat([embeddings, torch.zeros_like(end_token_embeddings)], 1)
# Add end token embeddings back
embeddings.scatter_(
1, end_token_indices.unsqueeze(-1).expand_as(end_token_embeddings), end_token_embeddings
)
# Now put back start tokens. We can do this before putting back end tokens, but then
# we need to change `num_removed_non_end_tokens` a little.
embeddings = torch.cat([start_token_embeddings, embeddings], 1)
# Truncate to original length
embeddings = embeddings[:, :num_wordpieces, :]
return embeddings
| allennlp-master | allennlp/modules/token_embedders/pretrained_transformer_embedder.py |
from typing import Optional, Dict, Any
from overrides import overrides
import torch
from allennlp.modules.token_embedders import PretrainedTransformerEmbedder, TokenEmbedder
from allennlp.nn import util
@TokenEmbedder.register("pretrained_transformer_mismatched")
class PretrainedTransformerMismatchedEmbedder(TokenEmbedder):
"""
Use this embedder to embed wordpieces given by `PretrainedTransformerMismatchedIndexer`
and to pool the resulting vectors to get word-level representations.
Registered as a `TokenEmbedder` with name "pretrained_transformer_mismatched".
# Parameters
model_name : `str`
The name of the `transformers` model to use. Should be the same as the corresponding
`PretrainedTransformerMismatchedIndexer`.
max_length : `int`, optional (default = `None`)
If positive, folds input token IDs into multiple segments of this length, pass them
through the transformer model independently, and concatenate the final representations.
Should be set to the same value as the `max_length` option on the
`PretrainedTransformerMismatchedIndexer`.
train_parameters: `bool`, optional (default = `True`)
If this is `True`, the transformer weights get updated during training.
last_layer_only: `bool`, optional (default = `True`)
When `True` (the default), only the final layer of the pretrained transformer is taken
for the embeddings. But if set to `False`, a scalar mix of all of the layers
is used.
gradient_checkpointing: `bool`, optional (default = `None`)
Enable or disable gradient checkpointing.
tokenizer_kwargs: `Dict[str, Any]`, optional (default = `None`)
Dictionary with
[additional arguments](https://github.com/huggingface/transformers/blob/155c782a2ccd103cf63ad48a2becd7c76a7d2115/transformers/tokenization_utils.py#L691)
for `AutoTokenizer.from_pretrained`.
transformer_kwargs: `Dict[str, Any]`, optional (default = `None`)
Dictionary with
[additional arguments](https://github.com/huggingface/transformers/blob/155c782a2ccd103cf63ad48a2becd7c76a7d2115/transformers/modeling_utils.py#L253)
for `AutoModel.from_pretrained`.
""" # noqa: E501
def __init__(
self,
model_name: str,
max_length: int = None,
train_parameters: bool = True,
last_layer_only: bool = True,
gradient_checkpointing: Optional[bool] = None,
tokenizer_kwargs: Optional[Dict[str, Any]] = None,
transformer_kwargs: Optional[Dict[str, Any]] = None,
) -> None:
super().__init__()
# The matched version v.s. mismatched
self._matched_embedder = PretrainedTransformerEmbedder(
model_name,
max_length=max_length,
train_parameters=train_parameters,
last_layer_only=last_layer_only,
gradient_checkpointing=gradient_checkpointing,
tokenizer_kwargs=tokenizer_kwargs,
transformer_kwargs=transformer_kwargs,
)
@overrides
def get_output_dim(self):
return self._matched_embedder.get_output_dim()
@overrides
def forward(
self,
token_ids: torch.LongTensor,
mask: torch.BoolTensor,
offsets: torch.LongTensor,
wordpiece_mask: torch.BoolTensor,
type_ids: Optional[torch.LongTensor] = None,
segment_concat_mask: Optional[torch.BoolTensor] = None,
) -> torch.Tensor: # type: ignore
"""
# Parameters
token_ids: `torch.LongTensor`
Shape: [batch_size, num_wordpieces] (for exception see `PretrainedTransformerEmbedder`).
mask: `torch.BoolTensor`
Shape: [batch_size, num_orig_tokens].
offsets: `torch.LongTensor`
Shape: [batch_size, num_orig_tokens, 2].
Maps indices for the original tokens, i.e. those given as input to the indexer,
to a span in token_ids. `token_ids[i][offsets[i][j][0]:offsets[i][j][1] + 1]`
corresponds to the original j-th token from the i-th batch.
wordpiece_mask: `torch.BoolTensor`
Shape: [batch_size, num_wordpieces].
type_ids: `Optional[torch.LongTensor]`
Shape: [batch_size, num_wordpieces].
segment_concat_mask: `Optional[torch.BoolTensor]`
See `PretrainedTransformerEmbedder`.
# Returns
`torch.Tensor`
Shape: [batch_size, num_orig_tokens, embedding_size].
"""
# Shape: [batch_size, num_wordpieces, embedding_size].
embeddings = self._matched_embedder(
token_ids, wordpiece_mask, type_ids=type_ids, segment_concat_mask=segment_concat_mask
)
# span_embeddings: (batch_size, num_orig_tokens, max_span_length, embedding_size)
# span_mask: (batch_size, num_orig_tokens, max_span_length)
span_embeddings, span_mask = util.batched_span_select(embeddings.contiguous(), offsets)
span_mask = span_mask.unsqueeze(-1)
span_embeddings *= span_mask # zero out paddings
span_embeddings_sum = span_embeddings.sum(2)
span_embeddings_len = span_mask.sum(2)
# Shape: (batch_size, num_orig_tokens, embedding_size)
orig_embeddings = span_embeddings_sum / torch.clamp_min(span_embeddings_len, 1)
# All the places where the span length is zero, write in zeros.
orig_embeddings[(span_embeddings_len == 0).expand(orig_embeddings.shape)] = 0
return orig_embeddings
| allennlp-master | allennlp/modules/token_embedders/pretrained_transformer_mismatched_embedder.py |
import io
import itertools
import logging
import re
import tarfile
import warnings
import zipfile
from typing import Any, cast, Iterator, NamedTuple, Optional, Sequence, Tuple, BinaryIO
import numpy
import torch
from overrides import overrides
from torch.nn.functional import embedding
from allennlp.common import Tqdm
from allennlp.common.checks import ConfigurationError
from allennlp.common.file_utils import cached_path, get_file_extension, is_url_or_existing_file
from allennlp.data import Vocabulary
from allennlp.modules.time_distributed import TimeDistributed
from allennlp.modules.token_embedders.token_embedder import TokenEmbedder
from allennlp.nn import util
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=FutureWarning)
import h5py
logger = logging.getLogger(__name__)
@TokenEmbedder.register("embedding")
class Embedding(TokenEmbedder):
"""
A more featureful embedding module than the default in Pytorch. Adds the ability to:
1. embed higher-order inputs
2. pre-specify the weight matrix
3. use a non-trainable embedding
4. project the resultant embeddings to some other dimension (which only makes sense with
non-trainable embeddings).
Note that if you are using our data API and are trying to embed a
[`TextField`](../../data/fields/text_field.md), you should use a
[`TextFieldEmbedder`](../text_field_embedders/text_field_embedder.md) instead of using this directly.
Registered as a `TokenEmbedder` with name "embedding".
# Parameters
num_embeddings : `int`
Size of the dictionary of embeddings (vocabulary size).
embedding_dim : `int`
The size of each embedding vector.
projection_dim : `int`, optional (default=`None`)
If given, we add a projection layer after the embedding layer. This really only makes
sense if `trainable` is `False`.
weight : `torch.FloatTensor`, optional (default=`None`)
A pre-initialised weight matrix for the embedding lookup, allowing the use of
pretrained vectors.
padding_index : `int`, optional (default=`None`)
If given, pads the output with zeros whenever it encounters the index.
trainable : `bool`, optional (default=`True`)
Whether or not to optimize the embedding parameters.
max_norm : `float`, optional (default=`None`)
If given, will renormalize the embeddings to always have a norm lesser than this
norm_type : `float`, optional (default=`2`)
The p of the p-norm to compute for the max_norm option
scale_grad_by_freq : `bool`, optional (default=`False`)
If given, this will scale gradients by the frequency of the words in the mini-batch.
sparse : `bool`, optional (default=`False`)
Whether or not the Pytorch backend should use a sparse representation of the embedding weight.
vocab_namespace : `str`, optional (default=`None`)
In case of fine-tuning/transfer learning, the model's embedding matrix needs to be
extended according to the size of extended-vocabulary. To be able to know how much to
extend the embedding-matrix, it's necessary to know which vocab_namspace was used to
construct it in the original training. We store vocab_namespace used during the original
training as an attribute, so that it can be retrieved during fine-tuning.
pretrained_file : `str`, optional (default=`None`)
Path to a file of word vectors to initialize the embedding matrix. It can be the
path to a local file or a URL of a (cached) remote file. Two formats are supported:
* hdf5 file - containing an embedding matrix in the form of a torch.Tensor;
* text file - an utf-8 encoded text file with space separated fields.
vocab : `Vocabulary`, optional (default = `None`)
Used to construct an embedding from a pretrained file.
In a typical AllenNLP configuration file, this parameter does not get an entry under the
"embedding", it gets specified as a top-level parameter, then is passed in to this module
separately.
# Returns
An Embedding module.
"""
def __init__(
self,
embedding_dim: int,
num_embeddings: int = None,
projection_dim: int = None,
weight: torch.FloatTensor = None,
padding_index: int = None,
trainable: bool = True,
max_norm: float = None,
norm_type: float = 2.0,
scale_grad_by_freq: bool = False,
sparse: bool = False,
vocab_namespace: str = "tokens",
pretrained_file: str = None,
vocab: Vocabulary = None,
) -> None:
super().__init__()
if num_embeddings is None and vocab is None:
raise ConfigurationError(
"Embedding must be constructed with either num_embeddings or a vocabulary."
)
_vocab_namespace: Optional[str] = vocab_namespace
if num_embeddings is None:
num_embeddings = vocab.get_vocab_size(_vocab_namespace) # type: ignore
else:
# If num_embeddings is present, set default namespace to None so that extend_vocab
# call doesn't misinterpret that some namespace was originally used.
_vocab_namespace = None # type: ignore
self.num_embeddings = num_embeddings
self.padding_index = padding_index
self.max_norm = max_norm
self.norm_type = norm_type
self.scale_grad_by_freq = scale_grad_by_freq
self.sparse = sparse
self._vocab_namespace = _vocab_namespace
self._pretrained_file = pretrained_file
self.output_dim = projection_dim or embedding_dim
if weight is not None and pretrained_file:
raise ConfigurationError(
"Embedding was constructed with both a weight and a pretrained file."
)
elif pretrained_file is not None:
if vocab is None:
raise ConfigurationError(
"To construct an Embedding from a pretrained file, you must also pass a vocabulary."
)
# If we're loading a saved model, we don't want to actually read a pre-trained
# embedding file - the embeddings will just be in our saved weights, and we might not
# have the original embedding file anymore, anyway.
# TODO: having to pass tokens here is SUPER gross, but otherwise this breaks the
# extend_vocab method, which relies on the value of vocab_namespace being None
# to infer at what stage the embedding has been constructed. Phew.
weight = _read_pretrained_embeddings_file(
pretrained_file, embedding_dim, vocab, vocab_namespace
)
self.weight = torch.nn.Parameter(weight, requires_grad=trainable)
elif weight is not None:
self.weight = torch.nn.Parameter(weight, requires_grad=trainable)
else:
weight = torch.FloatTensor(num_embeddings, embedding_dim)
self.weight = torch.nn.Parameter(weight, requires_grad=trainable)
torch.nn.init.xavier_uniform_(self.weight)
# Whatever way we have constructed the embedding, it should be consistent with
# num_embeddings and embedding_dim.
if self.weight.size() != (num_embeddings, embedding_dim):
raise ConfigurationError(
"A weight matrix was passed with contradictory embedding shapes."
)
if self.padding_index is not None:
self.weight.data[self.padding_index].fill_(0)
if projection_dim:
self._projection = torch.nn.Linear(embedding_dim, projection_dim)
else:
self._projection = None
@overrides
def get_output_dim(self) -> int:
return self.output_dim
@overrides
def forward(self, tokens: torch.Tensor) -> torch.Tensor:
# tokens may have extra dimensions (batch_size, d1, ..., dn, sequence_length),
# but embedding expects (batch_size, sequence_length), so pass tokens to
# util.combine_initial_dims (which is a no-op if there are no extra dimensions).
# Remember the original size.
original_size = tokens.size()
tokens = util.combine_initial_dims(tokens)
embedded = embedding(
tokens,
self.weight,
padding_idx=self.padding_index,
max_norm=self.max_norm,
norm_type=self.norm_type,
scale_grad_by_freq=self.scale_grad_by_freq,
sparse=self.sparse,
)
# Now (if necessary) add back in the extra dimensions.
embedded = util.uncombine_initial_dims(embedded, original_size)
if self._projection:
projection = self._projection
for _ in range(embedded.dim() - 2):
projection = TimeDistributed(projection)
embedded = projection(embedded)
return embedded
def extend_vocab(
self,
extended_vocab: Vocabulary,
vocab_namespace: str = None,
extension_pretrained_file: str = None,
model_path: str = None,
):
"""
Extends the embedding matrix according to the extended vocabulary.
If extension_pretrained_file is available, it will be used for initializing the new words
embeddings in the extended vocabulary; otherwise we will check if _pretrained_file attribute
is already available. If none is available, they will be initialized with xavier uniform.
# Parameters
extended_vocab : `Vocabulary`
Vocabulary extended from original vocabulary used to construct
this `Embedding`.
vocab_namespace : `str`, (optional, default=`None`)
In case you know what vocab_namespace should be used for extension, you
can pass it. If not passed, it will check if vocab_namespace used at the
time of `Embedding` construction is available. If so, this namespace
will be used or else extend_vocab will be a no-op.
extension_pretrained_file : `str`, (optional, default=`None`)
A file containing pretrained embeddings can be specified here. It can be
the path to a local file or an URL of a (cached) remote file. Check format
details in `from_params` of `Embedding` class.
model_path : `str`, (optional, default=`None`)
Path traversing the model attributes upto this embedding module.
Eg. "_text_field_embedder.token_embedder_tokens". This is only useful
to give a helpful error message when extend_vocab is implicitly called
by train or any other command.
"""
# Caveat: For allennlp v0.8.1 and below, we weren't storing vocab_namespace as an attribute,
# knowing which is necessary at time of embedding vocab extension. So old archive models are
# currently unextendable.
vocab_namespace = vocab_namespace or self._vocab_namespace
if not vocab_namespace:
# It's not safe to default to "tokens" or any other namespace.
logger.info(
"Loading a model trained before embedding extension was implemented; "
"pass an explicit vocab namespace if you want to extend the vocabulary."
)
return
extended_num_embeddings = extended_vocab.get_vocab_size(vocab_namespace)
if extended_num_embeddings == self.num_embeddings:
# It's already been extended. No need to initialize / read pretrained file in first place (no-op)
return
if extended_num_embeddings < self.num_embeddings:
raise ConfigurationError(
f"Size of namespace, {vocab_namespace} for extended_vocab is smaller than "
f"embedding. You likely passed incorrect vocab or namespace for extension."
)
# Case 1: user passed extension_pretrained_file and it's available.
if extension_pretrained_file and is_url_or_existing_file(extension_pretrained_file):
# Don't have to do anything here, this is the happy case.
pass
# Case 2: user passed extension_pretrained_file and it's not available
elif extension_pretrained_file:
raise ConfigurationError(
f"You passed pretrained embedding file {extension_pretrained_file} "
f"for model_path {model_path} but it's not available."
)
# Case 3: user didn't pass extension_pretrained_file, but pretrained_file attribute was
# saved during training and is available.
elif is_url_or_existing_file(self._pretrained_file):
extension_pretrained_file = self._pretrained_file
# Case 4: no file is available, hope that pretrained embeddings weren't used in the first place and warn
elif self._pretrained_file is not None:
# Warn here instead of an exception to allow a fine-tuning even without the original pretrained_file
logger.warning(
f"Embedding at model_path, {model_path} cannot locate the pretrained_file. "
f"Originally pretrained_file was at '{self._pretrained_file}'."
)
else:
# When loading a model from archive there is no way to distinguish between whether a pretrained-file
# was or wasn't used during the original training. So we leave an info here.
logger.info(
"If you are fine-tuning and want to use a pretrained_file for "
"embedding extension, please pass the mapping by --embedding-sources argument."
)
embedding_dim = self.weight.data.shape[-1]
if not extension_pretrained_file:
extra_num_embeddings = extended_num_embeddings - self.num_embeddings
extra_weight = torch.FloatTensor(extra_num_embeddings, embedding_dim)
torch.nn.init.xavier_uniform_(extra_weight)
else:
# It's easiest to just reload the embeddings for the entire vocab,
# then only keep the ones we need.
whole_weight = _read_pretrained_embeddings_file(
extension_pretrained_file, embedding_dim, extended_vocab, vocab_namespace
)
extra_weight = whole_weight[self.num_embeddings :, :]
device = self.weight.data.device
extended_weight = torch.cat([self.weight.data, extra_weight.to(device)], dim=0)
self.weight = torch.nn.Parameter(extended_weight, requires_grad=self.weight.requires_grad)
self.num_embeddings = extended_num_embeddings
def _read_pretrained_embeddings_file(
file_uri: str, embedding_dim: int, vocab: Vocabulary, namespace: str = "tokens"
) -> torch.FloatTensor:
"""
Returns and embedding matrix for the given vocabulary using the pretrained embeddings
contained in the given file. Embeddings for tokens not found in the pretrained embedding file
are randomly initialized using a normal distribution with mean and standard deviation equal to
those of the pretrained embeddings.
We support two file formats:
* text format - utf-8 encoded text file with space separated fields: [word] [dim 1] [dim 2] ...
The text file can eventually be compressed, and even resides in an archive with multiple files.
If the file resides in an archive with other files, then `embeddings_filename` must
be a URI "(archive_uri)#file_path_inside_the_archive"
* hdf5 format - hdf5 file containing an embedding matrix in the form of a torch.Tensor.
If the filename ends with '.hdf5' or '.h5' then we load from hdf5, otherwise we assume
text format.
# Parameters
file_uri : `str`, required.
It can be:
* a file system path or a URL of an eventually compressed text file or a zip/tar archive
containing a single file.
* URI of the type `(archive_path_or_url)#file_path_inside_archive` if the text file
is contained in a multi-file archive.
vocab : `Vocabulary`, required.
A Vocabulary object.
namespace : `str`, (optional, default=`"tokens"`)
The namespace of the vocabulary to find pretrained embeddings for.
trainable : `bool`, (optional, default=`True`)
Whether or not the embedding parameters should be optimized.
# Returns
A weight matrix with embeddings initialized from the read file. The matrix has shape
`(vocab.get_vocab_size(namespace), embedding_dim)`, where the indices of words appearing in
the pretrained embedding file are initialized to the pretrained embedding value.
"""
file_ext = get_file_extension(file_uri)
if file_ext in [".h5", ".hdf5"]:
return _read_embeddings_from_hdf5(file_uri, embedding_dim, vocab, namespace)
return _read_embeddings_from_text_file(file_uri, embedding_dim, vocab, namespace)
def _read_embeddings_from_text_file(
file_uri: str, embedding_dim: int, vocab: Vocabulary, namespace: str = "tokens"
) -> torch.FloatTensor:
"""
Read pre-trained word vectors from an eventually compressed text file, possibly contained
inside an archive with multiple files. The text file is assumed to be utf-8 encoded with
space-separated fields: [word] [dim 1] [dim 2] ...
Lines that contain more numerical tokens than `embedding_dim` raise a warning and are skipped.
The remainder of the docstring is identical to `_read_pretrained_embeddings_file`.
"""
tokens_to_keep = set(vocab.get_index_to_token_vocabulary(namespace).values())
vocab_size = vocab.get_vocab_size(namespace)
embeddings = {}
# First we read the embeddings from the file, only keeping vectors for the words we need.
logger.info("Reading pretrained embeddings from file")
with EmbeddingsTextFile(file_uri) as embeddings_file:
for line in Tqdm.tqdm(embeddings_file):
token = line.split(" ", 1)[0]
if token in tokens_to_keep:
fields = line.rstrip().split(" ")
if len(fields) - 1 != embedding_dim:
# Sometimes there are funny unicode parsing problems that lead to different
# fields lengths (e.g., a word with a unicode space character that splits
# into more than one column). We skip those lines. Note that if you have
# some kind of long header, this could result in all of your lines getting
# skipped. It's hard to check for that here; you just have to look in the
# embedding_misses_file and at the model summary to make sure things look
# like they are supposed to.
logger.warning(
"Found line with wrong number of dimensions (expected: %d; actual: %d): %s",
embedding_dim,
len(fields) - 1,
line,
)
continue
vector = numpy.asarray(fields[1:], dtype="float32")
embeddings[token] = vector
if not embeddings:
raise ConfigurationError(
"No embeddings of correct dimension found; you probably "
"misspecified your embedding_dim parameter, or didn't "
"pre-populate your Vocabulary"
)
all_embeddings = numpy.asarray(list(embeddings.values()))
embeddings_mean = float(numpy.mean(all_embeddings))
embeddings_std = float(numpy.std(all_embeddings))
# Now we initialize the weight matrix for an embedding layer, starting with random vectors,
# then filling in the word vectors we just read.
logger.info("Initializing pre-trained embedding layer")
embedding_matrix = torch.FloatTensor(vocab_size, embedding_dim).normal_(
embeddings_mean, embeddings_std
)
num_tokens_found = 0
index_to_token = vocab.get_index_to_token_vocabulary(namespace)
for i in range(vocab_size):
token = index_to_token[i]
# If we don't have a pre-trained vector for this word, we'll just leave this row alone,
# so the word has a random initialization.
if token in embeddings:
embedding_matrix[i] = torch.FloatTensor(embeddings[token])
num_tokens_found += 1
else:
logger.debug(
"Token %s was not found in the embedding file. Initialising randomly.", token
)
logger.info(
"Pretrained embeddings were found for %d out of %d tokens", num_tokens_found, vocab_size
)
return embedding_matrix
def _read_embeddings_from_hdf5(
embeddings_filename: str, embedding_dim: int, vocab: Vocabulary, namespace: str = "tokens"
) -> torch.FloatTensor:
"""
Reads from a hdf5 formatted file. The embedding matrix is assumed to
be keyed by 'embedding' and of size `(num_tokens, embedding_dim)`.
"""
with h5py.File(embeddings_filename, "r") as fin:
embeddings = fin["embedding"][...]
if list(embeddings.shape) != [vocab.get_vocab_size(namespace), embedding_dim]:
raise ConfigurationError(
"Read shape {0} embeddings from the file, but expected {1}".format(
list(embeddings.shape), [vocab.get_vocab_size(namespace), embedding_dim]
)
)
return torch.FloatTensor(embeddings)
def format_embeddings_file_uri(
main_file_path_or_url: str, path_inside_archive: Optional[str] = None
) -> str:
if path_inside_archive:
return "({})#{}".format(main_file_path_or_url, path_inside_archive)
return main_file_path_or_url
class EmbeddingsFileURI(NamedTuple):
main_file_uri: str
path_inside_archive: Optional[str] = None
def parse_embeddings_file_uri(uri: str) -> "EmbeddingsFileURI":
match = re.fullmatch(r"\((.*)\)#(.*)", uri)
if match:
fields = cast(Tuple[str, str], match.groups())
return EmbeddingsFileURI(*fields)
else:
return EmbeddingsFileURI(uri, None)
class EmbeddingsTextFile(Iterator[str]):
"""
Utility class for opening embeddings text files. Handles various compression formats,
as well as context management.
# Parameters
file_uri : `str`
It can be:
* a file system path or a URL of an eventually compressed text file or a zip/tar archive
containing a single file.
* URI of the type `(archive_path_or_url)#file_path_inside_archive` if the text file
is contained in a multi-file archive.
encoding : `str`
cache_dir : `str`
"""
DEFAULT_ENCODING = "utf-8"
def __init__(
self, file_uri: str, encoding: str = DEFAULT_ENCODING, cache_dir: str = None
) -> None:
self.uri = file_uri
self._encoding = encoding
self._cache_dir = cache_dir
self._archive_handle: Any = None # only if the file is inside an archive
main_file_uri, path_inside_archive = parse_embeddings_file_uri(file_uri)
main_file_local_path = cached_path(main_file_uri, cache_dir=cache_dir)
if zipfile.is_zipfile(main_file_local_path): # ZIP archive
self._open_inside_zip(main_file_uri, path_inside_archive)
elif tarfile.is_tarfile(main_file_local_path): # TAR archive
self._open_inside_tar(main_file_uri, path_inside_archive)
else: # all the other supported formats, including uncompressed files
if path_inside_archive:
raise ValueError("Unsupported archive format: %s" + main_file_uri)
# All the python packages for compressed files share the same interface of io.open
extension = get_file_extension(main_file_uri)
# Some systems don't have support for all of these libraries, so we import them only
# when necessary.
package = None
if extension in [".txt", ".vec"]:
package = io
elif extension == ".gz":
import gzip
package = gzip
elif extension == ".bz2":
import bz2
package = bz2
elif extension == ".lzma":
import lzma
package = lzma
if package is None:
logger.warning(
'The embeddings file has an unknown file extension "%s". '
"We will assume the file is an (uncompressed) text file",
extension,
)
package = io
self._handle = package.open( # type: ignore
main_file_local_path, "rt", encoding=encoding
)
# To use this with tqdm we'd like to know the number of tokens. It's possible that the
# first line of the embeddings file contains this: if it does, we want to start iteration
# from the 2nd line, otherwise we want to start from the 1st.
# Unfortunately, once we read the first line, we cannot move back the file iterator
# because the underlying file may be "not seekable"; we use itertools.chain instead.
first_line = next(self._handle) # this moves the iterator forward
self.num_tokens = EmbeddingsTextFile._get_num_tokens_from_first_line(first_line)
if self.num_tokens:
# the first line is a header line: start iterating from the 2nd line
self._iterator = self._handle
else:
# the first line is not a header line: start iterating from the 1st line
self._iterator = itertools.chain([first_line], self._handle)
def _open_inside_zip(self, archive_path: str, member_path: Optional[str] = None) -> None:
cached_archive_path = cached_path(archive_path, cache_dir=self._cache_dir)
archive = zipfile.ZipFile(cached_archive_path, "r")
if member_path is None:
members_list = archive.namelist()
member_path = self._get_the_only_file_in_the_archive(members_list, archive_path)
member_path = cast(str, member_path)
member_file = cast(BinaryIO, archive.open(member_path, "r"))
self._handle = io.TextIOWrapper(member_file, encoding=self._encoding)
self._archive_handle = archive
def _open_inside_tar(self, archive_path: str, member_path: Optional[str] = None) -> None:
cached_archive_path = cached_path(archive_path, cache_dir=self._cache_dir)
archive = tarfile.open(cached_archive_path, "r")
if member_path is None:
members_list = archive.getnames()
member_path = self._get_the_only_file_in_the_archive(members_list, archive_path)
member_path = cast(str, member_path)
member = archive.getmember(member_path) # raises exception if not present
member_file = cast(BinaryIO, archive.extractfile(member))
self._handle = io.TextIOWrapper(member_file, encoding=self._encoding)
self._archive_handle = archive
def read(self) -> str:
return "".join(self._iterator)
def readline(self) -> str:
return next(self._iterator)
def close(self) -> None:
self._handle.close()
if self._archive_handle:
self._archive_handle.close()
def __enter__(self) -> "EmbeddingsTextFile":
return self
def __exit__(self, exc_type, exc_val, exc_tb) -> None:
self.close()
def __iter__(self) -> "EmbeddingsTextFile":
return self
def __next__(self) -> str:
return next(self._iterator)
def __len__(self) -> Optional[int]:
if self.num_tokens:
return self.num_tokens
raise AttributeError(
"an object of type EmbeddingsTextFile implements `__len__` only if the underlying "
"text file declares the number of tokens (i.e. the number of lines following)"
"in the first line. That is not the case of this particular instance."
)
@staticmethod
def _get_the_only_file_in_the_archive(members_list: Sequence[str], archive_path: str) -> str:
if len(members_list) > 1:
raise ValueError(
"The archive %s contains multiple files, so you must select "
"one of the files inside providing a uri of the type: %s."
% (
archive_path,
format_embeddings_file_uri("path_or_url_to_archive", "path_inside_archive"),
)
)
return members_list[0]
@staticmethod
def _get_num_tokens_from_first_line(line: str) -> Optional[int]:
"""This function takes in input a string and if it contains 1 or 2 integers, it assumes the
largest one it the number of tokens. Returns None if the line doesn't match that pattern."""
fields = line.split(" ")
if 1 <= len(fields) <= 2:
try:
int_fields = [int(x) for x in fields]
except ValueError:
return None
else:
num_tokens = max(int_fields)
logger.info(
"Recognized a header line in the embedding file with number of tokens: %d",
num_tokens,
)
return num_tokens
return None
| allennlp-master | allennlp/modules/token_embedders/embedding.py |
import torch
from allennlp.modules.token_embedders.embedding import Embedding
from allennlp.modules.seq2vec_encoders.seq2vec_encoder import Seq2VecEncoder
from allennlp.modules.time_distributed import TimeDistributed
from allennlp.modules.token_embedders.token_embedder import TokenEmbedder
@TokenEmbedder.register("character_encoding")
class TokenCharactersEncoder(TokenEmbedder):
"""
A `TokenCharactersEncoder` takes the output of a
[`TokenCharactersIndexer`](../../data/token_indexers/token_characters_indexer.md), which is a tensor of shape
(batch_size, num_tokens, num_characters), embeds the characters, runs a token-level encoder, and
returns the result, which is a tensor of shape (batch_size, num_tokens, encoding_dim). We also
optionally apply dropout after the token-level encoder.
We take the embedding and encoding modules as input, so this class is itself quite simple.
Registered as a `TokenEmbedder` with name "character_encoding".
"""
def __init__(self, embedding: Embedding, encoder: Seq2VecEncoder, dropout: float = 0.0) -> None:
super().__init__()
self._embedding = TimeDistributed(embedding)
self._encoder = TimeDistributed(encoder)
if dropout > 0:
self._dropout = torch.nn.Dropout(p=dropout)
else:
self._dropout = lambda x: x
def get_output_dim(self) -> int:
return self._encoder._module.get_output_dim()
def forward(self, token_characters: torch.Tensor) -> torch.Tensor:
mask = (token_characters != 0).long()
return self._dropout(self._encoder(self._embedding(token_characters), mask))
| allennlp-master | allennlp/modules/token_embedders/token_characters_encoder.py |
"""
A `TokenEmbedder` is a `Module` that
embeds one-hot-encoded tokens as vectors.
"""
from allennlp.modules.token_embedders.token_embedder import TokenEmbedder
from allennlp.modules.token_embedders.embedding import Embedding
from allennlp.modules.token_embedders.token_characters_encoder import TokenCharactersEncoder
from allennlp.modules.token_embedders.elmo_token_embedder import ElmoTokenEmbedder
from allennlp.modules.token_embedders.empty_embedder import EmptyEmbedder
from allennlp.modules.token_embedders.bag_of_word_counts_token_embedder import (
BagOfWordCountsTokenEmbedder,
)
from allennlp.modules.token_embedders.pass_through_token_embedder import PassThroughTokenEmbedder
from allennlp.modules.token_embedders.pretrained_transformer_embedder import (
PretrainedTransformerEmbedder,
)
from allennlp.modules.token_embedders.pretrained_transformer_mismatched_embedder import (
PretrainedTransformerMismatchedEmbedder,
)
| allennlp-master | allennlp/modules/token_embedders/__init__.py |
import torch
from allennlp.modules.token_embedders.token_embedder import TokenEmbedder
@TokenEmbedder.register("pass_through")
class PassThroughTokenEmbedder(TokenEmbedder):
"""
Assumes that the input is already vectorized in some way,
and just returns it.
Registered as a `TokenEmbedder` with name "pass_through".
# Parameters
hidden_dim : `int`, required.
"""
def __init__(self, hidden_dim: int) -> None:
self.hidden_dim = hidden_dim
super().__init__()
def get_output_dim(self):
return self.hidden_dim
def forward(self, tokens: torch.Tensor) -> torch.Tensor:
return tokens
| allennlp-master | allennlp/modules/token_embedders/pass_through_token_embedder.py |
import torch
from allennlp.modules.token_embedders.token_embedder import TokenEmbedder
@TokenEmbedder.register("empty")
class EmptyEmbedder(TokenEmbedder):
"""
Assumes you want to completely ignore the output of a `TokenIndexer` for some reason, and does
not return anything when asked to embed it.
You should almost never need to use this; normally you would just not use a particular
`TokenIndexer`. It's only in very rare cases, like simplicity in data processing for language
modeling (where we use just one `TextField` to handle input embedding and computing target ids),
where you might want to use this.
Registered as a `TokenEmbedder` with name "empty".
"""
def __init__(self) -> None:
super().__init__()
def get_output_dim(self):
return 0
def forward(self, *inputs, **kwargs) -> torch.Tensor:
return None
| allennlp-master | allennlp/modules/token_embedders/empty_embedder.py |
import torch
from allennlp.common.checks import ConfigurationError
from allennlp.data import Vocabulary
from allennlp.modules.token_embedders.token_embedder import TokenEmbedder
from allennlp.nn.util import get_text_field_mask
@TokenEmbedder.register("bag_of_word_counts")
class BagOfWordCountsTokenEmbedder(TokenEmbedder):
"""
Represents a sequence of tokens as a bag of (discrete) word ids, as it was done
in the pre-neural days.
Each sequence gets a vector of length vocabulary size, where the i'th entry in the vector
corresponds to number of times the i'th token in the vocabulary appears in the sequence.
By default, we ignore padding tokens.
Registered as a `TokenEmbedder` with name "bag_of_word_counts".
# Parameters
vocab : `Vocabulary`
vocab_namespace : `str`, optional (default = `"tokens"`)
namespace of vocabulary to embed
projection_dim : `int`, optional (default = `None`)
if specified, will project the resulting bag of words representation
to specified dimension.
ignore_oov : `bool`, optional (default = `False`)
If true, we ignore the OOV token.
"""
def __init__(
self,
vocab: Vocabulary,
vocab_namespace: str = "tokens",
projection_dim: int = None,
ignore_oov: bool = False,
) -> None:
super().__init__()
self.vocab = vocab
self.vocab_size = vocab.get_vocab_size(vocab_namespace)
if projection_dim:
self._projection = torch.nn.Linear(self.vocab_size, projection_dim)
else:
self._projection = None
self._ignore_oov = ignore_oov
oov_token = vocab._oov_token
self._oov_idx = vocab.get_token_to_index_vocabulary(vocab_namespace).get(oov_token)
if self._oov_idx is None:
raise ConfigurationError(
"OOV token does not exist in vocabulary namespace {}".format(vocab_namespace)
)
self.output_dim = projection_dim or self.vocab_size
def get_output_dim(self):
return self.output_dim
def forward(self, inputs: torch.Tensor) -> torch.Tensor:
"""
# Parameters
inputs : `torch.Tensor`
Shape `(batch_size, timesteps, sequence_length)` of word ids
representing the current batch.
# Returns
`torch.Tensor`
The bag-of-words representations for the input sequence, shape
`(batch_size, vocab_size)`
"""
bag_of_words_vectors = []
mask = get_text_field_mask({"tokens": {"tokens": inputs}})
if self._ignore_oov:
# also mask out positions corresponding to oov
mask &= inputs != self._oov_idx
for document, doc_mask in zip(inputs, mask):
document = torch.masked_select(document, doc_mask)
vec = torch.bincount(document, minlength=self.vocab_size).float()
vec = vec.view(1, -1)
bag_of_words_vectors.append(vec)
bag_of_words_output = torch.cat(bag_of_words_vectors, 0)
if self._projection:
projection = self._projection
bag_of_words_output = projection(bag_of_words_output)
return bag_of_words_output
| allennlp-master | allennlp/modules/token_embedders/bag_of_word_counts_token_embedder.py |
from typing import List
import torch
from allennlp.modules.token_embedders.token_embedder import TokenEmbedder
from allennlp.modules.elmo import Elmo
from allennlp.modules.time_distributed import TimeDistributed
@TokenEmbedder.register("elmo_token_embedder")
class ElmoTokenEmbedder(TokenEmbedder):
"""
Compute a single layer of ELMo representations.
This class serves as a convenience when you only want to use one layer of
ELMo representations at the input of your network. It's essentially a wrapper
around Elmo(num_output_representations=1, ...)
Registered as a `TokenEmbedder` with name "elmo_token_embedder".
# Parameters
options_file : `str`, required.
An ELMo JSON options file.
weight_file : `str`, required.
An ELMo hdf5 weight file.
do_layer_norm : `bool`, optional.
Should we apply layer normalization (passed to `ScalarMix`)?
dropout : `float`, optional, (default = `0.5`).
The dropout value to be applied to the ELMo representations.
requires_grad : `bool`, optional
If True, compute gradient of ELMo parameters for fine tuning.
projection_dim : `int`, optional
If given, we will project the ELMo embedding down to this dimension. We recommend that you
try using ELMo with a lot of dropout and no projection first, but we have found a few cases
where projection helps (particularly where there is very limited training data).
vocab_to_cache : `List[str]`, optional.
A list of words to pre-compute and cache character convolutions
for. If you use this option, the ElmoTokenEmbedder expects that you pass word
indices of shape (batch_size, timesteps) to forward, instead
of character indices. If you use this option and pass a word which
wasn't pre-cached, this will break.
scalar_mix_parameters : `List[int]`, optional, (default=`None`)
If not `None`, use these scalar mix parameters to weight the representations
produced by different layers. These mixing weights are not updated during
training. The mixing weights here should be the unnormalized (i.e., pre-softmax)
weights. So, if you wanted to use only the 1st layer of a 2-layer ELMo,
you can set this to [-9e10, 1, -9e10 ].
"""
def __init__(
self,
options_file: str = "https://allennlp.s3.amazonaws.com/models/elmo/2x4096_512_2048cnn_2xhighway/"
+ "elmo_2x4096_512_2048cnn_2xhighway_options.json",
weight_file: str = "https://allennlp.s3.amazonaws.com/models/elmo/2x4096_512_2048cnn_2xhighway/"
+ "elmo_2x4096_512_2048cnn_2xhighway_weights.hdf5",
do_layer_norm: bool = False,
dropout: float = 0.5,
requires_grad: bool = False,
projection_dim: int = None,
vocab_to_cache: List[str] = None,
scalar_mix_parameters: List[float] = None,
) -> None:
super().__init__()
self._elmo = Elmo(
options_file,
weight_file,
1,
do_layer_norm=do_layer_norm,
dropout=dropout,
requires_grad=requires_grad,
vocab_to_cache=vocab_to_cache,
scalar_mix_parameters=scalar_mix_parameters,
)
if projection_dim:
self._projection = torch.nn.Linear(self._elmo.get_output_dim(), projection_dim)
self.output_dim = projection_dim
else:
self._projection = None
self.output_dim = self._elmo.get_output_dim()
def get_output_dim(self) -> int:
return self.output_dim
def forward(self, elmo_tokens: torch.Tensor, word_inputs: torch.Tensor = None) -> torch.Tensor:
"""
# Parameters
elmo_tokens : `torch.Tensor`
Shape `(batch_size, timesteps, 50)` of character ids representing the current batch.
word_inputs : `torch.Tensor`, optional.
If you passed a cached vocab, you can in addition pass a tensor of shape
`(batch_size, timesteps)`, which represent word ids which have been pre-cached.
# Returns
`torch.Tensor`
The ELMo representations for the input sequence, shape
`(batch_size, timesteps, embedding_dim)`
"""
elmo_output = self._elmo(elmo_tokens, word_inputs)
elmo_representations = elmo_output["elmo_representations"][0]
if self._projection:
projection = self._projection
for _ in range(elmo_representations.dim() - 2):
projection = TimeDistributed(projection)
elmo_representations = projection(elmo_representations)
return elmo_representations
| allennlp-master | allennlp/modules/token_embedders/elmo_token_embedder.py |
import torch
from allennlp.common import Registrable
class TokenEmbedder(torch.nn.Module, Registrable):
"""
A `TokenEmbedder` is a `Module` that takes as input a tensor with integer ids that have
been output from a [`TokenIndexer`](/api/data/token_indexers/token_indexer.md) and outputs
a vector per token in the input. The input typically has shape `(batch_size, num_tokens)`
or `(batch_size, num_tokens, num_characters)`, and the output is of shape `(batch_size, num_tokens,
output_dim)`. The simplest `TokenEmbedder` is just an embedding layer, but for
character-level input, it could also be some kind of character encoder.
We add a single method to the basic `Module` API: `get_output_dim()`. This lets us
more easily compute output dimensions for the
[`TextFieldEmbedder`](/api/modules/text_field_embedders/text_field_embedder.md),
which we might need when defining model parameters such as LSTMs or linear layers, which need
to know their input dimension before the layers are called.
"""
default_implementation = "embedding"
def get_output_dim(self) -> int:
"""
Returns the final output dimension that this `TokenEmbedder` uses to represent each
token. This is `not` the shape of the returned tensor, but the last element of that shape.
"""
raise NotImplementedError
| allennlp-master | allennlp/modules/token_embedders/token_embedder.py |
from typing import Dict, MutableMapping, Mapping
from allennlp.data.fields.field import DataArray, Field
from allennlp.data.vocabulary import Vocabulary
class Instance(Mapping[str, Field]):
"""
An `Instance` is a collection of :class:`~allennlp.data.fields.field.Field` objects,
specifying the inputs and outputs to
some model. We don't make a distinction between inputs and outputs here, though - all
operations are done on all fields, and when we return arrays, we return them as dictionaries
keyed by field name. A model can then decide which fields it wants to use as inputs as which
as outputs.
The `Fields` in an `Instance` can start out either indexed or un-indexed. During the data
processing pipeline, all fields will be indexed, after which multiple instances can be combined
into a `Batch` and then converted into padded arrays.
# Parameters
fields : `Dict[str, Field]`
The `Field` objects that will be used to produce data arrays for this instance.
"""
__slots__ = ["fields", "indexed"]
def __init__(self, fields: MutableMapping[str, Field]) -> None:
self.fields = fields
self.indexed = False
# Add methods for `Mapping`. Note, even though the fields are
# mutable, we don't implement `MutableMapping` because we want
# you to use `add_field` and supply a vocabulary.
def __getitem__(self, key: str) -> Field:
return self.fields[key]
def __iter__(self):
return iter(self.fields)
def __len__(self) -> int:
return len(self.fields)
def add_field(self, field_name: str, field: Field, vocab: Vocabulary = None) -> None:
"""
Add the field to the existing fields mapping.
If we have already indexed the Instance, then we also index `field`, so
it is necessary to supply the vocab.
"""
self.fields[field_name] = field
if self.indexed and vocab is not None:
field.index(vocab)
def count_vocab_items(self, counter: Dict[str, Dict[str, int]]):
"""
Increments counts in the given `counter` for all of the vocabulary items in all of the
`Fields` in this `Instance`.
"""
for field in self.fields.values():
field.count_vocab_items(counter)
def index_fields(self, vocab: Vocabulary) -> None:
"""
Indexes all fields in this `Instance` using the provided `Vocabulary`.
This `mutates` the current object, it does not return a new `Instance`.
A `DataLoader` will call this on each pass through a dataset; we use the `indexed`
flag to make sure that indexing only happens once.
This means that if for some reason you modify your vocabulary after you've
indexed your instances, you might get unexpected behavior.
"""
if not self.indexed:
self.indexed = True
for field in self.fields.values():
field.index(vocab)
def get_padding_lengths(self) -> Dict[str, Dict[str, int]]:
"""
Returns a dictionary of padding lengths, keyed by field name. Each `Field` returns a
mapping from padding keys to actual lengths, and we just key that dictionary by field name.
"""
lengths = {}
for field_name, field in self.fields.items():
lengths[field_name] = field.get_padding_lengths()
return lengths
def as_tensor_dict(
self, padding_lengths: Dict[str, Dict[str, int]] = None
) -> Dict[str, DataArray]:
"""
Pads each `Field` in this instance to the lengths given in `padding_lengths` (which is
keyed by field name, then by padding key, the same as the return value in
:func:`get_padding_lengths`), returning a list of torch tensors for each field.
If `padding_lengths` is omitted, we will call `self.get_padding_lengths()` to get the
sizes of the tensors to create.
"""
padding_lengths = padding_lengths or self.get_padding_lengths()
tensors = {}
for field_name, field in self.fields.items():
tensors[field_name] = field.as_tensor(padding_lengths[field_name])
return tensors
def __str__(self) -> str:
base_string = "Instance with fields:\n"
return " ".join(
[base_string] + [f"\t {name}: {field} \n" for name, field in self.fields.items()]
)
def duplicate(self) -> "Instance":
new = Instance({k: field.duplicate() for k, field in self.fields.items()})
new.indexed = self.indexed
return new
| allennlp-master | allennlp/data/instance.py |
"""
A :class:`Batch` represents a collection of `Instance` s to be fed
through a model.
"""
import logging
from collections import defaultdict
from typing import Dict, Iterable, Iterator, List, Union
import numpy
import torch
from allennlp.common.checks import ConfigurationError
from allennlp.common.util import ensure_list
from allennlp.data.instance import Instance
from allennlp.data.vocabulary import Vocabulary
logger = logging.getLogger(__name__)
class Batch(Iterable):
"""
A batch of Instances. In addition to containing the instances themselves,
it contains helper functions for converting the data into tensors.
A Batch just takes an iterable of instances in its constructor and hangs onto them
in a list.
"""
__slots__ = ["instances"]
def __init__(self, instances: Iterable[Instance]) -> None:
super().__init__()
self.instances = ensure_list(instances)
self._check_types()
def _check_types(self) -> None:
"""
Check that all the instances have the same types.
"""
all_instance_fields_and_types: List[Dict[str, str]] = [
{k: v.__class__.__name__ for k, v in x.fields.items()} for x in self.instances
]
# Check all the field names and Field types are the same for every instance.
if not all(all_instance_fields_and_types[0] == x for x in all_instance_fields_and_types):
raise ConfigurationError("You cannot construct a Batch with non-homogeneous Instances.")
def get_padding_lengths(self) -> Dict[str, Dict[str, int]]:
"""
Gets the maximum padding lengths from all `Instances` in this batch. Each `Instance`
has multiple `Fields`, and each `Field` could have multiple things that need padding.
We look at all fields in all instances, and find the max values for each (field_name,
padding_key) pair, returning them in a dictionary.
This can then be used to convert this batch into arrays of consistent length, or to set
model parameters, etc.
"""
padding_lengths: Dict[str, Dict[str, int]] = defaultdict(dict)
all_instance_lengths: List[Dict[str, Dict[str, int]]] = [
instance.get_padding_lengths() for instance in self.instances
]
all_field_lengths: Dict[str, List[Dict[str, int]]] = defaultdict(list)
for instance_lengths in all_instance_lengths:
for field_name, instance_field_lengths in instance_lengths.items():
all_field_lengths[field_name].append(instance_field_lengths)
for field_name, field_lengths in all_field_lengths.items():
for padding_key in field_lengths[0].keys():
max_value = max(x.get(padding_key, 0) for x in field_lengths)
padding_lengths[field_name][padding_key] = max_value
return {**padding_lengths}
def as_tensor_dict(
self, padding_lengths: Dict[str, Dict[str, int]] = None, verbose: bool = False
) -> Dict[str, Union[torch.Tensor, Dict[str, torch.Tensor]]]:
# This complex return type is actually predefined elsewhere as a DataArray,
# but we can't use it because mypy doesn't like it.
"""
This method converts this `Batch` into a set of pytorch Tensors that can be passed
through a model. In order for the tensors to be valid tensors, all `Instances` in this
batch need to be padded to the same lengths wherever padding is necessary, so we do that
first, then we combine all of the tensors for each field in each instance into a set of
batched tensors for each field.
# Parameters
padding_lengths : `Dict[str, Dict[str, int]]`
If a key is present in this dictionary with a non-`None` value, we will pad to that
length instead of the length calculated from the data. This lets you, e.g., set a
maximum value for sentence length if you want to throw out long sequences.
Entries in this dictionary are keyed first by field name (e.g., "question"), then by
padding key (e.g., "num_tokens").
verbose : `bool`, optional (default=`False`)
Should we output logging information when we're doing this padding? If the batch is
large, this is nice to have, because padding a large batch could take a long time.
But if you're doing this inside of a data generator, having all of this output per
batch is a bit obnoxious (and really slow).
# Returns
tensors : `Dict[str, DataArray]`
A dictionary of tensors, keyed by field name, suitable for passing as input to a model.
This is a `batch` of instances, so, e.g., if the instances have a "question" field and
an "answer" field, the "question" fields for all of the instances will be grouped
together into a single tensor, and the "answer" fields for all instances will be
similarly grouped in a parallel set of tensors, for batched computation. Additionally,
for complex `Fields`, the value of the dictionary key is not necessarily a single
tensor. For example, with the `TextField`, the output is a dictionary mapping
`TokenIndexer` keys to tensors. The number of elements in this sub-dictionary
therefore corresponds to the number of `TokenIndexers` used to index the
`TextField`. Each `Field` class is responsible for batching its own output.
"""
padding_lengths = padding_lengths or defaultdict(dict)
# First we need to decide _how much_ to pad. To do that, we find the max length for all
# relevant padding decisions from the instances themselves. Then we check whether we were
# given a max length for a particular field and padding key. If we were, we use that
# instead of the instance-based one.
if verbose:
logger.info(f"Padding batch of size {len(self.instances)} to lengths {padding_lengths}")
logger.info("Getting max lengths from instances")
instance_padding_lengths = self.get_padding_lengths()
if verbose:
logger.info(f"Instance max lengths: {instance_padding_lengths}")
lengths_to_use: Dict[str, Dict[str, int]] = defaultdict(dict)
for field_name, instance_field_lengths in instance_padding_lengths.items():
for padding_key in instance_field_lengths.keys():
if padding_key in padding_lengths[field_name]:
lengths_to_use[field_name][padding_key] = padding_lengths[field_name][
padding_key
]
else:
lengths_to_use[field_name][padding_key] = instance_field_lengths[padding_key]
# Now we actually pad the instances to tensors.
field_tensors: Dict[str, list] = defaultdict(list)
if verbose:
logger.info(f"Now actually padding instances to length: {lengths_to_use}")
for instance in self.instances:
for field, tensors in instance.as_tensor_dict(lengths_to_use).items():
field_tensors[field].append(tensors)
# Finally, we combine the tensors that we got for each instance into one big tensor (or set
# of tensors) per field. The `Field` classes themselves have the logic for batching the
# tensors together, so we grab a dictionary of field_name -> field class from the first
# instance in the batch.
field_classes = self.instances[0].fields
return {
field_name: field_classes[field_name].batch_tensors(field_tensor_list)
for field_name, field_tensor_list in field_tensors.items()
}
def __iter__(self) -> Iterator[Instance]:
return iter(self.instances)
def index_instances(self, vocab: Vocabulary) -> None:
for instance in self.instances:
instance.index_fields(vocab)
def print_statistics(self) -> None:
# Make sure if has been indexed first
sequence_field_lengths: Dict[str, List] = defaultdict(list)
for instance in self.instances:
if not instance.indexed:
raise ConfigurationError(
"Instances must be indexed with vocabulary "
"before asking to print dataset statistics."
)
for field, field_padding_lengths in instance.get_padding_lengths().items():
for key, value in field_padding_lengths.items():
sequence_field_lengths[f"{field}.{key}"].append(value)
print("\n\n----Dataset Statistics----\n")
for name, lengths in sequence_field_lengths.items():
print(f"Statistics for {name}:")
print(
f"\tLengths: Mean: {numpy.mean(lengths)}, Standard Dev: {numpy.std(lengths)}, "
f"Max: {numpy.max(lengths)}, Min: {numpy.min(lengths)}"
)
print("\n10 Random instances:")
for i in numpy.random.randint(len(self.instances), size=10):
print(f"Instance {i}:")
print(f"\t{self.instances[i]}")
| allennlp-master | allennlp/data/batch.py |
from allennlp.data.dataloader import DataLoader, PyTorchDataLoader, allennlp_collate
from allennlp.data.dataset_readers.dataset_reader import (
DatasetReader,
AllennlpDataset,
AllennlpLazyDataset,
)
from allennlp.data.fields.field import DataArray, Field
from allennlp.data.fields.text_field import TextFieldTensors
from allennlp.data.instance import Instance
from allennlp.data.samplers import BatchSampler, Sampler
from allennlp.data.token_indexers.token_indexer import TokenIndexer, IndexedTokenList
from allennlp.data.tokenizers import Token, Tokenizer
from allennlp.data.vocabulary import Vocabulary
from allennlp.data.batch import Batch
| allennlp-master | allennlp/data/__init__.py |
from typing import List, Dict, Union, Iterator
import torch
from torch.utils import data
from allennlp.common.registrable import Registrable
from allennlp.common.lazy import Lazy
from allennlp.data.instance import Instance
from allennlp.data.batch import Batch
from allennlp.data.samplers import Sampler, BatchSampler
TensorDict = Dict[str, Union[torch.Tensor, Dict[str, torch.Tensor]]]
def allennlp_collate(instances: List[Instance]) -> TensorDict:
batch = Batch(instances)
return batch.as_tensor_dict(batch.get_padding_lengths())
class DataLoader(Registrable):
"""
A `DataLoader` is responsible for generating batches of instances from a `Dataset`,
or another source of data. This is essentially just an abstraction over `torch.utils.data.DataLoader`.
This class only has one required method, `__iter__()`, that creates an iterable
of `TensorDict`s. Additionally, this class comes with a `__len__()` method
that just raises a `TypeError` by default. When possible, this should be overriden
to return the number of batches that will be generated by the `__iter__()` method.
"""
default_implementation = "pytorch_dataloader"
def __len__(self) -> int:
raise TypeError
def __iter__(self) -> Iterator[TensorDict]:
raise NotImplementedError
@DataLoader.register("pytorch_dataloader", constructor="from_partial_objects")
class PyTorchDataLoader(data.DataLoader, DataLoader):
"""
A registrable version of the pytorch
[DataLoader](https://pytorch.org/docs/stable/data.html#torch.utils.data.DataLoader).
Firstly, this class exists is so that we can construct a DataLoader
from a configuration file and have a different default `collate_fn`.
You can use this class directly in python code, but it is identical to using
pytorch dataloader with allennlp's custom collate function:
```
from torch.utils.data import DataLoader
from allennlp.data import allennlp_collate
# Construct a dataloader directly for a dataset which contains allennlp
# Instances which have _already_ been indexed.
my_loader = DataLoader(dataset, batch_size=32, collate_fn=allennlp_collate)
```
Secondly, this class adds a `batches_per_epoch` parameter which, if given, determines the number
of batches after which an epoch ends. If this is `None`, then an epoch is set to be one full pass
through your data. You might use this if you have a very large dataset and want more frequent
checkpoints and evaluations on validation data, for instance.
In a typical AllenNLP configuration file, the `dataset` parameter does not get an entry under
the "data_loader", it gets constructed separately.
"""
def __init__(
self,
dataset: data.Dataset,
batch_size: int = 1,
shuffle: bool = False,
sampler: Sampler = None,
batch_sampler: BatchSampler = None,
num_workers: int = 0,
# NOTE: The default for collate_fn is different from the normal `None`.
# We assume that if you are using this class you are using an
# allennlp dataset of instances, which would require this.
collate_fn=allennlp_collate,
pin_memory: bool = False,
drop_last: bool = False,
timeout: int = 0,
worker_init_fn=None,
multiprocessing_context: str = None,
batches_per_epoch: int = None,
):
super().__init__(
dataset=dataset,
batch_size=batch_size,
shuffle=shuffle,
sampler=sampler,
batch_sampler=batch_sampler,
num_workers=num_workers,
collate_fn=collate_fn,
pin_memory=pin_memory,
drop_last=drop_last,
timeout=timeout,
worker_init_fn=worker_init_fn,
multiprocessing_context=multiprocessing_context,
)
self._data_generator = super().__iter__()
self._batches_per_epoch = batches_per_epoch
def __len__(self):
if self._batches_per_epoch is not None:
return self._batches_per_epoch
return super().__len__()
def __iter__(self):
if self._batches_per_epoch is None:
# NOTE: since torch's DataLoader is listed as the first super class of this class,
# super().__iter__() will resolve to the __iter__ method from torch's DataLoader,
# which is what we want.
yield from super().__iter__()
else:
for i in range(self._batches_per_epoch):
try:
yield next(self._data_generator)
except StopIteration: # data_generator is exhausted
self._data_generator = super().__iter__() # so refresh it
yield next(self._data_generator) # and yield required instance
@classmethod
def from_partial_objects(
cls,
dataset: data.Dataset,
batch_size: int = 1,
shuffle: bool = False,
sampler: Lazy[Sampler] = None,
batch_sampler: Lazy[BatchSampler] = None,
num_workers: int = 0,
pin_memory: bool = False,
drop_last: bool = False,
timeout: int = 0,
worker_init_fn=None,
multiprocessing_context: str = None,
batches_per_epoch: int = None,
) -> "PyTorchDataLoader":
batch_sampler_ = (
None if batch_sampler is None else batch_sampler.construct(data_source=dataset)
)
sampler_ = None if sampler is None else sampler.construct(data_source=dataset)
return cls(
dataset=dataset,
batch_size=batch_size,
shuffle=shuffle,
sampler=sampler_,
batch_sampler=batch_sampler_,
num_workers=num_workers,
# NOTE: The default for collate_fn is different from the normal `None`.
# We assume that if you are using this class you are using an
# allennlp dataset of instances, which would require this.
collate_fn=allennlp_collate,
pin_memory=pin_memory,
drop_last=drop_last,
timeout=timeout,
worker_init_fn=worker_init_fn,
multiprocessing_context=multiprocessing_context,
batches_per_epoch=batches_per_epoch,
)
| allennlp-master | allennlp/data/dataloader.py |
"""
A Vocabulary maps strings to integers, allowing for strings to be mapped to an
out-of-vocabulary token.
"""
import codecs
import copy
import logging
import os
import re
from collections import defaultdict
from typing import Any, Callable, Dict, Iterable, List, Optional, Set, Union, TYPE_CHECKING
from filelock import FileLock
from allennlp.common import Registrable
from allennlp.common.file_utils import cached_path
from allennlp.common.checks import ConfigurationError
from allennlp.common.tqdm import Tqdm
from allennlp.common.util import namespace_match
if TYPE_CHECKING:
from allennlp.data import instance as adi # noqa
logger = logging.getLogger(__name__)
DEFAULT_NON_PADDED_NAMESPACES = ("*tags", "*labels")
DEFAULT_PADDING_TOKEN = "@@PADDING@@"
DEFAULT_OOV_TOKEN = "@@UNKNOWN@@"
NAMESPACE_PADDING_FILE = "non_padded_namespaces.txt"
_NEW_LINE_REGEX = re.compile(r"\n|\r\n")
class _NamespaceDependentDefaultDict(defaultdict):
"""
This is a [defaultdict]
(https://docs.python.org/2/library/collections.html#collections.defaultdict) where the
default value is dependent on the key that is passed.
We use "namespaces" in the :class:`Vocabulary` object to keep track of several different
mappings from strings to integers, so that we have a consistent API for mapping words, tags,
labels, characters, or whatever else you want, into integers. The issue is that some of those
namespaces (words and characters) should have integers reserved for padding and
out-of-vocabulary tokens, while others (labels and tags) shouldn't. This class allows you to
specify filters on the namespace (the key used in the `defaultdict`), and use different
default values depending on whether the namespace passes the filter.
To do filtering, we take a set of `non_padded_namespaces`. This is a set of strings
that are either matched exactly against the keys, or treated as suffixes, if the
string starts with `*`. In other words, if `*tags` is in `non_padded_namespaces` then
`passage_tags`, `question_tags`, etc. (anything that ends with `tags`) will have the
`non_padded` default value.
# Parameters
non_padded_namespaces : `Iterable[str]`
A set / list / tuple of strings describing which namespaces are not padded. If a namespace
(key) is missing from this dictionary, we will use :func:`namespace_match` to see whether
the namespace should be padded. If the given namespace matches any of the strings in this
list, we will use `non_padded_function` to initialize the value for that namespace, and
we will use `padded_function` otherwise.
padded_function : `Callable[[], Any]`
A zero-argument function to call to initialize a value for a namespace that `should` be
padded.
non_padded_function : `Callable[[], Any]`
A zero-argument function to call to initialize a value for a namespace that should `not` be
padded.
"""
def __init__(
self,
non_padded_namespaces: Iterable[str],
padded_function: Callable[[], Any],
non_padded_function: Callable[[], Any],
) -> None:
self._non_padded_namespaces = set(non_padded_namespaces)
self._padded_function = padded_function
self._non_padded_function = non_padded_function
super().__init__()
def __missing__(self, key: str):
if any(namespace_match(pattern, key) for pattern in self._non_padded_namespaces):
value = self._non_padded_function()
else:
value = self._padded_function()
dict.__setitem__(self, key, value)
return value
def add_non_padded_namespaces(self, non_padded_namespaces: Set[str]):
# add non_padded_namespaces which weren't already present
self._non_padded_namespaces.update(non_padded_namespaces)
class _TokenToIndexDefaultDict(_NamespaceDependentDefaultDict):
def __init__(self, non_padded_namespaces: Set[str], padding_token: str, oov_token: str) -> None:
super().__init__(
non_padded_namespaces, lambda: {padding_token: 0, oov_token: 1}, lambda: {}
)
class _IndexToTokenDefaultDict(_NamespaceDependentDefaultDict):
def __init__(self, non_padded_namespaces: Set[str], padding_token: str, oov_token: str) -> None:
super().__init__(
non_padded_namespaces, lambda: {0: padding_token, 1: oov_token}, lambda: {}
)
def _read_pretrained_tokens(embeddings_file_uri: str) -> List[str]:
# Moving this import to the top breaks everything (cycling import, I guess)
from allennlp.modules.token_embedders.embedding import EmbeddingsTextFile
logger.info("Reading pretrained tokens from: %s", embeddings_file_uri)
tokens: List[str] = []
with EmbeddingsTextFile(embeddings_file_uri) as embeddings_file:
for line_number, line in enumerate(Tqdm.tqdm(embeddings_file), start=1):
token_end = line.find(" ")
if token_end >= 0:
token = line[:token_end]
tokens.append(token)
else:
line_begin = line[:20] + "..." if len(line) > 20 else line
logger.warning("Skipping line number %d: %s", line_number, line_begin)
return tokens
class Vocabulary(Registrable):
"""
A Vocabulary maps strings to integers, allowing for strings to be mapped to an
out-of-vocabulary token.
Vocabularies are fit to a particular dataset, which we use to decide which tokens are
in-vocabulary.
Vocabularies also allow for several different namespaces, so you can have separate indices for
'a' as a word, and 'a' as a character, for instance, and so we can use this object to also map
tag and label strings to indices, for a unified :class:`~.fields.field.Field` API. Most of the
methods on this class allow you to pass in a namespace; by default we use the 'tokens'
namespace, and you can omit the namespace argument everywhere and just use the default.
This class is registered as a `Vocabulary` with four different names, which all point to
different `@classmethod` constructors found in this class. `from_instances` is registered as
"from_instances", `from_files` is registered as "from_files", `from_files_and_instances` is
registered as "extend", and `empty` is registered as "empty". If you are using a configuration
file to construct a vocabulary, you can use any of those strings as the "type" key in the
configuration file to use the corresponding `@classmethod` to construct the object.
"from_instances" is the default. Look at the docstring for the `@classmethod` to see what keys
are allowed in the configuration file (when there is an `instances` argument to the
`@classmethod`, it will be passed in separately and does not need a corresponding key in the
configuration file).
# Parameters
counter : `Dict[str, Dict[str, int]]`, optional (default=`None`)
A collection of counts from which to initialize this vocabulary. We will examine the
counts and, together with the other parameters to this class, use them to decide which
words are in-vocabulary. If this is `None`, we just won't initialize the vocabulary with
anything.
min_count : `Dict[str, int]`, optional (default=`None`)
When initializing the vocab from a counter, you can specify a minimum count, and every
token with a count less than this will not be added to the dictionary. These minimum
counts are `namespace-specific`, so you can specify different minimums for labels versus
words tokens, for example. If a namespace does not have a key in the given dictionary, we
will add all seen tokens to that namespace.
max_vocab_size : `Union[int, Dict[str, int]]`, optional (default=`None`)
If you want to cap the number of tokens in your vocabulary, you can do so with this
parameter. If you specify a single integer, every namespace will have its vocabulary fixed
to be no larger than this. If you specify a dictionary, then each namespace in the
`counter` can have a separate maximum vocabulary size. Any missing key will have a value
of `None`, which means no cap on the vocabulary size.
non_padded_namespaces : `Iterable[str]`, optional
By default, we assume you are mapping word / character tokens to integers, and so you want
to reserve word indices for padding and out-of-vocabulary tokens. However, if you are
mapping NER or SRL tags, or class labels, to integers, you probably do not want to reserve
indices for padding and out-of-vocabulary tokens. Use this field to specify which
namespaces should `not` have padding and OOV tokens added.
The format of each element of this is either a string, which must match field names
exactly, or `*` followed by a string, which we match as a suffix against field names.
We try to make the default here reasonable, so that you don't have to think about this.
The default is `("*tags", "*labels")`, so as long as your namespace ends in "tags" or
"labels" (which is true by default for all tag and label fields in this code), you don't
have to specify anything here.
pretrained_files : `Dict[str, str]`, optional
If provided, this map specifies the path to optional pretrained embedding files for each
namespace. This can be used to either restrict the vocabulary to only words which appear
in this file, or to ensure that any words in this file are included in the vocabulary
regardless of their count, depending on the value of `only_include_pretrained_words`.
Words which appear in the pretrained embedding file but not in the data are NOT included
in the Vocabulary.
min_pretrained_embeddings : `Dict[str, int]`, optional
If provided, specifies for each namespace a minimum number of lines (typically the
most common words) to keep from pretrained embedding files, even for words not
appearing in the data.
only_include_pretrained_words : `bool`, optional (default=`False`)
This defines the strategy for using any pretrained embedding files which may have been
specified in `pretrained_files`. If False, an inclusive strategy is used: and words
which are in the `counter` and in the pretrained file are added to the `Vocabulary`,
regardless of whether their count exceeds `min_count` or not. If True, we use an
exclusive strategy: words are only included in the Vocabulary if they are in the pretrained
embedding file (their count must still be at least `min_count`).
tokens_to_add : `Dict[str, List[str]]`, optional (default=`None`)
If given, this is a list of tokens to add to the vocabulary, keyed by the namespace to add
the tokens to. This is a way to be sure that certain items appear in your vocabulary,
regardless of any other vocabulary computation.
padding_token : `str`, optional (default=`DEFAULT_PADDING_TOKEN`)
If given, this the string used for padding.
oov_token : `str`, optional (default=`DEFAULT_OOV_TOKEN`)
If given, this the string used for the out of vocabulary (OOVs) tokens.
"""
default_implementation = "from_instances"
def __init__(
self,
counter: Dict[str, Dict[str, int]] = None,
min_count: Dict[str, int] = None,
max_vocab_size: Union[int, Dict[str, int]] = None,
non_padded_namespaces: Iterable[str] = DEFAULT_NON_PADDED_NAMESPACES,
pretrained_files: Optional[Dict[str, str]] = None,
only_include_pretrained_words: bool = False,
tokens_to_add: Dict[str, List[str]] = None,
min_pretrained_embeddings: Dict[str, int] = None,
padding_token: Optional[str] = DEFAULT_PADDING_TOKEN,
oov_token: Optional[str] = DEFAULT_OOV_TOKEN,
) -> None:
self._padding_token = padding_token if padding_token is not None else DEFAULT_PADDING_TOKEN
self._oov_token = oov_token if oov_token is not None else DEFAULT_OOV_TOKEN
self._non_padded_namespaces = set(non_padded_namespaces)
self._token_to_index = _TokenToIndexDefaultDict(
self._non_padded_namespaces, self._padding_token, self._oov_token
)
self._index_to_token = _IndexToTokenDefaultDict(
self._non_padded_namespaces, self._padding_token, self._oov_token
)
self._retained_counter: Optional[Dict[str, Dict[str, int]]] = None
# Made an empty vocabulary, now extend it.
self._extend(
counter,
min_count,
max_vocab_size,
non_padded_namespaces,
pretrained_files,
only_include_pretrained_words,
tokens_to_add,
min_pretrained_embeddings,
)
@classmethod
def from_instances(
cls,
instances: Iterable["adi.Instance"],
min_count: Dict[str, int] = None,
max_vocab_size: Union[int, Dict[str, int]] = None,
non_padded_namespaces: Iterable[str] = DEFAULT_NON_PADDED_NAMESPACES,
pretrained_files: Optional[Dict[str, str]] = None,
only_include_pretrained_words: bool = False,
tokens_to_add: Dict[str, List[str]] = None,
min_pretrained_embeddings: Dict[str, int] = None,
padding_token: Optional[str] = DEFAULT_PADDING_TOKEN,
oov_token: Optional[str] = DEFAULT_OOV_TOKEN,
) -> "Vocabulary":
"""
Constructs a vocabulary given a collection of `Instances` and some parameters.
We count all of the vocabulary items in the instances, then pass those counts
and the other parameters, to :func:`__init__`. See that method for a description
of what the other parameters do.
The `instances` parameter does not get an entry in a typical AllenNLP configuration file,
but the other parameters do (if you want non-default parameters).
"""
logger.info("Fitting token dictionary from dataset.")
padding_token = padding_token if padding_token is not None else DEFAULT_PADDING_TOKEN
oov_token = oov_token if oov_token is not None else DEFAULT_OOV_TOKEN
namespace_token_counts: Dict[str, Dict[str, int]] = defaultdict(lambda: defaultdict(int))
for instance in Tqdm.tqdm(instances, desc="building vocab"):
instance.count_vocab_items(namespace_token_counts)
return cls(
counter=namespace_token_counts,
min_count=min_count,
max_vocab_size=max_vocab_size,
non_padded_namespaces=non_padded_namespaces,
pretrained_files=pretrained_files,
only_include_pretrained_words=only_include_pretrained_words,
tokens_to_add=tokens_to_add,
min_pretrained_embeddings=min_pretrained_embeddings,
padding_token=padding_token,
oov_token=oov_token,
)
@classmethod
def from_files(
cls,
directory: Union[str, os.PathLike],
padding_token: Optional[str] = DEFAULT_PADDING_TOKEN,
oov_token: Optional[str] = DEFAULT_OOV_TOKEN,
) -> "Vocabulary":
"""
Loads a `Vocabulary` that was serialized either using `save_to_files` or inside
a model archive file.
# Parameters
directory : `str`
The directory or archive file containing the serialized vocabulary.
"""
logger.info("Loading token dictionary from %s.", directory)
padding_token = padding_token if padding_token is not None else DEFAULT_PADDING_TOKEN
oov_token = oov_token if oov_token is not None else DEFAULT_OOV_TOKEN
if not os.path.isdir(directory):
base_directory = cached_path(directory, extract_archive=True)
# For convenience we'll check for a 'vocabulary' subdirectory of the archive.
# That way you can use model archives directly.
vocab_subdir = os.path.join(base_directory, "vocabulary")
if os.path.isdir(vocab_subdir):
directory = vocab_subdir
elif os.path.isdir(base_directory):
directory = base_directory
else:
raise ConfigurationError(f"{directory} is neither a directory nor an archive")
# We use a lock file to avoid race conditions where multiple processes
# might be reading/writing from/to the same vocab files at once.
with FileLock(os.path.join(directory, ".lock")):
with codecs.open(
os.path.join(directory, NAMESPACE_PADDING_FILE), "r", "utf-8"
) as namespace_file:
non_padded_namespaces = [namespace_str.strip() for namespace_str in namespace_file]
vocab = cls(
non_padded_namespaces=non_padded_namespaces,
padding_token=padding_token,
oov_token=oov_token,
)
# Check every file in the directory.
for namespace_filename in os.listdir(directory):
if namespace_filename == NAMESPACE_PADDING_FILE:
continue
if namespace_filename.startswith("."):
continue
namespace = namespace_filename.replace(".txt", "")
if any(namespace_match(pattern, namespace) for pattern in non_padded_namespaces):
is_padded = False
else:
is_padded = True
filename = os.path.join(directory, namespace_filename)
vocab.set_from_file(filename, is_padded, namespace=namespace, oov_token=oov_token)
return vocab
@classmethod
def from_files_and_instances(
cls,
instances: Iterable["adi.Instance"],
directory: str,
padding_token: Optional[str] = DEFAULT_PADDING_TOKEN,
oov_token: Optional[str] = DEFAULT_OOV_TOKEN,
min_count: Dict[str, int] = None,
max_vocab_size: Union[int, Dict[str, int]] = None,
non_padded_namespaces: Iterable[str] = DEFAULT_NON_PADDED_NAMESPACES,
pretrained_files: Optional[Dict[str, str]] = None,
only_include_pretrained_words: bool = False,
tokens_to_add: Dict[str, List[str]] = None,
min_pretrained_embeddings: Dict[str, int] = None,
) -> "Vocabulary":
"""
Extends an already generated vocabulary using a collection of instances.
The `instances` parameter does not get an entry in a typical AllenNLP configuration file,
but the other parameters do (if you want non-default parameters). See `__init__` for a
description of what the other parameters mean.
"""
vocab = cls.from_files(directory, padding_token, oov_token)
logger.info("Fitting token dictionary from dataset.")
namespace_token_counts: Dict[str, Dict[str, int]] = defaultdict(lambda: defaultdict(int))
for instance in Tqdm.tqdm(instances):
instance.count_vocab_items(namespace_token_counts)
vocab._extend(
counter=namespace_token_counts,
min_count=min_count,
max_vocab_size=max_vocab_size,
non_padded_namespaces=non_padded_namespaces,
pretrained_files=pretrained_files,
only_include_pretrained_words=only_include_pretrained_words,
tokens_to_add=tokens_to_add,
min_pretrained_embeddings=min_pretrained_embeddings,
)
return vocab
@classmethod
def empty(cls) -> "Vocabulary":
"""
This method returns a bare vocabulary instantiated with `cls()` (so, `Vocabulary()` if you
haven't made a subclass of this object). The only reason to call `Vocabulary.empty()`
instead of `Vocabulary()` is if you are instantiating this object from a config file. We
register this constructor with the key "empty", so if you know that you don't need to
compute a vocabulary (either because you're loading a pre-trained model from an archive
file, you're using a pre-trained transformer that has its own vocabulary, or something
else), you can use this to avoid having the default vocabulary construction code iterate
through the data.
"""
return cls()
def set_from_file(
self,
filename: str,
is_padded: bool = True,
oov_token: str = DEFAULT_OOV_TOKEN,
namespace: str = "tokens",
):
"""
If you already have a vocabulary file for a trained model somewhere, and you really want to
use that vocabulary file instead of just setting the vocabulary from a dataset, for
whatever reason, you can do that with this method. You must specify the namespace to use,
and we assume that you want to use padding and OOV tokens for this.
# Parameters
filename : `str`
The file containing the vocabulary to load. It should be formatted as one token per
line, with nothing else in the line. The index we assign to the token is the line
number in the file (1-indexed if `is_padded`, 0-indexed otherwise). Note that this
file should contain the OOV token string!
is_padded : `bool`, optional (default=`True`)
Is this vocabulary padded? For token / word / character vocabularies, this should be
`True`; while for tag or label vocabularies, this should typically be `False`. If
`True`, we add a padding token with index 0, and we enforce that the `oov_token` is
present in the file.
oov_token : `str`, optional (default=`DEFAULT_OOV_TOKEN`)
What token does this vocabulary use to represent out-of-vocabulary characters? This
must show up as a line in the vocabulary file. When we find it, we replace
`oov_token` with `self._oov_token`, because we only use one OOV token across
namespaces.
namespace : `str`, optional (default=`"tokens"`)
What namespace should we overwrite with this vocab file?
"""
if is_padded:
self._token_to_index[namespace] = {self._padding_token: 0}
self._index_to_token[namespace] = {0: self._padding_token}
else:
self._token_to_index[namespace] = {}
self._index_to_token[namespace] = {}
with codecs.open(filename, "r", "utf-8") as input_file:
lines = _NEW_LINE_REGEX.split(input_file.read())
# Be flexible about having final newline or not
if lines and lines[-1] == "":
lines = lines[:-1]
for i, line in enumerate(lines):
index = i + 1 if is_padded else i
token = line.replace("@@NEWLINE@@", "\n")
if token == oov_token:
token = self._oov_token
self._token_to_index[namespace][token] = index
self._index_to_token[namespace][index] = token
if is_padded:
assert self._oov_token in self._token_to_index[namespace], "OOV token not found!"
def extend_from_instances(self, instances: Iterable["adi.Instance"]) -> None:
logger.info("Fitting token dictionary from dataset.")
namespace_token_counts: Dict[str, Dict[str, int]] = defaultdict(lambda: defaultdict(int))
for instance in Tqdm.tqdm(instances):
instance.count_vocab_items(namespace_token_counts)
self._extend(counter=namespace_token_counts)
def extend_from_vocab(self, vocab: "Vocabulary") -> None:
"""
Adds all vocabulary items from all namespaces in the given vocabulary to this vocabulary.
Useful if you want to load a model and extends its vocabulary from new instances.
We also add all non-padded namespaces from the given vocabulary to this vocabulary.
"""
self._non_padded_namespaces.update(vocab._non_padded_namespaces)
self._token_to_index._non_padded_namespaces.update(vocab._non_padded_namespaces)
self._index_to_token._non_padded_namespaces.update(vocab._non_padded_namespaces)
for namespace in vocab.get_namespaces():
for token in vocab.get_token_to_index_vocabulary(namespace):
self.add_token_to_namespace(token, namespace)
def _extend(
self,
counter: Dict[str, Dict[str, int]] = None,
min_count: Dict[str, int] = None,
max_vocab_size: Union[int, Dict[str, int]] = None,
non_padded_namespaces: Iterable[str] = DEFAULT_NON_PADDED_NAMESPACES,
pretrained_files: Optional[Dict[str, str]] = None,
only_include_pretrained_words: bool = False,
tokens_to_add: Dict[str, List[str]] = None,
min_pretrained_embeddings: Dict[str, int] = None,
) -> None:
"""
This method can be used for extending already generated vocabulary. It takes same
parameters as Vocabulary initializer. The `_token_to_index` and `_index_to_token`
mappings of calling vocabulary will be retained. It is an inplace operation so None will be
returned.
"""
if not isinstance(max_vocab_size, dict):
int_max_vocab_size = max_vocab_size
max_vocab_size = defaultdict(lambda: int_max_vocab_size) # type: ignore
min_count = min_count or {}
pretrained_files = pretrained_files or {}
min_pretrained_embeddings = min_pretrained_embeddings or {}
non_padded_namespaces = set(non_padded_namespaces)
counter = counter or {}
tokens_to_add = tokens_to_add or {}
self._retained_counter = counter
# Make sure vocabulary extension is safe.
current_namespaces = {*self._token_to_index}
extension_namespaces = {*counter, *tokens_to_add}
for namespace in current_namespaces & extension_namespaces:
# if new namespace was already present
# Either both should be padded or none should be.
original_padded = not any(
namespace_match(pattern, namespace) for pattern in self._non_padded_namespaces
)
extension_padded = not any(
namespace_match(pattern, namespace) for pattern in non_padded_namespaces
)
if original_padded != extension_padded:
raise ConfigurationError(
"Common namespace {} has conflicting ".format(namespace)
+ "setting of padded = True/False. "
+ "Hence extension cannot be done."
)
# Add new non-padded namespaces for extension
self._token_to_index.add_non_padded_namespaces(non_padded_namespaces)
self._index_to_token.add_non_padded_namespaces(non_padded_namespaces)
self._non_padded_namespaces.update(non_padded_namespaces)
for namespace in counter:
pretrained_set: Optional[Set] = None
if namespace in pretrained_files:
pretrained_list = _read_pretrained_tokens(pretrained_files[namespace])
min_embeddings = min_pretrained_embeddings.get(namespace, 0)
if min_embeddings > 0:
tokens_old = tokens_to_add.get(namespace, [])
tokens_new = pretrained_list[:min_embeddings]
tokens_to_add[namespace] = tokens_old + tokens_new
pretrained_set = set(pretrained_list)
token_counts = list(counter[namespace].items())
token_counts.sort(key=lambda x: x[1], reverse=True)
max_vocab: Optional[int]
try:
max_vocab = max_vocab_size[namespace]
except KeyError:
max_vocab = None
if max_vocab:
token_counts = token_counts[:max_vocab]
for token, count in token_counts:
if pretrained_set is not None:
if only_include_pretrained_words:
if token in pretrained_set and count >= min_count.get(namespace, 1):
self.add_token_to_namespace(token, namespace)
elif token in pretrained_set or count >= min_count.get(namespace, 1):
self.add_token_to_namespace(token, namespace)
elif count >= min_count.get(namespace, 1):
self.add_token_to_namespace(token, namespace)
for namespace, tokens in tokens_to_add.items():
for token in tokens:
self.add_token_to_namespace(token, namespace)
def __getstate__(self):
"""
Need to sanitize defaultdict and defaultdict-like objects
by converting them to vanilla dicts when we pickle the vocabulary.
"""
state = copy.copy(self.__dict__)
state["_token_to_index"] = dict(state["_token_to_index"])
state["_index_to_token"] = dict(state["_index_to_token"])
if "_retained_counter" in state:
state["_retained_counter"] = {
key: dict(value) for key, value in state["_retained_counter"].items()
}
return state
def __setstate__(self, state):
"""
Conversely, when we unpickle, we need to reload the plain dicts
into our special DefaultDict subclasses.
"""
self.__dict__ = copy.copy(state)
self._token_to_index = _TokenToIndexDefaultDict(
self._non_padded_namespaces, self._padding_token, self._oov_token
)
self._token_to_index.update(state["_token_to_index"])
self._index_to_token = _IndexToTokenDefaultDict(
self._non_padded_namespaces, self._padding_token, self._oov_token
)
self._index_to_token.update(state["_index_to_token"])
def save_to_files(self, directory: str) -> None:
"""
Persist this Vocabulary to files so it can be reloaded later.
Each namespace corresponds to one file.
# Parameters
directory : `str`
The directory where we save the serialized vocabulary.
"""
os.makedirs(directory, exist_ok=True)
if os.listdir(directory):
logger.warning("vocabulary serialization directory %s is not empty", directory)
# We use a lock file to avoid race conditions where multiple processes
# might be reading/writing from/to the same vocab files at once.
with FileLock(os.path.join(directory, ".lock")):
with codecs.open(
os.path.join(directory, NAMESPACE_PADDING_FILE), "w", "utf-8"
) as namespace_file:
for namespace_str in self._non_padded_namespaces:
print(namespace_str, file=namespace_file)
for namespace, mapping in self._index_to_token.items():
# Each namespace gets written to its own file, in index order.
with codecs.open(
os.path.join(directory, namespace + ".txt"), "w", "utf-8"
) as token_file:
num_tokens = len(mapping)
start_index = 1 if mapping[0] == self._padding_token else 0
for i in range(start_index, num_tokens):
print(mapping[i].replace("\n", "@@NEWLINE@@"), file=token_file)
def is_padded(self, namespace: str) -> bool:
"""
Returns whether or not there are padding and OOV tokens added to the given namespace.
"""
return self._index_to_token[namespace][0] == self._padding_token
def add_token_to_namespace(self, token: str, namespace: str = "tokens") -> int:
"""
Adds `token` to the index, if it is not already present. Either way, we return the index of
the token.
"""
if not isinstance(token, str):
raise ValueError(
"Vocabulary tokens must be strings, or saving and loading will break."
" Got %s (with type %s)" % (repr(token), type(token))
)
if token not in self._token_to_index[namespace]:
index = len(self._token_to_index[namespace])
self._token_to_index[namespace][token] = index
self._index_to_token[namespace][index] = token
return index
else:
return self._token_to_index[namespace][token]
def add_tokens_to_namespace(self, tokens: List[str], namespace: str = "tokens") -> List[int]:
"""
Adds `tokens` to the index, if they are not already present. Either way, we return the
indices of the tokens in the order that they were given.
"""
return [self.add_token_to_namespace(token, namespace) for token in tokens]
def get_index_to_token_vocabulary(self, namespace: str = "tokens") -> Dict[int, str]:
return self._index_to_token[namespace]
def get_token_to_index_vocabulary(self, namespace: str = "tokens") -> Dict[str, int]:
return self._token_to_index[namespace]
def get_token_index(self, token: str, namespace: str = "tokens") -> int:
try:
return self._token_to_index[namespace][token]
except KeyError:
try:
return self._token_to_index[namespace][self._oov_token]
except KeyError:
logger.error("Namespace: %s", namespace)
logger.error("Token: %s", token)
raise KeyError(
f"'{token}' not found in vocab namespace '{namespace}', and namespace "
f"does not contain the default OOV token ('{self._oov_token}')"
)
def get_token_from_index(self, index: int, namespace: str = "tokens") -> str:
return self._index_to_token[namespace][index]
def get_vocab_size(self, namespace: str = "tokens") -> int:
return len(self._token_to_index[namespace])
def get_namespaces(self) -> Set[str]:
return set(self._index_to_token.keys())
def __eq__(self, other):
if isinstance(self, other.__class__):
return self.__dict__ == other.__dict__
return False
def __str__(self) -> str:
base_string = "Vocabulary with namespaces:\n"
non_padded_namespaces = f"\tNon Padded Namespaces: {self._non_padded_namespaces}\n"
namespaces = [
f"\tNamespace: {name}, Size: {self.get_vocab_size(name)} \n"
for name in self._index_to_token
]
return " ".join([base_string, non_padded_namespaces] + namespaces)
def __repr__(self) -> str:
# This is essentially the same as __str__, but with no newlines
base_string = "Vocabulary with namespaces: "
namespaces = [
f"{name}, Size: {self.get_vocab_size(name)} ||" for name in self._index_to_token
]
non_padded_namespaces = f"Non Padded Namespaces: {self._non_padded_namespaces}"
return " ".join([base_string] + namespaces + [non_padded_namespaces])
def print_statistics(self) -> None:
if self._retained_counter:
logger.info(
"Printed vocabulary statistics are only for the part of the vocabulary generated "
"from instances. If vocabulary is constructed by extending saved vocabulary with "
"dataset instances, the directly loaded portion won't be considered here."
)
print("\n\n----Vocabulary Statistics----\n")
# Since we don't saved counter info, it is impossible to consider pre-saved portion.
for namespace in self._retained_counter:
tokens_with_counts = list(self._retained_counter[namespace].items())
tokens_with_counts.sort(key=lambda x: x[1], reverse=True)
print(f"\nTop 10 most frequent tokens in namespace '{namespace}':")
for token, freq in tokens_with_counts[:10]:
print(f"\tToken: {token}\t\tFrequency: {freq}")
# Now sort by token length, not frequency
tokens_with_counts.sort(key=lambda x: len(x[0]), reverse=True)
print(f"\nTop 10 longest tokens in namespace '{namespace}':")
for token, freq in tokens_with_counts[:10]:
print(f"\tToken: {token}\t\tlength: {len(token)}\tFrequency: {freq}")
print(f"\nTop 10 shortest tokens in namespace '{namespace}':")
for token, freq in reversed(tokens_with_counts[-10:]):
print(f"\tToken: {token}\t\tlength: {len(token)}\tFrequency: {freq}")
else:
# _retained_counter would be set only if instances were used for vocabulary construction.
logger.info(
"Vocabulary statistics cannot be printed since "
"dataset instances were not used for its construction."
)
# We can't decorate `Vocabulary` with `Vocabulary.register()`, because `Vocabulary` hasn't been
# defined yet. So we put these down here.
Vocabulary.register("from_instances", constructor="from_instances")(Vocabulary)
Vocabulary.register("from_files", constructor="from_files")(Vocabulary)
Vocabulary.register("extend", constructor="from_files_and_instances")(Vocabulary)
Vocabulary.register("empty", constructor="empty")(Vocabulary)
| allennlp-master | allennlp/data/vocabulary.py |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.