python_code | repo_name | file_path
---|---|---|
from typing import Dict
from allennlp.data import DatasetReader, Instance, TokenIndexer
from allennlp.data.data_loaders import MultiProcessDataLoader
from allennlp.data.fields import LabelField, TextField
from allennlp.data.tokenizers import Token
from allennlp.data.token_indexers import SingleIdTokenIndexer
from allennlp.data.samplers.bucket_batch_sampler import BucketBatchSampler
from allennlp.data.vocabulary import Vocabulary
class MyDatasetReader(DatasetReader):
def __init__(self, **kwargs):
super().__init__(**kwargs)
self._token_indexers: Dict[str, TokenIndexer] = {
"tokens": SingleIdTokenIndexer()
}
def _read(self, file_path):
for tokens, label in zip(
[["a", "b", "c", "d"], ["e"], ["f", "g", "h"], ["i", "j"]],
["a", "b", "c", "d"],
):
yield Instance(
{
"tokens": TextField(
[Token(t) for t in tokens], self._token_indexers
),
"label": LabelField(label),
}
)
| allennlp-guide-master | exercises/part2/reading-data/data_loader_setup.py |
# To create fields, simply pass the data to the constructor.
# NOTE: Don't worry about the token_indexers too much for now. We have a whole
# chapter on why TextFields are set up this way, and how they work.
tokens = [Token("The"), Token("best"), Token("movie"), Token("ever"), Token("!")]
token_indexers: Dict[str, TokenIndexer] = {"tokens": SingleIdTokenIndexer()}
text_field = TextField(tokens, token_indexers=token_indexers)
label_field = LabelField("pos")
sequence_label_field = SequenceLabelField(
["DET", "ADJ", "NOUN", "ADV", "PUNKT"], text_field
)
# You can use print() on fields to see their content
print(text_field)
print(label_field)
print(sequence_label_field)
# Many of the fields implement native python methods in intuitive ways
print(len(sequence_label_field))
print([label for label in sequence_label_field])
# Fields know how to create empty fields of the same type
print(text_field.empty_field())
print(label_field.empty_field())
print(sequence_label_field.empty_field())
# You can count vocabulary items in fields
counter: Dict[str, Dict[str, int]] = defaultdict(Counter)
text_field.count_vocab_items(counter)
print(counter)
label_field.count_vocab_items(counter)
print(counter)
sequence_label_field.count_vocab_items(counter)
print(counter)
# Create Vocabulary for indexing fields
vocab = Vocabulary(counter)
# Fields know how to turn themselves into tensors
text_field.index(vocab)
# NOTE: in practice, we will batch together instances and use the maximum padding
# lengths, instead of getting them from a single instance.
# You can print this if you want to see what the padding_lengths dictionary looks
# like, but it can sometimes be a bit cryptic.
padding_lengths = text_field.get_padding_lengths()
print(text_field.as_tensor(padding_lengths))
label_field.index(vocab)
print(label_field.as_tensor(label_field.get_padding_lengths()))
sequence_label_field.index(vocab)
padding_lengths = sequence_label_field.get_padding_lengths()
print(sequence_label_field.as_tensor(padding_lengths))
# Fields know how to batch tensors
tensor1 = label_field.as_tensor(label_field.get_padding_lengths())
label_field2 = LabelField("pos")
label_field2.index(vocab)
tensor2 = label_field2.as_tensor(label_field2.get_padding_lengths())
batched_tensors = label_field.batch_tensors([tensor1, tensor2])
print(batched_tensors)
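# A small extra sketch (not part of the original exercise): TextFields batch the
# same way. We index a second, shorter field with the same vocab; "Terrible" was
# never counted, so it maps to the @@UNKNOWN@@ id, and we reuse the longer
# field's padding lengths so both tensors come out the same shape.
text_field2 = TextField([Token("Terrible"), Token("!")], token_indexers=token_indexers)
text_field2.index(vocab)
padding_lengths = text_field.get_padding_lengths()
text_tensor1 = text_field.as_tensor(padding_lengths)
text_tensor2 = text_field2.as_tensor(padding_lengths)
print(text_field.batch_tensors([text_tensor1, text_tensor2]))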
| allennlp-guide-master | exercises/part2/reading-data/fields_source.py |
reader = MyDatasetReader()
vocab = Vocabulary.from_instances(reader.read("path_to_data"))
print("Default:")
data_loader = MultiProcessDataLoader(reader, "path_to_data", batch_size=4)
data_loader.index_with(vocab)
for batch in data_loader:
print(batch)
print("Shuffle, and drop last batch if incomplete:")
data_loader = MultiProcessDataLoader(
reader, "path_to_data", batch_size=4, shuffle=True, drop_last=True
)
data_loader.index_with(vocab)
for batch in data_loader:
print(batch)
| allennlp-guide-master | exercises/part2/reading-data/data_loader_basic.py |
from collections import Counter, defaultdict
from typing import Dict
from allennlp.data.fields import TextField, LabelField, SequenceLabelField
from allennlp.data.token_indexers import TokenIndexer, SingleIdTokenIndexer
from allennlp.data.tokenizers import Token
from allennlp.data.vocabulary import Vocabulary
| allennlp-guide-master | exercises/part2/reading-data/fields_setup.py |
# Create fields and instances
token_indexers: Dict[str, TokenIndexer] = {
"tokens": SingleIdTokenIndexer(namespace="tokens")
}
text_field_pos = TextField(
[Token("The"), Token("best"), Token("movie"), Token("ever"), Token("!")],
token_indexers=token_indexers,
)
text_field_neg = TextField(
[Token("Such"), Token("an"), Token("awful"), Token("movie"), Token(".")],
token_indexers=token_indexers,
)
label_field_pos = LabelField("pos", label_namespace="labels")
label_field_neg = LabelField("neg", label_namespace="labels")
instance_pos = Instance({"tokens": text_field_pos, "label": label_field_pos})
instance_neg = Instance({"tokens": text_field_neg, "label": label_field_neg})
# Create a Vocabulary with min_count=2 for tokens, but not for labels
vocab = Vocabulary.from_instances([instance_pos, instance_neg], min_count={"tokens": 2})
print("Created a Vocabulary:", vocab)
# Getting the entire mapping for "tokens." Notice only 'movie' is in the
# vocabulary.
print(
'index-to-token for "tokens":',
vocab.get_index_to_token_vocabulary(namespace="tokens"),
)
# Getting the entire mapping for "labels." All the tokens that appeared in the
# dataset are present
print(
'index-to-token for "labels":',
vocab.get_index_to_token_vocabulary(namespace="labels"),
)
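# A quick follow-up sketch (standard Vocabulary lookup behavior): tokens that
# fell below min_count, like "best", are absent from the "tokens" namespace, so
# looking them up falls back to the @@UNKNOWN@@ index.
print('index for "movie":', vocab.get_token_index("movie", namespace="tokens"))
print('index for "best":', vocab.get_token_index("best", namespace="tokens"))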
| allennlp-guide-master | exercises/part2/reading-data/vocabulary_count_source.py |
from typing import Dict
from allennlp.data.instance import Instance
from allennlp.data.fields import TextField, LabelField
from allennlp.data.token_indexers import TokenIndexer, SingleIdTokenIndexer
from allennlp.data.tokenizers import Token
from allennlp.data.vocabulary import Vocabulary
| allennlp-guide-master | exercises/part2/reading-data/vocabulary_count_setup.py |
from typing import Dict
from allennlp.data.instance import Instance
from allennlp.data.fields import Field, TextField, LabelField
from allennlp.data.token_indexers import TokenIndexer, SingleIdTokenIndexer
from allennlp.data.tokenizers import Token
from allennlp.data.vocabulary import Vocabulary
| allennlp-guide-master | exercises/part2/reading-data/vocabulary_creation_setup.py |
# Create fields and instances
# We will use the namespace 'tokens' to map tokens to integers. This is the
# default value, but we are passing it here explicitly to make it clear.
token_indexers: Dict[str, TokenIndexer] = {
"tokens": SingleIdTokenIndexer(namespace="tokens")
}
text_field_pos = TextField(
[Token("The"), Token("best"), Token("movie"), Token("ever"), Token("!")],
token_indexers=token_indexers,
)
text_field_neg = TextField(
[Token("Such"), Token("an"), Token("awful"), Token("movie"), Token(".")],
token_indexers=token_indexers,
)
# Similarly, we will use the default namespace 'labels' to map labels to integers.
label_field_pos = LabelField("pos", label_namespace="labels")
label_field_neg = LabelField("neg", label_namespace="labels")
instance_pos = Instance({"tokens": text_field_pos, "label": label_field_pos})
instance_neg = Instance({"tokens": text_field_neg, "label": label_field_neg})
# Create a Vocabulary
# Tokens from text fields are managed by the 'tokens' namespace, while
# labels are stored under the `labels` namespace, as we specified above.
vocab = Vocabulary.from_instances([instance_pos, instance_neg])
print("Created a Vocabulary:", vocab)
# Looking up indices. namespace='tokens' is used by default. It turns out that we
# add vocabulary items in frequency order, so "movie", which showed up twice, will
# get the lowest index, other than padding and OOV.
print('index for token "movie":', vocab.get_token_index("movie"))
print('index for token "!":', vocab.get_token_index("!"))
# 'tokens' is a padded namespace, and unknown tokens get mapped to @@UNKNOWN@@
# (index = 1).
print('index for token "unknown":', vocab.get_token_index("unknown"))
print('index for label "pos":', vocab.get_token_index("pos", namespace="labels"))
print('index for label "neg":', vocab.get_token_index("neg", namespace="labels"))
# 'labels' is a non-padded namespace; looking up unknown labels throws an error
try:
vocab.get_token_index("unknown", namespace="labels")
except KeyError:
print('index for label "unknown": caught KeyError')
# Looking up tokens and labels by indices
# Notice that for padded namespaces, '@@PADDING@@' and '@@UNKNOWN@@' are
# automatically added.
print("token for index=0:", vocab.get_token_from_index(0))
print("token for index=1:", vocab.get_token_from_index(1))
print("token for index=2:", vocab.get_token_from_index(2))
print("label for index=0:", vocab.get_token_from_index(0, namespace="labels"))
print("label for index=1:", vocab.get_token_from_index(1, namespace="labels"))
try:
vocab.get_token_from_index(2, namespace="labels")
except KeyError:
print("label for index=2: caught KeyError")
| allennlp-guide-master | exercises/part2/reading-data/vocabulary_creation_source.py |
from typing import Dict, Iterable, List
from allennlp.data import DatasetReader, Instance
from allennlp.data.fields import Field, LabelField, TextField
from allennlp.data.token_indexers import TokenIndexer, SingleIdTokenIndexer
from allennlp.data.tokenizers import Token, Tokenizer, WhitespaceTokenizer
| allennlp-guide-master | exercises/part2/reading-data/dataset_reader_basic_setup.py |
# Create Fields
tokens = [Token("The"), Token("best"), Token("movie"), Token("ever"), Token("!")]
token_indexers: Dict[str, TokenIndexer] = {"tokens": SingleIdTokenIndexer()}
text_field = TextField(tokens, token_indexers=token_indexers)
label_field = LabelField("pos")
sequence_label_field = SequenceLabelField(
["DET", "ADJ", "NOUN", "ADV", "PUNKT"], text_field
)
# Create an Instance
fields: Dict[str, Field] = {
"tokens": text_field,
"label": label_field,
}
instance = Instance(fields)
# You can add fields later
instance.add_field("label_seq", sequence_label_field)
# You can simply use print() to see the instance's content
print(instance)
# Create a Vocabulary
counter: Dict[str, Dict[str, int]] = defaultdict(Counter)
instance.count_vocab_items(counter)
vocab = Vocabulary(counter)
# Convert all strings in all of the fields into integer IDs by calling index_fields()
instance.index_fields(vocab)
# Instances know how to turn themselves into a dict of tensors. When we call this
# method in our data code, we additionally give a `padding_lengths` argument.
# We will pass this dictionary to the model as **tensors, so be sure the keys
# match what the model expects.
tensors = instance.as_tensor_dict()
print(tensors)
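# A sketch of the padding_lengths argument mentioned above: for a single
# instance you can compute the lengths yourself and pass them in explicitly.
# In real training code, the data loader does this across a whole batch.
padding_lengths = instance.get_padding_lengths()
print(instance.as_tensor_dict(padding_lengths))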
| allennlp-guide-master | exercises/part2/reading-data/instances_source.py |
reader = MyDatasetReader()
vocab = Vocabulary.from_instances(reader.read("path_to_data"))
print("Using the BucketBatchSampler:")
# The sorting_keys argument is unnecessary here, because the sampler will
# automatically detect that 'tokens' is the right sorting key, but we are
# including it in our example for completeness. You can remove it and see
# that the output is the same.
data_loader = MultiProcessDataLoader(
reader,
"path_to_data",
batch_sampler=BucketBatchSampler(batch_size=4, sorting_keys=["tokens"]),
)
data_loader.index_with(vocab)
for batch in data_loader:
print(batch)
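# A hedged extra example (assuming allennlp.data.samplers also exports
# MaxTokensBatchSampler): this sampler caps the total number of tokens per
# batch instead of the number of instances, which helps when instance lengths
# vary a lot.
from allennlp.data.samplers import MaxTokensBatchSampler
print("Using the MaxTokensBatchSampler:")
sampler = MaxTokensBatchSampler(max_tokens=8, sorting_keys=["tokens"])
data_loader = MultiProcessDataLoader(reader, "path_to_data", batch_sampler=sampler)
data_loader.index_with(vocab)
for batch in data_loader:
print(batch)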
| allennlp-guide-master | exercises/part2/reading-data/data_loader_bucket.py |
# Splits text into words (instead of wordpieces or characters).
tokenizer: Tokenizer = WhitespaceTokenizer()
# Represents each token with a single ID from a vocabulary.
token_indexer: TokenIndexer = SingleIdTokenIndexer(namespace="token_vocab")
vocab = Vocabulary()
vocab.add_tokens_to_namespace(
["This", "is", "some", "text", "."], namespace="token_vocab"
)
vocab.add_tokens_to_namespace(
["T", "h", "i", "s", " ", "o", "m", "e", "t", "x", "."], namespace="character_vocab"
)
text = "This is some text ."
tokens = tokenizer.tokenize(text)
print("Word tokens:", tokens)
text_field = TextField(tokens, {"tokens": token_indexer})
# In order to convert the token strings into integer ids, we need to tell the
# TextField what Vocabulary to use.
text_field.index(vocab)
# We typically batch things together when making tensors, which requires some
# padding computation. Don't worry too much about the padding for now.
padding_lengths = text_field.get_padding_lengths()
tensor_dict = text_field.as_tensor(padding_lengths)
# This output is pretty nested and might look complex. The reason it is so
# nested is that we need to (1) align each indexer with a corresponding
# embedder in the model, and (2) pass a dictionary of arguments to the
# embedder by name. This will be more clear when we get to the embedder.
print("With single id indexer:", tensor_dict)
# Represents each token with a sequence of characters.
token_indexer = TokenCharactersIndexer(namespace="character_vocab")
text_field = TextField(tokens, {"token_characters": token_indexer})
text_field.index(vocab)
padding_lengths = text_field.get_padding_lengths()
tensor_dict = text_field.as_tensor(padding_lengths)
# Notice here that we've now got an extra dimension on the tensor - there's
# a sequence of characters for every token.
print("With token characters indexer:", tensor_dict)
# Splits text into characters (instead of words or wordpieces).
tokenizer = CharacterTokenizer()
tokens = tokenizer.tokenize(text)
print("Character tokens:", tokens)
# Represents each token (which is a character) as a single id from a vocabulary.
token_indexer = SingleIdTokenIndexer(namespace="character_vocab")
text_field = TextField(tokens, {"token_characters": token_indexer})
text_field.index(vocab)
padding_lengths = text_field.get_padding_lengths()
tensor_dict = text_field.as_tensor(padding_lengths)
print("With single id indexer:", tensor_dict)
| allennlp-guide-master | exercises/part2/representing-text-as-features/token_indexers_simple.py |
# This is what gets created by TextField.as_tensor with a SingleIdTokenIndexer
# and a TokenCharactersIndexer; see the code snippet above. This time we're using
# more intuitive names for the indexers and embedders.
token_tensor = {
"tokens": {"tokens": torch.LongTensor([[2, 4, 3, 5]])},
"token_characters": {
"token_characters": torch.LongTensor(
[[[2, 5, 3], [4, 0, 0], [2, 1, 4], [5, 4, 0]]]
)
},
}
# This is for embedding each token.
embedding = Embedding(num_embeddings=6, embedding_dim=3)
# This is for encoding the characters in each token.
character_embedding = Embedding(num_embeddings=6, embedding_dim=3)
cnn_encoder = CnnEncoder(embedding_dim=3, num_filters=4, ngram_filter_sizes=(3,))
token_encoder = TokenCharactersEncoder(character_embedding, cnn_encoder)
embedder = BasicTextFieldEmbedder(
token_embedders={"tokens": embedding, "token_characters": token_encoder}
)
embedded_tokens = embedder(token_tensor)
print(embedded_tokens)
# This is what gets created by TextField.as_tensor with a SingleIdTokenIndexer,
# a TokenCharactersIndexer, and another SingleIdTokenIndexer for PoS tags;
# see the code above.
token_tensor = {
"tokens": {"tokens": torch.LongTensor([[2, 4, 3, 5]])},
"token_characters": {
"token_characters": torch.LongTensor(
[[[2, 5, 3], [4, 0, 0], [2, 1, 4], [5, 4, 0]]]
)
},
"pos_tag_tokens": {"tokens": torch.LongTensor([[2, 5, 3, 4]])},
}
vocab = Vocabulary()
vocab.add_tokens_to_namespace(
["This", "is", "some", "text", "."], namespace="token_vocab"
)
vocab.add_tokens_to_namespace(
["T", "h", "i", "s", " ", "o", "m", "e", "t", "x", "."], namespace="character_vocab"
)
vocab.add_tokens_to_namespace(["DT", "VBZ", "NN", "."], namespace="pos_tag_vocab")
# Notice below how the 'vocab_namespace' parameter matches the name used above.
# We're showing here how the code works when we're constructing the Embedding from
# a configuration file, where the vocabulary object gets passed in behind the
# scenes (but the vocab_namespace parameter must be set in the config). If you are
# using a `build_model` method (see the quick start chapter) or instantiating the
# Embedding yourself directly, you can just grab the vocab size yourself and pass
# in num_embeddings, as we do above.
# This is for embedding each token.
embedding = Embedding(embedding_dim=3, vocab_namespace="token_vocab", vocab=vocab)
# This is for encoding the characters in each token.
character_embedding = Embedding(
embedding_dim=4, vocab_namespace="character_vocab", vocab=vocab
)
cnn_encoder = CnnEncoder(embedding_dim=4, num_filters=5, ngram_filter_sizes=(3,))
token_encoder = TokenCharactersEncoder(character_embedding, cnn_encoder)
# This is for embedding the part of speech tag of each token.
pos_tag_embedding = Embedding(
embedding_dim=6, vocab_namespace="pos_tag_vocab", vocab=vocab
)
# Notice how these keys match the keys in the token_tensor dictionary above;
# these are the keys that you give to your TokenIndexers when constructing
# your TextFields in the DatasetReader.
embedder = BasicTextFieldEmbedder(
token_embedders={
"tokens": embedding,
"token_characters": token_encoder,
"pos_tag_tokens": pos_tag_embedding,
}
)
embedded_tokens = embedder(token_tensor)
print(embedded_tokens)
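# A quick sanity check (extra, not in the original code): BasicTextFieldEmbedder
# concatenates its token embedders' outputs, so the last dimension here should
# be 3 + 5 + 6 = 14.
print("Embedded tokens size:", embedded_tokens.size())
print("Embedder output dim:", embedder.get_output_dim())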
| allennlp-guide-master | exercises/part2/representing-text-as-features/token_embedders_combined.py |
# Splits text into words (instead of wordpieces or characters).
tokenizer: Tokenizer = WhitespaceTokenizer()
# Represents each token with both an id from a vocabulary and a sequence of
# characters.
token_indexers: Dict[str, TokenIndexer] = {
"tokens": SingleIdTokenIndexer(namespace="token_vocab"),
"token_characters": TokenCharactersIndexer(namespace="character_vocab"),
}
vocab = Vocabulary()
vocab.add_tokens_to_namespace(
["This", "is", "some", "text", "."], namespace="token_vocab"
)
vocab.add_tokens_to_namespace(
["T", "h", "i", "s", " ", "o", "m", "e", "t", "x", "."], namespace="character_vocab"
)
text = "This is some text ."
tokens = tokenizer.tokenize(text)
print("Tokens:", tokens)
# The setup here is the same as what we saw above.
text_field = TextField(tokens, token_indexers)
text_field.index(vocab)
padding_lengths = text_field.get_padding_lengths()
tensor_dict = text_field.as_tensor(padding_lengths)
# Note now that we have two entries in this output dictionary,
# one for each indexer that we specified.
print("Combined tensor dictionary:", tensor_dict)
# Now we split text into words with part-of-speech tags, using Spacy's POS tagger.
# This will result in the `tag_` variable being set on each `Token` object, which
# we will read in the indexer.
tokenizer = SpacyTokenizer(pos_tags=True)
vocab.add_tokens_to_namespace(["DT", "VBZ", "NN", "."], namespace="pos_tag_vocab")
# Represents each token with (1) an id from a vocabulary, (2) a sequence of
# characters, and (3) part of speech tag ids.
token_indexers = {
"tokens": SingleIdTokenIndexer(namespace="token_vocab"),
"token_characters": TokenCharactersIndexer(namespace="character_vocab"),
"pos_tags": SingleIdTokenIndexer(namespace="pos_tag_vocab", feature_name="tag_"),
}
tokens = tokenizer.tokenize(text)
print("Spacy tokens:", tokens)
print("POS tags:", [token.tag_ for token in tokens])
text_field = TextField(tokens, token_indexers)
text_field.index(vocab)
padding_lengths = text_field.get_padding_lengths()
tensor_dict = text_field.as_tensor(padding_lengths)
print("Tensor dict with POS tags:", tensor_dict)
| allennlp-guide-master | exercises/part2/representing-text-as-features/token_indexers_combined.py |
# This is what gets created by TextField.as_tensor with a SingleIdTokenIndexer;
# Note that we added the batch dimension at the front. You choose the 'indexer1'
# name when you configure your data processing code.
token_tensor = {"indexer1": {"tokens": torch.LongTensor([[1, 3, 2, 9, 4, 3]])}}
# You would typically get the number of embeddings here from the vocabulary;
# if you use `allennlp train`, there is a separate process for instantiating the
# Embedding object using the vocabulary that you don't need to worry about for
# now.
embedding = Embedding(num_embeddings=10, embedding_dim=3)
# This 'indexer1' key must match the 'indexer1' key in the `token_tensor` above.
# We use these names to align the TokenIndexers used in the data code with the
# TokenEmbedders that do the work on the model side.
embedder = BasicTextFieldEmbedder(token_embedders={"indexer1": embedding})
embedded_tokens = embedder(token_tensor)
print("Using the TextFieldEmbedder:", embedded_tokens)
# As we've said a few times, what's going on inside is that we match keys between
# the token tensor and the token embedders, then pass the inner dictionary to the
# token embedder. The above lines perform the following logic:
embedded_tokens = embedding(**token_tensor["indexer1"])
print("Using the Embedding directly:", embedded_tokens)
# This is what gets created by TextField.as_tensor with a TokenCharactersIndexer
# Note that we added the batch dimension at the front. Don't worry too much
# about the magic 'token_characters' key - that is hard-coded to be produced
# by the TokenCharactersIndexer, and accepted by TokenCharactersEncoder;
# you don't have to produce those yourself in normal settings, it's done for you.
token_tensor = {
"indexer2": {
"token_characters": torch.LongTensor(
[[[1, 3, 0], [4, 2, 3], [1, 9, 5], [6, 0, 0]]]
)
}
}
character_embedding = Embedding(num_embeddings=10, embedding_dim=3)
cnn_encoder = CnnEncoder(embedding_dim=3, num_filters=4, ngram_filter_sizes=(3,))
token_encoder = TokenCharactersEncoder(character_embedding, cnn_encoder)
# Again here, the 'indexer2' key is arbitrary. It just has to match whatever key
# you gave to the corresponding TokenIndexer in your data code, which ends up
# as the top-level key in the token_tensor dictionary.
embedder = BasicTextFieldEmbedder(token_embedders={"indexer2": token_encoder})
embedded_tokens = embedder(token_tensor)
print("With a character CNN:", embedded_tokens)
| allennlp-guide-master | exercises/part2/representing-text-as-features/token_embedders_simple.py |
import warnings
from typing import Dict
import torch
from allennlp.data import Token, Vocabulary, TokenIndexer, Tokenizer
from allennlp.data.fields import ListField, TextField
from allennlp.data.token_indexers import (
SingleIdTokenIndexer,
TokenCharactersIndexer,
ELMoTokenCharactersIndexer,
PretrainedTransformerIndexer,
PretrainedTransformerMismatchedIndexer,
)
from allennlp.data.tokenizers import (
CharacterTokenizer,
PretrainedTransformerTokenizer,
SpacyTokenizer,
WhitespaceTokenizer,
)
from allennlp.modules.seq2vec_encoders import CnnEncoder
from allennlp.modules.text_field_embedders import BasicTextFieldEmbedder
from allennlp.modules.token_embedders import (
Embedding,
TokenCharactersEncoder,
ElmoTokenEmbedder,
PretrainedTransformerEmbedder,
PretrainedTransformerMismatchedEmbedder,
)
from allennlp.nn import util as nn_util
warnings.filterwarnings("ignore")
| allennlp-guide-master | exercises/part2/representing-text-as-features/setup.py |
# Splits text into words (instead of wordpieces or characters). For ELMo, you can
# just use any word-level tokenizer that you like, though for best results you
# should use the same tokenizer that was used with ELMo, which is an older version
# of spacy. We're using a whitespace tokenizer here for ease of demonstration
# with binder.
tokenizer: Tokenizer = WhitespaceTokenizer()
# Represents each token with an array of characters in a way that ELMo expects.
token_indexer: TokenIndexer = ELMoTokenCharactersIndexer()
# Both ELMo and BERT do their own thing with vocabularies, so we don't need to add
# anything, but we do need to construct the vocab object so we can use it below.
# (And if you have any labels in your data that need indexing, you'll still need
# this.)
vocab = Vocabulary()
text = "This is some text ."
tokens = tokenizer.tokenize(text)
print("ELMo tokens:", tokens)
text_field = TextField(tokens, {"elmo_tokens": token_indexer})
text_field.index(vocab)
# We typically batch things together when making tensors, which requires some
# padding computation. Don't worry too much about the padding for now.
padding_lengths = text_field.get_padding_lengths()
tensor_dict = text_field.as_tensor(padding_lengths)
print("ELMo tensors:", tensor_dict)
# Any transformer model name that huggingface's transformers library supports will
# work here. Under the hood, we're grabbing pieces from huggingface for this
# part.
transformer_model = "bert-base-cased"
# To do modeling with BERT correctly, we can't use just any tokenizer; we need to
# use BERT's tokenizer.
tokenizer = PretrainedTransformerTokenizer(model_name=transformer_model)
# Represents each wordpiece with an id from BERT's vocabulary.
token_indexer = PretrainedTransformerIndexer(model_name=transformer_model)
text = "Some text with an extraordinarily long identifier."
tokens = tokenizer.tokenize(text)
print("BERT tokens:", tokens)
text_field = TextField(tokens, {"bert_tokens": token_indexer})
text_field.index(vocab)
tensor_dict = text_field.as_tensor(text_field.get_padding_lengths())
print("BERT tensors:", tensor_dict)
# Now we'll do an example with paired text, to show the right way to handle [SEP]
# tokens in AllenNLP. We have built-in ways of handling this for two text pieces.
# If you have more than two text pieces, you'll have to manually add the special
# tokens. The way we're doing this requires that you use a
# PretrainedTransformerTokenizer, not the abstract Tokenizer class.
# Splits text into wordpieces, but without adding special tokens.
tokenizer = PretrainedTransformerTokenizer(
model_name=transformer_model,
add_special_tokens=False,
)
context_text = "This context is frandibulous."
question_text = "What is the context like?"
context_tokens = tokenizer.tokenize(context_text)
question_tokens = tokenizer.tokenize(question_text)
print("Context tokens:", context_tokens)
print("Question tokens:", question_tokens)
combined_tokens = tokenizer.add_special_tokens(context_tokens, question_tokens)
print("Combined tokens:", combined_tokens)
text_field = TextField(combined_tokens, {"bert_tokens": token_indexer})
text_field.index(vocab)
tensor_dict = text_field.as_tensor(text_field.get_padding_lengths())
print("Combined BERT tensors:", tensor_dict)
| allennlp-guide-master | exercises/part2/representing-text-as-features/token_indexers_contextual.py |
# It's easiest to get ELMo input by just running the data code. See the
# exercise above for an explanation of this code.
tokenizer: Tokenizer = WhitespaceTokenizer()
token_indexer: TokenIndexer = ELMoTokenCharactersIndexer()
vocab = Vocabulary()
text = "This is some text."
tokens = tokenizer.tokenize(text)
print("ELMo tokens:", tokens)
text_field = TextField(tokens, {"elmo_tokens": token_indexer})
text_field.index(vocab)
token_tensor = text_field.as_tensor(text_field.get_padding_lengths())
print("ELMo tensors:", token_tensor)
# We're using a tiny, toy version of ELMo to demonstrate this.
elmo_options_file = (
"https://allennlp.s3.amazonaws.com/models/elmo/test_fixture/options.json"
)
elmo_weight_file = (
"https://allennlp.s3.amazonaws.com/models/elmo/test_fixture/lm_weights.hdf5"
)
elmo_embedding = ElmoTokenEmbedder(
options_file=elmo_options_file, weight_file=elmo_weight_file
)
embedder = BasicTextFieldEmbedder(token_embedders={"elmo_tokens": elmo_embedding})
tensor_dict = text_field.batch_tensors([token_tensor])
embedded_tokens = embedder(tensor_dict)
print("ELMo embedded tokens:", embedded_tokens)
# Again, it's easier to just run the data code to get the right output.
# We're using the smallest transformer model we can here, so that it runs on
# binder.
transformer_model = "google/reformer-crime-and-punishment"
tokenizer = PretrainedTransformerTokenizer(model_name=transformer_model)
token_indexer = PretrainedTransformerIndexer(model_name=transformer_model)
text = "Some text with an extraordinarily long identifier."
tokens = tokenizer.tokenize(text)
print("Transformer tokens:", tokens)
text_field = TextField(tokens, {"bert_tokens": token_indexer})
text_field.index(vocab)
token_tensor = text_field.as_tensor(text_field.get_padding_lengths())
print("Transformer tensors:", token_tensor)
embedding = PretrainedTransformerEmbedder(model_name=transformer_model)
embedder = BasicTextFieldEmbedder(token_embedders={"bert_tokens": embedding})
tensor_dict = text_field.batch_tensors([token_tensor])
embedded_tokens = embedder(tensor_dict)
print("Transformer embedded tokens:", embedded_tokens)
| allennlp-guide-master | exercises/part2/representing-text-as-features/token_embedders_contextual.py |
from allennlp.data import Vocabulary
from allennlp.modules.text_field_embedders import BasicTextFieldEmbedder
from allennlp.modules.token_embedders import Embedding
import torch
# This is what gets created by TextField.as_tensor with a SingleIdTokenIndexer;
# see the exercises above.
token_tensor = {"tokens": {"tokens": torch.LongTensor([1, 3, 2, 1, 4, 3])}}
vocab = Vocabulary()
vocab.add_tokens_to_namespace(
["This", "is", "some", "text", "."], namespace="token_vocab"
)
glove_file = "https://allennlp.s3.amazonaws.com/datasets/glove/glove.6B.50d.txt.gz"
# This is for embedding each token.
embedding = Embedding(
vocab=vocab,
vocab_namespace="token_vocab",
embedding_dim=50,
pretrained_file=glove_file,
)
embedder = BasicTextFieldEmbedder(token_embedders={"tokens": embedding})
embedded_tokens = embedder(token_tensor)
print(embedded_tokens.size())
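# Extra sketch: tokens found in the GloVe file get their pretrained vectors,
# while tokens missing from it are randomly initialized; either way the
# embedder's output dimension is the configured embedding_dim.
print("Embedder output dim:", embedder.get_output_dim())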
| allennlp-guide-master | exercises/part2/representing-text-as-features/pretrained_embedding.py |
# We're following the logic from the "Combining multiple TokenIndexers" example
# above.
tokenizer = SpacyTokenizer(pos_tags=True)
vocab = Vocabulary()
vocab.add_tokens_to_namespace(
["This", "is", "some", "text", "."], namespace="token_vocab"
)
vocab.add_tokens_to_namespace(
["T", "h", "i", "s", " ", "o", "m", "e", "t", "x", "."], namespace="character_vocab"
)
vocab.add_tokens_to_namespace(["DT", "VBZ", "NN", "."], namespace="pos_tag_vocab")
text = "This is some text."
text2 = "This is some text with more tokens."
tokens = tokenizer.tokenize(text)
tokens2 = tokenizer.tokenize(text2)
print("Tokens:", tokens)
print("Tokens 2:", tokens2)
# Represents each token with (1) an id from a vocabulary, (2) a sequence of
# characters, and (3) part of speech tag ids.
token_indexers = {
"tokens": SingleIdTokenIndexer(namespace="token_vocab"),
"token_characters": TokenCharactersIndexer(namespace="character_vocab"),
"pos_tags": SingleIdTokenIndexer(namespace="pos_tag_vocab", feature_name="tag_"),
}
text_field = TextField(tokens, token_indexers)
text_field.index(vocab)
text_field2 = TextField(tokens2, token_indexers)
text_field2.index(vocab)
# We're using the longer padding lengths here; we'd typically be relying on our
# collate function to figure out what the longest values are to use.
padding_lengths = text_field2.get_padding_lengths()
tensor_dict = text_field.as_tensor(padding_lengths)
tensor_dict2 = text_field2.as_tensor(padding_lengths)
print("Combined tensor dictionary:", tensor_dict)
print("Combined tensor dictionary 2:", tensor_dict2)
text_field_tensors = text_field.batch_tensors([tensor_dict, tensor_dict2])
print("Batched tensor dictionary:", text_field_tensors)
# We've seen plenty of examples of using a TextFieldEmbedder, so we'll just show
# the utility methods here.
mask = nn_util.get_text_field_mask(text_field_tensors)
print("Mask:", mask)
print("Mask size:", mask.size())
token_ids = nn_util.get_token_ids_from_text_field_tensors(text_field_tensors)
print("Token ids:", token_ids)
# We can also handle getting masks when you have lists of TextFields, but there's
# an important parameter that you need to pass, which we'll show here. The
# difference in output that you see between here and above is just that there's an
# extra dimension in this output. Where shapes used to be (batch_size=2, ...),
# now they are (batch_size=1, list_length=2, ...).
list_field = ListField([text_field, text_field2])
tensor_dict = list_field.as_tensor(list_field.get_padding_lengths())
text_field_tensors = list_field.batch_tensors([tensor_dict])
print("Batched tensors for ListField[TextField]:", text_field_tensors)
# The num_wrapping_dims argument tells get_text_field_mask how many nested lists
# there are around the TextField, which we need for our heuristics that guess
# which tensor to use when computing a mask.
mask = nn_util.get_text_field_mask(text_field_tensors, num_wrapping_dims=1)
print("Mask:", mask)
print("Mask:", mask.size())
| allennlp-guide-master | exercises/part2/representing-text-as-features/interacting_with_tensors.py |
# This pattern is typically used in cases where your input data is already
# tokenized, so we're showing that here.
text_tokens = ["This", "is", "some", "frandibulous", "text", "."]
tokens = [Token(x) for x in text_tokens]
print(tokens)
# We're using a very small transformer here so that it runs quickly in binder. You
# can change this to any transformer model name supported by Hugging Face.
transformer_model = "google/reformer-crime-and-punishment"
# Represents the list of word tokens with sequences of wordpieces as determined
# by the transformer's tokenizer. This actually results in a pretty complex data
# type, which you can see by running this. It's complicated because we need to
# know how to combine the wordpieces back into words after running the
# transformer.
indexer = PretrainedTransformerMismatchedIndexer(model_name=transformer_model)
text_field = TextField(tokens, {"transformer": indexer})
text_field.index(Vocabulary())
token_tensor = text_field.as_tensor(text_field.get_padding_lengths())
# There are two key things to notice in this output. First, there are two masks:
# `mask` is a word-level mask that gets used in the utility functions described in
# the last section of this chapter. `wordpiece_mask` gets used by the `Embedder`
# itself. Second, there is an `offsets` tensor that gives start and end wordpiece
# indices for the original tokens. In the embedder, we grab these, average all of
# the wordpieces for each token, and return the result.
print("Indexed tensors:", token_tensor)
embedding = PretrainedTransformerMismatchedEmbedder(model_name=transformer_model)
embedder = BasicTextFieldEmbedder(token_embedders={"transformer": embedding})
tensor_dict = text_field.batch_tensors([token_tensor])
embedded_tokens = embedder(tensor_dict)
print("Embedded tokens size:", embedded_tokens.size())
print("Embedded tokens:", embedded_tokens)
| allennlp-guide-master | exercises/part2/representing-text-as-features/mismatched_tokenization.py |
import torch
from allennlp.modules.seq2vec_encoders import (
Seq2VecEncoder,
CnnEncoder,
LstmSeq2VecEncoder,
)
batch_size = 8
sequence_length = 10
input_size = 5
hidden_size = 2
x = torch.rand(batch_size, sequence_length, input_size)
mask = torch.ones(batch_size, sequence_length)
print("shape of input:", x.shape)
encoder: Seq2VecEncoder
encoder = LstmSeq2VecEncoder(
input_size=input_size, hidden_size=hidden_size, num_layers=1
)
y = encoder(x, mask=mask)
print("shape of output (LSTM):", y.shape)
encoder = CnnEncoder(embedding_dim=input_size, num_filters=1, output_dim=hidden_size)
y = encoder(x, mask=mask)
print("shape of output (CNN):", y.shape)
| allennlp-guide-master | exercises/part2/common-architectures/seq2vec.py |
# Create an instance with multiple spans
tokens = [
Token(token)
for token in ["I", "shot", "an", "elephant", "in", "my", "pajamas", "."]
]
token_indexers: Dict[str, TokenIndexer] = {"tokens": SingleIdTokenIndexer()}
text_field = TextField(tokens, token_indexers=token_indexers)
spans = [(2, 3), (5, 6)] # ('an', 'elephant') and ('my', 'pajamas')
span_fields = ListField([SpanField(start, end, text_field) for start, end in spans])
instance = Instance({"tokens": text_field, "spans": span_fields})
# Alternatively, you can also enumerate all spans
spans = enumerate_spans(tokens, max_span_width=3)
print("all spans up to length 3:")
print(spans)
def filter_function(span_tokens):
return not any(t == Token(".") for t in span_tokens)
spans = enumerate_spans(tokens, max_span_width=3, filter_function=filter_function)
print("all spans up to length 3, excluding punctuation:")
print(spans)
# Index and convert to tensors
vocab = Vocabulary.from_instances([instance])
instance.index_fields(vocab)
tensors = Batch([instance]).as_tensor_dict()
tokens_tensor, spans_tensor = tensors["tokens"], tensors["spans"]
# Embed the input
embedding_dim = 8
token_embedder = Embedding(embedding_dim=embedding_dim, vocab=vocab)
embedder = BasicTextFieldEmbedder({"tokens": token_embedder})
embedded_tokens = embedder(tokens_tensor)
print("shape of embedded_tokens", embedded_tokens.shape)
print("shape of spans_tensor:", spans_tensor.shape) # type: ignore
# Embed the spans using two different span extractors
# combination='x,y' is the default value, but we are making it explicit here
span_extractor = EndpointSpanExtractor(input_dim=embedding_dim, combination="x,y")
embedded_spans = span_extractor(embedded_tokens, spans_tensor)
print("shape of embedded spans (x,y):", embedded_spans.shape)
span_extractor = EndpointSpanExtractor(input_dim=embedding_dim, combination="x-y")
embedded_spans = span_extractor(embedded_tokens, spans_tensor)
print("shape of embedded spans (x-y):", embedded_spans.shape)
| allennlp-guide-master | exercises/part2/common-architectures/span_source.py |
embedding_dim1 = 8
embedding_dim2 = 16
sequence_length = 10
# Attention
attention: Attention
# dot product attention requires the vector and matrix to have the same embedding size
vector = torch.rand(
(
1,
embedding_dim1,
)
)
matrix = torch.rand((1, sequence_length, embedding_dim1))
attention = DotProductAttention()
output = attention(vector, matrix)
print("Output from DotProductAttention:", output)
# bilinear & linear attention allows inputs of different sizes
vector = torch.rand(
(
1,
embedding_dim1,
)
)
matrix = torch.rand((1, sequence_length, embedding_dim2))
attention = BilinearAttention(vector_dim=embedding_dim1, matrix_dim=embedding_dim2)
output = attention(vector, matrix)
print("Output from BilinearAttention:", output)
tanh = Activation.by_name("tanh")()
attention = LinearAttention(
tensor_1_dim=embedding_dim1,
tensor_2_dim=embedding_dim2,
combination="x,y",
activation=tanh,
)
output = attention(vector, matrix)
print("Output from LinearAttention:", output)
# MatrixAttention
sequence_length1 = 10
sequence_length2 = 15
matrix_attention: MatrixAttention
# dot product attention requires the two matrices to have the same embedding dimension
matrix1 = torch.rand((1, sequence_length1, embedding_dim1))
matrix2 = torch.rand((1, sequence_length2, embedding_dim1))
matrix_attention = DotProductMatrixAttention()
output = matrix_attention(matrix1, matrix2)
print("Output shape of DotProductMatrixAttention:", output.shape)
# bilinear & linear attention allows inputs of different sizes
matrix1 = torch.rand((1, sequence_length1, embedding_dim1))
matrix2 = torch.rand((1, sequence_length2, embedding_dim2))
matrix_attention = BilinearMatrixAttention(
matrix_1_dim=embedding_dim1, matrix_2_dim=embedding_dim2
)
output = matrix_attention(matrix1, matrix2)
print("Output shape of BilinearMatrixAttention:", output.shape)
matrix_attention = LinearMatrixAttention(
tensor_1_dim=embedding_dim1,
tensor_2_dim=embedding_dim2,
combination="x,y",
activation=tanh,
)
output = matrix_attention(matrix1, matrix2)
print("Output shape of LinearMatrixAttention:", output.shape)
| allennlp-guide-master | exercises/part2/common-architectures/attention_source.py |
from typing import Dict
import torch
from allennlp.data import Batch, Instance, Token, Vocabulary
from allennlp.data.dataset_readers.dataset_utils.span_utils import enumerate_spans
from allennlp.data.fields import TextField, ListField, SpanField
from allennlp.data.token_indexers import TokenIndexer, SingleIdTokenIndexer
from allennlp.modules.span_extractors import EndpointSpanExtractor
from allennlp.modules.text_field_embedders import BasicTextFieldEmbedder
from allennlp.modules.token_embedders import Embedding
| allennlp-guide-master | exercises/part2/common-architectures/span_setup.py |
import torch
from allennlp.modules.attention import (
Attention,
DotProductAttention,
BilinearAttention,
LinearAttention,
)
from allennlp.modules.matrix_attention import (
MatrixAttention,
DotProductMatrixAttention,
BilinearMatrixAttention,
LinearMatrixAttention,
)
from allennlp.nn import Activation
| allennlp-guide-master | exercises/part2/common-architectures/attention_setup.py |
import torch
from allennlp.modules.seq2seq_encoders import (
Seq2SeqEncoder,
PassThroughEncoder,
LstmSeq2SeqEncoder,
)
batch_size = 8
sequence_length = 10
input_size = 5
hidden_size = 2
x = torch.rand(batch_size, sequence_length, input_size)
mask = torch.ones(batch_size, sequence_length)
print("shape of input:", x.shape)
encoder: Seq2SeqEncoder
encoder = PassThroughEncoder(input_dim=input_size)
y = encoder(x, mask=mask)
print("shape of output (PassThrough):", y.shape)
encoder = LstmSeq2SeqEncoder(input_size=input_size, hidden_size=hidden_size)
y = encoder(x, mask=mask)
print("shape of output (LSTM):", y.shape)
| allennlp-guide-master | exercises/part2/common-architectures/seq2seq.py |
import torch
from allennlp.nn.initializers import ConstantInitializer
from allennlp.nn.regularizers import L1Regularizer, L2Regularizer, RegularizerApplicator
class Net(torch.nn.Module):
def __init__(self):
super().__init__()
self.linear1 = torch.nn.Linear(2, 3)
self.linear2 = torch.nn.Linear(3, 2)
self.conv = torch.nn.Conv1d(2, 2, 2)
def forward(self, inputs):
pass
print("Using individual regularizers:")
model = Net()
init_const = ConstantInitializer(val=10.0)
init_const(model.linear1.weight)
init_const(model.linear2.weight)
l1_regularizer = L1Regularizer(alpha=0.01)
print(l1_regularizer(model.linear1.weight)) # 0.01 * 10 * 6 = 0.6
l2_regularizer = L2Regularizer(alpha=0.01)
print(l2_regularizer(model.linear2.weight)) # 0.01 * (10)^2 * 6 = 6.0
print("Using an applicator:")
applicator = RegularizerApplicator(
regexes=[
("linear1.weight", L1Regularizer(alpha=0.01)),
("linear2.weight", L2Regularizer()),
]
)
print(applicator(model)) # 0.6 + 6 = 6.6
| allennlp-guide-master | exercises/part2/building-your-model/model_regularization.py |
import itertools
import json
import os
import tempfile
from copy import deepcopy
from typing import Dict, Iterable, List
import torch
from allennlp.common import JsonDict
from allennlp.common.params import Params
from allennlp.data import (
DataLoader,
DatasetReader,
Field,
Instance,
TextFieldTensors,
Vocabulary,
)
from allennlp.data.fields import LabelField, TextField
from allennlp.data.token_indexers import SingleIdTokenIndexer, TokenIndexer
from allennlp.data.tokenizers import Token, Tokenizer, WhitespaceTokenizer
from allennlp.models import Model
from allennlp.models.archival import archive_model, load_archive
from allennlp.modules import Seq2VecEncoder, TextFieldEmbedder
from allennlp.nn import util
from allennlp.predictors import Predictor
from allennlp.training import Trainer
from allennlp.training.metrics import CategoricalAccuracy
@DatasetReader.register("classification-tsv")
class ClassificationTsvReader(DatasetReader):
def __init__(
self,
tokenizer: Tokenizer = None,
token_indexers: Dict[str, TokenIndexer] = None,
max_tokens: int = None,
**kwargs
):
super().__init__(**kwargs)
self.tokenizer = tokenizer or WhitespaceTokenizer()
self.token_indexers = token_indexers or {"tokens": SingleIdTokenIndexer()}
self.max_tokens = max_tokens
def text_to_instance(self, text: str, label: str = None) -> Instance:
tokens = self.tokenizer.tokenize(text)
if self.max_tokens:
tokens = tokens[: self.max_tokens]
text_field = TextField(tokens, self.token_indexers)
fields: Dict[str, Field] = {"text": text_field}
if label:
fields["label"] = LabelField(label)
return Instance(fields)
def _read(self, file_path: str) -> Iterable[Instance]:
with open(file_path, "r") as lines:
for line in lines:
text, sentiment = line.strip().split("\t")
yield self.text_to_instance(text, sentiment)
@Model.register("simple_classifier")
class SimpleClassifier(Model):
def __init__(
self, vocab: Vocabulary, embedder: TextFieldEmbedder, encoder: Seq2VecEncoder
):
super().__init__(vocab)
self.embedder = embedder
self.encoder = encoder
num_labels = vocab.get_vocab_size("labels")
self.classifier = torch.nn.Linear(encoder.get_output_dim(), num_labels)
self.accuracy = CategoricalAccuracy()
def forward(
self, text: TextFieldTensors, label: torch.Tensor = None
) -> Dict[str, torch.Tensor]:
# Shape: (batch_size, num_tokens, embedding_dim)
embedded_text = self.embedder(text)
# Shape: (batch_size, num_tokens)
mask = util.get_text_field_mask(text)
# Shape: (batch_size, encoding_dim)
encoded_text = self.encoder(embedded_text, mask)
# Shape: (batch_size, num_labels)
logits = self.classifier(encoded_text)
# Shape: (batch_size, num_labels)
probs = torch.nn.functional.softmax(logits, dim=-1)
output = {"probs": probs}
if label is not None:
self.accuracy(logits, label)
output["loss"] = torch.nn.functional.cross_entropy(logits, label)
return output
def get_metrics(self, reset: bool = False) -> Dict[str, float]:
return {"accuracy": self.accuracy.get_metric(reset)}
@Predictor.register("sentence_classifier")
class SentenceClassifierPredictor(Predictor):
def predict(self, sentence: str) -> JsonDict:
return self.predict_json({"sentence": sentence})
def _json_to_instance(self, json_dict: JsonDict) -> Instance:
sentence = json_dict["sentence"]
return self._dataset_reader.text_to_instance(sentence)
def run_config(config):
params = Params(json.loads(config))
params_copy = params.duplicate()
if "dataset_reader" in params:
reader = DatasetReader.from_params(params.pop("dataset_reader"))
else:
raise RuntimeError("`dataset_reader` section is required")
loader_params = params.pop("data_loader")
train_data_loader = DataLoader.from_params(
reader=reader,
data_path=params.pop("train_data_path"),
params=loader_params.duplicate(),
)
dev_data_loader = DataLoader.from_params(
reader=reader,
data_path=params.pop("validation_data_path"),
params=loader_params,
)
print("Building the vocabulary...")
vocab = Vocabulary.from_instances(train_data_loader.iter_instances())
if "model" not in params:
# 'dataset' mode: just preview the first 10 instances
print("Showing the first 10 instances:")
for inst in itertools.islice(train_data_loader.iter_instances(), 10):
print(inst)
return None
model = Model.from_params(vocab=vocab, params=params.pop("model"))
train_data_loader.index_with(vocab)
dev_data_loader.index_with(vocab)
# set up a temporary, empty directory for serialization
with tempfile.TemporaryDirectory() as serialization_dir:
trainer = Trainer.from_params(
model=model,
serialization_dir=serialization_dir,
data_loader=train_data_loader,
validation_data_loader=dev_data_loader,
params=params.pop("trainer"),
)
trainer.train()
return {
"params": params_copy,
"dataset_reader": reader,
"vocab": vocab,
"model": model,
}
| allennlp-guide-master | exercises/part2/building-your-model/setup_model_io.py |
# Create a toy model that just prints tensors passed to forward
class ToyModel(Model):
def __init__(self, vocab: Vocabulary):
super().__init__(vocab)
# Note that the argument names of forward() need to match the field names in your instances
def forward(
self, tokens: TextFieldTensors, label: torch.Tensor = None
) -> Dict[str, torch.Tensor]:
print("tokens:", tokens)
print("label:", label)
return {}
# Create fields and instances
token_indexers: Dict[str, TokenIndexer] = {
"tokens": SingleIdTokenIndexer(namespace="tokens")
}
text_field_pos = TextField(
[Token("The"), Token("best"), Token("movie"), Token("ever"), Token("!")],
token_indexers=token_indexers,
)
text_field_neg = TextField(
[Token("Such"), Token("an"), Token("awful"), Token("movie"), Token(".")],
token_indexers=token_indexers,
)
label_field_pos = LabelField("pos", label_namespace="labels")
label_field_neg = LabelField("neg", label_namespace="labels")
instance_pos = Instance({"tokens": text_field_pos, "label": label_field_pos})
instance_neg = Instance({"tokens": text_field_neg, "label": label_field_neg})
instances = [instance_pos, instance_neg]
# Create a Vocabulary
vocab = Vocabulary.from_instances(instances)
# Create an iterator that creates batches of size 2
data_loader = SimpleDataLoader(instances, 2, vocab=vocab)
model = ToyModel(vocab)
# Iterate over batches and pass them to forward()
for batch in data_loader:
model(**batch)
| allennlp-guide-master | exercises/part2/building-your-model/model_forward.py |
CONFIG = """
{
"dataset_reader" : {
"type": "classification-tsv",
"token_indexers": {
"tokens": {
"type": "single_id"
}
}
},
"train_data_path": "quick_start/data/movie_review/train.tsv",
"validation_data_path": "quick_start/data/movie_review/dev.tsv",
"model": {
"type": "simple_classifier",
"embedder": {
"token_embedders": {
"tokens": {
"type": "embedding",
"embedding_dim": 10
}
}
},
"encoder": {
"type": "bag_of_embeddings",
"embedding_dim": 10
}
},
"data_loader": {
"batch_size": 8
},
"trainer": {
"optimizer": "adam",
"num_epochs": 5
}
}
"""
def make_predictions(
model: Model, dataset_reader: DatasetReader
) -> List[Dict[str, float]]:
"""Make predictions using the given model and dataset reader."""
predictions = []
predictor = SentenceClassifierPredictor(model, dataset_reader)
output = predictor.predict("A good movie!")
predictions.append(
{
model.vocab.get_token_from_index(label_id, "labels"): prob
for label_id, prob in enumerate(output["probs"])
}
)
output = predictor.predict("This was a monstrous waste of time.")
predictions.append(
{
model.vocab.get_token_from_index(label_id, "labels"): prob
for label_id, prob in enumerate(output["probs"])
}
)
return predictions
# Because we can't use bash to run allennlp commands, and so that we can more
# easily pull out some pieces to show you how this works, we wrote a simple method
# that runs a training loop from a configuration file. You can see it in the Setup
# section above.
components = run_config(CONFIG)
params = components["params"]
dataset_reader = components["dataset_reader"]
vocab = components["vocab"]
model = components["model"]
original_preds = make_predictions(model, dataset_reader)
# Save the model
serialization_dir = "model"
config_file = os.path.join(serialization_dir, "config.json")
vocabulary_dir = os.path.join(serialization_dir, "vocabulary")
weights_file = os.path.join(serialization_dir, "weights.th")
os.makedirs(serialization_dir, exist_ok=True)
params.to_file(config_file)
vocab.save_to_files(vocabulary_dir)
torch.save(model.state_dict(), weights_file)
# Load the model
loaded_params = Params.from_file(config_file)
loaded_model = Model.load(loaded_params, serialization_dir, weights_file)
loaded_vocab = loaded_model.vocab # Vocabulary is loaded in Model.load()
# Make sure the predictions are the same
loaded_preds = make_predictions(loaded_model, dataset_reader)
assert original_preds == loaded_preds
print("predictions matched")
# Create an archive file
archive_model(serialization_dir, weights="weights.th")
# Unarchive from the file
archive = load_archive(os.path.join(serialization_dir, "model.tar.gz"))
# Make sure the predictions are the same
archived_preds = make_predictions(archive.model, dataset_reader)
assert original_preds == archived_preds
print("predictions matched")
| allennlp-guide-master | exercises/part2/building-your-model/model_io.py |
import torch
from allennlp.nn.initializers import (
InitializerApplicator,
XavierUniformInitializer,
ConstantInitializer,
NormalInitializer,
)
class Net(torch.nn.Module):
def __init__(self):
super().__init__()
self.linear1 = torch.nn.Linear(2, 3)
self.linear2 = torch.nn.Linear(3, 2)
self.conv = torch.nn.Conv1d(2, 2, 2)
def forward(self, inputs):
pass
model = Net()
print("Initial parameters:")
for name, param in model.named_parameters():
print(name, param)
init_uniform = XavierUniformInitializer()
init_uniform(model.linear1.weight)
init_uniform(model.linear2.weight)
init_const = ConstantInitializer(val=10.0)
init_const(model.linear1.bias)
init_const(model.linear2.bias)
init_normal = NormalInitializer(mean=0.0, std=10.0)
init_normal(model.conv.weight)
init_normal(model.conv.bias)
print("\nAfter applying initializers individually:")
for name, param in model.named_parameters():
print(name, param)
model = Net()
applicator = InitializerApplicator(
regexes=[
("linear.*weight", init_uniform),
("linear.*bias", init_const),
("conv.*", init_normal),
]
)
applicator(model)
print("\nAfter applying an applicator:")
for name, param in model.named_parameters():
print(name, param)
| allennlp-guide-master | exercises/part2/building-your-model/model_init.py |
from typing import Dict
import torch
import numpy
from allennlp.data import Instance, Token, Vocabulary
from allennlp.data.data_loaders import SimpleDataLoader
from allennlp.data.fields import TextField, LabelField
from allennlp.data.fields.text_field import TextFieldTensors
from allennlp.data.token_indexers import TokenIndexer, SingleIdTokenIndexer
from allennlp.models import Model
| allennlp-guide-master | exercises/part2/building-your-model/setup_model_forward.py |
# Create a toy model that just returns a random distribution over labels
class ToyModel(Model):
def __init__(self, vocab: Vocabulary):
super().__init__(vocab)
def forward(
self, tokens: TextFieldTensors, label: torch.Tensor = None
) -> Dict[str, torch.Tensor]:
# Simply generate random logits and compute a probability distribution
batch_size = tokens["tokens"]["tokens"].size(0)
logits = torch.normal(mean=0.0, std=1.0, size=(batch_size, 2))
probs = torch.softmax(logits, dim=1)
return {"logits": logits, "probs": probs}
def make_output_human_readable(
self, output_dict: Dict[str, torch.Tensor]
) -> Dict[str, torch.Tensor]:
# Take the logits from the forward pass, and compute the label
# IDs for maximum values
logits = output_dict["logits"].cpu().data.numpy()
predicted_id: numpy.ndarray = numpy.argmax(logits, axis=-1) # type: ignore
# Convert these IDs back to label strings using vocab
output_dict["label"] = [ # type: ignore
self.vocab.get_token_from_index(x, namespace="labels") for x in predicted_id
]
return output_dict
# Create fields and instances
token_indexers: Dict[str, TokenIndexer] = {
"tokens": SingleIdTokenIndexer(namespace="tokens")
}
text_field_pos = TextField(
[Token("The"), Token("best"), Token("movie"), Token("ever"), Token("!")],
token_indexers=token_indexers,
)
text_field_neg = TextField(
[Token("Such"), Token("an"), Token("awful"), Token("movie"), Token(".")],
token_indexers=token_indexers,
)
label_field_pos = LabelField("pos", label_namespace="labels")
label_field_neg = LabelField("neg", label_namespace="labels")
instance_pos = Instance({"tokens": text_field_pos, "label": label_field_pos})
instance_neg = Instance({"tokens": text_field_neg, "label": label_field_neg})
instances = [instance_pos, instance_neg]
# Create a Vocabulary
vocab = Vocabulary.from_instances(instances)
# Create an iterator that creates batches of size 2
data_loader = SimpleDataLoader(instances, 2, vocab=vocab)
model = ToyModel(vocab)
# Run forward pass on an instance. This will invoke forward() and then make_output_human_readable()
print(model.forward_on_instance(instance_pos))
# Notice that the return value is one dictionary per instance,
# even though everything in forward() and decode() is batched
print(model.forward_on_instances([instance_pos, instance_neg]))
| allennlp-guide-master | exercises/part2/building-your-model/model_prediction.py |
import json
from allennlp.common import FromParams, Params
from allennlp.common.checks import ConfigurationError
from allennlp.data import Vocabulary
class Gaussian(FromParams):
def __init__(self, mean: float, variance: float):
self.mean = mean
self.variance = variance
class ModelWithGaussian(FromParams):
def __init__(self, vocab: Vocabulary, gaussian: Gaussian):
self.vocab = vocab
self.gaussian = gaussian
print(f"ModelWithGaussian got vocab: {vocab}")
param_str = """{"gaussian": {"mean": 0.0, "variance": 1.0}}"""
params = Params(json.loads(param_str))
try:
model = ModelWithGaussian.from_params(params)
except ConfigurationError:
print("Caught ConfigurationError")
vocab = Vocabulary()
model = ModelWithGaussian.from_params(params=params, vocab=vocab)
| allennlp-guide-master | exercises/part2/using-config-files/extras_basic.py |
import json
from typing import List
from allennlp.common import Registrable, Params
class Count(Registrable):
def __init__(self, count: int):
self.count = count
@classmethod
def from_list_of_ints(cls, int_list: List[int]):
return cls(len(int_list))
@classmethod
def from_list_of_strings(cls, str_list: List[str]):
return cls(len(str_list))
@classmethod
def from_string_length(cls, string: str):
return cls(len(string))
# We can't use the @Count.register() decorator before the Count class is defined,
# so we have to manually call the decorator here, below. If we were using a
# subclass of Count, we could have just used the @Count.register() decorator
# multiple times.
Count.register("default")(Count)
Count.register("from_list_of_ints", constructor="from_list_of_ints")(Count)
Count.register("from_list_of_strings", constructor="from_list_of_strings")(Count)
Count.register("from_string_length", constructor="from_string_length")(Count)
Count.default_implementation = "default"
param_str = """{"count": 23}"""
count = Count.from_params(Params(json.loads(param_str)))
print(f"Count 1: {count.count}")
param_str = """{"type": "from_list_of_ints", "int_list": [1, 2, 3]}"""
count = Count.from_params(Params(json.loads(param_str)))
print(f"Count 2: {count.count}")
param_str = """{"type": "from_list_of_strings", "str_list": ["a", "list"]}"""
count = Count.from_params(Params(json.loads(param_str)))
print(f"Count 3: {count.count}")
param_str = """{"type": "from_string_length", "string": "this is a string"}"""
count = Count.from_params(Params(json.loads(param_str)))
print(f"Count 4: {count.count}")
| allennlp-guide-master | exercises/part2/using-config-files/multiple_constructors.py |
import json
from allennlp.common import FromParams, Params, Registrable, Lazy
from allennlp.data import Vocabulary
class Gaussian(FromParams):
def __init__(self, vocab: Vocabulary, mean: float, variance: float):
self.vocab = vocab
self.mean = mean
self.variance = variance
print(f"Gaussian got vocab with object id: {id(vocab)}")
class ModelWithGaussian(Registrable):
def __init__(self, vocab: Vocabulary, gaussian: Gaussian):
self.vocab = vocab
self.gaussian = gaussian
@classmethod
def from_lazy_objects(cls, gaussian: Lazy[Gaussian]) -> "ModelWithGaussian":
# Pretend that we needed to do some non-trivial processing / reading from
# disk in order to construct this object.
vocab = Vocabulary()
gaussian_ = gaussian.construct(vocab=vocab)
return cls(vocab=vocab, gaussian=gaussian_)
# In order to use a constructor other than __init__, we need to inherit from
# Registrable, not just FromParams, and register the class with the separate
# constructor. And because we're registering the Registrable class itself, we
# can't do this as a decorator, like we typically do.
ModelWithGaussian.register("default", constructor="from_lazy_objects")(
ModelWithGaussian
)
ModelWithGaussian.default_implementation = "default"
param_str = """{"gaussian": {"mean": 0.0, "variance": 1.0}}"""
params = Params(json.loads(param_str))
model = ModelWithGaussian.from_params(params=params)
print("Mean:", model.gaussian.mean)
print("Variance:", model.gaussian.variance)
| allennlp-guide-master | exercises/part2/using-config-files/lazy_good.py |
import json
from allennlp.common import FromParams, Params
from allennlp.common.checks import ConfigurationError
from allennlp.data import Vocabulary
class Gaussian(FromParams):
def __init__(self, vocab: Vocabulary, mean: float, variance: float):
self.vocab = vocab
self.mean = mean
self.variance = variance
print(f"Gaussian got vocab with object id: {id(vocab)}")
class ModelWithGaussian(FromParams):
def __init__(self, vocab: Vocabulary, gaussian: Gaussian):
self.vocab = vocab
self.gaussian = gaussian
print(f"ModelWithGaussian got vocab with object id: {id(vocab)}")
param_str = """{"gaussian": {"mean": 0.0, "variance": 1.0}}"""
params = Params(json.loads(param_str))
try:
model = ModelWithGaussian.from_params(params)
except ConfigurationError:
print("Caught ConfigurationError")
vocab = Vocabulary()
# Even though we're only passing `vocab=vocab` at the top level, the vocab object
# is available recursively to any objects that are constructed inside this call,
# including the Gaussian object.
model = ModelWithGaussian.from_params(params=params, vocab=vocab)
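# Added check (illustrative): the single `vocab` extra supplied at the top level
# was injected into both constructors, so both objects hold the same instance.
print("Same Vocabulary object:", model.vocab is model.gaussian.vocab)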
| allennlp-guide-master | exercises/part2/using-config-files/extras_recursive.py |
import json
from allennlp.common import FromParams, Params
class BaseGaussian(FromParams):
def __init__(self, mean: float, variance: float):
self.mean = mean
self.variance = variance
class MyGaussian(BaseGaussian):
def __init__(self, name: str, **kwargs):
super().__init__(**kwargs)
self.name = name
param_str = """{"mean": 0.0, "variance": 1.0, "name": "My Gaussian"}"""
params = Params(json.loads(param_str))
gaussian = MyGaussian.from_params(params)
print(f"Mean: {gaussian.mean}")
print(f"Variance: {gaussian.variance}")
print(f"Name: {gaussian.name}")
| allennlp-guide-master | exercises/part2/using-config-files/kwargs.py |
import json
from allennlp.common import FromParams, Params, Lazy
from allennlp.data import Vocabulary
class Gaussian(FromParams):
def __init__(self, vocab: Vocabulary, mean: float, variance: float):
self.vocab = vocab
self.mean = mean
self.variance = variance
print(f"Gaussian got vocab with object id: {id(vocab)}")
class ModelWithGaussian(FromParams):
def __init__(self, gaussian: Lazy[Gaussian]):
# Pretend that we needed to do some non-trivial processing / reading from
# disk in order to construct this object.
self.vocab = Vocabulary()
self.gaussian = gaussian.construct(vocab=self.vocab)
param_str = """{"gaussian": {"mean": 0.0, "variance": 1.0}}"""
params = Params(json.loads(param_str))
model = ModelWithGaussian.from_params(params=params)
print("Mean:", model.gaussian.mean)
print("Variance:", model.gaussian.variance)
| allennlp-guide-master | exercises/part2/using-config-files/lazy_bad.py |
from typing import Dict, Iterable, List
import torch
from allennlp.data import DatasetReader, Instance, Vocabulary, TextFieldTensors
from allennlp.data.fields import LabelField, TextField
from allennlp.data.token_indexers import TokenIndexer, SingleIdTokenIndexer
from allennlp.data.tokenizers import Token, Tokenizer, WhitespaceTokenizer
from allennlp.models import Model
from allennlp.modules import TextFieldEmbedder, Seq2VecEncoder
from allennlp.modules.text_field_embedders import BasicTextFieldEmbedder
from allennlp.modules.token_embedders import Embedding
from allennlp.modules.seq2vec_encoders import BagOfEmbeddingsEncoder
from allennlp.nn import util
from allennlp.training.metrics import CategoricalAccuracy
# There's a warning when you call `forward_on_instances` that you don't need
# to worry about right now, so we silence it.
import warnings
warnings.filterwarnings("ignore")
class ClassificationTsvReader(DatasetReader):
def __init__(
self,
tokenizer: Tokenizer = None,
token_indexers: Dict[str, TokenIndexer] = None,
max_tokens: int = None,
**kwargs
):
super().__init__(**kwargs)
self.tokenizer = tokenizer or WhitespaceTokenizer()
self.token_indexers = token_indexers or {"tokens": SingleIdTokenIndexer()}
self.max_tokens = max_tokens
def _read(self, file_path: str) -> Iterable[Instance]:
with open(file_path, "r") as lines:
for line in lines:
text, sentiment = line.strip().split("\t")
tokens = self.tokenizer.tokenize(text)
if self.max_tokens:
tokens = tokens[: self.max_tokens]
text_field = TextField(tokens, self.token_indexers)
label_field = LabelField(sentiment)
yield Instance({"text": text_field, "label": label_field})
| allennlp-guide-master | exercises/part1/training-and-prediction/model_setup.py |
class SentenceClassifierPredictor(Predictor):
def predict(self, sentence: str) -> JsonDict:
return self.predict_json({"sentence": sentence})
def _json_to_instance(self, json_dict: JsonDict) -> Instance:
sentence = json_dict["sentence"]
return self._dataset_reader.text_to_instance(sentence)
# We've copied the training loop from an earlier example, with updated model
# code, above in the Setup section. We run the training loop to get a trained
# model.
model, dataset_reader = run_training_loop()
vocab = model.vocab
predictor = SentenceClassifierPredictor(model, dataset_reader)
output = predictor.predict("A good movie!")
print(
[
(vocab.get_token_from_index(label_id, "labels"), prob)
for label_id, prob in enumerate(output["probs"])
]
)
output = predictor.predict("This was a monstrous waste of time.")
print(
[
(vocab.get_token_from_index(label_id, "labels"), prob)
for label_id, prob in enumerate(output["probs"])
]
)
| allennlp-guide-master | exercises/part1/training-and-prediction/prediction_source.py |
import tempfile
from typing import Dict, Iterable, List, Tuple
import torch
from allennlp.data import (
DataLoader,
DatasetReader,
Instance,
Vocabulary,
TextFieldTensors,
)
from allennlp.data.data_loaders import SimpleDataLoader
from allennlp.data.fields import LabelField, TextField
from allennlp.data.token_indexers import TokenIndexer, SingleIdTokenIndexer
from allennlp.data.tokenizers import Token, Tokenizer, WhitespaceTokenizer
from allennlp.models import Model
from allennlp.modules import TextFieldEmbedder, Seq2VecEncoder
from allennlp.modules.text_field_embedders import BasicTextFieldEmbedder
from allennlp.modules.token_embedders import Embedding
from allennlp.modules.seq2vec_encoders import BagOfEmbeddingsEncoder
from allennlp.nn import util
from allennlp.training import Trainer, GradientDescentTrainer
from allennlp.training.metrics import CategoricalAccuracy
from allennlp.training.optimizers import AdamOptimizer
from allennlp.training.util import evaluate
class ClassificationTsvReader(DatasetReader):
def __init__(
self,
tokenizer: Tokenizer = None,
token_indexers: Dict[str, TokenIndexer] = None,
max_tokens: int = None,
**kwargs
):
super().__init__(**kwargs)
self.tokenizer = tokenizer or WhitespaceTokenizer()
self.token_indexers = token_indexers or {"tokens": SingleIdTokenIndexer()}
self.max_tokens = max_tokens
def _read(self, file_path: str) -> Iterable[Instance]:
with open(file_path, "r") as lines:
for line in lines:
text, sentiment = line.strip().split("\t")
tokens = self.tokenizer.tokenize(text)
if self.max_tokens:
tokens = tokens[: self.max_tokens]
text_field = TextField(tokens, self.token_indexers)
label_field = LabelField(sentiment)
yield Instance({"text": text_field, "label": label_field})
class SimpleClassifier(Model):
def __init__(
self, vocab: Vocabulary, embedder: TextFieldEmbedder, encoder: Seq2VecEncoder
):
super().__init__(vocab)
self.embedder = embedder
self.encoder = encoder
num_labels = vocab.get_vocab_size("labels")
self.classifier = torch.nn.Linear(encoder.get_output_dim(), num_labels)
self.accuracy = CategoricalAccuracy()
def forward(
self, text: TextFieldTensors, label: torch.Tensor
) -> Dict[str, torch.Tensor]:
# Shape: (batch_size, num_tokens, embedding_dim)
embedded_text = self.embedder(text)
# Shape: (batch_size, num_tokens)
mask = util.get_text_field_mask(text)
# Shape: (batch_size, encoding_dim)
encoded_text = self.encoder(embedded_text, mask)
# Shape: (batch_size, num_labels)
logits = self.classifier(encoded_text)
# Shape: (batch_size, num_labels)
        probs = torch.nn.functional.softmax(logits, dim=-1)
# Shape: (1,)
loss = torch.nn.functional.cross_entropy(logits, label)
self.accuracy(logits, label)
output = {"loss": loss, "probs": probs}
return output
def get_metrics(self, reset: bool = False) -> Dict[str, float]:
return {"accuracy": self.accuracy.get_metric(reset)}
def build_dataset_reader() -> DatasetReader:
return ClassificationTsvReader()
def read_data(reader: DatasetReader) -> Tuple[List[Instance], List[Instance]]:
print("Reading data")
training_data = list(reader.read("quick_start/data/movie_review/train.tsv"))
validation_data = list(reader.read("quick_start/data/movie_review/dev.tsv"))
return training_data, validation_data
def build_vocab(instances: Iterable[Instance]) -> Vocabulary:
print("Building the vocabulary")
return Vocabulary.from_instances(instances)
def build_model(vocab: Vocabulary) -> Model:
print("Building the model")
vocab_size = vocab.get_vocab_size("tokens")
embedder = BasicTextFieldEmbedder(
{"tokens": Embedding(embedding_dim=10, num_embeddings=vocab_size)}
)
encoder = BagOfEmbeddingsEncoder(embedding_dim=10)
return SimpleClassifier(vocab, embedder, encoder)
def build_data_loaders(
train_data: List[Instance],
dev_data: List[Instance],
) -> Tuple[DataLoader, DataLoader]:
train_loader = SimpleDataLoader(train_data, 8, shuffle=True)
dev_loader = SimpleDataLoader(dev_data, 8, shuffle=False)
return train_loader, dev_loader
def build_trainer(
model: Model,
serialization_dir: str,
train_loader: DataLoader,
dev_loader: DataLoader,
) -> Trainer:
parameters = [(n, p) for n, p in model.named_parameters() if p.requires_grad]
optimizer = AdamOptimizer(parameters) # type: ignore
trainer = GradientDescentTrainer(
model=model,
serialization_dir=serialization_dir,
data_loader=train_loader,
validation_data_loader=dev_loader,
num_epochs=5,
optimizer=optimizer,
)
return trainer
def run_training_loop():
dataset_reader = build_dataset_reader()
train_data, dev_data = read_data(dataset_reader)
vocab = build_vocab(train_data + dev_data)
model = build_model(vocab)
train_loader, dev_loader = build_data_loaders(train_data, dev_data)
train_loader.index_with(vocab)
dev_loader.index_with(vocab)
# You obviously won't want to create a temporary file for your training
# results, but for execution in binder for this guide, we need to do this.
with tempfile.TemporaryDirectory() as serialization_dir:
trainer = build_trainer(model, serialization_dir, train_loader, dev_loader)
trainer.train()
return model, dataset_reader
| allennlp-guide-master | exercises/part1/training-and-prediction/evaluation_setup.py |
from typing import Dict, Iterable, List
from allennlp.data import DatasetReader, Instance
from allennlp.data.fields import Field, LabelField, TextField
from allennlp.data.token_indexers import TokenIndexer, SingleIdTokenIndexer
from allennlp.data.tokenizers import Token, Tokenizer, WhitespaceTokenizer
| allennlp-guide-master | exercises/part1/training-and-prediction/dataset_reader_setup.py |
class ClassificationTsvReader(DatasetReader):
def __init__(
self,
tokenizer: Tokenizer = None,
token_indexers: Dict[str, TokenIndexer] = None,
max_tokens: int = None,
**kwargs
):
super().__init__(**kwargs)
self.tokenizer = tokenizer or WhitespaceTokenizer()
self.token_indexers = token_indexers or {"tokens": SingleIdTokenIndexer()}
self.max_tokens = max_tokens
def _read(self, file_path: str) -> Iterable[Instance]:
with open(file_path, "r") as lines:
for line in lines:
text, sentiment = line.strip().split("\t")
tokens = self.tokenizer.tokenize(text)
if self.max_tokens:
tokens = tokens[: self.max_tokens]
text_field = TextField(tokens, self.token_indexers)
label_field = LabelField(sentiment)
fields: Dict[str, Field] = {"text": text_field, "label": label_field}
yield Instance(fields)
dataset_reader = ClassificationTsvReader(max_tokens=64)
instances = list(dataset_reader.read("quick_start/data/movie_review/train.tsv"))
for instance in instances[:10]:
print(instance)
| allennlp-guide-master | exercises/part1/training-and-prediction/dataset_reader_source.py |
config = {
"dataset_reader": {
"type": "classification-tsv",
"token_indexers": {"tokens": {"type": "single_id"}},
},
"train_data_path": "quick_start/data/movie_review/train.tsv",
"validation_data_path": "quick_start/data/movie_review/dev.tsv",
"model": {
"type": "simple_classifier",
"embedder": {
"token_embedders": {"tokens": {"type": "embedding", "embedding_dim": 10}}
},
"encoder": {"type": "bag_of_embeddings", "embedding_dim": 10},
},
"data_loader": {"batch_size": 8, "shuffle": True},
"trainer": {"optimizer": "adam", "num_epochs": 5},
}
with tempfile.TemporaryDirectory() as serialization_dir:
config_filename = serialization_dir + "/training_config.json"
with open(config_filename, "w") as config_file:
json.dump(config, config_file)
from allennlp.commands.train import train_model_from_file
# Instead of this python code, you would typically just call
# allennlp train [config_file] -s [serialization_dir]
train_model_from_file(
config_filename, serialization_dir, file_friendly_logging=True, force=True
)
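    # Note (added): `train_model_from_file` writes the trained weights, metrics,
    # and a `model.tar.gz` archive into `serialization_dir`, i.e. the directory
    # you would pass with `-s` when using the CLI command above.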
| allennlp-guide-master | exercises/part1/training-and-prediction/config_source.py |
def run_training_loop():
dataset_reader = build_dataset_reader()
train_data, dev_data = read_data(dataset_reader)
vocab = build_vocab(train_data + dev_data)
model = build_model(vocab)
train_loader, dev_loader = build_data_loaders(train_data, dev_data)
train_loader.index_with(vocab)
dev_loader.index_with(vocab)
# You obviously won't want to create a temporary file for your training
# results, but for execution in binder for this guide, we need to do this.
with tempfile.TemporaryDirectory() as serialization_dir:
trainer = build_trainer(model, serialization_dir, train_loader, dev_loader)
print("Starting training")
trainer.train()
print("Finished training")
return model, dataset_reader
def build_data_loaders(
train_data: List[Instance],
dev_data: List[Instance],
) -> Tuple[DataLoader, DataLoader]:
train_loader = SimpleDataLoader(train_data, 8, shuffle=True)
dev_loader = SimpleDataLoader(dev_data, 8, shuffle=False)
return train_loader, dev_loader
def build_trainer(
model: Model,
serialization_dir: str,
train_loader: DataLoader,
dev_loader: DataLoader,
) -> Trainer:
parameters = [(n, p) for n, p in model.named_parameters() if p.requires_grad]
optimizer = AdamOptimizer(parameters) # type: ignore
trainer = GradientDescentTrainer(
model=model,
serialization_dir=serialization_dir,
data_loader=train_loader,
validation_data_loader=dev_loader,
num_epochs=5,
optimizer=optimizer,
)
return trainer
run_training_loop()
| allennlp-guide-master | exercises/part1/training-and-prediction/training_source.py |
import tempfile
from typing import Dict, Iterable, List, Tuple
import torch
from allennlp.common.util import JsonDict
from allennlp.data import (
DataLoader,
DatasetReader,
Instance,
Vocabulary,
TextFieldTensors,
)
from allennlp.data.data_loaders import SimpleDataLoader
from allennlp.data.fields import LabelField, TextField
from allennlp.data.token_indexers import TokenIndexer, SingleIdTokenIndexer
from allennlp.data.tokenizers import Token, Tokenizer, WhitespaceTokenizer
from allennlp.models import Model
from allennlp.modules import TextFieldEmbedder, Seq2VecEncoder
from allennlp.modules.text_field_embedders import BasicTextFieldEmbedder
from allennlp.modules.token_embedders import Embedding
from allennlp.modules.seq2vec_encoders import BagOfEmbeddingsEncoder
from allennlp.nn import util
from allennlp.predictors import Predictor
from allennlp.training import Trainer, GradientDescentTrainer
from allennlp.training.metrics import CategoricalAccuracy
from allennlp.training.optimizers import AdamOptimizer
from allennlp.training.util import evaluate
class ClassificationTsvReader(DatasetReader):
def __init__(
self,
tokenizer: Tokenizer = None,
token_indexers: Dict[str, TokenIndexer] = None,
max_tokens: int = None,
**kwargs
):
super().__init__(**kwargs)
self.tokenizer = tokenizer or WhitespaceTokenizer()
self.token_indexers = token_indexers or {"tokens": SingleIdTokenIndexer()}
self.max_tokens = max_tokens
def text_to_instance(self, text: str, label: str = None) -> Instance:
tokens = self.tokenizer.tokenize(text)
if self.max_tokens:
tokens = tokens[: self.max_tokens]
text_field = TextField(tokens, self.token_indexers)
fields = {"text": text_field}
if label:
fields["label"] = LabelField(label)
return Instance(fields)
def _read(self, file_path: str) -> Iterable[Instance]:
with open(file_path, "r") as lines:
for line in lines:
text, sentiment = line.strip().split("\t")
yield self.text_to_instance(text, sentiment)
class SimpleClassifier(Model):
def __init__(
self, vocab: Vocabulary, embedder: TextFieldEmbedder, encoder: Seq2VecEncoder
):
super().__init__(vocab)
self.embedder = embedder
self.encoder = encoder
num_labels = vocab.get_vocab_size("labels")
self.classifier = torch.nn.Linear(encoder.get_output_dim(), num_labels)
self.accuracy = CategoricalAccuracy()
def forward(
self, text: TextFieldTensors, label: torch.Tensor = None
) -> Dict[str, torch.Tensor]:
# Shape: (batch_size, num_tokens, embedding_dim)
embedded_text = self.embedder(text)
# Shape: (batch_size, num_tokens)
mask = util.get_text_field_mask(text)
# Shape: (batch_size, encoding_dim)
encoded_text = self.encoder(embedded_text, mask)
# Shape: (batch_size, num_labels)
logits = self.classifier(encoded_text)
# Shape: (batch_size, num_labels)
        probs = torch.nn.functional.softmax(logits, dim=-1)
output = {"probs": probs}
if label is not None:
self.accuracy(logits, label)
# Shape: (1,)
output["loss"] = torch.nn.functional.cross_entropy(logits, label)
return output
def get_metrics(self, reset: bool = False) -> Dict[str, float]:
return {"accuracy": self.accuracy.get_metric(reset)}
def build_dataset_reader() -> DatasetReader:
return ClassificationTsvReader()
def read_data(reader: DatasetReader) -> Tuple[List[Instance], List[Instance]]:
print("Reading data")
training_data = list(reader.read("quick_start/data/movie_review/train.tsv"))
validation_data = list(reader.read("quick_start/data/movie_review/dev.tsv"))
return training_data, validation_data
def build_vocab(instances: Iterable[Instance]) -> Vocabulary:
print("Building the vocabulary")
return Vocabulary.from_instances(instances)
def build_model(vocab: Vocabulary) -> Model:
print("Building the model")
vocab_size = vocab.get_vocab_size("tokens")
embedder = BasicTextFieldEmbedder(
{"tokens": Embedding(embedding_dim=10, num_embeddings=vocab_size)}
)
encoder = BagOfEmbeddingsEncoder(embedding_dim=10)
return SimpleClassifier(vocab, embedder, encoder)
def build_data_loaders(
train_data: List[Instance],
dev_data: List[Instance],
) -> Tuple[DataLoader, DataLoader]:
train_loader = SimpleDataLoader(train_data, 8, shuffle=True)
dev_loader = SimpleDataLoader(dev_data, 8, shuffle=False)
return train_loader, dev_loader
def build_trainer(
model: Model,
serialization_dir: str,
train_loader: DataLoader,
dev_loader: DataLoader,
) -> Trainer:
parameters = [(n, p) for n, p in model.named_parameters() if p.requires_grad]
optimizer = AdamOptimizer(parameters) # type: ignore
trainer = GradientDescentTrainer(
model=model,
serialization_dir=serialization_dir,
data_loader=train_loader,
validation_data_loader=dev_loader,
num_epochs=5,
optimizer=optimizer,
)
return trainer
def run_training_loop():
dataset_reader = build_dataset_reader()
train_data, dev_data = read_data(dataset_reader)
vocab = build_vocab(train_data + dev_data)
model = build_model(vocab)
train_loader, dev_loader = build_data_loaders(train_data, dev_data)
train_loader.index_with(vocab)
dev_loader.index_with(vocab)
# You obviously won't want to create a temporary file for your training
# results, but for execution in binder for this guide, we need to do this.
with tempfile.TemporaryDirectory() as serialization_dir:
trainer = build_trainer(model, serialization_dir, train_loader, dev_loader)
trainer.train()
return model, dataset_reader
| allennlp-guide-master | exercises/part1/training-and-prediction/prediction_setup.py |
import tempfile
from typing import Dict, Iterable, List, Tuple
import allennlp
import torch
from allennlp.data import (
DataLoader,
DatasetReader,
Instance,
Vocabulary,
TextFieldTensors,
)
from allennlp.data.data_loaders import SimpleDataLoader
from allennlp.data.fields import LabelField, TextField
from allennlp.data.token_indexers import TokenIndexer, SingleIdTokenIndexer
from allennlp.data.tokenizers import Token, Tokenizer, WhitespaceTokenizer
from allennlp.models import Model
from allennlp.modules import TextFieldEmbedder, Seq2VecEncoder
from allennlp.modules.seq2vec_encoders import BagOfEmbeddingsEncoder
from allennlp.modules.token_embedders import Embedding
from allennlp.modules.text_field_embedders import BasicTextFieldEmbedder
from allennlp.nn import util
from allennlp.training.trainer import Trainer
from allennlp.training.gradient_descent_trainer import GradientDescentTrainer
from allennlp.training.optimizers import AdamOptimizer
from allennlp.training.metrics import CategoricalAccuracy
class ClassificationTsvReader(DatasetReader):
def __init__(
self,
tokenizer: Tokenizer = None,
token_indexers: Dict[str, TokenIndexer] = None,
max_tokens: int = None,
**kwargs
):
super().__init__(**kwargs)
self.tokenizer = tokenizer or WhitespaceTokenizer()
self.token_indexers = token_indexers or {"tokens": SingleIdTokenIndexer()}
self.max_tokens = max_tokens
def _read(self, file_path: str) -> Iterable[Instance]:
with open(file_path, "r") as lines:
for line in lines:
text, sentiment = line.strip().split("\t")
tokens = self.tokenizer.tokenize(text)
if self.max_tokens:
tokens = tokens[: self.max_tokens]
text_field = TextField(tokens, self.token_indexers)
label_field = LabelField(sentiment)
yield Instance({"text": text_field, "label": label_field})
class SimpleClassifier(Model):
def __init__(
self, vocab: Vocabulary, embedder: TextFieldEmbedder, encoder: Seq2VecEncoder
):
super().__init__(vocab)
self.embedder = embedder
self.encoder = encoder
num_labels = vocab.get_vocab_size("labels")
self.classifier = torch.nn.Linear(encoder.get_output_dim(), num_labels)
def forward(
self, text: TextFieldTensors, label: torch.Tensor
) -> Dict[str, torch.Tensor]:
print("In model.forward(); printing here just because binder is so slow")
# Shape: (batch_size, num_tokens, embedding_dim)
embedded_text = self.embedder(text)
# Shape: (batch_size, num_tokens)
mask = util.get_text_field_mask(text)
# Shape: (batch_size, encoding_dim)
encoded_text = self.encoder(embedded_text, mask)
# Shape: (batch_size, num_labels)
logits = self.classifier(encoded_text)
# Shape: (batch_size, num_labels)
probs = torch.nn.functional.softmax(logits, dim=-1)
# Shape: (1,)
loss = torch.nn.functional.cross_entropy(logits, label)
return {"loss": loss, "probs": probs}
def build_dataset_reader() -> DatasetReader:
return ClassificationTsvReader()
def read_data(reader: DatasetReader) -> Tuple[List[Instance], List[Instance]]:
print("Reading data")
training_data = list(reader.read("quick_start/data/movie_review/train.tsv"))
validation_data = list(reader.read("quick_start/data/movie_review/dev.tsv"))
return training_data, validation_data
def build_vocab(instances: Iterable[Instance]) -> Vocabulary:
print("Building the vocabulary")
return Vocabulary.from_instances(instances)
def build_model(vocab: Vocabulary) -> Model:
print("Building the model")
vocab_size = vocab.get_vocab_size("tokens")
embedder = BasicTextFieldEmbedder(
{"tokens": Embedding(embedding_dim=10, num_embeddings=vocab_size)}
)
encoder = BagOfEmbeddingsEncoder(embedding_dim=10)
return SimpleClassifier(vocab, embedder, encoder)
| allennlp-guide-master | exercises/part1/training-and-prediction/training_setup.py |
# We've copied the training loop from an earlier example, with updated model
# code, above in the Setup section. We run the training loop to get a trained
# model.
model, dataset_reader = run_training_loop()
# Now we can evaluate the model on a new dataset.
test_data = list(dataset_reader.read("quick_start/data/movie_review/test.tsv"))
data_loader = SimpleDataLoader(test_data, batch_size=8)
data_loader.index_with(model.vocab)
results = evaluate(model, data_loader)
print(results)
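# Note (added): `evaluate` returns the model's metrics together with the average
# loss, so `results` here contains "accuracy" and "loss".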
| allennlp-guide-master | exercises/part1/training-and-prediction/evaluation_source.py |
import tempfile
import json
from typing import Dict, Iterable, List
import torch
from allennlp.data import DatasetReader, Instance, Vocabulary, TextFieldTensors
from allennlp.data.fields import LabelField, TextField, Field
from allennlp.data.token_indexers import TokenIndexer, SingleIdTokenIndexer
from allennlp.data.tokenizers import Token, Tokenizer, WhitespaceTokenizer
from allennlp.models import Model
from allennlp.modules import TextFieldEmbedder, Seq2VecEncoder
from allennlp.nn import util
from allennlp.training.metrics import CategoricalAccuracy
@DatasetReader.register("classification-tsv")
class ClassificationTsvReader(DatasetReader):
def __init__(
self,
tokenizer: Tokenizer = None,
token_indexers: Dict[str, TokenIndexer] = None,
max_tokens: int = None,
**kwargs
):
super().__init__(**kwargs)
self.tokenizer = tokenizer or WhitespaceTokenizer()
self.token_indexers = token_indexers or {"tokens": SingleIdTokenIndexer()}
self.max_tokens = max_tokens
def text_to_instance(self, tokens: List[Token], label: str = None) -> Instance:
if self.max_tokens:
tokens = tokens[: self.max_tokens]
text_field = TextField(tokens, self.token_indexers)
fields: Dict[str, Field] = {"text": text_field}
if label:
fields["label"] = LabelField(label)
return Instance(fields)
def _read(self, file_path: str) -> Iterable[Instance]:
with open(file_path, "r") as lines:
for line in lines:
text, sentiment = line.strip().split("\t")
tokens = self.tokenizer.tokenize(text)
yield self.text_to_instance(tokens, sentiment)
@Model.register("simple_classifier")
class SimpleClassifier(Model):
def __init__(
self, vocab: Vocabulary, embedder: TextFieldEmbedder, encoder: Seq2VecEncoder
):
super().__init__(vocab)
self.embedder = embedder
self.encoder = encoder
num_labels = vocab.get_vocab_size("labels")
self.classifier = torch.nn.Linear(encoder.get_output_dim(), num_labels)
self.accuracy = CategoricalAccuracy()
def forward(
self, text: TextFieldTensors, label: torch.Tensor = None
) -> Dict[str, torch.Tensor]:
print("In model.forward(); printing here just because binder is so slow")
# Shape: (batch_size, num_tokens, embedding_dim)
embedded_text = self.embedder(text)
# Shape: (batch_size, num_tokens)
mask = util.get_text_field_mask(text)
# Shape: (batch_size, encoding_dim)
encoded_text = self.encoder(embedded_text, mask)
# Shape: (batch_size, num_labels)
logits = self.classifier(encoded_text)
# Shape: (batch_size, num_labels)
        probs = torch.nn.functional.softmax(logits, dim=-1)
        output = {"probs": probs}
        if label is not None:
            self.accuracy(logits, label)
            # Shape: (1,)
            output["loss"] = torch.nn.functional.cross_entropy(logits, label)
return output
def get_metrics(self, reset: bool = False) -> Dict[str, float]:
return {"accuracy": self.accuracy.get_metric(reset)}
| allennlp-guide-master | exercises/part1/training-and-prediction/config_setup.py |
class SimpleClassifier(Model):
def __init__(
self, vocab: Vocabulary, embedder: TextFieldEmbedder, encoder: Seq2VecEncoder
):
super().__init__(vocab)
self.embedder = embedder
self.encoder = encoder
num_labels = vocab.get_vocab_size("labels")
self.classifier = torch.nn.Linear(encoder.get_output_dim(), num_labels)
def forward(
self, text: TextFieldTensors, label: torch.Tensor
) -> Dict[str, torch.Tensor]:
# Shape: (batch_size, num_tokens, embedding_dim)
embedded_text = self.embedder(text)
# Shape: (batch_size, num_tokens)
mask = util.get_text_field_mask(text)
# Shape: (batch_size, encoding_dim)
encoded_text = self.encoder(embedded_text, mask)
# Shape: (batch_size, num_labels)
logits = self.classifier(encoded_text)
# Shape: (batch_size, num_labels)
probs = torch.nn.functional.softmax(logits, dim=-1)
# Shape: (1,)
loss = torch.nn.functional.cross_entropy(logits, label)
return {"loss": loss, "probs": probs}
def run_training_loop():
dataset_reader = ClassificationTsvReader(max_tokens=64)
print("Reading data")
instances = list(dataset_reader.read("quick_start/data/movie_review/train.tsv"))
vocab = build_vocab(instances)
model = build_model(vocab)
outputs = model.forward_on_instances(instances[:4])
print(outputs)
def build_vocab(instances: Iterable[Instance]) -> Vocabulary:
print("Building the vocabulary")
return Vocabulary.from_instances(instances)
def build_model(vocab: Vocabulary) -> Model:
print("Building the model")
vocab_size = vocab.get_vocab_size("tokens")
embedder = BasicTextFieldEmbedder(
{"tokens": Embedding(embedding_dim=10, num_embeddings=vocab_size)}
)
encoder = BagOfEmbeddingsEncoder(embedding_dim=10)
return SimpleClassifier(vocab, embedder, encoder)
run_training_loop()
| allennlp-guide-master | exercises/part1/training-and-prediction/model_source.py |
import glob
from dataclasses import dataclass
import re
from typing import Optional, Iterator
OUTPUT_DIR = "_exercises_test/"
CODEBLOCK_RE = re.compile(r"<codeblock source=\"([^\"]+)\"( setup=\"([^\"]+)\")?>")
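# Example of a line this regex matches (illustrative):
#   <codeblock source="part1/your-first-model/model_source" setup="part1/your-first-model/model_setup">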
@dataclass
class CodeExercise:
source: str
setup: Optional[str] = None
def find_code_exercises() -> Iterator[CodeExercise]:
# sort to ensure the order is deterministic.
for filepath in sorted(glob.glob("./chapters/**/*.md")):
with open(filepath) as f:
text = f.read()
for (source, _, setup) in CODEBLOCK_RE.findall(text):
yield CodeExercise(
source="exercises/" + source + ".py",
setup="exercises/" + setup + ".py" if setup else None,
)
def main():
for i, code_exercise in enumerate(find_code_exercises()):
test_filename = OUTPUT_DIR + f"exercise{i}.py"
with open(test_filename, "w") as test_file:
if code_exercise.setup is not None:
test_file.write(f"# setup: {code_exercise.setup}\n\n")
with open(code_exercise.setup) as setup_file:
for line in setup_file:
test_file.write(line)
test_file.write("\n\n")
test_file.write(f"# source: {code_exercise.source}\n\n")
with open(code_exercise.source) as source_file:
for line in source_file:
test_file.write(line)
if __name__ == "__main__":
main()
| allennlp-guide-master | scripts/build_exercise_tests.py |
import tempfile
from typing import Dict, Iterable, List, Tuple
import torch
from allennlp.common.util import JsonDict
from allennlp.data import (
DataLoader,
DatasetReader,
Instance,
Vocabulary,
TextFieldTensors,
)
from allennlp.data.data_loaders import SimpleDataLoader
from allennlp.data.fields import LabelField, TextField
from allennlp.data.token_indexers import TokenIndexer, SingleIdTokenIndexer
from allennlp.data.tokenizers import Token, Tokenizer, WhitespaceTokenizer
from allennlp.models import Model
from allennlp.modules import TextFieldEmbedder, Seq2VecEncoder
from allennlp.modules.text_field_embedders import BasicTextFieldEmbedder
from allennlp.modules.token_embedders import Embedding
from allennlp.modules.seq2vec_encoders import BagOfEmbeddingsEncoder
from allennlp.nn import util
from allennlp.predictors import Predictor
from allennlp.training.metrics import CategoricalAccuracy
from allennlp.training.optimizers import AdamOptimizer
from allennlp.training.trainer import Trainer
from allennlp.training.gradient_descent_trainer import GradientDescentTrainer
from allennlp.training.util import evaluate
class ClassificationTsvReader(DatasetReader):
def __init__(
self,
tokenizer: Tokenizer = None,
token_indexers: Dict[str, TokenIndexer] = None,
max_tokens: int = None,
**kwargs
):
super().__init__(**kwargs)
self.tokenizer = tokenizer or WhitespaceTokenizer()
self.token_indexers = token_indexers or {"tokens": SingleIdTokenIndexer()}
self.max_tokens = max_tokens
def text_to_instance(self, text: str, label: str = None) -> Instance:
tokens = self.tokenizer.tokenize(text)
if self.max_tokens:
tokens = tokens[: self.max_tokens]
text_field = TextField(tokens, self.token_indexers)
fields = {"text": text_field}
if label:
fields["label"] = LabelField(label)
return Instance(fields)
def _read(self, file_path: str) -> Iterable[Instance]:
with open(file_path, "r") as lines:
for line in lines:
text, sentiment = line.strip().split("\t")
yield self.text_to_instance(text, sentiment)
class SimpleClassifier(Model):
def __init__(
self, vocab: Vocabulary, embedder: TextFieldEmbedder, encoder: Seq2VecEncoder
):
super().__init__(vocab)
self.embedder = embedder
self.encoder = encoder
num_labels = vocab.get_vocab_size("labels")
self.classifier = torch.nn.Linear(encoder.get_output_dim(), num_labels)
self.accuracy = CategoricalAccuracy()
def forward(
self, text: TextFieldTensors, label: torch.Tensor = None
) -> Dict[str, torch.Tensor]:
# Shape: (batch_size, num_tokens, embedding_dim)
embedded_text = self.embedder(text)
# Shape: (batch_size, num_tokens)
mask = util.get_text_field_mask(text)
# Shape: (batch_size, encoding_dim)
encoded_text = self.encoder(embedded_text, mask)
# Shape: (batch_size, num_labels)
logits = self.classifier(encoded_text)
# Shape: (batch_size, num_labels)
        probs = torch.nn.functional.softmax(logits, dim=-1)
output = {"probs": probs}
if label is not None:
self.accuracy(logits, label)
# Shape: (1,)
output["loss"] = torch.nn.functional.cross_entropy(logits, label)
return output
def get_metrics(self, reset: bool = False) -> Dict[str, float]:
return {"accuracy": self.accuracy.get_metric(reset)}
def build_dataset_reader() -> DatasetReader:
return ClassificationTsvReader()
def read_data(reader: DatasetReader) -> Tuple[List[Instance], List[Instance]]:
print("Reading data")
training_data = list(reader.read("quick_start/data/movie_review/train.tsv"))
validation_data = list(reader.read("quick_start/data/movie_review/dev.tsv"))
return training_data, validation_data
def build_vocab(instances: Iterable[Instance]) -> Vocabulary:
print("Building the vocabulary")
return Vocabulary.from_instances(instances)
def build_model(vocab: Vocabulary) -> Model:
print("Building the model")
vocab_size = vocab.get_vocab_size("tokens")
embedder = BasicTextFieldEmbedder(
{"tokens": Embedding(embedding_dim=10, num_embeddings=vocab_size)}
)
encoder = BagOfEmbeddingsEncoder(embedding_dim=10)
return SimpleClassifier(vocab, embedder, encoder)
def build_data_loaders(
train_data: List[Instance],
dev_data: List[Instance],
) -> Tuple[DataLoader, DataLoader]:
train_loader = SimpleDataLoader(train_data, 8, shuffle=True)
dev_loader = SimpleDataLoader(dev_data, 8, shuffle=False)
return train_loader, dev_loader
def build_trainer(
model: Model,
serialization_dir: str,
train_loader: DataLoader,
dev_loader: DataLoader,
) -> Trainer:
parameters = [(n, p) for n, p in model.named_parameters() if p.requires_grad]
optimizer = AdamOptimizer(parameters) # type: ignore
trainer = GradientDescentTrainer(
model=model,
serialization_dir=serialization_dir,
data_loader=train_loader,
validation_data_loader=dev_loader,
num_epochs=5,
optimizer=optimizer,
)
return trainer
def run_training_loop():
dataset_reader = build_dataset_reader()
train_data, dev_data = read_data(dataset_reader)
vocab = build_vocab(train_data + dev_data)
model = build_model(vocab)
train_loader, dev_loader = build_data_loaders(train_data, dev_data)
train_loader.index_with(vocab)
dev_loader.index_with(vocab)
# You obviously won't want to create a temporary file for your training
# results, but for execution in binder for this guide, we need to do this.
with tempfile.TemporaryDirectory() as serialization_dir:
trainer = build_trainer(model, serialization_dir, train_loader, dev_loader)
trainer.train()
return model, dataset_reader
class SentenceClassifierPredictor(Predictor):
def predict(self, sentence: str) -> JsonDict:
return self.predict_json({"sentence": sentence})
def _json_to_instance(self, json_dict: JsonDict) -> Instance:
sentence = json_dict["sentence"]
return self._dataset_reader.text_to_instance(sentence)
# We've copied the training loop from an earlier example, with updated model
# code, above in the Setup section. We run the training loop to get a trained
# model.
model, dataset_reader = run_training_loop()
vocab = model.vocab
predictor = SentenceClassifierPredictor(model, dataset_reader)
output = predictor.predict("A good movie!")
print(
[
(vocab.get_token_from_index(label_id, "labels"), prob)
for label_id, prob in enumerate(output["probs"])
]
)
output = predictor.predict("This was a monstrous waste of time.")
print(
[
(vocab.get_token_from_index(label_id, "labels"), prob)
for label_id, prob in enumerate(output["probs"])
]
)
| allennlp-guide-master | quick_start/predict.py |
| allennlp-guide-master | quick_start/__init__.py |
import tempfile
from typing import Dict, Iterable, List, Tuple
import allennlp
import torch
from allennlp.data import (
DataLoader,
DatasetReader,
Instance,
Vocabulary,
TextFieldTensors,
)
from allennlp.data.data_loaders import SimpleDataLoader
from allennlp.data.fields import LabelField, TextField
from allennlp.data.token_indexers import TokenIndexer, SingleIdTokenIndexer
from allennlp.data.tokenizers import Token, Tokenizer, WhitespaceTokenizer
from allennlp.models import Model
from allennlp.modules import TextFieldEmbedder, Seq2VecEncoder
from allennlp.modules.seq2vec_encoders import BagOfEmbeddingsEncoder
from allennlp.modules.token_embedders import Embedding
from allennlp.modules.text_field_embedders import BasicTextFieldEmbedder
from allennlp.nn import util
from allennlp.training.trainer import Trainer
from allennlp.training.gradient_descent_trainer import GradientDescentTrainer
from allennlp.training.optimizers import AdamOptimizer
from allennlp.training.metrics import CategoricalAccuracy
class ClassificationTsvReader(DatasetReader):
def __init__(
self,
tokenizer: Tokenizer = None,
token_indexers: Dict[str, TokenIndexer] = None,
max_tokens: int = None,
**kwargs
):
super().__init__(**kwargs)
self.tokenizer = tokenizer or WhitespaceTokenizer()
self.token_indexers = token_indexers or {"tokens": SingleIdTokenIndexer()}
self.max_tokens = max_tokens
def _read(self, file_path: str) -> Iterable[Instance]:
with open(file_path, "r") as lines:
for line in lines:
text, sentiment = line.strip().split("\t")
tokens = self.tokenizer.tokenize(text)
if self.max_tokens:
tokens = tokens[: self.max_tokens]
text_field = TextField(tokens, self.token_indexers)
label_field = LabelField(sentiment)
yield Instance({"text": text_field, "label": label_field})
class SimpleClassifier(Model):
def __init__(
self, vocab: Vocabulary, embedder: TextFieldEmbedder, encoder: Seq2VecEncoder
):
super().__init__(vocab)
self.embedder = embedder
self.encoder = encoder
num_labels = vocab.get_vocab_size("labels")
self.classifier = torch.nn.Linear(encoder.get_output_dim(), num_labels)
def forward(
self, text: TextFieldTensors, label: torch.Tensor
) -> Dict[str, torch.Tensor]:
print("In model.forward(); printing here just because binder is so slow")
# Shape: (batch_size, num_tokens, embedding_dim)
embedded_text = self.embedder(text)
# Shape: (batch_size, num_tokens)
mask = util.get_text_field_mask(text)
# Shape: (batch_size, encoding_dim)
encoded_text = self.encoder(embedded_text, mask)
# Shape: (batch_size, num_labels)
logits = self.classifier(encoded_text)
# Shape: (batch_size, num_labels)
probs = torch.nn.functional.softmax(logits, dim=-1)
# Shape: (1,)
loss = torch.nn.functional.cross_entropy(logits, label)
return {"loss": loss, "probs": probs}
def build_dataset_reader() -> DatasetReader:
return ClassificationTsvReader()
def read_data(reader: DatasetReader) -> Tuple[List[Instance], List[Instance]]:
print("Reading data")
training_data = list(reader.read("quick_start/data/movie_review/train.tsv"))
validation_data = list(reader.read("quick_start/data/movie_review/dev.tsv"))
return training_data, validation_data
def build_vocab(instances: Iterable[Instance]) -> Vocabulary:
print("Building the vocabulary")
return Vocabulary.from_instances(instances)
def build_model(vocab: Vocabulary) -> Model:
print("Building the model")
vocab_size = vocab.get_vocab_size("tokens")
embedder = BasicTextFieldEmbedder(
{"tokens": Embedding(embedding_dim=10, num_embeddings=vocab_size)}
)
encoder = BagOfEmbeddingsEncoder(embedding_dim=10)
return SimpleClassifier(vocab, embedder, encoder)
def run_training_loop():
dataset_reader = build_dataset_reader()
train_data, dev_data = read_data(dataset_reader)
vocab = build_vocab(train_data + dev_data)
model = build_model(vocab)
train_loader, dev_loader = build_data_loaders(train_data, dev_data)
train_loader.index_with(vocab)
dev_loader.index_with(vocab)
# You obviously won't want to create a temporary file for your training
# results, but for execution in binder for this guide, we need to do this.
with tempfile.TemporaryDirectory() as serialization_dir:
trainer = build_trainer(model, serialization_dir, train_loader, dev_loader)
print("Starting training")
trainer.train()
print("Finished training")
return model, dataset_reader
def build_data_loaders(
train_data: List[Instance],
dev_data: List[Instance],
) -> Tuple[DataLoader, DataLoader]:
train_loader = SimpleDataLoader(train_data, 8, shuffle=True)
dev_loader = SimpleDataLoader(dev_data, 8, shuffle=False)
return train_loader, dev_loader
def build_trainer(
model: Model,
serialization_dir: str,
train_loader: DataLoader,
dev_loader: DataLoader,
) -> Trainer:
parameters = [(n, p) for n, p in model.named_parameters() if p.requires_grad]
optimizer = AdamOptimizer(parameters) # type: ignore
trainer = GradientDescentTrainer(
model=model,
serialization_dir=serialization_dir,
data_loader=train_loader,
validation_data_loader=dev_loader,
num_epochs=5,
optimizer=optimizer,
cuda_device=-1,
)
return trainer
run_training_loop()
| allennlp-guide-master | quick_start/train.py |
import tempfile
from typing import Dict, Iterable, List, Tuple
import torch
from allennlp.data import (
DataLoader,
DatasetReader,
Instance,
Vocabulary,
TextFieldTensors,
)
from allennlp.data.data_loaders import SimpleDataLoader
from allennlp.data.fields import LabelField, TextField
from allennlp.data.token_indexers import TokenIndexer, SingleIdTokenIndexer
from allennlp.data.tokenizers import Token, Tokenizer, WhitespaceTokenizer
from allennlp.models import Model
from allennlp.modules import TextFieldEmbedder, Seq2VecEncoder
from allennlp.modules.text_field_embedders import BasicTextFieldEmbedder
from allennlp.modules.token_embedders import Embedding
from allennlp.modules.seq2vec_encoders import BagOfEmbeddingsEncoder
from allennlp.nn import util
from allennlp.training.metrics import CategoricalAccuracy
from allennlp.training.optimizers import AdamOptimizer
from allennlp.training.trainer import Trainer
from allennlp.training.gradient_descent_trainer import GradientDescentTrainer
from allennlp.training.util import evaluate
class ClassificationTsvReader(DatasetReader):
def __init__(
self,
tokenizer: Tokenizer = None,
token_indexers: Dict[str, TokenIndexer] = None,
max_tokens: int = None,
**kwargs
):
super().__init__(**kwargs)
self.tokenizer = tokenizer or WhitespaceTokenizer()
self.token_indexers = token_indexers or {"tokens": SingleIdTokenIndexer()}
self.max_tokens = max_tokens
def _read(self, file_path: str) -> Iterable[Instance]:
with open(file_path, "r") as lines:
for line in lines:
text, sentiment = line.strip().split("\t")
tokens = self.tokenizer.tokenize(text)
if self.max_tokens:
tokens = tokens[: self.max_tokens]
text_field = TextField(tokens, self.token_indexers)
label_field = LabelField(sentiment)
yield Instance({"text": text_field, "label": label_field})
class SimpleClassifier(Model):
def __init__(
self, vocab: Vocabulary, embedder: TextFieldEmbedder, encoder: Seq2VecEncoder
):
super().__init__(vocab)
self.embedder = embedder
self.encoder = encoder
num_labels = vocab.get_vocab_size("labels")
self.classifier = torch.nn.Linear(encoder.get_output_dim(), num_labels)
self.accuracy = CategoricalAccuracy()
def forward(
self, text: TextFieldTensors, label: torch.Tensor
) -> Dict[str, torch.Tensor]:
# Shape: (batch_size, num_tokens, embedding_dim)
embedded_text = self.embedder(text)
# Shape: (batch_size, num_tokens)
mask = util.get_text_field_mask(text)
# Shape: (batch_size, encoding_dim)
encoded_text = self.encoder(embedded_text, mask)
# Shape: (batch_size, num_labels)
logits = self.classifier(encoded_text)
# Shape: (batch_size, num_labels)
        probs = torch.nn.functional.softmax(logits, dim=-1)
# Shape: (1,)
loss = torch.nn.functional.cross_entropy(logits, label)
self.accuracy(logits, label)
output = {"loss": loss, "probs": probs}
return output
def get_metrics(self, reset: bool = False) -> Dict[str, float]:
return {"accuracy": self.accuracy.get_metric(reset)}
def build_dataset_reader() -> DatasetReader:
return ClassificationTsvReader()
def read_data(reader: DatasetReader) -> Tuple[List[Instance], List[Instance]]:
print("Reading data")
training_data = list(reader.read("quick_start/data/movie_review/train.tsv"))
validation_data = list(reader.read("quick_start/data/movie_review/dev.tsv"))
return training_data, validation_data
def build_vocab(instances: Iterable[Instance]) -> Vocabulary:
print("Building the vocabulary")
return Vocabulary.from_instances(instances)
def build_model(vocab: Vocabulary) -> Model:
print("Building the model")
vocab_size = vocab.get_vocab_size("tokens")
embedder = BasicTextFieldEmbedder(
{"tokens": Embedding(embedding_dim=10, num_embeddings=vocab_size)}
)
encoder = BagOfEmbeddingsEncoder(embedding_dim=10)
return SimpleClassifier(vocab, embedder, encoder)
def build_data_loaders(
train_data: List[Instance],
dev_data: List[Instance],
) -> Tuple[DataLoader, DataLoader]:
train_loader = SimpleDataLoader(train_data, 8, shuffle=True)
dev_loader = SimpleDataLoader(dev_data, 8, shuffle=False)
return train_loader, dev_loader
def build_trainer(
model: Model,
serialization_dir: str,
train_loader: DataLoader,
dev_loader: DataLoader,
) -> Trainer:
parameters = [(n, p) for n, p in model.named_parameters() if p.requires_grad]
optimizer = AdamOptimizer(parameters) # type: ignore
trainer = GradientDescentTrainer(
model=model,
serialization_dir=serialization_dir,
data_loader=train_loader,
validation_data_loader=dev_loader,
num_epochs=5,
optimizer=optimizer,
)
return trainer
def run_training_loop():
dataset_reader = build_dataset_reader()
train_data, dev_data = read_data(dataset_reader)
vocab = build_vocab(train_data + dev_data)
model = build_model(vocab)
train_loader, dev_loader = build_data_loaders(train_data, dev_data)
train_loader.index_with(vocab)
dev_loader.index_with(vocab)
# You obviously won't want to create a temporary file for your training
# results, but for execution in binder for this guide, we need to do this.
with tempfile.TemporaryDirectory() as serialization_dir:
trainer = build_trainer(model, serialization_dir, train_loader, dev_loader)
trainer.train()
return model, dataset_reader
# We've copied the training loop from an earlier example, with updated model
# code, above in the Setup section. We run the training loop to get a trained
# model.
model, dataset_reader = run_training_loop()
# Now we can evaluate the model on a new dataset.
test_data = list(dataset_reader.read("quick_start/data/movie_review/test.tsv"))
data_loader = SimpleDataLoader(test_data, 8)
data_loader.index_with(model.vocab)
results = evaluate(model, data_loader)
print(results)
| allennlp-guide-master | quick_start/evaluate.py |
from .dataset_readers import *
from .models import *
from .predictors import *
| allennlp-guide-master | quick_start/my_text_classifier/__init__.py |
from .classification_tsv import ClassificationTsvReader
| allennlp-guide-master | quick_start/my_text_classifier/dataset_readers/__init__.py |
from typing import Dict, Iterable, List
from allennlp.data import DatasetReader, Instance
from allennlp.data.fields import LabelField, TextField
from allennlp.data.token_indexers import TokenIndexer, SingleIdTokenIndexer
from allennlp.data.tokenizers import Token, Tokenizer, WhitespaceTokenizer
@DatasetReader.register("classification-tsv")
class ClassificationTsvReader(DatasetReader):
def __init__(
self,
tokenizer: Tokenizer = None,
token_indexers: Dict[str, TokenIndexer] = None,
max_tokens: int = None,
**kwargs
):
super().__init__(**kwargs)
self.tokenizer = tokenizer or WhitespaceTokenizer()
self.token_indexers = token_indexers or {"tokens": SingleIdTokenIndexer()}
self.max_tokens = max_tokens
def text_to_instance(self, text: str, label: str = None) -> Instance:
tokens = self.tokenizer.tokenize(text)
if self.max_tokens:
tokens = tokens[: self.max_tokens]
text_field = TextField(tokens, self.token_indexers)
fields = {"text": text_field}
if label:
fields["label"] = LabelField(label)
return Instance(fields)
def _read(self, file_path: str) -> Iterable[Instance]:
with open(file_path, "r") as lines:
for line in lines:
text, sentiment = line.strip().split("\t")
yield self.text_to_instance(text, sentiment)
| allennlp-guide-master | quick_start/my_text_classifier/dataset_readers/classification_tsv.py |
from .sentence_classifier_predictor import SentenceClassifierPredictor
| allennlp-guide-master | quick_start/my_text_classifier/predictors/__init__.py |
from allennlp.common import JsonDict
from allennlp.data import DatasetReader, Instance
from allennlp.models import Model
from allennlp.predictors import Predictor
@Predictor.register("sentence_classifier")
class SentenceClassifierPredictor(Predictor):
def predict(self, sentence: str) -> JsonDict:
return self.predict_json({"sentence": sentence})
def _json_to_instance(self, json_dict: JsonDict) -> Instance:
sentence = json_dict["sentence"]
return self._dataset_reader.text_to_instance(sentence)
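# Usage sketch (added, not part of the original module): with a trained archive,
# this predictor can be run from the command line, for example:
#   allennlp predict model.tar.gz inputs.jsonl \
#       --predictor sentence_classifier --include-package my_text_classifier
# where each line of inputs.jsonl is a JSON object such as {"sentence": "A good movie!"}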
| allennlp-guide-master | quick_start/my_text_classifier/predictors/sentence_classifier_predictor.py |
from .simple_classifier import SimpleClassifier
| allennlp-guide-master | quick_start/my_text_classifier/models/__init__.py |
from typing import Dict
import torch
from allennlp.data import Vocabulary
from allennlp.data import TextFieldTensors
from allennlp.models import Model
from allennlp.modules import TextFieldEmbedder, Seq2VecEncoder
from allennlp.nn import util
from allennlp.training.metrics import CategoricalAccuracy
@Model.register("simple_classifier")
class SimpleClassifier(Model):
def __init__(
self, vocab: Vocabulary, embedder: TextFieldEmbedder, encoder: Seq2VecEncoder
):
super().__init__(vocab)
self.embedder = embedder
self.encoder = encoder
num_labels = vocab.get_vocab_size("labels")
self.classifier = torch.nn.Linear(encoder.get_output_dim(), num_labels)
self.accuracy = CategoricalAccuracy()
def forward(
self, text: TextFieldTensors, label: torch.Tensor = None
) -> Dict[str, torch.Tensor]:
# Shape: (batch_size, num_tokens, embedding_dim)
embedded_text = self.embedder(text)
# Shape: (batch_size, num_tokens)
mask = util.get_text_field_mask(text)
# Shape: (batch_size, encoding_dim)
encoded_text = self.encoder(embedded_text, mask)
# Shape: (batch_size, num_labels)
logits = self.classifier(encoded_text)
# Shape: (batch_size, num_labels)
        probs = torch.nn.functional.softmax(logits, dim=-1)
        output = {"probs": probs}
        if label is not None:
            self.accuracy(logits, label)
            # Shape: (1,)
            output["loss"] = torch.nn.functional.cross_entropy(logits, label)
return output
def get_metrics(self, reset: bool = False) -> Dict[str, float]:
return {"accuracy": self.accuracy.get_metric(reset)}
| allennlp-guide-master | quick_start/my_text_classifier/models/simple_classifier.py |
from typing import Dict, Optional
from overrides import overrides
import torch
from allennlp.data import TextFieldTensors, Vocabulary
from allennlp.models.model import Model
from allennlp.modules import FeedForward, Seq2SeqEncoder, Seq2VecEncoder, TextFieldEmbedder
from allennlp.nn import InitializerApplicator, util
from allennlp.nn.util import get_text_field_mask
from allennlp.training.metrics import CategoricalAccuracy
"""
shared things:
- "encoded_representations" field in output if "output_hidden_states" is turned on
- "linear_classifier_weight" field with w
- "linear_classifier_bias" field with b
"""
@Model.register("encoder_classifier")
class EncoderClassifier(Model):
"""
This `Model` implements a basic text classifier. After embedding the text into
a text field, we will optionally encode the embeddings with a `Seq2SeqEncoder`. The
resulting sequence is pooled using a `Seq2VecEncoder` and then passed to
a linear classification layer, which projects into the label space. If a
`Seq2SeqEncoder` is not provided, we will pass the embedded text directly to the
`Seq2VecEncoder`.
    Registered as a `Model` with name "encoder_classifier".
# Parameters
vocab : `Vocabulary`
text_field_embedder : `TextFieldEmbedder`
Used to embed the input text into a `TextField`
seq2seq_encoder : `Seq2SeqEncoder`, optional (default=`None`)
Optional Seq2Seq encoder layer for the input text.
seq2vec_encoder : `Seq2VecEncoder`
Required Seq2Vec encoder layer. If `seq2seq_encoder` is provided, this encoder
will pool its output. Otherwise, this encoder will operate directly on the output
of the `text_field_embedder`.
feedforward : `FeedForward`, optional, (default = `None`)
An optional feedforward layer to apply after the seq2vec_encoder.
dropout : `float`, optional (default = `None`)
Dropout percentage to use.
num_labels : `int`, optional (default = `None`)
Number of labels to project to in classification layer. By default, the classification layer will
project to the size of the vocabulary namespace corresponding to labels.
label_namespace : `str`, optional (default = `"labels"`)
Vocabulary namespace corresponding to labels. By default, we use the "labels" namespace.
initializer : `InitializerApplicator`, optional (default=`InitializerApplicator()`)
If provided, will be used to initialize the model parameters.
"""
def __init__(
self,
vocab: Vocabulary,
text_field_embedder: TextFieldEmbedder,
seq2vec_encoder: Seq2VecEncoder,
output_hidden_states: bool = False,
seq2seq_encoder: Seq2SeqEncoder = None,
feedforward: Optional[FeedForward] = None,
dropout: float = None,
num_labels: int = None,
label_namespace: str = "labels",
namespace: str = "tokens",
initializer: InitializerApplicator = InitializerApplicator(),
**kwargs,
) -> None:
super().__init__(vocab, **kwargs)
self._text_field_embedder = text_field_embedder
if seq2seq_encoder:
self._seq2seq_encoder = seq2seq_encoder
else:
self._seq2seq_encoder = None
self._seq2vec_encoder = seq2vec_encoder
self._feedforward = feedforward
if feedforward is not None:
self._classifier_input_dim = self._feedforward.get_output_dim()
else:
self._classifier_input_dim = self._seq2vec_encoder.get_output_dim()
if dropout:
self._dropout = torch.nn.Dropout(dropout)
else:
self._dropout = None
self._label_namespace = label_namespace
self._namespace = namespace
if num_labels:
self._num_labels = num_labels
else:
self._num_labels = vocab.get_vocab_size(namespace=self._label_namespace)
self._classification_layer = torch.nn.Linear(self._classifier_input_dim, self._num_labels)
self._accuracy = CategoricalAccuracy()
self._loss = torch.nn.CrossEntropyLoss()
initializer(self)
self._output_hidden_states = output_hidden_states
def forward( # type: ignore
self, tokens: TextFieldTensors, label: torch.IntTensor = None
) -> Dict[str, torch.Tensor]:
"""
# Parameters
tokens : `TextFieldTensors`
From a `TextField`
label : `torch.IntTensor`, optional (default = `None`)
From a `LabelField`
# Returns
An output dictionary consisting of:
- `logits` (`torch.FloatTensor`) :
A tensor of shape `(batch_size, num_labels)` representing
unnormalized log probabilities of the label.
- `probs` (`torch.FloatTensor`) :
A tensor of shape `(batch_size, num_labels)` representing
probabilities of the label.
- `loss` : (`torch.FloatTensor`, optional) :
A scalar loss to be optimised.
"""
embedded_text = self._text_field_embedder(tokens)
mask = get_text_field_mask(tokens)
if self._seq2seq_encoder:
embedded_text = self._seq2seq_encoder(embedded_text, mask=mask)
embedded_text = self._seq2vec_encoder(embedded_text, mask=mask)
if self._dropout:
embedded_text = self._dropout(embedded_text)
if self._feedforward is not None:
embedded_text = self._feedforward(embedded_text)
logits = self._classification_layer(embedded_text)
probs = torch.nn.functional.softmax(logits, dim=-1)
output_dict = {"logits": logits, "probs": probs}
if self._output_hidden_states:
output_dict["encoded_representations"] = embedded_text
output_dict["token_ids"] = util.get_token_ids_from_text_field_tensors(tokens)
if label is not None:
loss = self._loss(logits, label.long().view(-1))
output_dict["loss"] = loss
self._accuracy(logits, label)
return output_dict
@overrides
def make_output_human_readable(
self, output_dict: Dict[str, torch.Tensor]
) -> Dict[str, torch.Tensor]:
"""
Does a simple argmax over the probabilities, converts index to string label, and
add `"label"` key to the dictionary with the result.
"""
predictions = output_dict["probs"]
if predictions.dim() == 2:
predictions_list = [predictions[i] for i in range(predictions.shape[0])]
else:
predictions_list = [predictions]
classes = []
for prediction in predictions_list:
label_idx = prediction.argmax(dim=-1).item()
label_str = self.vocab.get_index_to_token_vocabulary(self._label_namespace).get(
label_idx, str(label_idx)
)
classes.append(label_str)
output_dict["label"] = classes
        # "token_ids" is only added to the output when output_hidden_states is enabled,
        # so guard against its absence here.
        if "token_ids" in output_dict:
            tokens = []
            for instance_tokens in output_dict["token_ids"]:
                tokens.append(
                    [
                        self.vocab.get_token_from_index(token_id.item(), namespace=self._namespace)
                        for token_id in instance_tokens
                    ]
                )
            output_dict["tokens"] = tokens
        return output_dict
def get_metrics(self, reset: bool = False) -> Dict[str, float]:
metrics = {"accuracy": self._accuracy.get_metric(reset)}
return metrics
default_predictor = "text_classifier"
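

# ---------------------------------------------------------------------------
# Minimal usage sketch (not part of the original module). The toy vocabulary,
# embedder and encoder below are illustrative assumptions; they only show how
# the components described in the class docstring fit together and which extra
# output keys appear when `output_hidden_states=True`.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    from allennlp.modules.seq2vec_encoders import BagOfEmbeddingsEncoder
    from allennlp.modules.text_field_embedders import BasicTextFieldEmbedder
    from allennlp.modules.token_embedders import Embedding

    toy_vocab = Vocabulary()
    toy_vocab.add_tokens_to_namespace(["a", "b"], namespace="tokens")
    toy_vocab.add_tokens_to_namespace(["pos", "neg"], namespace="labels")
    toy_model = EncoderClassifier(
        vocab=toy_vocab,
        text_field_embedder=BasicTextFieldEmbedder(
            {"tokens": Embedding(embedding_dim=8, num_embeddings=toy_vocab.get_vocab_size("tokens"))}
        ),
        seq2vec_encoder=BagOfEmbeddingsEncoder(embedding_dim=8),
        output_hidden_states=True,
    )
    # forward() returns "logits" and "probs"; with output_hidden_states=True it also
    # returns "encoded_representations" and "token_ids" for downstream probing.
    print(toy_model)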
| contrastive-explanations-main | allennlp_lib/encoder_classifier.py |
from typing import List, Dict
import numpy
from overrides import overrides
from allennlp.common.util import JsonDict
from allennlp.data import Instance
from allennlp.predictors.predictor import Predictor
from allennlp.data.fields import LabelField
@Predictor.register("textual_entailment_fixed")
class TextualEntailmentPredictorFixed(Predictor):
"""
Predictor for the [`DecomposableAttention`](../models/decomposable_attention.md) model.
    Registered as a `Predictor` with name "textual_entailment_fixed".
"""
def predict(self, premise: str, hypothesis: str) -> JsonDict:
"""
Predicts whether the hypothesis is entailed by the premise text.
# Parameters
premise : `str`
A passage representing what is assumed to be true.
hypothesis : `str`
A sentence that may be entailed by the premise.
# Returns
`JsonDict`
            A dictionary where the key "probs" gives the predicted probabilities of each of
            [entailment, contradiction, neutral].
"""
return self.predict_json({"sentence1": premise, "sentence2": hypothesis})
# def predict_json(self, js: JsonDict) -> JsonDict:
# return self.predict_json(js)
@overrides
def _json_to_instance(self, json_dict: JsonDict) -> Instance:
"""
        Expects JSON that looks like `{"sentence1": "...", "sentence2": "..."}`.
"""
premise_text = json_dict["sentence1"]
hypothesis_text = json_dict["sentence2"]
return self._dataset_reader.text_to_instance(premise_text, hypothesis_text)
@overrides
def predictions_to_labeled_instances(
self, instance: Instance, outputs: Dict[str, numpy.ndarray]
) -> List[Instance]:
new_instance = instance.duplicate()
label = numpy.argmax(outputs["probs"])
# Skip indexing, we have integer representations of the strings "entailment", etc.
new_instance.add_field("label", LabelField(int(label), skip_indexing=True))
return [new_instance]
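

# Usage sketch (illustrative; the archive path below is a placeholder). Because the
# `@Predictor.register` decorator above runs at import time, loading an archive with
# `predictor_name="textual_entailment_fixed"` picks up this class:
if __name__ == "__main__":
    predictor = Predictor.from_path(
        "path/to/model.tar.gz", predictor_name="textual_entailment_fixed"
    )
    print(predictor.predict(premise="A dog runs.", hypothesis="An animal is moving."))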
| contrastive-explanations-main | allennlp_lib/nli_predictor.py |
from typing import List, Dict
import numpy
import logging
from overrides import overrides
from allennlp.common.util import JsonDict
from allennlp.data import Instance
from allennlp.predictors.predictor import Predictor
from allennlp.data.fields import LabelField
logger = logging.getLogger(__name__)
@Predictor.register("bios_masked_predictor")
class BiosIrrelevantPredictor(Predictor):
@overrides
def _json_to_instance(self, json_dict: JsonDict) -> Instance:
"""
        Expects JSON with "text_without_gender" and "gender_tokens" keys; the
        gendered tokens are re-masked with "<mask>" before prediction.
"""
# premise_text = json_dict["Sentence1"]
# hypothesis_text = json_dict["Sentence2"]
# logger.info(str(json_dict))
# logger.info(list(enumerate(json_dict['text'].split())))
# logger.info(list(enumerate(json_dict['text_without_gender'].split())))
text = json_dict["text_without_gender"]
text = text.split()
for i in json_dict['gender_tokens']:
text[i] = text[i].replace('_', '<mask>')
text = ' '.join(text)
logger.info(text)
# text = json_dict["text"]
# new_text = text.split()
# for i, w in enumerate(text.split()):
# if i not in json_dict['gender_tokens']:
# new_text[i] = '<mask>'
# text = ' '.join(new_text)
# logger.info(text)
return self._dataset_reader.text_to_instance(text)
@overrides
def predictions_to_labeled_instances(
self, instance: Instance, outputs: Dict[str, numpy.ndarray]
) -> List[Instance]:
new_instance = instance.duplicate()
label = numpy.argmax(outputs["probs"])
# Skip indexing, we have integer representations of the strings "entailment", etc.
new_instance.add_field("label", LabelField(int(label), skip_indexing=True))
return [new_instance]
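

# Example input record (illustrative): a bios line such as
#   {"text_without_gender": "_ is a registered nurse .", "gender_tokens": [0], ...}
# is re-masked to "<mask> is a registered nurse ." before being handed to the reader.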
| contrastive-explanations-main | allennlp_lib/bios_masked_predictor.py |
from copy import deepcopy
from typing import List, Dict
from overrides import overrides
import numpy
import json
from nltk.tree import Tree
from allennlp.common.util import JsonDict
from allennlp.data import Instance
from allennlp.predictors.predictor import Predictor
from allennlp.data.fields import LabelField
@Predictor.register("jsonl_predictor")
class TextClassifierPredictor(Predictor):
"""
Predictor for any model that takes in a sentence and returns
a single class for it. In particular, it can be used with
the :class:`~allennlp.models.basic_classifier.BasicClassifier` model
"""
def predict(self, sentence: str) -> JsonDict:
return self.predict_json(json.loads(sentence))
@overrides
def load_line(self, line: str) -> JsonDict:
"""
If your inputs are not in JSON-lines format (e.g. you have a CSV)
you can override this function to parse them correctly.
"""
return json.loads(line)
@overrides
def _json_to_instance(self, json_dict: JsonDict) -> Instance:
"""
        Expects JSON that looks like ``{"text": "..."}`` and converts it into
        an `Instance` with the dataset reader.
"""
sentence = json_dict["text"]
return self._dataset_reader.text_to_instance(sentence)
@overrides
def predictions_to_labeled_instances(
self, instance: Instance, outputs: Dict[str, numpy.ndarray]
) -> List[Instance]:
new_instance = deepcopy(instance)
label = numpy.argmax(outputs["probs"])
label_vocab = self.model.vocab.get_token_to_index_vocabulary("labels")
label_vocab = {v: k for k, v in label_vocab.items()}
new_instance.add_field("label", LabelField(label_vocab[label]))
return [new_instance]
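

# Usage sketch (illustrative): this predictor is typically selected from the command
# line, e.g.
#   allennlp predict model.tar.gz data.jsonl --predictor jsonl_predictor --include-package allennlp_lib
# where every line of data.jsonl is a JSON object containing a "text" field.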
| contrastive-explanations-main | allennlp_lib/jsonl_predictor.py |
import itertools
from typing import Dict, Optional
import json
import logging
from overrides import overrides
from allennlp.common.file_utils import cached_path
from allennlp.data.dataset_readers.dataset_reader import DatasetReader
from allennlp.data.fields import Field, TextField, LabelField, MetadataField
from allennlp.data.instance import Instance
from allennlp.data.token_indexers import SingleIdTokenIndexer, TokenIndexer
from allennlp.data.tokenizers import Tokenizer, SpacyTokenizer, PretrainedTransformerTokenizer
logger = logging.getLogger(__name__)
@DatasetReader.register("mnli")
class MnliReader(DatasetReader):
"""
    Reads a file from the MultiNLI (MNLI) dataset. This data is
formatted as jsonl, one json-formatted instance per line. The keys in the data are
"gold_label", "sentence1", and "sentence2". We convert these keys into fields named "label",
"premise" and "hypothesis", along with a metadata field containing the tokenized strings of the
premise and hypothesis.
    Registered as a `DatasetReader` with name "mnli".
# Parameters
tokenizer : `Tokenizer`, optional (default=`SpacyTokenizer()`)
We use this `Tokenizer` for both the premise and the hypothesis. See :class:`Tokenizer`.
token_indexers : `Dict[str, TokenIndexer]`, optional (default=`{"tokens": SingleIdTokenIndexer()}`)
We similarly use this for both the premise and the hypothesis. See :class:`TokenIndexer`.
combine_input_fields : `bool`, optional
(default=`isinstance(tokenizer, PretrainedTransformerTokenizer)`)
If False, represent the premise and the hypothesis as separate fields in the instance.
If True, tokenize them together using `tokenizer.tokenize_sentence_pair()`
and provide a single `tokens` field in the instance.
"""
def __init__(
self,
tokenizer: Optional[Tokenizer] = None,
token_indexers: Dict[str, TokenIndexer] = None,
combine_input_fields: Optional[bool] = None,
**kwargs,
) -> None:
super().__init__(manual_distributed_sharding=True, **kwargs)
self._tokenizer = tokenizer or SpacyTokenizer()
if isinstance(self._tokenizer, PretrainedTransformerTokenizer):
assert not self._tokenizer._add_special_tokens
self._token_indexers = token_indexers or {"tokens": SingleIdTokenIndexer()}
if combine_input_fields is not None:
self._combine_input_fields = combine_input_fields
else:
self._combine_input_fields = isinstance(self._tokenizer, PretrainedTransformerTokenizer)
@overrides
def _read(self, file_path: str):
# if `file_path` is a URL, redirect to the cache
file_path = cached_path(file_path)
import torch.distributed as dist
from allennlp.common.util import is_distributed
if is_distributed():
start_index = dist.get_rank()
step_size = dist.get_world_size()
logger.info(
"Reading SNLI instances %% %d from jsonl dataset at: %s", step_size, file_path
)
else:
start_index = 0
step_size = 1
logger.info("Reading SNLI instances from jsonl dataset at: %s", file_path)
with open(file_path, "r") as snli_file:
example_iter = (json.loads(line) for line in snli_file)
filtered_example_iter = (
example for example in example_iter if example["gold_label"] != "-"
)
for example in itertools.islice(filtered_example_iter, start_index, None, step_size):
label = example["gold_label"]
premise = example["sentence1"]
hypothesis = example["sentence2"]
yield self.text_to_instance(premise, hypothesis, label)
@overrides
def text_to_instance(
self, # type: ignore
premise: str,
hypothesis: str,
label: str = None,
) -> Instance:
fields: Dict[str, Field] = {}
premise = self._tokenizer.tokenize(premise)
hypothesis = self._tokenizer.tokenize(hypothesis)
if self._combine_input_fields:
tokens = self._tokenizer.add_special_tokens(premise, hypothesis)
fields["tokens"] = TextField(tokens, self._token_indexers)
else:
premise_tokens = self._tokenizer.add_special_tokens(premise)
hypothesis_tokens = self._tokenizer.add_special_tokens(hypothesis)
fields["premise"] = TextField(premise_tokens, self._token_indexers)
fields["hypothesis"] = TextField(hypothesis_tokens, self._token_indexers)
metadata = {
"premise_tokens": [x.text for x in premise_tokens],
"hypothesis_tokens": [x.text for x in hypothesis_tokens],
}
fields["metadata"] = MetadataField(metadata)
if label:
fields["label"] = LabelField(label)
return Instance(fields)
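

# Usage sketch (illustrative, not part of the original module): each jsonl line is
# expected to look like
#   {"gold_label": "entailment", "sentence1": "A dog runs.", "sentence2": "An animal moves."}
# and `text_to_instance` can be exercised directly (the default SpacyTokenizer needs
# spaCy's English model to be installed):
if __name__ == "__main__":
    reader = MnliReader()
    instance = reader.text_to_instance("A dog runs.", "An animal moves.", "entailment")
    print(instance)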
| contrastive-explanations-main | allennlp_lib/mnli.py |
if __name__ == '__main__':
    import argparse
    import json

    import numpy as np
    from allennlp.common.util import import_module_and_submodules as import_submodules
    from allennlp.models.archival import load_archive
parser = argparse.ArgumentParser()
parser.add_argument('--model-path', action='store')
args = parser.parse_args()
import_submodules("allennlp_lib")
model_path = args.model_path
archive = load_archive(model_path + '/model.tar.gz')
model = archive.model
weight = model._classification_layer.weight.detach().numpy()
bias = model._classification_layer.bias.detach().numpy()
# weight = model.model.classifier.out_proj.weight.detach()
# bias = model.model.classifier.out_proj.bias.detach()
# label_vocab = model.vocab.get_token_to_index_vocabulary("labels")
label_vocab = model.vocab.get_token_to_index_vocabulary("labels")
np.save(model_path + "/w", weight)
np.save(model_path + "/b", bias)
with open(model_path + "/label2index.json", "w") as f:
json.dump(label_vocab, f)
| contrastive-explanations-main | scripts/cache_linear_classifier.py |
if __name__ == '__main__':
    import argparse
    import json
    import os

    import numpy as np
parser = argparse.ArgumentParser()
parser.add_argument('-i', '--input-path', action='store')
parser.add_argument('-o', '--output-path', action='store')
parser.add_argument('-m', '--model-path', action='store')
args = parser.parse_args()
if not os.path.exists(os.path.dirname(args.output_path)):
os.makedirs(os.path.dirname(args.output_path))
with open(args.model_path + "/label2index.json", "r") as f:
label_dict = json.load(f)
instance = None
label = None
# cls_vectors = []
encoded_representations = []
labels = []
preds = []
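    # The loop below parses a file that interleaves "input ...: {json}" and
    # "prediction ...: {json}" lines separated by blank lines (this appears to be
    # the console output of `allennlp predict`).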
with open(args.input_path, 'r') as f:
for line in f:
line = line.strip()
if not line:
assert instance is not None and label is not None
# cls_vectors.append(label['hidden_layer12_cls'])
encoded_representations.append(label['encoded_representations'])
if 'label' in instance:
labels.append(label_dict[instance['label']])
preds.append(label_dict[label['label']])
print(len(preds))
instance = None
label = None
if line.startswith('input'):
line = line[line.index(':')+1:].strip()
instance = json.loads(line)
elif line.startswith('prediction'):
line = line[line.index(':')+1:].strip()
label = json.loads(line)
split_name = os.path.basename(args.input_path).split('.')[0]
# np.save(args.output_path + f"/{split_name}_cls", np.array(cls_vectors))
np.save(args.output_path + f"/{split_name}_encoded_representations", np.array(encoded_representations))
if labels:
np.save(args.output_path + f"/{split_name}_labels", np.array(labels))
np.save(args.output_path + f"/{split_name}_predictions", np.array(preds))
| contrastive-explanations-main | scripts/cache_encodings.py |
if __name__ == '__main__':
    import argparse
    import json
    import os

    import numpy as np
    from spacy.tokenizer import Tokenizer
    from spacy.lang.en import English
nlp = English()
tokenizer = Tokenizer(nlp.vocab)
parser = argparse.ArgumentParser()
parser.add_argument('--data-path', action='store')
parser.add_argument('--concept-path', action='store')
args = parser.parse_args()
if not os.path.exists(os.path.dirname(args.concept_path)):
os.makedirs(os.path.dirname(args.concept_path))
with open(args.data_path) as f:
        data = [json.loads(line) for line in f if line.strip()]
pretok = lambda sen: sen[:-1] if sen[-1] == "." else sen
preproc = lambda sen: set([str(w) for w in tokenizer(pretok(sen).lower())])
preproc_sw = lambda sen: set([str(w) for w in sen if nlp.vocab[w].is_stop == False])
def is_overlap(p, h):
prem_tokens = preproc_sw(preproc(p))
hyp_tokens = preproc_sw(preproc(h))
overlap = hyp_tokens.intersection(prem_tokens)
if len(prem_tokens) == 0 and len(hyp_tokens) == 0:
return 1, True, True
frac = len(overlap) / len(hyp_tokens) if hyp_tokens else len(overlap) / len(prem_tokens)
return frac, len(overlap) == len(hyp_tokens), len(overlap) == len(prem_tokens)
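    # Worked example (illustrative): for p = "A man plays a guitar ." and
    # h = "A man plays .", the stop-word-filtered hypothesis tokens {"man", "plays"}
    # are all contained in the premise tokens, so frac = 1.0 and the second return
    # value (full-hypothesis overlap) is True.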
for i, ex in enumerate(data):
frac, ovh, ovp = is_overlap(ex['sentence1'], ex['sentence2'])
data[i]['overlap_full_h'] = ovh
concept = np.array([1 if e['overlap_full_h'] else 0 for e in data], dtype=int)
np.save(args.concept_path, concept)
| contrastive-explanations-main | scripts/mnli_concepts.py |
import json
import os
import pickle
for split in ["train", "dev", "test"]:
path1 = f"data/bios/{split}.pickle"
path2 = f"data/bios/{split}.jsonl"
    def find_idx(d: dict):
        """Return indices of tokens that are gendered (masked with "_" in text_without_gender)."""
        with_gender, without_gender = d["hard_text"], d["text_without_gender"]
        with_gender_lst, without_gender_lst = with_gender.split(" "), without_gender.split(" ")
        gender_idx = [i for i, (w_g, w_ng) in enumerate(zip(with_gender_lst, without_gender_lst)) if
                      "_" in w_ng and "_" not in w_g]
        return gender_idx
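    # Illustrative example: hard_text "She is a nurse" paired with
    # text_without_gender "_ is a nurse" yields gender_idx == [0].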
if not os.path.exists(os.path.dirname(path2)):
os.makedirs(os.path.dirname(path2))
with open(path1, "rb") as f:
data = pickle.load(f)
with open(path2, 'w') as fo:
for line in data:
if not line:
continue
if line['p'] == 'model':
continue
indices = find_idx(line)
# if 'http' not in line['text']:
# continue
# print(line['text'])
# print(line['text_without_gender'])
# for i, w in enumerate(line['text_without_gender'].split()):
# if i in indices:
# print(w)
ex = {}
ex['text'] = line['hard_text']
ex['original_text'] = line['text']
ex['text_without_gender'] = line['text_without_gender']
ex['label'] = line['p']
ex['gender'] = line['g']
ex['start'] = line['start']
ex['gender_tokens'] = indices
fo.write(json.dumps(ex) + "\n")
| contrastive-explanations-main | scripts/bios_pickle_to_jsonl.py |
if __name__ == '__main__':
    import argparse
    import json
    import os

    import numpy as np
parser = argparse.ArgumentParser()
parser.add_argument('--data-path', action='store')
parser.add_argument('--concept-path', action='store')
args = parser.parse_args()
if not os.path.exists(os.path.dirname(args.concept_path)):
os.makedirs(os.path.dirname(args.concept_path))
index2label = {0: 'professor',
1: 'physician',
2: 'attorney',
3: 'photographer',
4: 'journalist',
5: 'psychologist',
6: 'nurse',
7: 'teacher',
8: 'dentist',
9: 'surgeon',
10: 'architect',
11: 'painter',
12: 'filmmaker',
13: 'software_engineer',
14: 'poet',
15: 'accountant',
16: 'composer',
17: 'dietitian',
18: 'pastor',
19: 'chiropractor',
20: 'comedian',
21: 'paralegal',
22: 'interior_designer',
23: 'yoga_teacher',
24: 'dj',
25: 'personal_trainer',
26: 'rapper'}
label2index = {index2label[k]: k for k in index2label}
with open(args.data_path) as f:
        data = [json.loads(line) for line in f if line.strip()]
concept = np.array([ex['gender'] == 'm' for ex in data], dtype=int)
np.save(args.concept_path, concept)
| contrastive-explanations-main | scripts/bios_concepts.py |
'''
ELMo usage example to write biLM embeddings for an entire dataset to
a file.
'''
import os
import h5py
from bilm import dump_bilm_embeddings
# Our small dataset.
raw_context = [
'Pretrained biLMs compute representations useful for NLP tasks .',
'They give state of the art performance for many tasks .'
]
tokenized_context = [sentence.split() for sentence in raw_context]
tokenized_question = [
['What', 'are', 'biLMs', 'useful', 'for', '?'],
]
# Create the dataset file.
dataset_file = 'dataset_file.txt'
with open(dataset_file, 'w') as fout:
for sentence in tokenized_context + tokenized_question:
fout.write(' '.join(sentence) + '\n')
# Location of pretrained LM. Here we use the test fixtures.
datadir = os.path.join('tests', 'fixtures', 'model')
vocab_file = os.path.join(datadir, 'vocab_test.txt')
options_file = os.path.join(datadir, 'options.json')
weight_file = os.path.join(datadir, 'lm_weights.hdf5')
# Dump the embeddings to a file. Run this once for your dataset.
embedding_file = 'elmo_embeddings.hdf5'
dump_bilm_embeddings(
vocab_file, dataset_file, options_file, weight_file, embedding_file
)
# Load the embeddings from the file -- here the 2nd sentence.
with h5py.File(embedding_file, 'r') as fin:
second_sentence_embeddings = fin['1'][...]
| bilm-tf-master | usage_cached.py |
#!/usr/bin/python
import setuptools
with open("README.md", "r") as fh:
long_description = fh.read()
setuptools.setup(
name='bilm',
version='0.1.post5',
url='http://github.com/allenai/bilm-tf',
packages=setuptools.find_packages(),
tests_require=[],
zip_safe=False,
entry_points='',
description='Tensorflow implementation of contextualized word representations from bi-directional language models',
long_description=long_description,
long_description_content_type="text/markdown",
license='Apache License 2.0',
python_requires='>=3.5',
install_requires=[
'h5py',
],
classifiers=[
'License :: OSI Approved :: Apache Software License',
'Operating System :: OS Independent',
'Intended Audience :: Developers',
'Intended Audience :: Education',
'Intended Audience :: Science/Research',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Topic :: Scientific/Engineering',
'Topic :: Scientific/Engineering :: Mathematics',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'Topic :: Scientific/Engineering :: Information Analysis',
'Topic :: Software Development',
'Topic :: Software Development :: Libraries',
'Topic :: Software Development :: Libraries :: Python Modules',
'Topic :: Text Processing',
],
keywords='bilm elmo nlp embedding',
author='Matthew E. Peters, Mark Neumann, Mohit Iyyer, Matt Gardner, Christopher Clark, Kenton Lee, Luke Zettlemoyer',
author_email='[email protected]',
maintainer='Matthew Peters',
)
| bilm-tf-master | setup.py |
'''
ELMo usage example with character inputs.
Below, we show usage for SQuAD where each input example consists of both
a question and a paragraph of context.
'''
import tensorflow as tf
import os
from bilm import Batcher, BidirectionalLanguageModel, weight_layers
# Location of pretrained LM. Here we use the test fixtures.
datadir = os.path.join('tests', 'fixtures', 'model')
vocab_file = os.path.join(datadir, 'vocab_test.txt')
options_file = os.path.join(datadir, 'options.json')
weight_file = os.path.join(datadir, 'lm_weights.hdf5')
# Create a Batcher to map text to character ids.
batcher = Batcher(vocab_file, 50)
# Input placeholders to the biLM.
context_character_ids = tf.placeholder('int32', shape=(None, None, 50))
question_character_ids = tf.placeholder('int32', shape=(None, None, 50))
# Build the biLM graph.
bilm = BidirectionalLanguageModel(options_file, weight_file)
# Get ops to compute the LM embeddings.
context_embeddings_op = bilm(context_character_ids)
question_embeddings_op = bilm(question_character_ids)
# Get an op to compute ELMo (weighted average of the internal biLM layers)
# Our SQuAD model includes ELMo at both the input and output layers
# of the task GRU, so we need 4x ELMo representations for the question
# and context at each of the input and output.
# We use the same ELMo weights for both the question and context
# at each of the input and output.
elmo_context_input = weight_layers('input', context_embeddings_op, l2_coef=0.0)
with tf.variable_scope('', reuse=True):
# the reuse=True scope reuses weights from the context for the question
elmo_question_input = weight_layers(
'input', question_embeddings_op, l2_coef=0.0
)
elmo_context_output = weight_layers(
'output', context_embeddings_op, l2_coef=0.0
)
with tf.variable_scope('', reuse=True):
# the reuse=True scope reuses weights from the context for the question
elmo_question_output = weight_layers(
'output', question_embeddings_op, l2_coef=0.0
)
# Now we can compute embeddings.
raw_context = [
'Pretrained biLMs compute representations useful for NLP tasks .',
'They give state of the art performance for many tasks .'
]
tokenized_context = [sentence.split() for sentence in raw_context]
tokenized_question = [
['What', 'are', 'biLMs', 'useful', 'for', '?'],
]
with tf.Session() as sess:
# It is necessary to initialize variables once before running inference.
sess.run(tf.global_variables_initializer())
# Create batches of data.
context_ids = batcher.batch_sentences(tokenized_context)
question_ids = batcher.batch_sentences(tokenized_question)
# Compute ELMo representations (here for the input only, for simplicity).
elmo_context_input_, elmo_question_input_ = sess.run(
[elmo_context_input['weighted_op'], elmo_question_input['weighted_op']],
feed_dict={context_character_ids: context_ids,
question_character_ids: question_ids}
)
| bilm-tf-master | usage_character.py |
'''
ELMo usage example with pre-computed and cached context independent
token representations
Below, we show usage for SQuAD where each input example consists of both
a question and a paragraph of context.
'''
import tensorflow as tf
import os
from bilm import TokenBatcher, BidirectionalLanguageModel, weight_layers, \
dump_token_embeddings
# Our small dataset.
raw_context = [
'Pretrained biLMs compute representations useful for NLP tasks .',
'They give state of the art performance for many tasks .'
]
tokenized_context = [sentence.split() for sentence in raw_context]
tokenized_question = [
['What', 'are', 'biLMs', 'useful', 'for', '?'],
]
# Create the vocabulary file with all unique tokens and
# the special <S>, </S> tokens (case sensitive).
all_tokens = set(['<S>', '</S>'] + tokenized_question[0])
for context_sentence in tokenized_context:
for token in context_sentence:
all_tokens.add(token)
vocab_file = 'vocab_small.txt'
with open(vocab_file, 'w') as fout:
fout.write('\n'.join(all_tokens))
# Location of pretrained LM. Here we use the test fixtures.
datadir = os.path.join('tests', 'fixtures', 'model')
options_file = os.path.join(datadir, 'options.json')
weight_file = os.path.join(datadir, 'lm_weights.hdf5')
# Dump the token embeddings to a file. Run this once for your dataset.
token_embedding_file = 'elmo_token_embeddings.hdf5'
dump_token_embeddings(
vocab_file, options_file, weight_file, token_embedding_file
)
tf.reset_default_graph()
## Now we can do inference.
# Create a TokenBatcher to map text to token ids.
batcher = TokenBatcher(vocab_file)
# Input placeholders to the biLM.
context_token_ids = tf.placeholder('int32', shape=(None, None))
question_token_ids = tf.placeholder('int32', shape=(None, None))
# Build the biLM graph.
bilm = BidirectionalLanguageModel(
options_file,
weight_file,
use_character_inputs=False,
embedding_weight_file=token_embedding_file
)
# Get ops to compute the LM embeddings.
context_embeddings_op = bilm(context_token_ids)
question_embeddings_op = bilm(question_token_ids)
# Get an op to compute ELMo (weighted average of the internal biLM layers)
# Our SQuAD model includes ELMo at both the input and output layers
# of the task GRU, so we need 4x ELMo representations for the question
# and context at each of the input and output.
# We use the same ELMo weights for both the question and context
# at each of the input and output.
elmo_context_input = weight_layers('input', context_embeddings_op, l2_coef=0.0)
with tf.variable_scope('', reuse=True):
# the reuse=True scope reuses weights from the context for the question
elmo_question_input = weight_layers(
'input', question_embeddings_op, l2_coef=0.0
)
elmo_context_output = weight_layers(
'output', context_embeddings_op, l2_coef=0.0
)
with tf.variable_scope('', reuse=True):
# the reuse=True scope reuses weights from the context for the question
elmo_question_output = weight_layers(
'output', question_embeddings_op, l2_coef=0.0
)
with tf.Session() as sess:
# It is necessary to initialize variables once before running inference.
sess.run(tf.global_variables_initializer())
# Create batches of data.
context_ids = batcher.batch_sentences(tokenized_context)
question_ids = batcher.batch_sentences(tokenized_question)
# Compute ELMo representations (here for the input only, for simplicity).
elmo_context_input_, elmo_question_input_ = sess.run(
[elmo_context_input['weighted_op'], elmo_question_input['weighted_op']],
feed_dict={context_token_ids: context_ids,
question_token_ids: question_ids}
)
| bilm-tf-master | usage_token.py |
import argparse
import numpy as np
from bilm.training import train, load_options_latest_checkpoint, load_vocab
from bilm.data import BidirectionalLMDataset
def main(args):
# load the vocab
vocab = load_vocab(args.vocab_file, 50)
# define the options
batch_size = 128 # batch size for each GPU
n_gpus = 3
# number of tokens in training data (this for 1B Word Benchmark)
n_train_tokens = 768648884
options = {
'bidirectional': True,
'char_cnn': {'activation': 'relu',
'embedding': {'dim': 16},
'filters': [[1, 32],
[2, 32],
[3, 64],
[4, 128],
[5, 256],
[6, 512],
[7, 1024]],
'max_characters_per_token': 50,
'n_characters': 261,
'n_highway': 2},
'dropout': 0.1,
'lstm': {
'cell_clip': 3,
'dim': 4096,
'n_layers': 2,
'proj_clip': 3,
'projection_dim': 512,
'use_skip_connections': True},
'all_clip_norm_val': 10.0,
'n_epochs': 10,
'n_train_tokens': n_train_tokens,
'batch_size': batch_size,
'n_tokens_vocab': vocab.size,
'unroll_steps': 20,
'n_negative_samples_batch': 8192,
}
prefix = args.train_prefix
data = BidirectionalLMDataset(prefix, vocab, test=False,
shuffle_on_load=True)
tf_save_dir = args.save_dir
tf_log_dir = args.save_dir
train(options, data, n_gpus, tf_save_dir, tf_log_dir)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--save_dir', help='Location of checkpoint files')
parser.add_argument('--vocab_file', help='Vocabulary file')
parser.add_argument('--train_prefix', help='Prefix for train files')
args = parser.parse_args()
main(args)
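
# Example invocation (illustrative paths):
#   python bin/train_elmo.py --save_dir checkpoints/ \
#       --vocab_file vocab.txt --train_prefix 'data/train/*'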
| bilm-tf-master | bin/train_elmo.py |
import argparse
import numpy as np
from bilm.training import train, load_options_latest_checkpoint, load_vocab
from bilm.data import LMDataset, BidirectionalLMDataset
def main(args):
options, ckpt_file = load_options_latest_checkpoint(args.save_dir)
if 'char_cnn' in options:
max_word_length = options['char_cnn']['max_characters_per_token']
else:
max_word_length = None
vocab = load_vocab(args.vocab_file, max_word_length)
prefix = args.train_prefix
kwargs = {
'test': False,
'shuffle_on_load': True,
}
if options.get('bidirectional'):
data = BidirectionalLMDataset(prefix, vocab, **kwargs)
else:
data = LMDataset(prefix, vocab, **kwargs)
tf_save_dir = args.save_dir
tf_log_dir = args.save_dir
# set optional inputs
if args.n_train_tokens > 0:
options['n_train_tokens'] = args.n_train_tokens
if args.n_epochs > 0:
options['n_epochs'] = args.n_epochs
if args.batch_size > 0:
options['batch_size'] = args.batch_size
train(options, data, args.n_gpus, tf_save_dir, tf_log_dir,
restart_ckpt_file=ckpt_file)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--save_dir', help='Location of checkpoint files')
parser.add_argument('--vocab_file', help='Vocabulary file')
parser.add_argument('--train_prefix', help='Prefix for train files')
parser.add_argument('--n_gpus', type=int, default=1,
help='Number of GPUs to use')
parser.add_argument('--batch_size', type=int, default=0)
parser.add_argument('--n_train_tokens', type=int, default=0)
parser.add_argument('--n_epochs', type=int, default=0)
args = parser.parse_args()
main(args)
| bilm-tf-master | bin/restart.py |
import argparse
from bilm.training import dump_weights as dw
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--save_dir', help='Location of checkpoint files')
parser.add_argument('--outfile', help='Output hdf5 file with weights')
args = parser.parse_args()
dw(args.save_dir, args.outfile)
| bilm-tf-master | bin/dump_weights.py |
import argparse
from bilm.training import test, load_options_latest_checkpoint, load_vocab
from bilm.data import LMDataset, BidirectionalLMDataset
def main(args):
options, ckpt_file = load_options_latest_checkpoint(args.save_dir)
# load the vocab
if 'char_cnn' in options:
max_word_length = options['char_cnn']['max_characters_per_token']
else:
max_word_length = None
vocab = load_vocab(args.vocab_file, max_word_length)
test_prefix = args.test_prefix
kwargs = {
'test': True,
'shuffle_on_load': False,
}
if options.get('bidirectional'):
data = BidirectionalLMDataset(test_prefix, vocab, **kwargs)
else:
data = LMDataset(test_prefix, vocab, **kwargs)
test(options, ckpt_file, data, batch_size=args.batch_size)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Compute test perplexity')
parser.add_argument('--save_dir', help='Location of checkpoint files')
parser.add_argument('--vocab_file', help='Vocabulary file')
parser.add_argument('--test_prefix', help='Prefix for test files')
parser.add_argument('--batch_size',
type=int, default=256,
help='Batch size')
args = parser.parse_args()
main(args)
| bilm-tf-master | bin/run_test.py |
import tensorflow as tf
def weight_layers(name, bilm_ops, l2_coef=None,
use_top_only=False, do_layer_norm=False):
'''
Weight the layers of a biLM with trainable scalar weights to
compute ELMo representations.
For each output layer, this returns two ops. The first computes
a layer specific weighted average of the biLM layers, and
the second the l2 regularizer loss term.
    The regularization terms are also added to tf.GraphKeys.REGULARIZATION_LOSSES
Input:
name = a string prefix used for the trainable variable names
bilm_ops = the tensorflow ops returned to compute internal
representations from a biLM. This is the return value
from BidirectionalLanguageModel(...)(ids_placeholder)
l2_coef: the l2 regularization coefficient $\lambda$.
Pass None or 0.0 for no regularization.
use_top_only: if True, then only use the top layer.
    do_layer_norm: if True, then apply layer normalization to each biLM
        layer before computing the weighted average
Output:
{
'weighted_op': op to compute weighted average for output,
'regularization_op': op to compute regularization term
}
'''
def _l2_regularizer(weights):
if l2_coef is not None:
return l2_coef * tf.reduce_sum(tf.square(weights))
else:
return 0.0
# Get ops for computing LM embeddings and mask
lm_embeddings = bilm_ops['lm_embeddings']
mask = bilm_ops['mask']
n_lm_layers = int(lm_embeddings.get_shape()[1])
lm_dim = int(lm_embeddings.get_shape()[3])
with tf.control_dependencies([lm_embeddings, mask]):
# Cast the mask and broadcast for layer use.
mask_float = tf.cast(mask, 'float32')
broadcast_mask = tf.expand_dims(mask_float, axis=-1)
def _do_ln(x):
# do layer normalization excluding the mask
x_masked = x * broadcast_mask
N = tf.reduce_sum(mask_float) * lm_dim
mean = tf.reduce_sum(x_masked) / N
variance = tf.reduce_sum(((x_masked - mean) * broadcast_mask)**2
) / N
return tf.nn.batch_normalization(
x, mean, variance, None, None, 1E-12
)
if use_top_only:
layers = tf.split(lm_embeddings, n_lm_layers, axis=1)
# just the top layer
sum_pieces = tf.squeeze(layers[-1], squeeze_dims=1)
# no regularization
reg = 0.0
else:
W = tf.get_variable(
'{}_ELMo_W'.format(name),
shape=(n_lm_layers, ),
initializer=tf.zeros_initializer,
regularizer=_l2_regularizer,
trainable=True,
)
# normalize the weights
normed_weights = tf.split(
tf.nn.softmax(W + 1.0 / n_lm_layers), n_lm_layers
)
# split LM layers
layers = tf.split(lm_embeddings, n_lm_layers, axis=1)
# compute the weighted, normalized LM activations
pieces = []
for w, t in zip(normed_weights, layers):
if do_layer_norm:
pieces.append(w * _do_ln(tf.squeeze(t, squeeze_dims=1)))
else:
pieces.append(w * tf.squeeze(t, squeeze_dims=1))
sum_pieces = tf.add_n(pieces)
# get the regularizer
reg = [
r for r in tf.get_collection(
tf.GraphKeys.REGULARIZATION_LOSSES)
if r.name.find('{}_ELMo_W/'.format(name)) >= 0
]
if len(reg) != 1:
raise ValueError
# scale the weighted sum by gamma
gamma = tf.get_variable(
'{}_ELMo_gamma'.format(name),
shape=(1, ),
initializer=tf.ones_initializer,
regularizer=None,
trainable=True,
)
weighted_lm_layers = sum_pieces * gamma
ret = {'weighted_op': weighted_lm_layers, 'regularization_op': reg}
return ret
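

# Usage note (illustrative, not part of the original module): the two returned ops are
# typically combined with a downstream task loss, e.g.
#   elmo = weight_layers('input', bilm_ops, l2_coef=0.001)
#   total_loss = task_loss + tf.add_n(elmo['regularization_op'])
# so that the scalar mixing weights are pulled toward a uniform average of the layers.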
| bilm-tf-master | bilm/elmo.py |
from .data import Batcher, TokenBatcher
from .model import BidirectionalLanguageModel, dump_token_embeddings, \
dump_bilm_embeddings
from .elmo import weight_layers
| bilm-tf-master | bilm/__init__.py |
import numpy as np
import tensorflow as tf
import h5py
import json
import re
from .data import UnicodeCharsVocabulary, Batcher, InvalidNumberOfCharacters
DTYPE = 'float32'
DTYPE_INT = 'int64'
class BidirectionalLanguageModel(object):
def __init__(
self,
options_file: str,
weight_file: str,
use_character_inputs=True,
embedding_weight_file=None,
max_batch_size=128,
):
'''
Creates the language model computational graph and loads weights
Two options for input type:
(1) To use character inputs (paired with Batcher)
pass use_character_inputs=True, and ids_placeholder
of shape (None, None, max_characters_per_token)
to __call__
(2) To use token ids as input (paired with TokenBatcher),
pass use_character_inputs=False and ids_placeholder
of shape (None, None) to __call__.
In this case, embedding_weight_file is also required input
options_file: location of the json formatted file with
LM hyperparameters
weight_file: location of the hdf5 file with LM weights
use_character_inputs: if True, then use character ids as input,
otherwise use token ids
max_batch_size: the maximum allowable batch size
'''
with open(options_file, 'r') as fin:
options = json.load(fin)
if not use_character_inputs:
if embedding_weight_file is None:
raise ValueError(
"embedding_weight_file is required input with "
"not use_character_inputs"
)
self._options = options
self._weight_file = weight_file
self._embedding_weight_file = embedding_weight_file
self._use_character_inputs = use_character_inputs
self._max_batch_size = max_batch_size
self._ops = {}
self._graphs = {}
def __call__(self, ids_placeholder):
'''
Given the input character ids (or token ids), returns a dictionary
with tensorflow ops:
{'lm_embeddings': embedding_op,
'lengths': sequence_lengths_op,
'mask': op to compute mask}
embedding_op computes the LM embeddings and is shape
(None, 3, None, 1024)
lengths_op computes the sequence lengths and is shape (None, )
mask computes the sequence mask and is shape (None, None)
ids_placeholder: a tf.placeholder of type int32.
If use_character_inputs=True, it is shape
(None, None, max_characters_per_token) and holds the input
character ids for a batch
If use_character_input=False, it is shape (None, None) and
holds the input token ids for a batch
'''
if ids_placeholder in self._ops:
# have already created ops for this placeholder, just return them
ret = self._ops[ids_placeholder]
else:
# need to create the graph
if len(self._ops) == 0:
# first time creating the graph, don't reuse variables
lm_graph = BidirectionalLanguageModelGraph(
self._options,
self._weight_file,
ids_placeholder,
embedding_weight_file=self._embedding_weight_file,
use_character_inputs=self._use_character_inputs,
max_batch_size=self._max_batch_size)
else:
with tf.variable_scope('', reuse=True):
lm_graph = BidirectionalLanguageModelGraph(
self._options,
self._weight_file,
ids_placeholder,
embedding_weight_file=self._embedding_weight_file,
use_character_inputs=self._use_character_inputs,
max_batch_size=self._max_batch_size)
ops = self._build_ops(lm_graph)
self._ops[ids_placeholder] = ops
self._graphs[ids_placeholder] = lm_graph
ret = ops
return ret
def _build_ops(self, lm_graph):
with tf.control_dependencies([lm_graph.update_state_op]):
# get the LM embeddings
token_embeddings = lm_graph.embedding
layers = [
tf.concat([token_embeddings, token_embeddings], axis=2)
]
n_lm_layers = len(lm_graph.lstm_outputs['forward'])
for i in range(n_lm_layers):
layers.append(
tf.concat(
[lm_graph.lstm_outputs['forward'][i],
lm_graph.lstm_outputs['backward'][i]],
axis=-1
)
)
# The layers include the BOS/EOS tokens. Remove them
sequence_length_wo_bos_eos = lm_graph.sequence_lengths - 2
layers_without_bos_eos = []
for layer in layers:
layer_wo_bos_eos = layer[:, 1:, :]
layer_wo_bos_eos = tf.reverse_sequence(
layer_wo_bos_eos,
lm_graph.sequence_lengths - 1,
seq_axis=1,
batch_axis=0,
)
layer_wo_bos_eos = layer_wo_bos_eos[:, 1:, :]
layer_wo_bos_eos = tf.reverse_sequence(
layer_wo_bos_eos,
sequence_length_wo_bos_eos,
seq_axis=1,
batch_axis=0,
)
layers_without_bos_eos.append(layer_wo_bos_eos)
# concatenate the layers
lm_embeddings = tf.concat(
[tf.expand_dims(t, axis=1) for t in layers_without_bos_eos],
axis=1
)
# get the mask op without bos/eos.
# tf doesn't support reversing boolean tensors, so cast
# to int then back
mask_wo_bos_eos = tf.cast(lm_graph.mask[:, 1:], 'int32')
mask_wo_bos_eos = tf.reverse_sequence(
mask_wo_bos_eos,
lm_graph.sequence_lengths - 1,
seq_axis=1,
batch_axis=0,
)
mask_wo_bos_eos = mask_wo_bos_eos[:, 1:]
mask_wo_bos_eos = tf.reverse_sequence(
mask_wo_bos_eos,
sequence_length_wo_bos_eos,
seq_axis=1,
batch_axis=0,
)
mask_wo_bos_eos = tf.cast(mask_wo_bos_eos, 'bool')
return {
'lm_embeddings': lm_embeddings,
'lengths': sequence_length_wo_bos_eos,
'token_embeddings': lm_graph.embedding,
'mask': mask_wo_bos_eos,
}
def _pretrained_initializer(varname, weight_file, embedding_weight_file=None):
'''
We'll stub out all the initializers in the pretrained LM with
a function that loads the weights from the file
'''
weight_name_map = {}
for i in range(2):
for j in range(8): # if we decide to add more layers
root = 'RNN_{}/RNN/MultiRNNCell/Cell{}'.format(i, j)
weight_name_map[root + '/rnn/lstm_cell/kernel'] = \
root + '/LSTMCell/W_0'
weight_name_map[root + '/rnn/lstm_cell/bias'] = \
root + '/LSTMCell/B'
weight_name_map[root + '/rnn/lstm_cell/projection/kernel'] = \
root + '/LSTMCell/W_P_0'
# convert the graph name to that in the checkpoint
varname_in_file = varname[5:]
if varname_in_file.startswith('RNN'):
varname_in_file = weight_name_map[varname_in_file]
if varname_in_file == 'embedding':
with h5py.File(embedding_weight_file, 'r') as fin:
# Have added a special 0 index for padding not present
# in the original model.
embed_weights = fin[varname_in_file][...]
weights = np.zeros(
(embed_weights.shape[0] + 1, embed_weights.shape[1]),
dtype=DTYPE
)
weights[1:, :] = embed_weights
else:
with h5py.File(weight_file, 'r') as fin:
if varname_in_file == 'char_embed':
# Have added a special 0 index for padding not present
# in the original model.
char_embed_weights = fin[varname_in_file][...]
weights = np.zeros(
(char_embed_weights.shape[0] + 1,
char_embed_weights.shape[1]),
dtype=DTYPE
)
weights[1:, :] = char_embed_weights
else:
weights = fin[varname_in_file][...]
# Tensorflow initializers are callables that accept a shape parameter
# and some optional kwargs
def ret(shape, **kwargs):
if list(shape) != list(weights.shape):
raise ValueError(
"Invalid shape initializing {0}, got {1}, expected {2}".format(
varname_in_file, shape, weights.shape)
)
return weights
return ret
class BidirectionalLanguageModelGraph(object):
'''
    Creates the computational graph and holds the ops necessary for running
a bidirectional language model
'''
def __init__(self, options, weight_file, ids_placeholder,
use_character_inputs=True, embedding_weight_file=None,
max_batch_size=128):
self.options = options
self._max_batch_size = max_batch_size
self.ids_placeholder = ids_placeholder
self.use_character_inputs = use_character_inputs
# this custom_getter will make all variables not trainable and
# override the default initializer
def custom_getter(getter, name, *args, **kwargs):
kwargs['trainable'] = False
kwargs['initializer'] = _pretrained_initializer(
name, weight_file, embedding_weight_file
)
return getter(name, *args, **kwargs)
if embedding_weight_file is not None:
# get the vocab size
with h5py.File(embedding_weight_file, 'r') as fin:
# +1 for padding
self._n_tokens_vocab = fin['embedding'].shape[0] + 1
else:
self._n_tokens_vocab = None
with tf.variable_scope('bilm', custom_getter=custom_getter):
self._build()
def _build(self):
if self.use_character_inputs:
self._build_word_char_embeddings()
else:
self._build_word_embeddings()
self._build_lstms()
def _build_word_char_embeddings(self):
'''
options contains key 'char_cnn': {
'n_characters': 262,
# includes the start / end characters
'max_characters_per_token': 50,
'filters': [
[1, 32],
[2, 32],
[3, 64],
[4, 128],
[5, 256],
[6, 512],
[7, 512]
],
'activation': 'tanh',
# for the character embedding
'embedding': {'dim': 16}
# for highway layers
# if omitted, then no highway layers
'n_highway': 2,
}
'''
projection_dim = self.options['lstm']['projection_dim']
cnn_options = self.options['char_cnn']
filters = cnn_options['filters']
n_filters = sum(f[1] for f in filters)
max_chars = cnn_options['max_characters_per_token']
char_embed_dim = cnn_options['embedding']['dim']
n_chars = cnn_options['n_characters']
if n_chars != 262:
raise InvalidNumberOfCharacters(
"Set n_characters=262 after training see the README.md"
)
if cnn_options['activation'] == 'tanh':
activation = tf.nn.tanh
elif cnn_options['activation'] == 'relu':
activation = tf.nn.relu
# the character embeddings
with tf.device("/cpu:0"):
self.embedding_weights = tf.get_variable(
"char_embed", [n_chars, char_embed_dim],
dtype=DTYPE,
initializer=tf.random_uniform_initializer(-1.0, 1.0)
)
# shape (batch_size, unroll_steps, max_chars, embed_dim)
self.char_embedding = tf.nn.embedding_lookup(self.embedding_weights,
self.ids_placeholder)
# the convolutions
def make_convolutions(inp):
with tf.variable_scope('CNN') as scope:
convolutions = []
for i, (width, num) in enumerate(filters):
if cnn_options['activation'] == 'relu':
# He initialization for ReLU activation
# with char embeddings init between -1 and 1
#w_init = tf.random_normal_initializer(
# mean=0.0,
# stddev=np.sqrt(2.0 / (width * char_embed_dim))
#)
# Kim et al 2015, +/- 0.05
w_init = tf.random_uniform_initializer(
minval=-0.05, maxval=0.05)
elif cnn_options['activation'] == 'tanh':
# glorot init
w_init = tf.random_normal_initializer(
mean=0.0,
stddev=np.sqrt(1.0 / (width * char_embed_dim))
)
w = tf.get_variable(
"W_cnn_%s" % i,
[1, width, char_embed_dim, num],
initializer=w_init,
dtype=DTYPE)
b = tf.get_variable(
"b_cnn_%s" % i, [num], dtype=DTYPE,
initializer=tf.constant_initializer(0.0))
conv = tf.nn.conv2d(
inp, w,
strides=[1, 1, 1, 1],
padding="VALID") + b
# now max pool
conv = tf.nn.max_pool(
conv, [1, 1, max_chars-width+1, 1],
[1, 1, 1, 1], 'VALID')
# activation
conv = activation(conv)
conv = tf.squeeze(conv, squeeze_dims=[2])
convolutions.append(conv)
return tf.concat(convolutions, 2)
embedding = make_convolutions(self.char_embedding)
# for highway and projection layers
n_highway = cnn_options.get('n_highway')
use_highway = n_highway is not None and n_highway > 0
use_proj = n_filters != projection_dim
if use_highway or use_proj:
# reshape from (batch_size, n_tokens, dim) to (-1, dim)
batch_size_n_tokens = tf.shape(embedding)[0:2]
embedding = tf.reshape(embedding, [-1, n_filters])
# set up weights for projection
if use_proj:
assert n_filters > projection_dim
with tf.variable_scope('CNN_proj') as scope:
W_proj_cnn = tf.get_variable(
"W_proj", [n_filters, projection_dim],
initializer=tf.random_normal_initializer(
mean=0.0, stddev=np.sqrt(1.0 / n_filters)),
dtype=DTYPE)
b_proj_cnn = tf.get_variable(
"b_proj", [projection_dim],
initializer=tf.constant_initializer(0.0),
dtype=DTYPE)
# apply highways layers
def high(x, ww_carry, bb_carry, ww_tr, bb_tr):
carry_gate = tf.nn.sigmoid(tf.matmul(x, ww_carry) + bb_carry)
transform_gate = tf.nn.relu(tf.matmul(x, ww_tr) + bb_tr)
return carry_gate * transform_gate + (1.0 - carry_gate) * x
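            # output = carry_gate * relu(W_tr x + b_tr) + (1 - carry_gate) * x:
            # the sigmoid gate interpolates between the transformed input and the
            # identity (carry) path.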
if use_highway:
highway_dim = n_filters
for i in range(n_highway):
with tf.variable_scope('CNN_high_%s' % i) as scope:
W_carry = tf.get_variable(
'W_carry', [highway_dim, highway_dim],
                        # glorot init
initializer=tf.random_normal_initializer(
mean=0.0, stddev=np.sqrt(1.0 / highway_dim)),
dtype=DTYPE)
b_carry = tf.get_variable(
'b_carry', [highway_dim],
initializer=tf.constant_initializer(-2.0),
dtype=DTYPE)
W_transform = tf.get_variable(
'W_transform', [highway_dim, highway_dim],
initializer=tf.random_normal_initializer(
mean=0.0, stddev=np.sqrt(1.0 / highway_dim)),
dtype=DTYPE)
b_transform = tf.get_variable(
'b_transform', [highway_dim],
initializer=tf.constant_initializer(0.0),
dtype=DTYPE)
embedding = high(embedding, W_carry, b_carry,
W_transform, b_transform)
# finally project down if needed
if use_proj:
embedding = tf.matmul(embedding, W_proj_cnn) + b_proj_cnn
# reshape back to (batch_size, tokens, dim)
if use_highway or use_proj:
shp = tf.concat([batch_size_n_tokens, [projection_dim]], axis=0)
embedding = tf.reshape(embedding, shp)
# at last assign attributes for remainder of the model
self.embedding = embedding
def _build_word_embeddings(self):
projection_dim = self.options['lstm']['projection_dim']
# the word embeddings
with tf.device("/cpu:0"):
self.embedding_weights = tf.get_variable(
"embedding", [self._n_tokens_vocab, projection_dim],
dtype=DTYPE,
)
self.embedding = tf.nn.embedding_lookup(self.embedding_weights,
self.ids_placeholder)
def _build_lstms(self):
# now the LSTMs
# these will collect the initial states for the forward
# (and reverse LSTMs if we are doing bidirectional)
# parse the options
lstm_dim = self.options['lstm']['dim']
projection_dim = self.options['lstm']['projection_dim']
n_lstm_layers = self.options['lstm'].get('n_layers', 1)
cell_clip = self.options['lstm'].get('cell_clip')
proj_clip = self.options['lstm'].get('proj_clip')
use_skip_connections = self.options['lstm']['use_skip_connections']
if use_skip_connections:
print("USING SKIP CONNECTIONS")
else:
print("NOT USING SKIP CONNECTIONS")
# the sequence lengths from input mask
if self.use_character_inputs:
mask = tf.reduce_any(self.ids_placeholder > 0, axis=2)
else:
mask = self.ids_placeholder > 0
sequence_lengths = tf.reduce_sum(tf.cast(mask, tf.int32), axis=1)
batch_size = tf.shape(sequence_lengths)[0]
# for each direction, we'll store tensors for each layer
self.lstm_outputs = {'forward': [], 'backward': []}
self.lstm_state_sizes = {'forward': [], 'backward': []}
self.lstm_init_states = {'forward': [], 'backward': []}
self.lstm_final_states = {'forward': [], 'backward': []}
update_ops = []
for direction in ['forward', 'backward']:
if direction == 'forward':
layer_input = self.embedding
else:
layer_input = tf.reverse_sequence(
self.embedding,
sequence_lengths,
seq_axis=1,
batch_axis=0
)
for i in range(n_lstm_layers):
if projection_dim < lstm_dim:
                    # project the LSTM output down to projection_dim
lstm_cell = tf.nn.rnn_cell.LSTMCell(
lstm_dim, num_proj=projection_dim,
cell_clip=cell_clip, proj_clip=proj_clip)
else:
lstm_cell = tf.nn.rnn_cell.LSTMCell(
lstm_dim,
cell_clip=cell_clip, proj_clip=proj_clip)
if use_skip_connections:
# ResidualWrapper adds inputs to outputs
if i == 0:
# don't add skip connection from token embedding to
# 1st layer output
pass
else:
# add a skip connection
lstm_cell = tf.nn.rnn_cell.ResidualWrapper(lstm_cell)
# collect the input state, run the dynamic rnn, collect
# the output
state_size = lstm_cell.state_size
# the LSTMs are stateful. To support multiple batch sizes,
# we'll allocate size for states up to max_batch_size,
# then use the first batch_size entries for each batch
init_states = [
tf.Variable(
tf.zeros([self._max_batch_size, dim]),
trainable=False
)
for dim in lstm_cell.state_size
]
batch_init_states = [
state[:batch_size, :] for state in init_states
]
if direction == 'forward':
i_direction = 0
else:
i_direction = 1
variable_scope_name = 'RNN_{0}/RNN/MultiRNNCell/Cell{1}'.format(
i_direction, i)
with tf.variable_scope(variable_scope_name):
layer_output, final_state = tf.nn.dynamic_rnn(
lstm_cell,
layer_input,
sequence_length=sequence_lengths,
initial_state=tf.nn.rnn_cell.LSTMStateTuple(
*batch_init_states),
)
self.lstm_state_sizes[direction].append(lstm_cell.state_size)
self.lstm_init_states[direction].append(init_states)
self.lstm_final_states[direction].append(final_state)
if direction == 'forward':
self.lstm_outputs[direction].append(layer_output)
else:
self.lstm_outputs[direction].append(
tf.reverse_sequence(
layer_output,
sequence_lengths,
seq_axis=1,
batch_axis=0
)
)
with tf.control_dependencies([layer_output]):
# update the initial states
for i in range(2):
new_state = tf.concat(
[final_state[i][:batch_size, :],
init_states[i][batch_size:, :]], axis=0)
state_update_op = tf.assign(init_states[i], new_state)
update_ops.append(state_update_op)
layer_input = layer_output
self.mask = mask
self.sequence_lengths = sequence_lengths
self.update_state_op = tf.group(*update_ops)
def dump_token_embeddings(vocab_file, options_file, weight_file, outfile):
'''
Given an input vocabulary file, dump all the token embeddings to the
outfile. The result can be used as the embedding_weight_file when
constructing a BidirectionalLanguageModel.
'''
with open(options_file, 'r') as fin:
options = json.load(fin)
max_word_length = options['char_cnn']['max_characters_per_token']
vocab = UnicodeCharsVocabulary(vocab_file, max_word_length)
batcher = Batcher(vocab_file, max_word_length)
ids_placeholder = tf.placeholder('int32',
shape=(None, None, max_word_length)
)
model = BidirectionalLanguageModel(options_file, weight_file)
embedding_op = model(ids_placeholder)['token_embeddings']
n_tokens = vocab.size
embed_dim = int(embedding_op.shape[2])
embeddings = np.zeros((n_tokens, embed_dim), dtype=DTYPE)
config = tf.ConfigProto(allow_soft_placement=True)
with tf.Session(config=config) as sess:
sess.run(tf.global_variables_initializer())
for k in range(n_tokens):
token = vocab.id_to_word(k)
char_ids = batcher.batch_sentences([[token]])[0, 1, :].reshape(
1, 1, -1)
embeddings[k, :] = sess.run(
embedding_op, feed_dict={ids_placeholder: char_ids}
)
with h5py.File(outfile, 'w') as fout:
ds = fout.create_dataset(
'embedding', embeddings.shape, dtype='float32', data=embeddings
)
def dump_bilm_embeddings(vocab_file, dataset_file, options_file,
weight_file, outfile):
with open(options_file, 'r') as fin:
options = json.load(fin)
max_word_length = options['char_cnn']['max_characters_per_token']
vocab = UnicodeCharsVocabulary(vocab_file, max_word_length)
batcher = Batcher(vocab_file, max_word_length)
ids_placeholder = tf.placeholder('int32',
shape=(None, None, max_word_length)
)
model = BidirectionalLanguageModel(options_file, weight_file)
ops = model(ids_placeholder)
config = tf.ConfigProto(allow_soft_placement=True)
with tf.Session(config=config) as sess:
sess.run(tf.global_variables_initializer())
sentence_id = 0
with open(dataset_file, 'r') as fin, h5py.File(outfile, 'w') as fout:
for line in fin:
sentence = line.strip().split()
char_ids = batcher.batch_sentences([sentence])
embeddings = sess.run(
ops['lm_embeddings'], feed_dict={ids_placeholder: char_ids}
)
ds = fout.create_dataset(
'{}'.format(sentence_id),
embeddings.shape[1:], dtype='float32',
data=embeddings[0, :, :, :]
)
sentence_id += 1
| bilm-tf-master | bilm/model.py |
'''
Train and test bidirectional language models.
'''
import os
import time
import json
import re
import tensorflow as tf
import numpy as np
from tensorflow.python.ops.init_ops import glorot_uniform_initializer
from .data import Vocabulary, UnicodeCharsVocabulary, InvalidNumberOfCharacters
DTYPE = 'float32'
DTYPE_INT = 'int64'
tf.logging.set_verbosity(tf.logging.INFO)
def print_variable_summary():
import pprint
variables = sorted([[v.name, v.get_shape()] for v in tf.global_variables()])
pprint.pprint(variables)
class LanguageModel(object):
'''
A class to build the tensorflow computational graph for NLMs
All hyperparameters and model configuration is specified in a dictionary
of 'options'.
is_training is a boolean used to control behavior of dropout layers
and softmax. Set to False for testing.
The LSTM cell is controlled by the 'lstm' key in options
Here is an example:
'lstm': {
'cell_clip': 5,
'dim': 4096,
'n_layers': 2,
'proj_clip': 5,
'projection_dim': 512,
'use_skip_connections': True},
    'projection_dim' is used as both the token embedding size and the LSTM output size.
'dim' is the hidden state size.
Set 'dim' == 'projection_dim' to skip a projection layer.
'''
def __init__(self, options, is_training):
self.options = options
self.is_training = is_training
self.bidirectional = options.get('bidirectional', False)
# use word or char inputs?
self.char_inputs = 'char_cnn' in self.options
# for the loss function
self.share_embedding_softmax = options.get(
'share_embedding_softmax', False)
if self.char_inputs and self.share_embedding_softmax:
raise ValueError("Sharing softmax and embedding weights requires "
"word input")
self.sample_softmax = options.get('sample_softmax', True)
self._build()
def _build_word_embeddings(self):
n_tokens_vocab = self.options['n_tokens_vocab']
batch_size = self.options['batch_size']
unroll_steps = self.options['unroll_steps']
# LSTM options
projection_dim = self.options['lstm']['projection_dim']
# the input token_ids and word embeddings
self.token_ids = tf.placeholder(DTYPE_INT,
shape=(batch_size, unroll_steps),
name='token_ids')
# the word embeddings
with tf.device("/cpu:0"):
self.embedding_weights = tf.get_variable(
"embedding", [n_tokens_vocab, projection_dim],
dtype=DTYPE,
)
self.embedding = tf.nn.embedding_lookup(self.embedding_weights,
self.token_ids)
# if a bidirectional LM then make placeholders for reverse
# model and embeddings
if self.bidirectional:
self.token_ids_reverse = tf.placeholder(DTYPE_INT,
shape=(batch_size, unroll_steps),
name='token_ids_reverse')
with tf.device("/cpu:0"):
self.embedding_reverse = tf.nn.embedding_lookup(
self.embedding_weights, self.token_ids_reverse)
def _build_word_char_embeddings(self):
'''
options contains key 'char_cnn': {
'n_characters': 262,
# includes the start / end characters
'max_characters_per_token': 50,
'filters': [
[1, 32],
[2, 32],
[3, 64],
[4, 128],
[5, 256],
[6, 512],
[7, 512]
],
'activation': 'tanh',
# for the character embedding
'embedding': {'dim': 16}
# for highway layers
# if omitted, then no highway layers
'n_highway': 2,
}
'''
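        # With the example 'filters' above, the concatenated CNN output has
        # n_filters = 32 + 32 + 64 + 128 + 256 + 512 + 512 = 1536 channels,
        # which the CNN_proj layer below maps down to projection_dim.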
batch_size = self.options['batch_size']
unroll_steps = self.options['unroll_steps']
projection_dim = self.options['lstm']['projection_dim']
cnn_options = self.options['char_cnn']
filters = cnn_options['filters']
n_filters = sum(f[1] for f in filters)
max_chars = cnn_options['max_characters_per_token']
char_embed_dim = cnn_options['embedding']['dim']
n_chars = cnn_options['n_characters']
if n_chars != 261:
raise InvalidNumberOfCharacters(
"Set n_characters=261 for training see the README.md"
)
        if cnn_options['activation'] == 'tanh':
            activation = tf.nn.tanh
        elif cnn_options['activation'] == 'relu':
            activation = tf.nn.relu
        else:
            raise ValueError("Unsupported char_cnn activation: %s"
                             % cnn_options['activation'])
# the input character ids
self.tokens_characters = tf.placeholder(DTYPE_INT,
shape=(batch_size, unroll_steps, max_chars),
name='tokens_characters')
# the character embeddings
with tf.device("/cpu:0"):
self.embedding_weights = tf.get_variable(
"char_embed", [n_chars, char_embed_dim],
dtype=DTYPE,
initializer=tf.random_uniform_initializer(-1.0, 1.0)
)
# shape (batch_size, unroll_steps, max_chars, embed_dim)
self.char_embedding = tf.nn.embedding_lookup(self.embedding_weights,
self.tokens_characters)
if self.bidirectional:
self.tokens_characters_reverse = tf.placeholder(DTYPE_INT,
shape=(batch_size, unroll_steps, max_chars),
name='tokens_characters_reverse')
self.char_embedding_reverse = tf.nn.embedding_lookup(
self.embedding_weights, self.tokens_characters_reverse)
# the convolutions
def make_convolutions(inp, reuse):
with tf.variable_scope('CNN', reuse=reuse) as scope:
convolutions = []
for i, (width, num) in enumerate(filters):
if cnn_options['activation'] == 'relu':
# He initialization for ReLU activation
# with char embeddings init between -1 and 1
#w_init = tf.random_normal_initializer(
# mean=0.0,
# stddev=np.sqrt(2.0 / (width * char_embed_dim))
#)
# Kim et al 2015, +/- 0.05
w_init = tf.random_uniform_initializer(
minval=-0.05, maxval=0.05)
elif cnn_options['activation'] == 'tanh':
# glorot init
w_init = tf.random_normal_initializer(
mean=0.0,
stddev=np.sqrt(1.0 / (width * char_embed_dim))
)
w = tf.get_variable(
"W_cnn_%s" % i,
[1, width, char_embed_dim, num],
initializer=w_init,
dtype=DTYPE)
b = tf.get_variable(
"b_cnn_%s" % i, [num], dtype=DTYPE,
initializer=tf.constant_initializer(0.0))
conv = tf.nn.conv2d(
inp, w,
strides=[1, 1, 1, 1],
padding="VALID") + b
# now max pool
conv = tf.nn.max_pool(
conv, [1, 1, max_chars-width+1, 1],
[1, 1, 1, 1], 'VALID')
# activation
conv = activation(conv)
conv = tf.squeeze(conv, squeeze_dims=[2])
convolutions.append(conv)
return tf.concat(convolutions, 2)
# for first model, this is False, for others it's True
reuse = tf.get_variable_scope().reuse
embedding = make_convolutions(self.char_embedding, reuse)
self.token_embedding_layers = [embedding]
if self.bidirectional:
# re-use the CNN weights from forward pass
embedding_reverse = make_convolutions(
self.char_embedding_reverse, True)
# for highway and projection layers:
        # reshape from (batch_size, n_tokens, dim) to (batch_size * n_tokens, dim)
n_highway = cnn_options.get('n_highway')
use_highway = n_highway is not None and n_highway > 0
use_proj = n_filters != projection_dim
if use_highway or use_proj:
embedding = tf.reshape(embedding, [-1, n_filters])
if self.bidirectional:
embedding_reverse = tf.reshape(embedding_reverse,
[-1, n_filters])
# set up weights for projection
if use_proj:
assert n_filters > projection_dim
with tf.variable_scope('CNN_proj') as scope:
W_proj_cnn = tf.get_variable(
"W_proj", [n_filters, projection_dim],
initializer=tf.random_normal_initializer(
mean=0.0, stddev=np.sqrt(1.0 / n_filters)),
dtype=DTYPE)
b_proj_cnn = tf.get_variable(
"b_proj", [projection_dim],
initializer=tf.constant_initializer(0.0),
dtype=DTYPE)
# apply highways layers
def high(x, ww_carry, bb_carry, ww_tr, bb_tr):
carry_gate = tf.nn.sigmoid(tf.matmul(x, ww_carry) + bb_carry)
transform_gate = tf.nn.relu(tf.matmul(x, ww_tr) + bb_tr)
return carry_gate * transform_gate + (1.0 - carry_gate) * x
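        # Highway layer: output = g * relu(W_tr x + b_tr) + (1 - g) * x,
        # where the gate g = sigmoid(W_carry x + b_carry).  b_carry is
        # initialized to -2.0 below, so each layer initially passes its
        # input through mostly unchanged.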
if use_highway:
highway_dim = n_filters
for i in range(n_highway):
with tf.variable_scope('CNN_high_%s' % i) as scope:
W_carry = tf.get_variable(
'W_carry', [highway_dim, highway_dim],
                        # Glorot init
initializer=tf.random_normal_initializer(
mean=0.0, stddev=np.sqrt(1.0 / highway_dim)),
dtype=DTYPE)
b_carry = tf.get_variable(
'b_carry', [highway_dim],
initializer=tf.constant_initializer(-2.0),
dtype=DTYPE)
W_transform = tf.get_variable(
'W_transform', [highway_dim, highway_dim],
initializer=tf.random_normal_initializer(
mean=0.0, stddev=np.sqrt(1.0 / highway_dim)),
dtype=DTYPE)
b_transform = tf.get_variable(
'b_transform', [highway_dim],
initializer=tf.constant_initializer(0.0),
dtype=DTYPE)
embedding = high(embedding, W_carry, b_carry,
W_transform, b_transform)
if self.bidirectional:
embedding_reverse = high(embedding_reverse,
W_carry, b_carry,
W_transform, b_transform)
self.token_embedding_layers.append(
tf.reshape(embedding,
[batch_size, unroll_steps, highway_dim])
)
# finally project down to projection dim if needed
if use_proj:
embedding = tf.matmul(embedding, W_proj_cnn) + b_proj_cnn
if self.bidirectional:
embedding_reverse = tf.matmul(embedding_reverse, W_proj_cnn) \
+ b_proj_cnn
self.token_embedding_layers.append(
tf.reshape(embedding,
[batch_size, unroll_steps, projection_dim])
)
# reshape back to (batch_size, tokens, dim)
if use_highway or use_proj:
shp = [batch_size, unroll_steps, projection_dim]
embedding = tf.reshape(embedding, shp)
if self.bidirectional:
embedding_reverse = tf.reshape(embedding_reverse, shp)
# at last assign attributes for remainder of the model
self.embedding = embedding
if self.bidirectional:
self.embedding_reverse = embedding_reverse
def _build(self):
# size of input options
n_tokens_vocab = self.options['n_tokens_vocab']
batch_size = self.options['batch_size']
unroll_steps = self.options['unroll_steps']
# LSTM options
lstm_dim = self.options['lstm']['dim']
projection_dim = self.options['lstm']['projection_dim']
n_lstm_layers = self.options['lstm'].get('n_layers', 1)
dropout = self.options['dropout']
keep_prob = 1.0 - dropout
if self.char_inputs:
self._build_word_char_embeddings()
else:
self._build_word_embeddings()
# now the LSTMs
# these will collect the initial states for the forward
# (and reverse LSTMs if we are doing bidirectional)
self.init_lstm_state = []
self.final_lstm_state = []
# get the LSTM inputs
if self.bidirectional:
lstm_inputs = [self.embedding, self.embedding_reverse]
else:
lstm_inputs = [self.embedding]
# now compute the LSTM outputs
cell_clip = self.options['lstm'].get('cell_clip')
proj_clip = self.options['lstm'].get('proj_clip')
use_skip_connections = self.options['lstm'].get(
'use_skip_connections')
if use_skip_connections:
print("USING SKIP CONNECTIONS")
lstm_outputs = []
for lstm_num, lstm_input in enumerate(lstm_inputs):
lstm_cells = []
for i in range(n_lstm_layers):
if projection_dim < lstm_dim:
                    # we are projecting the output down
lstm_cell = tf.nn.rnn_cell.LSTMCell(
lstm_dim, num_proj=projection_dim,
cell_clip=cell_clip, proj_clip=proj_clip)
else:
lstm_cell = tf.nn.rnn_cell.LSTMCell(
lstm_dim,
cell_clip=cell_clip, proj_clip=proj_clip)
if use_skip_connections:
# ResidualWrapper adds inputs to outputs
if i == 0:
# don't add skip connection from token embedding to
# 1st layer output
pass
else:
# add a skip connection
lstm_cell = tf.nn.rnn_cell.ResidualWrapper(lstm_cell)
# add dropout
if self.is_training:
lstm_cell = tf.nn.rnn_cell.DropoutWrapper(lstm_cell,
input_keep_prob=keep_prob)
lstm_cells.append(lstm_cell)
if n_lstm_layers > 1:
lstm_cell = tf.nn.rnn_cell.MultiRNNCell(lstm_cells)
else:
lstm_cell = lstm_cells[0]
with tf.control_dependencies([lstm_input]):
self.init_lstm_state.append(
lstm_cell.zero_state(batch_size, DTYPE))
# NOTE: this variable scope is for backward compatibility
# with existing models...
if self.bidirectional:
with tf.variable_scope('RNN_%s' % lstm_num):
_lstm_output_unpacked, final_state = tf.nn.static_rnn(
lstm_cell,
tf.unstack(lstm_input, axis=1),
initial_state=self.init_lstm_state[-1])
else:
_lstm_output_unpacked, final_state = tf.nn.static_rnn(
lstm_cell,
tf.unstack(lstm_input, axis=1),
initial_state=self.init_lstm_state[-1])
self.final_lstm_state.append(final_state)
            # (batch_size * unroll_steps, projection_dim)
lstm_output_flat = tf.reshape(
tf.stack(_lstm_output_unpacked, axis=1), [-1, projection_dim])
if self.is_training:
# add dropout to output
lstm_output_flat = tf.nn.dropout(lstm_output_flat,
keep_prob)
tf.add_to_collection('lstm_output_embeddings',
_lstm_output_unpacked)
lstm_outputs.append(lstm_output_flat)
self._build_loss(lstm_outputs)
def _build_loss(self, lstm_outputs):
'''
Create:
self.total_loss: total loss op for training
self.softmax_W, softmax_b: the softmax variables
self.next_token_id / _reverse: placeholders for gold input
'''
batch_size = self.options['batch_size']
unroll_steps = self.options['unroll_steps']
n_tokens_vocab = self.options['n_tokens_vocab']
# DEFINE next_token_id and *_reverse placeholders for the gold input
def _get_next_token_placeholders(suffix):
name = 'next_token_id' + suffix
id_placeholder = tf.placeholder(DTYPE_INT,
shape=(batch_size, unroll_steps),
name=name)
return id_placeholder
# get the window and weight placeholders
self.next_token_id = _get_next_token_placeholders('')
if self.bidirectional:
self.next_token_id_reverse = _get_next_token_placeholders(
'_reverse')
# DEFINE THE SOFTMAX VARIABLES
# get the dimension of the softmax weights
# softmax dimension is the size of the output projection_dim
softmax_dim = self.options['lstm']['projection_dim']
# the output softmax variables -- they are shared if bidirectional
if self.share_embedding_softmax:
# softmax_W is just the embedding layer
self.softmax_W = self.embedding_weights
with tf.variable_scope('softmax'), tf.device('/cpu:0'):
            # Glorot init (std = 1.0 / sqrt(fan_in))
softmax_init = tf.random_normal_initializer(0.0,
1.0 / np.sqrt(softmax_dim))
if not self.share_embedding_softmax:
self.softmax_W = tf.get_variable(
'W', [n_tokens_vocab, softmax_dim],
dtype=DTYPE,
initializer=softmax_init
)
self.softmax_b = tf.get_variable(
'b', [n_tokens_vocab],
dtype=DTYPE,
initializer=tf.constant_initializer(0.0))
# now calculate losses
# loss for each direction of the LSTM
self.individual_losses = []
if self.bidirectional:
next_ids = [self.next_token_id, self.next_token_id_reverse]
else:
next_ids = [self.next_token_id]
for id_placeholder, lstm_output_flat in zip(next_ids, lstm_outputs):
# flatten the LSTM output and next token id gold to shape:
# (batch_size * unroll_steps, softmax_dim)
# Flatten and reshape the token_id placeholders
next_token_id_flat = tf.reshape(id_placeholder, [-1, 1])
with tf.control_dependencies([lstm_output_flat]):
if self.is_training and self.sample_softmax:
losses = tf.nn.sampled_softmax_loss(
self.softmax_W, self.softmax_b,
next_token_id_flat, lstm_output_flat,
self.options['n_negative_samples_batch'],
self.options['n_tokens_vocab'],
num_true=1)
else:
# get the full softmax loss
output_scores = tf.matmul(
lstm_output_flat,
tf.transpose(self.softmax_W)
) + self.softmax_b
# NOTE: tf.nn.sparse_softmax_cross_entropy_with_logits
# expects unnormalized output since it performs the
# softmax internally
losses = tf.nn.sparse_softmax_cross_entropy_with_logits(
logits=output_scores,
labels=tf.squeeze(next_token_id_flat, squeeze_dims=[1])
)
self.individual_losses.append(tf.reduce_mean(losses))
# now make the total loss -- it's the mean of the individual losses
if self.bidirectional:
self.total_loss = 0.5 * (self.individual_losses[0]
+ self.individual_losses[1])
else:
self.total_loss = self.individual_losses[0]
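        # Each individual loss is the mean per-token negative log-likelihood,
        # so perplexity can be recovered as exp(self.total_loss).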
def average_gradients(tower_grads, batch_size, options):
# calculate average gradient for each shared variable across all GPUs
average_grads = []
for grad_and_vars in zip(*tower_grads):
# Note that each grad_and_vars looks like the following:
# ((grad0_gpu0, var0_gpu0), ... , (grad0_gpuN, var0_gpuN))
# We need to average the gradients across each GPU.
g0, v0 = grad_and_vars[0]
if g0 is None:
# no gradient for this variable, skip it
average_grads.append((g0, v0))
continue
if isinstance(g0, tf.IndexedSlices):
# If the gradient is type IndexedSlices then this is a sparse
# gradient with attributes indices and values.
# To average, need to concat them individually then create
# a new IndexedSlices object.
indices = []
values = []
for g, v in grad_and_vars:
indices.append(g.indices)
values.append(g.values)
all_indices = tf.concat(indices, 0)
avg_values = tf.concat(values, 0) / len(grad_and_vars)
# deduplicate across indices
av, ai = _deduplicate_indexed_slices(avg_values, all_indices)
grad = tf.IndexedSlices(av, ai, dense_shape=g0.dense_shape)
else:
# a normal tensor can just do a simple average
grads = []
for g, v in grad_and_vars:
# Add 0 dimension to the gradients to represent the tower.
expanded_g = tf.expand_dims(g, 0)
# Append on a 'tower' dimension which we will average over
grads.append(expanded_g)
# Average over the 'tower' dimension.
grad = tf.concat(grads, 0)
grad = tf.reduce_mean(grad, 0)
# the Variables are redundant because they are shared
# across towers. So.. just return the first tower's pointer to
# the Variable.
v = grad_and_vars[0][1]
grad_and_var = (grad, v)
average_grads.append(grad_and_var)
assert len(average_grads) == len(list(zip(*tower_grads)))
return average_grads
def summary_gradient_updates(grads, opt, lr):
'''get summary ops for the magnitude of gradient updates'''
# strategy:
# make a dict of variable name -> [variable, grad, adagrad slot]
vars_grads = {}
for v in tf.trainable_variables():
vars_grads[v.name] = [v, None, None]
for g, v in grads:
vars_grads[v.name][1] = g
vars_grads[v.name][2] = opt.get_slot(v, 'accumulator')
# now make summaries
ret = []
for vname, (v, g, a) in vars_grads.items():
if g is None:
continue
if isinstance(g, tf.IndexedSlices):
# a sparse gradient - only take norm of params that are updated
values = tf.gather(v, g.indices)
updates = lr * g.values
if a is not None:
updates /= tf.sqrt(tf.gather(a, g.indices))
else:
values = v
updates = lr * g
if a is not None:
updates /= tf.sqrt(a)
values_norm = tf.sqrt(tf.reduce_sum(v * v)) + 1.0e-7
updates_norm = tf.sqrt(tf.reduce_sum(updates * updates))
ret.append(
tf.summary.scalar('UPDATE/' + vname.replace(":", "_"), updates_norm / values_norm))
return ret
def _deduplicate_indexed_slices(values, indices):
"""Sums `values` associated with any non-unique `indices`.
Args:
values: A `Tensor` with rank >= 1.
indices: A one-dimensional integer `Tensor`, indexing into the first
dimension of `values` (as in an IndexedSlices object).
Returns:
A tuple of (`summed_values`, `unique_indices`) where `unique_indices` is a
de-duplicated version of `indices` and `summed_values` contains the sum of
`values` slices associated with each unique index.
"""
unique_indices, new_index_positions = tf.unique(indices)
summed_values = tf.unsorted_segment_sum(
values, new_index_positions,
tf.shape(unique_indices)[0])
return (summed_values, unique_indices)
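    # Worked example: values=[[1.], [2.], [3.]], indices=[0, 2, 0]
    #   -> summed_values=[[4.], [2.]], unique_indices=[0, 2]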
def _get_feed_dict_from_X(X, start, end, model, char_inputs, bidirectional):
feed_dict = {}
if not char_inputs:
token_ids = X['token_ids'][start:end]
feed_dict[model.token_ids] = token_ids
else:
# character inputs
char_ids = X['tokens_characters'][start:end]
feed_dict[model.tokens_characters] = char_ids
if bidirectional:
if not char_inputs:
feed_dict[model.token_ids_reverse] = \
X['token_ids_reverse'][start:end]
else:
feed_dict[model.tokens_characters_reverse] = \
X['tokens_characters_reverse'][start:end]
# now the targets with weights
next_id_placeholders = [[model.next_token_id, '']]
if bidirectional:
next_id_placeholders.append([model.next_token_id_reverse, '_reverse'])
for id_placeholder, suffix in next_id_placeholders:
name = 'next_token_id' + suffix
feed_dict[id_placeholder] = X[name][start:end]
return feed_dict
def train(options, data, n_gpus, tf_save_dir, tf_log_dir,
restart_ckpt_file=None):
# not restarting so save the options
if restart_ckpt_file is None:
with open(os.path.join(tf_save_dir, 'options.json'), 'w') as fout:
fout.write(json.dumps(options))
with tf.device('/cpu:0'):
global_step = tf.get_variable(
'global_step', [],
initializer=tf.constant_initializer(0), trainable=False)
# set up the optimizer
lr = options.get('learning_rate', 0.2)
opt = tf.train.AdagradOptimizer(learning_rate=lr,
initial_accumulator_value=1.0)
# calculate the gradients on each GPU
tower_grads = []
models = []
train_perplexity = tf.get_variable(
'train_perplexity', [],
initializer=tf.constant_initializer(0.0), trainable=False)
norm_summaries = []
for k in range(n_gpus):
with tf.device('/gpu:%d' % k):
with tf.variable_scope('lm', reuse=k > 0):
# calculate the loss for one model replica and get
# lstm states
model = LanguageModel(options, True)
loss = model.total_loss
models.append(model)
# get gradients
grads = opt.compute_gradients(
loss * options['unroll_steps'],
aggregation_method=tf.AggregationMethod.EXPERIMENTAL_TREE,
)
tower_grads.append(grads)
# keep track of loss across all GPUs
train_perplexity += loss
print_variable_summary()
# calculate the mean of each gradient across all GPUs
grads = average_gradients(tower_grads, options['batch_size'], options)
grads, norm_summary_ops = clip_grads(grads, options, True, global_step)
norm_summaries.extend(norm_summary_ops)
# log the training perplexity
train_perplexity = tf.exp(train_perplexity / n_gpus)
        perplexity_summary = tf.summary.scalar(
            'train_perplexity', train_perplexity)
# some histogram summaries. all models use the same parameters
# so only need to summarize one
histogram_summaries = [
tf.summary.histogram('token_embedding', models[0].embedding)
]
# tensors of the output from the LSTM layer
lstm_out = tf.get_collection('lstm_output_embeddings')
histogram_summaries.append(
tf.summary.histogram('lstm_embedding_0', lstm_out[0]))
if options.get('bidirectional', False):
# also have the backward embedding
histogram_summaries.append(
tf.summary.histogram('lstm_embedding_1', lstm_out[1]))
# apply the gradients to create the training operation
train_op = opt.apply_gradients(grads, global_step=global_step)
# histograms of variables
for v in tf.global_variables():
histogram_summaries.append(tf.summary.histogram(v.name.replace(":", "_"), v))
# get the gradient updates -- these aren't histograms, but we'll
# only update them when histograms are computed
histogram_summaries.extend(
summary_gradient_updates(grads, opt, lr))
saver = tf.train.Saver(tf.global_variables(), max_to_keep=2)
        summary_op = tf.summary.merge(
            [perplexity_summary] + norm_summaries
        )
hist_summary_op = tf.summary.merge(histogram_summaries)
        init = tf.global_variables_initializer()
# do the training loop
bidirectional = options.get('bidirectional', False)
with tf.Session(config=tf.ConfigProto(
allow_soft_placement=True)) as sess:
sess.run(init)
# load the checkpoint data if needed
if restart_ckpt_file is not None:
loader = tf.train.Saver()
loader.restore(sess, restart_ckpt_file)
summary_writer = tf.summary.FileWriter(tf_log_dir, sess.graph)
# For each batch:
# Get a batch of data from the generator. The generator will
# yield batches of size batch_size * n_gpus that are sliced
        # and fed for each required placeholder.
#
# We also need to be careful with the LSTM states. We will
# collect the final LSTM states after each batch, then feed
# them back in as the initial state for the next batch
batch_size = options['batch_size']
unroll_steps = options['unroll_steps']
n_train_tokens = options.get('n_train_tokens', 768648884)
n_tokens_per_batch = batch_size * unroll_steps * n_gpus
n_batches_per_epoch = int(n_train_tokens / n_tokens_per_batch)
n_batches_total = options['n_epochs'] * n_batches_per_epoch
print("Training for %s epochs and %s batches" % (
options['n_epochs'], n_batches_total))
# get the initial lstm states
init_state_tensors = []
final_state_tensors = []
for model in models:
init_state_tensors.extend(model.init_lstm_state)
final_state_tensors.extend(model.final_lstm_state)
char_inputs = 'char_cnn' in options
if char_inputs:
max_chars = options['char_cnn']['max_characters_per_token']
if not char_inputs:
feed_dict = {
model.token_ids:
np.zeros([batch_size, unroll_steps], dtype=np.int64)
for model in models
}
else:
feed_dict = {
model.tokens_characters:
np.zeros([batch_size, unroll_steps, max_chars],
dtype=np.int32)
for model in models
}
if bidirectional:
if not char_inputs:
feed_dict.update({
model.token_ids_reverse:
np.zeros([batch_size, unroll_steps], dtype=np.int64)
for model in models
})
else:
feed_dict.update({
model.tokens_characters_reverse:
np.zeros([batch_size, unroll_steps, max_chars],
dtype=np.int32)
for model in models
})
init_state_values = sess.run(init_state_tensors, feed_dict=feed_dict)
t1 = time.time()
data_gen = data.iter_batches(batch_size * n_gpus, unroll_steps)
for batch_no, batch in enumerate(data_gen, start=1):
# slice the input in the batch for the feed_dict
X = batch
feed_dict = {t: v for t, v in zip(
init_state_tensors, init_state_values)}
for k in range(n_gpus):
model = models[k]
start = k * batch_size
end = (k + 1) * batch_size
feed_dict.update(
_get_feed_dict_from_X(X, start, end, model,
char_inputs, bidirectional)
)
# This runs the train_op, summaries and the "final_state_tensors"
# which just returns the tensors, passing in the initial
# state tensors, token ids and next token ids
if batch_no % 1250 != 0:
ret = sess.run(
[train_op, summary_op, train_perplexity] +
final_state_tensors,
feed_dict=feed_dict
)
# first three entries of ret are:
# train_op, summary_op, train_perplexity
# last entries are the final states -- set them to
# init_state_values
# for next batch
init_state_values = ret[3:]
else:
# also run the histogram summaries
ret = sess.run(
[train_op, summary_op, train_perplexity, hist_summary_op] +
final_state_tensors,
feed_dict=feed_dict
)
init_state_values = ret[4:]
if batch_no % 1250 == 0:
summary_writer.add_summary(ret[3], batch_no)
if batch_no % 100 == 0:
# write the summaries to tensorboard and display perplexity
summary_writer.add_summary(ret[1], batch_no)
print("Batch %s, train_perplexity=%s" % (batch_no, ret[2]))
print("Total time: %s" % (time.time() - t1))
if (batch_no % 1250 == 0) or (batch_no == n_batches_total):
# save the model
checkpoint_path = os.path.join(tf_save_dir, 'model.ckpt')
saver.save(sess, checkpoint_path, global_step=global_step)
if batch_no == n_batches_total:
# done training!
break
def clip_by_global_norm_summary(t_list, clip_norm, norm_name, variables):
# wrapper around tf.clip_by_global_norm that also does summary ops of norms
# compute norms
# use global_norm with one element to handle IndexedSlices vs dense
norms = [tf.global_norm([t]) for t in t_list]
# summary ops before clipping
summary_ops = []
for ns, v in zip(norms, variables):
name = 'norm_pre_clip/' + v.name.replace(":", "_")
summary_ops.append(tf.summary.scalar(name, ns))
# clip
clipped_t_list, tf_norm = tf.clip_by_global_norm(t_list, clip_norm)
# summary ops after clipping
norms_post = [tf.global_norm([t]) for t in clipped_t_list]
for ns, v in zip(norms_post, variables):
name = 'norm_post_clip/' + v.name.replace(":", "_")
summary_ops.append(tf.summary.scalar(name, ns))
summary_ops.append(tf.summary.scalar(norm_name, tf_norm))
return clipped_t_list, tf_norm, summary_ops
def clip_grads(grads, options, do_summaries, global_step):
# grads = [(grad1, var1), (grad2, var2), ...]
def _clip_norms(grad_and_vars, val, name):
# grad_and_vars is a list of (g, v) pairs
grad_tensors = [g for g, v in grad_and_vars]
vv = [v for g, v in grad_and_vars]
scaled_val = val
if do_summaries:
clipped_tensors, g_norm, so = clip_by_global_norm_summary(
grad_tensors, scaled_val, name, vv)
else:
so = []
clipped_tensors, g_norm = tf.clip_by_global_norm(
grad_tensors, scaled_val)
ret = []
for t, (g, v) in zip(clipped_tensors, grad_and_vars):
ret.append((t, v))
return ret, so
all_clip_norm_val = options['all_clip_norm_val']
ret, summary_ops = _clip_norms(grads, all_clip_norm_val, 'norm_grad')
assert len(ret) == len(grads)
return ret, summary_ops
def test(options, ckpt_file, data, batch_size=256):
'''
Get the test set perplexity!
'''
bidirectional = options.get('bidirectional', False)
char_inputs = 'char_cnn' in options
if char_inputs:
max_chars = options['char_cnn']['max_characters_per_token']
unroll_steps = 1
config = tf.ConfigProto(allow_soft_placement=True)
with tf.Session(config=config) as sess:
with tf.device('/gpu:0'), tf.variable_scope('lm'):
test_options = dict(options)
# NOTE: the number of tokens we skip in the last incomplete
            # batch is bounded above by batch_size * unroll_steps
test_options['batch_size'] = batch_size
test_options['unroll_steps'] = 1
model = LanguageModel(test_options, False)
# we use the "Saver" class to load the variables
loader = tf.train.Saver()
loader.restore(sess, ckpt_file)
# model.total_loss is the op to compute the loss
# perplexity is exp(loss)
init_state_tensors = model.init_lstm_state
final_state_tensors = model.final_lstm_state
if not char_inputs:
feed_dict = {
model.token_ids:
np.zeros([batch_size, unroll_steps], dtype=np.int64)
}
if bidirectional:
feed_dict.update({
model.token_ids_reverse:
np.zeros([batch_size, unroll_steps], dtype=np.int64)
})
else:
feed_dict = {
model.tokens_characters:
np.zeros([batch_size, unroll_steps, max_chars],
dtype=np.int32)
}
if bidirectional:
feed_dict.update({
model.tokens_characters_reverse:
np.zeros([batch_size, unroll_steps, max_chars],
dtype=np.int32)
})
init_state_values = sess.run(
init_state_tensors,
feed_dict=feed_dict)
t1 = time.time()
batch_losses = []
total_loss = 0.0
for batch_no, batch in enumerate(
data.iter_batches(batch_size, 1), start=1):
# slice the input in the batch for the feed_dict
X = batch
feed_dict = {t: v for t, v in zip(
init_state_tensors, init_state_values)}
feed_dict.update(
_get_feed_dict_from_X(X, 0, X['token_ids'].shape[0], model,
char_inputs, bidirectional)
)
ret = sess.run(
[model.total_loss, final_state_tensors],
feed_dict=feed_dict
)
loss, init_state_values = ret
batch_losses.append(loss)
batch_perplexity = np.exp(loss)
total_loss += loss
avg_perplexity = np.exp(total_loss / batch_no)
print("batch=%s, batch_perplexity=%s, avg_perplexity=%s, time=%s" %
(batch_no, batch_perplexity, avg_perplexity, time.time() - t1))
avg_loss = np.mean(batch_losses)
print("FINSIHED! AVERAGE PERPLEXITY = %s" % np.exp(avg_loss))
return np.exp(avg_loss)
def load_options_latest_checkpoint(tf_save_dir):
options_file = os.path.join(tf_save_dir, 'options.json')
ckpt_file = tf.train.latest_checkpoint(tf_save_dir)
with open(options_file, 'r') as fin:
options = json.load(fin)
return options, ckpt_file
def load_vocab(vocab_file, max_word_length=None):
if max_word_length:
return UnicodeCharsVocabulary(vocab_file, max_word_length,
validate_file=True)
else:
return Vocabulary(vocab_file, validate_file=True)
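    # Example (the path is illustrative): load_vocab('vocab.txt', 50) returns a
    # UnicodeCharsVocabulary for character-aware models, while
    # load_vocab('vocab.txt') returns a plain word-level Vocabulary.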
def dump_weights(tf_save_dir, outfile):
'''
Dump the trained weights from a model to a HDF5 file.
'''
import h5py
def _get_outname(tf_name):
outname = re.sub(':0$', '', tf_name)
outname = outname.lstrip('lm/')
outname = re.sub('/rnn/', '/RNN/', outname)
outname = re.sub('/multi_rnn_cell/', '/MultiRNNCell/', outname)
outname = re.sub('/cell_', '/Cell', outname)
outname = re.sub('/lstm_cell/', '/LSTMCell/', outname)
if '/RNN/' in outname:
if 'projection' in outname:
outname = re.sub('projection/kernel', 'W_P_0', outname)
else:
outname = re.sub('/kernel', '/W_0', outname)
outname = re.sub('/bias', '/B', outname)
return outname
options, ckpt_file = load_options_latest_checkpoint(tf_save_dir)
config = tf.ConfigProto(allow_soft_placement=True)
with tf.Session(config=config) as sess:
with tf.variable_scope('lm'):
model = LanguageModel(options, False)
# we use the "Saver" class to load the variables
loader = tf.train.Saver()
loader.restore(sess, ckpt_file)
with h5py.File(outfile, 'w') as fout:
for v in tf.trainable_variables():
if v.name.find('softmax') >= 0:
# don't dump these
continue
outname = _get_outname(v.name)
print("Saving variable {0} with name {1}".format(
v.name, outname))
shape = v.get_shape().as_list()
dset = fout.create_dataset(outname, shape, dtype='float32')
values = sess.run([v])[0]
dset[...] = values
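# Example usage (paths are illustrative):
#
#     dump_weights('/path/to/checkpoint_dir', 'lm_weights.hdf5')
#
# This writes every non-softmax trainable variable to the HDF5 file under
# the renamed keys produced by _get_outname above.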
| bilm-tf-master | bilm/training.py |
# originally based on https://github.com/tensorflow/models/tree/master/lm_1b
import glob
import random
import numpy as np
from typing import List
class Vocabulary(object):
'''
A token vocabulary. Holds a map from token to ids and provides
a method for encoding text to a sequence of ids.
'''
def __init__(self, filename, validate_file=False):
'''
filename = the vocabulary file. It is a flat text file with one
(normalized) token per line. In addition, the file should also
contain the special tokens <S>, </S>, <UNK> (case sensitive).
'''
self._id_to_word = []
self._word_to_id = {}
self._unk = -1
self._bos = -1
self._eos = -1
with open(filename) as f:
idx = 0
for line in f:
word_name = line.strip()
if word_name == '<S>':
self._bos = idx
elif word_name == '</S>':
self._eos = idx
elif word_name == '<UNK>':
self._unk = idx
if word_name == '!!!MAXTERMID':
continue
self._id_to_word.append(word_name)
self._word_to_id[word_name] = idx
idx += 1
# check to ensure file has special tokens
if validate_file:
if self._bos == -1 or self._eos == -1 or self._unk == -1:
raise ValueError("Ensure the vocabulary file has "
"<S>, </S>, <UNK> tokens")
@property
def bos(self):
return self._bos
@property
def eos(self):
return self._eos
@property
def unk(self):
return self._unk
@property
def size(self):
return len(self._id_to_word)
def word_to_id(self, word):
if word in self._word_to_id:
return self._word_to_id[word]
return self.unk
def id_to_word(self, cur_id):
return self._id_to_word[cur_id]
def decode(self, cur_ids):
"""Convert a list of ids to a sentence, with space inserted."""
return ' '.join([self.id_to_word(cur_id) for cur_id in cur_ids])
def encode(self, sentence, reverse=False, split=True):
"""Convert a sentence to a list of ids, with special tokens added.
Sentence is a single string with tokens separated by whitespace.
If reverse, then the sentence is assumed to be reversed, and
this method will swap the BOS/EOS tokens appropriately."""
if split:
word_ids = [
self.word_to_id(cur_word) for cur_word in sentence.split()
]
else:
word_ids = [self.word_to_id(cur_word) for cur_word in sentence]
if reverse:
return np.array([self.eos] + word_ids + [self.bos], dtype=np.int32)
else:
return np.array([self.bos] + word_ids + [self.eos], dtype=np.int32)
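    # Example (a sketch; the ids depend on the vocabulary file, and unknown
    # tokens map to the <UNK> id):
    #
    #     vocab = Vocabulary('vocab.txt')
    #     vocab.encode('the cat .')
    #     # -> array([bos_id, id('the'), id('cat'), id('.'), eos_id], dtype=int32)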
class UnicodeCharsVocabulary(Vocabulary):
"""Vocabulary containing character-level and word level information.
    Has a word vocabulary that is used to look up word ids and
    a character-id mapping that is used to map words to arrays of character ids.
    The character ids are defined by ord(c) for c in word.encode('utf-8').
    This limits the total number of possible char ids to 256.
To this we add 5 additional special ids: begin sentence, end sentence,
begin word, end word and padding.
WARNING: for prediction, we add +1 to the output ids from this
class to create a special padding id (=0). As a result, we suggest
you use the `Batcher`, `TokenBatcher`, and `LMDataset` classes instead
of this lower level class. If you are using this lower level class,
then be sure to add the +1 appropriately, otherwise embeddings computed
from the pre-trained model will be useless.
"""
def __init__(self, filename, max_word_length, **kwargs):
super(UnicodeCharsVocabulary, self).__init__(filename, **kwargs)
self._max_word_length = max_word_length
# char ids 0-255 come from utf-8 encoding bytes
# assign 256-300 to special chars
self.bos_char = 256 # <begin sentence>
self.eos_char = 257 # <end sentence>
self.bow_char = 258 # <begin word>
self.eow_char = 259 # <end word>
self.pad_char = 260 # <padding>
num_words = len(self._id_to_word)
self._word_char_ids = np.zeros([num_words, max_word_length],
dtype=np.int32)
        # the character representation of the begin/end of sentence characters
def _make_bos_eos(c):
r = np.zeros([self.max_word_length], dtype=np.int32)
r[:] = self.pad_char
r[0] = self.bow_char
r[1] = c
r[2] = self.eow_char
return r
self.bos_chars = _make_bos_eos(self.bos_char)
self.eos_chars = _make_bos_eos(self.eos_char)
for i, word in enumerate(self._id_to_word):
self._word_char_ids[i] = self._convert_word_to_char_ids(word)
self._word_char_ids[self.bos] = self.bos_chars
self._word_char_ids[self.eos] = self.eos_chars
# TODO: properly handle <UNK>
@property
def word_char_ids(self):
return self._word_char_ids
@property
def max_word_length(self):
return self._max_word_length
def _convert_word_to_char_ids(self, word):
code = np.zeros([self.max_word_length], dtype=np.int32)
code[:] = self.pad_char
word_encoded = word.encode('utf-8', 'ignore')[:(self.max_word_length-2)]
code[0] = self.bow_char
for k, chr_id in enumerate(word_encoded, start=1):
code[k] = chr_id
code[len(word_encoded) + 1] = self.eow_char
return code
def word_to_char_ids(self, word):
if word in self._word_to_id:
return self._word_char_ids[self._word_to_id[word]]
else:
return self._convert_word_to_char_ids(word)
def encode_chars(self, sentence, reverse=False, split=True):
'''
        Encode the sentence (a white space delimited string of tokens)
        as a matrix of character ids.
'''
if split:
chars_ids = [self.word_to_char_ids(cur_word)
for cur_word in sentence.split()]
else:
chars_ids = [self.word_to_char_ids(cur_word)
for cur_word in sentence]
if reverse:
return np.vstack([self.eos_chars] + chars_ids + [self.bos_chars])
else:
return np.vstack([self.bos_chars] + chars_ids + [self.eos_chars])
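    # Character-id layout for a single word (before the +1 offset applied by
    # Batcher), with max_word_length=50:
    #
    #     word_to_char_ids('cat') -> [258, 99, 97, 116, 259, 260, 260, ...]
    #     (bow_char, the utf-8 bytes of 'cat', eow_char, then pad_char)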
class Batcher(object):
'''
Batch sentences of tokenized text into character id matrices.
'''
def __init__(self, lm_vocab_file: str, max_token_length: int):
'''
lm_vocab_file = the language model vocabulary file (one line per
token)
max_token_length = the maximum number of characters in each token
'''
self._lm_vocab = UnicodeCharsVocabulary(
lm_vocab_file, max_token_length
)
self._max_token_length = max_token_length
def batch_sentences(self, sentences: List[List[str]]):
'''
Batch the sentences as character ids
Each sentence is a list of tokens without <s> or </s>, e.g.
[['The', 'first', 'sentence', '.'], ['Second', '.']]
'''
n_sentences = len(sentences)
max_length = max(len(sentence) for sentence in sentences) + 2
X_char_ids = np.zeros(
(n_sentences, max_length, self._max_token_length),
dtype=np.int64
)
for k, sent in enumerate(sentences):
length = len(sent) + 2
char_ids_without_mask = self._lm_vocab.encode_chars(
sent, split=False)
# add one so that 0 is the mask value
X_char_ids[k, :length, :] = char_ids_without_mask + 1
return X_char_ids
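    # Example usage (a sketch; the vocab path is hypothetical):
    #
    #     batcher = Batcher('vocab.txt', 50)
    #     X = batcher.batch_sentences([['The', 'first', 'sentence', '.'],
    #                                  ['Second', '.']])
    #     # X.shape == (2, 6, 50): 4 tokens plus <S>/</S>, with all ids
    #     # offset by +1 so that 0 is the mask value.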
class TokenBatcher(object):
'''
Batch sentences of tokenized text into token id matrices.
'''
def __init__(self, lm_vocab_file: str):
'''
lm_vocab_file = the language model vocabulary file (one line per
token)
'''
self._lm_vocab = Vocabulary(lm_vocab_file)
def batch_sentences(self, sentences: List[List[str]]):
'''
        Batch the sentences as token ids.
Each sentence is a list of tokens without <s> or </s>, e.g.
[['The', 'first', 'sentence', '.'], ['Second', '.']]
'''
n_sentences = len(sentences)
max_length = max(len(sentence) for sentence in sentences) + 2
X_ids = np.zeros((n_sentences, max_length), dtype=np.int64)
for k, sent in enumerate(sentences):
length = len(sent) + 2
ids_without_mask = self._lm_vocab.encode(sent, split=False)
# add one so that 0 is the mask value
X_ids[k, :length] = ids_without_mask + 1
return X_ids
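    # Example (sketch): TokenBatcher('vocab.txt').batch_sentences(
    #     [['The', 'first', 'sentence', '.'], ['Second', '.']]) has shape (2, 6),
    # again with a +1 id offset so that 0 is the mask value.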
##### for training
def _get_batch(generator, batch_size, num_steps, max_word_length):
"""Read batches of input."""
cur_stream = [None] * batch_size
no_more_data = False
while True:
inputs = np.zeros([batch_size, num_steps], np.int32)
if max_word_length is not None:
char_inputs = np.zeros([batch_size, num_steps, max_word_length],
np.int32)
else:
char_inputs = None
targets = np.zeros([batch_size, num_steps], np.int32)
for i in range(batch_size):
cur_pos = 0
while cur_pos < num_steps:
if cur_stream[i] is None or len(cur_stream[i][0]) <= 1:
try:
cur_stream[i] = list(next(generator))
except StopIteration:
# No more data, exhaust current streams and quit
no_more_data = True
break
how_many = min(len(cur_stream[i][0]) - 1, num_steps - cur_pos)
next_pos = cur_pos + how_many
inputs[i, cur_pos:next_pos] = cur_stream[i][0][:how_many]
if max_word_length is not None:
char_inputs[i, cur_pos:next_pos] = cur_stream[i][1][
:how_many]
targets[i, cur_pos:next_pos] = cur_stream[i][0][1:how_many+1]
cur_pos = next_pos
cur_stream[i][0] = cur_stream[i][0][how_many:]
if max_word_length is not None:
cur_stream[i][1] = cur_stream[i][1][how_many:]
if no_more_data:
# There is no more data. Note: this will not return data
# for the incomplete batch
break
X = {'token_ids': inputs, 'tokens_characters': char_inputs,
'next_token_id': targets}
yield X
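    # Each yielded X packs the token stream contiguously: 'token_ids' and
    # 'next_token_id' have shape (batch_size, num_steps), with targets shifted
    # by one position, and 'tokens_characters' (if used) has shape
    # (batch_size, num_steps, max_word_length).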
class LMDataset(object):
"""
Hold a language model dataset.
A dataset is a list of tokenized files. Each file contains one sentence
per line. Each sentence is pre-tokenized and white space joined.
"""
def __init__(self, filepattern, vocab, reverse=False, test=False,
shuffle_on_load=False):
'''
filepattern = a glob string that specifies the list of files.
vocab = an instance of Vocabulary or UnicodeCharsVocabulary
reverse = if True, then iterate over tokens in each sentence in reverse
test = if True, then iterate through all data once then stop.
Otherwise, iterate forever.
shuffle_on_load = if True, then shuffle the sentences after loading.
'''
self._vocab = vocab
self._all_shards = glob.glob(filepattern)
print('Found %d shards at %s' % (len(self._all_shards), filepattern))
self._shards_to_choose = []
self._reverse = reverse
self._test = test
self._shuffle_on_load = shuffle_on_load
self._use_char_inputs = hasattr(vocab, 'encode_chars')
self._ids = self._load_random_shard()
def _choose_random_shard(self):
if len(self._shards_to_choose) == 0:
self._shards_to_choose = list(self._all_shards)
random.shuffle(self._shards_to_choose)
shard_name = self._shards_to_choose.pop()
return shard_name
def _load_random_shard(self):
"""Randomly select a file and read it."""
if self._test:
if len(self._all_shards) == 0:
# we've loaded all the data
                # this will propagate up to the generator in get_batch
# and stop iterating
raise StopIteration
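                # NOTE: under PEP 479 (Python 3.7+) a StopIteration that
                # escapes get_sentence() surfaces as a RuntimeError; earlier
                # interpreters simply stop the iteration as intended here.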
else:
shard_name = self._all_shards.pop()
else:
# just pick a random shard
shard_name = self._choose_random_shard()
ids = self._load_shard(shard_name)
self._i = 0
self._nids = len(ids)
return ids
def _load_shard(self, shard_name):
"""Read one file and convert to ids.
Args:
shard_name: file path.
Returns:
list of (id, char_id) tuples.
"""
print('Loading data from: %s' % shard_name)
with open(shard_name) as f:
sentences_raw = f.readlines()
if self._reverse:
sentences = []
for sentence in sentences_raw:
splitted = sentence.split()
splitted.reverse()
sentences.append(' '.join(splitted))
else:
sentences = sentences_raw
if self._shuffle_on_load:
random.shuffle(sentences)
ids = [self.vocab.encode(sentence, self._reverse)
for sentence in sentences]
if self._use_char_inputs:
chars_ids = [self.vocab.encode_chars(sentence, self._reverse)
for sentence in sentences]
else:
chars_ids = [None] * len(ids)
print('Loaded %d sentences.' % len(ids))
print('Finished loading')
return list(zip(ids, chars_ids))
def get_sentence(self):
while True:
if self._i == self._nids:
self._ids = self._load_random_shard()
ret = self._ids[self._i]
self._i += 1
yield ret
@property
def max_word_length(self):
if self._use_char_inputs:
return self._vocab.max_word_length
else:
return None
def iter_batches(self, batch_size, num_steps):
for X in _get_batch(self.get_sentence(), batch_size, num_steps,
self.max_word_length):
# token_ids = (batch_size, num_steps)
# char_inputs = (batch_size, num_steps, 50) of character ids
# targets = word ID of next word (batch_size, num_steps)
yield X
@property
def vocab(self):
return self._vocab
class BidirectionalLMDataset(object):
def __init__(self, filepattern, vocab, test=False, shuffle_on_load=False):
'''
bidirectional version of LMDataset
'''
self._data_forward = LMDataset(
filepattern, vocab, reverse=False, test=test,
shuffle_on_load=shuffle_on_load)
self._data_reverse = LMDataset(
filepattern, vocab, reverse=True, test=test,
shuffle_on_load=shuffle_on_load)
def iter_batches(self, batch_size, num_steps):
max_word_length = self._data_forward.max_word_length
for X, Xr in zip(
_get_batch(self._data_forward.get_sentence(), batch_size,
num_steps, max_word_length),
_get_batch(self._data_reverse.get_sentence(), batch_size,
num_steps, max_word_length)
):
for k, v in Xr.items():
X[k + '_reverse'] = v
yield X
class InvalidNumberOfCharacters(Exception):
pass
| bilm-tf-master | bilm/data.py |
import unittest
import os
import json
import numpy as np
import tensorflow as tf
from bilm.model import BidirectionalLanguageModel
from bilm.data import Batcher
from bilm.elmo import weight_layers
FIXTURES = 'tests/fixtures/model/'
class TestWeightedLayers(unittest.TestCase):
def tearDown(self):
tf.reset_default_graph()
self.sess.close()
def setUp(self):
self.sess = tf.Session()
def _check_weighted_layer(self, l2_coef, do_layer_norm, use_top_only):
# create the Batcher
vocab_file = os.path.join(FIXTURES, 'vocab_test.txt')
batcher = Batcher(vocab_file, 50)
# load the model
options_file = os.path.join(FIXTURES, 'options.json')
weight_file = os.path.join(FIXTURES, 'lm_weights.hdf5')
character_ids = tf.placeholder('int32', (None, None, 50))
model = BidirectionalLanguageModel(
options_file, weight_file, max_batch_size=4)
bilm_ops = model(character_ids)
weighted_ops = []
for k in range(2):
ops = weight_layers(str(k), bilm_ops, l2_coef=l2_coef,
do_layer_norm=do_layer_norm,
use_top_only=use_top_only)
weighted_ops.append(ops)
# initialize
self.sess.run(tf.global_variables_initializer())
n_expected_trainable_weights = 2 * (1 + int(not use_top_only))
self.assertEqual(len(tf.trainable_variables()),
n_expected_trainable_weights)
# and one regularizer per weighted layer
n_expected_reg_losses = 2 * int(not use_top_only)
self.assertEqual(
len(tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)),
n_expected_reg_losses,
)
# Set the variables.
weights = [[np.array([0.1, 0.3, 0.5]), np.array([1.1])],
[np.array([0.2, 0.4, 0.6]), np.array([0.88])]]
for k in range(2):
with tf.variable_scope('', reuse=True):
if not use_top_only:
W = tf.get_variable('{}_ELMo_W'.format(k))
_ = self.sess.run([W.assign(weights[k][0])])
gamma = tf.get_variable('{}_ELMo_gamma'.format(k))
_ = self.sess.run([gamma.assign(weights[k][1])])
# make some data
sentences = [
['The', 'first', 'sentence', '.'],
['The', 'second'],
['Third']
]
X_chars = batcher.batch_sentences(sentences)
ops = model(character_ids)
lm_embeddings, mask, weighted0, weighted1 = self.sess.run(
[ops['lm_embeddings'], ops['mask'],
weighted_ops[0]['weighted_op'], weighted_ops[1]['weighted_op']],
feed_dict={character_ids: X_chars}
)
actual_elmo = [weighted0, weighted1]
# check the mask first
expected_mask = [[True, True, True, True],
[True, True, False, False],
[True, False, False, False]]
self.assertTrue((expected_mask == mask).all())
# Now compute the actual weighted layers
for k in range(2):
normed_weights = np.exp(weights[k][0] + 1.0 / 3) / np.sum(
np.exp(weights[k][0] + 1.0 / 3))
# masked layer normalization
expected_elmo = np.zeros((3, 4, lm_embeddings.shape[-1]))
if not use_top_only:
for j in range(3): # number of LM layers
if do_layer_norm:
mean = np.mean(lm_embeddings[:, j, :, :][mask])
std = np.std(lm_embeddings[:, j, :, :][mask])
normed_lm_embed = (lm_embeddings[:, j, :, :] - mean) / (
std + 1E-12)
expected_elmo += normed_weights[j] * normed_lm_embed
else:
expected_elmo += normed_weights[j] * lm_embeddings[
:, j, :, :]
else:
expected_elmo += lm_embeddings[:, -1, :, :]
# the scale parameter
expected_elmo *= weights[k][1]
self.assertTrue(
np.allclose(expected_elmo, actual_elmo[k], atol=1e-6)
)
def test_weighted_layers(self):
self._check_weighted_layer(1.0, do_layer_norm=True, use_top_only=False)
def test_weighted_layers_no_norm(self):
self._check_weighted_layer(1.0, do_layer_norm=False, use_top_only=False)
def test_weighted_layers_top_only(self):
self._check_weighted_layer(None, do_layer_norm=False, use_top_only=True)
if __name__ == '__main__':
unittest.main()
| bilm-tf-master | tests/test_elmo.py |
import unittest
import os
import json
import h5py
import tempfile
import shutil
import numpy as np
import tensorflow as tf
from bilm.model import BidirectionalLanguageModel, dump_token_embeddings
from bilm.data import Batcher, TokenBatcher
FIXTURES = 'tests/fixtures/model/'
def _load_sentences_embeddings():
# get the raw data
with open(os.path.join(FIXTURES,
'lm_embeddings_sentences.json')) as fin:
sentences = json.load(fin)
# the expected embeddings
expected_lm_embeddings = []
for k in range(len(sentences)):
embed_fname = os.path.join(
FIXTURES, 'lm_embeddings_{}.hdf5'.format(k)
)
expected_lm_embeddings.append([])
with h5py.File(embed_fname, 'r') as fin:
for i in range(10):
sent_embeds = fin['%s' % i][...]
sent_embeds_concat = np.concatenate(
(sent_embeds[0, :, :], sent_embeds[1, :, :]),
axis=-1
)
expected_lm_embeddings[-1].append(sent_embeds_concat)
return sentences, expected_lm_embeddings
class TestBidirectionalLanguageModel(unittest.TestCase):
def setUp(self):
self.sess = tf.Session()
def tearDown(self):
tf.reset_default_graph()
self.sess.close()
def test_bilm(self):
sentences, expected_lm_embeddings = _load_sentences_embeddings()
# create the Batcher
vocab_file = os.path.join(FIXTURES, 'vocab_test.txt')
batcher = Batcher(vocab_file, 50)
# load the model
options_file = os.path.join(FIXTURES, 'options.json')
weight_file = os.path.join(FIXTURES, 'lm_weights.hdf5')
character_ids = tf.placeholder('int32', (None, None, 50))
model = BidirectionalLanguageModel(options_file, weight_file,
max_batch_size=4)
# get the ops to compute embeddings
ops = model(character_ids)
# initialize
self.sess.run(tf.global_variables_initializer())
# We shouldn't have any trainable variables
self.assertEqual(len(tf.trainable_variables()), 0)
# will run 10 batches of 3 sentences
for i in range(10):
# make a batch of sentences
batch_sentences = []
for k in range(3):
sentence = sentences[k][i].strip().split()
batch_sentences.append(sentence)
X = batcher.batch_sentences(batch_sentences)
lm_embeddings, lengths = self.sess.run(
[ops['lm_embeddings'], ops['lengths']],
feed_dict={character_ids: X}
)
actual_lengths = [len(sent) for sent in batch_sentences]
self.assertEqual(actual_lengths, list(lengths))
# get the expected embeddings and compare!
expected_y = [expected_lm_embeddings[k][i] for k in range(3)]
for k in range(3):
self.assertTrue(
np.allclose(
lm_embeddings[k, 2, :lengths[k], :],
expected_y[k],
atol=1.0e-6
)
)
# Finally, check that the states are being updated properly.
# All batches were size=3, so last element of states should always
# be zero.
third_states = []
for direction in ['forward', 'backward']:
states = self.sess.run(
model._graphs[character_ids].lstm_init_states[direction]
)
for i in range(2):
for state in states[i]:
self.assertTrue(np.sum(np.abs(state[-1, :])) < 1e-7)
third_states.append(state[2, :])
# Run a batch with size=2, the third state should not have been updated
_ = self.sess.run(
ops['lm_embeddings'],
feed_dict={character_ids: np.ones((2, 5, 50), dtype=np.int32)}
)
k = 0
for direction in ['forward', 'backward']:
states = self.sess.run(
model._graphs[character_ids].lstm_init_states[direction]
)
for i in range(2):
for state in states[i]:
self.assertTrue(
np.allclose(third_states[k], state[2, :], atol=1e-6)
)
k += 1
class TestBidirectionalLanguageModelTokenInput(unittest.TestCase):
def setUp(self):
self.sess = tf.Session()
self.tmp_dir = tempfile.mkdtemp()
def tearDown(self):
tf.reset_default_graph()
self.sess.close()
shutil.rmtree(self.tmp_dir)
def test_bilm_token(self):
sentences, expected_lm_embeddings = _load_sentences_embeddings()
options_file = os.path.join(FIXTURES, 'options.json')
weight_file = os.path.join(FIXTURES, 'lm_weights.hdf5')
# We'll create a vocabulary file with all unique tokens,
# dump the token embeddings, use them in the bilm, then compare
# to actual embeddings.
# Create a vocab file with all unique tokens.
all_tokens = set(['<S>', '</S>'])
for row in sentences:
for sentence in row:
for token in sentence.strip().split():
all_tokens.add(token)
vocab_file = os.path.join(self.tmp_dir, 'vocab_file.txt')
with open(vocab_file, 'w') as fout:
fout.write('\n'.join(all_tokens))
# Dump the token embeddings and clear the session.
embedding_weight_file = os.path.join(self.tmp_dir, 'embeddings.hdf5')
dump_token_embeddings(
vocab_file, options_file, weight_file, embedding_weight_file
)
tf.reset_default_graph()
self.sess.close()
self.sess = tf.Session()
# create the Batcher
batcher = TokenBatcher(vocab_file)
# load the model
token_ids = tf.placeholder('int32', (None, None))
model = BidirectionalLanguageModel(
options_file, weight_file,
use_character_inputs=False,
embedding_weight_file=embedding_weight_file,
max_batch_size=4
)
# get the ops to compute embeddings
ops = model(token_ids)
# initialize
self.sess.run(tf.global_variables_initializer())
# We shouldn't have any trainable variables
self.assertEqual(len(tf.trainable_variables()), 0)
# will run 10 batches of 3 sentences
for i in range(10):
# make a batch of sentences
batch_sentences = []
for k in range(3):
sentence = sentences[k][i].strip().split()
batch_sentences.append(sentence)
X = batcher.batch_sentences(batch_sentences)
lm_embeddings, lengths = self.sess.run(
[ops['lm_embeddings'], ops['lengths']],
feed_dict={token_ids: X}
)
actual_lengths = [len(sent) for sent in batch_sentences]
self.assertEqual(actual_lengths, list(lengths))
# get the expected embeddings and compare!
expected_y = [expected_lm_embeddings[k][i] for k in range(3)]
for k in range(3):
self.assertTrue(
np.allclose(
lm_embeddings[k, 2, :lengths[k], :],
expected_y[k],
atol=1.0e-6
)
)
if __name__ == '__main__':
unittest.main()
| bilm-tf-master | tests/test_model.py |
import unittest
import os
import shutil
import tempfile
import tensorflow as tf
import numpy as np
from bilm.training import train, test, load_vocab, \
load_options_latest_checkpoint
from bilm.data import LMDataset, BidirectionalLMDataset
FIXTURES = 'tests/fixtures/train/'
class TestLanguageModel(unittest.TestCase):
def tearDown(self):
shutil.rmtree(self.tmp_dir)
os.remove(self.tmp_file)
tf.reset_default_graph()
def setUp(self):
self.tmp_dir = tempfile.mkdtemp()
(_, self.tmp_file) = tempfile.mkstemp()
def _get_data(self, bidirectional, use_chars, test=False):
vocab_file = os.path.join(FIXTURES, 'vocab.txt')
if use_chars:
vocab = load_vocab(vocab_file, 10)
else:
vocab = load_vocab(vocab_file, None)
prefix = os.path.join(FIXTURES, 'data.txt')
if bidirectional:
data = BidirectionalLMDataset(prefix, vocab, test=test)
else:
data = LMDataset(prefix, vocab, test=test, reverse=False)
return data, vocab
def _get_vocab_data_options(self, bidirectional, use_chars,
share_embedding_softmax=False):
data, vocab = self._get_data(bidirectional, use_chars)
options = {
'n_tokens_vocab': vocab.size,
'n_negative_samples_batch': 16,
'n_train_tokens': 134,
'batch_size': 2,
'unroll_steps': 10,
'n_epochs': 50,
'all_clip_norm_val': 1.0,
'dropout': 0.1,
'lstm': {'dim': 16, 'projection_dim': 8, 'n_layers': 2},
'bidirectional': bidirectional,
}
if use_chars:
options['char_cnn'] = {
'n_characters': 261,
'max_characters_per_token': 10,
'filters': [
[1, 8],
[2, 8],
[3, 16],
[4, 32],
[5, 64],
],
'activation': 'tanh',
'embedding': {'dim': 4},
'n_highway': 1,
}
if share_embedding_softmax:
options['share_embedding_softmax'] = True
return vocab, data, options
def test_train_single_direction(self):
vocab, data, options = self._get_vocab_data_options(False, False)
train(options, data, 1, self.tmp_dir, self.tmp_dir)
# now test
tf.reset_default_graph()
options, ckpt_file = load_options_latest_checkpoint(self.tmp_dir)
data_test, vocab_test = self._get_data(False, False, True)
perplexity = test(options, ckpt_file, data_test, batch_size=1)
self.assertTrue(perplexity < 20.0)
def test_train_bilm_chars(self):
vocab, data, options = self._get_vocab_data_options(True, True)
train(options, data, 1, self.tmp_dir, self.tmp_dir)
# now test
tf.reset_default_graph()
options, ckpt_file = load_options_latest_checkpoint(self.tmp_dir)
data_test, vocab_test = self._get_data(True, True, True)
perplexity = test(options, ckpt_file, data_test, batch_size=1)
self.assertTrue(perplexity < 20.0)
def test_shared_variables(self):
vocab, data, options = self._get_vocab_data_options(True, True)
options['n_epochs'] = 1
train(options, data, 2, self.tmp_dir, self.tmp_dir)
self.assertEqual(len(tf.global_variables()), 64)
def test_train_shared_softmax_embedding(self):
bidirectional = True
use_chars = False
vocab, data, options = self._get_vocab_data_options(
bidirectional, use_chars, share_embedding_softmax=True)
train(options, data, 1, self.tmp_dir, self.tmp_dir)
# now test
tf.reset_default_graph()
options, ckpt_file = load_options_latest_checkpoint(self.tmp_dir)
data_test, vocab_test = self._get_data(
bidirectional, use_chars, test=True)
perplexity = test(options, ckpt_file, data_test, batch_size=1)
self.assertTrue(perplexity < 20.0)
def test_train_shared_softmax_no_chars(self):
bidirectional = True
use_chars = True
vocab, data, options = self._get_vocab_data_options(
bidirectional, use_chars, share_embedding_softmax=True)
        # character inputs and sharing weights not supported
with self.assertRaises(ValueError):
train(options, data, 1, self.tmp_dir, self.tmp_dir)
def test_train_skip_connections(self):
bidirectional = True
use_chars = False
vocab, data, options = self._get_vocab_data_options(
bidirectional, use_chars)
options['lstm']['use_skip_connections'] = True
train(options, data, 1, self.tmp_dir, self.tmp_dir)
# now test
tf.reset_default_graph()
options, ckpt_file = load_options_latest_checkpoint(self.tmp_dir)
data_test, vocab_test = self._get_data(
bidirectional, use_chars, test=True)
perplexity = test(options, ckpt_file, data_test, batch_size=1)
self.assertTrue(perplexity < 20.0)
if __name__ == '__main__':
unittest.main()
| bilm-tf-master | tests/test_training.py |
import unittest
import tempfile
import os
import numpy as np
from bilm.data import UnicodeCharsVocabulary, Vocabulary, \
Batcher, TokenBatcher, LMDataset, BidirectionalLMDataset
DATA_FIXTURES = 'tests/fixtures/data/'
TRAIN_FIXTURES = 'tests/fixtures/train/'
class TestVocabulary(unittest.TestCase):
def setUp(self):
words = ['<S>', '</S>', '<UNK>', 'the', '.']
(_, tmp) = tempfile.mkstemp()
with open(tmp, 'w') as fout:
fout.write('\n'.join(words))
self._tmp = tmp
def test_vocab_encode(self):
vocab = Vocabulary(self._tmp)
sentence = 'the unknown .'
ids = vocab.encode(sentence)
expected = np.array([0, 3, 2, 4, 1], dtype=np.int32)
self.assertTrue((ids == expected).all())
def test_vocab_encode_reverse(self):
vocab = Vocabulary(self._tmp)
sentence = '. unknown the'
ids = vocab.encode(sentence, reverse=True)
expected = np.array([1, 4, 2, 3, 0], dtype=np.int32)
self.assertTrue((ids == expected).all())
def tearDown(self):
os.remove(self._tmp)
class TestUnicodeCharsVocabulary(unittest.TestCase):
def setUp(self):
words = ['the', '.', chr(256) + 't', '<S>', '</S>', '<UNK>']
(_, tmp) = tempfile.mkstemp()
with open(tmp, 'w') as fout:
fout.write('\n'.join(words))
self.vocab = UnicodeCharsVocabulary(tmp, 5)
self._tmp = tmp
def test_vocab_word_to_char_ids(self):
char_ids = self.vocab.word_to_char_ids('th')
expected = np.array([258, 116, 104, 259, 260], dtype=np.int32)
self.assertTrue((char_ids == expected).all())
char_ids = self.vocab.word_to_char_ids('thhhhh')
expected = np.array([258, 116, 104, 104, 259])
self.assertTrue((char_ids == expected).all())
char_ids = self.vocab.word_to_char_ids(chr(256) + 't')
expected = np.array([258, 196, 128, 116, 259], dtype=np.int32)
self.assertTrue((char_ids == expected).all())
def test_bos_eos(self):
bos_ids = self.vocab.word_to_char_ids('<S>')
self.assertTrue((bos_ids == self.vocab.bos_chars).all())
bos_ids = self.vocab.word_char_ids[self.vocab.word_to_id('<S>')]
self.assertTrue((bos_ids == self.vocab.bos_chars).all())
eos_ids = self.vocab.word_to_char_ids('</S>')
self.assertTrue((eos_ids == self.vocab.eos_chars).all())
eos_ids = self.vocab.word_char_ids[self.vocab.word_to_id('</S>')]
self.assertTrue((eos_ids == self.vocab.eos_chars).all())
def test_vocab_encode_chars(self):
sentence = ' '.join(['th', 'thhhhh', chr(256) + 't'])
char_ids = self.vocab.encode_chars(sentence)
expected = np.array(
[[258, 256, 259, 260, 260],
[258, 116, 104, 259, 260],
[258, 116, 104, 104, 259],
[258, 196, 128, 116, 259],
[258, 257, 259, 260, 260]], dtype=np.int32)
self.assertTrue((char_ids == expected).all())
def test_vocab_encode_chars_reverse(self):
sentence = ' '.join(reversed(['th', 'thhhhh', chr(256) + 't']))
vocab = UnicodeCharsVocabulary(self._tmp, 5)
char_ids = vocab.encode_chars(sentence, reverse=True)
expected = np.array(
[[258, 256, 259, 260, 260],
[258, 116, 104, 259, 260],
[258, 116, 104, 104, 259],
[258, 196, 128, 116, 259],
[258, 257, 259, 260, 260]], dtype=np.int32)[::-1, :]
self.assertTrue((char_ids == expected).all())
def tearDown(self):
os.remove(self._tmp)
class TestBatcher(unittest.TestCase):
def setUp(self):
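        # Expected Batcher output for the two sentences in test_batch_sentences:
        # shape (2 sentences, longest sentence + 2 for the <S>/</S> markers,
        # 50 character ids per token); the shorter sentence gets a final row
        # of zero padding.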
self._expected_char_ids = np.array(
[
[
[
259, 257, 260, 261, 261, 261, 261, 261, 261,
261, 261, 261, 261, 261, 261, 261, 261, 261,
261, 261, 261, 261, 261, 261, 261, 261, 261,
261, 261, 261, 261, 261, 261, 261, 261, 261,
261, 261, 261, 261, 261, 261, 261, 261, 261,
261, 261, 261, 261, 261
], [
259, 85, 105, 102, 260, 261, 261, 261, 261,
261, 261, 261, 261, 261, 261, 261, 261, 261,
261, 261, 261, 261, 261, 261, 261, 261, 261,
261, 261, 261, 261, 261, 261, 261, 261, 261,
261, 261, 261, 261, 261, 261, 261, 261, 261,
261, 261, 261, 261, 261
], [
259, 103, 106, 115, 116, 117, 260, 261, 261,
261, 261, 261, 261, 261, 261, 261, 261, 261,
261, 261, 261, 261, 261, 261, 261, 261, 261,
261, 261, 261, 261, 261, 261, 261, 261, 261,
261, 261, 261, 261, 261, 261, 261, 261, 261,
261, 261, 261, 261, 261
], [
259, 116, 102, 111, 117, 102, 111, 100, 102,
260, 261, 261, 261, 261, 261, 261, 261, 261,
261, 261, 261, 261, 261, 261, 261, 261, 261,
261, 261, 261, 261, 261, 261, 261, 261, 261,
261, 261, 261, 261, 261, 261, 261, 261, 261,
261, 261, 261, 261, 261
], [
259, 258, 260, 261, 261, 261, 261, 261, 261,
261, 261, 261, 261, 261, 261, 261, 261, 261,
261, 261, 261, 261, 261, 261, 261, 261, 261,
261, 261, 261, 261, 261, 261, 261, 261, 261,
261, 261, 261, 261, 261, 261, 261, 261, 261,
261, 261, 261, 261, 261
]
], [
[
259, 257, 260, 261, 261, 261, 261, 261, 261,
261, 261, 261, 261, 261, 261, 261, 261, 261,
261, 261, 261, 261, 261, 261, 261, 261, 261,
261, 261, 261, 261, 261, 261, 261, 261, 261,
261, 261, 261, 261, 261, 261, 261, 261, 261,
261, 261, 261, 261, 261
], [
259, 84, 102, 100, 112, 111, 101, 260, 261,
261, 261, 261, 261, 261, 261, 261, 261, 261,
261, 261, 261, 261, 261, 261, 261, 261, 261,
261, 261, 261, 261, 261, 261, 261, 261, 261,
261, 261, 261, 261, 261, 261, 261, 261, 261,
261, 261, 261, 261, 261
], [
259, 47, 260, 261, 261, 261, 261, 261, 261,
261, 261, 261, 261, 261, 261, 261, 261, 261,
261, 261, 261, 261, 261, 261, 261, 261, 261,
261, 261, 261, 261, 261, 261, 261, 261, 261,
261, 261, 261, 261, 261, 261, 261, 261, 261,
261, 261, 261, 261, 261
], [
259, 258, 260, 261, 261, 261, 261, 261, 261,
261, 261, 261, 261, 261, 261, 261, 261, 261,
261, 261, 261, 261, 261, 261, 261, 261, 261,
261, 261, 261, 261, 261, 261, 261, 261, 261,
261, 261, 261, 261, 261, 261, 261, 261, 261,
261, 261, 261, 261, 261
], [
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0
]
]
]
)
def test_batch_sentences(self):
batcher = Batcher(os.path.join(DATA_FIXTURES, 'vocab_test.txt'), 50)
sentences = [['The', 'first', 'sentence'], ['Second', '.']]
x_char_ids = batcher.batch_sentences(sentences)
self.assertTrue((x_char_ids == self._expected_char_ids).all())
class TestTokenBatcher(unittest.TestCase):
def test_token_batcher(self):
batcher = TokenBatcher(os.path.join(DATA_FIXTURES, 'vocab_test.txt'))
sentences = [['The', 'first', '.'], ['It', 'said']]
x_token_ids = batcher.batch_sentences(sentences)
expected_ids = np.array([
[2, 18, 75, 6, 1],
[2, 67, 21, 1, 0]
])
self.assertTrue((x_token_ids == expected_ids).all())
class TestLMDataset(unittest.TestCase):
def setUp(self):
sentences = ['the unknown .', 'th .', 'the']
(_, tmp_train) = tempfile.mkstemp()
with open(tmp_train, 'w') as fout:
fout.write('\n'.join(sentences))
words = ['<S>', '</S>', '<UNK>', 'the', '.', chr(256) + 't']
(_, tmp_vocab) = tempfile.mkstemp()
with open(tmp_vocab, 'w') as fout:
fout.write('\n'.join(words))
self._tmp_train = tmp_train
self._tmp_vocab = tmp_vocab
def _expected(self, reverse, chars, bidirectional=False):
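        # Hand-constructed reference batches for the fixture sentences in
        # setUp: 'tokens_characters' holds padded character ids, 'token_ids'
        # the input word ids, and 'next_token_id' the language-model targets
        # (the next token in the, possibly reversed, stream).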
ret_forward = [
{'tokens_characters':
np.array(
[[[258, 256, 259, 260, 260],
[258, 116, 104, 101, 259],
[258, 117, 110, 107, 259]],
[[258, 256, 259, 260, 260],
[258, 116, 104, 259, 260],
[258, 46, 259, 260, 260]]], dtype=np.int32),
'token_ids':
np.array(
[[0, 3, 2],
[0, 2, 4]], dtype=np.int32),
'next_token_id':
np.array(
[[3, 2, 4],
[2, 4, 1]], dtype=np.int32)},
{'tokens_characters':
np.array(
[[[258, 46, 259, 260, 260],
[258, 256, 259, 260, 260],
[258, 116, 104, 101, 259]],
[[258, 256, 259, 260, 260],
[258, 116, 104, 101, 259],
[258, 117, 110, 107, 259]]], dtype=np.int32),
'token_ids':
np.array(
[[4, 0, 3],
[0, 3, 2]], dtype=np.int32),
'next_token_id':
np.array(
[[1, 3, 1],
[3, 2, 4]], dtype=np.int32)}]
ret_reverse = [
{'tokens_characters': np.array([[[258, 257, 259, 260, 260],
[258, 46, 259, 260, 260],
[258, 117, 110, 107, 259]],
[[258, 257, 259, 260, 260],
[258, 46, 259, 260, 260],
[258, 116, 104, 259, 260]]], dtype=np.int32),
'next_token_id': np.array([[4, 2, 3],
[4, 2, 0]], dtype=np.int32),
'token_ids': np.array([[1, 4, 2],
[1, 4, 2]], dtype=np.int32)},
{'tokens_characters': np.array([[[258, 116, 104, 101, 259],
[258, 257, 259, 260, 260],
[258, 116, 104, 101, 259]],
[[258, 257, 259, 260, 260],
[258, 46, 259, 260, 260],
[258, 117, 110, 107, 259]]], dtype=np.int32),
'next_token_id': np.array([[0, 3, 0],
[4, 2, 3]], dtype=np.int32),
'token_ids': np.array([[3, 1, 3],
[1, 4, 2]], dtype=np.int32)}]
if bidirectional:
expected = []
for f, r in zip(ret_forward, ret_reverse):
batch = dict(f)
for k, v in r.items():
batch[k + '_reverse'] = v
expected.append(batch)
elif reverse:
expected = ret_reverse
else:
expected = ret_forward
if not chars:
# set 'tokens_characters' key to None
ret = []
for e in expected:
e['tokens_characters'] = None
if 'tokens_characters_reverse' in e:
e['tokens_characters_reverse'] = None
ret.append(e)
else:
ret = expected
return ret
def _load_data(self, reverse, chars, bidirectional=False):
if chars:
vocab = UnicodeCharsVocabulary(self._tmp_vocab, 5)
else:
vocab = Vocabulary(self._tmp_vocab)
if not bidirectional:
data = LMDataset(self._tmp_train, vocab, reverse=reverse)
else:
data = BidirectionalLMDataset(self._tmp_train, vocab)
return data
def _compare(self, actual, expected):
self.assertEqual(len(actual), len(expected))
for a, e in zip(actual, expected):
self.assertEqual(sorted(list(a.keys())), sorted(list(e.keys())))
for k in a.keys():
if a[k] is not None:
self.assertTrue(np.all(a[k] == e[k]))
else:
self.assertEqual(a[k], e[k])
def _get_batches(self, *args, **kwargs):
data = self._load_data(*args, **kwargs)
batches = []
np.random.seed(5)
for i, batch in enumerate(data.iter_batches(2, 3)):
batches.append(batch)
if i == 1:
break
return batches
def test_lm_dataset(self):
batches = self._get_batches(False, True)
expected = self._expected(False, True)
self._compare(expected, batches)
def test_lm_dataset_reverse(self):
batches = self._get_batches(True, True)
expected = self._expected(True, True)
self._compare(expected, batches)
def test_lm_dataset_no_char(self):
batches = self._get_batches(False, False)
expected = self._expected(False, False)
self._compare(expected, batches)
def test_lm_dataset_no_char_reverse(self):
batches = self._get_batches(True, False)
expected = self._expected(True, False)
self._compare(expected, batches)
def test_bi_lm_dataset(self):
for a1 in [True, False]:
for a2 in [True, False]:
batches = self._get_batches(a1, a2, True)
expected = self._expected(a1, a2, True)
self._compare(expected, batches)
if __name__ == '__main__':
unittest.main()
| bilm-tf-master | tests/test_data.py |
import argparse
import functools
import os
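# NOTE: forcing the GNU threading layer is presumably a workaround for
# MKL/OpenMP conflicts when numpy and torch are mixed; it must be set before
# the heavy imports below.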
os.environ['MKL_THREADING_LAYER'] = 'GNU'
from omegaconf import OmegaConf
from src.lightning.trainers.moco2_trainer import MocoV2Trainer
# set default of print to flush
# print = functools.partial(print, flush=True)
def train(conf_path):
conf = OmegaConf.load(conf_path)
print(OmegaConf.to_yaml(conf))
if not os.path.exists(conf.checkpoint_path):
os.mkdir(conf.checkpoint_path)
rt = MocoV2Trainer(conf)
rt.run()
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description='Embodied Scene Representations (ESR)')
parser.add_argument('--conf', required=True, type=str,
help='configuration file to run an experiment')
args = parser.parse_args()
train(args.conf)
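    # Example invocation (the config path is illustrative, not from the repo):
    #   python train_csr.py --conf configs/moco2.yaml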
| CSR-main | train_csr.py |
import argparse
import json
import os
import numpy as np
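# Aggregates the per-episode result_*.json files written by the rearrangement
# evaluation in --metrics-dir and prints summary statistics (episode count,
# success rate, strict proportion fixed, and energy proportion).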
def create_table(args):
metric_dir = args.metrics_dir
success = []
num_no_change_energy = 0
prop_fixed_strict = []
energy_prop = []
num_changed = []
    atomic_success_walkthrough = []
precision_w = []
atomic_success_unshuffle = []
precision_un = []
missed_detection_ratio = []
errors = {}
histogram = {}
total = 0
made_worse = 0
success_ids = []
for filename in os.listdir(metric_dir):
if filename.endswith(".json") and filename.startswith('result'):
raw_metrics = {}
with open(os.path.join(metric_dir, filename), 'r') as f:
raw_metrics = json.load(f)
if 'error' not in raw_metrics:
energy_prop.append(raw_metrics['energy_prop'])
if raw_metrics['energy_prop'] > 1.0:
made_worse += 1
missed_detection_ratio.append(len(raw_metrics['objects_undetected_either']) / raw_metrics['object_count'])
for o in raw_metrics['objects_undetected_either']:
class_name = o.split('_')[0]
if class_name in histogram:
histogram[class_name] += 1
else:
histogram[class_name] = 1
prop_fixed_strict.append(raw_metrics['prop_fixed_strict'])
success.append(raw_metrics['success'])
if raw_metrics['success']:
_, room_id, instance_id = filename.split('.')[0].split('_')
success_ids.append([int(room_id), int(instance_id)])
num_changed.append(raw_metrics['num_changed'])
atomic_success_walkthrough.append(raw_metrics['atomic_success_walkthrough'])
atomic_success_unshuffle.append(raw_metrics['atomic_success_unshuffle'])
precision_w.append(raw_metrics['adjusted_rand_walkthrough'])
precision_un.append(raw_metrics['adjusted_rand_unshuffle'])
if raw_metrics['change_energy'] == 0.0:
num_no_change_energy += 1
else:
errors[filename.split('.json')[0]] = raw_metrics['error']
total += 1
print(f'run: {metric_dir}')
print(f'total evals: {total}')
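    # successes are normalized by the total number of result files, including
    # episodes that errored out and therefore never reached the success list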
print(f'success: {np.mean(success) * (len(success) / total)}')
print(f'prop fixed strict: {np.mean(prop_fixed_strict)}')
print(f'energy prop: {np.mean(energy_prop)}')
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description='Data generation for Embodied Scene Representations (ESR)')
parser.add_argument('--metrics-dir', required=True, action='store', type=str)
args = parser.parse_args()
create_table(args)
| CSR-main | aggrigate_metrics.py |
from allenact.base_abstractions.misc import ActorCriticOutput, Memory
from allenact_plugins.ithor_plugin.ithor_sensors import RGBSensorThor
from allenact.base_abstractions.sensor import SensorSuite
from allenact.algorithms.onpolicy_sync.storage import RolloutStorage
from ray.util.queue import Queue
import time
import numpy as np
from src.dataloaders.roomr_dataset_utils import get_rearrange_task_spec
from src.models.exploration_model import init_exploration_model
from src.simulation.environment import RearrangeTHOREnvironment
from src.simulation.rearrange_utils import load_rearrange_data_from_path
from src.simulation.rearrangement_args import RearrangementArgs
from src.shared.constants import (IMAGE_SIZE, TEST_ROOM_IDS, TRAIN_ROOM_IDS,
VAL_ROOM_IDS)
from src.simulation.constants import ACTION_NEGATIONS, EXPLORATION_ACTION_ORDER, ROOMR_CONTROLLER_COMMIT_ID
import src.dataloaders.augmentations as A
from pytorch_lightning import seed_everything
import argparse
import json
import os
from random import shuffle
import shutil
from itertools import product
from typing import List, Tuple, cast
from torch.distributions.categorical import Categorical
from PIL import Image
import ray
ray.init(num_gpus=1)
seed_everything(0)
@ray.remote(num_gpus=1)
def cache_trajectory(
queue,
data
):
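    """Roll out the pretrained exploration policy for one RoomR episode, first
    in the walkthrough phase and then after shuffling (unshuffle phase), and
    dump the recorded action trajectories to cache_<room_id>_<instance_id>.json
    in the configured dump directory."""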
try:
model = None
rearrangement_args = queue.get(block=False)
env = RearrangeTHOREnvironment(
force_cache_reset=False,
controller_kwargs={
'commit_id': ROOMR_CONTROLLER_COMMIT_ID,
'height': IMAGE_SIZE,
'width': IMAGE_SIZE,
'renderInstanceSegmentation': False,
'renderDepthImage': False,
'visibilityDistance': 1.5,
'quality': "Very Low"})
trajectories = {}
trajectories['walkthrough'] = []
trajectories['unshuffle'] = []
try:
# ray.get_gpu_ids()[0]
rearrangement_args.device_relation_tracking = 0
times = []
for i in range(2):
seed_everything(0)
model = init_exploration_model(
rearrangement_args.exploration_model_path)
task_spec = get_rearrange_task_spec(
data, f'FloorPlan{rearrangement_args.room_id}', rearrangement_args.instance_id, rearrangement_args.data_split)
env.reset(task_spec, force_axis_aligned_start=True)
label = 'walkthrough'
if i == 1:
env.shuffle()
label = 'unshuffle'
rollout_storage = RolloutStorage(
num_steps=1,
num_samplers=1,
actor_critic=model,
only_store_first_and_last_in_memory=True,
)
memory = rollout_storage.pick_memory_step(0)
tmp = memory["rnn"][1]
memory["rnn"] = (memory["rnn"][0].cuda(), tmp)
memory.tensor("rnn").cuda()
masks = rollout_storage.masks[:1]
masks = 0 * masks
masks = masks.cuda()
# rollout walkthrough traj
count = 0
last_action = None
while 1:
observation = {
'image': env.controller.last_event.frame.copy()}
A.TestTransform(observation)
observation['image'] = observation['image'].permute(
1, 2, 0).unsqueeze(0).unsqueeze(0).to(0)
tic = time.perf_counter()
ac_out, memory = cast(
Tuple[ActorCriticOutput, Memory],
model.forward(
observations=observation,
memory=memory,
prev_actions=None,
masks=masks,
),
)
toc = time.perf_counter()
# print(f"eval {toc - tic:0.4f} seconds")
times.append(toc - tic)
masks.fill_(1)
action_success = False
dist = Categorical(ac_out.distributions.probs)
while not action_success:
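                    # If the last two recorded actions negate each other
                    # (e.g. a move followed by its opposite), mask out the
                    # earlier action so the agent does not keep oscillating
                    # (interpretation of the ACTION_NEGATIONS check below).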
if len(trajectories[label]) > 2:
if ACTION_NEGATIONS[EXPLORATION_ACTION_ORDER[trajectories[label][-2]]] == EXPLORATION_ACTION_ORDER[trajectories[label][-1]]:
dist.probs[0][0][trajectories[label][-2]] = 0.0
action_num = dist.sample().item()
action = EXPLORATION_ACTION_ORDER[action_num]
action_dict = {}
action_dict['action'] = action
sr = env.controller.step(action_dict)
count += 1
action_success = sr.metadata['lastActionSuccess']
if action_success:
trajectories[label].append(action_num)
else:
# modify the distribution
dist.probs[0][0][action_num] = 0.0
assert len(trajectories[label]) < 250
if count == 249:
break
assert len(trajectories[label]) < 250
if count == 249:
break
except Exception as e:
trajectories = {'error': str(e)}
print(trajectories)
print(f'FloorPlan{rearrangement_args.room_id}')
print(rearrangement_args.instance_id)
print('-'*20)
print(np.mean(times))
room_id = rearrangement_args.room_id
instance_id = rearrangement_args.instance_id
try:
with open(os.path.join(rearrangement_args.dump_dir, f'cache_{room_id}_{instance_id}.json'), 'w') as f:
json.dump(trajectories, f)
except Exception as e:
print('WRITE FAILED')
print(e)
return True
    except Exception:
return False
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description='ai2thor + ray validation script')
parser.add_argument('--batch-size', required=False, type=int, default=1,
help='number of rendering jobs to execute in parallel')
parser.add_argument('--config-dir-path', required=True, type=str,
help='configuration specifying how to run experiments')
parser.add_argument('--subset', required=False, action='store_true')
args = parser.parse_args()
for j in os.listdir(args.config_dir_path):
config_dict = {}
with open(os.path.join(args.config_dir_path, j), 'r') as f:
config_dict = json.load(f)
rearrangement_args = RearrangementArgs(**config_dict)
room_ids = None
if rearrangement_args.data_split == 'train':
room_ids = TRAIN_ROOM_IDS
elif rearrangement_args.data_split == 'val':
room_ids = VAL_ROOM_IDS
elif rearrangement_args.data_split == 'test':
room_ids = TEST_ROOM_IDS
else:
raise ValueError('unsupported data split')
instance_ids = [i for i in range(50)]
jobs = list(product(room_ids, instance_ids))
# shuffle the jobs so there is less correlation between gpu and task load
shuffle(jobs)
# if os.path.exists(rearrangement_args.dump_dir):
# shutil.rmtree(rearrangement_args.dump_dir)
# os.mkdir(rearrangement_args.dump_dir)
rearrangement_args_lists = []
if args.subset:
# jobs = [
# [421, 22], [21, 44],
# [425, 19], [425, 14],
# [21, 10], [424, 21],
# [421, 21], [423, 18],
# [324, 18], [221, 5],
# [324, 25], [421, 48],
# [424, 34], [225, 8]
# ]
jobs = [
[30, 37]
# [326, 49], [228, 39],
# [229, 39], [328, 21],
# [29, 10], [329, 8],
]
queue = Queue(maxsize=len(jobs))
tasks = []
for i, (room_id, instance_id) in enumerate(jobs):
# NOTE: assuming 8 gpu machine
# device_num = i % 8
rearrangement_args = RearrangementArgs(**config_dict)
rearrangement_args.room_id = room_id
rearrangement_args.instance_id = instance_id
# rearrangement_args.device_relation_tracking = device_num
tasks.append(rearrangement_args)
[queue.put(t) for t in tasks]
thread_tasks = []
data = load_rearrange_data_from_path(
rearrangement_args.data_split, rearrangement_args.roomr_dir)
# Start batch_size tasks.
remaining_ids = [cache_trajectory.remote(
queue, data) for _ in range(min(args.batch_size, len(jobs)))]
# Whenever one task finishes, start a new one.
for _ in range(len(tasks)):
ready_ids, remaining_ids = ray.wait(remaining_ids)
# Get the available object and do something with it.
if ray.get(ready_ids)[0]:
# Start a new task.
remaining_ids.append(cache_trajectory.remote(queue, data))
print('Done.')
| CSR-main | runner_cache_trajectories.py |
from ray.util.queue import Queue
from src.simulation.rearrangement_args import RearrangementArgs
from src.simulation.agent_roomr import AgentRoomr
from src.shared.constants import (IMAGE_SIZE, TEST_ROOM_IDS, TRAIN_ROOM_IDS,
VAL_ROOM_IDS)
from pytorch_lightning import seed_everything
import argparse
import json
import os
from random import shuffle
import shutil
from itertools import product
from typing import List
import ray
ray.init(num_gpus=8)
seed_everything(0)
@ray.remote(num_gpus=1)
def render(
queue
):
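    """Run the full walkthrough / rearrange / unshuffle pipeline for one
    episode pulled from the shared queue and write its metrics to
    results_<room_id>_<instance_id>.json in the dump directory."""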
try:
a = None
metrics = None
rearrangement_args = queue.get(block=False)
rearrangement_args.device_relation_tracking = 0 # ray.get_gpu_ids()[0]
try:
if a is None:
a = AgentRoomr(rearrangement_args)
else:
# optimization to prevent re-init of controller and torch models
a.reset(rearrangement_args=rearrangement_args)
a.walkthrough_pipeline()
a.rearrange_room()
a.unshuffle_pipeline()
metrics = a.get_metrics()
except Exception as e:
metrics = {'error': str(e), 'metrics': a.get_metrics(with_error=True)}
room_id = rearrangement_args.room_id
instance_id = rearrangement_args.instance_id
try:
with open(os.path.join(rearrangement_args.dump_dir, f'results_{room_id}_{instance_id}.json'), 'w') as f:
json.dump(metrics, f, indent=4)
except Exception as e:
# queue.put(rearrangement_args)
print('WRITE FAILED')
print(e)
print(metrics)
return True
    except Exception:
return False
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description='ai2thor + ray validation script')
parser.add_argument('--batch-size', required=False, type=int, default=8,
help='number of rendering jobs to execute in parallel')
parser.add_argument('--config-dir-path', required=False, type=str,
default='./configs_rearrangement',
help='configuration specifying how to run experiments')
parser.add_argument('--subset', required=False, action='store_true')
args = parser.parse_args()
for j in os.listdir(args.config_dir_path):
config_dict = {}
with open(os.path.join(args.config_dir_path, j), 'r') as f:
config_dict = json.load(f)
rearrangement_args = RearrangementArgs(**config_dict)
room_ids = None
if rearrangement_args.data_split == 'train':
room_ids = TRAIN_ROOM_IDS
elif rearrangement_args.data_split == 'val':
room_ids = VAL_ROOM_IDS
elif rearrangement_args.data_split == 'test':
room_ids = TEST_ROOM_IDS
else:
raise ValueError('unsupported data split')
instance_ids = [i for i in range(50)]
jobs = list(product(room_ids, instance_ids))
# shuffle the jobs so there is less correlation between gpu and task load
shuffle(jobs)
if os.path.exists(rearrangement_args.dump_dir):
shutil.rmtree(rearrangement_args.dump_dir)
os.mkdir(rearrangement_args.dump_dir)
rearrangement_args_lists = []
print(rearrangement_args)
if args.subset:
jobs = [
[421, 22], [21, 44],
[425, 19], [425, 14],
[21, 10], [424, 21],
[421, 21], [423, 18],
[324, 18], [221, 5],
[324, 25], [421, 48],
[424, 34], [225, 8]
]
queue = Queue(maxsize=len(jobs))
tasks = []
for i, (room_id, instance_id) in enumerate(jobs):
# NOTE: assuming 8 gpu machine
# device_num = i % 8
rearrangement_args = RearrangementArgs(**config_dict)
rearrangement_args.room_id = room_id
rearrangement_args.instance_id = instance_id
# rearrangement_args.device_relation_tracking = device_num
tasks.append(rearrangement_args)
[queue.put(t) for t in tasks]
# Start batch_size tasks.
remaining_ids = [render.remote(queue) for _ in range(args.batch_size)]
# Whenever one task finishes, start a new one.
while not queue.empty():
            ready_ids, remaining_ids = ray.wait(remaining_ids, num_returns=1)
# Get the available object and do something with it.
for _ in ray.get(ready_ids):
# Start a new task.
remaining_ids.append(render.remote(queue))
print('Done.')
| CSR-main | runner_eval_rearrangement.py |
import atexit
import os
import platform
import re
import shlex
import subprocess
import tempfile
# Turning off automatic black formatting for this script as it breaks quotes.
# fmt: off
def pci_records():
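    """Parse `lspci -vmm` output into a list of dicts, one per PCI device."""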
records = []
command = shlex.split("lspci -vmm")
output = subprocess.check_output(command).decode()
for devices in output.strip().split("\n\n"):
record = {}
records.append(record)
for row in devices.split("\n"):
key, value = row.split("\t")
record[key.split(":")[0]] = value
return records
def generate_xorg_conf(devices):
xorg_conf = []
device_section = """
Section "Device"
Identifier "Device{device_id}"
Driver "nvidia"
VendorName "NVIDIA Corporation"
BusID "{bus_id}"
EndSection
"""
server_layout_section = """
Section "ServerLayout"
Identifier "Layout0"
{screen_records}
EndSection
"""
screen_section = """
Section "Screen"
Identifier "Screen{screen_id}"
Device "Device{device_id}"
DefaultDepth 24
Option "AllowEmptyInitialConfiguration" "True"
SubSection "Display"
Depth 24
Virtual 1024 768
EndSubSection
EndSection
"""
screen_records = []
for i, bus_id in enumerate(devices):
xorg_conf.append(device_section.format(device_id=i, bus_id=bus_id))
xorg_conf.append(screen_section.format(device_id=i, screen_id=i))
screen_records.append('Screen {screen_id} "Screen{screen_id}" 0 0'.format(screen_id=i))
xorg_conf.append(server_layout_section.format(screen_records="\n ".join(screen_records)))
output = "\n".join(xorg_conf)
return output
def startx(display=0):
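    """Write a temporary xorg.conf with one screen per NVIDIA GPU found on the
    PCI bus and launch Xorg on the given display, enabling headless GPU
    rendering."""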
if platform.system() != "Linux":
raise Exception("Can only run startx on linux")
devices = []
for r in pci_records():
if r.get("Vendor", "") == "NVIDIA Corporation" \
and r["Class"] in ["VGA compatible controller", "3D controller"]:
bus_id = "PCI:" + ":".join(map(lambda x: str(int(x, 16)), re.split(r"[:\.]", r["Slot"])))
devices.append(bus_id)
if not devices:
raise Exception("no nvidia cards found")
try:
fd, path = tempfile.mkstemp()
with open(path, "w") as f:
f.write(generate_xorg_conf(devices))
command = shlex.split("Xorg -noreset +extension GLX +extension RANDR +extension RENDER -config %s :%s" % (path, display))
proc = subprocess.Popen(command)
atexit.register(lambda: proc.poll() is None and proc.kill())
proc.wait()
finally:
os.close(fd)
os.unlink(path)
# fmt: on
if __name__ == "__main__":
startx() | CSR-main | scripts/startx.py |
CSR-main | src/__init__.py |
|
from typing import Any, List
import numpy as np
import pytorch_lightning as pl
import torch
import wandb
from pytorch_lightning.callbacks import Callback, ModelCheckpoint
from pytorch_lightning.callbacks.early_stopping import EarlyStopping
from src.shared.utils import render_confusion_matrix
from torch.utils.data.dataloader import DataLoader
class ConfusionLogger(Callback):
""" Custom callback to compute metrics at the end of each training epoch"""
def __init__(self, class_names: List[str]):
super().__init__()
self.class_names = class_names
def on_validation_epoch_start(self, trainer, pl_module):
pl_module.val_confmat.reset()
def on_validation_epoch_end(self, trainer, pl_module):
# collect validation data and ground truth labels from dataloader
conf_matrix = pl_module.val_confmat.compute()
conf_matrix = render_confusion_matrix(
conf_matrix.cpu().numpy(), self.class_names)
trainer.logger.experiment.log(
{
f"val_conf": wandb.Image(conf_matrix)
})
def on_test_epoch_start(self, trainer, pl_module):
pl_module.test_confmat.reset()
def on_test_epoch_end(self, trainer, pl_module):
# collect validation data and ground truth labels from dataloader
conf_matrix = pl_module.test_confmat.compute()
conf_matrix = render_confusion_matrix(
conf_matrix.cpu().numpy(), self.class_names)
trainer.logger.experiment.log(
{
f"test_conf": wandb.Image(conf_matrix)
})
class ContrastiveImagePredictionLogger(Callback):
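    """Logs the query/key crops (with their two object masks) from the first
    batch of each train/validation epoch to Weights & Biases."""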
def __init__(self):
super().__init__()
def on_train_batch_start(
self,
trainer: 'pl.Trainer',
pl_module: 'pl.LightningModule',
batch: Any,
batch_idx: int,
dataloader_idx: int,
):
if dataloader_idx == 0 and batch_idx == 0:
# only log first batch in the dataloader
self.__helper(trainer, batch, 'train')
    def on_validation_batch_start(
self,
trainer: 'pl.Trainer',
pl_module: 'pl.LightningModule',
batch: Any,
batch_idx: int,
dataloader_idx: int,
):
if dataloader_idx == 0 and batch_idx == 0:
# only log first batch in the dataloader
self.__helper(trainer, batch, 'val')
def __helper(
self,
trainer: 'pl.Trainer',
batch: Any,
prefix: str,
):
# Bring the tensors to CPU
query, key = batch
q_img = query['image']
q_mask_1 = query['mask_1']
q_mask_2 = query['mask_2']
q_rooms_ids = query['room_id']
q_trajectory_ids = query['trajectory_id'].cpu().numpy()
q_timesteps = query['timestep'].cpu().numpy()
# has_shuffle_negatives = query['has_shuffle_negative'].cpu().numpy()
k_img = key['image']
k_mask_1 = key['mask_1']
k_mask_2 = key['mask_2']
k_rooms_ids = key['room_id']
k_trajectory_ids = key['trajectory_id'].cpu().numpy()
k_timesteps = key['timestep'].cpu().numpy()
# s_img = query['shuffle_image']
# s_mask_1 = query['shuffle_mask_1']
# s_mask_2 = query['shuffle_mask_2']
# Save the masks
q_masks = [{
"mask_1": {
"mask_data": q_mask_1[i].squeeze().cpu().numpy(),
"class_labels": {1: "mask1"}
},
"mask_2": {
"mask_data": q_mask_2[i].squeeze().cpu().numpy()+1,
"class_labels": {2: "mask2"}
},
"background": {
"mask_data": (q_mask_1[i] + q_mask_2[i]).squeeze().cpu().numpy(),
"class_labels": {0: "background"}
}
} for i in range(q_img.shape[0])]
k_masks = [{
"mask_1": {
"mask_data": k_mask_1[i].squeeze().cpu().numpy(),
"class_labels": {1: "mask1"}
},
"mask_2": {
"mask_data": k_mask_2[i].squeeze().cpu().numpy()+1,
"class_labels": {2: "mask2"}
},
"background": {
"mask_data": (k_mask_1[i] + k_mask_2[i]).squeeze().cpu().numpy(),
"class_labels": {0: "background"}
}
} for i in range(k_img.shape[0])]
# s_masks = [{
# "mask_1": {
# "mask_data": s_mask_1[i].squeeze().cpu().numpy(),
# "class_labels": {1: "mask1"}
# },
# "mask_2": {
# "mask_data": s_mask_2[i].squeeze().cpu().numpy()+1,
# "class_labels": {2: "mask2"}
# },
# "background": {
# "mask_data": (s_mask_1[i] + s_mask_2[i]).squeeze().cpu().numpy(),
# "class_labels": {0: "background"}
# }
# } for i in range(s_img.shape[0])]
trainer.logger.experiment.log({
f"{prefix}_queries": [wandb.Image(x, masks=mask, caption=f"room_id:{room_id}, trajectory_id:{trajectory_id}, timestep:{timestep}")
for x, mask, room_id, trajectory_id, timestep in zip(q_img,
q_masks,
q_rooms_ids,
q_trajectory_ids,
q_timesteps)][:10],
f"{prefix}_keys": [wandb.Image(x, masks=mask, caption=f"room_id:{room_id}, trajectory_id:{trajectory_id}, timestep:{timestep}")
for x, mask, room_id, trajectory_id, timestep in zip(k_img,
k_masks,
k_rooms_ids,
k_trajectory_ids,
k_timesteps)][:10],
})
class ReceptacleImagePredictionLogger(Callback):
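    """Every `every_n_val_epochs` validation epochs, logs the misclassified
    examples recorded in `pl_module.val_misclass` (image, masks, prediction,
    and label) to Weights & Biases."""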
def __init__(self, misclassification=True, every_n_val_epochs=5):
super().__init__()
self.every_n_val_epochs = every_n_val_epochs
def on_validation_epoch_end(self, trainer, pl_module):
if trainer.current_epoch % self.every_n_val_epochs != self.every_n_val_epochs - 1:
return
masks = []
images = None
preds = None
rooms_ids = None
trajectory_ids = None
timesteps = None
targets = None
# Bring the tensors to CPU
for step, (val_input, val_label) in enumerate(trainer.datamodule.val_dataloader()):
if step not in pl_module.val_misclass:
break
indices = pl_module.val_misclass[step][0]
pred = pl_module.val_misclass[step][1]
target = pl_module.val_misclass[step][2]
image = val_input['image'][indices]
mask_1 = val_input['mask_1'][indices]
mask_2 = val_input['mask_2'][indices]
rooms_id = val_input['room_id'][indices]
trajectory_id = val_input['trajectory_id'][indices]
timestep = val_input['timestep'][indices]
# Save the masks
masks += [{
"mask_1": {
"mask_data": mask_1[i].squeeze().cpu().numpy(),
"class_labels": {1: "mask1"}
},
"mask_2": {
"mask_data": mask_2[i].squeeze().cpu().numpy()+1,
"class_labels": {2: "mask2"}
},
"background": {
"mask_data": (mask_1[i] + mask_2[i]).squeeze().cpu().numpy(),
"class_labels": {0: "background"}
}
} for i in range(indices.shape[0])]
if images is not None:
images = torch.cat((images, image), 0)
else:
images = image
if targets is not None:
targets = torch.cat((targets, target), 0)
else:
targets = target
if preds is not None:
preds = torch.cat((preds, pred), 0)
else:
preds = pred
if rooms_ids is not None:
rooms_ids = torch.cat((rooms_ids, rooms_id), 0)
else:
rooms_ids = rooms_id
if trajectory_ids is not None:
trajectory_ids = torch.cat((trajectory_ids, trajectory_id), 0)
else:
trajectory_ids = trajectory_id
if timesteps is not None:
timesteps = torch.cat((timesteps, timestep), 0)
else:
timesteps = timestep
trainer.logger.experiment.log({
"val_examples": [wandb.Image(x, masks=mask, caption=f"Pred:{pred}, Label:{y}, room_id:{room_id}, trajectory_id:{trajectory_id}, timestep:{timestep}")
for x, pred, y, mask, room_id, trajectory_id, timestep in zip(images,
preds,
targets,
masks,
rooms_ids,
trajectory_ids,
timesteps)]
})
| CSR-main | src/lightning/custom_callbacks.py |
import os
import random
import numpy as np
import pytorch_lightning as pl
import torch
import wandb
from pytorch_lightning import seed_everything
from pytorch_lightning.callbacks import LearningRateMonitor, ModelCheckpoint
from pytorch_lightning.loggers import WandbLogger
from pytorch_lightning.plugins import DDPPlugin
from src.lightning.data_modules.contrastive_data_module import \
ContrastiveDataModule
from src.lightning.modules.moco2_module import MocoV2
from src.lightning.custom_callbacks import ContrastiveImagePredictionLogger
class MocoV2Trainer(object):
def __init__(self, conf):
self.conf = conf
seed_everything(self.conf.seed)
def run(self):
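        """Build the contrastive data module and the MoCo v2 model (optionally
        warm-started from `pretrain_path`), wire up W&B logging and callbacks,
        and run distributed mixed-precision training."""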
# Init our data pipeline
dm = ContrastiveDataModule(self.conf.batch_size, self.conf.data_path, self.conf.train_object_representation)
# To access the x_dataloader we need to call prepare_data and setup.
dm.prepare_data()
dm.setup()
# Init our model
model = None
if self.conf.pretrain_path is not None and os.path.exists(self.conf.pretrain_path):
model = MocoV2.load_from_checkpoint(self.conf.pretrain_path)
else:
model = MocoV2(num_negatives=self.conf.queue_size)
wandb_logger = WandbLogger(project=self.conf.project_name,
name=self.conf.experiment_name,
job_type='train')
# defining callbacks
checkpoint_callback = ModelCheckpoint(dirpath=self.conf.checkpoint_path,
filename='model/model-{epoch}-{val_loss:.2f}',
verbose=True,
monitor='val_loss',
mode='min',
every_n_val_epochs=5,
save_top_k=-1)
data_callback = ContrastiveImagePredictionLogger()
learning_rate_callback = LearningRateMonitor(logging_interval='epoch')
# set up the trainer
trainer = pl.Trainer(max_epochs=self.conf.epochs,
check_val_every_n_epoch=5,
progress_bar_refresh_rate=self.conf.progress_bar_refresh_rate,
gpus=8,#self.conf.gpus,
logger=wandb_logger,
callbacks=[checkpoint_callback, learning_rate_callback, data_callback],
checkpoint_callback=True,
accelerator=self.conf.accelerator,
plugins=DDPPlugin(find_unused_parameters=False),
amp_level='O2',
precision=16)
# Train the model
trainer.fit(model, dm)
# Evaluate the model on the held out test set
# trainer.test()
# Close wandb run
wandb.finish()
| CSR-main | src/lightning/trainers/moco2_trainer.py |
CSR-main | src/lightning/trainers/__init__.py |