code | docstring | func_name | language | repo | path | url | license |
---|---|---|---|---|---|---|---|
def get_electra_pretraining_model(model_name, ctx_l,
max_seq_length=128,
hidden_dropout_prob=0.1,
attention_dropout_prob=0.1,
generator_units_scale=None,
generator_layers_scale=None,
params_path=None):
"""
An ELECTRA pretraining model is built with a generator and a discriminator, in which
the generator shares its embeddings with the discriminator but uses a different backbone.
"""
cfg, tokenizer, _, _ = get_pretrained_electra(
model_name, load_backbone=False)
cfg = ElectraModel.get_cfg().clone_merge(cfg)
cfg.defrost()
cfg.MODEL.hidden_dropout_prob = hidden_dropout_prob
cfg.MODEL.attention_dropout_prob = attention_dropout_prob
cfg.MODEL.max_length = max_seq_length
# Keep the original generator size if not designated
if generator_layers_scale:
cfg.MODEL.generator_layers_scale = generator_layers_scale
if generator_units_scale:
cfg.MODEL.generator_units_scale = generator_units_scale
cfg.freeze()
model = ElectraForPretrain(cfg,
uniform_generator=False,
tied_generator=False,
tied_embeddings=True,
disallow_correct=False,
weight_initializer=TruncNorm(stdev=0.02))
if not params_path:
model.initialize(ctx=ctx_l)
else:
model.load_parameters(params_path, ctx=ctx_l)
model.hybridize()
return cfg, tokenizer, model |
An ELECTRA pretraining model is built with a generator and a discriminator, in which
the generator shares its embeddings with the discriminator but uses a different backbone.
| get_electra_pretraining_model | python | dmlc/gluon-nlp | scripts/pretraining/pretraining_utils.py | https://github.com/dmlc/gluon-nlp/blob/master/scripts/pretraining/pretraining_utils.py | Apache-2.0 |
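A minimal usage sketch of the builder above; it assumes MXNet is installed, that the function is importable from scripts/pretraining/pretraining_utils.py, and that 'google_electra_small' is an accepted model name (all of these are assumptions, not guarantees).

```python
import mxnet as mx
# Assumed import path; adjust to wherever pretraining_utils.py lives in your checkout.
from pretraining_utils import get_electra_pretraining_model

ctx_l = [mx.cpu()]  # or e.g. [mx.gpu(0), mx.gpu(1)] for multi-GPU pretraining
cfg, tokenizer, model = get_electra_pretraining_model(
    'google_electra_small',        # assumed registered ELECTRA model name
    ctx_l,
    max_seq_length=128,
    hidden_dropout_prob=0.1,
    attention_dropout_prob=0.1)
print(cfg.MODEL.max_length)        # 128, set during the defrost/freeze round trip
```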
def parameters_option(step_num, model, ckpt_dir, option='Saving'):
"""Save or load the model parameter, marked by step_num."""
param_path = os.path.join(
ckpt_dir, '{}.params'.format(str(step_num).zfill(7)))
logging.info('[step {}], {} model params to/from {}.'.format(
step_num, option, param_path))
if option == 'Saving':
model.save_parameters(param_path)
return param_path
elif option == 'Loading':
model.load_parameters(param_path)
return model
else:
raise NotImplementedError('Unknown Option: {}'.format(option)) | Save or load the model parameter, marked by step_num. | parameters_option | python | dmlc/gluon-nlp | scripts/pretraining/run_electra.py | https://github.com/dmlc/gluon-nlp/blob/master/scripts/pretraining/run_electra.py | Apache-2.0 |
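A short checkpointing sketch for the helper above; `model` stands for any initialized Gluon block and the directory name is a placeholder.

```python
ckpt_dir = './ckpt'  # hypothetical checkpoint directory that already exists

# Save a checkpoint at step 5000, then restore it later when resuming.
param_path = parameters_option(5000, model, ckpt_dir, option='Saving')
model = parameters_option(5000, model, ckpt_dir, option='Loading')
```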
def states_option(step_num, trainer, ckpt_dir, local_rank=0, option='Saving'):
"""Save or load the trainer states, marked by step_num and local rank."""
state_path = os.path.join(ckpt_dir, '{}.states.{}'.format(
str(step_num).zfill(7), str(local_rank).zfill(2)))
logging.info('[step {}], {} trainer states to/from {}.'.format(
step_num, option, state_path))
if option == 'Saving':
trainer.save_states(state_path)
return state_path
elif option == 'Loading':
trainer.load_states(state_path)
return trainer
else:
raise NotImplementedError('Unknown Option: {}'.format(option)) | Save or load the trainer states, marked by step_num and local rank. | states_option | python | dmlc/gluon-nlp | scripts/pretraining/run_electra.py | https://github.com/dmlc/gluon-nlp/blob/master/scripts/pretraining/run_electra.py | Apache-2.0 |
def transform(instance, max_seq_length):
"""Transform instance to inputs for MLM and NSP."""
input_ids = instance.tokens
assert len(input_ids) <= max_seq_length
segment_ids = instance.segment_ids
masked_lm_positions = instance.masked_lm_positions
valid_lengths = len(input_ids)
masked_lm_ids = instance.masked_lm_labels
masked_lm_weights = [1.0] * len(masked_lm_ids)
next_sentence_label = 1 if instance.is_random_next else 0
features = {}
features['input_ids'] = input_ids
features['segment_ids'] = segment_ids
features['masked_lm_positions'] = masked_lm_positions
features['masked_lm_ids'] = masked_lm_ids
features['masked_lm_weights'] = masked_lm_weights
features['next_sentence_labels'] = [next_sentence_label]
features['valid_lengths'] = [valid_lengths]
return features | Transform instance to inputs for MLM and NSP. | transform | python | dmlc/gluon-nlp | scripts/pretraining/bert/create_pretraining_data.py | https://github.com/dmlc/gluon-nlp/blob/master/scripts/pretraining/bert/create_pretraining_data.py | Apache-2.0 |
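To make the expected input of transform concrete, here is a hedged sketch that builds a stand-in object carrying only the attributes the function reads; SimpleNamespace replaces the real TrainingInstance class, and the token ids are invented.

```python
from types import SimpleNamespace

instance = SimpleNamespace(
    tokens=[2, 101, 102, 3, 205, 3],   # e.g. <cls> tok tok <sep> tok <sep>
    segment_ids=[0, 0, 0, 0, 1, 1],
    masked_lm_positions=[1, 4],
    masked_lm_labels=[873, 921],
    is_random_next=False)

features = transform(instance, max_seq_length=128)
print(features['valid_lengths'])         # [6]
print(features['masked_lm_weights'])     # [1.0, 1.0]
print(features['next_sentence_labels'])  # [0]
```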
def write_to_files_np(features, tokenizer, max_seq_length,
max_predictions_per_seq, output_files):
# pylint: disable=unused-argument
"""Write to numpy files from `TrainingInstance`s."""
next_sentence_labels = []
valid_lengths = []
assert len(output_files) == 1, 'numpy format only support single output file'
output_file = output_files[0]
(input_ids, segment_ids, masked_lm_positions, masked_lm_ids,
masked_lm_weights, next_sentence_labels, valid_lengths) = features
total_written = len(next_sentence_labels)
# store variable length numpy array object directly.
outputs = collections.OrderedDict()
outputs['input_ids'] = np.array(input_ids, dtype=object)
outputs['segment_ids'] = np.array(segment_ids, dtype=object)
outputs['masked_lm_positions'] = np.array(masked_lm_positions, dtype=object)
outputs['masked_lm_ids'] = np.array(masked_lm_ids, dtype=object)
outputs['masked_lm_weights'] = np.array(masked_lm_weights, dtype=object)
outputs['next_sentence_labels'] = np.array(next_sentence_labels, dtype='int32')
outputs['valid_lengths'] = np.array(valid_lengths, dtype='int32')
np.savez_compressed(output_file, **outputs)
logging.info('Wrote %d total instances', total_written) | Write to numpy files from `TrainingInstance`s. | write_to_files_np | python | dmlc/gluon-nlp | scripts/pretraining/bert/create_pretraining_data.py | https://github.com/dmlc/gluon-nlp/blob/master/scripts/pretraining/bert/create_pretraining_data.py | Apache-2.0 |
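Because write_to_files_np stores the ragged per-instance arrays with object dtype, reading the archive back needs allow_pickle=True; a minimal sketch with an assumed file name.

```python
import numpy as np

npz = np.load('part-0.npz', allow_pickle=True)  # hypothetical output file
input_ids = npz['input_ids']                    # ragged: one int array per instance
valid_lengths = npz['valid_lengths']            # int32 vector, one entry per instance
print(len(input_ids), valid_lengths[:5])
```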
def tokenize_lines_fn(x):
"""
Worker function to tokenize lines based on the tokenizer, and perform vocabulary lookup.
Parameters
----------
x
A tuple of (lines, tokenizer): the lines of the file to be tokenized and the
trained tokenizer used for the vocabulary lookup
Returns
-------
results
A list storing the valid tokenized lines
"""
lines, tokenizer = x
results = []
for line in lines:
if not line:
break
line = line.strip()
# Empty lines are used as document delimiters
if not line:
results.append([])
else:
token_ids = tokenizer.encode(line, int)
if token_ids:
results.append(token_ids)
return results |
Worker function to tokenize lines based on the tokenizer, and perform vocabulary lookup.
Parameters
----------
x
A tuple of (lines, tokenizer): the lines of the file to be tokenized and the
trained tokenizer used for the vocabulary lookup
Returns
-------
results
A list storing the valid tokenized lines
| tokenize_lines_fn | python | dmlc/gluon-nlp | scripts/pretraining/bert/create_pretraining_data.py | https://github.com/dmlc/gluon-nlp/blob/master/scripts/pretraining/bert/create_pretraining_data.py | Apache-2.0 |
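tokenize_lines_fn takes a single packed argument so it can be fed straight to Pool.imap; a hedged sketch of that pattern, where the tokenizer object and the corpus file are assumptions supplied by the caller.

```python
import itertools
import multiprocessing

def chunks_of(lines, size=10000):
    """Yield successive chunks of `size` lines from an iterable."""
    it = iter(lines)
    while True:
        chunk = list(itertools.islice(it, size))
        if not chunk:
            return
        yield chunk

# `tokenizer` is assumed to be a trained (picklable) tokenizer exposing .encode(line, int).
with open('corpus.txt', encoding='utf-8') as f, multiprocessing.Pool(4) as pool:
    work = ((chunk, tokenizer) for chunk in chunks_of(f))
    for token_id_lists in pool.imap(tokenize_lines_fn, work):
        pass  # e.g. accumulate the tokenized lines or write them to shards
```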
def convert_to_npz(instances, max_seq_length):
"""Create masked language model and next sentence prediction samples as numpy arrays."""
input_ids = []
segment_ids = []
masked_lm_positions = []
masked_lm_ids = []
masked_lm_weights = []
next_sentence_labels = []
valid_lengths = []
for inst_index, instance in enumerate(instances):
features = transform(instance, max_seq_length)
input_id = features['input_ids']
segment_id = features['segment_ids']
masked_lm_position = features['masked_lm_positions']
masked_lm_id = features['masked_lm_ids']
masked_lm_weight = features['masked_lm_weights']
next_sentence_label = features['next_sentence_labels'][0]
valid_length = features['valid_lengths'][0]
input_ids.append(np.ascontiguousarray(input_id, dtype='int32'))
segment_ids.append(np.ascontiguousarray(segment_id, dtype='int32'))
masked_lm_positions.append(np.ascontiguousarray(masked_lm_position, dtype='int32'))
masked_lm_ids.append(np.ascontiguousarray(masked_lm_id, dtype='int32'))
masked_lm_weights.append(np.ascontiguousarray(masked_lm_weight, dtype='float32'))
next_sentence_labels.append(next_sentence_label)
valid_lengths.append(valid_length)
# debugging information
if inst_index < 1:
print_example(instance, features)
return input_ids, masked_lm_ids, masked_lm_positions, masked_lm_weights,\
next_sentence_labels, segment_ids, valid_lengths | Create masked language model and next sentence prediction samples as numpy arrays. | convert_to_npz | python | dmlc/gluon-nlp | scripts/pretraining/bert/create_pretraining_data.py | https://github.com/dmlc/gluon-nlp/blob/master/scripts/pretraining/bert/create_pretraining_data.py | Apache-2.0 |
def create_masked_lm_predictions(tokens, masked_lm_prob, max_predictions_per_seq,
whole_word_mask, vocab, tokenizer,
_MASK_TOKEN, _CLS_TOKEN, _SEP_TOKEN):
"""Creates the predictions for the masked LM objective."""
cand_indexes = []
for (i, token) in enumerate(tokens):
if token in [_CLS_TOKEN, _SEP_TOKEN]:
continue
# Whole Word Masking means that we mask all of the subwords
# corresponding to an original word. When a word has been split into
# subwords, the first token does not have any marker and any subsequent
# tokens are prefixed with ##. So whenever we see the ## token, we
# append it to the previous set of word indexes.
#
# Note that Whole Word Masking does *not* change the training code
# at all -- we still predict each subword independently, softmaxed
# over the entire vocabulary.
if whole_word_mask and len(cand_indexes) >= 1 and \
not tokenizer.is_first_subword(token):
cand_indexes[-1].append(i)
else:
cand_indexes.append([i])
random.shuffle(cand_indexes)
output_tokens = list(tokens)
num_to_predict = min(max_predictions_per_seq,
max(1, int(round(len(tokens) * masked_lm_prob))))
masked_lms = []
covered_indexes = set()
for index_set in cand_indexes:
if len(masked_lms) >= num_to_predict:
break
# If adding a whole-word mask would exceed the maximum number of
# predictions, then just skip this candidate.
if len(masked_lms) + len(index_set) > num_to_predict:
continue
is_any_index_covered = False
for index in index_set:
if index in covered_indexes:
is_any_index_covered = True
break
if is_any_index_covered:
continue
for index in index_set:
covered_indexes.add(index)
masked_token = None
# 80% of the time, replace with [MASK]
if random.random() < 0.8:
masked_token = _MASK_TOKEN
else:
# 10% of the time, keep original
if random.random() < 0.5:
masked_token = tokens[index]
# 10% of the time, replace with random word
else:
# generate a random word in [0, vocab_size - 1]
masked_token = random.randint(0, len(vocab) - 1)
output_tokens[index] = masked_token
masked_lms.append(MaskedLmInstance(index=index, label=tokens[index]))
assert len(masked_lms) <= num_to_predict
masked_lms = sorted(masked_lms, key=lambda x: x.index)
masked_lm_positions = []
masked_lm_labels = []
for p in masked_lms:
masked_lm_positions.append(p.index)
masked_lm_labels.append(p.label)
return (output_tokens, masked_lm_positions, masked_lm_labels) | Creates the predictions for the masked LM objective. | create_masked_lm_predictions | python | dmlc/gluon-nlp | scripts/pretraining/bert/create_pretraining_data.py | https://github.com/dmlc/gluon-nlp/blob/master/scripts/pretraining/bert/create_pretraining_data.py | Apache-2.0 |
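The replacement decision above follows BERT's 80/10/10 rule. The self-contained sketch below isolates just that rule for a single position; the mask id and vocabulary size are stand-ins, while the real function draws the random token from the actual vocabulary.

```python
import random

MASK_ID = 103        # stand-in id for the [MASK] token
VOCAB_SIZE = 30000   # stand-in vocabulary size

def mask_one_position(tokens, index):
    """Apply the 80/10/10 rule to one position and return the replacement token."""
    if random.random() < 0.8:
        return MASK_ID                         # 80%: replace with [MASK]
    if random.random() < 0.5:
        return tokens[index]                   # 10%: keep the original token
    return random.randint(0, VOCAB_SIZE - 1)   # 10%: replace with a random token

tokens = [2, 1037, 2518, 3]
print(mask_one_position(tokens, 2))
```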
def truncate_seq_pair(tokens_a, tokens_b, max_num_tokens):
"""Truncates a pair of sequences to a maximum sequence length."""
while True:
total_length = len(tokens_a) + len(tokens_b)
if total_length <= max_num_tokens:
break
trunc_tokens = tokens_a if len(tokens_a) > len(tokens_b) else tokens_b
assert len(trunc_tokens) >= 1
# We want to sometimes truncate from the front and sometimes from the
# back to add more randomness and avoid biases.
if random.random() < 0.5:
del trunc_tokens[0]
else:
trunc_tokens.pop() | Truncates a pair of sequences to a maximum sequence length. | truncate_seq_pair | python | dmlc/gluon-nlp | scripts/pretraining/bert/create_pretraining_data.py | https://github.com/dmlc/gluon-nlp/blob/master/scripts/pretraining/bert/create_pretraining_data.py | Apache-2.0 |
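truncate_seq_pair trims the lists in place, so the caller simply passes them and reads them afterwards; a small demo assuming the function above is in scope.

```python
import random

random.seed(0)               # make the front/back truncation choices reproducible
tokens_a = list(range(10))   # pretend token ids of sentence A
tokens_b = list(range(8))    # pretend token ids of sentence B
truncate_seq_pair(tokens_a, tokens_b, 12)
print(len(tokens_a) + len(tokens_b))  # 12: the longer list was trimmed in place
```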
def prepare_pretrain_npz_dataset(filename, allow_pickle=False):
"""Create dataset based on the numpy npz file"""
if isinstance(filename, (list, tuple)):
assert len(filename) == 1, \
'When .npy/.npz data file is loaded, len(filename) must be 1.' \
' Received len(filename)={}.'.format(len(filename))
filename = filename[0]
logging.debug('start to load file %s ...', filename)
return NumpyDataset(filename, allow_pickle=allow_pickle) | Create dataset based on the numpy npz file | prepare_pretrain_npz_dataset | python | dmlc/gluon-nlp | scripts/pretraining/bert/pretraining_utils.py | https://github.com/dmlc/gluon-nlp/blob/master/scripts/pretraining/bert/pretraining_utils.py | Apache-2.0 |
def prepare_pretrain_text_dataset(filename, tokenizer, max_seq_length, short_seq_prob,
masked_lm_prob, max_predictions_per_seq, whole_word_mask,
random_next_sentence, vocab):
"""Create dataset based on the raw text files"""
dupe_factor = 1
if not isinstance(filename, (list, tuple)):
filename = [filename]
logging.debug('start to load files %s ...', filename)
instances = create_training_instances((filename, tokenizer, max_seq_length,
short_seq_prob, masked_lm_prob,
max_predictions_per_seq,
whole_word_mask, vocab,
dupe_factor, 1, None, None, random_next_sentence))
return mx.gluon.data.ArrayDataset(*instances) | Create dataset based on the raw text files | prepare_pretrain_text_dataset | python | dmlc/gluon-nlp | scripts/pretraining/bert/pretraining_utils.py | https://github.com/dmlc/gluon-nlp/blob/master/scripts/pretraining/bert/pretraining_utils.py | Apache-2.0 |
def prepare_pretrain_bucket_sampler(dataset, batch_size, shuffle=False, num_buckets=1):
"""Create data sampler based on the dataset"""
if isinstance(dataset, NumpyDataset):
lengths = dataset.get_field('valid_lengths')
else:
lengths = dataset.transform(lambda input_ids, segment_ids, masked_lm_positions, \
masked_lm_ids, masked_lm_weights, \
next_sentence_labels, valid_lengths: \
valid_lengths, lazy=False)
sampler = FixedBucketSampler(lengths,
batch_size=batch_size,
num_buckets=num_buckets,
ratio=0,
shuffle=shuffle)
logging.debug('Sampler created for a new dataset:\n%s', sampler)
return sampler | Create data sampler based on the dataset | prepare_pretrain_bucket_sampler | python | dmlc/gluon-nlp | scripts/pretraining/bert/pretraining_utils.py | https://github.com/dmlc/gluon-nlp/blob/master/scripts/pretraining/bert/pretraining_utils.py | Apache-2.0 |
def get_pretrain_data_npz(data, batch_size,
shuffle, num_buckets,
vocab, num_parts=1, part_idx=0,
num_dataset_workers=1, num_batch_workers=1,
circle_length=1, repeat=1,
dataset_cached=False, num_max_dataset_cached=0):
"""Get a data iterator from pre-processed npz files.
Parameters
----------
batch_size : int
The batch size per GPU.
shuffle : bool
Whether to shuffle the data.
num_buckets : int
The number of buckets for the FixedBucketSampler for training.
vocab : Vocab
The vocabulary.
num_parts : int
The number of partitions for the dataset.
part_idx : int
The index of the partition to read.
num_dataset_workers : int
The number of worker processes for dataset construction.
num_batch_workers : int
The number of worker processes for batch construction.
circle_length : int, default is 1
The number of files to be read for a single worker at the same time.
When circle_length is larger than 1, we merge circle_length files.
repeat : int, default is 1
The number of times that files are repeated.
dataset_cached : bool, default is False
Whether or not to cache the last processed dataset.
Each processed dataset can only be cached once.
When there is no new available processed dataset to be fetched,
we pop a cached processed dataset.
num_max_dataset_cached : int, default is 0
Maximum number of cached datasets. It is valid only if dataset_cached is True
"""
num_files = len(glob(data))
logging.info('%d files are found.', num_files)
assert num_files >= num_parts, \
'The number of text files must be no less than the number of ' \
'workers/partitions (%d). Only %d files at %s are found.'%(num_parts, num_files, data)
dataset_params = {'allow_pickle': True}
sampler_params = {'batch_size': batch_size, 'shuffle': shuffle, 'num_buckets': num_buckets}
dataset_fn = prepare_pretrain_npz_dataset
sampler_fn = prepare_pretrain_bucket_sampler
pad_val = vocab.pad_id
batchify_fn = bf.Tuple(
bf.Pad(val=pad_val, round_to=8), # input_id
bf.Pad(val=pad_val), # masked_id
bf.Pad(val=0), # masked_position
bf.Pad(val=0), # masked_weight
bf.Stack(), # next_sentence_label
bf.Pad(val=0, round_to=8), # segment_id
bf.Stack()) # valid_lengths
split_sampler = SplitSampler(num_files, num_parts=num_parts,
part_index=part_idx, repeat=repeat)
dataloader = DatasetLoader(data,
file_sampler=split_sampler,
dataset_fn=dataset_fn,
batch_sampler_fn=sampler_fn,
dataset_params=dataset_params,
batch_sampler_params=sampler_params,
batchify_fn=batchify_fn,
num_dataset_workers=num_dataset_workers,
num_batch_workers=num_batch_workers,
pin_memory=False,
circle_length=circle_length,
dataset_cached=dataset_cached,
num_max_dataset_cached=num_max_dataset_cached)
return dataloader | Get a data iterator from pre-processed npz files.
Parameters
----------
batch_size : int
The batch size per GPU.
shuffle : bool
Whether to shuffle the data.
num_buckets : int
The number of buckets for the FixedBucketSampler for training.
vocab : Vocab
The vocabulary.
num_parts : int
The number of partitions for the dataset.
part_idx : int
The index of the partition to read.
num_dataset_workers : int
The number of worker processes for dataset construction.
num_batch_workers : int
The number of worker processes for batch construction.
circle_length : int, default is 1
The number of files to be read for a single worker at the same time.
When circle_length is larger than 1, we merge circle_length files.
repeat : int, default is 1
The number of times that files are repeated.
dataset_cached : bool, default is False
Whether or not to cache the last processed dataset.
Each processed dataset can only be cached once.
When there is no new available processed dataset to be fetched,
we pop a cached processed dataset.
num_max_dataset_cached : int, default is 0
Maximum number of cached datasets. It is valid only if dataset_cached is True
| get_pretrain_data_npz | python | dmlc/gluon-nlp | scripts/pretraining/bert/pretraining_utils.py | https://github.com/dmlc/gluon-nlp/blob/master/scripts/pretraining/bert/pretraining_utils.py | Apache-2.0 |
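A hedged sketch of building the npz data loader above; the glob pattern, vocabulary object, and batch size are placeholders, and the call assumes the same module-level imports as the surrounding script.

```python
# `vocab` is assumed to be the GluonNLP Vocab obtained together with the tokenizer,
# and the glob pattern should match the .npz shards written by create_pretraining_data.
dataloader = get_pretrain_data_npz(
    data='./pretrain_data/*.npz',
    batch_size=8,
    shuffle=True,
    num_buckets=1,
    vocab=vocab,
    num_parts=1, part_idx=0,
    num_dataset_workers=2, num_batch_workers=2)

for batch in dataloader:
    (input_ids, masked_ids, masked_positions, masked_weights,
     next_sentence_labels, segment_ids, valid_lengths) = batch
    break  # each iteration yields one padded, bucketed batch
```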
def parameters_option(step_num, model, ckpt_dir, option='Saving', ctx_l=None):
"""Save or load the model parameter, marked by step_num."""
param_path = os.path.join(
ckpt_dir, '{}.params'.format(str(step_num).zfill(7)))
logging.info('[step {}], {} model params to/from {}.'.format(
step_num, option, param_path))
if option == 'Saving':
model.save_parameters(param_path)
elif option == 'Loading':
model.load_parameters(param_path, ctx=ctx_l)
else:
raise NotImplementedError('Unknown Option: {}'.format(option)) | Save or load the model parameter, marked by step_num. | parameters_option | python | dmlc/gluon-nlp | scripts/pretraining/bert/run_pretraining.py | https://github.com/dmlc/gluon-nlp/blob/master/scripts/pretraining/bert/run_pretraining.py | Apache-2.0 |
def states_option(step_num, trainer, ckpt_dir, local_rank=0, option='Saving'):
"""Save or load the trainer states, marked by step_num and local rank."""
state_path = os.path.join(ckpt_dir, '{}.states.{}'.format(
str(step_num).zfill(7), str(local_rank).zfill(2)))
logging.info('[step {}], {} trainer states to/from {}.'.format(
step_num, option, state_path))
if option == 'Saving':
trainer.save_states(state_path)
elif option == 'Loading':
trainer.load_states(state_path)
else:
raise NotImplementedError('Unknown Option: {}'.format(option)) | Save or load the trainer states, marked by step_num and local rank. | states_option | python | dmlc/gluon-nlp | scripts/pretraining/bert/run_pretraining.py | https://github.com/dmlc/gluon-nlp/blob/master/scripts/pretraining/bert/run_pretraining.py | Apache-2.0 |
def create_masked_lm_predictions(*, args, tokens, cls_token_id, sep_token_id, mask_token_id,
non_special_ids):
"""Creates the predictions for the masked LM objective."""
cand_indexes = [i for i, tok in enumerate(tokens) if tok not in (cls_token_id, sep_token_id)]
output_tokens = list(tokens)
random.shuffle(cand_indexes)
num_to_predict = min(args.max_predictions_per_seq,
max(1, int(round(len(tokens) * args.masked_lm_prob))))
mlm_positions = []
mlm_labels = []
covered_indexes = set()
for index in cand_indexes:
if len(mlm_positions) >= num_to_predict:
break
if index in covered_indexes:
continue
covered_indexes.add(index)
masked_token = None
# 80% of the time, replace with [MASK]
if random.random() < 0.8:
masked_token = mask_token_id
else:
# 10% of the time, keep original
if random.random() < 0.5:
masked_token = tokens[index]
# 10% of the time, replace with random word
else:
masked_token = random.choice(non_special_ids)
output_tokens[index] = masked_token
mlm_positions.append(index)
mlm_labels.append(tokens[index])
assert len(mlm_positions) <= num_to_predict
assert len(mlm_positions) == len(mlm_labels)
return output_tokens, mlm_positions, mlm_labels | Creates the predictions for the masked LM objective. | create_masked_lm_predictions | python | dmlc/gluon-nlp | scripts/pretraining/torch/bert/prepare_quickthought.py | https://github.com/dmlc/gluon-nlp/blob/master/scripts/pretraining/torch/bert/prepare_quickthought.py | Apache-2.0 |
def _initializer(function):
"""Initialize state of each process in multiprocessing pool.
The process local state is stored as an attribute of the function
object, which is specified in Pool(..., initargs=(function, )) and by
convention refers to the function executed during map.
"""
# TODO gluonnlp shouldn't provide a slow LegacyHuggingFaceTokenizer here...
_, tokenizer, _, _ = nlp.models.bert.get_pretrained_bert(args.model_name,
load_backbone=False,
load_mlm=False)
function.tokenizer = tokenizer
function.args = args
function.vocab = tokenizer.vocab
function.non_special_ids = tokenizer.vocab[tokenizer.vocab.non_special_tokens]
function.process_idx = 0
tok_type = pa.uint16() if len(tokenizer.vocab) <= np.iinfo(np.uint16).max else pa.uint32()
assert len(tokenizer.vocab) <= np.iinfo(np.uint32).max
length_type = pa.uint16()
assert args.max_seq_length * 2 <= np.iinfo(np.uint16).max
# pa.large_list instead of pa.list_ to use 64bit offsets
# See https://issues.apache.org/jira/browse/ARROW-9773
schema = pa.schema({
"quickthought1": pa.large_list(tok_type),
"quickthought2": pa.large_list(tok_type),
"validlength1": length_type,
"validlength2": length_type,
"mlmpositions1": pa.large_list(length_type),
"mlmpositions2": pa.large_list(length_type),
"mlmlabels1": pa.large_list(tok_type),
"mlmlabels2": pa.large_list(tok_type),
})
function.schema = schema | Initialize state of each process in multiprocessing pool.
The process local state is stored as an attribute of the function
object, which is specified in Pool(..., initargs=(function, )) and by
convention refers to the function executed during map.
| _initializer | python | dmlc/gluon-nlp | scripts/pretraining/torch/bert/prepare_quickthought.py | https://github.com/dmlc/gluon-nlp/blob/master/scripts/pretraining/torch/bert/prepare_quickthought.py | Apache-2.0 |
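The docstring above describes attaching per-process state to the mapped function inside the pool initializer. That pattern can be sketched independently of the BERT specifics; the worker function and the "expensive state" below are placeholders, not part of the original script.

```python
import multiprocessing

def process_file(path):
    """Worker: uses the state that _init attached to this function in each process."""
    return path, process_file.worker_id

def _init(function):
    """Pool initializer: build expensive per-process state once and attach it."""
    function.worker_id = multiprocessing.current_process().name  # e.g. a tokenizer

if __name__ == '__main__':
    with multiprocessing.Pool(2, initializer=_init, initargs=(process_file,)) as pool:
        for path, worker in pool.imap(process_file, ['a.txt', 'b.txt', 'c.txt']):
            print(path, 'processed by', worker)
```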
def parameters_option(step_num, model, args, option='Saving', ctx_l=None):
"""Save or load the model parameter, marked by step_num."""
param_path = os.path.join(args.ckpt_dir, f'{step_num:07}.params')
logging.info(f'[Step {step_num}], {option} model params to/from {param_path}.')
if option == 'Saving':
th.save(model.state_dict(), param_path)
elif option == 'Loading':
model.load_state_dict(th.load(param_path, map_location=args.device))
else:
raise NotImplementedError('Unknown Option: {}'.format(option)) | Save or load the model parameter, marked by step_num. | parameters_option | python | dmlc/gluon-nlp | scripts/pretraining/torch/bert/run_pretraining.py | https://github.com/dmlc/gluon-nlp/blob/master/scripts/pretraining/torch/bert/run_pretraining.py | Apache-2.0 |
def states_option(step_num, optimizer, args, option='Saving'):
"""Save or load the trainer states, marked by step_num and local rank."""
state_path = os.path.join(args.ckpt_dir, f'{step_num:07}.states.{args.local_rank:02}')
logging.info(f'[Step {step_num}], {option} trainer states to/from {state_path}.')
if option == 'Saving':
th.save(optimizer.state_dict(), state_path)
elif option == 'Loading':
optimizer.load_state_dict(th.load(state_path))
else:
raise NotImplementedError('Unknown Option: {}'.format(option)) | Save or load the trainer states, marked by step_num and local rank. | states_option | python | dmlc/gluon-nlp | scripts/pretraining/torch/bert/run_pretraining.py | https://github.com/dmlc/gluon-nlp/blob/master/scripts/pretraining/torch/bert/run_pretraining.py | Apache-2.0 |
def check_both_latin1(src_sentence: str, tgt_sentence: str) -> bool:
"""Check whether the sentence pair can all be encoded in latin1
This is used in
https://github.com/mlperf/training/blob/master/rnn_translator/pytorch/scripts/filter_dataset.py
The idea is to filter out sentence pairs that contain rare unicode glyphs and are unlikely to be en-de
Returns
-------
ret
Whether both sentences are latin1
"""
try:
src_sentence.encode('latin1')
tgt_sentence.encode('latin1')
except UnicodeEncodeError:
return False
else:
return True | Check whether both sentences in the pair can be encoded in latin1
This is used in
https://github.com/mlperf/training/blob/master/rnn_translator/pytorch/scripts/filter_dataset.py
The idea is to filter out sentence pairs that contain rare unicode glyphs and are unlikely to be en-de
Returns
-------
ret
Whether both sentences are latin1
| check_both_latin1 | python | dmlc/gluon-nlp | scripts/processing/clean_tok_corpus.py | https://github.com/dmlc/gluon-nlp/blob/master/scripts/processing/clean_tok_corpus.py | Apache-2.0 |
def check_latin1(sentence: str) -> bool:
"""Check whether the sentence can be encoded in latin1
This is used in
https://github.com/mlperf/training/blob/master/rnn_translator/pytorch/scripts/filter_dataset.py
The idea is to filter out sentences that contain rare unicode glyphs
Returns
-------
ret
Whether sentences are latin1
"""
try:
sentence.encode('latin1')
except UnicodeEncodeError:
return False
else:
return True | Check whether the sentence can be encoded in latin1
This is used in
https://github.com/mlperf/training/blob/master/rnn_translator/pytorch/scripts/filter_dataset.py
The idea is to filter out sentences that contain rare unicode glyphs
Returns
-------
ret
Whether sentences are latin1
| check_latin1 | python | dmlc/gluon-nlp | scripts/processing/clean_tok_corpus.py | https://github.com/dmlc/gluon-nlp/blob/master/scripts/processing/clean_tok_corpus.py | Apache-2.0 |
def get_line_byte_start(corpus_path: str) -> np.ndarray:
"""Get the start position of each lines in terms of bytes so that we can use seek + read to
load an arbitrary line.
Parameters
----------
corpus_path
The path of the corpus
Returns
-------
line_pos
Shape (#Lines + 1,)
"""
line_pos = [0]
with open(corpus_path, 'rb') as in_f:
pos = 0
for line in in_f:
pos += len(line)
line_pos.append(pos)
return np.array(line_pos, dtype=np.int64) | Get the start position of each line in terms of bytes so that we can use seek + read to
load an arbitrary line.
Parameters
----------
corpus_path
The path of the corpus
Returns
-------
line_pos
Shape (#Lines + 1,)
| get_line_byte_start | python | dmlc/gluon-nlp | scripts/processing/clean_tok_corpus.py | https://github.com/dmlc/gluon-nlp/blob/master/scripts/processing/clean_tok_corpus.py | Apache-2.0 |
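With these byte offsets, any line can be fetched by seeking to its start and reading exactly its length, which is how the corpus processors below carve files into chunks; a small sketch assuming a 'corpus.txt' file exists and the function above is in scope.

```python
line_pos = get_line_byte_start('corpus.txt')  # start offsets of every line, plus file end

def read_line(path, idx, line_pos):
    """Read line `idx` (0-based) without scanning the whole file."""
    with open(path, 'rb') as f:
        f.seek(line_pos[idx])
        raw = f.read(line_pos[idx + 1] - line_pos[idx])
    return raw.decode('utf-8').rstrip('\n')

print(read_line('corpus.txt', 3, line_pos))   # the fourth line of the corpus
```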
def process_parallel_corpus(self, src_corpus_paths: List[str],
tgt_corpus_paths: List[str],
src_out_path: str, tgt_out_path: str,
chunk_size: int = 1024 * 1024,
num_process: int = 8) -> int:
"""Preprocess the parallel corpus
Parameters
----------
src_corpus_paths
Source corpus paths
tgt_corpus_paths
Target corpus paths
src_out_path
Write the results to the source output path
tgt_out_path
Write the results to the target output path
chunk_size
Approximately split the corpus files into multiple chunks
num_process
The number of processes
Returns
-------
line_count
The number of lines in the final filtered file
"""
start = time.time()
total_line_count = 0
filtered_line_count = 0
def chunk_iterator(step=10):
for src_path, tgt_path in zip(src_corpus_paths, tgt_corpus_paths):
src_line_pos = get_line_byte_start(src_path)
tgt_line_pos = get_line_byte_start(tgt_path)
src_line_size = src_line_pos[1:] - src_line_pos[:-1]
tgt_line_size = tgt_line_pos[1:] - tgt_line_pos[:-1]
num_src_lines = src_line_pos.shape[0] - 1
num_tgt_lines = tgt_line_pos.shape[0] - 1
assert num_src_lines == num_tgt_lines
src_budget = chunk_size
tgt_budget = chunk_size
src_chunk_start = 0
tgt_chunk_start = 0
src_chunk_size = 0
tgt_chunk_size = 0
for i in range(0, num_src_lines, step):
line_batch_num = min(num_src_lines - i, step)
src_batch_line_size = src_line_size[i:(i + line_batch_num)].sum()
tgt_batch_line_size = tgt_line_size[i:(i + line_batch_num)].sum()
src_budget -= src_batch_line_size
tgt_budget -= tgt_batch_line_size
src_chunk_size += src_batch_line_size
tgt_chunk_size += tgt_batch_line_size
if src_budget <= 0 or tgt_budget <= 0 or i + step >= num_src_lines:
yield src_path, src_chunk_start, src_chunk_size,\
tgt_path, tgt_chunk_start, tgt_chunk_size
src_chunk_start += src_chunk_size
tgt_chunk_start += tgt_chunk_size
src_chunk_size = 0
tgt_chunk_size = 0
src_budget = chunk_size
tgt_budget = chunk_size
with open(src_out_path, 'w', encoding='utf-8', newline='\n') as src_out_f:
with open(tgt_out_path, 'w', encoding='utf-8', newline='\n') as tgt_out_f:
with multiprocessing.Pool(num_process) as pool:
for i, (processed_src_lines, processed_tgt_lines, unfiltered_line_num) in \
enumerate(pool.imap(self.process_chunk, chunk_iterator())):
src_out_f.write('\n'.join(processed_src_lines) + '\n')
tgt_out_f.write('\n'.join(processed_tgt_lines) + '\n')
filtered_line_count += len(processed_src_lines)
total_line_count += unfiltered_line_num
if (i + 1) % 100 == 0:
print('Chunk {}, #Lines Processed: {}, Filtered: {}, Remain: {}'
.format(i + 1, total_line_count,
total_line_count - filtered_line_count,
filtered_line_count))
end = time.time()
print('Done, #Lines {}/{}, Time spent {}'.format(filtered_line_count,
total_line_count,
end - start))
return filtered_line_count | Preprocess the parallel corpus
Parameters
----------
src_corpus_paths
Source corpus paths
tgt_corpus_paths
Target corpus paths
src_out_path
Write the results to the source output path
tgt_out_path
Write the results to the target output path
chunk_size
Approximately split the corpus files into multiple chunks
num_process
The number of processes
Returns
-------
line_count
The number of lines in the final filtered file
| process_parallel_corpus | python | dmlc/gluon-nlp | scripts/processing/clean_tok_corpus.py | https://github.com/dmlc/gluon-nlp/blob/master/scripts/processing/clean_tok_corpus.py | Apache-2.0 |
def process_mono_corpus(self,
corpus_paths: List[str],
out_path: str,
chunk_size: int = 1024 * 1024,
num_process: int = 8) -> int:
"""Preprocess the mono corpus
Parameters
----------
corpus_paths
Corpus paths
out_path
Write the results to the output path
chunk_size
Approximately split the corpus files into multiple chunks
num_process
The number of processes
Returns
-------
line_count
The number of lines in the final filtered file
"""
start = time.time()
total_line_count = 0
filtered_line_count = 0
def chunk_iterator(step=10):
for path in corpus_paths:
line_pos = get_line_byte_start(path)
line_size = line_pos[1:] - line_pos[:-1]
num_lines = line_pos.shape[0] - 1
budget = chunk_size
chunk_start = 0
cur_chunk_size = 0
for i in range(0, num_lines, step):
line_batch_num = min(num_lines - i, step)
batch_line_size = line_size[i:(i + line_batch_num)].sum()
budget -= batch_line_size
cur_chunk_size += batch_line_size
if budget <= 0 or i + step >= num_lines:
yield path, chunk_start, cur_chunk_size
chunk_start += cur_chunk_size
cur_chunk_size = 0
budget = chunk_size
with open(out_path, 'w', encoding='utf-8', newline='\n') as out_f:
with multiprocessing.Pool(num_process) as pool:
for i, (processed_lines, unfiltered_line_num) in \
enumerate(pool.imap(self.process_chunk, chunk_iterator())):
out_f.write('\n'.join(processed_lines) + '\n')
filtered_line_count += len(processed_lines)
total_line_count += unfiltered_line_num
if (i + 1) % 100 == 0:
print('Chunk {}, #Lines Processed: {}, Filtered: {}, Remain: {}'
.format(i + 1, total_line_count,
total_line_count - filtered_line_count,
filtered_line_count))
end = time.time()
print('Done, #Lines {}/{}, Time spent {}'.format(filtered_line_count,
total_line_count,
end - start))
return filtered_line_count | Preprocess the mono corpus
Parameters
----------
corpus_paths
Corpus paths
out_path
Write the results to the output path
chunk_size
Approximately split the corpus files into multiple chunks
num_process
The number of processes
Returns
-------
line_count
The number of lines in the final filtered file
| process_mono_corpus | python | dmlc/gluon-nlp | scripts/processing/clean_tok_corpus.py | https://github.com/dmlc/gluon-nlp/blob/master/scripts/processing/clean_tok_corpus.py | Apache-2.0 |
def calc_approx_error(expected_tensor: np.ndarray, observed_tensor: np.ndarray) -> float:
'''
Calculating relative error for one tensor
'''
error = observed_tensor - expected_tensor
absolute_error = np.abs(error)
mean_absolute_error = absolute_error.mean()
mean_expected_value = np.abs(expected_tensor).mean()
error = mean_absolute_error / mean_expected_value
return error |
Calculating relative error for one tensor
| calc_approx_error | python | dmlc/gluon-nlp | scripts/question_answering/custom_strategy.py | https://github.com/dmlc/gluon-nlp/blob/master/scripts/question_answering/custom_strategy.py | Apache-2.0 |
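A tiny numeric check of the relative-error metric above; the values are arbitrary.

```python
import numpy as np

expected = np.array([1.0, 2.0, -4.0])
observed = np.array([1.1, 1.9, -4.2])
# mean(|observed - expected|) / mean(|expected|) = (0.4 / 3) / (7 / 3) ~ 0.057
print(calc_approx_error(expected, observed))
```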
def get_approx_errors(expected_tensors, observed_tensors):
'''
Calculating relative error for multiple tensors: Dict[tensors_name: str, tensor: np.ndarray]
'''
errors = {}
for node_name in observed_tensors.keys():
expected_tensor = expected_tensors[node_name][node_name]
observed_tensor = observed_tensors[node_name][node_name]
errors[node_name] = calc_approx_error(expected_tensor, observed_tensor)
return errors |
Calculating relative error for multiple tensors: Dict[tensors_name: str, tensor: np.ndarray]
| get_approx_errors | python | dmlc/gluon-nlp | scripts/question_answering/custom_strategy.py | https://github.com/dmlc/gluon-nlp/blob/master/scripts/question_answering/custom_strategy.py | Apache-2.0 |
def get_qtensors(self, quant_cfg, node_list):
'''
Generating quantized model based on configuration and capturing intermediate tensors
'''
qmodel = self.adaptor.quantize(quant_cfg, self.model, self.calib_dataloader)
tensors = self.adaptor.inspect_tensor(qmodel, self.calib_dataloader, node_list, [1]) # 1 is a batch index
return tensors['activation'][0] # we need to specify that we want activations (layer outputs) because INC also stores weight tensors
# 0 is the first batch |
Generating quantized model based on configuration and capturing intermediate tensors
| get_qtensors | python | dmlc/gluon-nlp | scripts/question_answering/custom_strategy.py | https://github.com/dmlc/gluon-nlp/blob/master/scripts/question_answering/custom_strategy.py | Apache-2.0 |
def bayesian_params_to_tune_configs(self, params):
'''
Creating node configurations from params by mapping each configuration index to the actual configuration
'''
node_cfgs = {}
for node_key, configs in self.opwise_quant_cfgs.items():
if node_key in params:
value = int(params[node_key])
value = min(value, len(configs) - 1)
node_cfgs[node_key] = copy.deepcopy(configs[value])
return node_cfgs |
Creating node configurations from params by mapping each configuration index to the actual configuration
| bayesian_params_to_tune_configs | python | dmlc/gluon-nlp | scripts/question_answering/custom_strategy.py | https://github.com/dmlc/gluon-nlp/blob/master/scripts/question_answering/custom_strategy.py | Apache-2.0 |
def normalize_answer(s):
"""Lower text and remove punctuation, articles and extra whitespace."""
def remove_articles(text):
regex = re.compile(r'\b(a|an|the)\b', re.UNICODE)
return re.sub(regex, ' ', text)
def white_space_fix(text):
return ' '.join(text.split())
def remove_punc(text):
exclude = set(string.punctuation)
return ''.join(ch for ch in text if ch not in exclude)
def lower(text):
return text.lower()
return white_space_fix(remove_articles(remove_punc(lower(s)))) | Lower text and remove punctuation, articles and extra whitespace. | normalize_answer | python | dmlc/gluon-nlp | scripts/question_answering/eval_utils.py | https://github.com/dmlc/gluon-nlp/blob/master/scripts/question_answering/eval_utils.py | Apache-2.0 |
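Two examples of the normalization applied before any SQuAD metric is computed, assuming normalize_answer is in scope.

```python
print(normalize_answer("The  Norman  Conquest!"))  # -> "norman conquest"
print(normalize_answer("an answer, (roughly)"))    # -> "answer roughly"
```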
def compute_f1(a_gold, a_pred):
"""
Compute the token-level f1 scores in which the common tokens are considered
as True Positives. Precision and recall are percentages of the number of
common tokens in the prediction and ground truth, respectively.
"""
gold_toks = get_tokens(a_gold)
pred_toks = get_tokens(a_pred)
common = collections.Counter(gold_toks) & collections.Counter(pred_toks)
num_same = sum(common.values())
if len(gold_toks) == 0 or len(pred_toks) == 0:
# If either is no-answer, then F1 is 1 if they agree, 0 otherwise
return int(gold_toks == pred_toks)
if num_same == 0:
return 0
precision = 1.0 * num_same / len(pred_toks)
recall = 1.0 * num_same / len(gold_toks)
f1 = (2 * precision * recall) / (precision + recall)
return f1 |
Compute the token-level f1 scores in which the common tokens are considered
as True Positives. Precision and recall are percentages of the number of
common tokens in the prediction and ground truth, respectively.
| compute_f1 | python | dmlc/gluon-nlp | scripts/question_answering/eval_utils.py | https://github.com/dmlc/gluon-nlp/blob/master/scripts/question_answering/eval_utils.py | Apache-2.0 |
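A worked example of the token-level F1, assuming compute_f1 and its helper get_tokens (which splits the normalized answer, as in the reference SQuAD script) are in scope: after normalization the article "the" is dropped, both strings have four tokens and share three, so precision = recall = 3/4 and F1 = 0.75.

```python
gold = "in the city of london"
pred = "city of london england"
print(compute_f1(gold, pred))   # 0.75
```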
def find_best_thresh(preds, scores, na_probs, qid_to_has_ans):
"""
Find the best threshold of the raw scores.
The initial score is set to the number of unanswerable questions,
assuming that each unanswerable question is successfully predicted.
During the subsequent traversal, the best threshold is continuously adjusted
according to the difference from this assumption ('diff').
"""
num_no_ans = sum(1 for k in qid_to_has_ans if not qid_to_has_ans[k])
cur_score = num_no_ans
best_score = cur_score
best_thresh = 0.0
# Sort the qids by na_probs in ascending order, so that the questions
# that are more likely to be answerable are processed first.
qid_list = sorted(na_probs, key=lambda k: na_probs[k])
for i, qid in enumerate(qid_list):
if qid not in scores:
continue
if qid_to_has_ans[qid]:
# For the answerable question
diff = scores[qid]
else:
# For the unanswerable question
if preds[qid]:
# Falsely predict the answerability
diff = -1
else:
# Correctly predict the answerability. This is only true if the
# prediction is blank, which is not the case before revision
diff = 0
cur_score += diff
if cur_score > best_score:
# adjust the best thresh over current thresh (na_probs[qid])
best_score = cur_score
best_thresh = na_probs[qid]
return 100.0 * best_score / len(scores), best_thresh |
Find the best threshold of the raw scores.
The initial score is set to the number of unanswerable questions,
assuming that each unanswerable question is successfully predicted.
During the subsequent traversal, the best threshold is continuously adjusted
according to the difference from this assumption ('diff').
| find_best_thresh | python | dmlc/gluon-nlp | scripts/question_answering/eval_utils.py | https://github.com/dmlc/gluon-nlp/blob/master/scripts/question_answering/eval_utils.py | Apache-2.0 |
def revise_unanswerable(preds, na_probs, na_prob_thresh):
"""
Revise the prediction results and return a null string for each unanswerable question
whose unanswerable probability is above the threshold.
Parameters
----------
preds: dict
A dictionary of full prediction of spans
na_probs: dict
A dictionary of unanswerable probabilities
na_prob_thresh: float
threshold of the unanswerable probability
Returns
-------
revised: dict
A dictionary of revised prediction
"""
revised = copy.deepcopy(preds)
for q_id in na_probs.keys():
if na_probs[q_id] > na_prob_thresh:
revised[q_id] = ""
return revised |
Revise the prediction results and return a null string for each unanswerable question
whose unanswerable probability is above the threshold.
Parameters
----------
preds: dict
A dictionary of full prediction of spans
na_probs: dict
A dictionary of unanswerable probabilities
na_prob_thresh: float
threshold of the unanswerable probability
Returns
-------
revised: dict
A dictionary of revised prediction
| revise_unanswerable | python | dmlc/gluon-nlp | scripts/question_answering/eval_utils.py | https://github.com/dmlc/gluon-nlp/blob/master/scripts/question_answering/eval_utils.py | Apache-2.0 |
def squad_eval(data_file, preds, na_probs, na_prob_thresh=0.0, revise=False):
"""
Parameters
----------
data_file
dataset(list) or data_file(str)
preds
predictions dictionary
na_probs
probabilities dictionary of unanswerable
na_prob_thresh
threshold of unanswerable
revise
Whether to get the final predictions with impossible answers replaced
with null string ''
Returns
-------
out_eval
A dictionary of output results
(preds_out)
A dictionary of final predictions
"""
if isinstance(data_file, str):
with open(data_file) as f:
dataset_json = json.load(f)
dataset = dataset_json['data']
elif isinstance(data_file, list):
dataset = data_file
if na_probs is None:
na_probs = {k: 0.0 for k in preds}
# not necessary to revise results of SQuAD 1.1
revise = False
qid_to_has_ans = make_qid_to_has_ans(dataset) # maps qid to True/False
has_ans_qids = [k for k, v in qid_to_has_ans.items() if v]
no_ans_qids = [k for k, v in qid_to_has_ans.items() if not v]
exact_raw, f1_raw = get_raw_scores(dataset, preds)
exact_thresh = apply_no_ans_threshold(exact_raw, na_probs, qid_to_has_ans,
na_prob_thresh)
f1_thresh = apply_no_ans_threshold(f1_raw, na_probs, qid_to_has_ans,
na_prob_thresh)
out_eval = make_eval_dict(exact_thresh, f1_thresh)
if has_ans_qids:
has_ans_eval = make_eval_dict(
exact_thresh, f1_thresh, qid_list=has_ans_qids)
merge_eval(out_eval, has_ans_eval, 'HasAns')
if no_ans_qids:
no_ans_eval = make_eval_dict(
exact_thresh, f1_thresh, qid_list=no_ans_qids)
merge_eval(out_eval, no_ans_eval, 'NoAns')
find_all_best_thresh(out_eval, preds, exact_raw,
f1_raw, na_probs, qid_to_has_ans)
if revise:
thresh = (out_eval['best_exact_thresh'] +
out_eval['best_f1_thresh']) * 0.5
preds_out = revise_unanswerable(preds, na_probs, thresh)
return out_eval, preds_out
else:
return out_eval, preds |
Parameters
----------
data_file
dataset(list) or data_file(str)
preds
predictions dictionary
na_probs
probabilities dictionary of unanswerable
na_prob_thresh
threshold of unanswerable
revise
Whether to get the final predictions with impossible answers replaced
with null string ''
Returns
-------
out_eval
A dictionary of output results
(preds_out)
A dictionary of final predictions
| squad_eval | python | dmlc/gluon-nlp | scripts/question_answering/eval_utils.py | https://github.com/dmlc/gluon-nlp/blob/master/scripts/question_answering/eval_utils.py | Apache-2.0 |
def forward(self, tokens, token_types, valid_length, p_mask):
"""
Parameters
----------
tokens
Shape (batch_size, seq_length)
The merged input tokens
token_types
Shape (batch_size, seq_length)
Token types for the sequences, used to indicate whether the word belongs to the
first sentence or the second one.
valid_length
Shape (batch_size,)
Valid length of the sequence. This is used to mask the padded tokens.
p_mask
The mask that is associated with the tokens.
Returns
-------
start_logits
Shape (batch_size, sequence_length)
The log-softmax scores that the position is the start position.
end_logits
Shape (batch_size, sequence_length)
The log-softmax scores that the position is the end position.
"""
# Get contextual embedding with the shape (batch_size, sequence_length, C)
if self.use_segmentation:
contextual_embeddings = self.backbone(tokens, token_types, valid_length)
else:
contextual_embeddings = self.backbone(tokens, valid_length)
scores = self.qa_outputs(contextual_embeddings)
start_scores = scores[:, :, 0]
end_scores = scores[:, :, 1]
start_logits = masked_logsoftmax(start_scores, mask=p_mask, axis=-1)
end_logits = masked_logsoftmax(end_scores, mask=p_mask, axis=-1)
return start_logits, end_logits |
Parameters
----------
tokens
Shape (batch_size, seq_length)
The merged input tokens
token_types
Shape (batch_size, seq_length)
Token types for the sequences, used to indicate whether the word belongs to the
first sentence or the second one.
valid_length
Shape (batch_size,)
Valid length of the sequence. This is used to mask the padded tokens.
p_mask
The mask that is associated with the tokens.
Returns
-------
start_logits
Shape (batch_size, sequence_length)
The log-softmax scores that the position is the start position.
end_logits
Shape (batch_size, sequence_length)
The log-softmax scores that the position is the end position.
| forward | python | dmlc/gluon-nlp | scripts/question_answering/models.py | https://github.com/dmlc/gluon-nlp/blob/master/scripts/question_answering/models.py | Apache-2.0 |
def inference(self, tokens, token_types, valid_length, p_mask,
start_top_n: int = 5, end_top_n: int = 5):
"""Get the inference result with beam search
Parameters
----------
tokens
The input tokens. Shape (batch_size, sequence_length)
token_types
The input token types. Shape (batch_size, sequence_length)
valid_length
The valid length of the tokens. Shape (batch_size,)
p_mask
The mask which indicates that some tokens won't be used in the calculation.
Shape (batch_size, sequence_length)
start_top_n
The number of candidates to select for the start position.
end_top_n
The number of candidates to select for the end position.
Returns
-------
start_top_logits
The top start logits
Shape (batch_size, start_top_n)
start_top_index
Index of the top start logits
Shape (batch_size, start_top_n)
end_top_logits
The top end logits.
Shape (batch_size, end_top_n)
end_top_index
Index of the top end logits
Shape (batch_size, end_top_n)
"""
# Shape (batch_size, sequence_length, C)
if self.use_segmentation:
contextual_embeddings = self.backbone(tokens, token_types, valid_length)
else:
contextual_embeddings = self.backbone(tokens, valid_length)
scores = self.qa_outputs(contextual_embeddings)
start_scores = scores[:, :, 0]
end_scores = scores[:, :, 1]
start_logits = masked_logsoftmax(start_scores, mask=p_mask, axis=-1)
end_logits = masked_logsoftmax(end_scores, mask=p_mask, axis=-1)
# The shape of start_top_index will be (..., start_top_n)
start_top_logits, start_top_index = mx.npx.topk(start_logits, k=start_top_n, axis=-1,
ret_typ='both')
# Note that end_top_index and end_top_log_probs have shape (bsz, start_n_top, end_n_top)
# So that for each start position, there are end_n_top end positions on the third dim.
end_top_logits, end_top_index = mx.npx.topk(end_logits, k=end_top_n, axis=-1,
ret_typ='both')
return start_top_logits, start_top_index, end_top_logits, end_top_index | Get the inference result with beam search
Parameters
----------
tokens
The input tokens. Shape (batch_size, sequence_length)
token_types
The input token types. Shape (batch_size, sequence_length)
valid_length
The valid length of the tokens. Shape (batch_size,)
p_mask
The mask which indicates that some tokens won't be used in the calculation.
Shape (batch_size, sequence_length)
start_top_n
The number of candidates to select for the start position.
end_top_n
The number of candidates to select for the end position.
Returns
-------
start_top_logits
The top start logits
Shape (batch_size, start_top_n)
start_top_index
Index of the top start logits
Shape (batch_size, start_top_n)
end_top_logits
The top end logits.
Shape (batch_size, end_top_n)
end_top_index
Index of the top end logits
Shape (batch_size, end_top_n)
| inference | python | dmlc/gluon-nlp | scripts/question_answering/models.py | https://github.com/dmlc/gluon-nlp/blob/master/scripts/question_answering/models.py | Apache-2.0 |
def get_end_logits(self, contextual_embedding, start_positions, p_mask):
"""
Parameters
----------
contextual_embedding
Shape (batch_size, sequence_length, C)
start_positions
Shape (batch_size, N)
We process multiple candidates simultaneously
p_mask
Shape (batch_size, sequence_length)
Returns
-------
end_logits
Shape (batch_size, N, sequence_length)
"""
# Select the features at the start_positions
# start_feature will have shape (batch_size, N, C)
start_features = select_vectors_by_position(contextual_embedding, start_positions)
# Concatenate the start_feature and the contextual_embedding
contextual_embedding = np.expand_dims(contextual_embedding, axis=1) # (B, 1, T, C)
start_features = np.expand_dims(start_features, axis=2) # (B, N, 1, C)
concat_features = np.concatenate([npx.broadcast_like(start_features,
contextual_embedding, 2, 2),
npx.broadcast_like(contextual_embedding,
start_features, 1, 1)],
axis=-1) # (B, N, T, 2C)
end_scores = self.end_scores(concat_features)
end_scores = np.squeeze(end_scores, -1)
end_logits = masked_logsoftmax(end_scores, mask=np.expand_dims(p_mask, axis=1),
axis=-1)
return end_logits |
Parameters
----------
contextual_embedding
Shape (batch_size, sequence_length, C)
start_positions
Shape (batch_size, N)
We process multiple candidates simultaneously
p_mask
Shape (batch_size, sequence_length)
Returns
-------
end_logits
Shape (batch_size, N, sequence_length)
| get_end_logits | python | dmlc/gluon-nlp | scripts/question_answering/models.py | https://github.com/dmlc/gluon-nlp/blob/master/scripts/question_answering/models.py | Apache-2.0 |
def get_answerable_logits(self, contextual_embedding, p_mask):
"""Get the answerable logits.
Parameters
----------
contextual_embedding
Shape (batch_size, sequence_length, C)
p_mask
Shape (batch_size, sequence_length)
Mask the sequence.
0 --> Denote that the element is masked,
1 --> Denote that the element is not masked
Returns
-------
answerable_logits
Shape (batch_size, 2)
"""
# Shape (batch_size, sequence_length)
start_scores = np.squeeze(self.start_scores(contextual_embedding), -1)
start_score_weights = masked_softmax(start_scores, p_mask, axis=-1)
start_agg_feature = npx.batch_dot(np.expand_dims(start_score_weights, axis=1),
contextual_embedding)
start_agg_feature = np.squeeze(start_agg_feature, 1)
cls_feature = contextual_embedding[:, 0, :]
answerable_scores = self.answerable_scores(np.concatenate([start_agg_feature,
cls_feature], axis=-1))
answerable_logits = npx.log_softmax(answerable_scores, axis=-1)
return answerable_logits | Get the answerable logits.
Parameters
----------
contextual_embedding
Shape (batch_size, sequence_length, C)
p_mask
Shape (batch_size, sequence_length)
Mask the sequence.
0 --> Denote that the element is masked,
1 --> Denote that the element is not masked
Returns
-------
answerable_logits
Shape (batch_size, 2)
| get_answerable_logits | python | dmlc/gluon-nlp | scripts/question_answering/models.py | https://github.com/dmlc/gluon-nlp/blob/master/scripts/question_answering/models.py | Apache-2.0 |
def forward(self, tokens, token_types, valid_length, p_mask, start_position):
"""
Parameters
----------
tokens
Shape (batch_size, sequence_length)
token_types
Shape (batch_size, sequence_length)
valid_length
Shape (batch_size,)
p_mask
Shape (batch_size, sequence_length)
start_position
Shape (batch_size,)
Returns
-------
start_logits
Shape (batch_size, sequence_length)
end_logits
Shape (batch_size, sequence_length)
answerable_logits
"""
backbone_net = self.backbone
if self.quantized_backbone is not None:
backbone_net = self.quantized_backbone
if self.use_segmentation:
contextual_embeddings = backbone_net(tokens, token_types, valid_length)
else:
contextual_embeddings = backbone_net(tokens, valid_length)
start_logits = self.get_start_logits(contextual_embeddings, p_mask)
end_logits = self.get_end_logits(contextual_embeddings,
np.expand_dims(start_position, axis=1),
p_mask)
end_logits = np.squeeze(end_logits, axis=1)
answerable_logits = self.get_answerable_logits(contextual_embeddings, p_mask)
return start_logits, end_logits, answerable_logits |
Parameters
----------
tokens
Shape (batch_size, sequence_length)
token_types
Shape (batch_size, sequence_length)
valid_length
Shape (batch_size,)
p_mask
Shape (batch_size, sequence_length)
start_position
Shape (batch_size,)
Returns
-------
start_logits
Shape (batch_size, sequence_length)
end_logits
Shape (batch_size, sequence_length)
answerable_logits
| forward | python | dmlc/gluon-nlp | scripts/question_answering/models.py | https://github.com/dmlc/gluon-nlp/blob/master/scripts/question_answering/models.py | Apache-2.0 |
def inference(self, tokens, token_types, valid_length, p_mask,
start_top_n: int = 5, end_top_n: int = 5):
"""Get the inference result with beam search
Parameters
----------
tokens
The input tokens. Shape (batch_size, sequence_length)
token_types
The input token types. Shape (batch_size, sequence_length)
valid_length
The valid length of the tokens. Shape (batch_size,)
p_mask
The mask which indicates that some tokens won't be used in the calculation.
Shape (batch_size, sequence_length)
start_top_n
The number of candidates to select for the start position.
end_top_n
The number of candidates to select for the end position.
Returns
-------
start_top_logits
The top start logits
Shape (batch_size, start_top_n)
start_top_index
Index of the top start logits
Shape (batch_size, start_top_n)
end_top_logits
The top end logits.
Shape (batch_size, start_top_n, end_top_n)
end_top_index
Index of the top end logits
Shape (batch_size, start_top_n, end_top_n)
answerable_logits
The answerable logits. Here 0 --> answerable and 1 --> not answerable.
Shape (batch_size, sequence_length, 2)
"""
backbone_net = self.backbone
if self.quantized_backbone is not None:
backbone_net = self.quantized_backbone
# Shape (batch_size, sequence_length, C)
if self.use_segmentation:
contextual_embeddings = backbone_net(tokens, token_types, valid_length)
else:
contextual_embeddings = backbone_net(tokens, valid_length)
start_logits = self.get_start_logits(contextual_embeddings, p_mask)
# The shape of start_top_index will be (..., start_top_n)
start_top_logits, start_top_index = mx.npx.topk(start_logits, k=start_top_n, axis=-1,
ret_typ='both')
end_logits = self.get_end_logits(contextual_embeddings, start_top_index, p_mask)
# Note that end_top_index and end_top_log_probs have shape (bsz, start_n_top, end_n_top)
# So that for each start position, there are end_n_top end positions on the third dim.
end_top_logits, end_top_index = mx.npx.topk(end_logits, k=end_top_n, axis=-1,
ret_typ='both')
answerable_logits = self.get_answerable_logits(contextual_embeddings, p_mask)
return start_top_logits, start_top_index, end_top_logits, end_top_index, \
answerable_logits | Get the inference result with beam search
Parameters
----------
tokens
The input tokens. Shape (batch_size, sequence_length)
token_types
The input token types. Shape (batch_size, sequence_length)
valid_length
The valid length of the tokens. Shape (batch_size,)
p_mask
The mask which indicates that some tokens won't be used in the calculation.
Shape (batch_size, sequence_length)
start_top_n
The number of candidates to select for the start position.
end_top_n
The number of candidates to select for the end position.
Returns
-------
start_top_logits
The top start logits
Shape (batch_size, start_top_n)
start_top_index
Index of the top start logits
Shape (batch_size, start_top_n)
end_top_logits
The top end logits.
Shape (batch_size, start_top_n, end_top_n)
end_top_index
Index of the top end logits
Shape (batch_size, start_top_n, end_top_n)
answerable_logits
The answerable logits. Here 0 --> answerable and 1 --> not answerable.
Shape (batch_size, sequence_length, 2)
| inference | python | dmlc/gluon-nlp | scripts/question_answering/models.py | https://github.com/dmlc/gluon-nlp/blob/master/scripts/question_answering/models.py | Apache-2.0 |
def __init__(self, tokenizer, doc_stride, max_seq_length, max_query_length):
"""
Parameters
----------
tokenizer
The tokenizer
doc_stride
The stride to chunk the document
max_seq_length
Maximum length of the merged data
max_query_length
Maximum query length
"""
self._tokenizer = tokenizer
self._doc_stride = doc_stride
self._max_seq_length = max_seq_length
self._max_query_length = max_query_length
vocab = tokenizer.vocab
self.pad_id = vocab.pad_id
# For the RoBERTa model, take the special token <s> as [CLS] and </s> as [SEP]
self.cls_id = vocab.bos_id if 'cls_token' not in vocab.special_token_keys else vocab.cls_id
self.sep_id = vocab.eos_id if 'sep_token' not in vocab.special_token_keys else vocab.sep_id
# TODO(sxjscience) Consider to combine the NamedTuple and batchify functionality.
self.BatchifyFunction = bf.NamedTuple(ChunkFeature,
{'qas_id': bf.List(),
'data': bf.Pad(val=self.pad_id, round_to=args.round_to),
'valid_length': bf.Stack(),
'segment_ids': bf.Pad(round_to=args.round_to),
'masks': bf.Pad(val=1, round_to=args.round_to),
'is_impossible': bf.Stack(),
'gt_start': bf.Stack(),
'gt_end': bf.Stack(),
'context_offset': bf.Stack(),
'chunk_start': bf.Stack(),
'chunk_length': bf.Stack()}) |
Parameters
----------
tokenizer
The tokenizer
doc_stride
The stride to chunk the document
max_seq_length
Maximum length of the merged data
max_query_length
Maximum query length
| __init__ | python | dmlc/gluon-nlp | scripts/question_answering/run_squad.py | https://github.com/dmlc/gluon-nlp/blob/master/scripts/question_answering/run_squad.py | Apache-2.0 |
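A small stand-alone illustration (plain NumPy rather than gluonnlp.data.batchify, with made-up values) of what Pad(val=pad_id, round_to=...) does when batching variable-length id sequences: pad with pad_id up to the longest length in the batch, rounded up to a multiple of round_to.

import numpy as np

pad_id, round_to = 0, 8
batch = [[5, 6, 7], [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]]    # lengths 3 and 11

max_len = max(len(seq) for seq in batch)
padded_len = -(-max_len // round_to) * round_to              # 11 rounded up to 16
out = np.full((len(batch), padded_len), pad_id, dtype=np.int32)
for i, seq in enumerate(batch):
    out[i, :len(seq)] = seq
print(out.shape)   # (2, 16)
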
def process_sample(self, feature: SquadFeature):
"""Process the data to the following format.
Note that we mask all the special tokens except the CLS token. The reason for not masking
the CLS token is that if the question is not answerable, we will set the start and end to
be 0.
Merged: <CLS> Question <SEP> Context <SEP>
Segment IDs: 0 0 0 1 1
Mask: 0 1 1 0 1
Here, we need to emphasize that when mask = 1, the data are actually not masked!
Parameters
----------
feature
Tokenized SQuAD feature
Returns
-------
ret
Divide the feature into multiple chunks and extract the feature which contains
the following:
- data
The data that concatenates the query and the context + special tokens
- valid_length
The valid_length of the data
- segment_ids
We assign the query part as segment 0 and the context part as segment 1.
- masks
We mask all the special tokens. 1 --> not masked, 0 --> masked.
- is_impossible
Whether the provided context is impossible to answer or not.
- gt_start
The ground-truth start location of the span
- gt_end
The ground-truth end location of the span
- chunk_start
The start of the chunk
- chunk_length
The length of the chunk
"""
ret = []
truncated_query_ids = feature.query_token_ids[:self._max_query_length]
chunks = feature.get_chunks(
doc_stride=self._doc_stride,
max_chunk_length=self._max_seq_length - len(truncated_query_ids) - 3)
for chunk in chunks:
data = np.array([self.cls_id] + truncated_query_ids + [self.sep_id] +
feature.context_token_ids[chunk.start:(chunk.start + chunk.length)] +
[self.sep_id], dtype=np.int32)
valid_length = len(data)
segment_ids = np.array([0] + [0] * len(truncated_query_ids) +
[0] + [1] * chunk.length + [1], dtype=np.int32)
masks = np.array([0] + [1] * len(truncated_query_ids) + [1] + [0] * chunk.length + [1],
dtype=np.int32)
context_offset = len(truncated_query_ids) + 2
if chunk.gt_start_pos is None and chunk.gt_end_pos is None:
start_pos = 0
end_pos = 0
else:
# Here, we increase the start and end because we put query before context
start_pos = chunk.gt_start_pos + context_offset
end_pos = chunk.gt_end_pos + context_offset
chunk_feature = ChunkFeature(qas_id=feature.qas_id,
data=data,
valid_length=valid_length,
segment_ids=segment_ids,
masks=masks,
is_impossible=chunk.is_impossible,
gt_start=start_pos,
gt_end=end_pos,
context_offset=context_offset,
chunk_start=chunk.start,
chunk_length=chunk.length)
ret.append(chunk_feature)
return ret | Process the data to the following format.
Note that we mask all the special tokens except the CLS token. The reason for not masking
the CLS token is that if the question is not answerable, we will set the start and end to
be 0.
Merged: <CLS> Question <SEP> Context <SEP>
Segment IDs: 0 0 0 1 1
Mask: 0 1 1 0 1
Here, we need to emphasize that when mask = 1, the data are actually not masked!
Parameters
----------
feature
Tokenized SQuAD feature
Returns
-------
ret
Divide the feature into multiple chunks and extract the feature which contains
the following:
- data
The data that concatenates the query and the context + special tokens
- valid_length
The valid_length of the data
- segment_ids
We assign the query part as segment 0 and the context part as segment 1.
- masks
We mask all the special tokens. 1 --> not masked, 0 --> masked.
- is_impossible
Whether the provided context is impossible to answer or not.
- gt_start
The ground-truth start location of the span
- gt_end
The ground-truth end location of the span
- chunk_start
The start of the chunk
- chunk_length
The length of the chunk
| process_sample | python | dmlc/gluon-nlp | scripts/question_answering/run_squad.py | https://github.com/dmlc/gluon-nlp/blob/master/scripts/question_answering/run_squad.py | Apache-2.0 |
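A toy walk-through (hypothetical token ids) of the per-chunk layout built above: <CLS> query <SEP> context <SEP>, segment id 0 for the query part and 1 for the context part, and the 0/1 mask pattern documented in the docstring (0 on CLS and context tokens, 1 on query and SEP tokens).

import numpy as np

cls_id, sep_id = 101, 102            # hypothetical special token ids
query_ids = [2054, 2003]             # hypothetical query of length 2
context_ids = [3000, 3001, 3002]     # hypothetical context chunk of length 3

data = np.array([cls_id] + query_ids + [sep_id] + context_ids + [sep_id], dtype=np.int32)
segment_ids = np.array([0] + [0] * len(query_ids) + [0] + [1] * len(context_ids) + [1],
                       dtype=np.int32)
masks = np.array([0] + [1] * len(query_ids) + [1] + [0] * len(context_ids) + [1],
                 dtype=np.int32)
context_offset = len(query_ids) + 2  # index of the first context token in `data`
print(data)
print(segment_ids)
print(masks)
print(context_offset)
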
def get_train(self, features, skip_unreliable=True):
"""Get the training dataset
Parameters
----------
features
skip_unreliable
Whether to skip the unreliable spans in the training set
Returns
-------
train_dataset
num_token_answer_mismatch
num_unreliable
"""
train_dataset = []
num_token_answer_mismatch = 0
num_unreliable = 0
for feature in features:
if feature.token_answer_mismatch:
num_token_answer_mismatch += 1
if feature.unreliable_span:
num_unreliable += 1
if skip_unreliable and feature.unreliable_span:
# Skip when not reliable
continue
# Process the feature
chunk_features = self.process_sample(feature)
train_dataset.extend(chunk_features)
return train_dataset, num_token_answer_mismatch, num_unreliable | Get the training dataset
Parameters
----------
features
skip_unreliable
Whether to skip the unreliable spans in the training set
Returns
-------
train_dataset
num_token_answer_mismatch
num_unreliable
| get_train | python | dmlc/gluon-nlp | scripts/question_answering/run_squad.py | https://github.com/dmlc/gluon-nlp/blob/master/scripts/question_answering/run_squad.py | Apache-2.0 |
def get_squad_features(args, tokenizer, segment):
"""
    Get processed data features of SquadExample objects
Parameters
----------
args : argparse.Namespace
tokenizer:
Tokenizer instance
segment: str
train or dev
Returns
-------
data_features
The list of processed data features
"""
data_cache_path = os.path.join(CACHE_PATH,
'{}_{}_squad_{}.ndjson'.format(
segment, args.model_name, args.version))
is_training = (segment == 'train')
if os.path.exists(data_cache_path) and not args.overwrite_cache:
data_features = []
with open(data_cache_path, 'r') as f:
for line in f:
data_features.append(SquadFeature.from_json(line))
logging.info('Found cached data features, load from {}'.format(data_cache_path))
else:
data_examples = get_squad_examples(args.data_dir, segment=segment, version=args.version)
start = time.time()
num_process = min(cpu_count(), 8)
logging.info('Tokenize Data:')
with Pool(num_process) as pool:
data_features = pool.map(functools.partial(convert_squad_example_to_feature,
tokenizer=tokenizer,
is_training=is_training), data_examples)
logging.info('Done! Time spent:{:.2f} seconds'.format(time.time() - start))
with open(data_cache_path, 'w', encoding='utf-8') as f:
for feature in data_features:
f.write(feature.to_json() + '\n')
return data_features |
    Get processed data features of SquadExample objects
Parameters
----------
args : argparse.Namespace
tokenizer:
Tokenizer instance
segment: str
train or dev
Returns
-------
data_features
The list of processed data features
| get_squad_features | python | dmlc/gluon-nlp | scripts/question_answering/run_squad.py | https://github.com/dmlc/gluon-nlp/blob/master/scripts/question_answering/run_squad.py | Apache-2.0 |
def get_network(model_name,
ctx_l,
dropout=0.1,
checkpoint_path=None,
backbone_path=None,
dtype='float32'):
"""
    Get the network that fine-tunes the Question Answering Task
Parameters
----------
model_name : str
The model name of the backbone model
ctx_l :
Context list of training device like [mx.gpu(0), mx.gpu(1)]
dropout : float
Dropout probability of the task specified layer
checkpoint_path: str
Path to a Fine-tuned checkpoint
backbone_path: str
Path to the backbone model to be loaded in qa_net
Returns
-------
cfg
tokenizer
qa_net
use_segmentation
"""
# Create the network
use_segmentation = 'roberta' not in model_name and 'xlmr' not in model_name
Model, cfg, tokenizer, download_params_path, _ = \
get_backbone(model_name, load_backbone=not backbone_path)
backbone = Model.from_cfg(cfg, use_pooler=False, dtype=dtype)
# Load local backbone parameters if backbone_path provided.
# Otherwise, download backbone parameters from gluon zoo.
backbone_params_path = backbone_path if backbone_path else download_params_path
if checkpoint_path is None:
backbone.load_parameters(backbone_params_path, ignore_extra=True,
ctx=ctx_l, cast_dtype=True)
num_params, num_fixed_params\
= count_parameters(deduplicate_param_dict(backbone.collect_params()))
logging.info(
            'Loading Backbone Model from {}, with total/fixed parameters={}/{}'.format(
backbone_params_path, num_params, num_fixed_params))
qa_net = ModelForQAConditionalV1(backbone=backbone,
dropout_prob=dropout,
use_segmentation=use_segmentation,
weight_initializer=TruncNorm(stdev=0.02))
if checkpoint_path is None:
# Ignore the UserWarning during initialization,
# There is no need to re-initialize the parameters of backbone
qa_net.initialize(ctx=ctx_l)
else:
qa_net.load_parameters(checkpoint_path, ctx=ctx_l, cast_dtype=True)
qa_net.hybridize()
return cfg, tokenizer, qa_net, use_segmentation |
    Get the network that fine-tunes the Question Answering Task
Parameters
----------
model_name : str
The model name of the backbone model
ctx_l :
Context list of training device like [mx.gpu(0), mx.gpu(1)]
dropout : float
Dropout probability of the task specified layer
checkpoint_path: str
Path to a Fine-tuned checkpoint
backbone_path: str
Path to the backbone model to be loaded in qa_net
Returns
-------
cfg
tokenizer
qa_net
use_segmentation
| get_network | python | dmlc/gluon-nlp | scripts/question_answering/run_squad.py | https://github.com/dmlc/gluon-nlp/blob/master/scripts/question_answering/run_squad.py | Apache-2.0 |
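A hedged usage sketch of get_network (the backbone name and context list are illustrative; this assumes a working MXNet + GluonNLP installation):

import mxnet as mx

ctx_l = [mx.cpu()]   # or e.g. [mx.gpu(0), mx.gpu(1)]
cfg, tokenizer, qa_net, use_segmentation = get_network(
    'google_electra_base',   # illustrative backbone name
    ctx_l,
    dropout=0.1,
    checkpoint_path=None,    # fresh task head on top of the pretrained backbone
    backbone_path=None,
    dtype='float32')
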
def setup_logging(args, local_rank):
"""
    Set up the logging configuration as well as the random seed
"""
logging_config(args.output_dir,
                   name='finetune_squad{}'.format(args.version),  # avoid race
overwrite_handler=True,
console=(local_rank == 0))
logging.info(args)
set_seed(args.seed)
logging.debug('Random seed set to {}'.format(args.seed)) |
    Set up the logging configuration as well as the random seed
| setup_logging | python | dmlc/gluon-nlp | scripts/question_answering/run_squad.py | https://github.com/dmlc/gluon-nlp/blob/master/scripts/question_answering/run_squad.py | Apache-2.0 |
def predict_extended(original_feature,
chunked_features,
results,
n_best_size,
max_answer_length=64,
start_top_n=5,
end_top_n=5):
"""Get prediction results for SQuAD.
Start Logits: (B, N_start)
End Logits: (B, N_start, N_end)
Parameters
----------
original_feature:
The original SquadFeature before chunked
chunked_features
List of ChunkFeatures
results
List of model predictions for span start and span end.
n_best_size
Best N results written to file
max_answer_length
Maximum length of the answer tokens.
start_top_n
Number of start-position candidates
end_top_n
Number of end-position candidates
Returns
-------
not_answerable_score
Model's estimate that the question is not answerable.
prediction
The final prediction.
nbest_json
n-best predictions with their probabilities.
"""
    not_answerable_score = 1000000  # Score for not-answerable. We set it to a large positive value.
# If one chunk votes for answerable, we will treat the context as answerable,
# Thus, the overall not_answerable_score = min(chunk_not_answerable_score)
all_start_idx = []
all_end_idx = []
all_pred_score = []
context_length = len(original_feature.context_token_ids)
token_max_context_score = np.full((len(chunked_features), context_length),
-np.inf,
dtype=np.float32)
for i, chunked_feature in enumerate(chunked_features):
chunk_start = chunked_feature.chunk_start
chunk_length = chunked_feature.chunk_length
for j in range(chunk_start, chunk_start + chunk_length):
# This is a heuristic score
# TODO investigate the impact
token_max_context_score[i, j] = min(j - chunk_start,
chunk_start + chunk_length - 1 - j) \
+ 0.01 * chunk_length
token_max_chunk_id = token_max_context_score.argmax(axis=0)
for chunk_id, (result, chunk_feature) in enumerate(zip(results, chunked_features)):
# We use the log-likelihood as the not answerable score.
# Thus, a high score indicates that the answer is not answerable
cur_not_answerable_score = float(result.answerable_logits[1])
not_answerable_score = min(not_answerable_score, cur_not_answerable_score)
# Calculate the start_logits + end_logits as the overall score
context_offset = chunk_feature.context_offset
chunk_start = chunk_feature.chunk_start
chunk_length = chunk_feature.chunk_length
for i in range(start_top_n):
for j in range(end_top_n):
pred_score = result.start_top_logits[i] + result.end_top_logits[i, j]
start_index = result.start_top_index[i]
end_index = result.end_top_index[i, j]
# We could hypothetically create invalid predictions, e.g., predict
# that the start of the answer span is in the query tokens or out of
# the chunk. We throw out all invalid predictions.
if not (context_offset <= start_index < context_offset + chunk_length) or \
not (context_offset <= end_index < context_offset + chunk_length) or \
end_index < start_index:
continue
pred_answer_length = end_index - start_index + 1
if pred_answer_length > max_answer_length:
continue
start_idx = int(start_index - context_offset + chunk_start)
end_idx = int(end_index - context_offset + chunk_start)
if token_max_chunk_id[start_idx] != chunk_id:
continue
all_start_idx.append(start_idx)
all_end_idx.append(end_idx)
all_pred_score.append(pred_score)
sorted_start_end_score = sorted(zip(all_start_idx, all_end_idx, all_pred_score),
key=lambda args: args[-1], reverse=True)
nbest = []
context_text = original_feature.context_text
context_token_offsets = original_feature.context_token_offsets
seen_predictions = set()
for start_idx, end_idx, pred_score in sorted_start_end_score:
if len(seen_predictions) >= n_best_size:
break
pred_answer = context_text[context_token_offsets[start_idx][0]:
context_token_offsets[end_idx][1]]
seen_predictions.add(pred_answer)
nbest.append((pred_answer, pred_score))
# In very rare edge cases we could have no valid predictions. So we
# just create a nonce prediction in this case to avoid failure.
if len(nbest) == 0:
nbest.append(('', float('-inf')))
all_scores = np.array([ele[1] for ele in nbest], dtype=np.float32)
probs = np.exp(all_scores) / np.sum(np.exp(all_scores))
nbest_json = []
for i, (entry, prob) in enumerate(zip(nbest, probs)):
output = collections.OrderedDict()
output['text'] = entry[0]
output['probability'] = float(prob)
nbest_json.append(output)
assert len(nbest_json) >= 1
return not_answerable_score, nbest[0][0], nbest_json | Get prediction results for SQuAD.
Start Logits: (B, N_start)
End Logits: (B, N_start, N_end)
Parameters
----------
original_feature:
The original SquadFeature before chunked
chunked_features
List of ChunkFeatures
results
List of model predictions for span start and span end.
n_best_size
Best N results written to file
max_answer_length
Maximum length of the answer tokens.
start_top_n
Number of start-position candidates
end_top_n
Number of end-position candidates
Returns
-------
not_answerable_score
Model's estimate that the question is not answerable.
prediction
The final prediction.
nbest_json
n-best predictions with their probabilities.
| predict_extended | python | dmlc/gluon-nlp | scripts/question_answering/run_squad.py | https://github.com/dmlc/gluon-nlp/blob/master/scripts/question_answering/run_squad.py | Apache-2.0 |
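A small numeric sketch (made-up chunk layout) of the max-context heuristic above: within each chunk, a token is scored by its distance to the nearer chunk edge plus 0.01 * chunk_length, and a prediction starting at that token is only kept from the chunk where this score is largest.

import numpy as np

context_length = 10
chunks = [(0, 6), (4, 6)]   # (chunk_start, chunk_length); the two chunks overlap
score = np.full((len(chunks), context_length), -np.inf, dtype=np.float32)
for i, (start, length) in enumerate(chunks):
    for j in range(start, start + length):
        score[i, j] = min(j - start, start + length - 1 - j) + 0.01 * length
token_max_chunk_id = score.argmax(axis=0)
print(token_max_chunk_id)   # which chunk "owns" each context position
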
def collect(self, name, op_name, arr):
"""Callback function for collecting min and max values from an NDArray."""
if name not in self.include_layers:
return
arr = arr.copyto(mx.cpu()).asnumpy()
min_range = np.min(arr)
max_range = np.max(arr)
if (name.find("sg_onednn_fully_connected_eltwise") != -1 or op_name.find("LayerNorm") != -1) \
and max_range > self.clip_max:
max_range = self.clip_max
elif name.find('sg_onednn_fully_connected') != -1 and min_range < self.clip_min:
min_range = self.clip_min
if name in self.min_max_dict:
cur_min_max = self.min_max_dict[name]
self.min_max_dict[name] = (min(cur_min_max[0], min_range),
max(cur_min_max[1], max_range))
else:
self.min_max_dict[name] = (min_range, max_range) | Callback function for collecting min and max values from an NDArray. | collect | python | dmlc/gluon-nlp | scripts/question_answering/run_squad.py | https://github.com/dmlc/gluon-nlp/blob/master/scripts/question_answering/run_squad.py | Apache-2.0 |
def eval_validation(ckpt_name, best_eval):
"""
Model inference during validation or final evaluation.
"""
dev_dataloader = mx.gluon.data.DataLoader(
dev_all_chunk_features,
batchify_fn=dataset_processor.BatchifyFunction,
batch_size=args.eval_batch_size,
num_workers=0,
shuffle=False)
if args.dtype == 'int8':
quantize_and_calibrate(qa_net, dev_dataloader)
log_interval = args.eval_log_interval
all_results = []
epoch_tic = time.time()
tic = time.time()
epoch_size = len(dev_features)
total_num = 0
log_num = 0
for batch_idx, dev_batch in enumerate(grouper(dev_dataloader, len(ctx_l))):
# Predict for each chunk
for sample, ctx in zip(dev_batch, ctx_l):
if sample is None:
continue
# Copy the data to device
tokens = sample.data.as_in_ctx(ctx)
total_num += len(tokens)
log_num += len(tokens)
segment_ids = sample.segment_ids.as_in_ctx(ctx) if use_segmentation else None
valid_length = sample.valid_length.as_in_ctx(ctx)
p_mask = sample.masks.as_in_ctx(ctx)
p_mask = 1 - p_mask # In the network, we use 1 --> no_mask, 0 --> mask
start_top_logits, start_top_index, end_top_logits, end_top_index, answerable_logits \
= qa_net.inference(tokens, segment_ids, valid_length, p_mask,
args.start_top_n, args.end_top_n)
for i, qas_id in enumerate(sample.qas_id):
result = RawResultExtended(qas_id=qas_id,
start_top_logits=start_top_logits[i].asnumpy(),
start_top_index=start_top_index[i].asnumpy(),
end_top_logits=end_top_logits[i].asnumpy(),
end_top_index=end_top_index[i].asnumpy(),
answerable_logits=answerable_logits[i].asnumpy())
all_results.append(result)
# logging
if (batch_idx + 1) % log_interval == 0:
# Output the loss of per step
toc = time.time()
logging.info(
'[batch {}], Time cost={:.2f},'
' Throughput={:.2f} samples/s, ETA={:.2f}h'.format(
batch_idx + 1, toc - tic, log_num / (toc - tic),
(epoch_size - total_num) / (total_num / (toc - epoch_tic)) / 3600))
tic = time.time()
log_num = 0
epoch_toc = time.time()
    logging.info('Time cost=%.2f s, Throughput=%.2f samples/s', epoch_toc - epoch_tic,
total_num / (epoch_toc - epoch_tic))
all_predictions = collections.OrderedDict()
all_nbest_json = collections.OrderedDict()
no_answer_score_json = collections.OrderedDict()
for index, (left_index, right_index) in enumerate(zip(dev_chunk_feature_ptr[:-1],
dev_chunk_feature_ptr[1:])):
chunked_features = dev_all_chunk_features[left_index:right_index]
results = all_results[left_index:right_index]
original_feature = dev_features[index]
qas_ids = set([result.qas_id for result in results] +
[feature.qas_id for feature in chunked_features])
        assert len(qas_ids) == 1, 'Mismatch occurred between features and results'
example_qas_id = list(qas_ids)[0]
assert example_qas_id == original_feature.qas_id, \
            'Mismatch occurred between original feature and chunked features'
not_answerable_score, best_pred, nbest_json = predict_extended(
original_feature=original_feature,
chunked_features=chunked_features,
results=results,
n_best_size=args.n_best_size,
max_answer_length=args.max_answer_length,
start_top_n=args.start_top_n,
end_top_n=args.end_top_n)
no_answer_score_json[example_qas_id] = not_answerable_score
all_predictions[example_qas_id] = best_pred
all_nbest_json[example_qas_id] = nbest_json
if args.version == '2.0':
exact = 'best_exact'
f1 = 'best_f1'
na_prob = no_answer_score_json
else:
exact = 'exact'
f1 = 'f1'
na_prob = None
cur_eval, revised_predictions = squad_eval(
dev_data_path, all_predictions, na_prob, revise=na_prob is not None)
logging.info('The evaluated results are {}'.format(json.dumps(cur_eval)))
cur_metrics = 0.5 * (cur_eval[exact] + cur_eval[f1])
if best_eval:
best_metrics = 0.5 * (best_eval[exact] + best_eval[f1])
else:
best_metrics = 0.
if cur_metrics > best_metrics:
logging.info('The evaluated files are saved in {}'.format(args.output_dir))
output_prediction_file = os.path.join(args.output_dir, 'predictions.json')
output_nbest_file = os.path.join(args.output_dir, 'nbest_predictions.json')
na_prob_file = os.path.join(args.output_dir, 'na_prob.json')
revised_prediction_file = os.path.join(args.output_dir, 'revised_predictions.json')
with open(output_prediction_file, 'w') as of:
of.write(json.dumps(all_predictions, indent=4) + '\n')
with open(output_nbest_file, 'w') as of:
of.write(json.dumps(all_nbest_json, indent=4) + '\n')
with open(na_prob_file, 'w') as of:
of.write(json.dumps(no_answer_score_json, indent=4) + '\n')
with open(revised_prediction_file, 'w') as of:
of.write(json.dumps(revised_predictions, indent=4) + '\n')
best_eval = cur_eval
best_eval.update({'best_ckpt': ckpt_name})
return best_eval |
Model inference during validation or final evaluation.
| eval_validation | python | dmlc/gluon-nlp | scripts/question_answering/run_squad.py | https://github.com/dmlc/gluon-nlp/blob/master/scripts/question_answering/run_squad.py | Apache-2.0 |
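The dev_chunk_feature_ptr used above behaves like a prefix sum over chunks-per-example; a tiny sketch (made-up counts) of how consecutive pointer pairs slice the flat per-chunk results back into per-example groups:

import itertools

chunks_per_example = [1, 3, 2]                                # hypothetical chunk counts
ptr = [0] + list(itertools.accumulate(chunks_per_example))    # [0, 1, 4, 6]
flat_results = ['r0', 'r1', 'r2', 'r3', 'r4', 'r5']           # one entry per chunk
for example_idx, (lo, hi) in enumerate(zip(ptr[:-1], ptr[1:])):
    print(example_idx, flat_results[lo:hi])
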
def __init__(self, tokenizer, doc_stride, max_seq_length, max_query_length):
"""
Parameters
----------
tokenizer
The tokenizer
doc_stride
The stride to chunk the document
max_seq_length
Maximum length of the merged data
max_query_length
Maximum query length
"""
self._tokenizer = tokenizer
self._doc_stride = doc_stride
self._max_seq_length = max_seq_length
self._max_query_length = max_query_length
vocab = tokenizer.vocab
self.pad_id = vocab.pad_id
        # For the RoBERTa model, take the special token <s> as [CLS] and </s> as [SEP]
self.cls_id = vocab.bos_id if 'cls_token' not in vocab.special_token_keys else vocab.cls_id
self.sep_id = vocab.eos_id if 'sep_token' not in vocab.special_token_keys else vocab.sep_id
        # TODO(sxjscience) Consider combining the NamedTuple and batchify functionality.
# Here, we use round_to=8 to improve the throughput.
self.BatchifyFunction = bf.NamedTuple(ChunkFeature,
{'qas_id': bf.List(),
'data': bf.Pad(val=self.pad_id, round_to=8),
'valid_length': bf.Stack(),
'segment_ids': bf.Pad(round_to=8),
'masks': bf.Pad(val=1, round_to=8),
'is_impossible': bf.Stack(),
'gt_start': bf.Stack(),
'gt_end': bf.Stack(),
'context_offset': bf.Stack(),
'chunk_start': bf.Stack(),
'chunk_length': bf.Stack()}) |
Parameters
----------
tokenizer
The tokenizer
doc_stride
The stride to chunk the document
max_seq_length
Maximum length of the merged data
max_query_length
Maximum query length
| __init__ | python | dmlc/gluon-nlp | scripts/question_answering/run_squad_albert.py | https://github.com/dmlc/gluon-nlp/blob/master/scripts/question_answering/run_squad_albert.py | Apache-2.0 |
def process_sample(self, feature: SquadFeature):
"""Process the data to the following format.
Note that we mask all the special tokens except the CLS token. The reason for not masking
the CLS token is that if the question is not answerable, we will set the start and end to
be 0.
Merged: <CLS> Question <SEP> Context <SEP>
Segment IDs: 0 0 0 1 1
Mask: 0 1 1 0 1
Here, we need to emphasize that when mask = 1, the data are actually not masked!
Parameters
----------
feature
Tokenized SQuAD feature
Returns
-------
ret
Divide the feature into multiple chunks and extract the feature which contains
the following:
- data
The data that concatenates the query and the context + special tokens
- valid_length
The valid_length of the data
- segment_ids
We assign the query part as segment 0 and the context part as segment 1.
- masks
We mask all the special tokens. 1 --> not masked, 0 --> masked.
- is_impossible
Whether the provided context is impossible to answer or not.
- gt_start
The ground-truth start location of the span
- gt_end
The ground-truth end location of the span
- chunk_start
The start of the chunk
- chunk_length
The length of the chunk
"""
ret = []
truncated_query_ids = feature.query_token_ids[:self._max_query_length]
chunks = feature.get_chunks(
doc_stride=self._doc_stride,
max_chunk_length=self._max_seq_length - len(truncated_query_ids) - 3)
for chunk in chunks:
data = np.array([self.cls_id] + truncated_query_ids + [self.sep_id] +
feature.context_token_ids[chunk.start:(chunk.start + chunk.length)] +
[self.sep_id], dtype=np.int32)
valid_length = len(data)
segment_ids = np.array([0] + [0] * len(truncated_query_ids) +
[0] + [1] * chunk.length + [1], dtype=np.int32)
masks = np.array([0] + [1] * len(truncated_query_ids) + [1] + [0] * chunk.length + [1],
dtype=np.int32)
context_offset = len(truncated_query_ids) + 2
if chunk.gt_start_pos is None and chunk.gt_end_pos is None:
start_pos = 0
end_pos = 0
else:
# Here, we increase the start and end because we put query before context
start_pos = chunk.gt_start_pos + context_offset
end_pos = chunk.gt_end_pos + context_offset
chunk_feature = ChunkFeature(qas_id=feature.qas_id,
data=data,
valid_length=valid_length,
segment_ids=segment_ids,
masks=masks,
is_impossible=chunk.is_impossible,
gt_start=start_pos,
gt_end=end_pos,
context_offset=context_offset,
chunk_start=chunk.start,
chunk_length=chunk.length)
ret.append(chunk_feature)
return ret | Process the data to the following format.
Note that we mask all the special tokens except the CLS token. The reason for not masking
the CLS token is that if the question is not answerable, we will set the start and end to
be 0.
Merged: <CLS> Question <SEP> Context <SEP>
Segment IDs: 0 0 0 1 1
Mask: 0 1 1 0 1
Here, we need to emphasize that when mask = 1, the data are actually not masked!
Parameters
----------
feature
Tokenized SQuAD feature
Returns
-------
ret
Divide the feature into multiple chunks and extract the feature which contains
the following:
- data
The data that concatenates the query and the context + special tokens
- valid_length
The valid_length of the data
- segment_ids
We assign the query part as segment 0 and the context part as segment 1.
- masks
We mask all the special tokens. 1 --> not masked, 0 --> masked.
- is_impossible
Whether the provided context is impossible to answer or not.
- gt_start
The ground-truth start location of the span
- gt_end
The ground-truth end location of the span
- chunk_start
The start of the chunk
- chunk_length
The length of the chunk
| process_sample | python | dmlc/gluon-nlp | scripts/question_answering/run_squad_albert.py | https://github.com/dmlc/gluon-nlp/blob/master/scripts/question_answering/run_squad_albert.py | Apache-2.0 |
def get_train(self, features, skip_unreliable=True):
"""Get the training dataset
Parameters
----------
features
skip_unreliable
Whether to skip the unreliable spans in the training set
Returns
-------
train_dataset
num_token_answer_mismatch
num_unreliable
"""
train_dataset = []
num_token_answer_mismatch = 0
num_unreliable = 0
for feature in features:
if feature.token_answer_mismatch:
num_token_answer_mismatch += 1
if feature.unreliable_span:
num_unreliable += 1
if skip_unreliable and feature.unreliable_span:
# Skip when not reliable
continue
# Process the feature
chunk_features = self.process_sample(feature)
train_dataset.extend(chunk_features)
return train_dataset, num_token_answer_mismatch, num_unreliable | Get the training dataset
Parameters
----------
features
skip_unreliable
Whether to skip the unreliable spans in the training set
Returns
-------
train_dataset
num_token_answer_mismatch
num_unreliable
| get_train | python | dmlc/gluon-nlp | scripts/question_answering/run_squad_albert.py | https://github.com/dmlc/gluon-nlp/blob/master/scripts/question_answering/run_squad_albert.py | Apache-2.0 |
def get_squad_features(args, tokenizer, segment):
"""
    Get processed data features of SquadExample objects
Parameters
----------
args : argparse.Namespace
tokenizer:
Tokenizer instance
segment: str
train or dev
Returns
-------
data_features
The list of processed data features
"""
data_cache_path = os.path.join(CACHE_PATH,
'{}_{}_squad_{}.ndjson'.format(
segment, args.model_name, args.version))
is_training = (segment == 'train')
if os.path.exists(data_cache_path) and not args.overwrite_cache:
data_features = []
with open(data_cache_path, 'r') as f:
for line in f:
data_features.append(SquadFeature.from_json(line))
logging.info('Found cached data features, load from {}'.format(data_cache_path))
else:
data_examples = get_squad_examples(args.data_dir, segment=segment, version=args.version)
start = time.time()
num_process = min(cpu_count(), 8)
logging.info('Tokenize Data:')
with Pool(num_process) as pool:
data_features = pool.map(functools.partial(convert_squad_example_to_feature,
tokenizer=tokenizer,
is_training=is_training), data_examples)
logging.info('Done! Time spent:{:.2f} seconds'.format(time.time() - start))
with open(data_cache_path, 'w') as f:
for feature in data_features:
f.write(feature.to_json() + '\n')
return data_features |
    Get processed data features of SquadExample objects
Parameters
----------
args : argparse.Namespace
tokenizer:
Tokenizer instance
segment: str
train or dev
Returns
-------
data_features
The list of processed data features
| get_squad_features | python | dmlc/gluon-nlp | scripts/question_answering/run_squad_albert.py | https://github.com/dmlc/gluon-nlp/blob/master/scripts/question_answering/run_squad_albert.py | Apache-2.0 |
def get_network(model_name,
ctx_l,
dropout=0.1,
checkpoint_path=None,
backbone_path=None,
dtype='float32'):
"""
    Get the network that fine-tunes the Question Answering Task
Parameters
----------
model_name : str
The model name of the backbone model
ctx_l :
Context list of training device like [mx.gpu(0), mx.gpu(1)]
dropout : float
Dropout probability of the task specified layer
checkpoint_path: str
Path to a Fine-tuned checkpoint
backbone_path: str
Path to the backbone model to be loaded in qa_net
Returns
-------
cfg
tokenizer
qa_net
use_segmentation
"""
# Create the network
use_segmentation = 'roberta' not in model_name and 'xlmr' not in model_name
Model, cfg, tokenizer, download_params_path, _ = \
get_backbone(model_name, load_backbone=not backbone_path)
backbone = Model.from_cfg(cfg, use_pooler=False, dtype=dtype)
# Load local backbone parameters if backbone_path provided.
# Otherwise, download backbone parameters from gluon zoo.
backbone_params_path = backbone_path if backbone_path else download_params_path
if checkpoint_path is None:
backbone.load_parameters(backbone_params_path, ignore_extra=True,
ctx=ctx_l, cast_dtype=True)
num_params, num_fixed_params\
= count_parameters(deduplicate_param_dict(backbone.collect_params()))
logging.info(
            'Loading Backbone Model from {}, with total/fixed parameters={}/{}'.format(
backbone_params_path, num_params, num_fixed_params))
qa_net = ModelForQAConditionalV1(backbone=backbone,
dropout_prob=dropout,
use_segmentation=use_segmentation,
weight_initializer=TruncNorm(stdev=0.02))
if checkpoint_path is None:
# Ignore the UserWarning during initialization,
# There is no need to re-initialize the parameters of backbone
qa_net.initialize(ctx=ctx_l)
else:
qa_net.load_parameters(checkpoint_path, ctx=ctx_l, cast_dtype=True)
qa_net.hybridize()
return cfg, tokenizer, qa_net, use_segmentation |
    Get the network that fine-tunes the Question Answering Task
Parameters
----------
model_name : str
The model name of the backbone model
ctx_l :
Context list of training device like [mx.gpu(0), mx.gpu(1)]
dropout : float
Dropout probability of the task specified layer
checkpoint_path: str
Path to a Fine-tuned checkpoint
backbone_path: str
Path to the backbone model to be loaded in qa_net
Returns
-------
cfg
tokenizer
qa_net
use_segmentation
| get_network | python | dmlc/gluon-nlp | scripts/question_answering/run_squad_albert.py | https://github.com/dmlc/gluon-nlp/blob/master/scripts/question_answering/run_squad_albert.py | Apache-2.0 |
def setup_logging(args, local_rank):
"""
    Set up the logging configuration as well as the random seed
"""
logging_config(args.output_dir,
name='finetune_squad{}'.format(args.version), # avoid race
overwrite_handler=True,
console=(local_rank == 0))
logging.info(args)
set_seed(args.seed)
logging.debug('Random seed set to {}'.format(args.seed)) |
    Set up the logging configuration as well as the random seed
| setup_logging | python | dmlc/gluon-nlp | scripts/question_answering/run_squad_albert.py | https://github.com/dmlc/gluon-nlp/blob/master/scripts/question_answering/run_squad_albert.py | Apache-2.0 |
def predict_extended(original_feature,
chunked_features,
results,
n_best_size,
max_answer_length=64,
start_top_n=5,
end_top_n=5):
"""Get prediction results for SQuAD.
Start Logits: (B, N_start)
End Logits: (B, N_start, N_end)
Parameters
----------
original_feature:
The original SquadFeature before chunked
chunked_features
List of ChunkFeatures
results
List of model predictions for span start and span end.
n_best_size
Best N results written to file
max_answer_length
Maximum length of the answer tokens.
start_top_n
Number of start-position candidates
end_top_n
Number of end-position candidates
Returns
-------
not_answerable_score
Model's estimate that the question is not answerable.
prediction
The final prediction.
nbest_json
n-best predictions with their probabilities.
"""
    not_answerable_score = 1000000  # Score for not-answerable. We set it to a large positive value.
# If one chunk votes for answerable, we will treat the context as answerable,
# Thus, the overall not_answerable_score = min(chunk_not_answerable_score)
all_start_idx = []
all_end_idx = []
all_pred_score = []
context_length = len(original_feature.context_token_ids)
token_max_context_score = np.full((len(chunked_features), context_length),
-np.inf,
dtype=np.float32)
for i, chunked_feature in enumerate(chunked_features):
chunk_start = chunked_feature.chunk_start
chunk_length = chunked_feature.chunk_length
for j in range(chunk_start, chunk_start + chunk_length):
# This is a heuristic score
# TODO investigate the impact
token_max_context_score[i, j] = min(j - chunk_start,
chunk_start + chunk_length - 1 - j) \
+ 0.01 * chunk_length
token_max_chunk_id = token_max_context_score.argmax(axis=0)
for chunk_id, (result, chunk_feature) in enumerate(zip(results, chunked_features)):
# We use the log-likelihood as the not answerable score.
# Thus, a high score indicates that the answer is not answerable
cur_not_answerable_score = float(result.answerable_logits[1])
not_answerable_score = min(not_answerable_score, cur_not_answerable_score)
# Calculate the start_logits + end_logits as the overall score
context_offset = chunk_feature.context_offset
chunk_start = chunk_feature.chunk_start
chunk_length = chunk_feature.chunk_length
for i in range(start_top_n):
for j in range(end_top_n):
pred_score = result.start_top_logits[i] + result.end_top_logits[i, j]
start_index = result.start_top_index[i]
end_index = result.end_top_index[i, j]
# We could hypothetically create invalid predictions, e.g., predict
# that the start of the answer span is in the query tokens or out of
# the chunk. We throw out all invalid predictions.
if not (context_offset <= start_index < context_offset + chunk_length) or \
not (context_offset <= end_index < context_offset + chunk_length) or \
end_index < start_index:
continue
pred_answer_length = end_index - start_index + 1
if pred_answer_length > max_answer_length:
continue
start_idx = int(start_index - context_offset + chunk_start)
end_idx = int(end_index - context_offset + chunk_start)
if token_max_chunk_id[start_idx] != chunk_id:
continue
all_start_idx.append(start_idx)
all_end_idx.append(end_idx)
all_pred_score.append(pred_score)
sorted_start_end_score = sorted(zip(all_start_idx, all_end_idx, all_pred_score),
key=lambda args: args[-1], reverse=True)
nbest = []
context_text = original_feature.context_text
context_token_offsets = original_feature.context_token_offsets
seen_predictions = set()
for start_idx, end_idx, pred_score in sorted_start_end_score:
if len(seen_predictions) >= n_best_size:
break
pred_answer = context_text[context_token_offsets[start_idx][0]:
context_token_offsets[end_idx][1]]
seen_predictions.add(pred_answer)
nbest.append((pred_answer, pred_score))
# In very rare edge cases we could have no valid predictions. So we
# just create a nonce prediction in this case to avoid failure.
if len(nbest) == 0:
nbest.append(('', float('-inf')))
all_scores = np.array([ele[1] for ele in nbest], dtype=np.float32)
probs = np.exp(all_scores) / np.sum(np.exp(all_scores))
nbest_json = []
for i, (entry, prob) in enumerate(zip(nbest, probs)):
output = collections.OrderedDict()
output['text'] = entry[0]
output['probability'] = float(prob)
nbest_json.append(output)
assert len(nbest_json) >= 1
return not_answerable_score, nbest[0][0], nbest_json | Get prediction results for SQuAD.
Start Logits: (B, N_start)
End Logits: (B, N_start, N_end)
Parameters
----------
original_feature:
The original SquadFeature before chunked
chunked_features
List of ChunkFeatures
results
List of model predictions for span start and span end.
n_best_size
Best N results written to file
max_answer_length
Maximum length of the answer tokens.
start_top_n
Number of start-position candidates
end_top_n
Number of end-position candidates
Returns
-------
not_answerable_score
Model's estimate that the question is not answerable.
prediction
The final prediction.
nbest_json
n-best predictions with their probabilities.
| predict_extended | python | dmlc/gluon-nlp | scripts/question_answering/run_squad_albert.py | https://github.com/dmlc/gluon-nlp/blob/master/scripts/question_answering/run_squad_albert.py | Apache-2.0 |
def eval_validation(backbone):
"""
Model inference during validation or final evaluation.
"""
del qa_net.quantized_backbone
qa_net.quantized_backbone = backbone
dev_dataloader = mx.gluon.data.DataLoader(
dev_all_chunk_features,
batchify_fn=dataset_processor.BatchifyFunction,
batch_size=args.eval_batch_size,
num_workers=0,
shuffle=False)
log_interval = args.eval_log_interval
all_results = []
epoch_tic = time.time()
tic = time.time()
epoch_size = len(dev_features)
total_num = 0
log_num = 0
best_eval = {}
for batch_idx, dev_batch in enumerate(grouper(dev_dataloader, len(ctx_l))):
# Predict for each chunk
for sample, ctx in zip(dev_batch, ctx_l):
if sample is None:
continue
# Copy the data to device
tokens = sample.data.as_in_ctx(ctx)
total_num += len(tokens)
log_num += len(tokens)
segment_ids = sample.segment_ids.as_in_ctx(ctx) if use_segmentation else None
valid_length = sample.valid_length.as_in_ctx(ctx)
p_mask = sample.masks.as_in_ctx(ctx)
p_mask = 1 - p_mask # In the network, we use 1 --> no_mask, 0 --> mask
start_top_logits, start_top_index, end_top_logits, end_top_index, answerable_logits \
= qa_net.inference(tokens, segment_ids, valid_length, p_mask,
args.start_top_n, args.end_top_n)
for i, qas_id in enumerate(sample.qas_id):
result = RawResultExtended(qas_id=qas_id,
start_top_logits=start_top_logits[i].asnumpy(),
start_top_index=start_top_index[i].asnumpy(),
end_top_logits=end_top_logits[i].asnumpy(),
end_top_index=end_top_index[i].asnumpy(),
answerable_logits=answerable_logits[i].asnumpy())
all_results.append(result)
# logging
if (batch_idx + 1) % log_interval == 0:
# Output the loss of per step
toc = time.time()
logging.info(
'[batch {}], Time cost={:.2f},'
' Throughput={:.2f} samples/s, ETA={:.2f}h'.format(
batch_idx + 1, toc - tic, log_num / (toc - tic),
(epoch_size - total_num) / (total_num / (toc - epoch_tic)) / 3600))
tic = time.time()
log_num = 0
epoch_toc = time.time()
    logging.info('Time cost=%.2f s, Throughput=%.2f samples/s', epoch_toc - epoch_tic,
total_num / (epoch_toc - epoch_tic))
all_predictions = collections.OrderedDict()
all_nbest_json = collections.OrderedDict()
no_answer_score_json = collections.OrderedDict()
for index, (left_index, right_index) in enumerate(zip(dev_chunk_feature_ptr[:-1],
dev_chunk_feature_ptr[1:])):
chunked_features = dev_all_chunk_features[left_index:right_index]
results = all_results[left_index:right_index]
original_feature = dev_features[index]
qas_ids = set([result.qas_id for result in results] +
[feature.qas_id for feature in chunked_features])
        assert len(qas_ids) == 1, 'Mismatch occurred between features and results'
example_qas_id = list(qas_ids)[0]
assert example_qas_id == original_feature.qas_id, \
            'Mismatch occurred between original feature and chunked features'
not_answerable_score, best_pred, nbest_json = predict_extended(
original_feature=original_feature,
chunked_features=chunked_features,
results=results,
n_best_size=args.n_best_size,
max_answer_length=args.max_answer_length,
start_top_n=args.start_top_n,
end_top_n=args.end_top_n)
no_answer_score_json[example_qas_id] = not_answerable_score
all_predictions[example_qas_id] = best_pred
all_nbest_json[example_qas_id] = nbest_json
if args.version == '2.0':
exact = 'best_exact'
f1 = 'best_f1'
na_prob = no_answer_score_json
else:
exact = 'exact'
f1 = 'f1'
na_prob = None
cur_eval, revised_predictions = squad_eval(
dev_dataset, all_predictions, na_prob, revise=na_prob is not None)
logging.info('The evaluated results are {}'.format(json.dumps(cur_eval)))
cur_metrics = 0.5 * (cur_eval[exact] + cur_eval[f1])
if best_eval:
best_metrics = 0.5 * (best_eval[exact] + best_eval[f1])
else:
best_metrics = 0.
if cur_metrics > best_metrics:
logging.info('The evaluated files are saved in {}'.format(args.output_dir))
output_prediction_file = os.path.join(args.output_dir, 'predictions.json')
output_nbest_file = os.path.join(args.output_dir, 'nbest_predictions.json')
na_prob_file = os.path.join(args.output_dir, 'na_prob.json')
revised_prediction_file = os.path.join(args.output_dir, 'revised_predictions.json')
with open(output_prediction_file, 'w') as of:
of.write(json.dumps(all_predictions, indent=4) + '\n')
with open(output_nbest_file, 'w') as of:
of.write(json.dumps(all_nbest_json, indent=4) + '\n')
with open(na_prob_file, 'w') as of:
of.write(json.dumps(no_answer_score_json, indent=4) + '\n')
with open(revised_prediction_file, 'w') as of:
of.write(json.dumps(revised_predictions, indent=4) + '\n')
best_eval = cur_eval
best_eval.update({'best_ckpt': 'mybest'})
return best_eval['best_f1']/100 |
Model inference during validation or final evaluation.
| eval_validation | python | dmlc/gluon-nlp | scripts/question_answering/run_squad_albert.py | https://github.com/dmlc/gluon-nlp/blob/master/scripts/question_answering/run_squad_albert.py | Apache-2.0 |
def normalize_answer(s):
"""Lower text and remove punctuation, articles and extra whitespace.
This is from the official evaluate-v2.0.py in SQuAD.
"""
def remove_articles(text):
regex = re.compile(r'\b(a|an|the)\b', re.UNICODE)
return re.sub(regex, ' ', text)
def white_space_fix(text):
return ' '.join(text.split())
def remove_punc(text):
exclude = set(string.punctuation)
return ''.join(ch for ch in text if ch not in exclude)
def lower(text):
return text.lower()
return white_space_fix(remove_articles(remove_punc(lower(s)))) | Lower text and remove punctuation, articles and extra whitespace.
This is from the official evaluate-v2.0.py in SQuAD.
| normalize_answer | python | dmlc/gluon-nlp | scripts/question_answering/squad_utils.py | https://github.com/dmlc/gluon-nlp/blob/master/scripts/question_answering/squad_utils.py | Apache-2.0 |
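Quick checks of the normalization above (assuming normalize_answer and its imports, re and string, are in scope); the expected outputs follow directly from the four steps:

print(normalize_answer('The  Quick, Brown Fox!'))   # -> quick brown fox
print(normalize_answer('an answer'))                # -> answer
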
def get_chunks(self, doc_stride, max_chunk_length=None):
"""Get a sequence of chunks for the squad feature.
In reality, the document will be too long for the NLP model, and we will split it into
multiple chunks.
For example, consider the following
Doc: the man went to the store and bought a gallon of milk
We may divide it into four chunks:
Chunk 1: the man went to the
Chunk 2: to the store and bought
Chunk 3: and bought a gallon of
Chunk 4: gallon of milk
We will use our network to extract features for each chunk,
and do the aggregation afterwards. Here, one token may appear in multiple chunks.
We can vote the output based on some heuristic score functions.
Parameters
----------
doc_stride
The stride used when the context is too large and is split across several features.
max_chunk_length
The maximum size of the chunk
Returns
-------
ret
List of DocChunk objects
"""
doc_ptr = 0
max_chunk_length = max_chunk_length if max_chunk_length is not None else \
len(self.context_token_ids)
ret = []
while doc_ptr < len(self.context_token_ids):
chunk_length = min(max_chunk_length, len(self.context_token_ids) - doc_ptr)
if self.gt_answer_text is None:
chunk_gt_start_pos = None
chunk_gt_end_pos = None
chunk_is_impossible = True
else:
if self.gt_start_pos is not None and self.gt_end_pos is not None and\
self.gt_start_pos >= doc_ptr and self.gt_end_pos < doc_ptr + chunk_length:
# The chunk contains the ground-truth annotation
chunk_gt_start_pos = self.gt_start_pos - doc_ptr
chunk_gt_end_pos = self.gt_end_pos - doc_ptr
chunk_is_impossible = False
else:
chunk_gt_start_pos = None
chunk_gt_end_pos = None
chunk_is_impossible = True
ret.append(DocChunk(start=doc_ptr,
length=chunk_length,
is_impossible=chunk_is_impossible,
gt_start_pos=chunk_gt_start_pos,
gt_end_pos=chunk_gt_end_pos))
if doc_ptr + chunk_length == len(self.context_token_ids):
break
doc_ptr += doc_stride
return ret | Get a sequence of chunks for the squad feature.
In reality, the document will be too long for the NLP model, and we will split it into
multiple chunks.
For example, consider the following
Doc: the man went to the store and bought a gallon of milk
We may divide it into four chunks:
Chunk 1: the man went to the
Chunk 2: to the store and bought
Chunk 3: and bought a gallon of
Chunk 4: gallon of milk
We will use our network to extract features for each chunk,
and do the aggregation afterwards. Here, one token may appear in multiple chunks.
We can vote the output based on some heuristic score functions.
Parameters
----------
doc_stride
The stride used when the context is too large and is split across several features.
max_chunk_length
The maximum size of the chunk
Returns
-------
ret
List of DocChunk objects
| get_chunks | python | dmlc/gluon-nlp | scripts/question_answering/squad_utils.py | https://github.com/dmlc/gluon-nlp/blob/master/scripts/question_answering/squad_utils.py | Apache-2.0 |
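The chunk boundaries from the docstring example can be reproduced with the same pointer arithmetic; a stand-alone sketch in plain Python, not calling SquadFeature:

tokens = 'the man went to the store and bought a gallon of milk'.split()
doc_stride, max_chunk_length = 3, 5

doc_ptr, chunks = 0, []
while doc_ptr < len(tokens):
    length = min(max_chunk_length, len(tokens) - doc_ptr)
    chunks.append(' '.join(tokens[doc_ptr:doc_ptr + length]))
    if doc_ptr + length == len(tokens):
        break
    doc_ptr += doc_stride
print(chunks)
# ['the man went to the', 'to the store and bought',
#  'and bought a gallon of', 'gallon of milk']
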
def get_squad_examples_from_json(json_file: str, is_training: bool) -> List[SquadExample]:
"""
    Read the whole raw JSON file and convert its entries to examples.
Parameters
----------
json_file
The path to the json file
is_training
Whether or not training
Returns
-------
ret
List of SquadExample objects
"""
with open(json_file, 'r') as f:
data = json.load(f)
examples = []
for entry in tqdm(data['data']):
title = entry['title']
for paragraph in entry['paragraphs']:
context_text = paragraph['context']
for qa in paragraph['qas']:
qas_id = qa['id']
query_text = qa['question']
start_position = None
end_position = None
answer_text = None
answers = None
if "is_impossible" in qa:
is_impossible = qa["is_impossible"]
else:
is_impossible = False
if not is_impossible:
if is_training:
answer = qa["answers"][0]
answer_text = answer["text"]
start_position = answer["answer_start"]
end_position = start_position + len(answer_text)
if context_text[start_position:end_position] != answer_text:
warnings.warn(
'Mismatch start/end and answer_text, start/end={}/{},'
' answer text={}. qas={}'
.format(start_position, end_position, answer_text, qas_id))
else:
answers = qa["answers"]
example = SquadExample(
qas_id=qas_id,
query_text=query_text,
context_text=context_text,
answer_text=answer_text,
start_position=start_position,
end_position=end_position,
title=title,
is_impossible=is_impossible,
answers=answers,
)
examples.append(example)
return examples |
    Read the whole raw JSON file and convert its entries to examples.
Parameters
----------
json_file
The path to the json file
is_training
Whether or not training
Returns
-------
ret
List of SquadExample objects
| get_squad_examples_from_json | python | dmlc/gluon-nlp | scripts/question_answering/squad_utils.py | https://github.com/dmlc/gluon-nlp/blob/master/scripts/question_answering/squad_utils.py | Apache-2.0 |
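A minimal SQuAD-style payload (made-up content) containing only the fields read above; writing it to disk and calling get_squad_examples_from_json should yield one answerable example.

import json, os, tempfile

payload = {
    'data': [{
        'title': 'Example',
        'paragraphs': [{
            'context': 'GluonNLP provides NLP toolkits for MXNet.',
            'qas': [{
                'id': 'q1',
                'question': 'What does GluonNLP provide?',
                'is_impossible': False,
                'answers': [{'text': 'NLP toolkits', 'answer_start': 18}],
            }],
        }],
    }],
}
path = os.path.join(tempfile.mkdtemp(), 'tiny-squad.json')
with open(path, 'w') as f:
    json.dump(payload, f)
examples = get_squad_examples_from_json(path, is_training=True)
print(examples[0].qas_id, examples[0].answer_text)   # q1 NLP toolkits
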
def get_squad_examples(data_dir, segment='train', version='1.1'):
"""
Parameters
----------
data_dir
The directory of the data
segment
The segment
version
Version of the SQuAD
Returns
-------
examples
        A list of SquadExample objects
"""
if version == '1.1':
train_path = os.path.join(data_dir, 'train-v1.1.json')
dev_path = os.path.join(data_dir, 'dev-v1.1.json')
elif version == '2.0':
train_path = os.path.join(data_dir, 'train-v2.0.json')
dev_path = os.path.join(data_dir, 'dev-v2.0.json')
else:
raise NotImplementedError
if segment == 'train':
examples = get_squad_examples_from_json(train_path, is_training=True)
elif segment == 'dev':
examples = get_squad_examples_from_json(dev_path, is_training=False)
else:
raise NotImplementedError
return examples |
Parameters
----------
data_dir
The directory of the data
segment
The segment
version
Version of the SQuAD
Returns
-------
examples
        A list of SquadExample objects
| get_squad_examples | python | dmlc/gluon-nlp | scripts/question_answering/squad_utils.py | https://github.com/dmlc/gluon-nlp/blob/master/scripts/question_answering/squad_utils.py | Apache-2.0 |
def convert_squad_example_to_feature(example: SquadExample,
tokenizer: BaseTokenizerWithVocab,
is_training: bool):
"""
Convert a SquadExample object to a SquadFeature object with the designated tokenizer.
    There are actually a few examples that cannot be converted properly with token-level
    tokenization, because the ground truth is given by the start position and the answer text,
    and some examples are annotated with wrong labels. Thus, the attributes unreliable_span and
    token_answer_mismatch are used to indicate these scenarios.
Parameters
----------
example
A single squad example
tokenizer
The trained tokenizer
is_training
Whether to deal with the training case
Returns
-------
feature
A SquadFeature
"""
context_text = example.context_text
answer_text = example.answer_text
query_text = example.query_text
context_token_ids, offsets = tokenizer.encode_with_offsets(context_text, int)
query_token_ids = tokenizer.encode(query_text, int)
gt_answer_text = answer_text
gt_span_start_pos, gt_span_end_pos = None, None
token_answer_mismatch = False
unreliable_span = False
np_offsets = np.array(offsets)
if is_training and not example.is_impossible:
assert example.start_position >= 0 and example.end_position >= 0
# We convert the character-level offsets to token-level offsets
# Also, if the answer after tokenization + detokenization is not the same as the original
# answer, we try to localize the answer text and do a rematch
candidates = [(example.start_position, example.end_position)]
all_possible_start_pos = {example.start_position}
find_all_candidates = False
lower_idx, upper_idx = None, None
first_lower_idx, first_upper_idx = None, None
while len(candidates) > 0:
start_position, end_position = candidates.pop()
# Match the token offsets
token_start_ends = match_tokens_with_char_spans(np_offsets,
np.array([[start_position,
end_position]]))
lower_idx = int(token_start_ends[0][0])
upper_idx = int(token_start_ends[0][1])
if not find_all_candidates:
first_lower_idx = lower_idx
first_upper_idx = upper_idx
# The new start pos and end_pos are the lower_idx and upper_idx
sliced_answer = context_text[offsets[lower_idx][0]:offsets[upper_idx][1]]
norm_sliced_answer = normalize_answer(sliced_answer)
norm_answer = normalize_answer(answer_text)
if norm_sliced_answer != norm_answer:
if not find_all_candidates:
# Try to find a better start+end of the answer and insert all positions to the
# candidates
find_all_candidates = True
pos = context_text.find(answer_text)
while pos != -1:
if pos not in all_possible_start_pos:
all_possible_start_pos.add(pos)
candidates.append((pos, pos + len(answer_text)))
pos = context_text.find(answer_text, pos + 1)
elif len(candidates) == 0:
token_answer_mismatch = True
lower_idx = first_lower_idx
upper_idx = first_upper_idx
if int_float_regex.match(answer_text):
# Find an integer/float and the sample won't be reliable.
# The span-based approach is not suitable for this scenario and we will
# set the unreliable span flag.
unreliable_span = True
else:
break
gt_span_start_pos = lower_idx
gt_span_end_pos = upper_idx
feature = SquadFeature(qas_id=example.qas_id,
query_token_ids=query_token_ids,
context_text=context_text,
context_token_ids=context_token_ids,
context_token_offsets=offsets,
is_impossible=example.is_impossible,
token_answer_mismatch=token_answer_mismatch,
unreliable_span=unreliable_span,
gt_answer_text=gt_answer_text,
gt_start_pos=gt_span_start_pos,
gt_end_pos=gt_span_end_pos)
return feature |
Convert a SquadExample object to a SquadFeature object with the designated tokenizer.
    There are actually a few examples that cannot be converted properly with token-level
    tokenization, because the ground truth is given by the start position and the answer text,
    and some examples are annotated with wrong labels. Thus, the attributes unreliable_span and
    token_answer_mismatch are used to indicate these scenarios.
Parameters
----------
example
A single squad example
tokenizer
The trained tokenizer
is_training
Whether to deal with the training case
Returns
-------
feature
A SquadFeature
| convert_squad_example_to_feature | python | dmlc/gluon-nlp | scripts/question_answering/squad_utils.py | https://github.com/dmlc/gluon-nlp/blob/master/scripts/question_answering/squad_utils.py | Apache-2.0 |
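The character-to-token alignment at the heart of the function can be sketched without the library helper: given per-token character offsets and an answer character span, take the first and last tokens that overlap the span (a simplified stand-in for match_tokens_with_char_spans, with hypothetical offsets).

offsets = [(0, 3), (4, 7), (8, 13), (14, 20)]   # hypothetical (start, end) chars per token
answer_start, answer_end = 8, 20                # character span of the answer text

token_start = next(i for i, (s, e) in enumerate(offsets) if e > answer_start)
token_end = max(i for i, (s, e) in enumerate(offsets) if s < answer_end)
print(token_start, token_end)   # -> 2 3
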
def gen_self_attn_mask(data,
valid_length=None,
dtype: type = np.float32,
attn_type: str = 'full',
layout: str = 'NT'):
"""Generate the mask used for the encoder, i.e, self-attention.
In our implementation, 1 --> not masked, 0 --> masked
Let's consider the data with two samples:
.. code-block:: none
data =
[['I', 'can', 'now', 'use', 'numpy', 'in', 'Gluon@@', 'NLP' ],
['May', 'the', 'force', 'be', 'with', 'you', '<PAD>', '<PAD>']]
valid_length =
[8, 6]
- attn_type = 'causal'
Each token will attend to itself + the tokens before.
It will not attend to tokens in the future.
For our example, the mask of the first sample is
.. code-block:: none
['I', 'can', 'now', 'use', 'numpy', 'in', 'Gluon@@', 'NLP']
'I': 1, 0, 0, 0, 0, 0, 0, 0
'can': 1, 1, 0, 0, 0, 0, 0, 0
'now': 1, 1, 1, 0, 0, 0, 0, 0
'use': 1, 1, 1, 1, 0, 0, 0, 0
'numpy': 1, 1, 1, 1, 1, 0, 0, 0
'in': 1, 1, 1, 1, 1, 1, 0, 0
'Gluon@@': 1, 1, 1, 1, 1, 1, 1, 0
'NLP': 1, 1, 1, 1, 1, 1, 1, 1
The mask of the second sample is
.. code-block:: none
['May', 'the', 'force', 'be', 'with', 'you', '<PAD>', '<PAD>']
'May': 1, 0, 0, 0, 0, 0, 0, 0
'the': 1, 1, 0, 0, 0, 0, 0, 0
'force': 1, 1, 1, 0, 0, 0, 0, 0
'be': 1, 1, 1, 1, 0, 0, 0, 0
'with': 1, 1, 1, 1, 1, 0, 0, 0
'you': 1, 1, 1, 1, 1, 1, 0, 0
'<PAD>': 0, 0, 0, 0, 0, 0, 0, 0
'<PAD>': 0, 0, 0, 0, 0, 0, 0, 0
- attn_type = 'full'
Each token will attend to both the tokens before and in the future
For our example, the mask of the first sample is
.. code-block:: none
['I', 'can', 'now', 'use', 'numpy', 'in', 'Gluon@@', 'NLP']
'I': 1, 1, 1, 1, 1, 1, 1, 1
'can': 1, 1, 1, 1, 1, 1, 1, 1
'now': 1, 1, 1, 1, 1, 1, 1, 1
'use': 1, 1, 1, 1, 1, 1, 1, 1
'numpy': 1, 1, 1, 1, 1, 1, 1, 1
'in': 1, 1, 1, 1, 1, 1, 1, 1
'Gluon@@': 1, 1, 1, 1, 1, 1, 1, 1
'NLP': 1, 1, 1, 1, 1, 1, 1, 1
The mask of the second sample is
.. code-block:: none
['May', 'the', 'force', 'be', 'with', 'you', '<PAD>', '<PAD>']
'May': 1, 1, 1, 1, 1, 1, 0, 0
'the': 1, 1, 1, 1, 1, 1, 0, 0
'force': 1, 1, 1, 1, 1, 1, 0, 0
'be': 1, 1, 1, 1, 1, 1, 0, 0
'with': 1, 1, 1, 1, 1, 1, 0, 0
'you': 1, 1, 1, 1, 1, 1, 0, 0
'<PAD>': 0, 0, 0, 0, 0, 0, 0, 0
'<PAD>': 0, 0, 0, 0, 0, 0, 0, 0
Parameters
----------
data
The data.
- layout = 'NT'
Shape (batch_size, seq_length, C)
- layout = 'TN'
Shape (seq_length, batch_size, C)
valid_length
Shape (batch_size,)
dtype
Data type of the mask
attn_type
Can be 'full' or 'causal'
layout
The layout of the data
Returns
-------
mask
Shape (batch_size, seq_length, seq_length)
"""
if layout == 'NT':
batch_axis, time_axis = 0, 1
elif layout == 'TN':
batch_axis, time_axis = 1, 0
else:
raise NotImplementedError('Unsupported layout={}'.format(layout))
if attn_type == 'full':
if valid_length is not None:
valid_length = valid_length.astype(dtype)
steps = npx.arange_like(data, axis=time_axis) # (seq_length,)
mask1 = (npx.reshape(steps, (1, 1, -1))
< npx.reshape(valid_length, (-2, 1, 1)))
mask2 = (npx.reshape(steps, (1, -1, 1))
< npx.reshape(valid_length, (-2, 1, 1)))
mask = mask1 * mask2
else:
# TODO(sxjscience) optimize
seq_len_ones = np.ones_like(npx.arange_like(data, axis=time_axis)) # (seq_length,)
batch_ones = np.ones_like(npx.arange_like(data, axis=batch_axis)) # (batch_size,)
mask = batch_ones.reshape((-1, 1, 1)) * seq_len_ones.reshape((1, -1, 1))\
* seq_len_ones.reshape((1, 1, -1))
elif attn_type == 'causal':
steps = npx.arange_like(data, axis=time_axis)
# mask: (seq_length, seq_length)
# batch_mask: (batch_size, seq_length)
mask = (np.expand_dims(steps, axis=0) <= np.expand_dims(steps, axis=1)).astype(dtype)
if valid_length is not None:
valid_length = valid_length.astype(dtype)
batch_mask = (np.expand_dims(steps, axis=0) < np.expand_dims(valid_length, axis=-1)).astype(dtype)
mask = mask * np.expand_dims(batch_mask, axis=-1)
else:
batch_ones = np.ones_like(npx.arange_like(data, axis=batch_axis),
dtype=dtype) # (batch_size,)
mask = mask * batch_ones.reshape((-1, 1, 1))
else:
raise NotImplementedError
return mask.astype(np.bool) | Generate the mask used for the encoder, i.e, self-attention.
In our implementation, 1 --> not masked, 0 --> masked
Let's consider the data with two samples:
.. code-block:: none
data =
[['I', 'can', 'now', 'use', 'numpy', 'in', 'Gluon@@', 'NLP' ],
['May', 'the', 'force', 'be', 'with', 'you', '<PAD>', '<PAD>']]
valid_length =
[8, 6]
- attn_type = 'causal'
Each token will attend to itself + the tokens before.
It will not attend to tokens in the future.
For our example, the mask of the first sample is
.. code-block:: none
['I', 'can', 'now', 'use', 'numpy', 'in', 'Gluon@@', 'NLP']
'I': 1, 0, 0, 0, 0, 0, 0, 0
'can': 1, 1, 0, 0, 0, 0, 0, 0
'now': 1, 1, 1, 0, 0, 0, 0, 0
'use': 1, 1, 1, 1, 0, 0, 0, 0
'numpy': 1, 1, 1, 1, 1, 0, 0, 0
'in': 1, 1, 1, 1, 1, 1, 0, 0
'Gluon@@': 1, 1, 1, 1, 1, 1, 1, 0
'NLP': 1, 1, 1, 1, 1, 1, 1, 1
The mask of the second sample is
.. code-block:: none
['May', 'the', 'force', 'be', 'with', 'you', '<PAD>', '<PAD>']
'May': 1, 0, 0, 0, 0, 0, 0, 0
'the': 1, 1, 0, 0, 0, 0, 0, 0
'force': 1, 1, 1, 0, 0, 0, 0, 0
'be': 1, 1, 1, 1, 0, 0, 0, 0
'with': 1, 1, 1, 1, 1, 0, 0, 0
'you': 1, 1, 1, 1, 1, 1, 0, 0
'<PAD>': 0, 0, 0, 0, 0, 0, 0, 0
'<PAD>': 0, 0, 0, 0, 0, 0, 0, 0
- attn_type = 'full'
Each token will attend to both the tokens before and in the future
For our example, the mask of the first sample is
.. code-block:: none
['I', 'can', 'now', 'use', 'numpy', 'in', 'Gluon@@', 'NLP']
'I': 1, 1, 1, 1, 1, 1, 1, 1
'can': 1, 1, 1, 1, 1, 1, 1, 1
'now': 1, 1, 1, 1, 1, 1, 1, 1
'use': 1, 1, 1, 1, 1, 1, 1, 1
'numpy': 1, 1, 1, 1, 1, 1, 1, 1
'in': 1, 1, 1, 1, 1, 1, 1, 1
'Gluon@@': 1, 1, 1, 1, 1, 1, 1, 1
'NLP': 1, 1, 1, 1, 1, 1, 1, 1
The mask of the second sample is
.. code-block:: none
['May', 'the', 'force', 'be', 'with', 'you', '<PAD>', '<PAD>']
'May': 1, 1, 1, 1, 1, 1, 0, 0
'the': 1, 1, 1, 1, 1, 1, 0, 0
'force': 1, 1, 1, 1, 1, 1, 0, 0
'be': 1, 1, 1, 1, 1, 1, 0, 0
'with': 1, 1, 1, 1, 1, 1, 0, 0
'you': 1, 1, 1, 1, 1, 1, 0, 0
'<PAD>': 0, 0, 0, 0, 0, 0, 0, 0
'<PAD>': 0, 0, 0, 0, 0, 0, 0, 0
Parameters
----------
data
The data.
- layout = 'NT'
Shape (batch_size, seq_length, C)
- layout = 'TN'
Shape (seq_length, batch_size, C)
valid_length
Shape (batch_size,)
dtype
Data type of the mask
attn_type
Can be 'full' or 'causal'
layout
The layout of the data
Returns
-------
mask
Shape (batch_size, seq_length, seq_length)
| gen_self_attn_mask | python | dmlc/gluon-nlp | src/gluonnlp/attention_cell.py | https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/attention_cell.py | Apache-2.0 |
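A minimal usage sketch (added for illustration, not part of the source): building a causal self-attention mask for a padded batch, assuming gen_self_attn_mask is importable from gluonnlp.attention_cell and MXNet's numpy mode is enabled.
import mxnet as mx
from gluonnlp.attention_cell import gen_self_attn_mask

mx.npx.set_np()
data = mx.np.ones((2, 8, 16))          # (batch_size, seq_length, C), layout 'NT'
valid_length = mx.np.array([8, 6])     # the second sample ends with two padding tokens
mask = gen_self_attn_mask(data, valid_length, attn_type='causal', layout='NT')
print(mask.shape)                      # (2, 8, 8); mask[i, j, k] is True iff token j of sample i may attend to token k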
def gen_mem_attn_mask(mem, mem_valid_length, data, data_valid_length=None,
dtype=np.float32, layout: str = 'NT'):
"""Generate the mask used for the decoder. All query slots are attended to the memory slots.
In our implementation, 1 --> not masked, 0 --> masked
Let's consider the data + mem with a batch of two samples:
.. code-block:: none
mem = [['I', 'can', 'now', 'use'],
['May', 'the', 'force', '<PAD>']]
mem_valid_length =
[4, 3]
data =
[['numpy', 'in', 'Gluon@@', 'NLP' ],
['be', 'with', 'you', '<PAD>']]
data_valid_length =
[4, 3]
For our example, the mask of the first sample is
.. code-block:: none
['I', 'can', 'now', 'use']
'numpy': 1, 1, 1, 1
'in': 1, 1, 1, 1
'Gluon@@': 1, 1, 1, 1
'NLP': 1, 1, 1, 1
The mask of the second sample is
.. code-block:: none
['be', 'with', 'you', '<PAD>']
'May': 1, 1, 1, 0
'the': 1, 1, 1, 0
'force': 1, 1, 1, 0
'<PAD>': 0, 0, 0, 0
Parameters
----------
mem
- layout = 'NT'
Shape (batch_size, mem_length, C_mem)
- layout = 'TN'
Shape (mem_length, batch_size, C_mem)
mem_valid_length :
Shape (batch_size,)
data
- layout = 'NT'
Shape (batch_size, query_length, C_data)
- layout = 'TN'
Shape (query_length, batch_size, C_data)
data_valid_length :
Shape (batch_size,)
dtype
Data type of the mask
layout
Layout of the data + mem tensor
Returns
-------
mask :
Shape (batch_size, query_length, mem_length)
"""
if layout == 'NT':
batch_axis, time_axis = 0, 1
elif layout == 'TN':
batch_axis, time_axis = 1, 0
else:
raise NotImplementedError('Unsupported layout={}'.format(layout))
mem_valid_length = mem_valid_length.astype(dtype)
mem_steps = npx.arange_like(mem, axis=time_axis) # (mem_length,)
data_steps = npx.arange_like(data, axis=time_axis) # (query_length,)
mem_mask = (npx.reshape(mem_steps, (1, 1, -1))
< npx.reshape(mem_valid_length, (-2, 1, 1))).astype(dtype) # (B, 1, mem_length)
if data_valid_length is not None:
data_valid_length = data_valid_length.astype(dtype)
data_mask = (npx.reshape(data_steps, (1, -1, 1))
< npx.reshape(data_valid_length, (-2, 1, 1))).astype(dtype) # (B, query_length, 1)
mask = mem_mask * data_mask
else:
query_length_ones = np.ones_like(data_steps)
mask = query_length_ones.reshape((1, -1, 1)) * mem_mask
return mask.astype(np.bool) | Generate the mask used for the decoder. All query slots are attended to the memory slots.
In our implementation, 1 --> not masked, 0 --> masked
Let's consider the data + mem with a batch of two samples:
.. code-block:: none
mem = [['I', 'can', 'now', 'use'],
['May', 'the', 'force', '<PAD>']]
mem_valid_length =
[4, 3]
data =
[['numpy', 'in', 'Gluon@@', 'NLP' ],
['be', 'with', 'you', '<PAD>']]
data_valid_length =
[4, 3]
For our example, the mask of the first sample is
.. code-block:: none
['I', 'can', 'now', 'use']
'numpy': 1, 1, 1, 1
'in': 1, 1, 1, 1
'Gluon@@': 1, 1, 1, 1
'NLP': 1, 1, 1, 1
The mask of the second sample is
.. code-block:: none
['be', 'with', 'you', '<PAD>']
'May': 1, 1, 1, 0
'the': 1, 1, 1, 0
'force': 1, 1, 1, 0
'<PAD>': 0, 0, 0, 0
Parameters
----------
mem
- layout = 'NT'
Shape (batch_size, mem_length, C_mem)
- layout = 'TN'
Shape (mem_length, batch_size, C_mem)
mem_valid_length :
Shape (batch_size,)
data
- layout = 'NT'
Shape (batch_size, query_length, C_data)
- layout = 'TN'
Shape (query_length, batch_size, C_data)
data_valid_length :
Shape (batch_size,)
dtype
Data type of the mask
layout
Layout of the data + mem tensor
Returns
-------
mask :
Shape (batch_size, query_length, mem_length)
| gen_mem_attn_mask | python | dmlc/gluon-nlp | src/gluonnlp/attention_cell.py | https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/attention_cell.py | Apache-2.0 |
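A short sketch of the memory-attention counterpart (illustration only, same import assumptions as above):
import mxnet as mx
from gluonnlp.attention_cell import gen_mem_attn_mask

mx.npx.set_np()
mem = mx.np.ones((2, 4, 16))           # (batch_size, mem_length, C_mem), layout 'NT'
data = mx.np.ones((2, 5, 16))          # (batch_size, query_length, C_data)
mem_valid_length = mx.np.array([4, 3])
data_valid_length = mx.np.array([5, 4])
mask = gen_mem_attn_mask(mem, mem_valid_length, data, data_valid_length, layout='NT')
print(mask.shape)                      # (2, 5, 4)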
def masked_softmax(att_score, mask, axis: int = -1, temperature=None):
"""Ignore the masked elements when calculating the softmax. The mask can be broadcastable.
Parameters
----------
att_score : Symbol or NDArray
Shape (..., length, ...)
mask : Symbol or NDArray or None
Shape (..., length, ...)
1 --> The element is not masked
0 --> The element is masked
axis
The axis to calculate the softmax. att_score.shape[axis] must be the same as mask.shape[axis]
temperature
The temperature. It scales down the scores before applying the softmax.
Returns
-------
att_weights : Symbol or NDArray
Shape (..., length, ...)
"""
if mask is None:
return npx.softmax(att_score, axis=axis, temperature=temperature)
else:
return npx.masked_softmax(att_score, mask=mask.astype(np.bool),
axis=axis, temperature=temperature) | Ignore the masked elements when calculating the softmax. The mask can be broadcastable.
Parameters
----------
att_score : Symbol or NDArray
Shape (..., length, ...)
mask : Symbol or NDArray or None
Shape (..., length, ...)
1 --> The element is not masked
0 --> The element is masked
axis
The axis to calculate the softmax. att_score.shape[axis] must be the same as mask.shape[axis]
temperature
The temperature. It scales down the scores before applying the softmax.
Returns
-------
att_weights : Symbol or NDArray
Shape (..., length, ...)
| masked_softmax | python | dmlc/gluon-nlp | src/gluonnlp/attention_cell.py | https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/attention_cell.py | Apache-2.0 |
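A small illustration (not from the source) of how the mask zeroes out attention weights while keeping the remaining entries normalized:
import mxnet as mx
from gluonnlp.attention_cell import masked_softmax

mx.npx.set_np()
att_score = mx.np.random.normal(0, 1, (2, 3, 4))
mask = mx.np.ones((2, 3, 4))
mask[:, :, 2:] = 0                     # mask out the last two memory slots
att_weights = masked_softmax(att_score, mask)
print(att_weights[0, 0])               # last two entries are 0, the first two sum to 1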
def masked_logsoftmax(att_score, mask, axis: int = -1):
"""Ignore the masked elements when calculating the softmax. The mask can be broadcastable.
Parameters
----------
att_score : Symbol or NDArray
Shape (..., length, ...)
mask : Symbol or NDArray or None
Shape (..., length, ...)
mask = 1 --> not masked
mask = 0 --> masked
axis
The axis to calculate the softmax. att_score.shape[axis] must be the same as mask.shape[axis]
Returns
-------
logits : Symbol or NDArray
Shape (..., length, ...)
The masked values will be -inf
"""
if mask is None:
return npx.log_softmax(att_score, axis=axis)
else:
mask = mask.astype(np.bool)
return np.where(mask, npx.masked_log_softmax(att_score, mask, axis=axis), -np.inf) | Ignore the masked elements when calculating the softmax. The mask can be broadcastable.
Parameters
----------
att_score : Symbol or NDArray
Shape (..., length, ...)
mask : Symbol or NDArray or None
Shape (..., length, ...)
mask = 1 --> not masked
mask = 0 --> masked
axis
The axis to calculate the softmax. att_score.shape[axis] must be the same as mask.shape[axis]
Returns
-------
logits : Symbol or NDArray
Shape (..., length, ...)
The masked values will be -inf
| masked_logsoftmax | python | dmlc/gluon-nlp | src/gluonnlp/attention_cell.py | https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/attention_cell.py | Apache-2.0 |
def multi_head_dot_attn(query, key, value,
mask=None,
edge_scores=None,
dropout: float = 0.0,
scaled: bool = True, normalized: bool = False,
eps: float = 1E-6, query_head_units: Optional[int] = None,
layout: str = 'NKT',
use_einsum: bool = False):
"""Multihead dot product attention between the query, key, value.
scaled is False, normalized is False:
D(h_q, h_k) = <h_q, h_k>
scaled is True, normalized is False:
D(h_q, h_k) = <h_q, h_k> / sqrt(dim_q)
scaled is False, normalized is True:
D(h_q, h_k) = <h_q / ||h_q||, h_k / ||h_k||>
scaled is True, normalized is True:
D(h_q, h_k) = <h_q / ||h_q||, h_k / ||h_k||> / sqrt(dim_q)
If edge_scores is provided, we will calculate the attention as
scores = D(h_q, h_k) + EdgeScore_{q, k}
Parameters
----------
query
Query. The shape depends on the layout
- layout is 'NKT'
Shape (batch_size, num_heads, query_length, key_dim)
- layout is 'NTK'
Shape (batch_size, query_length, num_heads, key_dim)
- layout is 'TNK'
Shape (query_length, batch_size, num_heads, key_dim)
key
Key. The shape depends on the layout
- layout is 'NKT'
Shape (batch_size, num_heads, mem_length, key_dim)
- layout is 'NTK'
Shape (batch_size, mem_length, num_heads, key_dim)
- layout is 'TNK'
Shape (mem_length, batch_size, num_heads, key_dim)
value
Value. The shape depends on the layout
- layout is 'NKT'
Shape (batch_size, num_heads, mem_length, value_dim)
- layout is 'NTK'
Shape (batch_size, mem_length, num_heads, value_dim)
- layout is 'TNK'
Shape (mem_length, batch_size, num_heads, value_dim)
mask
Mask between query and memory. Shape (batch_size, query_length, mem_length)
edge_scores
The edge attention score. Shape can be any shape that is broadcastable to
(batch_size, num_heads, query_length, mem_length)
dropout
Dropout rate
scaled
Whether to divide the attention weights by the sqrt of the query dimension.
This is first proposed in "[NIPS2017] Attention is all you need."::
.. code-block:: none
score = <h_q, h_k> / sqrt(dim_q)
normalized
If turned on, the cosine distance is used, i.e::
.. code-block:: none
score = <h_q / ||h_q||, h_k / ||h_k||>
eps
The epsilon value used in L2 normalization
query_head_units
The units of each query head. If it's empty, we will estimate it via the
shape_array of the query.
layout
This stands for the layout of the attention cell. The shape of the input/output will depend
on the layout. Currently, we support 'NKT', 'NTK' and 'TNK' in which
'N' means the batch_size, 'K' means the head, and 'T' means the length dimension.
use_einsum
Whether to use einsum for the computation
Returns
-------
context_vec
- layout is 'NKT' or 'NTK'
Shape (batch_size, query_length, num_heads * value_units)
- layout is 'TNK'
Shape (query_length, batch_size, num_heads * value_units)
additional_info
scores:
Shape (batch_size, num_head, query_length, mem_length)
attn_weight:
Shape (batch_size, num_head, query_length, mem_length)
"""
# TODO(sxjscience) Profile layout
if normalized:
query = l2_normalize(query, axis=-1, eps=eps)
key = l2_normalize(key, axis=-1, eps=eps)
if scaled:
if query_head_units is None:
raise NotImplementedError('You will need to specify query_head_units!')
else:
scale = math.sqrt(query_head_units)
else:
scale = None
if layout == 'NKT':
# 1. Expand the dimension of the mask:
# (B, L_query, L_mem) --> (B, 1, L_query, L_mem)
if mask is not None:
mask = np.expand_dims(mask, axis=1).astype(np.bool)
# 2. Calculate the attention weights
# Score: (B, N, L_query, C_Q) X (B, N, L_mem, C_Q) --> (B, N, L_query, L_mem)
scores = npx.batch_dot(query, key, transpose_b=True)
if edge_scores is not None:
scores = scores + edge_scores
attn_weights = masked_softmax(scores, mask, axis=-1, temperature=scale)
attn_weights = npx.dropout(attn_weights, p=dropout)
# 3. Calculate the context vector
# (B, N, L_query, L_mem) X (B, N, L_mem, C_V) --> (B, L_query, N * C_V)
if use_einsum:
context_vec = np.einsum('bnij,bnjc->binc', attn_weights, value)
else:
context_vec = npx.batch_dot(attn_weights, value).transpose((0, 2, 1, 3))
context_vec = npx.reshape(context_vec, (-2, -2, -1))
elif layout == 'NTK':
# 1. Expand the dimension of the mask:
# (B, L_query, L_mem) --> (B, 1, L_query, L_mem)
if mask is not None:
mask = np.expand_dims(mask, axis=1).astype(np.bool)
# 2. Calculate the attention weights
# Score: (B, L_query, N, C_Q) X (B, L_mem, N, C_Q) --> (B, N, L_query, L_mem)
if use_einsum:
scores = np.einsum('binc,bjnc->bnij', query, key)
else:
scores = npx.batch_dot(np.swapaxes(query, 1, 2), np.swapaxes(key, 1, 2),
transpose_b=True)
if edge_scores is not None:
scores = scores + edge_scores
attn_weights = masked_softmax(scores, mask, axis=-1, temperature=scale)
attn_weights = npx.dropout(attn_weights, p=dropout)
# 3. Calculate the context vector
# (B, N, L_query, L_mem) X (B, L_mem, N, C_V) --> (B, L_query, N * C_V)
if use_einsum:
context_vec = np.einsum('bnij,bjnc->binc', attn_weights, value)
else:
context_vec = npx.batch_dot(attn_weights,
np.swapaxes(value, 1, 2)).transpose((0, 2, 1, 3))
context_vec = npx.reshape(context_vec, (-2, -2, -1))
elif layout == 'TNK':
# 1. Expand the dimension of the mask:
# (B, L_query, L_mem) --> (B, 1, L_query, L_mem)
if mask is not None:
mask = np.expand_dims(mask, axis=1).astype(np.bool)
# 2. Calculate the attention weights
# Score: (L_query, B, N, C_Q) X (L_mem, B, N, C_Q) --> (B, N, L_query, L_mem)
# This layout structure can be implemented very efficiently because B, N are consecutive
# to each other. To have a clear picture of what's happening, we may consider the
# (i, j)th element of the output
# out[i, j, :, :] = query[:, i, j, :] X key[:, i, j, :].T, which is just one GEMM call
# We can thus implement the whole kernel via a single call of batched GEMM with stride.
if use_einsum:
scores = np.einsum('ibnc,jbnc->bnij', query, key)
else:
scores = npx.batch_dot(query.transpose((1, 2, 0, 3)),
key.transpose((1, 2, 3, 0)))
if edge_scores is not None:
scores = scores + edge_scores
attn_weights = masked_softmax(scores, mask, axis=-1, temperature=scale)
attn_weights = npx.dropout(attn_weights, p=dropout)
# 3. Calculate the context vector
# (B, N, L_query, L_mem) X (L_mem, B, N, C_V) --> (L_query, B, N * C_V)
# Again, we can implement it via a single call to batched GEMM with stride.
# Shape (B, N, L_query, C_V)
if use_einsum:
context_vec = np.einsum('bnij,jbnc->ibnc', attn_weights, value)
else:
context_vec = npx.batch_dot(attn_weights,
value.transpose((1, 2, 0, 3))).transpose((2, 0, 1, 3))
context_vec = npx.reshape(context_vec, (-2, -2, -1))
else:
raise NotImplementedError('layout="{}" is not supported! '
'We only support layout = "NKT", "NTK", and "TNK".'
.format(layout))
return context_vec, [scores, attn_weights] | Multihead dot product attention between the query, key, value.
scaled is False, normalized is False:
D(h_q, h_k) = <h_q, h_k>
scaled is True, normalized is False:
D(h_q, h_k) = <h_q, h_k> / sqrt(dim_q)
scaled is False, normalized is True:
D(h_q, h_k) = <h_q / ||h_q||, h_k / ||h_k||>
scaled is True, normalized is True:
D(h_q, h_k) = <h_q / ||h_q||, h_k / ||h_k||> / sqrt(dim_q)
If edge_scores is provided, we will calculate the attention as
scores = D(h_q, h_k) + EdgeScore_{q, k}
Parameters
----------
query
Query. The shape depends on the layout
- layout is 'NKT'
Shape (batch_size, num_heads, query_length, key_dim)
- layout is 'NTK'
Shape (batch_size, query_length, num_heads, key_dim)
- layout is 'TNK'
Shape (query_length, batch_size, num_heads, key_dim)
key
Key. The shape depends on the layout
- layout is 'NKT'
Shape (batch_size, num_heads, mem_length, key_dim)
- layout is 'NTK'
Shape (batch_size, mem_length, num_heads, key_dim)
- layout is 'TNK'
Shape (mem_length, batch_size, num_heads, key_dim)
value
Value. The shape depends on the layout
- layout is 'NKT'
Shape (batch_size, num_heads, mem_length, value_dim)
- layout is 'NTK'
Shape (batch_size, mem_length, num_heads, value_dim)
- layout is 'TNK'
Shape (mem_length, batch_size, num_heads, value_dim)
mask
Mask between query and memory. Shape (batch_size, query_length, mem_length)
edge_scores
The edge attention score. Shape can be any shape that is broadcastable to
(batch_size, num_heads, query_length, mem_length)
dropout
Dropout rate
scaled
Whether to divide the attention weights by the sqrt of the query dimension.
This is first proposed in "[NIPS2017] Attention is all you need."::
.. code-block:: none
score = <h_q, h_k> / sqrt(dim_q)
normalized
If turned on, the cosine distance is used, i.e::
.. code-block:: none
score = <h_q / ||h_q||, h_k / ||h_k||>
eps
The epsilon value used in L2 normalization
query_head_units
The units of each query head. If it's empty, we will estimate it via the
shape_array of the query.
layout
This stands for the layout of the attention cell. The shape of the input/output will depend
on the layout. Currently, we support 'NKT', 'NTK' and 'TNK' in which
'N' means the batch_size, 'K' means the head, and 'T' means the length dimension.
use_einsum
Whether to use einsum for the computation
Returns
-------
context_vec
- layout is 'NKT' or 'NTK'
Shape (batch_size, query_length, num_heads * value_units)
- layout is 'TNK'
Shape (query_length, batch_size, num_heads * value_units)
additional_info
scores:
Shape (batch_size, num_head, query_length, mem_length)
attn_weight:
Shape (batch_size, num_head, query_length, mem_length)
| multi_head_dot_attn | python | dmlc/gluon-nlp | src/gluonnlp/attention_cell.py | https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/attention_cell.py | Apache-2.0 |
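A usage sketch (illustration only) that combines the mask helper with the attention kernel in the 'NTK' layout; query_head_units must be passed because scaled=True by default.
import mxnet as mx
from gluonnlp.attention_cell import gen_self_attn_mask, multi_head_dot_attn

mx.npx.set_np()
B, T, num_heads, head_units = 2, 6, 4, 8
query = mx.np.random.normal(0, 1, (B, T, num_heads, head_units))   # layout 'NTK'
key = mx.np.random.normal(0, 1, (B, T, num_heads, head_units))
value = mx.np.random.normal(0, 1, (B, T, num_heads, head_units))
valid_length = mx.np.array([6, 4])
mask = gen_self_attn_mask(mx.np.ones((B, T, 1)), valid_length, attn_type='full')
context_vec, (scores, attn_weights) = multi_head_dot_attn(
    query, key, value, mask=mask, query_head_units=head_units, layout='NTK')
print(context_vec.shape)               # (B, T, num_heads * head_units)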
def gen_rel_position(data, past_data=None, dtype=np.int32, layout='NT'):
"""Create a matrix of relative position for RelAttentionScoreCell.
The relative position is defined as the index difference: `mem_i` - `query_j`.
Note, though, that the implementation here makes sense in self-attention's setting,
but not in cross-attention's. Hence, both `mem_i` and `query_j` are time indices from
`data` (or, in incremental decoding's case, the concatenated sequence from the current
stepwise `data` and the previous steps `past_data`).
Parameters
----------
data
The data. Under incremental decoding, seq_length = 1.
- layout = 'NT'
Shape (batch_size, seq_length, C)
- layout = 'TN'
Shape (seq_length, batch_size, C)
past_data
This is only used under incremental decoding. Stacked data from previous steps.
dtype
Data type of the mask
layout
Layout of the data + past_data
Returns
-------
relative_position :
Shape (query_length, mem_length) where query_length = mem_length = seq_length
"""
time_axis = 1 if layout == 'NT' else 0
if past_data is None:
position = npx.arange_like(data, axis=time_axis)
else:
# for incremental decoding only, where past data is of the shape:
# NT(NTK): (B, L_seq, num_heads, n_kv) -> (B, L_seq, inner_dim)
# TN(TNK): (L_seq, B, num_heads, n_kv) -> (L_seq, B, inner_dim)
past_data = npx.reshape(past_data, (-2, -2, -5))
position = npx.arange_like(
np.concatenate([past_data, data], axis=time_axis),
axis=time_axis
)
query_position = np.expand_dims(position, axis=-1)
mem_position = np.expand_dims(position, axis=0)
relative_position = mem_position - query_position
return relative_position.astype(np.int32) # shape (L_seq, L_seq) | Create a matrix of relative position for RelAttentionScoreCell.
The relative position is defined as the index difference: `mem_i` - `query_j`.
Note, though, that the implementation here makes sense in self-attention's setting,
but not in cross-attention's. Hence, both `mem_i` and `query_j` are time indices from
`data` (or, in incremental decoding's case, the concatenated sequence from the current
stepwise `data` and the previous steps `past_data`).
Parameters
----------
data
The data. Under incremental decoding, seq_length = 1.
- layout = 'NT'
Shape (batch_size, seq_length, C)
- layout = 'TN'
Shape (seq_length, batch_size, C)
past_data
This is only used under incremental decoding. Stacked data from previous steps.
dtype
Data type of the mask
layout
Layout of the data + past_data
Returns
-------
relative_position :
Shape (query_length, mem_length) where query_length = mem_length = seq_length
| gen_rel_position | python | dmlc/gluon-nlp | src/gluonnlp/attention_cell.py | https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/attention_cell.py | Apache-2.0 |
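For illustration (not part of the source), the relative-position matrix of a length-5 sequence:
import mxnet as mx
from gluonnlp.attention_cell import gen_rel_position

mx.npx.set_np()
data = mx.np.ones((2, 5, 16))          # (batch_size, seq_length, C), layout 'NT'
rel_pos = gen_rel_position(data)
print(rel_pos.shape)                   # (5, 5); entry [i, j] equals j - i, i.e. memory index minus query index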
def __init__(self, query_units,
num_heads,
pos_embed_units: Optional[int] = None,
max_distance=None,
bidirectional=False,
num_buckets=None,
method='transformer_xl',
dropout: float = 0.0,
dtype='float32',
layout='NTK',
use_einsum=False,
embed_initializer=None):
"""
Parameters
----------
query_units
num_heads
pos_embed_units
max_distance
bidirectional
num_buckets
method
dropout
dtype
layout
use_einsum
"""
super().__init__()
self._dropout = dropout
self._method = method
self._query_units = query_units
self._num_heads = num_heads
self._bidirectional = bidirectional
self._num_buckets = num_buckets
assert query_units % num_heads == 0, 'The units must be divisible by the number of heads.'
self._head_query_units = query_units // num_heads
self._max_distance = max_distance
self._pos_embed_units = pos_embed_units
self._dtype = dtype
self._use_einsum = use_einsum
self._layout = layout
if self._layout not in ['NKT', 'NTK', 'TNK']:
raise ValueError('layout="{}" is not supported'.format(self._layout))
if method == 'transformer_xl':
if pos_embed_units is None:
pos_embed_units = self._num_heads * self._head_query_units
self._rel_pos_embed = SinusoidalPositionalEmbedding(units=pos_embed_units,
dtype=self._dtype)
self._rel_proj = nn.Dense(units=query_units,
in_units=pos_embed_units,
flatten=False,
use_bias=False,
dtype=self._dtype)
self._dropout_layer = nn.Dropout(dropout)
elif method == 'shaw':
assert self._max_distance is not None, 'Must set max_distance when method="shaw".'
if self._bidirectional:
vocab_size = self._max_distance * 2 + 1
else:
vocab_size = self._max_distance + 1
self._rel_pos_embed = LearnedPositionalEmbedding(
units=self._num_heads * self._head_query_units,
max_length=vocab_size,
weight_initializer=mx.init.Xavier(rnd_type="gaussian",
factor_type="in",
magnitude=1),
mode='wrap' if self._bidirectional else 'raise',
dtype=self._dtype)
elif method == 't5':
if self._num_buckets is None:
self._num_buckets = 32
if self._max_distance is None:
self._max_distance = 128
self._rel_pos_embed = BucketPositionalEmbedding(
units=num_heads,
num_buckets=self._num_buckets,
max_distance=self._max_distance,
bidirectional=self._bidirectional,
embed_initializer=embed_initializer,
dtype=self._dtype)
else:
raise NotImplementedError('method="{}" is currently not supported!'.format(method)) |
Parameters
----------
query_units
num_heads
pos_embed_units
max_distance
bidirectional
num_buckets
method
dropout
dtype
layout
use_einsum
| __init__ | python | dmlc/gluon-nlp | src/gluonnlp/attention_cell.py | https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/attention_cell.py | Apache-2.0 |
def forward(self, rel_positions, query=None):
"""Forward function
Parameters
----------
rel_positions
The relative shifts. Shape (query_length, mem_length).
Each element represents the shift between the :math:`i-th` element of query and
the :math:`j-th` element of memory.
query
The query for computing the relative scores. The shape depends on the layout.
If we use T5 attention, the query will not be used.
Returns
-------
rel_scores
The relative attention scores
Can have shape (batch_size, num_heads, query_length, mem_length)
or (num_heads, query_length, mem_length)
"""
if self._method == 'transformer_xl' or self._method == 'shaw':
assert query is not None, 'Must specify query if method={}'.format(self._method)
if self._bidirectional:
if self._max_distance is not None:
rel_positions = np.clip(rel_positions,
a_min=-self._max_distance, a_max=self._max_distance)
else:
if self._max_distance is not None:
rel_positions = np.clip(rel_positions,
a_min=0, a_max=self._max_distance)
# uniq_rel.shape = (#uniq,), rev_index.shape = (L_q, L_m)
uniq_rel, rev_index = np.unique(rel_positions, return_inverse=True)
uniq_rel_pos_embed = self._rel_pos_embed(uniq_rel)
if self._method == 'transformer_xl':
uniq_rel_pos_embed = self._rel_proj(self._dropout_layer(uniq_rel_pos_embed))
# Shape (#uniq, K, C_q)
uniq_rel_pos_embed = npx.reshape(uniq_rel_pos_embed,
(-2, self._num_heads, self._head_query_units))
# Calculate the dot-product between query and the relative positional embeddings.
# After the calculation, rel_score.shape = (L_q, #uniq, N, K)
if self._layout == 'NKT':
# query_for_rel: (N, K, L_q, C_q)
if self._use_einsum:
rel_score = np.einsum('bnid,jnd->ijbn', query, uniq_rel_pos_embed)
else:
rel_score = np.transpose(
np.matmul(query,
np.transpose(uniq_rel_pos_embed, (1, 2, 0))),
(2, 3, 0, 1)
)
elif self._layout == 'NTK':
# query_for_rel: (N, L_q, K, C_q)
if self._use_einsum:
rel_score = np.einsum('bind,jnd->ijbn', query, uniq_rel_pos_embed)
else:
rel_score = np.transpose(
np.matmul(np.swapaxes(query, 1, 2),
np.transpose(uniq_rel_pos_embed, (1, 2, 0))),
(2, 3, 0, 1)
)
elif self._layout == 'TNK':
# query_for_rel: (L_q, N, K, C_q)
if self._use_einsum:
rel_score = np.einsum('ibnd,jnd->ijbn', query, uniq_rel_pos_embed)
else:
rel_score = np.transpose(
np.matmul(np.transpose(query, (1, 2, 0, 3)),
np.transpose(uniq_rel_pos_embed, (1, 2, 0))),
(2, 3, 0, 1)
)
else:
raise NotImplementedError
# We use gather_nd to select the elements
# TODO(sxjscience) Use advanced indexing once available
rev_index = npx.reshape_like(rev_index, rel_positions).astype(np.int32)
query_idx = np.expand_dims(npx.arange_like(rel_positions, axis=0).astype(np.int32),
axis=-1) + np.zeros_like(rev_index)
rel_score = npx.gather_nd(rel_score, np.stack([query_idx, rev_index]))
rel_score = np.transpose(rel_score, (2, 3, 0, 1))
elif self._method == 't5':
# shape is (K, L_q, L_m)
rel_score = self._rel_pos_embed(rel_positions).transpose((2, 0, 1))
else:
raise NotImplementedError
return rel_score | Forward function
Parameters
----------
rel_positions
The relative shifts. Shape (query_length, mem_length).
Each element represents the shift between the :math:`i-th` element of query and
the :math:`j-th` element of memory.
query
The query for computing the relative scores. The shape depends on the layout.
If we use T5 attention, the query will not be used.
Returns
-------
rel_scores
The relative attention scores
Can have shape (batch_size, num_heads, query_length, mem_length)
or (num_heads, query_length, mem_length)
| forward | python | dmlc/gluon-nlp | src/gluonnlp/attention_cell.py | https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/attention_cell.py | Apache-2.0 |
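A hedged usage sketch: the two methods above belong to the relative-position score cell, referred to as RelAttentionScoreCell in the gen_rel_position docstring. The constructor arguments below follow the signature shown in this excerpt; treat the exact call as an assumption rather than the library's documented API.
import mxnet as mx
from gluonnlp.attention_cell import RelAttentionScoreCell, gen_rel_position

mx.npx.set_np()
cell = RelAttentionScoreCell(query_units=32, num_heads=4,
                             method='t5', bidirectional=True, layout='NTK')
cell.initialize()
data = mx.np.ones((2, 5, 32))
rel_pos = gen_rel_position(data)       # (5, 5)
rel_scores = cell(rel_pos)             # the 't5' method does not need the query
print(rel_scores.shape)                # (num_heads, 5, 5)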
def get_home_dir():
"""Get home directory for storing datasets/models/pre-trained word embeddings"""
_home_dir = os.environ.get('GLUONNLP_HOME', os.path.join('~', '.gluonnlp'))
# expand ~ to actual path
_home_dir = os.path.expanduser(_home_dir)
return _home_dir | Get home directory for storing datasets/models/pre-trained word embeddings | get_home_dir | python | dmlc/gluon-nlp | src/gluonnlp/base.py | https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/base.py | Apache-2.0 |
def get_data_home_dir():
"""Get home directory for storing the datasets"""
home_dir = get_home_dir()
return os.path.join(home_dir, 'datasets') | Get home directory for storing the datasets | get_data_home_dir | python | dmlc/gluon-nlp | src/gluonnlp/base.py | https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/base.py | Apache-2.0 |
def get_model_zoo_home_dir():
"""Get the local directory for storing pretrained models"""
home_dir = get_home_dir()
return os.path.join(home_dir, 'models') | Get the local directory for storing pretrained models | get_model_zoo_home_dir | python | dmlc/gluon-nlp | src/gluonnlp/base.py | https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/base.py | Apache-2.0 |
def get_model_zoo_checksum_dir():
"""Get the directory that stores the checksums of the artifacts in the model zoo """
curr_dir = os.path.realpath(os.path.dirname(os.path.realpath(__file__)))
check_sum_dir = os.path.join(curr_dir, 'models', 'model_zoo_checksums')
return check_sum_dir | Get the directory that stores the checksums of the artifacts in the model zoo | get_model_zoo_checksum_dir | python | dmlc/gluon-nlp | src/gluonnlp/base.py | https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/base.py | Apache-2.0 |
def get_repo_url():
"""Return the base URL for Gluon dataset and model repository """
default_repo = 's3://gluonnlp-numpy-data'
repo_url = os.environ.get('GLUONNLP_REPO_URL', default_repo)
if repo_url[-1] != '/':
repo_url = repo_url + '/'
return repo_url | Return the base URL for Gluon dataset and model repository | get_repo_url | python | dmlc/gluon-nlp | src/gluonnlp/base.py | https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/base.py | Apache-2.0 |
def get_repo_model_zoo_url():
"""Return the base URL for GluonNLP Model Zoo"""
repo_url = get_repo_url()
model_zoo_url = repo_url + 'models/'
return model_zoo_url | Return the base URL for GluonNLP Model Zoo | get_repo_model_zoo_url | python | dmlc/gluon-nlp | src/gluonnlp/base.py | https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/base.py | Apache-2.0 |
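A quick sketch (illustration only) of how the path helpers compose; GLUONNLP_HOME and GLUONNLP_REPO_URL are the documented override points, and '/tmp/gluonnlp_cache' below is just a hypothetical location.
import os
from gluonnlp.base import get_home_dir, get_data_home_dir, get_model_zoo_home_dir, get_repo_url

os.environ['GLUONNLP_HOME'] = '/tmp/gluonnlp_cache'   # hypothetical cache directory
print(get_home_dir())            # /tmp/gluonnlp_cache
print(get_data_home_dir())       # /tmp/gluonnlp_cache/datasets
print(get_model_zoo_home_dir())  # /tmp/gluonnlp_cache/models
print(get_repo_url())            # s3://gluonnlp-numpy-data/ unless GLUONNLP_REPO_URL overrides it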
def get_norm_layer(normalization: str = 'layer_norm',
axis: int = -1,
epsilon: float = 1e-5,
in_channels: int = 0, **kwargs):
"""
Get the normalization layer based on the type
Parameters
----------
normalization
The type of the layer normalization from ['layer_norm', 'no_norm', 'batch_norm']
axis
The axis along which to apply the normalization
epsilon
The epsilon of the normalization layer
in_channels
Input channel
Returns
-------
norm_layer
The layer normalization layer
"""
if isinstance(normalization, str):
if normalization == 'layer_norm':
norm_layer = nn.LayerNorm(axis=axis, epsilon=epsilon, in_channels=in_channels,
**kwargs)
elif normalization == 'no_norm':
norm_layer = NoNorm(in_channels=in_channels, **kwargs)
elif normalization == 'rms_norm':
norm_layer = RMSNorm(in_channels=in_channels, **kwargs)
elif normalization == 'identity':
norm_layer = IdentityActivation()
elif normalization == 'batch_norm':
norm_layer = nn.BatchNorm(axis=axis, epsilon=epsilon, in_channels=in_channels, **kwargs)
else:
raise NotImplementedError('normalization={} is not supported'.format(normalization))
return norm_layer
else:
raise NotImplementedError('The type of normalization must be str') |
Get the normalization layer based on the type
Parameters
----------
normalization
The type of the layer normalization from ['layer_norm', 'no_norm', 'batch_norm']
axis
The axis along which to apply the normalization
epsilon
The epsilon of the normalization layer
in_channels
Input channel
Returns
-------
norm_layer
The layer normalization layer
| get_norm_layer | python | dmlc/gluon-nlp | src/gluonnlp/layers.py | https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/layers.py | Apache-2.0 |
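A minimal sketch (not part of the source) of picking a normalization layer by name:
import mxnet as mx
from gluonnlp.layers import get_norm_layer

mx.npx.set_np()
norm = get_norm_layer('layer_norm', in_channels=16)
norm.initialize()
out = norm(mx.np.ones((2, 5, 16)))
print(out.shape)                       # (2, 5, 16)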
def _fmt_and_check_cutoffs(cutoffs, vocab_size):
"""Parse and get the cutoffs used in adaptive embedding + adaptive softmax
Parameters
----------
cutoffs
The cutoffs used to split the vocabulary into clusters
vocab_size
Size of the vocabulary
Returns
-------
cutoffs
The validated cutoffs, a sorted list [c0, c1, ..., c_{k-1}]
If the original cutoffs is empty or is None, return None
"""
# Sanity checks
if cutoffs is None:
return None
if isinstance(cutoffs, int):
cutoffs = [cutoffs]
else:
cutoffs = list(cutoffs)
if len(cutoffs) == 0:
return None
if cutoffs != sorted(cutoffs):
raise ValueError('cutoffs must be a sorted list of cutoff values. '
'Got {}, but expected {}'.format(cutoffs, sorted(cutoffs)))
if len(set(cutoffs)) != len(cutoffs):
raise ValueError('cutoffs cannot contain duplicates! cutoffs={}'.format(cutoffs))
if not cutoffs:
raise ValueError('cutoffs must not be empty. Got {}'.format(cutoffs))
if cutoffs[0] <= 0:
raise ValueError('The first cutoff value ({}) must be greater 0.'.format(cutoffs[0]))
if cutoffs[-1] >= vocab_size:
raise ValueError(
'The last cutoff value ({}) must be smaller than vocab_size ({}).'.format(
cutoffs[-1], vocab_size))
return cutoffs | Parse and get the cutoffs used in adaptive embedding + adaptive softmax
Parameters
----------
cutoffs
The cutoffs used to split the vocabulary into clusters
vocab_size
Size of the vocabulary
Returns
-------
cutoffs
The validated cutoffs, a sorted list [c0, c1, ..., c_{k-1}]
If the original cutoffs is empty or is None, return None
| _fmt_and_check_cutoffs | python | dmlc/gluon-nlp | src/gluonnlp/layers.py | https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/layers.py | Apache-2.0 |
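A small illustration (not from the source) of the validation behaviour of this helper:
from gluonnlp.layers import _fmt_and_check_cutoffs

print(_fmt_and_check_cutoffs([10, 100], vocab_size=1000))   # [10, 100]
print(_fmt_and_check_cutoffs(None, vocab_size=1000))        # None
# _fmt_and_check_cutoffs([10, 2000], vocab_size=1000) raises ValueError: the last cutoff must be < vocab_size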
def get_activation(act: Optional[Union[str, HybridBlock]]) -> HybridBlock:
"""Get the activation based on the string
Parameters
----------
act
The activation
Returns
-------
ret
The activation layer
"""
if act is None:
return lambda x: x
if isinstance(act, str):
if act == 'leaky':
# TODO(sxjscience) Add regex matching here to parse `leaky(0.1)`
return nn.LeakyReLU(0.1)
elif act == 'identity':
return IdentityActivation()
elif act == 'elu':
return ELU()
elif act == 'gelu':
return GELU(mode='erf')
elif act == 'gelu(tanh)':
return GELU(mode='tanh')
elif act == 'gelu(sigmoid)':
return GELU(mode='sigmoid')
elif act in ['relu', 'sigmoid', 'tanh', 'softrelu', 'softsign']:
return nn.Activation(act)
else:
raise NotImplementedError('act={} is not supported'.format(act))
else:
return act | Get the activation based on the string
Parameters
----------
act
The activation
Returns
-------
ret
The activation layer
| get_activation | python | dmlc/gluon-nlp | src/gluonnlp/layers.py | https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/layers.py | Apache-2.0 |
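For illustration (not part of the source), looking up an activation block by name:
import mxnet as mx
from gluonnlp.layers import get_activation

mx.npx.set_np()
act = get_activation('gelu(tanh)')     # tanh-approximated GELU
out = act(mx.np.array([-1.0, 0.0, 1.0]))
print(out)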
def __init__(self, units: int, dtype: Union[str, type] = 'float32'):
"""Use a geometric sequence of timescales.
Parameters
----------
units
The number of units for positional embedding
dtype
The dtype of the inner positional embeddings
"""
super().__init__()
def _init_sinusoidal_base(units):
half_units = units // 2
val = np.log(10000) / (half_units - 1)
val = np.exp(np.arange(half_units, dtype=np.float32) * -val)
return val
self._units = units
self._dtype = dtype
sinusoidal_base = _init_sinusoidal_base(units)
self.base_mult = Constant(sinusoidal_base) | Use a geometric sequence of timescales.
Parameters
----------
units
The number of units for positional embedding
dtype
The dtype of the inner positional embeddings
| __init__ | python | dmlc/gluon-nlp | src/gluonnlp/layers.py | https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/layers.py | Apache-2.0 |
def forward(self, positions):
"""
Parameters
----------
positions : NDArray
Shape (..., )
Returns
-------
ret :
Shape (..., units)
"""
emb = np.expand_dims(positions.astype(self._dtype), axis=-1) * self.base_mult.data()
sin_emb = np.sin(emb)
cos_emb = np.cos(emb)
if self._units % 2 == 0:
return np.concatenate([sin_emb, cos_emb], axis=-1)
else:
return np.concatenate(
[sin_emb, cos_emb, np.expand_dims(np.zeros_like(positions).astype(self._dtype),
axis=-1)], axis=-1) |
Parameters
----------
positions : NDArray
Shape (..., )
Returns
-------
ret :
Shape (..., units)
| forward | python | dmlc/gluon-nlp | src/gluonnlp/layers.py | https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/layers.py | Apache-2.0 |
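A usage sketch (illustration only); initialize() is needed because the sinusoidal base is stored as a Constant parameter.
import mxnet as mx
from gluonnlp.layers import SinusoidalPositionalEmbedding

mx.npx.set_np()
pos_embed = SinusoidalPositionalEmbedding(units=16)
pos_embed.initialize()
emb = pos_embed(mx.np.arange(10))
print(emb.shape)                       # (10, 16)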
def __init__(self,
units: int = 512,
hidden_size: int = 2048,
use_bias=True,
activation_dropout: float = 0.0,
dropout: float = 0.1,
weight_initializer=None,
bias_initializer='zeros',
activation='relu',
use_gated_activation=False,
normalization: str = 'layer_norm',
layer_norm_eps: float = 1E-5,
pre_norm: bool = False,
dtype='float32',
**kwargs):
"""
Parameters
----------
units
hidden_size
activation_dropout
dropout
weight_initializer
bias_initializer
activation
normalization
layer_norm or no_norm
layer_norm_eps
pre_norm
Pre-layer normalization as proposed in the paper:
"[ACL2018] The Best of Both Worlds: Combining Recent Advances in
Neural Machine Translation"
This will stabilize the training of Transformers.
You may also refer to
"[Arxiv2020] Understanding the Difficulty of Training Transformers"
"""
super().__init__()
self._dtype = dtype
self._pre_norm = pre_norm
self._use_gated_activation = use_gated_activation
self._kwargs = OrderedDict([
('units', units),
('hidden_size', hidden_size),
('activation_dropout', activation_dropout),
('activation', activation),
('dropout', dropout),
('normalization', normalization),
('layer_norm_eps', layer_norm_eps),
('pre_norm', pre_norm),
('dtype', self._dtype)
])
self.dropout_layer = nn.Dropout(dropout)
self.activation_dropout_layer = nn.Dropout(activation_dropout)
self.ffn_1 = nn.Dense(units=hidden_size,
in_units=units,
flatten=False,
use_bias=use_bias,
weight_initializer=weight_initializer,
bias_initializer=bias_initializer,
dtype=dtype)
if use_gated_activation:
self.gated_ffn_1 = nn.Dense(units=hidden_size,
in_units=units,
flatten=False,
use_bias=use_bias,
weight_initializer=weight_initializer,
bias_initializer=bias_initializer,
dtype=dtype)
self.activation = get_activation(activation)
self.ffn_2 = nn.Dense(units=units,
in_units=hidden_size,
flatten=False,
use_bias=use_bias,
weight_initializer=weight_initializer,
bias_initializer=bias_initializer,
dtype=dtype)
# TODO(sxjscience) We may need to set the dtype flag in LayerNorm, need to double check
self.layer_norm = get_norm_layer(in_channels=units,
normalization=normalization,
epsilon=layer_norm_eps,
**kwargs) |
Parameters
----------
units
hidden_size
activation_dropout
dropout
weight_initializer
bias_initializer
activation
normalization
layer_norm or no_norm
layer_norm_eps
pre_norm
Pre-layer normalization as proposed in the paper:
"[ACL2018] The Best of Both Worlds: Combining Recent Advances in
Neural Machine Translation"
This will stabilize the training of Transformers.
You may also refer to
"[Arxiv2020] Understanding the Difficulty of Training Transformers"
| __init__ | python | dmlc/gluon-nlp | src/gluonnlp/layers.py | https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/layers.py | Apache-2.0 |
def forward(self, data):
"""
Parameters
----------
F
data :
Shape (B, seq_length, C_in)
Returns
-------
out :
Shape (B, seq_length, C_out)
"""
residual = data
if self._pre_norm:
data = self.layer_norm(data)
if self._use_gated_activation:
gated_out = self.activation(self.gated_ffn_1(data))
out = gated_out * self.ffn_1(data)
else:
out = self.activation(self.ffn_1(data))
out = self.activation_dropout_layer(out)
out = self.ffn_2(out)
out = self.dropout_layer(out)
out = out + residual
if not self._pre_norm:
out = self.layer_norm(out)
return out |
Parameters
----------
F
data :
Shape (B, seq_length, C_in)
Returns
-------
out :
Shape (B, seq_length, C_out)
| forward | python | dmlc/gluon-nlp | src/gluonnlp/layers.py | https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/layers.py | Apache-2.0 |
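A minimal sketch (not part of the source) of the position-wise feed-forward block with pre-layer normalization:
import mxnet as mx
from gluonnlp.layers import PositionwiseFFN

mx.npx.set_np()
ffn = PositionwiseFFN(units=32, hidden_size=128, activation='gelu', pre_norm=True)
ffn.initialize()
out = ffn(mx.np.ones((2, 6, 32)))
print(out.shape)                       # (2, 6, 32); the residual connection keeps the unit count unchanged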
def __init__(self, vocab_size: int,
embed_size: int,
units: int,
cutoffs: Optional[Union[int, List]] = None,
div_val: float = 1.0,
dtype='float32',
scaled=True,
embedding_initializer: InitializerType = None,
weight_initializer: InitializerType = None):
"""
Parameters
----------
vocab_size
The size of the vocabulary
embed_size
The base size of the embedding vectors. The embedding size of each cluster will be
[embed_size / div_val**0, embed_size / div_val**1, embed_size / div_val**2, ...]
units
The number of units after the mapping
cutoffs
The cutoffs to slice the vocab to multiple clusters. It should be a sorted list. Each
value should be between 1 --> vocab_size - 1.
div_val
The base denominator for computing the size of the embedding vector in each cluster.
dtype
The data type of layer
scaled
Whether to scale the embedding by sqrt(units)
embedding_initializer
Initializer of the embedding vectors
weight_initializer
Initializer of projection layers
bias_initializer
Initializer of the bias
"""
super().__init__()
cutoffs = _fmt_and_check_cutoffs(cutoffs, vocab_size)
if cutoffs is None:
assert div_val == 1.0
self._dtype = dtype
self._kwargs = OrderedDict([
('cutoffs', cutoffs),
('vocab_size', vocab_size),
('embed_size', embed_size),
('units', units),
('div_val', div_val),
('dtype', dtype),
('scaled', scaled)
])
self._vocab_size = vocab_size
self._cutoffs = cutoffs
self._units = units
self._embed_size = embed_size
self._div_val = div_val
self._scaled = scaled
if self._scaled:
self._emb_scale = units**0.5
if div_val == 1.0:
self.embed0_weight = Parameter('embed0_weight',
shape=(vocab_size, embed_size),
init=embedding_initializer,
allow_deferred_init=True)
if units != embed_size:
self.inter_proj0_weight = Parameter('inter_proj0_weight',
shape=(embed_size, units),
init=weight_initializer,
allow_deferred_init=True)
else:
self.proj_layers = None
else:
self.proj_layers = nn.HybridSequential()
for i, (l_idx, r_idx) in enumerate(zip([0] + cutoffs, cutoffs + [vocab_size])):
inner_embed_size = int(embed_size / div_val**i)
if inner_embed_size == 0:
raise ValueError('div_val = {} is too large for the layer. Currently, the '
'cutoffs are {} and the embed_size is {}. Using the '
'div_val = {} will cause some clusters to have '
'embed_size=0.'.format(div_val, cutoffs, embed_size,
div_val))
setattr(
self, 'embed{}_weight'.format(i),
Parameter('embed{}_weight'.format(i),
shape=(r_idx - l_idx, inner_embed_size),
init=embedding_initializer,
allow_deferred_init=True))
setattr(self, 'inter_proj{}_weight'.format(i),
Parameter('inter_proj{}_weight'.format(i),
shape=(inner_embed_size, units),
init=weight_initializer,
allow_deferred_init=True)) |
Parameters
----------
vocab_size
The size of the vocabulary
embed_size
The base size of the embedding vectors. The embedding size of each cluster will be
[embed_size / div_val**0, embed_size / div_val**1, embed_size / div_val**2, ...]
units
The number of units after the mapping
cutoffs
The cutoffs to slice the vocab to multiple clusters. It should be a sorted list. Each
value should be between 1 --> vocab_size - 1.
div_val
The base denominator for computing the size of the embedding vector in each cluster.
dtype
The data type of layer
scaled
Whether to scale the embedding by sqrt(units)
embedding_initializer
Initializer of the embedding vectors
weight_initializer
Initializer of projection layers
bias_initializer
Initializer of the bias
| __init__ | python | dmlc/gluon-nlp | src/gluonnlp/layers.py | https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/layers.py | Apache-2.0 |
def forward(self, inp): # pylint: disable=arguments-differ
"""
Parameters
----------
inp
Shape (...,)
Returns
-------
out
Shape (..., units)
"""
if self._div_val == 1.0:
emb = np.take(getattr(self, 'embed0_weight').data(), inp, axis=0)
if self._units != self._embed_size:
emb = np.dot(emb, getattr(self, 'inter_proj0_weight').data())
else:
emb = None
for i, (l_idx, r_idx) in enumerate(zip([0] + self._cutoffs,
self._cutoffs + [self._vocab_size])):
emb_i = np.take(getattr(self, 'embed{}_weight'.format(i)).data(),
inp - l_idx, axis=0,
mode='clip')
emb_i = np.dot(emb_i, getattr(self, 'inter_proj{}_weight'.format(i)).data())
if emb is None:
emb = emb_i
else:
emb = np.where(np.expand_dims((inp >= l_idx) * (inp < r_idx), axis=-1),
emb_i, emb)
if self._scaled:
emb = emb * self._emb_scale
return emb |
Parameters
----------
inp
Shape (...,)
Returns
-------
out
Shape (..., units)
| forward | python | dmlc/gluon-nlp | src/gluonnlp/layers.py | https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/layers.py | Apache-2.0 |
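A hedged sketch of the adaptive embedding defined by the two methods above. The enclosing class name is not visible in this excerpt; it is assumed here to be exported as gluonnlp.layers.AdaptiveEmbedding.
import mxnet as mx
from gluonnlp.layers import AdaptiveEmbedding   # assumed class name

mx.npx.set_np()
embed = AdaptiveEmbedding(vocab_size=1000, embed_size=64, units=32,
                          cutoffs=[100, 500], div_val=2.0)
embed.initialize()
tokens = mx.np.array([[1, 50, 600], [2, 200, 999]], dtype='int32')
out = embed(tokens)
print(out.shape)                       # (2, 3, 32)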
def __init__(self, vocab_size: int, embed_size: int, in_units: int,
cutoffs: Optional[Union[int, List]] = None,
div_val: float = 1.0,
dtype='float32',
use_bias=True,
weight_initializer: InitializerType = None,
bias_initializer: InitializerType = None):
"""
Parameters
----------
vocab_size
Size of the vocabulary
embed_size
Base embedding size. The hidden representation will first be projected to
embed_size and then projected to vocab_size
in_units
The number of input units
cutoffs
The cutoff values
div_val
The base denominator for computing the size of the embedding vector in each cluster.
dtype
Data type
use_bias
Whether to use bias when computing the scores for the tokens
weight_initializer
bias_initializer
"""
super().__init__()
cutoffs = _fmt_and_check_cutoffs(cutoffs, vocab_size)
if cutoffs is None:
assert div_val == 1.0
self._vocab_size = vocab_size
self._embed_size = embed_size
self._in_units = in_units
self._cutoffs = cutoffs
self._div_val = div_val
if cutoffs is not None:
self._num_tail_clusters = len(self._cutoffs)
self._dtype = dtype
self._kwargs = OrderedDict([
('cutoffs', cutoffs),
('vocab_size', vocab_size),
('embed_size', embed_size),
('in_units', in_units),
('div_val', div_val),
('dtype', dtype),
('use_bias', use_bias)
])
if cutoffs is not None:
self.tail_cluster_score_proj = nn.Dense(units=self._num_tail_clusters,
in_units=embed_size,
flatten=False,
use_bias=use_bias,
weight_initializer=weight_initializer,
bias_initializer=bias_initializer)
self.inter_proj_l = nn.HybridSequential()
self.out_proj_l = nn.HybridSequential()
if div_val == 1.0:
if in_units != embed_size:
self.inter_proj_l.add(nn.Dense(in_units=in_units,
units=embed_size,
flatten=False,
use_bias=False,
weight_initializer=weight_initializer,
bias_initializer=bias_initializer))
self.out_proj_l.add(nn.Dense(in_units=embed_size,
units=vocab_size,
flatten=False,
use_bias=use_bias,
weight_initializer=weight_initializer,
bias_initializer=bias_initializer))
else:
for i, (l_idx, r_idx) in enumerate(zip([0] + self._cutoffs,
self._cutoffs + [vocab_size])):
ele_embed_size = int(embed_size / (div_val ** i))
self.inter_proj_l.add(nn.Dense(in_units=in_units,
units=ele_embed_size,
flatten=False,
use_bias=False,
weight_initializer=weight_initializer,
bias_initializer=bias_initializer))
self.out_proj_l.add(nn.Dense(in_units=ele_embed_size,
units=r_idx - l_idx,
flatten=False,
use_bias=use_bias,
weight_initializer=weight_initializer,
bias_initializer=bias_initializer)) |
Parameters
----------
vocab_size
Size of the vocabulary
embed_size
Base embedding size. The hidden representation will first be projected to
embed_size and then projected to vocab_size
in_units
The number of input units
cutoffs
The cutoff values
div_val
The base denominator for computing the size of the embedding vector in each cluster.
dtype
Data type
use_bias
Whether to use bias when computing the scores for the tokens
weight_initializer
bias_initializer
| __init__ | python | dmlc/gluon-nlp | src/gluonnlp/layers.py | https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/layers.py | Apache-2.0 |
def get_logits(self, hidden):
"""Get all the logits.
Parameters
----------
hidden
The hidden representation. Shape (..., in_units)
Returns
-------
logits
Shape (..., :math:`|V|`)
"""
if self._cutoffs is None:
if self._in_units != self._embed_size:
hidden = self.inter_proj_l[0](hidden)
logits = self.out_proj_l[0](hidden)
return logits
else:
all_logits = []
if self._div_val == 1.0:
if self._in_units == self._embed_size:
all_scores = self.out_proj_l[0](hidden)
tail_cluster_scores = self.tail_cluster_score_proj(hidden)
else:
inter_hidden = self.inter_proj_l[0](hidden)
all_scores = self.out_proj_l[0](inter_hidden)
tail_cluster_scores = self.tail_cluster_score_proj(inter_hidden)
all_scores_l = np.split(all_scores, self._cutoffs, axis=-1)
head_scores = all_scores_l[0]
else:
inter_hidden = self.inter_proj_l[0](hidden)
head_scores = self.out_proj_l[0](inter_hidden)
tail_cluster_scores = self.tail_cluster_score_proj(inter_hidden)
head_tail_cluster_logits = \
npx.log_softmax(np.concatenate([head_scores, tail_cluster_scores],
axis=-1), axis=-1)
head_logits, tail_cluster_logits = \
np.split(head_tail_cluster_logits, [self._cutoffs[0]], axis=-1)
tail_cluster_logits = np.split(tail_cluster_logits, self._num_tail_clusters, axis=-1)
all_logits.append(head_logits)
for i in range(1, len(self._cutoffs) + 1):
if self._div_val == 1.0:
ele_scores = all_scores_l[i]
else:
ele_scores = self.out_proj_l[i](self.inter_proj_l[i](hidden))
ele_logits = npx.log_softmax(ele_scores, axis=-1)
ele_logits = tail_cluster_logits[-i] + ele_logits
all_logits.append(ele_logits)
return np.concatenate(all_logits, axis=-1) | Get all the logits.
Parameters
----------
hidden
The hidden representation. Shape (..., in_units)
Returns
-------
logits
Shape (..., :math:`|V|`)
| get_logits | python | dmlc/gluon-nlp | src/gluonnlp/layers.py | https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/layers.py | Apache-2.0 |
def forward(self, hidden, target):
"""
Parameters
----------
hidden
The hidden representation
Shape (..., in_units)
target
The target representation
Shape (...,)
Returns
-------
sel_logits
The log probability assigned to the target label, i.e., log p(target | hidden), for each position
"""
# TODO(sxjscience) The computation here can be greatly accelerated! Due to the
# missing feature of index_update, we are not able to do this here.
logits = self.get_logits(hidden)
sel_logits = npx.pick(logits, target, axis=-1)
return sel_logits |
Parameters
----------
hidden
The hidden representation
Shape (..., in_units)
target
The target representation
Shape (...,)
Returns
-------
sel_logits
The log probability assigned to the target label, i.e., log p(target | hidden), for each position
| forward | python | dmlc/gluon-nlp | src/gluonnlp/layers.py | https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/layers.py | Apache-2.0 |
def forward(self, pred, label):
"""
Parameters
----------
pred :
The predictions of the network. Shape (..., V)
label :
The labels. Shape (..., )
Returns
-------
loss :
Shape (..., )
"""
if not self._from_logits:
pred = npx.log_softmax(pred, axis=-1)
log_likelihood = npx.pick(pred, label, axis=-1)
all_scores = pred.sum(axis=-1)
loss = - (1 - self._alpha) * log_likelihood\
- self._alpha / float(self._num_labels) * all_scores
return loss |
Parameters
----------
pred :
The predictions of the network. Shape (..., V)
label :
The labels. Shape (..., )
Returns
-------
loss :
Shape (..., )
| forward | python | dmlc/gluon-nlp | src/gluonnlp/loss.py | https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/loss.py | Apache-2.0 |
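A hedged sketch of the label-smoothing loss defined by the forward above. The class name and constructor arguments are not shown in this excerpt; they are assumed here to be LabelSmoothCrossEntropyLoss(num_labels, alpha, from_logits).
import mxnet as mx
from gluonnlp.loss import LabelSmoothCrossEntropyLoss   # assumed class name

mx.npx.set_np()
loss_fn = LabelSmoothCrossEntropyLoss(num_labels=5, alpha=0.1, from_logits=False)
pred = mx.np.random.normal(0, 1, (4, 5))   # unnormalized scores over 5 classes
label = mx.np.array([0, 2, 1, 4])
loss = loss_fn(pred, label)
print(loss.shape)                      # (4,)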
def select_vectors_by_position(data, positions):
"""Select each batch with the given positions.
Once advanced indexing can be hybridized, we can revise the implementation.
out[i, j, ...] = data[i, positions[i, j], ...]
Parameters
----------
data
Input tensor of contextualized token embeddings
Shape (batch_size, seq_length, ...)
positions
Input tensor of the positions.
Shape (batch_size, num_sel_positions).
For each sample in the batch, the values in this tensor must not exceed
the length of the sequence.
Returns
-------
out
The selection result.
Shape (batch_size, num_sel_positions, ...)
"""
# Here, we use gather_nd to select the output from data:
# Need to compute
# out[i, j, :] = in[i, masked_position[i, j], :]
# Thus, construct a indices with shape [2, batch_size, num_masked_position], where
# indices[0, i, j] = i
# indices[1, i, j] = masked_position[i, j]
# Then, out = gather_nd(in, indices)
positions = positions.astype(np.int32)
# batch_idx.shape = (batch_size, 1) as [[0], [1], [2], ...]
batch_idx = np.expand_dims(npx.arange_like(positions, axis=0),
axis=1).astype(np.int32)
batch_idx = batch_idx + np.zeros_like(positions)
indices = np.stack([batch_idx, positions])
# TODO(sxjscience) We can revise the implementation to advanced indexing
# once the bug in MXNet is solved:
# https://github.com/apache/incubator-mxnet/issues/18919
out = npx.gather_nd(data, indices)
return out | Select each batch with the given positions.
Once advanced indexing can be hybridized, we can revise the implementation.
out[i, j, ...] = data[i, positions[i, j], ...]
Parameters
----------
data
Input tensor of contextualized token embeddings
Shape (batch_size, seq_length, ...)
positions
Input tensor of the positions.
Shape (batch_size, num_sel_positions).
For each sample in the batch, the values in this tensor must not exceed
the length of the sequence.
Returns
-------
out
The selection result.
Shape (batch_size, num_sel_positions, ...)
| select_vectors_by_position | python | dmlc/gluon-nlp | src/gluonnlp/op.py | https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/op.py | Apache-2.0 |
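A small illustration (not part of the source), mirroring how masked positions are gathered during MLM-style pretraining:
import mxnet as mx
from gluonnlp.op import select_vectors_by_position

mx.npx.set_np()
data = mx.np.random.normal(0, 1, (2, 6, 4))    # (batch_size, seq_length, units)
positions = mx.np.array([[0, 3], [1, 5]])      # two selected positions per sample
out = select_vectors_by_position(data, positions)
print(out.shape)                               # (2, 2, 4); out[i, j] == data[i, positions[i, j]]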
def add_vectors_by_position(data, increment, positions):
"""Scatter each batch with the given positions.
data[i, positions[i, j], ...] += increment[i, j, ...]
Parameters
----------
data
Input tensor of the array to be updated.
Shape (batch_size, seq_length, ...)
increment
Input tensor of the values to be added
Shape (batch_size, num_disp_position, ...)
positions
Input tensor of the positions.
Shape (batch_size, num_disp_position).
For each sample in the batch, the values in this tensor must not exceed
the length of the sequence.
Returns
-------
out
The updated result.
Shape (batch_size, seq_length, ...)
"""
# Here, we use index_add to disperse the output from data:
# Need to compute
# out[i, masked_position[i, j], :] = in[i, j, :]
# Thus, construct an indices with shape [2, batch_size * num_masked_position], where
# indices[0, i * num_masked_position + j] = i
# indices[1, i * num_masked_position + j] = masked_position[i, j]
# And convert data to the shape of the (batch_size * num_masked_position, )
# Then, out = npx.index_add(data, indices, increment)
positions = positions.astype(np.int32)
# batch_idx.shape = (batch_size, 1) as [[0], [1], [2], ...]
batch_idx = np.expand_dims(npx.arange_like(positions, axis=0),
axis=1).astype(np.int32)
batch_idx = batch_idx + np.zeros_like(positions)
indices = np.stack([batch_idx.reshape((-1,)), positions.reshape((-1,))])
out = npx.index_add(data, indices, npx.reshape(increment, (-5, -4)))
return out | Scatter each batch with the given positions.
data[i, positions[i, j], ...] += increment[i, j, ...]
Parameters
----------
data
Input tensor of the array to be updated.
Shape (batch_size, seq_length, ...)
increment
Input tensor of the values to be added
Shape (batch_size, num_disp_position, ...)
positions
Input tensor of the positions.
Shape (batch_size, num_disp_position).
For each sample in the batch, the values in this tensor must not exceed
the length of the sequence.
Returns
-------
out
The updated result.
Shape (batch_size, seq_length, ...)
| add_vectors_by_position | python | dmlc/gluon-nlp | src/gluonnlp/op.py | https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/op.py | Apache-2.0 |
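For illustration (not from the source), scattering increments back into a sequence:
import mxnet as mx
from gluonnlp.op import add_vectors_by_position

mx.npx.set_np()
data = mx.np.zeros((2, 6))
increment = mx.np.array([[1., 2.], [3., 4.]])
positions = mx.np.array([[0, 3], [1, 5]])
out = add_vectors_by_position(data, increment, positions)
print(out)   # row 0: [1, 0, 0, 2, 0, 0]; row 1: [0, 3, 0, 0, 0, 4]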
def update_vectors_by_position(data, val, positions):
"""
Update each batch with the given positions. Considered as a reversed process of
"select_vectors_by_position", this is an operator similar to "add_vectors_by_position"
that updates the results instead of adding.
data[i, positions[i, j], :] = val[i, j, :]
Parameters
----------
data
Input tensor of the array to be updated.
Shape (batch_size, seq_length)
val
Input tensor of token ids
Shape (batch_size, num_disp_position)
positions
Input tensor of the positions.
Shape (batch_size, num_disp_position).
For each sample in the batch, the values in this tensor must not exceed
the length of the sequence.
Returns
-------
out
The updated result.
Shape (batch_size, seq_length)
"""
positions = positions.astype(np.int32)
# batch_idx.shape = (batch_size, 1) as [[0], [1], [2], ...]
batch_idx = np.expand_dims(npx.arange_like(positions, axis=0),
axis=1).astype(np.int32)
batch_idx = batch_idx + np.zeros_like(positions)
indices = np.stack([batch_idx.reshape((-1,)), positions.reshape((-1,))])
out = npx.index_update(data, indices, npx.reshape(val, (-5, -4)))
return out |
Update each batch with the given positions. Considered as a reversed process of
"select_vectors_by_position", this is an operator similar to "add_vectors_by_position"
that updates the results instead of adding.
data[i, positions[i, j], :] = val[i, j, :]
Parameters
----------
data
Input tensor of the array to be updated.
Shape (batch_size, seq_length)
val
Input tensor of token ids
Shape (batch_size, num_disp_position)
positions
Input tensor of the positions.
Shape (batch_size, num_disp_position).
For each sample in the batch, the values in this tensor must not exceed
the length of the sequence.
Returns
-------
out
The updated result.
Shape (batch_size, seq_length)
| update_vectors_by_position | python | dmlc/gluon-nlp | src/gluonnlp/op.py | https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/op.py | Apache-2.0 |
def gumbel_softmax(logits, temperature: float = 1.0, eps: float = 1E-10,
hard=True, use_np_gumbel: bool = True):
r"""Perform the gumbel-softmax trick to generate differentiable one-hot vectors from the input
logits.
Here, the gumbel distribution is
Gumbel(\alpha) = -log (-log U) + \log \alpha, in which U is the uniform(0, 1) distribution.
A nice property of Gumbel is:
\argmax({Gumbel(\alpha_i)}) \sim multinomial(\alpha_i)
The Gumbel-Softmax trick is to use the softmax + straight-through estimator to produce
one-hot vectors that represent the sampling result.
References:
1. https://en.wikipedia.org/wiki/Gumbel_distribution
2. [ICLR2017] Categorical Reparameterization with Gumbel-Softmax
Parameters
----------
logits
Logits. Shape (..., V)
temperature
The temperature that controls the smoothness of the output distribution.
Lower temperatures produce samples that are closer to one-hot vectors.
eps
The epsilon added for numerical stability of the gradient.
hard
Whether to use the straight-through estimator to produce one-hot vectors.
use_np_gumbel
Whether to use the np.random.gumbel operator
Returns
-------
ret
The returned output. Shape (..., V)
"""
# TODO(sxjscience) Investigate the impact of random.gumbel:
# Actually, random.gumbel has no eps and may have problems when calculating the gradient.
if use_np_gumbel:
gumbels = np.random.gumbel(np.zeros_like(logits))
else:
u = np.random.uniform(np.zeros_like(logits), 1)
gumbels = -np.log(-np.log(u + eps) + eps)
y = npx.softmax((gumbels + logits) / temperature, axis=-1)
if hard:
y_hard = np.max(y, axis=-1, keepdims=True) == y
y_hard = npx.stop_gradient(y_hard - y) + y
return y_hard
else:
return y | Perform the gumbel-softmax trick to generate differentiable one-hot vectors from the input
logits.
Here, the gumbel distribution is
Gumbel(\alpha) = -log (-log U) + \log \alpha, in which U is the uniform(0, 1) distribution.
A nice property of Gumbel is:
\argmax({Gumbel(\alpha_i)}) \sim multinomial(\alpha_i)
The Gumbel-Softmax trick is to use the softmax + straight-through estimator to produce
one-hot vectors that represent the sampling result.
References:
1. https://en.wikipedia.org/wiki/Gumbel_distribution
2. [ICLR2017] Categorical Reparameterization with Gumbel-Softmax
Parameters
----------
logits
Logits. Shape (..., V)
temperature
The temperature that controls the smoothness of the output distribution.
Lower temperatures produce samples that are closer to one-hot vectors.
eps
The epsilon added for numerical stability of the gradient.
hard
Whether to use the straight-through estimator to produce one-hot vectors.
use_np_gumbel
Whether to use the np.random.gumbel operator
Returns
-------
ret
The returned output. Shape (..., V)
| gumbel_softmax | python | dmlc/gluon-nlp | src/gluonnlp/op.py | https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/op.py | Apache-2.0 |
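A short usage sketch for gumbel_softmax (editorial addition; assumes MXNet numpy mode):

import mxnet as mx
from gluonnlp.op import gumbel_softmax

mx.npx.set_np()
logits = mx.np.random.normal(0, 1, (2, 4, 10))   # (..., V=10)
one_hot = gumbel_softmax(logits, temperature=0.5, hard=True)
# With hard=True the forward output is (essentially) one-hot along the last axis,
# while gradients flow through the underlying softmax (straight-through estimator).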
def trunc_gumbel(logits, truncation):
"""Sample from the TruncGumbel distribution.
The cumulative density function (CDF) of the Truncated Gumbel distribution is defined as
TruncGumbel(\alpha, truncation) \prop max(Gumbel(\alpha), truncation)
To sample from the distribution, we can use the CDF inversion technique.
References:
1. [NIPS2014] A* Sampling, https://papers.nips.cc/paper/5449-a-sampling.pdf
2. https://cmaddis.github.io/gumbel-machinery
Parameters
----------
logits
The logits. Shape (...,)
truncation
The truncation. Shape (...,)
Returns
-------
samples
Samples from the TruncGumbel(logits, truncation)
Shape (...,)
"""
gumbels = np.random.gumbel(np.zeros_like(logits)) + logits
return -np.log(np.exp(-gumbels) + np.exp(-truncation)) | Sample from the TruncGumbel distribution.
The cumulative distribution function (CDF) of the Truncated Gumbel distribution is defined as
TruncGumbel(\alpha, truncation) \propto max(Gumbel(\alpha), truncation)
To sample from the distribution, we can use the CDF inversion technique.
References:
1. [NIPS2014] A* Sampling, https://papers.nips.cc/paper/5449-a-sampling.pdf
2. https://cmaddis.github.io/gumbel-machinery
Parameters
----------
logits
The logits. Shape (...,)
truncation
The truncation. Shape (...,)
Returns
-------
samples
Samples from the TruncGumbel(logits, truncation)
Shape (...,)
| trunc_gumbel | python | dmlc/gluon-nlp | src/gluonnlp/op.py | https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/op.py | Apache-2.0 |
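A small sketch for trunc_gumbel (editorial addition; assumes MXNet numpy mode):

import mxnet as mx
from gluonnlp.op import trunc_gumbel

mx.npx.set_np()
logits = mx.np.random.normal(0, 1, (3, 5))
truncation = mx.np.zeros((3, 5))
samples = trunc_gumbel(logits, truncation)
# Every entry of samples is <= 0 here, because the Gumbel samples are truncated from above
# at the corresponding entry of `truncation`.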
def relative_position_bucket(relative_position,
bidirectional: bool = True,
num_buckets: int = 32,
max_distance: int = 128):
"""Map the relative position to buckets. The implementation is consistent with that
in [mesh_tensorflow](https://github.com/tensorflow/mesh/blob/c59988047e49b4d2af05603e3170724cdbadc467/mesh_tensorflow/transformer/transformer_layers.py#L595-L637)
where relative position is defined as `mem_i - query_j`. Thus, a positive value indicates
that the memory slot is in a later timestamp than the query slot.
After handling the bidirectional case (see below), the implementation uses the first half
of buckets to store exact differences and the second half to store the differences after
a logarithmic transformation.
Parameters
----------
relative_position
Shape (...,)
bidirectional
Whether we are dealing with bidirectional attention.
If it's bidirectional, positive shifts are mapped to [0, num_buckets // 2),
and negative shifts are mapped to [num_buckets // 2, num_buckets).
num_buckets
The number of buckets.
max_distance
Maximum distance. Positions that fall outside of 'max_distance' will be trimmed.
Returns
-------
buckets
Shape (...,).
It has the same shape as the `relative_position`. It will have int32 type.
"""
ret = 0
relative_position = -relative_position
if bidirectional:
assert num_buckets % 2 == 0, 'When bidirectional is True, the number of buckets must be ' \
'divisible by 2.'
num_buckets //= 2
ret = ret + (relative_position < 0).astype(np.int32) * num_buckets
relative_position = np.abs(relative_position)
else:
# Clip all the negative values to 0
relative_position = np.clip(relative_position, a_min=0, a_max=None)
# Now, the relative_position is in the range [0, inf)
# Half of the buckets deal with the exact increments,
# i.e., 0, 1, 2, ..., max_exact - 1, where max_exact = num_buckets // 2
max_exact = num_buckets // 2
is_small = relative_position < max_exact
# The other half of the buckets are for logarithmically bigger bins in positions up to
# max_distance
val_if_large = max_exact + (
np.log(relative_position.astype(np.float32) / max_exact)
/ math.log(max_distance / max_exact) * (num_buckets - max_exact)).astype(np.int32)
val_if_large = np.minimum(val_if_large, num_buckets - 1)
ret = ret + np.where(is_small, relative_position, val_if_large)
return ret | Map the relative position to buckets. The implementation is consistent with that
in [mesh_tensorflow](https://github.com/tensorflow/mesh/blob/c59988047e49b4d2af05603e3170724cdbadc467/mesh_tensorflow/transformer/transformer_layers.py#L595-L637)
where relative position is defined as `mem_i - query_j`. Thus, a positive value indicates
that the memory slot is in a later timestamp than the query slot.
After handling the bidirectional case (see below), the implementation uses the first half
of buckets to store exact differences and the second half to store the differences after
a logarithmic transformation.
Parameters
----------
relative_position
Shape (...,)
bidirectional
Whether we are dealing with bidirectional attention.
If it's bidirectional, positive shifts are mapped to [0, num_buckets // 2),
and negative shifts are mapped to [num_buckets // 2, num_buckets).
num_buckets
The number of buckets.
max_distance
Maximum distance. Positions that fall outside of 'max_distance' will be trimmed.
Returns
-------
buckets
Shape (...,).
It has the same shape as the `relative_position`. It will have int32 type.
| relative_position_bucket | python | dmlc/gluon-nlp | src/gluonnlp/op.py | https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/op.py | Apache-2.0 |
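A usage sketch for relative_position_bucket (editorial addition; assumes MXNet numpy mode). Following the docstring, relative_position is mem_i - query_j:

import mxnet as mx
from gluonnlp.op import relative_position_bucket

mx.npx.set_np()
query = mx.np.arange(6).reshape((6, 1))
mem = mx.np.arange(6).reshape((1, 6))
buckets = relative_position_bucket(mem - query, bidirectional=True,
                                    num_buckets=32, max_distance=128)
# buckets has shape (6, 6) and int32 dtype; positive and negative shifts land in the two
# halves of the bucket range, as described in the docstring above.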
def _expand_to_beam_size(data, beam_size, batch_size, state_batch_axis=None):
"""Tile all the states to have batch_size * beam_size on the batch axis.
Parameters
----------
data : A single mx.np.ndarray or nested container with mx.np.ndarray
Each mx.np.ndarray should have shape (N, ...) when state_batch_axis is None,
or have batch size N on the axis given by state_batch_axis otherwise.
beam_size : int
Beam size
batch_size : int
Batch size
state_batch_axis : Nested structure of dictionary, default None.
Descriptors for states, usually from decoder's ``state_batch_axis()``.
When None, this method assumes that the batch axis is the first dimension.
Returns
-------
new_states : Object that contains mx.np.ndarray
Each mx.np.ndarray should have shape batch_size * beam_size on the batch axis.
"""
if isinstance(data, (list, tuple)):
if state_batch_axis is not None:
# TODO(sxjscience) Better Exception Handling
return [_expand_to_beam_size(d, beam_size, batch_size, batch_axis)
for d, batch_axis in zip(data, state_batch_axis)]
else:
return [_expand_to_beam_size(d, beam_size, batch_size, None) for d in data]
elif isinstance(data, dict):
if state_batch_axis is not None:
return {k: _expand_to_beam_size(v, beam_size, batch_size, state_batch_axis[k])
for k, v in data.items()}
else:
return {k: _expand_to_beam_size(v, beam_size, batch_size, None)
for k, v in data.items()}
elif isinstance(data, mx.np.ndarray):
if state_batch_axis is None:
batch_axis = 0
else:
batch_axis = state_batch_axis
if data.shape[batch_axis] != batch_size:
raise ValueError('The batch size of all the inner elements in states must be '
'{}, Found shape={}, inferred batch axis={}'.format(batch_size, data.shape, batch_axis))
new_shape = list(data.shape)
new_shape[batch_axis] = batch_size * beam_size
new_shape = tuple(new_shape)
bcast_new_shape = new_shape[:batch_axis] + (batch_size, beam_size) + new_shape[(batch_axis + 1):]
return mx.np.expand_dims(data, batch_axis + 1).broadcast_to(bcast_new_shape).reshape(new_shape)
elif data is None:
return None
else:
raise NotImplementedError | Tile all the states to have batch_size * beam_size on the batch axis.
Parameters
----------
data : A single mx.np.ndarray or nested container with mx.np.ndarray
Each mx.np.ndarray should have shape (N, ...) when state_batch_axis is None,
or have batch size N on the axis given by state_batch_axis otherwise.
beam_size : int
Beam size
batch_size : int
Batch size
state_batch_axis : Nested structure of dictionary, default None.
Descriptors for states, usually from decoder's ``state_batch_axis()``.
When None, this method assumes that the batch axis is the first dimension.
Returns
-------
new_states : Object that contains mx.np.ndarray
Each mx.np.ndarray should have shape batch_size * beam_size on the batch axis.
| _expand_to_beam_size | python | dmlc/gluon-nlp | src/gluonnlp/sequence_sampler.py | https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/sequence_sampler.py | Apache-2.0 |
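A small illustration of _expand_to_beam_size (editorial addition; this is an underscore-prefixed internal helper that the sampler normally calls itself, so the direct import is only for demonstration):

import mxnet as mx
from gluonnlp.sequence_sampler import _expand_to_beam_size

mx.npx.set_np()
state = mx.np.random.normal(0, 1, (2, 7, 16))   # (batch_size=2, seq_length=7, units=16)
tiled = _expand_to_beam_size(state, beam_size=4, batch_size=2)
# tiled.shape == (8, 7, 16): each of the 2 samples is repeated 4 times along the batch axis.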
def _choose_states(states, indices, state_batch_axis=None):
"""
Parameters
----------
states : Object that contains mx.np.ndarray
indices : mx.np.ndarray
Indices of the states to take. Shape (N,).
state_batch_axis
Descriptors for states, it is generated from decoder's ``state_batch_axis``.
When None, this method assumes that the batch axis is the first dimension.
Returns
-------
new_states : Object that contains mx.np.ndarray
Each mx.np.ndarray should have shape (..., N, ...).
"""
if isinstance(states, (list, tuple)):
if state_batch_axis is not None:
return [_choose_states(d, indices, b_axis)
for d, b_axis in zip(states, state_batch_axis)]
else:
return [_choose_states(d, indices, None) for d in states]
elif isinstance(states, dict):
if state_batch_axis is not None:
return {k: _choose_states(v, indices, state_batch_axis[k]) for k, v in states.items()}
else:
return {k: _choose_states(v, indices, None) for k, v in states.items()}
elif isinstance(states, mx.np.ndarray):
if state_batch_axis is None:
batch_axis = 0
else:
batch_axis = state_batch_axis
states = mx.np.take(states, indices, axis=batch_axis)
return states
else:
raise TypeError('The type of the states is not supported, type(states) = {}'.format(type(states))) |
Parameters
----------
states : Object that contains mx.np.ndarray
indices : mx.np.ndarray
Indices of the states to take. Shape (N,).
state_batch_axis
Descriptors for states, it is generated from decoder's ``state_batch_axis``.
When None, this method assumes that the batch axis is the first dimension.
Returns
-------
new_states : Object that contains mx.np.ndarray
Each mx.np.ndarray should have shape (..., N, ...).
| _choose_states | python | dmlc/gluon-nlp | src/gluonnlp/sequence_sampler.py | https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/sequence_sampler.py | Apache-2.0 |
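A matching illustration of _choose_states (editorial addition; the same caveat applies, since it is an internal helper):

import mxnet as mx
from gluonnlp.sequence_sampler import _choose_states

mx.npx.set_np()
states = {'h': mx.np.arange(12).reshape((6, 2))}   # flattened (batch_size * beam_size, units)
indices = mx.np.array([0, 0, 3, 5], dtype=mx.np.int32)
picked = _choose_states(states, indices)
# picked['h'].shape == (4, 2); row i of picked['h'] is row indices[i] of states['h'].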
def __init__(self, beam_size, vocab_size, eos_id, scorer, state_batch_axis,
stochastic=False):
"""
Parameters
----------
beam_size : int
vocab_size : int
eos_id : int
scorer : BeamSearchScorer
state_batch_axis :
stochastic : bool
"""
super().__init__()
self._beam_size = beam_size
self._vocab_size = vocab_size
self._eos_id = eos_id
self._scorer = scorer
self._state_batch_axis = state_batch_axis
self.stochastic = stochastic
assert eos_id is None or eos_id >= 0, 'eos_id cannot be negative! Received eos_id={}'.format(eos_id) |
Parameters
----------
beam_size : int
vocab_size : int
eos_id : int
scorer : BeamSearchScorer
state_batch_axis :
stochastic : bool
| __init__ | python | dmlc/gluon-nlp | src/gluonnlp/sequence_sampler.py | https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/sequence_sampler.py | Apache-2.0 |
def gumbel_with_maximum(self, phi, T, dim=-1):
"""Calculate the Gumbel with maximum.
Parameters
----------
phi : mx.np.ndarray
Shape (batch_size, beam_size, L).
T : mx.np.ndarray
The previous scores. Shape (batch_size, beam_size)
"""
g_phi = phi + mx.np.random.gumbel(mx.np.zeros_like(phi))
Z = g_phi.max(dim)
g = self.shift_gumbel_maximum(g_phi, T, dim, Z=Z)
return g | Calculate the Gumbel with maximum.
Parameters
----------
phi : mx.np.ndarray
Shape (batch_size, beam_size, L).
T : mx.np.ndarray
The previous scores. Shape (batch_size, beam_size)
| gumbel_with_maximum | python | dmlc/gluon-nlp | src/gluonnlp/sequence_sampler.py | https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/sequence_sampler.py | Apache-2.0 |
def shift_gumbel_maximum(self, g_phi, T, axis=-1, Z=None):
"""
Parameters
----------
g_phi : mx.np.ndarray
Shape (batch_size, beam_size, L).
T : mx.np.ndarray
The previous scores. Shape (batch_size, beam_size)
axis
The axis
Z
The Z value
"""
if Z is None:
Z = g_phi.max(axis=axis)
T_ = mx.npx.reshape(T, (-4, 1))
Z_ = mx.npx.reshape(Z, (-4, 1))
u = T_ - g_phi + mx.np.log1p(-mx.np.exp(g_phi - Z_) + 1e-5)
return T_ - mx.npx.relu(u) - mx.np.log1p(mx.np.exp(-mx.np.abs(u))) |
Parameters
----------
g_phi : mx.np.ndarray
Shape (batch_size, beam_size, L).
T : mx.np.ndarray
The previous scores. Shape (batch_size, beam_size)
axis
The axis
Z
The Z value
| shift_gumbel_maximum | python | dmlc/gluon-nlp | src/gluonnlp/sequence_sampler.py | https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/sequence_sampler.py | Apache-2.0 |
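An added note on the formula above (it restates the code, dropping the 1e-5 stabilizer): with
u_i = T - g_phi_i + log1p(-exp(g_phi_i - Z)),
the returned value T - relu(u_i) - log1p(exp(-|u_i|)) is a numerically stable evaluation of
-log(exp(-T) + exp(-g_phi_i) - exp(-Z)),
i.e. the sampled Gumbels are shifted so that their maximum equals T, which is the construction used by stochastic beam search.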
def forward(self, samples, valid_length, outputs, scores, step, beam_alive_mask, # pylint: disable=arguments-differ
states, batch_shift):
"""
Parameters
----------
samples : mx.np.ndarray
The current samples generated by beam search.
Shape (batch_size, beam_size, L).
valid_length : mx.np.ndarray
The current valid lengths of the samples
outputs : mx.np.ndarray
Outputs from predictor. If from_logits was set to True in scorer, then it's the
log probability of the current step. Else, it's the unnormalized outputs before
softmax or log_softmax.
Shape (batch_size * beam_size, V).
scores : mx.np.ndarray
The previous scores. Shape (batch_size, beam_size)
step : mx.np.ndarray
The current step for doing beam search. Begins from 1. Shape ()
beam_alive_mask : mx.np.ndarray
Shape (batch_size, beam_size)
states : nested structure of mx.np.ndarray
Each mx.np.ndarray should have shape (N, ...) when state_batch_axis is None,
or have batch size N on the axis given by state_batch_axis otherwise.
batch_shift : mx.np.ndarray
Contains [0, beam_size, 2 * beam_size, ..., (batch_size - 1) * beam_size].
Shape (batch_size,)
Returns
-------
new_samples : mx.np.ndarray or an empty list
The updated samples.
When single_step is False, shape (batch_size, beam_size, L + 1)
new_valid_length : mx.np.ndarray
Valid lengths of the samples. Shape (batch_size, beam_size)
new_scores : mx.np.ndarray
Shape (batch_size, beam_size)
chosen_word_ids : mx.np.ndarray
The chosen word ids of the step. Shape (batch_size, beam_size). If it's negative,
no word will be appended to the beam.
beam_alive_mask : mx.np.ndarray
Shape (batch_size, beam_size)
new_states : nested structure of mx.np.ndarray
Inner mx.np.ndarrays have shape (batch_size * beam_size, ...)
"""
beam_size = self._beam_size
vocab_size = self._vocab_size
beam_alive_mask_bcast = mx.np.expand_dims(beam_alive_mask, axis=2)
candidate_scores = self._scorer(mx.npx.reshape(outputs, (-6, -1, beam_size, -2)),
scores, step)
if self.stochastic:
if step == 1:
candidate_scores_gumbel\
= candidate_scores[:1]\
+ mx.np.random.gumbel(mx.np.zeros_like(candidate_scores[:1]))
candidate_scores_residual = candidate_scores[1:]
candidate_scores = mx.np.concatenate((candidate_scores_gumbel,
candidate_scores_residual), axis=0)
else:
candidate_scores = self.gumbel_with_maximum(candidate_scores, scores, -1)
# Concat the candidate scores and the scores of the finished beams
# The resulting candidate score will have shape (batch_size, beam_size * |V| + beam_size)
candidate_scores = mx.np.where(beam_alive_mask_bcast,
candidate_scores,
mx.np.full_like(candidate_scores, LARGE_NEGATIVE_FLOAT))
finished_scores = mx.np.where(beam_alive_mask,
mx.np.full_like(scores,
LARGE_NEGATIVE_FLOAT),
scores)
candidate_scores = mx.np.concatenate([mx.npx.reshape(candidate_scores, (-2, -1)),
finished_scores],
axis=1)
# Get the top K scores
# new_scores and indices will have shape (batch_size, beam_size)
new_scores, indices = mx.npx.topk(candidate_scores, axis=1, k=beam_size, ret_typ='both')
indices = indices.astype(mx.np.int32)
use_prev = (indices >= (beam_size * vocab_size)).astype(mx.np.int32)
chosen_word_ids = mx.np.mod(indices, vocab_size)
beam_ids = mx.np.where(use_prev, indices - beam_size * vocab_size,
mx.np.floor(indices / vocab_size).astype(mx.np.int32))
batch_beam_indices = beam_ids + mx.np.expand_dims(batch_shift, axis=1)
chosen_word_ids = mx.np.where(use_prev, - mx.np.ones_like(indices), chosen_word_ids)
# Update the samples and valid_length
# TODO(sxjscience) The current implementation is quite tricky
# We should wait for hybridizable advanced indexing to avoid this
selected_samples = mx.np.take(mx.npx.reshape(samples, (-5, -2)),
batch_beam_indices.reshape((-1,)), axis=0)
new_samples = mx.npx.reshape(mx.np.concatenate([selected_samples,
chosen_word_ids.reshape((-1, 1))],
axis=1),
(-6, -1, beam_size, -2))
new_valid_length = mx.np.take(valid_length.reshape((-1,)),
batch_beam_indices.reshape((-1,)),
axis=0).reshape((-1, beam_size)) + 1 - use_prev
# Update the states
new_states = _choose_states(states, batch_beam_indices.reshape((-1,)),
self._state_batch_axis)
# Update the alive mask.
beam_alive_mask = mx.np.take(beam_alive_mask.reshape((-1,)),
batch_beam_indices.reshape((-1,)), axis=0)\
.reshape((-1, beam_size))
if self._eos_id is not None:
beam_alive_mask = beam_alive_mask * (chosen_word_ids != self._eos_id).astype(mx.np.float32)
return new_samples, new_valid_length, new_scores, chosen_word_ids,\
beam_alive_mask, new_states |
Parameters
----------
samples : mx.np.ndarray
The current samples generated by beam search.
Shape (batch_size, beam_size, L).
valid_length : mx.np.ndarray
The current valid lengths of the samples
outputs : mx.np.ndarray
Outputs from predictor. If from_logits was set to True in scorer, then it's the
log probability of the current step. Else, it's the unnormalized outputs before
softmax or log_softmax.
Shape (batch_size * beam_size, V).
scores : mx.np.ndarray
The previous scores. Shape (batch_size, beam_size)
step : mx.np.ndarray
The current step for doing beam search. Begins from 1. Shape ()
beam_alive_mask : mx.np.ndarray
Shape (batch_size, beam_size)
states : nested structure of mx.np.ndarray
Each mx.np.ndarray should have shape (N, ...) when state_batch_axis is None,
or have batch size N on the axis given by state_batch_axis otherwise.
batch_shift : mx.np.ndarray
Contains [0, beam_size, 2 * beam_size, ..., (batch_size - 1) * beam_size].
Shape (batch_size,)
Returns
-------
new_samples : mx.np.ndarray or an empty list
The updated samples.
When single_step is False, shape (batch_size, beam_size, L + 1)
new_valid_length : mx.np.ndarray
Valid lengths of the samples. Shape (batch_size, beam_size)
new_scores : mx.np.ndarray
Shape (batch_size, beam_size)
chosen_word_ids : mx.np.ndarray
The chosen word ids of the step. Shape (batch_size, beam_size). If it's negative,
no word will be appended to the beam.
beam_alive_mask : mx.np.ndarray
Shape (batch_size, beam_size)
new_states : nested structure of mx.np.ndarray
Inner mx.np.ndarrays have shape (batch_size * beam_size, ...)
| forward | python | dmlc/gluon-nlp | src/gluonnlp/sequence_sampler.py | https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/sequence_sampler.py | Apache-2.0 |
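A hypothetical sketch of the auxiliary tensors a single update step expects (editorial addition; `updater` below stands for an instance of this step-update block, whose class name is not shown in this excerpt, and in practice the surrounding beam search sampler builds all of these):

import mxnet as mx

mx.npx.set_np()
batch_size, beam_size, vocab_size = 2, 4, 10
batch_shift = mx.np.arange(0, batch_size * beam_size, beam_size, dtype=mx.np.int32)  # [0, 4]
samples = mx.np.zeros((batch_size, beam_size, 1), dtype=mx.np.int32)   # grows by one token per step
valid_length = mx.np.ones((batch_size, beam_size), dtype=mx.np.int32)
scores = mx.np.zeros((batch_size, beam_size))
beam_alive_mask = mx.np.ones((batch_size, beam_size))
outputs = mx.np.random.normal(0, 1, (batch_size * beam_size, vocab_size))
step = mx.np.array(1)
# updater(samples, valid_length, outputs, scores, step, beam_alive_mask, states, batch_shift)
# would return samples of shape (batch_size, beam_size, 2), the updated valid lengths and
# scores, the chosen word ids, the new alive mask, and the reordered states.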