code | docstring | func_name | language | repo | path | url | license |
---|---|---|---|---|---|---|---|
def forward(self, inputs, token_types, valid_length,
masked_positions):
"""Getting the scores of the masked positions.
Parameters
----------
inputs
- layout = 'NT'
Shape (batch_size, seq_length)
- layout = 'TN'
Shape (seq_length, batch_size)
token_types
The type of the token. For example, if the inputs contain two sequences,
we will set different token types for the first sentence and the second sentence.
- layout = 'NT'
Shape (batch_size, seq_length)
- layout = 'TN'
Shape (seq_length, batch_size)
valid_length
The valid length of each sequence
Shape (batch_size,)
masked_positions
The masked position of the sequence
Shape (batch_size, num_masked_positions).
Returns
-------
contextual_embedding
- layout = 'NT'
Shape (batch_size, seq_length, units).
- layout = 'TN'
Shape (seq_length, batch_size, units).
pooled_out
Shape (batch_size, units)
mlm_scores
Shape (batch_size, num_masked_positions, vocab_size)
"""
contextual_embeddings, pooled_out = self.backbone_model(inputs, token_types, valid_length)
if self.backbone_model.layout == 'TN':
mlm_features = select_vectors_by_position(np.swapaxes(contextual_embeddings, 0, 1),
masked_positions)
else:
mlm_features = select_vectors_by_position(contextual_embeddings, masked_positions)
intermediate_output = self.mlm_decoder(mlm_features)
if self.backbone_model.embed_size != self.backbone_model.units:
scores = self.embedding_table(
intermediate_output[:, :, :self.backbone_model.embed_size])
extra_scores = self.extra_table(
intermediate_output[:, :, self.backbone_model.embed_size:])
mlm_scores = scores + extra_scores
else:
mlm_scores = self.embedding_table(intermediate_output)
return contextual_embeddings, pooled_out, mlm_scores | Getting the scores of the masked positions.
Parameters
----------
inputs
- layout = 'NT'
Shape (batch_size, seq_length)
- layout = 'TN'
Shape (seq_length, batch_size)
token_types
The type of the token. For example, if the inputs contain two sequences,
we will set different token types for the first sentence and the second sentence.
- layout = 'NT'
Shape (batch_size, seq_length)
- layout = 'TN'
Shape (seq_length, batch_size)
valid_length
The valid length of each sequence
Shape (batch_size,)
masked_positions
The masked position of the sequence
Shape (batch_size, num_masked_positions).
Returns
-------
contextual_embedding
- layout = 'NT'
Shape (batch_size, seq_length, units).
- layout = 'TN'
Shape (seq_length, batch_size, units).
pooled_out
Shape (batch_size, units)
mlm_scores
Shape (batch_size, num_masked_positions, vocab_size)
| forward | python | dmlc/gluon-nlp | src/gluonnlp/models/mobilebert.py | https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/models/mobilebert.py | Apache-2.0 |
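A shape-level usage sketch for the forward pass above. It assumes a MobileBertForMLM instance `model` that has already been constructed and initialized with layout 'NT'; all sizes and the vocabulary size below are illustrative, not taken from the source.

import mxnet as mx

# `model` is assumed to be an initialized MobileBertForMLM with layout 'NT' (illustrative).
batch_size, seq_length, num_masked = 2, 16, 3
inputs = mx.np.random.randint(0, 30522, (batch_size, seq_length))            # (B, L) token ids
token_types = mx.np.zeros((batch_size, seq_length), dtype=mx.np.int32)       # single-segment input
valid_length = mx.np.full((batch_size,), seq_length, dtype=mx.np.int32)      # (B,)
masked_positions = mx.np.array([[1, 5, 7], [2, 3, 9]], dtype=mx.np.int32)    # (B, num_masked)

contextual_embeddings, pooled_out, mlm_scores = model(
    inputs, token_types, valid_length, masked_positions)
# contextual_embeddings: (B, L, units), pooled_out: (B, units),
# mlm_scores: (B, num_masked, vocab_size)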
def __init__(self, backbone_cfg,
weight_initializer=None,
bias_initializer=None):
"""
Parameters
----------
backbone_cfg
The cfg of the backbone model
weight_initializer
bias_initializer
"""
super().__init__()
self.backbone_model = MobileBertModel.from_cfg(backbone_cfg)
if weight_initializer is None:
weight_initializer = self.backbone_model.weight_initializer
if bias_initializer is None:
bias_initializer = self.backbone_model.bias_initializer
# Construct nsp_classifier for next sentence prediction
self.nsp_classifier = nn.Dense(units=2,
in_units=self.backbone_model.units,
weight_initializer=weight_initializer,
dtype=self.backbone_model.dtype)
self.mlm_decoder = nn.HybridSequential()
# Extra non-linear layer
self.mlm_decoder.add(nn.Dense(units=self.backbone_model.units,
in_units=self.backbone_model.units,
flatten=False,
weight_initializer=weight_initializer,
bias_initializer=bias_initializer,
dtype=self.backbone_model.dtype))
self.mlm_decoder.add(get_activation(self.backbone_model.activation))
# use basic layer normalization for pretraining
self.mlm_decoder.add(nn.LayerNorm(epsilon=self.backbone_model.layer_norm_eps,
in_channels=self.backbone_model.units))
# only the dense weight is tied to the word embedding; the bias is re-initialized
# and stored as 'word_embed_bias', which is not used in the original embedding
self.embedding_table = nn.Dense(
units=self.backbone_model.vocab_size,
in_units=self.backbone_model.embed_size,
flatten=False,
bias_initializer=bias_initializer,
dtype=self.backbone_model.dtype)
self.embedding_table.weight = self.backbone_model.word_embed.weight
if self.backbone_model.embed_size != self.backbone_model.units:
self.extra_table = nn.Dense(
units=self.backbone_model.vocab_size,
in_units=self.backbone_model.units -
self.backbone_model.embed_size,
flatten=False,
use_bias=False,
bias_initializer=bias_initializer,
dtype=self.backbone_model.dtype) |
Parameters
----------
backbone_cfg
The cfg of the backbone model
weight_initializer
bias_initializer
| __init__ | python | dmlc/gluon-nlp | src/gluonnlp/models/mobilebert.py | https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/models/mobilebert.py | Apache-2.0 |
def forward(self, inputs, token_types, valid_length,
masked_positions):
"""Generate the representation given the inputs.
This is used in training or fine-tuning a MobileBERT model.
Parameters
----------
inputs
- layout = 'NT'
Shape (batch_size, seq_length)
- layout = 'TN'
Shape (seq_length, batch_size)
token_types
If the inputs contain two sequences, we will set different token types for the first
sentence and the second sentence.
- layout = 'NT'
Shape (batch_size, seq_length)
- layout = 'TN'
Shape (seq_length, batch_size)
valid_length
The valid length of each sequence
Shape (batch_size,)
masked_positions
The masked position of the sequence
Shape (batch_size, num_masked_positions).
Returns
-------
contextual_embedding
- layout = 'NT'
Shape (batch_size, seq_length, units).
- layout = 'TN'
Shape (seq_length, batch_size, units).
pooled_out
Shape (batch_size, units)
nsp_score
Shape (batch_size, 2)
mlm_scores
Shape (batch_size, num_masked_positions, vocab_size)
"""
contextual_embeddings, pooled_out = self.backbone_model(inputs, token_types, valid_length)
nsp_score = self.nsp_classifier(pooled_out)
if self.backbone_model.layout == 'NT':
mlm_features = select_vectors_by_position(contextual_embeddings, masked_positions)
else:
mlm_features = select_vectors_by_position(np.swapaxes(contextual_embeddings, 0, 1),
masked_positions)
intermediate_output = self.mlm_decoder(mlm_features)
if self.backbone_model.embed_size != self.backbone_model.units:
scores = self.embedding_table(
intermediate_output[:, :, :self.backbone_model.embed_size])
extra_scores = self.extra_table(
intermediate_output[:, :, self.backbone_model.embed_size:])
mlm_scores = scores + extra_scores
else:
mlm_scores = self.embedding_table(intermediate_output)
return contextual_embeddings, pooled_out, nsp_score, mlm_scores | Generate the representation given the inputs.
This is used in training or fine-tuning a MobileBERT model.
Parameters
----------
inputs
- layout = 'NT'
Shape (batch_size, seq_length)
- layout = 'TN'
Shape (seq_length, batch_size)
token_types
If the inputs contain two sequences, we will set different token types for the first
sentence and the second sentence.
- layout = 'NT'
Shape (batch_size, seq_length)
- layout = 'TN'
Shape (seq_length, batch_size)
valid_length
The valid length of each sequence
Shape (batch_size,)
masked_positions
The masked position of the sequence
Shape (batch_size, num_masked_positions).
Returns
-------
contextual_embedding
- layout = 'NT'
Shape (batch_size, seq_length, units).
- layout = 'TN'
Shape (seq_length, batch_size, units).
pooled_out
Shape (batch_size, units)
nsp_score
Shape (batch_size, 2)
mlm_scores
Shape (batch_size, num_masked_positions, vocab_size)
| forward | python | dmlc/gluon-nlp | src/gluonnlp/models/mobilebert.py | https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/models/mobilebert.py | Apache-2.0 |
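Both forward passes above gather the encoder outputs at the masked positions via select_vectors_by_position before feeding them to the MLM decoder. The following plain-NumPy sketch shows the same gather operation conceptually; it is only an illustration, not the gluon-nlp implementation.

import numpy as np

batch_size, seq_length, units, num_masked = 2, 8, 4, 3
contextual_embeddings = np.random.randn(batch_size, seq_length, units)   # layout 'NT': (B, L, units)
masked_positions = np.array([[1, 4, 6], [0, 2, 5]])                      # (B, num_masked)

# For every batch element, pick the hidden states at its masked positions.
batch_idx = np.arange(batch_size)[:, None]                               # (B, 1), broadcasts over positions
mlm_features = contextual_embeddings[batch_idx, masked_positions]        # (B, num_masked, units)
print(mlm_features.shape)                                                # (2, 3, 4)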
def get_pretrained_mobilebert(model_name: str = 'google_uncased_mobilebert',
root: str = get_model_zoo_home_dir(),
load_backbone: bool = True,
load_mlm: bool = False)\
-> Tuple[CN, HuggingFaceWordPieceTokenizer, str, str]:
"""Get the pretrained mobile bert weights
Parameters
----------
model_name
The name of the mobile bert model.
root
The downloading root
load_backbone
Whether to load the weights of the backbone network
load_mlm
Whether to load the weights of MLM
Returns
-------
cfg
Network configuration
tokenizer
The HuggingFaceWordPieceTokenizer
backbone_params_path
Path to the parameter of the backbone network
mlm_params_path
Path to the parameter that includes both the backbone and the MLM
"""
assert model_name in PRETRAINED_URL, '{} is not found. All available are {}'.format(
model_name, list_pretrained_mobilebert())
cfg_path = PRETRAINED_URL[model_name]['cfg']
if isinstance(cfg_path, CN):
cfg = cfg_path
else:
cfg = None
vocab_path = PRETRAINED_URL[model_name]['vocab']
params_path = PRETRAINED_URL[model_name]['params']
mlm_params_path = PRETRAINED_URL[model_name]['mlm_params']
local_paths = dict()
download_jobs = [('vocab', vocab_path)]
if cfg is None:
download_jobs.append(('cfg', cfg_path))
for k, path in download_jobs:
local_paths[k] = download(url=get_repo_model_zoo_url() + path,
path=os.path.join(root, path),
sha1_hash=FILE_STATS[path])
if load_backbone:
local_params_path = download(url=get_repo_model_zoo_url() + params_path,
path=os.path.join(root, params_path),
sha1_hash=FILE_STATS[params_path])
else:
local_params_path = None
if load_mlm and mlm_params_path is not None:
local_mlm_params_path = download(url=get_repo_model_zoo_url() + mlm_params_path,
path=os.path.join(root, mlm_params_path),
sha1_hash=FILE_STATS[mlm_params_path])
else:
local_mlm_params_path = None
do_lower = True if 'lowercase' in PRETRAINED_URL[model_name]\
and PRETRAINED_URL[model_name]['lowercase'] else False
tokenizer = HuggingFaceWordPieceTokenizer(
vocab_file=local_paths['vocab'],
unk_token='[UNK]',
pad_token='[PAD]',
cls_token='[CLS]',
sep_token='[SEP]',
mask_token='[MASK]',
lowercase=do_lower)
if cfg is None:
cfg = MobileBertModel.get_cfg().clone_merge(local_paths['cfg'])
return cfg, tokenizer, local_params_path, local_mlm_params_path | Get the pretrained mobile bert weights
Parameters
----------
model_name
The name of the mobile bert model.
root
The downloading root
load_backbone
Whether to load the weights of the backbone network
load_mlm
Whether to load the weights of MLM
Returns
-------
cfg
Network configuration
tokenizer
The HuggingFaceWordPieceTokenizer
backbone_params_path
Path to the parameter of the backbone network
mlm_params_path
Path to the parameter that includes both the backbone and the MLM
| get_pretrained_mobilebert | python | dmlc/gluon-nlp | src/gluonnlp/models/mobilebert.py | https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/models/mobilebert.py | Apache-2.0 |
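A hedged sketch of how this helper is typically combined with the backbone referenced earlier in the file. MobileBertModel.from_cfg appears above in MobileBertForPretrain.__init__, and load_parameters is the standard Gluon call; treat the exact import path and workflow as an assumption rather than the documented API.

from gluonnlp.models.mobilebert import get_pretrained_mobilebert, MobileBertModel

cfg, tokenizer, backbone_params_path, _ = get_pretrained_mobilebert(
    'google_uncased_mobilebert', load_backbone=True, load_mlm=False)
backbone = MobileBertModel.from_cfg(cfg)          # same from_cfg helper used in the pretraining head above
backbone.load_parameters(backbone_params_path)    # standard Gluon parameter loading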
def __init__(self,
vocab_size=50265,
units=768,
hidden_size=3072,
num_layers=12,
num_heads=12,
max_length=512,
hidden_dropout_prob=0.1,
attention_dropout_prob=0.1,
pos_embed_type='learned',
activation='gelu',
pooler_activation='tanh',
layer_norm_eps=1E-5,
embed_initializer=TruncNorm(stdev=0.02),
weight_initializer=TruncNorm(stdev=0.02),
bias_initializer='zeros',
dtype='float32',
use_pooler=True,
classifier_activation=False,
encoder_normalize_before=True,
output_all_encodings=False,
layout='NT',
compute_layout='auto'):
"""
Parameters
----------
vocab_size
units
hidden_size
num_layers
num_heads
max_length
hidden_dropout_prob
attention_dropout_prob
pos_embed_type
activation
pooler_activation
layer_norm_eps
embed_initializer
weight_initializer
bias_initializer
dtype
use_pooler
Whether to output the CLS hidden state
classifier_activation
Whether to use classification head
encoder_normalize_before
Whether to apply layer normalization to the embeddings before feeding them into the encoder
output_all_encodings
Whether to output all encodings
layout
The layout
compute_layout
The computation layout
"""
super().__init__()
self._dtype = dtype
self._output_all_encodings = output_all_encodings
self.vocab_size = vocab_size
self.units = units
self.hidden_size = hidden_size
self.num_layers = num_layers
self.num_heads = num_heads
self.max_length = max_length
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_dropout_prob = attention_dropout_prob
self.pos_embed_type = pos_embed_type
self.activation = activation
self.pooler_activation = pooler_activation
self.layer_norm_eps = layer_norm_eps
self.use_pooler = use_pooler
self.classifier_activation = classifier_activation
self.encoder_normalize_before = encoder_normalize_before
self.weight_initializer = weight_initializer
self.bias_initializer = bias_initializer
self._layout = layout
if compute_layout == 'auto' or compute_layout is None:
self._compute_layout = layout
else:
self._compute_layout = compute_layout
self.word_embed = nn.Embedding(
input_dim=self.vocab_size,
output_dim=self.units,
weight_initializer=embed_initializer,
dtype=self._dtype
)
if self.encoder_normalize_before:
self.embed_ln = nn.LayerNorm(
epsilon=self.layer_norm_eps,
in_channels=self.units)
self.embed_dropout = nn.Dropout(self.hidden_dropout_prob)
self.pos_embed = PositionalEmbedding(
units=self.units,
max_length=self.max_length,
dtype=self._dtype,
method=pos_embed_type)
self.encoder = RobertaEncoder(
units=self.units,
hidden_size=self.hidden_size,
num_layers=self.num_layers,
num_heads=self.num_heads,
attention_dropout_prob=self.attention_dropout_prob,
hidden_dropout_prob=self.hidden_dropout_prob,
layer_norm_eps=self.layer_norm_eps,
weight_initializer=weight_initializer,
bias_initializer=bias_initializer,
activation=self.activation,
dtype=self._dtype,
output_all_encodings=self._output_all_encodings,
layout=self._compute_layout,
)
if self.use_pooler and self.classifier_activation:
# Construct pooler
self.pooler = nn.Dense(units=self.units,
in_units=self.units,
flatten=False,
activation=self.pooler_activation,
weight_initializer=weight_initializer,
bias_initializer=bias_initializer) |
Parameters
----------
vocab_size
units
hidden_size
num_layers
num_heads
max_length
hidden_dropout_prob
attention_dropout_prob
pos_embed_type
activation
pooler_activation
layer_norm_eps
embed_initializer
weight_initializer
bias_initializer
dtype
use_pooler
Whether to output the CLS hidden state
classifier_activation
Whether to use classification head
encoder_normalize_before
Whether to apply layer normalization to the embeddings before feeding them into the encoder
output_all_encodings
Whether to output all encodings
layout
The layout
compute_layout
The computation layout
| __init__ | python | dmlc/gluon-nlp | src/gluonnlp/models/roberta.py | https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/models/roberta.py | Apache-2.0 |
def get_initial_embedding(self, inputs):
"""Get the initial token embeddings that considers the token type and positional embeddings
Parameters
----------
inputs
- layout = 'NT'
Shape (batch_size, seq_length)
- layout = 'TN'
Shape (seq_length, batch_size)
Returns
-------
embedding
The initial embedding that will be fed into the encoder
- layout = 'NT'
Shape (batch_size, seq_length, C)
- layout = 'TN'
Shape (seq_length, batch_size, C)
"""
if self._layout == 'NT':
batch_axis, time_axis = 0, 1
else:
batch_axis, time_axis = 1, 0
embedding = self.word_embed(inputs)
if self.pos_embed_type:
positional_embedding = self.pos_embed(npx.arange_like(inputs, axis=time_axis))
positional_embedding = np.expand_dims(positional_embedding, axis=batch_axis)
embedding = embedding + positional_embedding
if self.encoder_normalize_before:
embedding = self.embed_ln(embedding)
embedding = self.embed_dropout(embedding)
return embedding | Get the initial token embeddings, which combine the word embeddings with the positional embeddings
Parameters
----------
inputs
- layout = 'NT'
Shape (batch_size, seq_length)
- layout = 'TN'
Shape (seq_length, batch_size)
Returns
-------
embedding
The initial embedding that will be fed into the encoder
- layout = 'NT'
Shape (batch_size, seq_length, C)
- layout = 'TN'
Shape (seq_length, batch_size, C)
| get_initial_embedding | python | dmlc/gluon-nlp | src/gluonnlp/models/roberta.py | https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/models/roberta.py | Apache-2.0 |
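The positional-embedding addition above relies on broadcasting after the batch axis is expanded to size 1. A quick NumPy sketch of that broadcast for layout 'NT' (shapes only, illustrative):

import numpy as np

B, L, C = 2, 6, 8
embedding = np.zeros((B, L, C))                                  # word embeddings, layout 'NT'
positional_embedding = np.zeros((L, C))                          # one vector per position
positional_embedding = np.expand_dims(positional_embedding, 0)   # (1, L, C), batch_axis = 0 for 'NT'
out = embedding + positional_embedding                           # broadcasts to (B, L, C)
print(out.shape)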
def apply_pooling(self, sequence):
"""Generate the representation given the inputs.
This is used for pre-training or fine-tuning a mobile bert model.
Get the first token of the whole sequence which is [CLS]
Parameters
----------
sequence
- layout = 'NT'
Shape (batch_size, sequence_length, units)
- layout = 'TN'
Shape (sequence_length, batch_size, units)
Returns
-------
ret
Shape (batch_size, units)
"""
if self._layout == 'NT':
outputs = sequence[:, 0, :]
elif self._layout == 'TN':
outputs = sequence[0, :, :]
else:
raise NotImplementedError
if self.classifier_activation:
return self.pooler(outputs)
else:
return outputs | Apply pooling to the encoded sequence. This is used for pre-training or fine-tuning.
It takes the first token of the whole sequence, which is [CLS], and optionally feeds it
through the pooler dense layer.
Parameters
----------
sequence
- layout = 'NT'
Shape (batch_size, sequence_length, units)
- layout = 'TN'
Shape (sequence_length, batch_size, units)
Returns
-------
ret
Shape (batch_size, units)
| apply_pooling | python | dmlc/gluon-nlp | src/gluonnlp/models/roberta.py | https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/models/roberta.py | Apache-2.0 |
def forward(self, inputs, valid_length, masked_positions):
"""Getting the scores of the masked positions.
Parameters
----------
inputs
- layout = 'NT'
Shape (batch_size, seq_length)
- layout = 'TN'
Shape (seq_length, batch_size)
valid_length
The valid length of each sequence
Shape (batch_size,)
masked_positions
The masked position of the sequence
Shape (batch_size, num_masked_positions).
Returns
-------
contextual_embedding
- layout = 'NT'
Shape (batch_size, seq_length, units).
- layout = 'TN'
Shape (seq_length, batch_size, units).
pooled_out
Shape (batch_size, units)
mlm_scores :
Shape (batch_size, num_masked_positions, vocab_size)
"""
all_encodings_outputs, pooled_out = self.backbone_model(inputs, valid_length)
if self.backbone_model._output_all_encodings:
contextual_embeddings = all_encodings_outputs[-1]
else:
contextual_embeddings = all_encodings_outputs
if self.backbone_model.layout == 'TN':
contextual_embeddings = np.swapaxes(contextual_embeddings, 0, 1)
mlm_features = select_vectors_by_position(contextual_embeddings, masked_positions)
mlm_scores = self.mlm_decoder(mlm_features)
return all_encodings_outputs, pooled_out, mlm_scores | Getting the scores of the masked positions.
Parameters
----------
inputs
- layout = 'NT'
Shape (batch_size, seq_length)
- layout = 'TN'
Shape (seq_length, batch_size)
valid_length
The valid length of each sequence
Shape (batch_size,)
masked_positions
The masked position of the sequence
Shape (batch_size, num_masked_positions).
Returns
-------
contextual_embedding
- layout = 'NT'
Shape (batch_size, seq_length, units).
- layout = 'TN'
Shape (seq_length, batch_size, units).
pooled_out
Shape (batch_size, units)
mlm_scores :
Shape (batch_size, num_masked_positions, vocab_size)
| forward | python | dmlc/gluon-nlp | src/gluonnlp/models/roberta.py | https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/models/roberta.py | Apache-2.0 |
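A shape-level sketch of this RoBERTa MLM forward pass, assuming an already constructed and initialized RobertaForMLM instance `model` with layout 'NT'; sizes and the vocabulary size are illustrative.

import mxnet as mx

# `model` is assumed to be an initialized RobertaForMLM with layout 'NT' (illustrative).
batch_size, seq_length, num_masked = 2, 12, 2
inputs = mx.np.random.randint(0, 50265, (batch_size, seq_length))
valid_length = mx.np.array([12, 9], dtype=mx.np.int32)
masked_positions = mx.np.array([[3, 7], [1, 4]], dtype=mx.np.int32)

all_enc, pooled_out, mlm_scores = model(inputs, valid_length, masked_positions)
# mlm_scores: (B, num_masked, vocab_size). all_enc is a list of per-layer encodings when
# output_all_encodings is True, otherwise the final (B, L, units) contextual embedding.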
def get_pretrained_roberta(model_name: str = 'fairseq_roberta_base',
root: str = get_model_zoo_home_dir(),
load_backbone: bool = True,
load_mlm: bool = False) \
-> Tuple[CN, HuggingFaceByteBPETokenizer, str, str]:
"""Get the pretrained RoBERTa weights
Parameters
----------
model_name
The name of the RoBERTa model.
root
The downloading root
load_backbone
Whether to load the weights of the backbone network
load_mlm
Whether to load the weights of MLM
Returns
-------
cfg
Network configuration
tokenizer
The HuggingFaceByteBPETokenizer
params_path
Path to the parameters
mlm_params_path
Path to the parameter that includes both the backbone and the MLM
"""
assert model_name in PRETRAINED_URL, '{} is not found. All available are {}'.format(
model_name, list_pretrained_roberta())
cfg_path = PRETRAINED_URL[model_name]['cfg']
if isinstance(cfg_path, CN):
cfg = cfg_path
else:
cfg = None
merges_path = PRETRAINED_URL[model_name]['merges']
vocab_path = PRETRAINED_URL[model_name]['vocab']
params_path = PRETRAINED_URL[model_name]['params']
mlm_params_path = PRETRAINED_URL[model_name]['mlm_params']
local_paths = dict()
download_jobs = [('vocab', vocab_path), ('merges', merges_path)]
if cfg is None:
download_jobs.append(('cfg', cfg_path))
for k, path in download_jobs:
local_paths[k] = download(url=get_repo_model_zoo_url() + path,
path=os.path.join(root, path),
sha1_hash=FILE_STATS[path])
if load_backbone:
local_params_path = download(url=get_repo_model_zoo_url() + params_path,
path=os.path.join(root, params_path),
sha1_hash=FILE_STATS[params_path])
else:
local_params_path = None
if load_mlm and mlm_params_path is not None:
local_mlm_params_path = download(url=get_repo_model_zoo_url() + mlm_params_path,
path=os.path.join(root, mlm_params_path),
sha1_hash=FILE_STATS[mlm_params_path])
else:
local_mlm_params_path = None
do_lower = True if 'lowercase' in PRETRAINED_URL[model_name]\
and PRETRAINED_URL[model_name]['lowercase'] else False
tokenizer = HuggingFaceByteBPETokenizer(
merges_file=local_paths['merges'],
vocab_file=local_paths['vocab'],
lowercase=do_lower)
if cfg is None:
cfg = RobertaModel.get_cfg().clone_merge(local_paths['cfg'])
return cfg, tokenizer, local_params_path, local_mlm_params_path | Get the pretrained RoBERTa weights
Parameters
----------
model_name
The name of the RoBERTa model.
root
The downloading root
load_backbone
Whether to load the weights of the backbone network
load_mlm
Whether to load the weights of MLM
Returns
-------
cfg
Network configuration
tokenizer
The HuggingFaceByteBPETokenizer
params_path
Path to the parameters
mlm_params_path
Path to the parameter that includes both the backbone and the MLM
| get_pretrained_roberta | python | dmlc/gluon-nlp | src/gluonnlp/models/roberta.py | https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/models/roberta.py | Apache-2.0 |
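The RoBERTa helper follows the same pattern as the MobileBERT one above. The sketch below assumes RobertaModel exposes a from_cfg constructor analogous to MobileBertModel's; the model name is the function's default.

from gluonnlp.models.roberta import get_pretrained_roberta, RobertaModel

cfg, tokenizer, params_path, _ = get_pretrained_roberta(
    'fairseq_roberta_base', load_backbone=True, load_mlm=False)
backbone = RobertaModel.from_cfg(cfg)     # assumed from_cfg helper, mirroring MobileBertModel
backbone.load_parameters(params_path)     # standard Gluon parameter loading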
def __init__(
self,
d_model,
d_kv,
d_ff,
is_decoder,
num_heads=12,
dropout_prob=0.1,
layer_norm_eps=1E-6,
activation='relu',
init_factor=1.0,
layout='NT',
dtype='float32'
):
"""
Parameters
----------
d_model
Equivalent to transformer's `units`.
d_kv
d_kv * num_heads (see below) = inner_dim.
d_ff
Equivalent to transformer's `hidden_size`.
is_decoder
If is_decoder, apply cross-attention.
num_heads
dropout_prob
We use the same dropout rate for all dropout layers.
layer_norm_eps
activation
Type of feed forward projection. Currently supported are `relu` and `gated-gelu`.
init_factor
A scalar factor applied to the standard deviation of the weight initialization.
layout
dtype
"""
super().__init__()
self._d_model = d_model
self._d_kv = d_kv
self._d_ff = d_ff
self._is_decoder = is_decoder
self._num_heads = num_heads
self._inner_dim = self._num_heads * self._d_kv
self._dtype = dtype
assert layout in ['TN', 'NT'], \
'Invalid layout: {}. Only "TN" and "NT" are supported.'.format(layout)
self._layout = layout
self._time_axis = 1 if self.layout == 'NT' else 0
self.self_attn_layer_norm = RMSNorm(
in_channels=d_model,
center=False,
scale=True,
gamma_initializer=Constant(1.0 * init_factor),
variance_epsilon=layer_norm_eps,
dtype=dtype
)
# avoid scaling before softmax
# See https://github.com/tensorflow/mesh/blob/fa19d69eafc9a482aff0b59ddd96b025c0cb207d/mesh_tensorflow/transformer/attention.py#L136
self.self_attn_q = nn.Dense(
units=self._inner_dim,
in_units=d_model,
flatten=False,
use_bias=False,
weight_initializer=Normal((d_model * d_kv) ** -0.5 * init_factor),
dtype=dtype
)
self.self_attn_k = nn.Dense(
units=self._inner_dim,
in_units=d_model,
flatten=False,
use_bias=False,
weight_initializer=Normal(d_model ** -0.5 * init_factor),
dtype=dtype
)
self.self_attn_v = nn.Dense(
units=self._inner_dim,
in_units=d_model,
flatten=False,
use_bias=False,
weight_initializer=Normal(d_model ** -0.5 * init_factor),
dtype=dtype
)
self.self_attn = MultiHeadAttentionCell(
query_units=self._inner_dim,
num_heads=num_heads,
attention_dropout=dropout_prob,
scaled=False,
normalized=False,
dtype=dtype,
layout='NTK' if layout == 'NT' else 'TNK',
use_einsum=False
)
self.self_attn_proj = nn.Dense(
units=d_model,
in_units=self._inner_dim,
flatten=False,
use_bias=False,
weight_initializer=Normal(self._inner_dim ** -0.5 * init_factor),
dtype=dtype
)
if is_decoder:
self.cross_attn_layer_norm = RMSNorm(
in_channels=d_model,
center=False,
scale=True,
gamma_initializer=Constant(1.0 * init_factor),
variance_epsilon=layer_norm_eps,
dtype=dtype
)
# avoid scaling before softmax
self.cross_attn_q = nn.Dense(
units=self._inner_dim,
in_units=d_model,
flatten=False,
use_bias=False,
weight_initializer=Normal((d_model * d_kv) ** -0.5 * init_factor),
dtype=dtype
)
self.cross_attn_k = nn.Dense(
units=self._inner_dim,
in_units=d_model,
flatten=False,
use_bias=False,
weight_initializer=Normal(d_model ** -0.5 * init_factor),
dtype=dtype
)
self.cross_attn_v = nn.Dense(
units=self._inner_dim,
in_units=d_model,
flatten=False,
use_bias=False,
weight_initializer=Normal(d_model ** -0.5 * init_factor),
dtype=dtype
)
self.cross_attn = MultiHeadAttentionCell(
query_units=self._inner_dim,
num_heads=num_heads,
attention_dropout=dropout_prob,
scaled=False,
normalized=False,
dtype=dtype,
layout='NTK' if layout == 'NT' else 'TNK',
use_einsum=False
)
self.cross_attn_proj = nn.Dense(
units=d_model,
in_units=self._inner_dim,
flatten=False,
use_bias=False,
weight_initializer=Normal(self._inner_dim ** -0.5 * init_factor),
dtype=dtype
)
assert activation in ['relu', 'gated-gelu'], \
'{} is not supported. Please choose from "relu" and "gated-gelu"'.format(activation)
# the weight_initializer here is equivalent to Normal(in_units ** -0.5 * init_factor)
self.ffn = PositionwiseFFN(
units=d_model,
hidden_size=d_ff,
use_bias=False,
activation_dropout=dropout_prob,
dropout=dropout_prob,
weight_initializer=Xavier('gaussian', 'in', np.sqrt(init_factor)),
activation='relu' if activation == 'relu' else 'gelu(tanh)',
use_gated_activation=False if activation == 'relu' else True,
normalization='rms_norm',
layer_norm_eps=layer_norm_eps,
pre_norm=True,
dtype=dtype,
center=False,
scale=True,
gamma_initializer=Constant(1.0 * init_factor)
)
self.dropout = nn.Dropout(dropout_prob) |
Parameters
----------
d_model
Equivalent to transformer's `units`.
d_kv
d_kv * num_heads (see below) = inner_dim.
d_ff
Equivalent to transformer's `hidden_size`.
is_decoder
If is_decoder, apply cross-attention.
num_heads
dropout_prob
We use the same dropout rate for all dropout layers.
layer_norm_eps
activation
Type of feed forward projection. Currently supported are `relu` and `gated-gelu`.
init_factor
A scalar factor applied to the standard deviation of the weight initialization.
layout
dtype
| __init__ | python | dmlc/gluon-nlp | src/gluonnlp/models/t5.py | https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/models/t5.py | Apache-2.0 |
def incremental_decode(
self,
step_hidden_states,
step_position_embeddings,
past_key_value,
mem_states,
step_mem_attn_mask
):
"""Incrementally generate the output given the decoder input.
Parameters
----------
step_hidden_states
Stepwise hidden states where L_seq = 1 as in `forward` case.
- layout = 'NT'
Shape (B, 1, d_model)
- layout = 'TN'
Shape (1, B, d_model)
step_position_embeddings
Stepwise relative position embeddings.
Shape (num_heads, 1, (L_past_seq + 1))
past_key_value
A tuple containing past key and past value. Presumably they are of the same shape.
- layout = 'NT'
Shape (B, L_past_seq, num_heads, d_kv)
- layout = 'TN'
Shape (L_past_seq, B, num_heads, d_kv)
mem_states
Encoded results.
- layout = 'NT'
Shape (B, L_src_seq, d_model)
- layout = 'TN'
Shape (L_src_seq, B, d_model)
step_mem_attn_mask
Stepwise attention mask for cross-attention.
Shape (B, 1, L_src_seq)
Returns
-------
step_hidden_states
- layout = 'NT'
Shape (B, 1, d_model)
- layout = 'TN'
Shape (1, B, d_model)
(self_key, self_value)
The updated `past_key_value` tuple. Presumably they are of the same shape.
- layout = 'NT'
Shape (B, (L_past_seq + 1), num_heads, d_kv)
- layout = 'TN'
Shape ((L_past_seq + 1), B, num_heads, d_kv)
"""
# 1. self-attention
out = self.self_attn_layer_norm(step_hidden_states)
step_self_query, step_self_key, step_self_value = (
self.transpose_for_scores(self.self_attn_q(out)),
self.transpose_for_scores(self.self_attn_k(out)),
self.transpose_for_scores(self.self_attn_v(out))
)
self_key, self_value = (
np.concatenate([past_key_value[0], step_self_key], axis=self._time_axis),
np.concatenate([past_key_value[1], step_self_value], axis=self._time_axis)
)
out, _ = self.self_attn(
step_self_query,
self_key,
self_value,
None,
step_position_embeddings
)
out = self.dropout(self.self_attn_proj(out))
step_hidden_states = step_hidden_states + out
# 2. cross-attention
out = self.cross_attn_layer_norm(step_hidden_states)
step_cross_query, cross_key, cross_value = (
self.transpose_for_scores(self.cross_attn_q(out)),
self.transpose_for_scores(self.cross_attn_k(mem_states)),
self.transpose_for_scores(self.cross_attn_v(mem_states))
)
out, _ = self.cross_attn(
step_cross_query,
cross_key,
cross_value,
step_mem_attn_mask
)
out = self.dropout(self.cross_attn_proj(out))
step_hidden_states = step_hidden_states + out
# 3. feed forward
step_hidden_states = self.ffn(step_hidden_states)
return step_hidden_states, (self_key, self_value) | Incrementally generate the output given the decoder input.
Parameters
----------
step_hidden_states
Stepwise hidden states where L_seq = 1 as in `forward` case.
- layout = 'NT'
Shape (B, 1, d_model)
- layout = 'TN'
Shape (1, B, d_model)
step_position_embeddings
Stepwise relative position embeddings.
Shape (num_heads, 1, (L_past_seq + 1))
past_key_value
A tuple containing past key and past value. Presumably they are of the same shape.
- layout = 'NT'
Shape (B, L_past_seq, num_heads, d_kv)
- layout = 'TN'
Shape (L_past_seq, B, num_heads, d_kv)
mem_states
Encoded results.
- layout = 'NT'
Shape (B, L_src_seq, d_model)
- layout = 'TN'
Shape (L_src_seq, B, d_model)
step_mem_attn_mask
Stepwise attention mask for cross-attention.
Shape (B, 1, L_src_seq)
Returns
-------
step_hidden_states
- layout = 'NT'
Shape (B, 1, d_model)
- layout = 'TN'
Shape (1, B, d_model)
(self_key, self_value)
The updated `past_key_value` tuple. Presumably they are of the same shape.
- layout = 'NT'
Shape (B, (L_past_seq + 1), num_heads, d_kv)
- layout = 'TN'
Shape ((L_past_seq + 1), B, num_heads, d_kv)
| incremental_decode | python | dmlc/gluon-nlp | src/gluonnlp/models/t5.py | https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/models/t5.py | Apache-2.0 |
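The cache bookkeeping in incremental_decode boils down to appending the current step's key and value to the stored ones along the time axis. A plain-NumPy sketch of that growth for layout 'NT' (shapes only, illustrative):

import numpy as np

B, L_past, num_heads, d_kv = 2, 5, 12, 64
past_key = np.zeros((B, L_past, num_heads, d_kv))   # past_key_value[0] for layout 'NT'
step_key = np.zeros((B, 1, num_heads, d_kv))        # key produced for the current step

# The time axis is 1 for layout 'NT' (it would be 0 for 'TN').
new_key = np.concatenate([past_key, step_key], axis=1)
print(new_key.shape)                                # (2, 6, 12, 64): the cache grows by one position per step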
def forward(
self,
hidden_states,
self_attn_mask,
position_embeddings,
mem_states=None,
mem_attn_mask=None
):
"""
Parameters
----------
hidden_states
- layout = 'NT'
Shape (B, L_seq, d_model)
- layout = 'TN'
Shape (L_seq, B, d_model)
self_attn_mask
If `is_decoder` is True, this should be a "causal" attention mask.
Shape (B, L_seq, L_seq)
position_embeddings
Relative position embeddings for self-attention, while cross-attention is free of position encoding.
Shape (num_heads, L_seq, L_seq)
mem_states
Encoded results. Only applicable to decoder layers.
- layout = 'NT'
Shape (B, L_src_seq, d_model)
- layout = 'TN'
Shape (L_src_seq, B, d_model)
mem_attn_mask
Attention mask for cross-attention. Only applicable to decoder layers.
Shape (B, L_seq, L_src_seq)
Returns
-------
hidden_states
- layout = 'NT'
Shape (B, L_seq, d_model)
- layout = 'TN'
Shape (L_seq, B, d_model)
"""
# 1. self-attention
out = self.self_attn_layer_norm(hidden_states)
self_query, self_key, self_value = (
self.transpose_for_scores(self.self_attn_q(out)),
self.transpose_for_scores(self.self_attn_k(out)),
self.transpose_for_scores(self.self_attn_v(out))
)
out, _ = self.self_attn(
self_query,
self_key,
self_value,
self_attn_mask,
position_embeddings
)
out = self.dropout(self.self_attn_proj(out))
hidden_states = hidden_states + out
# 2. cross-attention, if needed
if self._is_decoder:
out = self.cross_attn_layer_norm(hidden_states)
cross_query, cross_key, cross_value = (
self.transpose_for_scores(self.cross_attn_q(out)),
self.transpose_for_scores(self.cross_attn_k(mem_states)),
self.transpose_for_scores(self.cross_attn_v(mem_states))
)
out, _ = self.cross_attn(
cross_query,
cross_key,
cross_value,
mem_attn_mask
)
out = self.dropout(self.cross_attn_proj(out))
hidden_states = hidden_states + out
# 3. feed forward
hidden_states = self.ffn(hidden_states)
return hidden_states |
Parameters
----------
hidden_states
- layout = 'NT'
Shape (B, L_seq, d_model)
- layout = 'TN'
Shape (L_seq, B, d_model)
self_attn_mask
If `is_decoder` is True, this should be a "causal" attention mask.
Shape (B, L_seq, L_seq)
position_embeddings
Relative position embeddings for self-attention, while cross-attention is free of position encoding.
Shape (num_heads, L_seq, L_seq)
mem_states
Encoded results. Only applicable to decoder layers.
- layout = 'NT'
Shape (B, L_src_seq, d_model)
- layout = 'TN'
Shape (L_src_seq, B, d_model)
mem_attn_mask
Attention mask for cross-attention. Only applicable to decoder layers.
Shape (B, L_seq, L_src_seq)
Returns
-------
hidden_states
- layout = 'NT'
Shape (B, L_seq, d_model)
- layout = 'TN'
Shape (L_seq, B, d_model)
| forward | python | dmlc/gluon-nlp | src/gluonnlp/models/t5.py | https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/models/t5.py | Apache-2.0 |
def __init__(
self,
d_model,
d_kv,
d_ff,
num_layers=12,
num_heads=12,
dropout_prob=0.1,
layer_norm_eps=1E-6,
activation='relu',
init_factor=1.0,
layout='NT',
dtype='float32'
):
"""
Parameters
----------
d_model
Equivalent to transformer's `units`.
d_kv
d_kv * num_heads (see below) = inner_dim.
d_ff
Equivalent to transformer's `hidden_size`.
num_layers
num_heads
dropout_prob
We use the same dropout rate for all dropout layers.
layer_norm_eps
activation
Type of feed forward projection. Currently supported are `relu` and `gated-gelu`.
init_factor
A scalar factor applied to the standard deviation of the weight initialization.
layout
dtype
"""
super().__init__()
self._d_model = d_model
self._d_kv = d_kv
self._d_ff = d_ff
self._num_layers = num_layers
self._num_heads = num_heads
self._inner_dim = num_heads * d_kv
self._dtype = dtype
assert layout in ['TN', 'NT'], \
'Invalid layout: {}. Only "TN" and "NT" are supported.'.format(layout)
self._layout = layout
self._time_axis = 1 if self.layout == 'NT' else 0
self.relative_position_encoder = RelAttentionScoreCell(
query_units=self._inner_dim,
num_heads=num_heads,
method='t5',
bidirectional=True,
embed_initializer=Normal(d_model ** -0.5 * init_factor),
layout='NTK' if layout == 'NT' else 'TNK',
dtype=dtype
)
self.layers = nn.HybridSequential()
for _ in range(num_layers):
self.layers.add(
T5Block(
d_model=d_model,
d_kv=d_kv,
d_ff=d_ff,
is_decoder=False,
num_heads=num_heads,
dropout_prob=dropout_prob,
layer_norm_eps=layer_norm_eps,
activation=activation,
init_factor=init_factor,
layout=layout,
dtype=dtype
)
)
self.final_layer_norm = RMSNorm(
in_channels=d_model,
center=False,
scale=True,
gamma_initializer=Constant(1.0 * init_factor),
variance_epsilon=layer_norm_eps,
dtype=dtype
)
self.dropout = nn.Dropout(dropout_prob) |
Parameters
----------
d_model
Equivalent to transformer's `units`.
d_kv
d_kv * num_heads (see below) = inner_dim.
d_ff
Equivalent to transformer's `hidden_size`.
num_layers
num_heads
dropout_prob
We use the same dropout rate for all dropout layers.
layer_norm_eps
activation
Type of feed forward projection. Currently supported are `relu` and `gated-gelu`.
init_factor
A scalar factor applied to the standard deviation of the weight initialization.
layout
dtype
| __init__ | python | dmlc/gluon-nlp | src/gluonnlp/models/t5.py | https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/models/t5.py | Apache-2.0 |
def forward(self, hidden_states, valid_length):
"""
Parameters
----------
hidden_states
- layout = 'NT'
Shape (B, L_seq, d_model)
- layout = 'TN'
Shape (L_seq, B, d_model)
valid_length
Valid sequence length for each sample fed into the encoder.
Shape (B,)
Returns
-------
hidden_states
- layout = 'NT'
Shape (B, L_seq, d_model)
- layout = 'TN'
Shape (L_seq, B, d_model)
"""
# 1. relative position embeddings and attention masks
position_embeddings = self.relative_position_encoder(
gen_rel_position(hidden_states, layout=self.layout)
)
self_attn_mask = gen_self_attn_mask(
hidden_states,
valid_length,
dtype=self._dtype,
attn_type='full',
layout=self.layout
)
# 2. encoder blocks and other layers
hidden_states = self.dropout(hidden_states)
for layer in self.layers:
hidden_states = layer(
hidden_states,
self_attn_mask,
position_embeddings
)
hidden_states = self.final_layer_norm(hidden_states)
hidden_states = self.dropout(hidden_states)
return hidden_states |
Parameters
----------
hidden_states
- layout = 'NT'
Shape (B, L_seq, d_model)
- layout = 'TN'
Shape (L_seq, B, d_model)
valid_length
Valid sequence length for each sample fed into the encoder.
Shape (B,)
Returns
-------
hidden_states
- layout = 'NT'
Shape (B, L_seq, d_model)
- layout = 'TN'
Shape (L_seq, B, d_model)
| forward | python | dmlc/gluon-nlp | src/gluonnlp/models/t5.py | https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/models/t5.py | Apache-2.0 |
def __init__(
self,
d_model,
d_kv,
d_ff,
num_layers=12,
num_heads=12,
dropout_prob=0.1,
layer_norm_eps=1E-6,
activation='relu',
init_factor=1.0,
layout='NT',
dtype='float32'
):
"""
Parameters
----------
d_model
Equivalent to transformer's `units`.
d_kv
d_kv * num_heads (see below) = inner_dim.
d_ff
Equivalent to transformer's `hidden_size`.
num_layers
num_heads
dropout_prob
We use the same dropout rate for all dropout layers.
layer_norm_eps
activation
Type of feed forward projection. Currently supported are `relu` and `gated-gelu`.
init_factor
A scalar factor applied to the standard deviation of the weight initialization.
layout
dtype
"""
super().__init__()
self._d_model = d_model
self._d_kv = d_kv
self._d_ff = d_ff
self._num_layers = num_layers
self._num_heads = num_heads
self._inner_dim = num_heads * d_kv
self._dtype = dtype
assert layout in ['TN', 'NT'], \
'Invalid layout: {}. Only "TN" and "NT" are supported.'.format(layout)
self._layout = layout
self._time_axis = 1 if self.layout == 'NT' else 0
self.relative_position_encoder = RelAttentionScoreCell(
query_units=self._inner_dim,
num_heads=num_heads,
method='t5',
bidirectional=False,
embed_initializer=Normal(d_model ** -0.5 * init_factor),
layout='NTK' if layout == 'NT' else 'TNK',
dtype=dtype
)
self.layers = nn.HybridSequential()
for _ in range(num_layers):
self.layers.add(
T5Block(
d_model=d_model,
d_kv=d_kv,
d_ff=d_ff,
is_decoder=True,
num_heads=num_heads,
dropout_prob=dropout_prob,
layer_norm_eps=layer_norm_eps,
activation=activation,
init_factor=init_factor,
layout=layout,
dtype=dtype
)
)
self.final_layer_norm = RMSNorm(
in_channels=d_model,
center=False,
scale=True,
gamma_initializer=Constant(1.0 * init_factor),
variance_epsilon=layer_norm_eps,
dtype=dtype
)
self.dropout = nn.Dropout(dropout_prob) |
Parameters
----------
d_model
Equivalent to transformer's `units`.
d_kv
d_kv * num_heads (see below) = inner_dim.
d_ff
Equivalent to transformer's `hidden_size`.
num_layers
num_heads
dropout_prob
We use the same dropout rate for all dropout layers.
layer_norm_eps
activation
Type of feed forward projection. Currently supported are `relu` and `gated-gelu`.
init_factor
A scalar factor applied to the standard deviation of the weight initialization.
layout
dtype
| __init__ | python | dmlc/gluon-nlp | src/gluonnlp/models/t5.py | https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/models/t5.py | Apache-2.0 |
def incremental_decode(
self,
step_hidden_states,
position,
past_key_values,
mem_states,
mem_valid_length
):
"""Incrementally generate the output given the decoder input.
Parameters
----------
step_hidden_states
Stepwise hidden states where L_seq = 1 as in `forward` case.
- layout = 'NT'
Shape (B, 1, d_model)
- layout = 'TN'
Shape (1, B, d_model)
position
Current position index in incremental decoding.
Shape (B,)
past_key_values
A list of tuples where each one corresponds to the `past_key_value` of a decoder layer.
mem_states
Encoded results.
- layout = 'NT'
Shape (B, L_src_seq, d_model)
- layout = 'TN'
Shape (L_src_seq, B, d_model)
mem_valid_length
Valid sequence length for each sample fed into the encoder.
Shape (B,)
Returns
-------
step_hidden_states
- layout = 'NT'
Shape (B, 1, d_model)
- layout = 'TN'
Shape (1, B, d_model)
present_key_values
A list of tuples containing the updated `past_key_value` for each decoder layer.
"""
# 1. relative position embeddings and attention mask
# step_position_embeddings: Shape (num_heads, 1, L_seq), for self-attention
# step_mem_attn_mask: Shape (B, 1, L_src_seq), for cross-attention
position_embeddings = self.relative_position_encoder(
gen_rel_position(
step_hidden_states,
past_data=past_key_values[0][0],
layout=self.layout
)
)
step_position_embeddings = position_embeddings[:, -1:, :]
step_mem_attn_mask = gen_mem_attn_mask(
mem_states,
mem_valid_length,
step_hidden_states,
dtype=self._dtype,
layout=self.layout
)
# 2. decoder blocks and other layers
step_hidden_states = self.dropout(step_hidden_states)
present_key_values = []
for i, layer in enumerate(self.layers):
step_hidden_states, present_key_value = layer.incremental_decode(
step_hidden_states,
step_position_embeddings,
past_key_values[i],
mem_states,
step_mem_attn_mask
)
present_key_values.append(present_key_value)
step_hidden_states = self.final_layer_norm(step_hidden_states)
step_hidden_states = self.dropout(step_hidden_states)
return step_hidden_states, present_key_values | Incrementally generate the output given the decoder input.
Parameters
----------
step_hidden_states
Stepwise hidden states where L_seq = 1 as in `forward` case.
- layout = 'NT'
Shape (B, 1, d_model)
- layout = 'TN'
Shape (1, B, d_model)
position
Current position index in incremental decoding.
Shape (B,)
past_key_values
A list of tuples where each one corresponds to the `past_key_value` of a decoder layer.
mem_states
Encoded results.
- layout = 'NT'
Shape (B, L_src_seq, d_model)
- layout = 'TN'
Shape (L_src_seq, B, d_model)
mem_valid_length
Valid sequence length for each sample fed into the encoder.
Shape (B,)
Returns
-------
step_hidden_states
- layout = 'NT'
Shape (B, 1, d_model)
- layout = 'TN'
Shape (1, B, d_model)
present_key_values
A list of tuples containing the updated `past_key_value` for each decoder layer.
| incremental_decode | python | dmlc/gluon-nlp | src/gluonnlp/models/t5.py | https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/models/t5.py | Apache-2.0 |
def forward(self, hidden_states, valid_length, mem_states, mem_valid_length):
"""
Parameters
----------
hidden_states
- layout = 'NT'
Shape (B, L_seq, d_model)
- layout = 'TN'
Shape (L_seq, B, d_model)
valid_length
Valid sequence length for each sample fed into the decoder.
Shape (B,)
mem_states
Encoded results.
- layout = 'NT'
Shape (B, L_src_seq, d_model)
- layout = 'TN'
Shape (L_src_seq, B, d_model)
mem_valid_length
Valid sequence length for each sample fed into the encoder: mem_valid_length = src_valid_length.
Shape (B,)
Returns
-------
hidden_states
- layout = 'NT'
Shape (B, L_seq, d_model)
- layout = 'TN'
Shape (L_seq, B, d_model)
"""
# 1. relative position embeddings and attention masks
# position_embeddings: Shape (num_heads, L_seq, L_seq), broadcastable, for self-attention
# self_attn_mask: Shape (B, L_seq, L_seq), for self-attention
# mem_attn_mask: Shape (B, L_seq, L_src_seq), for cross-attention
position_embeddings = self.relative_position_encoder(
gen_rel_position(hidden_states, layout=self.layout)
)
self_attn_mask = gen_self_attn_mask(
hidden_states,
valid_length,
dtype=self._dtype,
attn_type='causal',
layout=self.layout
)
mem_attn_mask = gen_mem_attn_mask(
mem_states,
mem_valid_length,
hidden_states,
valid_length,
dtype=self._dtype,
layout=self.layout
)
# 2. decoder blocks and other layers
hidden_states = self.dropout(hidden_states)
for layer in self.layers:
hidden_states = layer(
hidden_states,
self_attn_mask,
position_embeddings,
mem_states,
mem_attn_mask
)
hidden_states = self.final_layer_norm(hidden_states)
hidden_states = self.dropout(hidden_states)
return hidden_states |
Parameters
----------
hidden_states
- layout = 'NT'
Shape (B, L_seq, d_model)
- layout = 'TN'
Shape (L_seq, B, d_model)
valid_length
Valid sequence length for each sample fed into the decoder.
Shape (B,)
mem_states
Encoded results.
- layout = 'NT'
Shape (B, L_src_seq, d_model)
- layout = 'TN'
Shape (L_src_seq, B, d_model)
mem_valid_length
Valid sequence length for each sample fed into the encoder: mem_valid_length = src_valid_length.
Shape (B,)
Returns
-------
hidden_states
- layout = 'NT'
Shape (B, L_seq, d_model)
- layout = 'TN'
Shape (L_seq, B, d_model)
| forward | python | dmlc/gluon-nlp | src/gluonnlp/models/t5.py | https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/models/t5.py | Apache-2.0 |
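The 'causal' self-attention mask requested above forbids attending to future positions and to padding. A plain-NumPy sketch of one (L_seq, L_seq) slice of such a mask, for a sequence of valid length 3 padded to length 5 (illustrative only):

import numpy as np

L_seq, valid_length = 5, 3
causal = np.tril(np.ones((L_seq, L_seq), dtype=bool))     # query i may attend to key j only if j <= i
not_padded = (np.arange(L_seq) < valid_length)[None, :]   # mask out padded key positions
mask = causal & not_padded                                # one (L_seq, L_seq) slice of the (B, L_seq, L_seq) mask
print(mask.astype(int))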
def __init__(
self,
vocab_size=32128,
d_model=768,
d_kv=64,
d_ff=3072,
num_layers=12,
num_heads=12,
dropout_prob=0.1,
layer_norm_eps=1E-6,
activation='relu',
init_factor=1.0,
layout='NT',
dtype='float32'
):
"""
Parameters
----------
vocab_size
vocab_size should be no smaller than len(tokenizer._sp_model).
d_model
Equivalent to transformer's `units`.
d_kv
d_kv * num_heads (see below) = inner_dim.
d_ff
Equivalent to transformer's `hidden_size`.
num_layers
num_heads
dropout_prob
We use the same dropout rate for all dropout layers.
layer_norm_eps
activation
Type of feed forward projection. Currently supported are `relu` and `gated-gelu`.
init_factor
A scalar factor applied to the standard deviation of the weight initialization.
layout
dtype
"""
super().__init__()
assert vocab_size > 0, 'Vocab size {} is not valid.'.format(vocab_size)
self._vocab_size = vocab_size
self._d_model = d_model
self._d_kv = d_kv
self._d_ff = d_ff
self._num_layers = num_layers
self._num_heads = num_heads
self._inner_dim = num_heads * d_kv
self._activation = activation
self._init_factor = init_factor
self._dtype = dtype
assert layout in ['TN', 'NT'], \
'Invalid layout: {}. Only "TN" and "NT" are supported.'.format(layout)
self._layout = layout
self._time_axis = 1 if self.layout == 'NT' else 0
# input embedding weights are shared across the encoder and decoder
self.input_embedding_layer = nn.Embedding(
input_dim=vocab_size,
output_dim=d_model,
weight_initializer=Normal(1.0 * init_factor),
dtype=dtype
)
self.encoder = T5Encoder(
d_model=d_model,
d_kv=d_kv,
d_ff=d_ff,
num_layers=num_layers,
num_heads=num_heads,
dropout_prob=dropout_prob,
layer_norm_eps=layer_norm_eps,
activation=activation,
init_factor=init_factor,
layout=layout,
dtype=dtype
)
self.decoder = T5Decoder(
d_model=d_model,
d_kv=d_kv,
d_ff=d_ff,
num_layers=num_layers,
num_heads=num_heads,
dropout_prob=dropout_prob,
layer_norm_eps=layer_norm_eps,
activation=activation,
init_factor=init_factor,
layout=layout,
dtype=dtype
) |
Parameters
----------
vocab_size
vocab_size should be no smaller than len(tokenizer._sp_model).
d_model
Equivalent to transformer's `units`.
d_kv
d_kv * num_heads (see below) = inner_dim.
d_ff
Equivalent to transformer's `hidden_size`.
num_layers
num_heads
dropout_prob
We use the same dropout rate for all dropout layers.
layer_norm_eps
activation
Type of feed forward projection. Currently supported are `relu` and `gated-gelu`.
init_factor
A scalar factor applied to the standard deviation of the weight initialization.
layout
dtype
| __init__ | python | dmlc/gluon-nlp | src/gluonnlp/models/t5.py | https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/models/t5.py | Apache-2.0 |
def encode(self, src_data, src_valid_length):
"""Encode the source data to memory states.
Parameters
----------
src_data
Token ids fed into the encoder.
- layout = 'NT'
Shape (B, L_src_seq)
- layout = 'TN'
Shape (L_src_seq, B)
src_valid_length
Valid sequence length for each sample fed into the encoder.
Shape (B,)
Returns
-------
enc_out
- layout = 'NT'
Shape (B, L_src_seq, d_model)
- layout = 'TN'
Shape (L_src_seq, B, d_model)
"""
src_hidden_states = self.input_embedding_layer(src_data)
enc_out = self.encoder(
src_hidden_states,
src_valid_length
)
return enc_out | Encode the source data to memory states.
Parameters
----------
src_data
Token ids fed into the encoder.
- layout = 'NT'
Shape (B, L_src_seq)
- layout = 'TN'
Shape (L_src_seq, B)
src_valid_length
Valid sequence length for each sample fed into the encoder.
Shape (B,)
Returns
-------
enc_out
- layout = 'NT'
Shape (B, L_src_seq, d_model)
- layout = 'TN'
Shape (L_src_seq, B, d_model)
| encode | python | dmlc/gluon-nlp | src/gluonnlp/models/t5.py | https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/models/t5.py | Apache-2.0 |
def decode(self, tgt_data, tgt_valid_length, mem_states, mem_valid_length):
"""Decode based on target data and memory states.
Parameters
----------
tgt_data
Token ids fed into the decoder.
- layout = 'NT'
Shape (B, L_seq)
- layout = 'TN'
Shape (L_seq, B)
tgt_valid_length
Valid sequence length for each sample fed into the decoder.
Shape (B,)
mem_states
- layout = 'NT'
Shape (B, L_src_seq, d_model)
- layout = 'TN'
Shape (L_src_seq, B, d_model)
mem_valid_length
Valid sequence length for each sample fed into the encoder: mem_valid_length = src_valid_length.
Shape (B,)
Returns
-------
dec_out
- layout = 'NT'
Shape (B, L_seq, d_model)
- layout = 'TN'
Shape (L_seq, B, d_model)
"""
tgt_hidden_states = self.input_embedding_layer(tgt_data)
dec_out = self.decoder(
tgt_hidden_states,
tgt_valid_length,
mem_states,
mem_valid_length
)
return dec_out | Decode based on target data and memory states.
Parameters
----------
tgt_data
Token ids fed into the decoder.
- layout = 'NT'
Shape (B, L_seq)
- layout = 'TN'
Shape (L_seq, B)
tgt_valid_length
Valid sequence length for each sample fed into the decoder.
Shape (B,)
mem_states
- layout = 'NT'
Shape (B, L_src_seq, d_model)
- layout = 'TN'
Shape (L_src_seq, B, d_model)
mem_valid_length
Valid sequence length for each sample fed into the encoder: mem_valid_length = src_valid_length.
Shape (B,)
Returns
-------
dec_out
- layout = 'NT'
Shape (B, L_seq, d_model)
- layout = 'TN'
Shape (L_seq, B, d_model)
| decode | python | dmlc/gluon-nlp | src/gluonnlp/models/t5.py | https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/models/t5.py | Apache-2.0 |
def forward(self, src_data, src_valid_length, tgt_data, tgt_valid_length):
"""
Parameters
----------
src_data
Token ids fed into the encoder.
- layout = 'NT'
Shape (B, L_src_seq)
- layout = 'TN'
Shape (L_src_seq, B)
src_valid_length
Valid sequence length for each sample fed into the encoder.
Shape (B,)
tgt_data
Token ids fed into the decoder.
- layout = 'NT'
Shape (B, L_seq)
- layout = 'TN'
Shape (L_seq, B)
tgt_valid_length
Valid sequence length for each sample fed into the decoder.
Shape (B,)
Returns
-------
dec_out
- layout = 'NT'
Shape (B, L_seq, d_model)
- layout = 'TN'
Shape (L_seq, B, d_model)
"""
enc_out = self.encode(src_data, src_valid_length)
dec_out = self.decode(tgt_data, tgt_valid_length, enc_out, src_valid_length)
return dec_out |
Parameters
----------
src_data
Token ids fed into the encoder.
- layout = 'NT'
Shape (B, L_src_seq)
- layout = 'TN'
Shape (L_src_seq, B)
src_valid_length
Valid sequence length for each sample fed into the encoder.
Shape (B,)
tgt_data
Token ids fed into the decoder.
- layout = 'NT'
Shape (B, L_seq)
- layout = 'TN'
Shape (L_seq, B)
tgt_valid_length
Valid sequence length for each sample fed into the decoder.
Shape (B,)
Returns
-------
dec_out
- layout = 'NT'
Shape (B, L_seq, d_model)
- layout = 'TN'
Shape (L_seq, B, d_model)
| forward | python | dmlc/gluon-nlp | src/gluonnlp/models/t5.py | https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/models/t5.py | Apache-2.0 |
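A small end-to-end sketch of this seq2seq forward pass with a deliberately tiny T5Model; all sizes are illustrative and initialize() is the standard Gluon call.

import mxnet as mx
from gluonnlp.models.t5 import T5Model

model = T5Model(vocab_size=512, d_model=32, d_kv=8, d_ff=64,
                num_layers=2, num_heads=4, layout='NT')
model.initialize()

B, L_src, L_tgt = 2, 10, 7
src_data = mx.np.random.randint(0, 512, (B, L_src))
tgt_data = mx.np.random.randint(0, 512, (B, L_tgt))
src_valid_length = mx.np.array([10, 8], dtype=mx.np.int32)
tgt_valid_length = mx.np.array([7, 5], dtype=mx.np.int32)

dec_out = model(src_data, src_valid_length, tgt_data, tgt_valid_length)
print(dec_out.shape)   # (B, L_tgt, d_model) for layout 'NT'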
def state_batch_axis(self):
"""The returned 4-tuple corresponds to the batch axes of `init_states()` results.
Returns
-------
enc_out_batch_axis
src_valid_length_batch_axis
position_batch_axis
dec_layer_batch_axes
"""
if self.model.layout == 'NT':
return 0, 0, 0, self.model.decoder.state_batch_axis
else:
return 1, 0, 0, self.model.decoder.state_batch_axis | The returned 4-tuple corresponds to the batch axes of `init_states()` results.
Returns
-------
enc_out_batch_axis
src_valid_length_batch_axis
position_batch_axis
dec_layer_batch_axes
| state_batch_axis | python | dmlc/gluon-nlp | src/gluonnlp/models/t5.py | https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/models/t5.py | Apache-2.0 |
def init_states(self, src_data, src_valid_length):
"""Initialize the states required for incremental decoding.
Parameters
----------
src_data
Token ids fed into the encoder.
- layout = 'NT'
Shape (B, L_src_seq)
- layout = 'TN'
Shape (L_src_seq, B)
src_valid_length
Valid sequence length for each sample fed into the encoder.
Shape (B,)
Returns
-------
enc_out
Encoded results from src_data.
- layout = 'NT'
Shape (B, L_src_seq, d_model)
- layout = 'TN'
Shape (L_src_seq, B, d_model)
src_valid_length
Shape (B,)
position
Shape (B,)
dec_states
A list of `past_key_value` for incremental decoding.
"""
batch_size = src_data.shape[1 - self.model._time_axis] # NT: 0; TN: 1
ctx = src_data.ctx
enc_out = self.model.encode(src_data, src_valid_length)
position = np.zeros((batch_size,), dtype=np.int32, ctx=ctx)
key_values = self.model.decoder._init_key_values(batch_size, ctx, dtype=enc_out.dtype)
return enc_out, src_valid_length, position, key_values | Initialize the states required for incremental decoding.
Parameters
----------
src_data
Token ids fed into the encoder.
- layout = 'NT'
Shape (B, L_src_seq)
- layout = 'TN'
Shape (L_src_seq, B)
src_valid_length
Valid sequence length for each sample fed into the encoder.
Shape (B,)
Returns
-------
enc_out
Encoded results from src_data.
- layout = 'NT'
Shape (B, L_src_seq, d_model)
- layout = 'TN'
Shape (L_src_seq, B, d_model)
src_valid_length
Shape (B,)
position
Shape (B,)
dec_states
A list of `past_key_value` for incremental decoding.
| init_states | python | dmlc/gluon-nlp | src/gluonnlp/models/t5.py | https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/models/t5.py | Apache-2.0 |
def forward(self, step_data, past_states):
"""
Parameters
----------
step_data
Stepwise batched token ids for incremental decoding.
Shape (B,)
past_states
A 4-tuple containing states of last incremental decoding step.
1. mem_states
- layout = 'NT'
Shape (B, L_src_seq, d_model)
- layout = 'TN'
Shape (L_src_seq, B, d_model)
2. mem_valid_length
Shape (B,)
3. position
Shape (B,)
4. dec_states
A list of `past_key_value` tuples whose shape depend on layout.
Returns
-------
step_hidden_states
Stepwise hidden states with time axis squeezed out.
Shape (B, vocab_size)
new_states
Similar to past_states, but updated for next incremental decoding step.
"""
mem_states, mem_valid_length, position, past_key_values = past_states
step_hidden_states = self.model.input_embedding_layer(step_data)
# NT: (B, d_model) -> (B, 1, d_model); TN: (B, d_model) -> (1, B, d_model)
step_hidden_states = np.expand_dims(step_hidden_states, axis=self.model._time_axis)
step_hidden_states, present_key_values = self.model.decoder.incremental_decode(
step_hidden_states,
position,
past_key_values,
mem_states,
mem_valid_length
)
step_hidden_states = self.output_layer(step_hidden_states)
# NT: (B, 1, vocab_size) -> (B, vocab_size); TN: (1, B, vocab_size) -> (B, vocab_size)
step_hidden_states = npx.reshape(step_hidden_states, (-5, -1))
return step_hidden_states, (mem_states, mem_valid_length, position + 1, present_key_values) |
Parameters
----------
step_data
Stepwise batched token ids for incremental decoding.
Shape (B,)
past_states
A 4-tuple containing states of last incremental decoding step.
1. mem_states
- layout = 'NT'
Shape (B, L_src_seq, d_model)
- layout = 'TN'
Shape (L_src_seq, B, d_model)
2. mem_valid_length
Shape (B,)
3. position
Shape (B,)
4. dec_states
A list of `past_key_value` tuples whose shape depend on layout.
Returns
-------
step_hidden_states
Stepwise hidden states with time axis squeezed out.
Shape (B, vocab_size)
new_states
Similar to past_states, but updated for next incremental decoding step.
| forward | python | dmlc/gluon-nlp | src/gluonnlp/models/t5.py | https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/models/t5.py | Apache-2.0 |
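A hedged sketch of greedy decoding built on init_states and the stepwise forward above. `inference_model` stands for an instance of the inference wrapper shown here, and `bos_id` is an assumed begin-of-sequence token id, not a value taken from the source.

import mxnet as mx

def greedy_decode(inference_model, src_data, src_valid_length, bos_id, max_steps=20):
    # Encode the source once and build the initial states (encoder output, position, KV caches).
    states = inference_model.init_states(src_data, src_valid_length)
    batch_size = src_valid_length.shape[0]
    step_data = mx.np.full((batch_size,), bos_id, dtype=mx.np.int32)
    outputs = []
    for _ in range(max_steps):
        # step_logits: (B, vocab_size); states now carry the position counter and grown caches.
        step_logits, states = inference_model(step_data, states)
        step_data = mx.np.argmax(step_logits, axis=-1).astype(mx.np.int32)
        outputs.append(step_data)
    # A full implementation would also stop early once every sequence has emitted EOS.
    return mx.np.stack(outputs, axis=1)   # (B, max_steps)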
def transformer_base():
"""Configuration of Transformer WMT EN-DE Base"""
cfg = CN()
cfg.MODEL = CN()
cfg.MODEL.src_vocab_size = -1
cfg.MODEL.tgt_vocab_size = -1
cfg.MODEL.max_src_length = -1
cfg.MODEL.max_tgt_length = -1
cfg.MODEL.scale_embed = True
cfg.MODEL.pos_embed_type = "sinusoidal"
cfg.MODEL.shared_embed = True
cfg.MODEL.tie_weights = True
cfg.MODEL.attention_dropout = 0.0
cfg.MODEL.activation_dropout = 0.0
cfg.MODEL.dropout = 0.1
cfg.MODEL.layout = 'NT'
cfg.MODEL.dtype = 'float32'
# Parameters for the encoder
cfg.MODEL.ENCODER = CN()
cfg.MODEL.ENCODER.num_layers = 6
cfg.MODEL.ENCODER.units = 512
cfg.MODEL.ENCODER.num_heads = 8
cfg.MODEL.ENCODER.hidden_size = 2048
cfg.MODEL.ENCODER.recurrent = False
cfg.MODEL.ENCODER.activation = 'relu'
cfg.MODEL.ENCODER.pre_norm = False
cfg.MODEL.ENCODER.use_qkv_bias = True
# Parameters for the decoder
cfg.MODEL.DECODER = CN()
cfg.MODEL.DECODER.num_layers = 6
cfg.MODEL.DECODER.units = 512
cfg.MODEL.DECODER.num_heads = 8
cfg.MODEL.DECODER.hidden_size = 2048
cfg.MODEL.DECODER.recurrent = False
cfg.MODEL.DECODER.activation = 'relu'
cfg.MODEL.DECODER.pre_norm = False
cfg.MODEL.DECODER.use_qkv_bias = False
# Parameters for the initializer
cfg.INITIALIZER = CN()
cfg.INITIALIZER.embed = ['xavier', 'gaussian', 'in', 1.0]
cfg.INITIALIZER.weight = ['xavier', 'uniform', 'avg', 3.0]
cfg.INITIALIZER.bias = ['zeros']
cfg.VERSION = 1
cfg.freeze()
return cfg | Configuration of Transformer WMT EN-DE Base | transformer_base | python | dmlc/gluon-nlp | src/gluonnlp/models/transformer.py | https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/models/transformer.py | Apache-2.0 |
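A hedged usage sketch for the config above: the returned config is frozen and the vocabulary sizes are placeholders (-1), so unfreeze it, fill them in, and build the model via `TransformerModel.from_cfg()`. The sizes below are arbitrary.
import mxnet as mx
from gluonnlp.models.transformer import transformer_base, TransformerModel

cfg = transformer_base()
cfg.defrost()                       # the config is returned frozen
cfg.MODEL.src_vocab_size = 32000    # illustrative values; -1 is only a placeholder
cfg.MODEL.tgt_vocab_size = 32000
cfg.freeze()
model = TransformerModel.from_cfg(cfg)
model.initialize(ctx=mx.cpu())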
def __init__(self,
units: int = 512,
hidden_size: int = 2048,
num_heads: int = 8,
attention_dropout_prob: float = 0.1,
hidden_dropout_prob: float = 0.1,
activation_dropout_prob: float = 0.0,
layer_norm_eps: float = 1e-12,
pre_norm: bool = False,
use_qkv_bias: bool = True,
weight_initializer: Optional[InitializerType] = None,
bias_initializer: Optional[InitializerType] = 'zeros',
activation: str = 'relu',
dtype='float32',
layout='NT'):
"""
Parameters
----------
units
hidden_size
num_heads
attention_dropout_prob
hidden_dropout_prob
activation_dropout_prob
layer_norm_eps
pre_norm
Whether to attach the normalization layer before attention layer
If pre_norm:
norm(data) -> attn -> res(+data) -> ffn
Else:
data -> attn -> norm(res(+data)) -> ffn
use_qkv_bias
Whether to use bias for self attention
weight_initializer
bias_initializer
activation
dtype
layout
"""
super().__init__()
self._units = units
self._hidden_size = hidden_size
self._num_heads = num_heads
self._attention_dropout_prob = attention_dropout_prob
self._hidden_dropout_prob = hidden_dropout_prob
self._activation_dropout_prob = activation_dropout_prob
self._pre_norm = pre_norm
self._dtype = dtype
self._layout = layout
assert layout in ['TN', 'NT'], 'Invalid layout received = {}. ' \
'Only "TN" and "NT" are accepted!'.format(layout)
assert self._units % self._num_heads == 0, 'units must be divisible by the number of heads'
self.dropout_layer = nn.Dropout(hidden_dropout_prob)
self.attn_qkv = nn.Dense(3 * units,
flatten=False,
use_bias=use_qkv_bias,
in_units=units,
weight_initializer=weight_initializer,
bias_initializer=bias_initializer,
dtype=self._dtype)
self.attention_proj = nn.Dense(units=units,
flatten=False,
in_units=units,
use_bias=True,
weight_initializer=weight_initializer,
bias_initializer=bias_initializer,
dtype=self._dtype)
attention_layout = 'NTK' if self._layout == 'NT' else 'TNK'
self.attention_cell = \
MultiHeadAttentionCell(
query_units=self._units,
num_heads=self._num_heads,
attention_dropout=self._attention_dropout_prob,
scaled=True,
dtype=self._dtype,
layout=attention_layout
)
self.layer_norm = nn.LayerNorm(epsilon=layer_norm_eps,
in_channels=units)
self.ffn = PositionwiseFFN(units=units,
hidden_size=hidden_size,
dropout=hidden_dropout_prob,
activation_dropout=activation_dropout_prob,
weight_initializer=weight_initializer,
bias_initializer=bias_initializer,
layer_norm_eps=layer_norm_eps,
activation=activation,
pre_norm=pre_norm,
dtype=self._dtype) |
Parameters
----------
units
hidden_size
num_heads
attention_dropout_prob
hidden_dropout_prob
activation_dropout_prob
layer_norm_eps
pre_norm
Whether to attach the normalization layer before attention layer
If pre_norm:
norm(data) -> attn -> res(+data) -> ffn
Else:
data -> attn -> norm(res(+data)) -> ffn
use_qkv_bias
Whether to use bias for self attention
weight_initializer
bias_initializer
activation
dtype
layout
| __init__ | python | dmlc/gluon-nlp | src/gluonnlp/models/transformer.py | https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/models/transformer.py | Apache-2.0 |
def forward(self, data, attn_mask):
"""
Parameters
----------
data
- layout = 'NT'
Shape (batch_size, seq_length, C_in)
- layout = 'TN'
Shape (seq_length, batch_size, C_in)
attn_mask
Shape (batch_size, seq_length, seq_length)
Returns
-------
out
- layout = 'NT'
Shape (batch_size, seq_length, C_out)
- layout = 'TN'
Shape (seq_length, batch_size, C_out)
attn_weight
Shape (batch_size, seq_length, seq_length)
"""
if self._pre_norm:
data = self.layer_norm(data)
query, key, value = np.split(self.attn_qkv(data), 3, axis=-1)
query = npx.reshape(query, (-2, -2, self._num_heads, -1))
key = npx.reshape(key, (-2, -2, self._num_heads, -1))
value = npx.reshape(value, (-2, -2, self._num_heads, -1))
out, [_, attn_weight] = self.attention_cell(query, key, value, attn_mask)
out = self.attention_proj(out)
out = self.dropout_layer(out)
out = out + data
if not self._pre_norm:
out = self.layer_norm(out)
out = self.ffn(out)
return out, attn_weight |
Parameters
----------
data
- layout = 'NT'
Shape (batch_size, seq_length, C_in)
- layout = 'TN'
Shape (seq_length, batch_size, C_in)
attn_mask
Shape (batch_size, seq_length, seq_length)
Returns
-------
out
- layout = 'NT'
Shape (batch_size, seq_length, C_out)
- layout = 'TN'
Shape (seq_length, batch_size, C_out)
attn_weight
Shape (batch_size, seq_length, seq_length)
| forward | python | dmlc/gluon-nlp | src/gluonnlp/models/transformer.py | https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/models/transformer.py | Apache-2.0 |
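A shape-check sketch for a single encoder layer under the default 'NT' layout. It assumes the mask helper `gen_self_attn_mask` lives in `gluonnlp.attention_cell`, as used elsewhere in this file; all sizes are arbitrary.
import mxnet as mx
from gluonnlp.models.transformer import TransformerEncoderLayer
from gluonnlp.attention_cell import gen_self_attn_mask

batch_size, seq_length, units = 2, 6, 512
layer = TransformerEncoderLayer(units=units, hidden_size=2048, num_heads=8)
layer.initialize()
data = mx.np.random.normal(0, 1, (batch_size, seq_length, units))
valid_length = mx.np.array([6, 4], dtype='float32')
attn_mask = gen_self_attn_mask(data, valid_length, attn_type='full', layout='NT')
out, attn_weight = layer(data, attn_mask)   # out: (batch_size, seq_length, units)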
def __init__(self, num_layers=6, recurrent=False,
units=512, hidden_size=2048, num_heads=8,
activation_dropout=0.0, dropout=0.1, use_qkv_bias=True,
attention_dropout=0.1, layer_norm_eps=1E-5, data_norm=False,
pre_norm=False, weight_initializer=None, bias_initializer='zeros',
activation='relu', dtype='float32', layout='NT'):
"""
Parameters
----------
num_layers :
The number of layers
recurrent : bool
Whether the layers share weights or not
units
hidden_size
num_heads
dropout
layer_norm_eps
data_norm
Whether to apply LayerNorm to the data
pre_norm
Whether to apply LayerNorm before the attention layer.
weight_initializer
bias_initializer
activation
dtype
layout
"""
super().__init__()
self._dtype = dtype
self.num_layers = num_layers
self._recurrent = recurrent
self._data_norm = data_norm
self._pre_norm = pre_norm
self._layout = layout
assert layout in ['TN', 'NT'], 'Invalid layout received = {}. ' \
'Only "TN" and "NT" are accepted!'.format(layout)
self.dropout_layer = nn.Dropout(dropout)
if self._pre_norm:
self.ln_final = nn.LayerNorm(epsilon=layer_norm_eps,
in_channels=units)
if self._data_norm:
self.ln_data = nn.LayerNorm(epsilon=layer_norm_eps,
in_channels=units)
# Construct the intermediate layers
self.layers = nn.HybridSequential()
real_num_layers = 1 if recurrent else num_layers
for i in range(real_num_layers):
self.layers.add(TransformerEncoderLayer(
units=units,
hidden_size=hidden_size,
num_heads=num_heads,
hidden_dropout_prob=dropout,
attention_dropout_prob=attention_dropout,
activation_dropout_prob=activation_dropout,
use_qkv_bias=use_qkv_bias,
layer_norm_eps=layer_norm_eps,
weight_initializer=weight_initializer,
bias_initializer=bias_initializer,
pre_norm=pre_norm,
activation=activation,
layout=self._layout,
dtype=dtype)) |
Parameters
----------
num_layers :
The number of layers
recurrent : bool
Whether the layers share weights or not
units
hidden_size
num_heads
dropout
layer_norm_eps
data_norm
Whether to apply LayerNorm to the data
pre_norm
Whether to apply LayerNorm before the attention layer.
weight_initializer
bias_initializer
activation
dtype
layout
| __init__ | python | dmlc/gluon-nlp | src/gluonnlp/models/transformer.py | https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/models/transformer.py | Apache-2.0 |
def forward(self, data, valid_length):
"""
Parameters
----------
data :
- layout = 'NT'
Shape (batch_size, seq_length, C)
- layout = 'TN'
Shape (seq_length, batch_size, C)
valid_length :
Shape (batch_size,)
Returns
-------
out
- layout = 'NT'
Shape (batch_size, seq_length, C_out)
- layout = 'TN'
Shape (seq_length, batch_size, C_out)
"""
# 1. Embed the data
attn_mask = gen_self_attn_mask(data, valid_length,
dtype=self._dtype,
layout=self.layout,
attn_type='full')
out = self.dropout_layer(data)
if self._data_norm:
out = self.ln_data(out)
for i in range(self.num_layers):
if self._recurrent:
layer = self.layers[0]
else:
layer = self.layers[i]
out, _ = layer(out, attn_mask)
if self._pre_norm:
out = self.ln_final(out)
return out |
Parameters
----------
data :
- layout = 'NT'
Shape (batch_size, seq_length, C)
- layout = 'TN'
Shape (seq_length, batch_size, C)
valid_length :
Shape (batch_size,)
Returns
-------
out
- layout = 'NT'
Shape (batch_size, seq_length, C_out)
- layout = 'TN'
Shape (seq_length, batch_size, C_out)
| forward | python | dmlc/gluon-nlp | src/gluonnlp/models/transformer.py | https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/models/transformer.py | Apache-2.0 |
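Unlike the single layer above, the full encoder takes `valid_length` directly and builds the attention mask internally. A minimal sketch with arbitrary sizes:
import mxnet as mx
from gluonnlp.models.transformer import TransformerEncoder

encoder = TransformerEncoder(num_layers=2, units=512, hidden_size=2048, num_heads=8)
encoder.initialize()
data = mx.np.random.normal(0, 1, (2, 6, 512))       # (batch_size, seq_length, C), layout 'NT'
valid_length = mx.np.array([6, 3], dtype='float32')
out = encoder(data, valid_length)                   # (batch_size, seq_length, C_out)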
def __init__(self, units: int = 512,
mem_units: Optional[int] = None,
hidden_size: int = 2048,
num_heads: int = 8,
activation_dropout: float = 0.0,
dropout: float = 0.1,
attention_dropout: float = 0.1,
layer_norm_eps: float = 1E-5,
activation: str = 'relu',
pre_norm: bool = False,
use_qkv_bias: bool = True,
weight_initializer=None,
bias_initializer='zeros',
dtype='float32',
layout='NT'):
"""
Parameters
----------
units
mem_units
The number of units in the memory. By default, it is set to be the
same as `units`.
hidden_size
num_heads
activation_dropout
dropout
attention_dropout
layer_norm_eps
activation
pre_norm
Whether to apply normalization before the attention layer
use_qkv_bias
Whether to use bias for both self attention and contextual attention
weight_initializer
bias_initializer
dtype
Data type
layout
Layout of the input
"""
super().__init__()
self._dtype = dtype
self._units = units
if mem_units is None:
mem_units = units
self._mem_units = mem_units
self._pre_norm = pre_norm
self._num_heads = num_heads
self._attention_dropout = attention_dropout
self._dtype = dtype
self._layout = layout
assert layout in ['TN', 'NT'], 'Invalid layout received = {}. ' \
'Only "TN" and "NT" are accepted!'.format(layout)
attention_layout = 'NTK' if layout == 'NT' else 'TNK'
self.dropout_layer = nn.Dropout(dropout)
if units % num_heads:
raise ValueError('In Transformer, units must be divisible by the number of '
'heads. Received units={}, num_heads={}'.format(units, num_heads))
self.attn_in_qkv = nn.Dense(3 * units, in_units=units,
use_bias=use_qkv_bias,
flatten=False,
weight_initializer=weight_initializer,
bias_initializer=bias_initializer,
dtype=dtype)
self.self_attention = MultiHeadAttentionCell(query_units=units,
num_heads=num_heads,
attention_dropout=self._attention_dropout,
dtype=dtype,
layout=attention_layout)
self.proj_in = nn.Dense(units=units, in_units=units, flatten=False, use_bias=True,
weight_initializer=weight_initializer,
bias_initializer=bias_initializer,
dtype=dtype)
self.attn_inter_q = nn.Dense(units,
in_units=units,
use_bias=use_qkv_bias,
flatten=False,
weight_initializer=weight_initializer,
bias_initializer=bias_initializer,
dtype=dtype)
self.attn_inter_k = nn.Dense(units, in_units=mem_units,
use_bias=use_qkv_bias,
flatten=False,
weight_initializer=weight_initializer,
bias_initializer=bias_initializer,
dtype=dtype)
self.attn_inter_v = nn.Dense(units, in_units=mem_units,
use_bias=use_qkv_bias,
flatten=False,
weight_initializer=weight_initializer,
bias_initializer=bias_initializer,
dtype=dtype)
self.inter_attention = MultiHeadAttentionCell(query_units=units,
num_heads=num_heads,
attention_dropout=self._attention_dropout,
dtype=dtype,
layout=attention_layout)
self.proj_inter = nn.Dense(units=units, in_units=units,
flatten=False, use_bias=True,
weight_initializer=weight_initializer,
bias_initializer=bias_initializer,
dtype=dtype)
# TODO(sxjscience) Add DType to LayerNorm
self.ln_in = nn.LayerNorm(epsilon=layer_norm_eps,
in_channels=units)
self.ln_inter = nn.LayerNorm(epsilon=layer_norm_eps,
in_channels=units)
self.ffn = PositionwiseFFN(units=units,
hidden_size=hidden_size,
dropout=dropout,
activation_dropout=activation_dropout,
weight_initializer=weight_initializer,
bias_initializer=bias_initializer,
layer_norm_eps=layer_norm_eps,
activation=activation,
pre_norm=pre_norm,
dtype=dtype) |
Parameters
----------
units
mem_units
The number of units in the memory. By default, it is set to be the
same as `units`.
hidden_size
num_heads
activation_dropout
dropout
attention_dropout
layer_norm_eps
activation
pre_norm
Whether to apply normalization before the attention layer
use_qkv_bias
Whether to use bias for both self attention and contextual attention
weight_initializer
bias_initializer
dtype
Data type
layout
Layout of the input
| __init__ | python | dmlc/gluon-nlp | src/gluonnlp/models/transformer.py | https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/models/transformer.py | Apache-2.0 |
def forward(self, data, mem, self_causal_mask, mem_attn_mask):
"""
Parameters
----------
data
- layout = 'NT'
Shape (batch_size, seq_length, C_in)
- layout = 'TN'
Shape (seq_length, batch_size, C_in)
mem
- layout = 'NT'
Shape (batch_size, mem_length, C_mem)
- layout = 'TN'
Shape (mem_length, batch_size, C_mem)
self_causal_mask
Shape (batch_size, seq_length, seq_length)
Mask for the causal self-attention.
self_causal_mask[i, j, :] indicates the elements that token `j` in sample `i` can attend to (1 --> used, 0 --> not used).
To understand the self-causal attention mask, we can look at the following example:
.. code-block:: none
['I', 'can', 'now', 'use', 'numpy', 'in', 'Gluon@@', 'NLP']
'I': 1, 0, 0, 0, 0, 0, 0, 0
'can': 1, 1, 0, 0, 0, 0, 0, 0
'now': 1, 1, 1, 0, 0, 0, 0, 0
'use': 1, 1, 1, 1, 0, 0, 0, 0
'numpy': 1, 1, 1, 1, 1, 0, 0, 0
'in': 1, 1, 1, 1, 1, 1, 0, 0
'Gluon@@': 1, 1, 1, 1, 1, 1, 1, 0
'NLP': 1, 1, 1, 1, 1, 1, 1, 1
mem_attn_mask :
Shape (batch_size, seq_length, mem_length)
Mask between the decoding input and the memory.
.. code-block:: none
['numpy', 'in', 'Gluon@@', 'NLP']
'I': 1, 1, 1, 1
'can': 1, 1, 1, 1
'now': 1, 1, 1, 1
'use': 1, 1, 1, 1
Returns
-------
out
- layout = 'NT'
Shape (batch_size, seq_length, C_out)
- layout = 'TN'
Shape (seq_length, batch_size, C_out)
"""
# 1. Get the causal self-attention value
if self._pre_norm:
data = self.ln_in(data)
self_query, self_key, self_value = np.split(self.attn_in_qkv(data), 3, axis=-1)
out, [_, self_attn_weight] = self.self_attention(
npx.reshape(self_query, (-2, -2, self._num_heads, -1)),
npx.reshape(self_key, (-2, -2, self._num_heads, -1)),
npx.reshape(self_value, (-2, -2, self._num_heads, -1)),
self_causal_mask)
out = self.proj_in(out)
out = self.dropout_layer(out)
out = out + data
if not self._pre_norm:
out = self.ln_in(out)
# 2. Attend to the contextual memory
data = out
if self._pre_norm:
data = self.ln_inter(data)
out, [_, context_attn_weight] = self.inter_attention(
npx.reshape(self.attn_inter_q(data), (-2, -2, self._num_heads, -1)),
npx.reshape(self.attn_inter_k(mem), (-2, -2, self._num_heads, -1)),
npx.reshape(self.attn_inter_v(mem), (-2, -2, self._num_heads, -1)),
mem_attn_mask)
out = self.proj_inter(out)
out = self.dropout_layer(out)
out = out + data
if not self._pre_norm:
out = self.ln_inter(out)
# 3. Encode the output via an FFN layer
out = self.ffn(out)
return out |
Parameters
----------
data
- layout = 'NT'
Shape (batch_size, seq_length, C_in)
- layout = 'TN'
Shape (seq_length, batch_size, C_in)
mem
- layout = 'NT'
Shape (batch_size, mem_length, C_mem)
- layout = 'TN'
Shape (mem_length, batch_size, C_mem)
self_causal_mask
Shape (batch_size, seq_length, seq_length)
Mask for the causal self-attention.
self_causal_mask[i, j, :] indicates the elements that token `j` in sample `i` can attend to (1 --> used, 0 --> not used).
To understand the self-causal attention mask, we can look at the following example:
.. code-block:: none
['I', 'can', 'now', 'use', 'numpy', 'in', 'Gluon@@', 'NLP']
'I': 1, 0, 0, 0, 0, 0, 0, 0
'can': 1, 1, 0, 0, 0, 0, 0, 0
'now': 1, 1, 1, 0, 0, 0, 0, 0
'use': 1, 1, 1, 1, 0, 0, 0, 0
'numpy': 1, 1, 1, 1, 1, 0, 0, 0
'in': 1, 1, 1, 1, 1, 1, 0, 0
'Gluon@@': 1, 1, 1, 1, 1, 1, 1, 0
'NLP': 1, 1, 1, 1, 1, 1, 1, 1
mem_attn_mask :
Shape (batch_size, seq_length, mem_length)
Mask between the decoding input and the memory.
.. code-block:: none
['numpy', 'in', 'Gluon@@', 'NLP']
'I': 1, 1, 1, 1
'can': 1, 1, 1, 1
'now': 1, 1, 1, 1
'use': 1, 1, 1, 1
Returns
-------
out
- layout = 'NT'
Shape (batch_size, seq_length, C_out)
- layout = 'TN'
Shape (seq_length, batch_size, C_out)
| forward | python | dmlc/gluon-nlp | src/gluonnlp/models/transformer.py | https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/models/transformer.py | Apache-2.0 |
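A hedged sketch of driving a single decoder layer with the two masks it expects, built with the `gen_self_attn_mask` / `gen_mem_attn_mask` helpers used elsewhere in this file; all sizes are arbitrary.
import mxnet as mx
from gluonnlp.models.transformer import TransformerDecoderLayer
from gluonnlp.attention_cell import gen_self_attn_mask, gen_mem_attn_mask

layer = TransformerDecoderLayer(units=512, hidden_size=2048, num_heads=8)
layer.initialize()
data = mx.np.random.normal(0, 1, (2, 5, 512))        # (batch_size, seq_length, C_in)
mem = mx.np.random.normal(0, 1, (2, 7, 512))         # (batch_size, mem_length, C_mem)
valid_length = mx.np.array([5, 3], dtype='float32')
mem_valid_length = mx.np.array([7, 6], dtype='float32')
self_causal_mask = gen_self_attn_mask(data, valid_length, attn_type='causal', layout='NT')
mem_attn_mask = gen_mem_attn_mask(mem, mem_valid_length, data, valid_length, layout='NT')
out = layer(data, mem, self_causal_mask, mem_attn_mask)   # (batch_size, seq_length, C_out)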
def init_states(self, batch_size, ctx, dtype='float32'):
"""Initialize the states required for incremental decoding
Returns
-------
init_key
- layout = 'NT'
Shape (batch_size, 0, N, C_key)
- layout = 'TN'
Shape (0, batch_size, N, C_key)
init_value
- layout = 'NT'
Shape (batch_size, 0, N, C_value)
- layout = 'TN'
Shape (0, batch_size, N, C_value)
"""
if self.layout == 'NT':
init_key = mx.np.zeros(shape=(batch_size, 0, self._num_heads,
self._units // self._num_heads), ctx=ctx, dtype=dtype)
init_value = mx.np.zeros(shape=(batch_size, 0, self._num_heads,
self._units // self._num_heads), ctx=ctx, dtype=dtype)
else:
init_key = mx.np.zeros(shape=(0, batch_size, self._num_heads,
self._units // self._num_heads), ctx=ctx, dtype=dtype)
init_value = mx.np.zeros(shape=(0, batch_size, self._num_heads,
self._units // self._num_heads), ctx=ctx, dtype=dtype)
return init_key, init_value | Initialize the states required for incremental decoding
Returns
-------
init_key
- layout = 'NT'
Shape (batch_size, 0, N, C_key)
- layout = 'TN'
Shape (0, batch_size, N, C_key)
init_value
- layout = 'NT'
Shape (batch_size, 0, N, C_value)
- layout = 'TN'
Shape (0, batch_size, N, C_value)
| init_states | python | dmlc/gluon-nlp | src/gluonnlp/models/transformer.py | https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/models/transformer.py | Apache-2.0 |
def incremental_decode(self, data, states, mem, mem_valid_length, mem_attn_mask=None):
"""Incrementally generate the output given the decoder input.
Parameters
----------
data
Shape (batch_size, C_in)
states
The previous states, contains
1. layout = 'NT':
- prev_multi_key
Shape (batch_size, prev_seq_length, num_heads, C_key)
- prev_multi_value
Shape (batch_size, prev_seq_length, num_heads, C_value)
2. layout = 'TN'
- prev_multi_key
Shape (prev_seq_length, batch_size, num_heads, C_key)
- prev_multi_value
Shape (prev_seq_length, batch_size, num_heads, C_value)
mem
The memory
1. layout = 'NT'
Shape (batch_size, mem_length, C_mem)
2. layout = 'TN'
Shape (mem_length, batch_size, C_mem)
mem_valid_length
Valid length of the memory
Shape (batch_size,)
mem_attn_mask
The attention mask between data and the memory
Has shape (batch_size, 1, mem_length)
Returns
-------
out
Shape (batch_size, C_out)
updated_states
- new_key
Shape (batch_size, prev_seq_length + 1, num_heads, C_key)
- new_value
Shape (batch_size, prev_seq_length + 1, num_heads, C_value)
"""
if self._pre_norm:
data = self.ln_in(data)
if self.layout == 'NT':
time_axis = 1
else:
time_axis = 0
data = np.expand_dims(data, axis=time_axis)
# Shape (B, prev_L, #Head, C_K), (B, prev_L, #Head, C_V)
# or (prev_L, B, #Head, C_K), (prev_L, B, #Head, C_V)
prev_key, prev_value = states
if mem_attn_mask is None:
mem_attn_mask = gen_mem_attn_mask(mem, mem_valid_length, data, None,
dtype=self._dtype, layout=self.layout)
# 1. Get the causal self-attention value, we need to attend to both the current data
# and the previous stored key/values
# Shape (B, 1, 3 * num_heads * C_key)
# or (1, B, 3 * num_heads * C_key)
step_qkv = self.attn_in_qkv(data)
step_query, step_key, step_value = np.split(step_qkv, 3, axis=-1)
step_query = npx.reshape(step_query, (-2, -2, self._num_heads, -1))
step_key = npx.reshape(step_key, (-2, -2, self._num_heads, -1))
step_value = npx.reshape(step_value, (-2, -2, self._num_heads, -1))
new_key = np.concatenate([prev_key, step_key], axis=time_axis)
new_value = np.concatenate([prev_value, step_value], axis=time_axis)
out, [_, attn_weight] = self.self_attention(step_query, new_key, new_value, None)
out = self.proj_in(out)
out = self.dropout_layer(out)
out = out + data
if not self._pre_norm:
out = self.ln_in(out)
# 2. Attend to the contextual memory
data = out
if self._pre_norm:
data = self.ln_inter(data)
out, _ = self.inter_attention(npx.reshape(self.attn_inter_q(data),
(-2, -2, self._num_heads, -1)),
npx.reshape(self.attn_inter_k(mem),
(-2, -2, self._num_heads, -1)),
npx.reshape(self.attn_inter_v(mem),
(-2, -2, self._num_heads, -1)),
mem_attn_mask)
out = self.proj_inter(out)
out = self.dropout_layer(out)
out = out + data
if not self._pre_norm:
out = self.ln_inter(out)
# 3. Encode the output via an FFN layer
out = self.ffn(out)
out = npx.reshape(out, (-5, -1))
return out, (new_key, new_value) | Incrementally generate the output given the decoder input.
Parameters
----------
data
Shape (batch_size, C_in)
states
The previous states, contains
1. layout = 'NT':
- prev_multi_key
Shape (batch_size, prev_seq_length, num_heads, C_key)
- prev_multi_value
Shape (batch_size, prev_seq_length, num_heads, C_value)
2. layout = 'TN'
- prev_multi_key
Shape (prev_seq_length, batch_size, num_heads, C_key)
- prev_multi_value
Shape (prev_seq_length, batch_size, num_heads, C_value)
mem
The memory
1. layout = 'NT'
Shape (batch_size, mem_length, C_mem)
2. layout = 'TN'
Shape (mem_length, batch_size, C_mem)
mem_valid_length
Valid length of the memory
Shape (batch_size,)
mem_attn_mask
The attention mask between data and the memory
Has shape (batch_size, 1, mem_length)
Returns
-------
out
Shape (batch_size, C_out)
updated_states
- new_key
Shape (batch_size, prev_seq_length + 1, num_heads, C_key)
- new_value
Shape (batch_size, prev_seq_length + 1, num_heads, C_value)
| incremental_decode | python | dmlc/gluon-nlp | src/gluonnlp/models/transformer.py | https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/models/transformer.py | Apache-2.0 |
def forward(self, data, valid_length, mem_data, mem_valid_length):
"""
Parameters
----------
data
- layout = 'NT'
Shape (batch_size, seq_length, C_in)
- layout = 'TN'
Shape (seq_length, batch_size, C_in)
valid_length
Shape (batch_size,)
mem_data
- layout = 'NT'
Shape (batch_size, mem_length, C_mem)
- layout = 'TN'
Shape (mem_length, batch_size, C_mem)
mem_valid_length
Shape (batch_size,)
Returns
-------
out
- layout = 'NT'
Shape (batch_size, seq_length, C_out)
- layout = 'TN'
Shape (seq_length, batch_size, C_out)
"""
# 1. Embed the data
out = self.dropout_layer(data)
if self._data_norm:
out = self.ln_data(out)
self_causal_mask = gen_self_attn_mask(data, valid_length,
dtype=self._dtype,
attn_type='causal',
layout=self._layout)
mem_attn_mask = gen_mem_attn_mask(mem_data, mem_valid_length, data, valid_length,
dtype=self._dtype,
layout=self._layout)
for i in range(self.num_layers):
if self.recurrent:
layer = self.layers[0]
else:
layer = self.layers[i]
out = layer(out, mem_data, self_causal_mask, mem_attn_mask)
if self._pre_norm:
out = self.ln_final(out)
return out |
Parameters
----------
data
- layout = 'NT'
Shape (batch_size, seq_length, C_in)
- layout = 'TN'
Shape (seq_length, batch_size, C_in)
valid_length
Shape (batch_size,)
mem_data
- layout = 'NT'
Shape (batch_size, mem_length, C_mem)
- layout = 'TN'
Shape (mem_length, batch_size, C_mem)
mem_valid_length
Shape (batch_size,)
Returns
-------
out
- layout = 'NT'
Shape (batch_size, seq_length, C_out)
- layout = 'TN'
Shape (seq_length, batch_size, C_out)
| forward | python | dmlc/gluon-nlp | src/gluonnlp/models/transformer.py | https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/models/transformer.py | Apache-2.0 |
def init_states(self, batch_size, ctx, dtype='float32'):
"""Initialize the states required for incremental decoding
Returns
-------
states
A list of states, each includes:
- init_key
- layout = 'NT'
Shape (batch_size, 0, N, C_key)
- layout = 'TN'
Shape (0, batch_size, N, C_key)
- init_value :
- layout = 'NT'
Shape (batch_size, 0, N, C_value)
- layout = 'TN'
Shape (0, batch_size, N, C_value)
"""
states = []
for i in range(self.num_layers):
if self.recurrent:
layer = self.layers[0]
else:
layer = self.layers[i]
states.append(layer.init_states(batch_size=batch_size,
ctx=ctx,
dtype=dtype))
return states | Initialize the states required for incremental decoding
Returns
-------
states
A list of states, each includes:
- init_key
- layout = 'NT'
Shape (batch_size, 0, N, C_key)
- layout = 'TN'
Shape (0, batch_size, N, C_key)
- init_value :
- layout = 'NT'
Shape (batch_size, 0, N, C_value)
- layout = 'TN'
Shape (0, batch_size, N, C_value)
| init_states | python | dmlc/gluon-nlp | src/gluonnlp/models/transformer.py | https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/models/transformer.py | Apache-2.0 |
def incremental_decode(self, data, states, mem, mem_valid_length):
"""Incrementally generate the output given the decoder input.
Parameters
----------
data
Shape (batch_size, C_in)
states
The previous states, contain a list of
1. layout = 'NT'
- prev_multi_key
Shape (batch_size, prev_seq_length, num_heads, C_key)
- prev_multi_value
Shape (batch_size, prev_seq_length, num_heads, C_value)
2. layout = 'TN'
- prev_multi_key
Shape (prev_seq_length, batch_size, num_heads, C_key)
- prev_multi_value
Shape (prev_seq_length, batch_size, num_heads, C_value)
mem
The memory
1. layout = 'NT'
Shape (batch_size, mem_length, C_mem)
2. layout = 'TN'
Shape (mem_length, batch_size, C_mem)
mem_valid_length
Valid length of the memory
Shape (batch_size,)
Returns
-------
out
Shape (batch_size, C_out)
new_states
The updated states, contain a list of
1. layout = 'NT'
- new_key
Shape (batch_size, prev_seq_length + 1, num_heads, C_key)
- new_value
Shape (batch_size, prev_seq_length + 1, num_heads, C_value)
2. layout = 'TN'
- new_key
Shape (prev_seq_length + 1, batch_size, num_heads, C_key)
- new_value
Shape (prev_seq_length + 1, batch_size, num_heads, C_value)
"""
# 1. Embed the data
out = self.dropout_layer(data)
if self._data_norm:
out = self.ln_data(out)
time_axis = 0 if self.layout == 'TN' else 1
# Generate the mem_attn_mask
time_steps = npx.arange_like(mem, axis=time_axis) # (mem_length,)
mem_attn_mask = np.reshape(time_steps, (1, 1, -1))\
< np.reshape(mem_valid_length, (-1, 1, 1))
# TODO(sxjscience) Try with boolean masking
mem_attn_mask = mem_attn_mask.astype(self._dtype)
new_states = []
for i in range(self.num_layers):
if self.recurrent:
layer = self.layers[0]
else:
layer = self.layers[i]
out, new_state = layer.incremental_decode(out, states[i],
mem, mem_valid_length, mem_attn_mask)
new_states.append(new_state)
if self._pre_norm:
out = self.ln_final(out)
return out, new_states | Incrementally generate the output given the decoder input.
Parameters
----------
data
Shape (batch_size, C_in)
states
The previous states, contain a list of
1. layout = 'NT'
- prev_multi_key
Shape (batch_size, prev_seq_length, num_heads, C_key)
- prev_multi_value
Shape (batch_size, prev_seq_length, num_heads, C_value)
2. layout = 'TN'
- prev_multi_key
Shape (prev_seq_length, batch_size, num_heads, C_key)
- prev_multi_value
Shape (prev_seq_length, batch_size, num_heads, C_value)
mem
The memory
1. layout = 'NT'
Shape (batch_size, mem_length, C_mem)
2. layout = 'TN'
Shape (mem_length, batch_size, C_mem)
mem_valid_length
Valid length of the memory
Shape (batch_size,)
Returns
-------
out
Shape (batch_size, C_out)
new_states
The updated states, contain a list of
1. layout = 'NT'
- new_key
Shape (batch_size, prev_seq_length + 1, num_heads, C_key)
- new_value
Shape (batch_size, prev_seq_length + 1, num_heads, C_value)
2. layout = 'TN'
- new_key
Shape (prev_seq_length + 1, batch_size, num_heads, C_key)
- new_value
Shape (prev_seq_length + 1, batch_size, num_heads, C_value)
| incremental_decode | python | dmlc/gluon-nlp | src/gluonnlp/models/transformer.py | https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/models/transformer.py | Apache-2.0 |
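A hedged shape-check sketch of the decoder-level incremental API: initialize the empty per-layer key/value states, then feed one feature vector per step. Real usage embeds token ids first (see the inference wrapper later in this file); sizes are arbitrary.
import mxnet as mx
from gluonnlp.models.transformer import TransformerDecoder

decoder = TransformerDecoder(num_layers=2, units=512, mem_units=512,
                             hidden_size=2048, num_heads=8)
decoder.initialize()
batch_size = 2
mem = mx.np.random.normal(0, 1, (batch_size, 7, 512))     # (B, mem_length, C_mem), layout 'NT'
mem_valid_length = mx.np.array([7, 5], dtype='float32')
states = decoder.init_states(batch_size, mx.cpu(), 'float32')
for _ in range(3):
    step_data = mx.np.random.normal(0, 1, (batch_size, 512))   # (B, C_in)
    out, states = decoder.incremental_decode(step_data, states, mem, mem_valid_length)
    # out: (batch_size, C_out); each layer's key/value state grows by one step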
def __init__(self, src_vocab_size: int,
tgt_vocab_size: int,
max_src_length: Optional[int] = None,
max_tgt_length: Optional[int] = None,
scale_embed: bool = True,
pos_embed_type="sinusoidal",
shared_embed: bool = True,
tie_weights: bool = True,
activation_dropout: float = 0.0,
dropout: float = 0.1,
attention_dropout: float = 0.1,
layer_norm_eps: float = 1E-5,
data_norm: bool = False,
enc_units: int = 512,
enc_hidden_size: int = 2048,
enc_num_heads: int = 8,
enc_num_layers: int = 6,
enc_recurrent: bool = False,
enc_activation='relu',
enc_pre_norm: bool = False,
enc_use_qkv_bias: bool = True,
dec_units: int = 512,
dec_hidden_size: int = 2048,
dec_num_heads: int = 8,
dec_num_layers: int = 6,
dec_recurrent: bool = False,
dec_activation='relu',
dec_pre_norm: bool = False,
dec_use_qkv_bias: bool = True,
embed_initializer=mx.init.Xavier('gaussian', 'in', 1),
weight_initializer=mx.init.Xavier('uniform', 'avg', 3),
bias_initializer='zeros',
dtype='float32',
layout='NT'):
"""
Parameters
----------
src_vocab_size
The vocabulary size of the source language
tgt_vocab_size
The vocabulary size of the target language
max_src_length
The maximal length of the source sequence.
If it's negative, we will treat it as not set.
max_tgt_length
The maximal length of the target sequence.
If it's negative, we will treat it as not set.
scale_embed
Whether to multiply the src and dst embeddings by sqrt(units)
pos_embed_type
Type of the positional embedding
shared_embed
Whether to share the embedding of the src and tgt language
tie_weights
Whether to tie the input and output embedding weights.
activation_dropout
The ratio of the activation dropout in FFN
dropout
The default dropout ratio
attention_dropout
The ratio of the attention dropout
layer_norm_eps
The epsilon of the layer normalization
data_norm
Whether to add layer normalization layer after the input.
enc_units
Units of the encoder
enc_hidden_size
Hidden size of the encoder
enc_num_heads
Number of heads of the encoder
enc_num_layers
Number of layers of the encoder
enc_recurrent
Whether to use recurrent encoder (share weights)
enc_activation
Activation of the encoder layer
enc_pre_norm
Whether to add layer_norm before self-attention in the encoder
enc_use_qkv_bias
Whether to use bias for the attention layers in the encoder
dec_units
Units of the decoder
dec_hidden_size
Hidden size of the decoder
dec_num_heads
Number of heads of the decoder
dec_num_layers
Number of layers of the decoder
dec_recurrent
Whether to use recurrent decoder (share weights)
dec_activation
Activation of the decoder layer
dec_pre_norm
Whether to add layer_norm before self-attention in the decoder
dec_use_qkv_bias
Whether to use bias for the attention layers in the decoder
embed_initializer
Initializer of the embedding layer
weight_initializer
Initializer of the weight
bias_initializer
Initializer of the bias
dtype
Data type of the weights
layout
The layout of the source and target sequences
"""
super().__init__()
assert src_vocab_size > 0 and tgt_vocab_size > 0,\
'Cannot set "src_vocab_size" and "tgt_vocab_size" to negative numbers. ' \
'Are you creating ' \
'the model with the config from TransformerModel.get_cfg()? If that is ' \
'the case, you will need to set the cfg.MODEL.src_vocab_size and ' \
'cfg.MODEL.tgt_vocab_size manually before passing to ' \
'TransformerModel.from_cfg().'
self._dtype = dtype
self._src_vocab_size = src_vocab_size
self._tgt_vocab_size = tgt_vocab_size
self.tie_weights = tie_weights
self.pos_embed_type = pos_embed_type
self.scaled_embed = scale_embed
self.enc_units = enc_units
self.dec_units = dec_units
self.weight_initializer = weight_initializer
self.bias_initializer = bias_initializer
self._layout = layout
assert layout in ['TN', 'NT'], 'Invalid layout received = {}. ' \
'Only "TN" and "NT" are accepted!'.format(layout)
if max_src_length is not None and max_src_length < 0:
max_src_length = None
if max_tgt_length is not None and max_tgt_length < 0:
max_tgt_length = None
if enc_units != dec_units:
assert shared_embed is False, 'Cannot share embedding when the enc_units and dec_units ' \
'are different! enc_units={},' \
' dec_units={}'.format(enc_units, dec_units)
self.src_embed_layer = nn.Embedding(input_dim=src_vocab_size,
output_dim=enc_units,
weight_initializer=embed_initializer,
dtype=self._dtype)
self.tgt_embed_layer = nn.Embedding(input_dim=tgt_vocab_size,
output_dim=dec_units,
weight_initializer=embed_initializer,
dtype=self._dtype)
if shared_embed:
self.tgt_embed_layer.weight = self.src_embed_layer.weight
if pos_embed_type is not None:
self.src_pos_embed_layer = PositionalEmbedding(units=enc_units,
max_length=max_src_length,
dtype=self._dtype,
method=pos_embed_type)
self.tgt_pos_embed_layer = PositionalEmbedding(units=dec_units,
max_length=max_tgt_length,
dtype=self._dtype,
method=pos_embed_type)
self.encoder = TransformerEncoder(num_layers=enc_num_layers,
recurrent=enc_recurrent,
units=enc_units,
hidden_size=enc_hidden_size,
num_heads=enc_num_heads,
activation_dropout=activation_dropout,
use_qkv_bias=enc_use_qkv_bias,
dropout=dropout,
attention_dropout=attention_dropout,
layer_norm_eps=layer_norm_eps,
weight_initializer=weight_initializer,
bias_initializer=bias_initializer,
activation=enc_activation,
data_norm=data_norm,
pre_norm=enc_pre_norm,
dtype=self._dtype,
layout=layout)
self.decoder = TransformerDecoder(num_layers=dec_num_layers,
recurrent=dec_recurrent,
units=dec_units,
mem_units=enc_units,
hidden_size=dec_hidden_size,
num_heads=dec_num_heads,
activation_dropout=activation_dropout,
use_qkv_bias=dec_use_qkv_bias,
dropout=dropout,
attention_dropout=attention_dropout,
layer_norm_eps=layer_norm_eps,
weight_initializer=weight_initializer,
bias_initializer=bias_initializer,
activation=dec_activation,
data_norm=data_norm,
pre_norm=dec_pre_norm,
dtype=self._dtype,
layout=layout)
if tie_weights:
self.tgt_final_layer = \
nn.Dense(units=tgt_vocab_size,
flatten=False,
in_units=self.dec_units,
bias_initializer=bias_initializer,
use_bias=False,
dtype=self._dtype)
self.tgt_final_layer.weight = self.tgt_embed_layer.weight
else:
self.tgt_final_layer = \
nn.Dense(tgt_vocab_size,
flatten=False,
in_units=self.dec_units,
weight_initializer=weight_initializer,
bias_initializer=bias_initializer,
use_bias=False,
dtype=self._dtype) |
Parameters
----------
src_vocab_size
The vocabulary size of the source language
tgt_vocab_size
The vocabulary size of the target language
max_src_length
The maximal length of the source sequence.
If it's negative, we will treat it as not set.
max_tgt_length
The maximal length of the target sequence.
If it's negative, we will treat it as not set.
scale_embed
Whether to multiply the src and dst embeddings by sqrt(units)
pos_embed_type
Type of the positional embedding
shared_embed
Whether to share the embedding of the src and tgt language
tie_weights
Whether to tie the input and output embedding weights.
activation_dropout
The ratio of the activation dropout in FFN
dropout
The default dropout ratio
attention_dropout
The ratio of the attention dropout
layer_norm_eps
The epsilon of the layer normalization
data_norm
Whether to add layer normalization layer after the input.
enc_units
Units of the encoder
enc_hidden_size
Hidden size of the encoder
enc_num_heads
Number of heads of the encoder
enc_num_layers
Number of layers of the encoder
enc_recurrent
Whether to use recurrent encoder (share weights)
enc_activation
Activation of the encoder layer
enc_pre_norm
Whether to add layer_norm before self-attention in the encoder
enc_use_qkv_bias
Whether to use bias for the attention layers in the encoder
dec_units
Units of the decoder
dec_hidden_size
Hidden size of the decoder
dec_num_heads
Number of heads of the decoder
dec_num_layers
Number of layers of the decoder
dec_recurrent
Whether to use recurrent decoder (share weights)
dec_activation
Activation of the decoder layer
dec_pre_norm
Whether to add layer_norm before self-attention in the decoder
dec_use_qkv_bias
Whether to use bias for the attention layers in the decoder
embed_initializer
Initializer of the embedding layer
weight_initializer
Initializer of the weight
bias_initializer
Initializer of the bias
dtype
Data type of the weights
layout
The layout of the source and target sequences
| __init__ | python | dmlc/gluon-nlp | src/gluonnlp/models/transformer.py | https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/models/transformer.py | Apache-2.0 |
def encode(self, src_data, src_valid_length):
"""Encode the source data to memory
Parameters
----------
src_data
- layout = 'NT'
Shape (batch_size, src_length)
- layout = 'TN'
Shape (src_length, batch_size)
src_valid_length
Shape (batch_size,)
Returns
-------
enc_out
- layout = 'NT'
Shape (batch_size, src_length, C_out)
- layout = 'TN'
Shape (src_length, batch_size, C_out)
"""
src_data = self.src_embed_layer(src_data)
if self.scaled_embed:
src_data = src_data * _np.sqrt(self.enc_units)
if self.pos_embed_type is not None:
if self.layout == 'NT':
src_data = src_data + self.src_pos_embed_layer(npx.arange_like(src_data, axis=1))
else:
src_data = src_data + np.expand_dims(self.src_pos_embed_layer(
npx.arange_like(src_data, axis=0)), axis=1)
enc_out = self.encoder(src_data, src_valid_length)
return enc_out | Encode the source data to memory
Parameters
----------
src_data
- layout = 'NT'
Shape (batch_size, src_length)
- layout = 'TN'
Shape (src_length, batch_size)
src_valid_length
Shape (batch_size,)
Returns
-------
enc_out
- layout = 'NT'
Shape (batch_size, src_length, C_out)
- layout = 'TN'
Shape (src_length, batch_size, C_out)
| encode | python | dmlc/gluon-nlp | src/gluonnlp/models/transformer.py | https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/models/transformer.py | Apache-2.0 |
def decode_seq(self, tgt_data, tgt_valid_length, mem_data, mem_valid_length):
"""Decode a sequence of inputs
Parameters
----------
tgt_data
- layout = 'NT'
Shape (batch_size, tgt_length)
- layout = 'TN'
Shape (tgt_length, batch_size)
tgt_valid_length
Shape (batch_size,)
mem_data
- layout = 'NT'
Shape (batch_size, src_length, C_out)
- layout = 'TN'
Shape (src_length, batch_size, C_out)
mem_valid_length :
Shape (batch_size,)
Returns
-------
dec_out
- layout = 'NT'
Shape (batch_size, tgt_length, tgt_vocab_size)
- layout = 'TN'
Shape (tgt_length, batch_size, tgt_vocab_size)
"""
tgt_data = self.tgt_embed_layer(tgt_data)
if self.scaled_embed:
tgt_data = tgt_data * _np.sqrt(self.dec_units)
if self.pos_embed_type is not None:
if self.layout == 'NT':
tgt_data = tgt_data + self.tgt_pos_embed_layer(
npx.arange_like(tgt_data, axis=1))
else:
tgt_data = tgt_data + np.expand_dims(self.tgt_pos_embed_layer(
npx.arange_like(tgt_data, axis=0)), axis=1)
dec_out = self.decoder(tgt_data, tgt_valid_length, mem_data, mem_valid_length)
return dec_out | Decode a sequence of inputs
Parameters
----------
tgt_data
- layout = 'NT'
Shape (batch_size, tgt_length)
- layout = 'TN'
Shape (tgt_length, batch_size)
tgt_valid_length
Shape (batch_size,)
mem_data
- layout = 'NT'
Shape (batch_size, src_length, C_out)
- layout = 'TN'
Shape (src_length, batch_size, C_out)
mem_valid_length :
Shape (batch_size,)
Returns
-------
dec_out
- layout = 'NT'
Shape (batch_size, tgt_length, tgt_vocab_size)
- layout = 'TN'
Shape (tgt_length, batch_size, tgt_vocab_size)
| decode_seq | python | dmlc/gluon-nlp | src/gluonnlp/models/transformer.py | https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/models/transformer.py | Apache-2.0 |
def forward(self, src_data, src_valid_length, tgt_data, tgt_valid_length):
"""
Parameters
----------
src_data
- layout = 'NT'
Shape (batch_size, src_length)
- layout = 'TN'
Shape (src_length, batch_size)
src_valid_length
Shape (batch_size,)
tgt_data
- layout = 'NT'
Shape (batch_size, tgt_length)
- layout = 'TN'
Shape (tgt_length, batch_size)
tgt_valid_length
Shape (batch_size,)
Returns
-------
out
- layout = 'NT'
Shape (batch_size, tgt_length, tgt_vocab_size)
- layout = 'TN'
Shape (tgt_length, batch_size, tgt_vocab_size)
"""
enc_out = self.encode(src_data, src_valid_length)
dec_out = self.decode_seq(tgt_data, tgt_valid_length, enc_out, src_valid_length)
dec_out = self.tgt_final_layer(dec_out)
return dec_out |
Parameters
----------
src_data
- layout = 'NT'
Shape (batch_size, src_length)
- layout = 'TN'
Shape (src_length, batch_size)
src_valid_length
Shape (batch_size,)
tgt_data
- layout = 'NT'
Shape (batch_size, tgt_length)
- layout = 'TN'
Shape (tgt_length, batch_size)
tgt_valid_length
Shape (batch_size,)
Returns
-------
out
- layout = 'NT'
Shape (batch_size, tgt_length, tgt_vocab_size)
- layout = 'TN'
Shape (tgt_length, batch_size, tgt_vocab_size)
| forward | python | dmlc/gluon-nlp | src/gluonnlp/models/transformer.py | https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/models/transformer.py | Apache-2.0 |
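A hedged end-to-end sketch of a teacher-forced forward pass with random token ids under the default 'NT' layout; vocabulary sizes and sequence lengths are arbitrary.
import mxnet as mx
from gluonnlp.models.transformer import transformer_base, TransformerModel

cfg = transformer_base()
cfg.defrost()
cfg.MODEL.src_vocab_size = 1000     # illustrative sizes
cfg.MODEL.tgt_vocab_size = 1000
cfg.freeze()
model = TransformerModel.from_cfg(cfg)
model.initialize()

src_data = mx.np.random.randint(0, 1000, (2, 8), dtype='int32')   # (batch_size, src_length)
tgt_data = mx.np.random.randint(0, 1000, (2, 7), dtype='int32')   # (batch_size, tgt_length)
src_valid_length = mx.np.array([8, 6], dtype='float32')
tgt_valid_length = mx.np.array([7, 5], dtype='float32')
out = model(src_data, src_valid_length, tgt_data, tgt_valid_length)
# out: (batch_size, tgt_length, tgt_vocab_size)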
def state_batch_axis(self) -> Tuple[int, int, int, List]:
"""Return a data structure that stores the batch axis of the internal states
of the inference model.
Returns
-------
enc_out_batch_axis
src_valid_length_batch_axis
position_batch_axis
dec_layer_batch_axis
"""
if self.model.layout == 'NT':
return 0, 0, 0, self.model.decoder.state_batch_axis
else:
return 1, 0, 0, self.model.decoder.state_batch_axis | Return a data structure that stores the batch axis of the internal states
of the inference model.
Returns
-------
enc_out_batch_axis
src_valid_length_batch_axis
position_batch_axis
dec_layer_batch_axis
| state_batch_axis | python | dmlc/gluon-nlp | src/gluonnlp/models/transformer.py | https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/models/transformer.py | Apache-2.0 |
def init_states(self, src_data, src_valid_length): # TODO(sxjscience) Revisit here, support auxiliary states?
"""Initialize the states required for incremental decoding
Parameters
----------
src_data
- layout = 'NT'
Shape (batch_size, src_length)
- layout = 'TN'
Shape (src_length, batch_size)
src_valid_length
Shape (batch_size,)
Returns
-------
enc_out
- layout = 'NT'
Shape (batch_size, src_length, C_mem)
- layout = 'TN'
Shape (src_length, batch_size, C_mem)
src_valid_length
Shape (batch_size,)
position
Shape (batch_size,)
dec_states: list
The states of the decoder
"""
if self.model.layout == 'NT':
batch_size = src_data.shape[0]
else:
batch_size = src_data.shape[1]
ctx = src_data.ctx
enc_out = self.model.encode(src_data, src_valid_length)
position = mx.np.zeros((batch_size,), dtype=np.int32, ctx=ctx)
dtype = enc_out.dtype
dec_states = self.model.decoder.init_states(batch_size, ctx, dtype)
return enc_out, src_valid_length, position, dec_states | Initialize the states required for incremental decoding
Parameters
----------
src_data
- layout = 'NT'
Shape (batch_size, src_length)
- layout = 'TN'
Shape (src_length, batch_size)
src_valid_length
Shape (batch_size,)
Returns
-------
enc_out
- layout = 'NT'
Shape (batch_size, src_length, C_mem)
- layout = 'TN'
Shape (src_length, batch_size, C_mem)
src_valid_length
Shape (batch_size,)
position
Shape (batch_size,)
dec_states: list
The states of the decoder
| init_states | python | dmlc/gluon-nlp | src/gluonnlp/models/transformer.py | https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/models/transformer.py | Apache-2.0 |
def forward(self, step_data, states):
"""
Parameters
----------
step_data
Shape (batch_size,)
states
It includes :
- layout = 'NT'
- mem_data : (batch_size, src_length, C_mem)
- mem_valid_length : (batch_size,)
- position : (batch_size,)
- dec_states : list
- layout = 'TN'
- mem_data : (src_length, batch_size, C_mem)
- mem_valid_length : (batch_size,)
- position : (batch_size,)
- dec_states : list
Returns
-------
out
Shape (batch_size, C)
new_states
Has the same structure as the states
"""
mem_data, mem_valid_length, position, dec_states = states
# 1. Get the embedding
step_data = self.model.tgt_embed_layer(step_data)
if self.model.scaled_embed:
step_data = step_data * _np.sqrt(self.model.dec_units)
if self.model.pos_embed_type is not None:
step_data = step_data + self.model.tgt_pos_embed_layer(position)
out, new_states =\
self.model.decoder.incremental_decode(step_data, dec_states,
mem_data, mem_valid_length)
out = self.model.tgt_final_layer(out)
return out, (mem_data, mem_valid_length, position + 1, new_states) |
Parameters
----------
step_data
Shape (batch_size,)
states
It includes :
- layout = 'NT'
- mem_data : (batch_size, src_length, C_mem)
- mem_valid_length : (batch_size,)
- position : (batch_size,)
- dec_states : list
- layout = 'TN'
- mem_data : (src_length, batch_size, C_mem)
- mem_valid_length : (batch_size,)
- position : (batch_size,)
- dec_states : list
Returns
-------
out
Shape (batch_size, C)
new_states
Has the same structure as the states
| forward | python | dmlc/gluon-nlp | src/gluonnlp/models/transformer.py | https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/models/transformer.py | Apache-2.0 |
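A hedged sketch of greedy decoding built on the stepwise interface above. `inference_model` is assumed to be an already-constructed wrapper around a trained TransformerModel that exposes `init_states()` and the stepwise `forward()`; `bos_id` and `max_steps` are made up. In practice such a stepwise decoder is typically combined with beam search.
import mxnet as mx

def greedy_decode(inference_model, src_data, src_valid_length, bos_id, max_steps=20):
    states = inference_model.init_states(src_data, src_valid_length)
    batch_size = src_data.shape[0]                  # assuming layout 'NT'
    step_input = mx.np.full((batch_size,), bos_id, dtype='int32', ctx=src_data.ctx)
    outputs = []
    for _ in range(max_steps):
        logits, states = inference_model(step_input, states)   # (batch_size, vocab_size)
        step_input = mx.np.argmax(logits, axis=-1).astype('int32')
        outputs.append(step_input)
    return mx.np.stack(outputs, axis=1)             # (batch_size, max_steps)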
def forward(self, data, mem, rel_positions, mask, query_r_bias, query_k_bias):
"""
Parameters
----------
data
The input data.
- layout = 'NT'
Shape (batch_size, query_length, units)
- layout = 'TN'
Shape (query_length, batch_size, units)
mem
The memory.
- layout = 'NT'
Shape (batch_size, mem_length, units)
- layout = 'TN'
Shape (mem_length, batch_size, units)
rel_positions
The relative positions between data and concat(mem, data).
Shape is (query_length, mem_length + query_length).
A positive value means that query is after the memory, i.e.,
query_location - mem_location.
mask
Mask between the query and the memory + query.
1--> will be used, 0 --> won't be used
Shape (batch_size, query_length, mem_length + query_length)
query_r_bias
The query bias for calculating the relative scores
Shape (num_heads, query_head_units)
query_k_bias
The key bias for calculating the relative scores.
Shape (num_heads, query_head_units)
Returns
-------
out
- layout = 'NT'
Shape (batch_size, query_length, units)
- layout = 'TN'
Shape (query_length, batch_size, units)
"""
if self._layout == 'NT':
context = np.concatenate([mem, data], axis=1)
elif self._layout == 'TN':
context = np.concatenate([mem, data], axis=0)
else:
raise NotImplementedError
if self._pre_norm:
query = self.attn_query(self.layer_norm(data))
key_value = self.attn_kv(self.layer_norm(context))
key, value = np.split(key_value, 2, axis=-1)
else:
query = self.attn_query(data)
key_value = self.attn_kv(context)
key, value = np.split(key_value, 2, axis=-1)
query = npx.reshape(query, (-2, -2, self._num_heads, -1))
key = npx.reshape(key, (-2, -2, self._num_heads, -1))
value = npx.reshape(value, (-2, -2, self._num_heads, -1))
# Compute attention
rel_score = self.rel_pos_score_cell(rel_positions, query + query_r_bias)
out, _ = self.attn_cell(query + query_k_bias, key, value, mask, rel_score)
out = self.dropout_layer(out)
if self._pre_norm:
out = data + out
else:
out = self.layer_norm(data + out)
out = self.ffn(out)
return out |
Parameters
----------
data
The input data.
- layout = 'NT'
Shape (batch_size, query_length, units)
- layout = 'TN'
Shape (query_length, batch_size, units)
mem
The memory.
- layout = 'NT'
Shape (batch_size, mem_length, units)
- layout = 'TN'
Shape (mem_length, batch_size, units)
rel_positions
The relative positions between data and concat(mem, data).
Shape is (query_length, mem_length + query_length).
A positive value means that query is after the memory, i.e.,
query_location - mem_location.
mask
Mask between the query and the memory + query.
1--> will be used, 0 --> won't be used
Shape (batch_size, query_length, mem_length + query_length)
query_r_bias
The query bias for calculating the relative scores
Shape (num_heads, query_head_units)
query_k_bias
The key bias for calculating the relative scores.
Shape (num_heads, query_head_units)
Returns
-------
out
- layout = 'NT'
Shape (batch_size, query_length, units)
- layout = 'TN'
Shape (query_length, batch_size, units)
| forward | python | dmlc/gluon-nlp | src/gluonnlp/models/transformer_xl.py | https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/models/transformer_xl.py | Apache-2.0 |
def forward(self, data, mem_l, rel_positions, mask):
"""
Parameters
----------
data
- layout = 'NT'
Shape (batch_size, query_length)
- layout = 'TN'
Shape (query_length, batch_size)
mem_l
Contains a list of memory objects, each one will contain:
- layout = 'NT'
Shape (batch_size, mem_length, C_i)
- layout = 'TN'
Shape (mem_length, batch_size, C_i)
rel_positions
The relative positions.
Shape (query_length, mem_length + query_length)
mask
Mask between the query and the memory + query.
Shape (batch_size, query_length, mem_length + query_length)
Returns
-------
out_l
Contains a list of hidden states, each will contain:
- layout = 'NT'
Shape (batch_size, query_length, C_o)
- layout = 'TN'
Shape (query_length, batch_size, C_o)
"""
query_k_bias = self.query_k_bias.data()
query_r_bias = self.query_r_bias.data()
out_l = []
out = data
for i, layer in enumerate(self.decoder_layers):
out = layer(out, mem_l[i], rel_positions, mask, query_r_bias, query_k_bias)
out_l.append(out)
return out_l |
Parameters
----------
data
- layout = 'NT'
Shape (batch_size, query_length)
- layout = 'TN'
Shape (query_length, batch_size)
mem_l
Contains a list of memory objects, each one will contain:
- layout = 'NT'
Shape (batch_size, mem_length, C_i)
- layout = 'TN'
Shape (mem_length, batch_size, C_i)
rel_positions
The relative positions.
Shape (query_length, mem_length + query_length)
mask
Mask between the query and the memory + query.
Shape (batch_size, query_length, mem_length + query_length)
Returns
-------
out_l
Contains a list of hidden states, each will contain:
- layout = 'NT'
Shape (batch_size, query_length, C_o)
- layout = 'TN'
Shape (query_length, batch_size, C_o)
| forward | python | dmlc/gluon-nlp | src/gluonnlp/models/transformer_xl.py | https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/models/transformer_xl.py | Apache-2.0 |
def init_states(self, batch_size, ctx):
"""Initialize the states
Parameters
----------
batch_size
ctx
The context (device) on which the memory is initialized
Returns
-------
mems
A list of memory states
- layout = 'NT'
Shape (B, T, C)
- layout = 'TN'
Shape (T, B, C)
"""
if self._layout == 'NT':
return [mx.np.zeros((batch_size, 0, self._units), ctx=ctx)
for _ in range(self._num_layers)]
elif self._layout == 'TN':
return [mx.np.zeros((0, batch_size, self._units), ctx=ctx)
for _ in range(self._num_layers)]
else:
raise NotImplementedError | Initialize the states
Parameters
----------
batch_size
ctx
The context (device) on which the memory is initialized
Returns
-------
mems
A list of memory states
- layout = 'NT'
Shape (B, T, C)
- layout = 'TN'
Shape (T, B, C)
| init_states | python | dmlc/gluon-nlp | src/gluonnlp/models/transformer_xl.py | https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/models/transformer_xl.py | Apache-2.0 |
def set_mem_length(self, mem_length: int):
"""
Parameters
----------
mem_length
The memory length of the model
"""
self._cfg.defrost()
self._cfg.MODEL.mem_length = mem_length
self._cfg.freeze() |
Parameters
----------
mem_length
The memory length of the model
| set_mem_length | python | dmlc/gluon-nlp | src/gluonnlp/models/transformer_xl.py | https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/models/transformer_xl.py | Apache-2.0 |
def forward(self, data, target, mem_l, rel_positions=None, data_mem_mask=None,
causal_only=False, detach_memory=True):
"""
Parameters
----------
data
The input data
- layout = 'NT'
Shape (B, T)
- layout = 'TN'
Shape (T, B)
target
The ground truth
- layout = 'NT'
Shape (B, T)
- layout = 'TN'
Shape (T, B)
mem_l
A list of memory objects
- layout = 'NT'
Shape (B, T_mem, units)
- layout = 'TN'
Shape (T_mem, B, units)
rel_positions
Shape (query_length, mem_length + query_length)
By default, we will use the following relative positions
.. code-block:: none
['I', 'can', 'now', 'use', 'numpy', 'in', 'Gluon@@', 'NLP']
'in': 5, 4, 3, 2, 1, 0, -1, -2
'Gluon@@': 6, 5, 4, 3, 2, 1, 0, -1
'NLP': 7, 6, 5, 4, 3, 2, 1, 0
data_mem_mask
Shape (B, query_length, mem_length + query_length)
Here, 1 --> will be used, 0 --> won't be used.
By default, we will mask all locations that have distance > mem_length with the
current token.
Following is an example in which query_length = 3, mem_length = 4
.. code-block:: none
|------- <mem> ----------|--------- <query> ------------|
<query> ['I', 'can', 'now', 'use', 'numpy', 'in', 'Gluon@@', 'NLP']
'numpy': 1, 1, 1, 1, 1, 0, 0, 0
'in': 0, 1, 1, 1, 1, 1, 0, 0
'Gluon@@': 0, 0, 1, 1, 1, 1, 1, 0
'NLP': 0, 0, 0, 1, 1, 1, 1, 1
Also, we provide the option to only mask the future tokens; this is
enabled by setting `causal_only` to True. However, there will be a
discrepancy between training and inference because the effective memory length is
longer for the later tokens in the query.
.. code-block:: none
|------- <mem> ----------|--------- <query> ------------|
<query> ['I', 'can', 'now', 'use', 'numpy', 'in', 'Gluon@@', 'NLP']
'numpy': 1, 1, 1, 1, 1, 0, 0, 0
'in': 1, 1, 1, 1, 1, 1, 0, 0
'Gluon@@': 1, 1, 1, 1, 1, 1, 1, 0
'NLP': 1, 1, 1, 1, 1, 1, 1, 1
causal_only
Whether to ignore the local masking constraint. See the flag above for more information.
detach_memory
Whether to detach the encoded memory from the graph.
Returns
-------
logits
The selected logits
- layout = 'NT'
Shape (B, T)
- layout = 'TN'
Shape (T, B)
new_mem_l
A list of the updated memory
- layout = 'NT'
Each will have shape (B, T, C)
- layout = 'TN'
Each will have shape (T, B, C)
"""
# Note that curr_mem_length will not necessarily be equal to mem_length
if self._layout == 'NT':
time_axis = 1
batch_axis = 0
elif self._layout == 'TN':
time_axis = 0
batch_axis = 1
else:
raise NotImplementedError
query_length = data.shape[time_axis]
curr_mem_length = mem_l[0].shape[time_axis]
batch_size = mem_l[0].shape[batch_axis]
ctx = data.ctx
local_attn_mask = mx.np.ones((batch_size, query_length, curr_mem_length + query_length),
dtype=np.int32, ctx=ctx)
if not causal_only:
# Generate the mask, we mask out the input outside the local self.mem_length window
local_attn_mask = mx.np.triu(mx.np.tril(local_attn_mask, curr_mem_length),
curr_mem_length - self.mem_length)
else:
local_attn_mask = mx.np.tril(local_attn_mask, curr_mem_length)
if data_mem_mask is None:
data_mem_mask = local_attn_mask
else:
data_mem_mask = data_mem_mask * local_attn_mask
if rel_positions is None:
query_ids = mx.np.arange(curr_mem_length, curr_mem_length + query_length,
dtype=np.int32, ctx=ctx)
mem_ids = mx.np.arange(0, curr_mem_length + query_length,
dtype=np.int32, ctx=ctx)
rel_positions = mx.np.expand_dims(query_ids, axis=1)\
- mx.np.expand_dims(mem_ids, axis=0)
# Get word embeddings
word_embeddings = self.word_emb(data)
word_embeddings = self.dropout_layer(word_embeddings)
out_l = self.decoder(word_embeddings, mem_l, rel_positions, data_mem_mask)
# Get the output logits
logits = self.crit(out_l[-1], target)
# Get the new memory
new_mem_l = []
for step_out, mem in zip([word_embeddings] + out_l, mem_l):
new_mem = mx.np.concatenate([mem, step_out], axis=time_axis)
if self._layout == 'NT':
new_mem = new_mem[:, -self.mem_length:]
elif self._layout == 'TN':
new_mem = new_mem[-self.mem_length:, :]
else:
raise NotImplementedError
if detach_memory:
new_mem_l.append(new_mem.detach())
else:
new_mem_l.append(new_mem)
return logits, new_mem_l |
Parameters
----------
data
The input data
- layout = 'NT'
Shape (B, T)
- layout = 'TN'
Shape (T, B)
target
The ground truth
- layout = 'NT'
Shape (B, T)
- layout = 'TN'
Shape (T, B)
mem_l
A list of memory objects
- layout = 'NT'
Shape (B, T_mem, units)
- layout = 'TN'
Shape (T_mem, B, units)
rel_positions
Shape (query_length, mem_length + query_length)
By default, we will use the following relative positions
.. code-block:: none
['I', 'can', 'now', 'use', 'numpy', 'in', 'Gluon@@', 'NLP']
'in': 5, 4, 3, 2, 1, 0, -1, -2
'Gluon@@': 6, 5, 4, 3, 2, 1, 0, -1
'NLP': 7, 6, 5, 4, 3, 2, 1, 0
data_mem_mask
Shape (B, query_length, mem_length + query_length)
Here, 1 --> will be used, 0 --> won't be used.
By default, we will mask all locations that have distance > mem_length with the
current token.
Following is an example in which query_length = 3, mem_length = 4
.. code-block:: none
|------- <mem> ----------|--------- <query> ------------|
<query> ['I', 'can', 'now', 'use', 'numpy', 'in', 'Gluon@@', 'NLP']
'numpy': 1, 1, 1, 1, 1, 0, 0, 0
'in': 0, 1, 1, 1, 1, 1, 0, 0
'Gluon@@': 0, 0, 1, 1, 1, 1, 1, 0
'NLP': 0, 0, 0, 1, 1, 1, 1, 1
Also, we provide the option to only mask the future tokens; this is
enabled by setting `causal_only` to True. However, there will be a
discrepancy between training and inference because the effective memory length is
longer for the later tokens in the query.
.. code-block:: none
|------- <mem> ----------|--------- <query> ------------|
<query> ['I', 'can', 'now', 'use', 'numpy', 'in', 'Gluon@@', 'NLP']
'numpy': 1, 1, 1, 1, 1, 0, 0, 0
'in': 1, 1, 1, 1, 1, 1, 0, 0
'Gluon@@': 1, 1, 1, 1, 1, 1, 1, 0
'NLP': 1, 1, 1, 1, 1, 1, 1, 1
causal_only
Whether to ignore the local masking constraint. See the flag above for more information.
detach_memory
Whether to detach the encoded memory from the graph.
Returns
-------
logits
The selected logits
- layout = 'NT'
Shape (B, T)
- layout = 'TN'
Shape (T, B)
new_mem_l
A list of the updated memory
- layout = 'NT'
Each will have shape (B, T, C)
- layout = 'TN'
Each will have shape (T, B, C)
| forward | python | dmlc/gluon-nlp | src/gluonnlp/models/transformer_xl.py | https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/models/transformer_xl.py | Apache-2.0 |
def step_forward(self, step_data, mem_l):
"""Forward for just one step
Parameters
----------
step_data
Shape (B,)
mem_l
A list of memory objects
- layout = 'NT'
Shape (B, T_mem, units)
- layout = 'TN'
Shape (T_mem, B, units)
Returns
-------
logits
Shape (B, V)
new_mem_l
A list of memory objects
- layout = 'NT'
Shape (B, min(T_mem + 1, memory_length), C)
- layout = 'TN'
Shape (min(T_mem + 1, memory_length), B, C)
"""
batch_size = step_data.shape[0]
if self._layout == 'NT':
curr_mem_length = mem_l[0].shape[1]
elif self._layout == 'TN':
curr_mem_length = mem_l[0].shape[0]
else:
raise NotImplementedError
ctx = step_data.ctx
mask = mx.np.ones((batch_size, 1, curr_mem_length + 1), dtype=np.int32, ctx=ctx)
rel_positions = mx.np.expand_dims(mx.np.arange(curr_mem_length, -1, -1, dtype=np.int32,
ctx=ctx), axis=0)
# Word embedding shape = (B, C)
word_embeddings = self.dropout_layer(self.word_emb(step_data))
if self._layout == 'NT':
word_embeddings = mx.np.expand_dims(word_embeddings, axis=1)
elif self._layout == 'TN':
word_embeddings = mx.np.expand_dims(word_embeddings, axis=0)
else:
raise NotImplementedError
out_l = self.decoder(word_embeddings, mem_l, rel_positions, mask)
# Get logits
if self._layout == 'NT':
final_out = out_l[-1][:, 0]
elif self._layout == 'TN':
final_out = out_l[-1][0, :]
else:
raise NotImplementedError
logits = self.crit.get_logits(final_out)
# Update memory
new_mem_l = []
for step_out, mem in zip([word_embeddings] + out_l, mem_l):
if self._layout == 'NT':
new_mem = mx.np.concatenate([mem, step_out], axis=1)
new_mem = new_mem[:, -self.mem_length:]
elif self._layout == 'TN':
new_mem = mx.np.concatenate([mem, step_out], axis=0)
new_mem = new_mem[-self.mem_length:, :]
else:
raise NotImplementedError
new_mem_l.append(new_mem)
return logits, new_mem_l | Forward for just one step
Parameters
----------
step_data
Shape (B,)
mem_l
A list of memory objects
- layout = 'NT'
Shape (B, T_mem, units)
- layout = 'TN'
Shape (T_mem, B, units)
Returns
-------
logits
Shape (B, V)
new_mem_l
A list of memory objects
- layout = 'NT'
Shape (B, min(T_mem + 1, memory_length), C)
- layout = 'TN'
Shape (min(T_mem + 1, memory_length), B, C)
| step_forward | python | dmlc/gluon-nlp | src/gluonnlp/models/transformer_xl.py | https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/models/transformer_xl.py | Apache-2.0 |
def get_pretrained_xlmr(model_name: str = 'fairseq_xlmr_base',
root: str = get_model_zoo_home_dir(),
load_backbone: bool = True,
load_mlm: bool = False) \
-> Tuple[CN, SentencepieceTokenizer, str, str]:
"""Get the pretrained XLM-R weights
Parameters
----------
model_name
The name of the xlmr model.
root
The downloading root
load_backbone
Whether to load the weights of the backbone network
load_mlm
Whether to load the weights of MLM
Returns
-------
cfg
Network configuration
tokenizer
The SentencepieceTokenizer
params_path
Path to the parameters
mlm_params_path
Path to the parameter that includes both the backbone and the MLM
"""
assert model_name in PRETRAINED_URL, '{} is not found. All available are {}'.format(
model_name, list_pretrained_xlmr())
cfg_path = PRETRAINED_URL[model_name]['cfg']
if isinstance(cfg_path, CN):
cfg = cfg_path
else:
cfg = None
sp_model_path = PRETRAINED_URL[model_name]['sentencepiece.model']
params_path = PRETRAINED_URL[model_name]['params']
mlm_params_path = PRETRAINED_URL[model_name]['mlm_params']
local_paths = dict()
download_jobs = [('sentencepiece.model', sp_model_path)]
if cfg is None:
download_jobs.append(('cfg', cfg_path))
for k, path in download_jobs:
local_paths[k] = download(url=get_repo_model_zoo_url() + path,
path=os.path.join(root, path),
sha1_hash=FILE_STATS[path])
if load_backbone:
local_params_path = download(url=get_repo_model_zoo_url() + params_path,
path=os.path.join(root, params_path),
sha1_hash=FILE_STATS[params_path])
else:
local_params_path = None
if load_mlm and mlm_params_path is not None:
local_mlm_params_path = download(url=get_repo_model_zoo_url() + mlm_params_path,
path=os.path.join(root, mlm_params_path),
sha1_hash=FILE_STATS[mlm_params_path])
else:
local_mlm_params_path = None
do_lower = True if 'lowercase' in PRETRAINED_URL[model_name]\
and PRETRAINED_URL[model_name]['lowercase'] else False
tokenizer = SentencepieceTokenizer(
model_path=local_paths['sentencepiece.model'],
lowercase=do_lower)
if cfg is None:
cfg = XLMRModel.get_cfg().clone_merge(local_paths['cfg'])
return cfg, tokenizer, local_params_path, local_mlm_params_path | Get the pretrained XLM-R weights
Parameters
----------
model_name
The name of the xlmr model.
root
The downloading root
load_backbone
Whether to load the weights of the backbone network
load_mlm
Whether to load the weights of MLM
Returns
-------
cfg
Network configuration
tokenizer
The SentencepieceTokenizer
params_path
Path to the parameters
mlm_params_path
Path to the parameter that includes both the backbone and the MLM
| get_pretrained_xlmr | python | dmlc/gluon-nlp | src/gluonnlp/models/xlmr.py | https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/models/xlmr.py | Apache-2.0 |
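Usage sketch (not part of the source): the snippet below shows how the downloaded artifacts are typically wired together. The `XLMRModel.from_cfg` constructor, the Gluon-style `load_parameters` call, and the `encode(..., int)` tokenizer convention are assumptions based on how other GluonNLP backbones are used.

from gluonnlp.models.xlmr import get_pretrained_xlmr, XLMRModel

cfg, tokenizer, params_path, _ = get_pretrained_xlmr('fairseq_xlmr_base')
model = XLMRModel.from_cfg(cfg)            # assumed constructor, mirroring other backbones
model.load_parameters(params_path)         # assumed Gluon-style parameter loading
token_ids = tokenizer.encode('Hello world', int)  # SentencepieceTokenizer -> list of ids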
def gen_self_attn_mask(data,
valid_length=None,
attn_type: str = 'full',
layout: str = 'NT'):
"""Generate the mask used for the encoder, i.e, self-attention.
In our implementation, 1 --> not masked, 0 --> masked
Let's consider the data with two samples:
data =
[['I', 'can', 'now', 'use', 'numpy', 'in', 'Gluon@@', 'NLP' ],
['May', 'the', 'force', 'be', 'with', 'you', '<PAD>', '<PAD>']]
valid_length =
[8, 6]
- attn_type = 'causal'
Each token will attend to itself + the tokens before.
It will not attend to tokens in the future.
For our example, the mask of the first sample is
['I', 'can', 'now', 'use', 'numpy', 'in', 'Gluon@@', 'NLP']
'I': 1, 0, 0, 0, 0, 0, 0, 0
'can': 1, 1, 0, 0, 0, 0, 0, 0
'now': 1, 1, 1, 0, 0, 0, 0, 0
'use': 1, 1, 1, 1, 0, 0, 0, 0
'numpy': 1, 1, 1, 1, 1, 0, 0, 0
'in': 1, 1, 1, 1, 1, 1, 0, 0
'Gluon@@': 1, 1, 1, 1, 1, 1, 1, 0
'NLP': 1, 1, 1, 1, 1, 1, 1, 1
The mask of the second sample is
['May', 'the', 'force', 'be', 'with', 'you', '<PAD>', '<PAD>']
'May': 1, 0, 0, 0, 0, 0, 0, 0
'the': 1, 1, 0, 0, 0, 0, 0, 0
'force': 1, 1, 1, 0, 0, 0, 0, 0
'be': 1, 1, 1, 1, 0, 0, 0, 0
'with': 1, 1, 1, 1, 1, 0, 0, 0
'you': 1, 1, 1, 1, 1, 1, 0, 0
'<PAD>': 0, 0, 0, 0, 0, 0, 0, 0
'<PAD>': 0, 0, 0, 0, 0, 0, 0, 0
- attn_type = 'full'
Each token will attend to both the tokens before and in the future
For our example, the mask of the first sample is
['I', 'can', 'now', 'use', 'numpy', 'in', 'Gluon@@', 'NLP']
'I': 1, 1, 1, 1, 1, 1, 1, 1
'can': 1, 1, 1, 1, 1, 1, 1, 1
'now': 1, 1, 1, 1, 1, 1, 1, 1
'use': 1, 1, 1, 1, 1, 1, 1, 1
'numpy': 1, 1, 1, 1, 1, 1, 1, 1
'in': 1, 1, 1, 1, 1, 1, 1, 1
'Gluon@@': 1, 1, 1, 1, 1, 1, 1, 1
'NLP': 1, 1, 1, 1, 1, 1, 1, 1
The mask of the second sample is
['May', 'the', 'force', 'be', 'with', 'you', '<PAD>', '<PAD>']
'May': 1, 1, 1, 1, 1, 1, 0, 0
'the': 1, 1, 1, 1, 1, 1, 0, 0
'force': 1, 1, 1, 1, 1, 1, 0, 0
'be': 1, 1, 1, 1, 1, 1, 0, 0
'with': 1, 1, 1, 1, 1, 1, 0, 0
'you': 1, 1, 1, 1, 1, 1, 0, 0
'<PAD>': 0, 0, 0, 0, 0, 0, 0, 0
'<PAD>': 0, 0, 0, 0, 0, 0, 0, 0
Parameters
----------
data
The data.
- layout = 'NT'
Shape (batch_size, seq_length, C)
- layout = 'TN'
Shape (seq_length, batch_size, C)
valid_length
Shape (batch_size,)
attn_type
Can be 'full' or 'causal'
layout
The layout of the data
Returns
-------
mask
Shape (batch_size, seq_length, seq_length)
"""
device = data.device
if layout == 'NT':
batch_axis, time_axis = 0, 1
elif layout == 'TN':
batch_axis, time_axis = 1, 0
else:
raise NotImplementedError('Unsupported layout={}'.format(layout))
if attn_type == 'full':
if valid_length is not None:
steps = th.arange(data.shape[time_axis], device=device) # (seq_length,)
mask1 = (steps.view((1, 1, -1))
< valid_length.view((valid_length.shape[0], 1, 1)))
mask2 = (steps.view((1, -1, 1))
< valid_length.view((valid_length.shape[0], 1, 1)))
mask = mask1 * mask2
else:
seq_len_ones = th.ones((data.shape[time_axis],), device=device) # (seq_length,)
batch_ones = th.ones((data.shape[batch_axis],), device=device) # (batch_size,)
mask = batch_ones.view((-1, 1, 1)) * seq_len_ones.view((1, -1, 1))\
* seq_len_ones.view((1, 1, -1))
elif attn_type == 'causal':
steps = th.arange(data.shape[time_axis], device=device)
# mask: (seq_length, seq_length)
# batch_mask: (batch_size, seq_length)
mask = th.unsqueeze(steps, dim=0) <= th.unsqueeze(steps, dim=1)
if valid_length is not None:
batch_mask = th.unsqueeze(steps, dim=0) < th.unsqueeze(valid_length, dim=-1)
mask = mask * th.unsqueeze(batch_mask, dim=-1)
else:
batch_ones = th.ones(data.shape[batch_axis], device=device)
mask = mask * batch_ones.view((-1, 1, 1))
else:
raise NotImplementedError
    return mask.type(th.bool) | Generate the mask used for the encoder, i.e., self-attention.
In our implementation, 1 --> not masked, 0 --> masked
Let's consider the data with two samples:
data =
[['I', 'can', 'now', 'use', 'numpy', 'in', 'Gluon@@', 'NLP' ],
['May', 'the', 'force', 'be', 'with', 'you', '<PAD>', '<PAD>']]
valid_length =
[8, 6]
- attn_type = 'causal'
Each token will attend to itself + the tokens before.
It will not attend to tokens in the future.
For our example, the mask of the first sample is
['I', 'can', 'now', 'use', 'numpy', 'in', 'Gluon@@', 'NLP']
'I': 1, 0, 0, 0, 0, 0, 0, 0
'can': 1, 1, 0, 0, 0, 0, 0, 0
'now': 1, 1, 1, 0, 0, 0, 0, 0
'use': 1, 1, 1, 1, 0, 0, 0, 0
'numpy': 1, 1, 1, 1, 1, 0, 0, 0
'in': 1, 1, 1, 1, 1, 1, 0, 0
'Gluon@@': 1, 1, 1, 1, 1, 1, 1, 0
'NLP': 1, 1, 1, 1, 1, 1, 1, 1
The mask of the second sample is
['May', 'the', 'force', 'be', 'with', 'you', '<PAD>', '<PAD>']
'May': 1, 0, 0, 0, 0, 0, 0, 0
'the': 1, 1, 0, 0, 0, 0, 0, 0
'force': 1, 1, 1, 0, 0, 0, 0, 0
'be': 1, 1, 1, 1, 0, 0, 0, 0
'with': 1, 1, 1, 1, 1, 0, 0, 0
'you': 1, 1, 1, 1, 1, 1, 0, 0
'<PAD>': 0, 0, 0, 0, 0, 0, 0, 0
'<PAD>': 0, 0, 0, 0, 0, 0, 0, 0
- attn_type = 'full'
Each token will attend to both the tokens before and in the future
For our example, the mask of the first sample is
['I', 'can', 'now', 'use', 'numpy', 'in', 'Gluon@@', 'NLP']
'I': 1, 1, 1, 1, 1, 1, 1, 1
'can': 1, 1, 1, 1, 1, 1, 1, 1
'now': 1, 1, 1, 1, 1, 1, 1, 1
'use': 1, 1, 1, 1, 1, 1, 1, 1
'numpy': 1, 1, 1, 1, 1, 1, 1, 1
'in': 1, 1, 1, 1, 1, 1, 1, 1
'Gluon@@': 1, 1, 1, 1, 1, 1, 1, 1
'NLP': 1, 1, 1, 1, 1, 1, 1, 1
The mask of the second sample is
['May', 'the', 'force', 'be', 'with', 'you', '<PAD>', '<PAD>']
'May': 1, 1, 1, 1, 1, 1, 0, 0
'the': 1, 1, 1, 1, 1, 1, 0, 0
'force': 1, 1, 1, 1, 1, 1, 0, 0
'be': 1, 1, 1, 1, 1, 1, 0, 0
'with': 1, 1, 1, 1, 1, 1, 0, 0
'you': 1, 1, 1, 1, 1, 1, 0, 0
'<PAD>': 0, 0, 0, 0, 0, 0, 0, 0
'<PAD>': 0, 0, 0, 0, 0, 0, 0, 0
Parameters
----------
data
The data.
- layout = 'NT'
Shape (batch_size, seq_length, C)
- layout = 'TN'
Shape (seq_length, batch_size, C)
valid_length
Shape (batch_size,)
attn_type
Can be 'full' or 'causal'
layout
The layout of the data
Returns
-------
mask
Shape (batch_size, seq_length, seq_length)
| gen_self_attn_mask | python | dmlc/gluon-nlp | src/gluonnlp/torch/attention_cell.py | https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/torch/attention_cell.py | Apache-2.0 |
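Usage sketch (not from the source; the import path gluonnlp.torch.attention_cell is inferred from the file path above): build a causal self-attention mask for a padded batch.

import torch as th
from gluonnlp.torch.attention_cell import gen_self_attn_mask

data = th.zeros(2, 8, 16)                  # (batch_size=2, seq_length=8, C=16), layout 'NT'
valid_length = th.tensor([8, 6])
mask = gen_self_attn_mask(data, valid_length, attn_type='causal', layout='NT')
print(mask.shape)                          # torch.Size([2, 8, 8]), boolean mask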
def gen_mem_attn_mask(mem, mem_valid_length, data, data_valid_length=None,
layout: str = 'NT'):
"""Generate the mask used for the decoder. All query slots are attended to the memory slots.
In our implementation, 1 --> not masked, 0 --> masked
Let's consider the data + mem with a batch of two samples:
mem = [['I', 'can', 'now', 'use'],
['May', 'the', 'force', '<PAD>']]
mem_valid_length =
[4, 3]
data =
[['numpy', 'in', 'Gluon@@', 'NLP' ],
['be', 'with', 'you', '<PAD>']]
data_valid_length =
[4, 3]
For our example, the mask of the first sample is
['I', 'can', 'now', 'use']
'numpy': 1, 1, 1, 1
'in': 1, 1, 1, 1
'Gluon@@': 1, 1, 1, 1
'NLP': 1, 1, 1, 1
The mask of the second sample is
           ['May', 'the', 'force', '<PAD>']
    'be':      1,     1,       1,        0
    'with':    1,     1,       1,        0
    'you':     1,     1,       1,        0
    '<PAD>':   0,     0,       0,        0
Parameters
----------
mem
- layout = 'NT'
Shape (batch_size, mem_length, C_mem)
- layout = 'TN'
Shape (mem_length, batch_size, C_mem)
mem_valid_length :
Shape (batch_size,)
data
- layout = 'NT'
Shape (batch_size, query_length, C_data)
- layout = 'TN'
Shape (query_length, batch_size, C_data)
data_valid_length :
Shape (batch_size,)
layout
Layout of the data + mem tensor
Returns
-------
mask
Shape (batch_size, query_length, mem_length)
"""
device = mem.device
if layout == 'NT':
batch_axis, time_axis = 0, 1
elif layout == 'TN':
batch_axis, time_axis = 1, 0
else:
raise NotImplementedError('Unsupported layout={}'.format(layout))
batch_size = mem.shape[batch_axis]
mem_length = mem.shape[time_axis]
    query_length = data.shape[time_axis]
mem_steps = th.arange(mem_length, device=device) # (mem_length,)
data_steps = th.arange(data.shape[time_axis], device=device) # (query_length,)
# mem_mask will have shape (B, 1, mem_length)
mem_mask = mem_steps.view((1, 1, mem_length)) < mem_valid_length.view((batch_size, 1, 1))
if data_valid_length is not None:
# (B, query_length, 1)
data_mask = (data_steps.view((1, -1, 1))
< data_valid_length.view((batch_size, 1, 1)))
mask = mem_mask * data_mask
else:
mask = mem_mask.expand(batch_size, query_length, -1)
return mask.type(th.bool) | Generate the mask used for the decoder. All query slots are attended to the memory slots.
In our implementation, 1 --> not masked, 0 --> masked
Let's consider the data + mem with a batch of two samples:
mem = [['I', 'can', 'now', 'use'],
['May', 'the', 'force', '<PAD>']]
mem_valid_length =
[4, 3]
data =
[['numpy', 'in', 'Gluon@@', 'NLP' ],
['be', 'with', 'you', '<PAD>']]
data_valid_length =
[4, 3]
For our example, the mask of the first sample is
['I', 'can', 'now', 'use']
'numpy': 1, 1, 1, 1
'in': 1, 1, 1, 1
'Gluon@@': 1, 1, 1, 1
'NLP': 1, 1, 1, 1
The mask of the second sample is
           ['May', 'the', 'force', '<PAD>']
    'be':      1,     1,       1,        0
    'with':    1,     1,       1,        0
    'you':     1,     1,       1,        0
    '<PAD>':   0,     0,       0,        0
Parameters
----------
mem
- layout = 'NT'
Shape (batch_size, mem_length, C_mem)
- layout = 'TN'
Shape (mem_length, batch_size, C_mem)
mem_valid_length :
Shape (batch_size,)
data
- layout = 'NT'
Shape (batch_size, query_length, C_data)
- layout = 'TN'
Shape (query_length, batch_size, C_data)
data_valid_length :
Shape (batch_size,)
layout
Layout of the data + mem tensor
Returns
-------
mask
Shape (batch_size, query_length, mem_length)
| gen_mem_attn_mask | python | dmlc/gluon-nlp | src/gluonnlp/torch/attention_cell.py | https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/torch/attention_cell.py | Apache-2.0 |
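Usage sketch (not from the source; import path inferred from the file path above): mask that lets every query token attend to the valid memory slots only.

import torch as th
from gluonnlp.torch.attention_cell import gen_mem_attn_mask

mem = th.zeros(2, 4, 16)                   # (batch_size=2, mem_length=4, C_mem=16)
data = th.zeros(2, 4, 16)                  # (batch_size=2, query_length=4, C_data=16)
mem_valid_length = th.tensor([4, 3])
data_valid_length = th.tensor([4, 3])
mask = gen_mem_attn_mask(mem, mem_valid_length, data, data_valid_length, layout='NT')
print(mask.shape)                          # torch.Size([2, 4, 4])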
def masked_softmax(att_score, mask, axis: int = -1):
"""Ignore the masked elements when calculating the softmax.
The mask can be broadcastable.
Parameters
----------
    att_score : th.Tensor
        Shape (..., length, ...)
    mask : th.Tensor or None
Shape (..., length, ...)
1 --> The element is not masked
0 --> The element is masked
axis
The axis to calculate the softmax. att_score.shape[axis] must be the same as mask.shape[axis]
Returns
-------
    att_weights : th.Tensor
Shape (..., length, ...)
"""
if mask is not None:
# Fill in the masked scores with a very small value
if att_score.dtype == th.float16:
att_score = att_score.masked_fill(th.logical_not(mask), -1E4)
else:
att_score = att_score.masked_fill(th.logical_not(mask), -1E18)
att_weights = th.softmax(att_score, dim=axis) * mask
else:
att_weights = th.softmax(att_score, dim=axis)
return att_weights | Ignore the masked elements when calculating the softmax.
The mask can be broadcastable.
Parameters
----------
    att_score : th.Tensor
        Shape (..., length, ...)
    mask : th.Tensor or None
Shape (..., length, ...)
1 --> The element is not masked
0 --> The element is masked
axis
The axis to calculate the softmax. att_score.shape[axis] must be the same as mask.shape[axis]
Returns
-------
    att_weights : th.Tensor
Shape (..., length, ...)
| masked_softmax | python | dmlc/gluon-nlp | src/gluonnlp/torch/attention_cell.py | https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/torch/attention_cell.py | Apache-2.0 |
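Usage sketch (not from the source; import path inferred from the file path above): padded memory slots receive exactly zero attention weight.

import torch as th
from gluonnlp.torch.attention_cell import masked_softmax

scores = th.randn(2, 3, 5)                 # (batch_size, query_length, mem_length)
mask = th.ones(2, 3, 5, dtype=th.bool)
mask[:, :, 4] = False                      # treat the last memory slot as padding
weights = masked_softmax(scores, mask)
print(weights[0, 0, 4].item())             # 0.0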
def masked_logsoftmax(att_score, mask, axis: int = -1):
"""Ignore the masked elements when calculating the softmax. The mask can be broadcastable.
Parameters
----------
    att_score : th.Tensor
        Shape (..., length, ...)
    mask : th.Tensor or None
Shape (..., length, ...)
mask = 1 --> not masked
mask = 0 --> masked
axis
The axis to calculate the softmax. att_score.shape[axis] must be the same as mask.shape[axis]
Returns
-------
    logits : th.Tensor
        Shape (..., length, ...)
        The masked positions will be filled with -inf
"""
if mask is not None:
# Fill in the masked scores with a very small value
inv_mask = th.logical_not(mask)
if att_score.dtype == th.float16:
att_score = att_score.masked_fill(inv_mask, -1E4)
else:
att_score = att_score.masked_fill(inv_mask, -1E18)
logits = th.log_softmax(att_score, dim=axis)
        logits = logits.masked_fill(inv_mask, float('-inf'))
else:
logits = th.log_softmax(att_score, dim=axis)
return logits | Ignore the masked elements when calculating the softmax. The mask can be broadcastable.
Parameters
----------
    att_score : th.Tensor
        Shape (..., length, ...)
    mask : th.Tensor or None
Shape (..., length, ...)
mask = 1 --> not masked
mask = 0 --> masked
axis
The axis to calculate the softmax. att_score.shape[axis] must be the same as mask.shape[axis]
Returns
-------
    logits : th.Tensor
        Shape (..., length, ...)
        The masked positions will be filled with -inf
| masked_logsoftmax | python | dmlc/gluon-nlp | src/gluonnlp/torch/attention_cell.py | https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/torch/attention_cell.py | Apache-2.0 |
def multi_head_dot_attn(query, key, value,
mask=None,
edge_scores=None,
dropout: float = 0.0,
scaled: bool = True, normalized: bool = False,
eps: float = 1E-6,
layout: str = 'NKT',
use_einsum: bool = None, *, training: bool = True):
"""Multihead dot product attention between the query, key, value.
scaled is False, normalized is False:
D(h_q, h_k) = <h_q, h_k>
scaled is True, normalized is False:
D(h_q, h_k) = <h_q, h_k> / sqrt(dim_q)
scaled is False, normalized is True:
D(h_q, h_k) = <h_q / ||h_q||, h_k / ||h_k||>
scaled is True, normalized is True:
D(h_q, h_k) = <h_q / ||h_q||, h_k / ||h_k||> / sqrt(dim_q)
    If edge_scores is provided, we will calculate the attention as
scores = D(h_q, h_k) + EdgeScore_{q, k}
Parameters
----------
query
Query. The shape depends on the layout
- layout is 'NKT'
Shape (batch_size, num_heads, query_length, key_dim)
- layout is 'NTK'
Shape (batch_size, query_length, num_heads, key_dim)
- layout is 'TNK'
Shape (query_length, batch_size, num_heads, key_dim)
key
Key. The shape depends on the layout
- layout is 'NKT'
Shape (batch_size, num_heads, mem_length, key_dim)
- layout is 'NTK'
Shape (batch_size, mem_length, num_heads, key_dim)
- layout is 'TNK'
Shape (mem_length, batch_size, num_heads, key_dim)
value
Value. The shape depends on the layout
- layout is 'NKT'
Shape (batch_size, num_heads, mem_length, value_dim)
- layout is 'NTK'
Shape (batch_size, mem_length, num_heads, value_dim)
- layout is 'TNK'
Shape (mem_length, batch_size, num_heads, value_dim)
mask
Mask between query and memory. Shape (batch_size, query_length, mem_length)
edge_scores
The edge attention score. Shape can be any shape that is broadcastable to
(batch_size, num_heads, query_length, mem_length)
dropout
Dropout rate
scaled
Whether to divide the attention weights by the sqrt of the query dimension.
This is first proposed in "[NIPS2017] Attention is all you need."::
score = <h_q, h_k> / sqrt(dim_q)
normalized
If turned on, the cosine distance is used, i.e::
score = <h_q / ||h_q||, h_k / ||h_k||>
eps
The epsilon value used in L2 normalization
layout
This stands for the layout of the attention cell. The shape of the input/output will depend
on the layout. Currently, we support 'NKT', 'NTK' and 'TNK' in which
'N' means the batch_size, 'K' means the head, and 'T' means the length dimension.
use_einsum
Whether to use einsum for the computation
Returns
-------
context_vec
- layout is 'NKT' or 'NTK'
Shape (batch_size, query_length, num_heads * value_units)
- layout is 'TNK'
Shape (query_length, batch_size, num_heads * value_units)
additional_info
scores:
Shape (batch_size, num_head, query_length, mem_length)
attn_weight:
Shape (batch_size, num_head, query_length, mem_length)
"""
if use_einsum is None:
use_einsum = use_einsum_optimization()
# TODO(sxjscience) Profile layout
if normalized:
query = F.normalize(query, p=2, dim=-1, eps=eps)
key = F.normalize(key, p=2, dim=-1, eps=eps)
if scaled:
scale = math.sqrt(query.shape[-1])
else:
scale = None
if layout == 'NKT':
# 1. Expand the dimension of the mask:
# (B, L_query, L_mem) --> (B, 1, L_query, L_mem)
if mask is not None:
mask = th.unsqueeze(mask, dim=1)
# 2. Calculate the attention weights
# Score: (B, N, L_query, C_Q) X (B, N, L_mem, C_Q) --> (B, N, L_query, L_mem)
scores = th.matmul(query, th.transpose(key, -2, -1))
if edge_scores is not None:
scores = scores + edge_scores
attn_weights = masked_softmax(scores / scale if scale is not None else scores, mask, axis=-1)
attn_weights = th.nn.functional.dropout(attn_weights, p=dropout, training=training)
# 3. Calculate the context vector
# (B, N, L_query, L_mem) X (B, N, L_mem, C_V) --> (B, L_query, N * C_V)
if use_einsum:
context_vec = th.einsum('bnij,bnjc->binc', attn_weights, value)
else:
context_vec = th.transpose(th.matmul(attn_weights, value), 1, 2)
context_vec = th.reshape(context_vec,
(context_vec.shape[0], context_vec.shape[1], -1))
elif layout == 'NTK':
# 1. Expand the dimension of the mask:
# (B, L_query, L_mem) --> (B, 1, L_query, L_mem)
if mask is not None:
mask = th.unsqueeze(mask, dim=1)
# 2. Calculate the attention weights
# Score: (B, L_query, N, C_Q) X (B, L_mem, N, C_Q) --> (B, N, L_query, L_mem)
if use_einsum:
scores = th.einsum('binc,bjnc->bnij', query, key)
else:
scores = th.matmul(th.transpose(query, 1, 2), key.permute(0, 2, 3, 1))
if edge_scores is not None:
scores = scores + edge_scores
attn_weights = masked_softmax(scores / scale if scale is not None else scores, mask)
attn_weights = th.nn.functional.dropout(attn_weights, p=dropout, training=training)
# 3. Calculate the context vector
# (B, N, L_query, L_mem) X (B, L_mem, N, C_V) --> (B, L_query, N * C_V)
if use_einsum:
context_vec = th.einsum('bnij,bjnc->binc', attn_weights, value)
else:
context_vec = th.matmul(attn_weights, th.transpose(value, 1, 2)).permute(0, 2, 1, 3)
context_vec = th.reshape(context_vec, (context_vec.shape[0], context_vec.shape[1], -1))
elif layout == 'TNK':
# 1. Expand the dimension of the mask:
# (B, L_query, L_mem) --> (B, 1, L_query, L_mem)
if mask is not None:
mask = th.unsqueeze(mask, dim=1)
# 2. Calculate the attention weights
# Score: (L_query, B, N, C_Q) X (L_mem, B, N, C_Q) --> (B, N, L_query, L_mem)
# This layout structure can be implemented very efficiently because B, N are consecutive
# to each other. To have a clear picture of what's happening, we may consider the
# (i, j)th element of the output
# out[i, j, :, :] = query[:, i, j, :] X key[:, i, j, :].T, which is just one GEMM call
# We can thus implement the whole kernel via a single call of batched GEMM with stride.
if use_einsum:
scores = th.einsum('ibnc,jbnc->bnij', query, key)
else:
scores = th.matmul(query.permute(1, 2, 0, 3),
key.permute(1, 2, 3, 0))
if edge_scores is not None:
scores = scores + edge_scores
attn_weights = masked_softmax(scores / scale if scale is not None else scores, mask)
attn_weights = th.nn.functional.dropout(attn_weights, p=dropout, training=training)
# 3. Calculate the context vector
# (B, N, L_query, L_mem) X (L_mem, B, N, C_V) --> (L_query, B, N * C_V)
# Again, we can implement it via a single call to batched GEMM with stride.
# Shape (B, N, L_query, C_V)
if use_einsum:
context_vec = th.einsum('bnij,jbnc->ibnc', attn_weights, value)
else:
context_vec = th.matmul(attn_weights,
value.permute(1, 2, 0, 3)).permute(2, 0, 1, 3)
context_vec = th.reshape(context_vec, (context_vec.shape[0], context_vec.shape[1], -1))
else:
raise NotImplementedError('layout="{}" is not supported! '
'We only support layout = "NKT", "NTK", and "TNK".'
.format(layout))
return context_vec, [scores, attn_weights] | Multihead dot product attention between the query, key, value.
scaled is False, normalized is False:
D(h_q, h_k) = <h_q, h_k>
scaled is True, normalized is False:
D(h_q, h_k) = <h_q, h_k> / sqrt(dim_q)
scaled is False, normalized is True:
D(h_q, h_k) = <h_q / ||h_q||, h_k / ||h_k||>
scaled is True, normalized is True:
D(h_q, h_k) = <h_q / ||h_q||, h_k / ||h_k||> / sqrt(dim_q)
    If edge_scores is provided, we will calculate the attention as
scores = D(h_q, h_k) + EdgeScore_{q, k}
Parameters
----------
query
Query. The shape depends on the layout
- layout is 'NKT'
Shape (batch_size, num_heads, query_length, key_dim)
- layout is 'NTK'
Shape (batch_size, query_length, num_heads, key_dim)
- layout is 'TNK'
Shape (query_length, batch_size, num_heads, key_dim)
key
Key. The shape depends on the layout
- layout is 'NKT'
Shape (batch_size, num_heads, mem_length, key_dim)
- layout is 'NTK'
Shape (batch_size, mem_length, num_heads, key_dim)
- layout is 'TNK'
Shape (mem_length, batch_size, num_heads, key_dim)
value
Value. The shape depends on the layout
- layout is 'NKT'
Shape (batch_size, num_heads, mem_length, value_dim)
- layout is 'NTK'
Shape (batch_size, mem_length, num_heads, value_dim)
- layout is 'TNK'
Shape (mem_length, batch_size, num_heads, value_dim)
mask
Mask between query and memory. Shape (batch_size, query_length, mem_length)
edge_scores
The edge attention score. Shape can be any shape that is broadcastable to
(batch_size, num_heads, query_length, mem_length)
dropout
Dropout rate
scaled
Whether to divide the attention weights by the sqrt of the query dimension.
This is first proposed in "[NIPS2017] Attention is all you need."::
score = <h_q, h_k> / sqrt(dim_q)
normalized
If turned on, the cosine distance is used, i.e::
score = <h_q / ||h_q||, h_k / ||h_k||>
eps
The epsilon value used in L2 normalization
layout
This stands for the layout of the attention cell. The shape of the input/output will depend
on the layout. Currently, we support 'NKT', 'NTK' and 'TNK' in which
'N' means the batch_size, 'K' means the head, and 'T' means the length dimension.
use_einsum
Whether to use einsum for the computation
Returns
-------
context_vec
- layout is 'NKT' or 'NTK'
Shape (batch_size, query_length, num_heads * value_units)
- layout is 'TNK'
Shape (query_length, batch_size, num_heads * value_units)
additional_info
scores:
Shape (batch_size, num_head, query_length, mem_length)
attn_weight:
Shape (batch_size, num_head, query_length, mem_length)
| multi_head_dot_attn | python | dmlc/gluon-nlp | src/gluonnlp/torch/attention_cell.py | https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/torch/attention_cell.py | Apache-2.0 |
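Usage sketch (not from the source; import path inferred from the file path above): run the attention kernel with layout='NTK' and inspect the output shapes.

import torch as th
from gluonnlp.torch.attention_cell import multi_head_dot_attn

B, L_q, L_m, H, C = 2, 4, 6, 8, 16
query = th.randn(B, L_q, H, C)             # (batch, query_length, num_heads, key_dim)
key = th.randn(B, L_m, H, C)
value = th.randn(B, L_m, H, C)
mask = th.ones(B, L_q, L_m, dtype=th.bool)
context, (scores, attn) = multi_head_dot_attn(query, key, value, mask=mask,
                                              layout='NTK', training=False)
print(context.shape)                       # torch.Size([2, 4, 128]) == (B, L_q, H * C)
print(attn.shape)                          # torch.Size([2, 8, 4, 6])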
def relative_position_bucket(relative_position, bidirectional: bool = True, num_buckets: int = 32,
max_distance: int = 128):
"""Map the relative position to buckets.
The major difference between our implementation and
that in [mesh_tensorflow](https://github.com/tensorflow/mesh/blob/c59988047e49b4d2af05603e3170724cdbadc467/mesh_tensorflow/transformer/transformer_layers.py#L595-L637)
is that we use 'query_i - mem_j' as the (i, j)-th location in relative_position.
Thus, a positive value means that the query slot is in a later timestamp than the memory slot.
However, in mesh transformer, it is treated as `mem_i - query_j` (reversed).
The implementation uses the first half of the bucket (num_buckets // 2) to store the
exact increments in positions and the second half of the bucket
(num_buckets - num_buckets // 2) to store the bucketing values in the logarithm order.
Parameters
----------
relative_position
Shape (...,)
bidirectional
Whether we are dealing with bidirectional attention.
If it's bidirectional, we will use the first half to map the positions of the
positive shifts and the second half to map the positions of the negative shifts.
num_buckets
The number of buckets.
max_distance
Maximum distance. Positions that fall outside of 'max_distance' will be trimmed.
Returns
-------
buckets
Shape (...,).
It has the same shape as the `relative_position`. It will have int32 type.
"""
ret = 0
if bidirectional:
assert num_buckets % 2 == 0, 'When bidirectional is True, the number of buckets must be ' \
'divisible by 2.'
num_buckets //= 2
        ret = ret + (relative_position < 0).to(th.int32) * num_buckets
relative_position = th.abs(relative_position)
else:
# Clip all the negative values to 0
relative_position = th.clip(relative_position, min=0, max=None)
# Now, the relative_position is in the range [0, inf)
# Half of the buckets deal with the exact increments,
# i.e., 0, 1, 2, ..., max_exact - 1, where max_exact = num_buckets // 2
max_exact = num_buckets // 2
is_small = relative_position < max_exact
# The other half of the buckets are for logarithmically bigger bins in positions up to
# max_distance
val_if_large = max_exact + (th.log(relative_position.type(th.float32) / max_exact) /
math.log(max_distance / max_exact) *
                                (num_buckets - max_exact)).to(th.int32)
val_if_large = th.minimum(val_if_large, th.tensor(num_buckets - 1))
ret = ret + th.where(is_small, relative_position, val_if_large)
return ret | Map the relative position to buckets.
The major difference between our implementation and
that in [mesh_tensorflow](https://github.com/tensorflow/mesh/blob/c59988047e49b4d2af05603e3170724cdbadc467/mesh_tensorflow/transformer/transformer_layers.py#L595-L637)
is that we use 'query_i - mem_j' as the (i, j)-th location in relative_position.
Thus, a positive value means that the query slot is in a later timestamp than the memory slot.
However, in mesh transformer, it is treated as `mem_i - query_j` (reversed).
The implementation uses the first half of the bucket (num_buckets // 2) to store the
exact increments in positions and the second half of the bucket
(num_buckets - num_buckets // 2) to store the bucketing values in the logarithm order.
Parameters
----------
relative_position
Shape (...,)
bidirectional
Whether we are dealing with bidirectional attention.
If it's bidirectional, we will use the first half to map the positions of the
positive shifts and the second half to map the positions of the negative shifts.
num_buckets
The number of buckets.
max_distance
Maximum distance. Positions that fall outside of 'max_distance' will be trimmed.
Returns
-------
buckets
Shape (...,).
It has the same shape as the `relative_position`. It will have int32 type.
| relative_position_bucket | python | dmlc/gluon-nlp | src/gluonnlp/torch/layers.py | https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/torch/layers.py | Apache-2.0 |
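Usage sketch (not from the source; import path inferred from the file path above): bucket the signed query-minus-memory offsets that feed a T5-style relative-position bias.

import torch as th
from gluonnlp.torch.layers import relative_position_bucket

query_pos = th.arange(4).unsqueeze(-1)     # (query_length, 1)
mem_pos = th.arange(6).unsqueeze(0)        # (1, mem_length)
rel = query_pos - mem_pos                  # positive --> query comes after the memory slot
buckets = relative_position_bucket(rel, bidirectional=True, num_buckets=32, max_distance=128)
print(buckets.shape)                       # torch.Size([4, 6])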
def get_activation(act, inplace=False):
"""
Parameters
----------
act
Name of the activation
inplace
Whether to perform inplace activation
Returns
-------
activation_layer
The activation
"""
if act is None:
return lambda x: x
if isinstance(act, str):
if act == 'leaky':
# TODO(sxjscience) Add regex matching here to parse `leaky(0.1)`
return nn.LeakyReLU(0.1, inplace=inplace)
elif act == 'identity':
return nn.Identity()
elif act == 'elu':
return nn.ELU(inplace=inplace)
elif act == 'gelu':
return nn.GELU()
elif act == 'gelu(tanh)':
return GELU_TANH()
elif act == 'relu':
return nn.ReLU()
elif act == 'sigmoid':
return nn.Sigmoid()
elif act == 'tanh':
return nn.Tanh()
elif act == 'softrelu' or act == 'softplus':
return nn.Softplus()
elif act == 'softsign':
return nn.Softsign()
else:
raise NotImplementedError('act="{}" is not supported. '
'Try to include it if you can find that in '
'https://pytorch.org/docs/stable/nn.html'.format(act))
else:
return act |
Parameters
----------
act
Name of the activation
inplace
Whether to perform inplace activation
Returns
-------
activation_layer
The activation
| get_activation | python | dmlc/gluon-nlp | src/gluonnlp/torch/layers.py | https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/torch/layers.py | Apache-2.0 |
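Usage sketch (not from the source; import path inferred from the file path above).

import torch as th
from gluonnlp.torch.layers import get_activation

act = get_activation('gelu')
x = th.randn(2, 3)
y = act(x)                                 # same shape as x
identity = get_activation(None)            # None returns a pass-through lambda
assert th.equal(identity(x), x)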
def get_norm_layer(normalization: str = 'layer_norm', axis: int = -1, epsilon: float = 1e-5,
in_channels: int = 0, **kwargs):
"""Get the normalization layer based on the provided type
Parameters
----------
normalization
The type of the layer normalization from ['layer_norm']
axis
        The axis along which the normalization is applied
epsilon
The epsilon of the normalization layer
in_channels
Input channel
Returns
-------
norm_layer
The layer normalization layer
"""
if isinstance(normalization, str):
if normalization == 'layer_norm':
assert in_channels > 0
assert axis == -1
norm_layer = nn.LayerNorm(normalized_shape=in_channels, eps=epsilon, **kwargs)
else:
raise NotImplementedError('normalization={} is not supported'.format(normalization))
return norm_layer
else:
raise NotImplementedError('The type of normalization must be str') | Get the normalization layer based on the provided type
Parameters
----------
normalization
The type of the layer normalization from ['layer_norm']
axis
        The axis along which the normalization is applied
epsilon
The epsilon of the normalization layer
in_channels
Input channel
Returns
-------
norm_layer
The layer normalization layer
| get_norm_layer | python | dmlc/gluon-nlp | src/gluonnlp/torch/layers.py | https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/torch/layers.py | Apache-2.0 |
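Usage sketch (not from the source; import path inferred from the file path above).

import torch as th
from gluonnlp.torch.layers import get_norm_layer

ln = get_norm_layer('layer_norm', in_channels=512, epsilon=1e-5)
x = th.randn(2, 10, 512)
print(ln(x).shape)                         # torch.Size([2, 10, 512])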
def __init__(self, units: int = 512, hidden_size: int = 2048, activation_dropout: float = 0.0,
dropout: float = 0.1, gated_proj: bool = False, activation='relu',
normalization: str = 'layer_norm', layer_norm_eps: float = 1E-5,
pre_norm: bool = False):
"""
Parameters
----------
units
hidden_size
activation_dropout
dropout
activation
normalization
layer_norm or no_norm
layer_norm_eps
pre_norm
Pre-layer normalization as proposed in the paper:
"[ACL2018] The Best of Both Worlds: Combining Recent Advances in
Neural Machine Translation"
This will stabilize the training of Transformers.
You may also refer to
"[Arxiv2020] Understanding the Difficulty of Training Transformers"
"""
super().__init__()
self._pre_norm = pre_norm
self._gated_proj = gated_proj
self._kwargs = OrderedDict([
('units', units),
('hidden_size', hidden_size),
('activation_dropout', activation_dropout),
('activation', activation),
('dropout', dropout),
('normalization', normalization),
('layer_norm_eps', layer_norm_eps),
('gated_proj', gated_proj),
('pre_norm', pre_norm),
])
self.dropout_layer = nn.Dropout(dropout)
self.activation_dropout_layer = nn.Dropout(activation_dropout)
self.ffn_1 = nn.Linear(in_features=units, out_features=hidden_size, bias=True)
if self._gated_proj:
self.ffn_1_gate = nn.Linear(in_features=units, out_features=hidden_size, bias=True)
self.activation = get_activation(activation)
self.ffn_2 = nn.Linear(in_features=hidden_size, out_features=units, bias=True)
self.layer_norm = get_norm_layer(normalization=normalization, in_channels=units,
epsilon=layer_norm_eps)
self.init_weights() |
Parameters
----------
units
hidden_size
activation_dropout
dropout
activation
normalization
layer_norm or no_norm
layer_norm_eps
pre_norm
Pre-layer normalization as proposed in the paper:
"[ACL2018] The Best of Both Worlds: Combining Recent Advances in
Neural Machine Translation"
This will stabilize the training of Transformers.
You may also refer to
"[Arxiv2020] Understanding the Difficulty of Training Transformers"
| __init__ | python | dmlc/gluon-nlp | src/gluonnlp/torch/layers.py | https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/torch/layers.py | Apache-2.0 |
def forward(self, data):
"""
Parameters
----------
data :
Shape (B, seq_length, C_in)
Returns
-------
out :
Shape (B, seq_length, C_out)
"""
residual = data
if self._pre_norm:
data = self.layer_norm(data)
if self._gated_proj:
out = self.activation(self.ffn_1_gate(data)) * self.ffn_1(data)
else:
out = self.activation(self.ffn_1(data))
out = self.activation_dropout_layer(out)
out = self.ffn_2(out)
out = self.dropout_layer(out)
out = out + residual
if not self._pre_norm:
out = self.layer_norm(out)
return out |
Parameters
----------
data :
Shape (B, seq_length, C_in)
Returns
-------
out :
Shape (B, seq_length, C_out)
| forward | python | dmlc/gluon-nlp | src/gluonnlp/torch/layers.py | https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/torch/layers.py | Apache-2.0 |
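Usage sketch (not from the source): the feed-forward block above exercised end to end. The class name PositionwiseFFN and the import path are assumptions; only constructor arguments shown in the signature above are used.

import torch as th
from gluonnlp.torch.layers import PositionwiseFFN

ffn = PositionwiseFFN(units=512, hidden_size=2048, dropout=0.1, activation='gelu',
                      pre_norm=True)
x = th.randn(2, 10, 512)                   # (B, seq_length, units)
print(ffn(x).shape)                        # torch.Size([2, 10, 512])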
def __init__(self, units: int, learnable=False):
"""Use a geometric sequence of timescales.
It is calculated as
[sin(wi x), cos(wi x), sin(wi x), cos(wi x), ...]
By default, we initialize wi to be (1 / 10000) ^ (1 / (units//2 - 1))
Parameters
----------
units
The number of units for positional embedding
learnable
Whether to make the Sinusoidal positional embedding learnable.
If it is turned on, we will also update the frequency of this layer.
See "[ICLR2021] On Position Embeddings in BERT" for more detail.
"""
super().__init__()
def _init_sinusoidal_base(units):
half_units = units // 2
val = np.log(10000) / (half_units - 1)
val = np.exp(np.arange(half_units, dtype=np.float32) * -val)
return val
default_sinusoidal_base = _init_sinusoidal_base(units)
self.freq = nn.Parameter(data=th.tensor(default_sinusoidal_base), requires_grad=learnable)
self._units = units
self._learnable = learnable | Use a geometric sequence of timescales.
It is calculated as
[sin(wi x), cos(wi x), sin(wi x), cos(wi x), ...]
By default, we initialize wi to be (1 / 10000) ^ (1 / (units//2 - 1))
Parameters
----------
units
The number of units for positional embedding
learnable
Whether to make the Sinusoidal positional embedding learnable.
If it is turned on, we will also update the frequency of this layer.
See "[ICLR2021] On Position Embeddings in BERT" for more detail.
| __init__ | python | dmlc/gluon-nlp | src/gluonnlp/torch/layers.py | https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/torch/layers.py | Apache-2.0 |
def forward(self, positions):
"""
Parameters
----------
positions : th.Tensor
Shape (..., )
Returns
-------
ret :
Shape (..., units)
"""
emb = positions.unsqueeze(-1) * self.freq
sin_emb = th.sin(emb)
cos_emb = th.cos(emb)
if self._units % 2 == 0:
return th.cat([sin_emb, cos_emb], dim=-1)
else:
return th.cat([sin_emb, cos_emb, th.zeros_like(positions).unsqueeze(-1)], dim=-1) |
Parameters
----------
positions : th.Tensor
Shape (..., )
Returns
-------
ret :
Shape (..., units)
| forward | python | dmlc/gluon-nlp | src/gluonnlp/torch/layers.py | https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/torch/layers.py | Apache-2.0 |
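Usage sketch (not from the source): the class name SinusoidalPositionalEmbedding and the import path are assumptions; the constructor arguments match the signature above.

import torch as th
from gluonnlp.torch.layers import SinusoidalPositionalEmbedding

pos_embed = SinusoidalPositionalEmbedding(units=64, learnable=False)
positions = th.arange(10)
print(pos_embed(positions).shape)          # torch.Size([10, 64])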
def to_torch_dtype(dtype):
"""Convert the dtype to pytorch data type
Parameters
----------
dtype
The input dtype
Returns
-------
ret
Converted dtype
"""
if isinstance(dtype, th.dtype) or dtype is None:
return dtype
dtype = np.dtype(dtype)
if dtype in numpy_to_torch_dtype_dict:
return numpy_to_torch_dtype_dict[dtype]
else:
raise KeyError(f'dtype = {dtype} is not supported for conversion') | Convert the dtype to pytorch data type
Parameters
----------
dtype
The input dtype
Returns
-------
ret
Converted dtype
| to_torch_dtype | python | dmlc/gluon-nlp | src/gluonnlp/torch/utils.py | https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/torch/utils.py | Apache-2.0 |
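Usage sketch (not from the source; import path inferred from the file path above).

import numpy as np
import torch as th
from gluonnlp.torch.utils import to_torch_dtype

print(to_torch_dtype(np.float32))          # torch.float32
print(to_torch_dtype('int64'))             # torch.int64
print(to_torch_dtype(th.float16))          # torch dtypes pass through unchanged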
def to_numpy_dtype(dtype):
"""Convert the dtype to numpy dtype
Parameters
----------
dtype
Input dtype
Returns
-------
ret
The converted dtype
"""
if dtype is None:
return None
if dtype in torch_dtype_to_numpy_dict:
return torch_dtype_to_numpy_dict[dtype]
else:
return np.dtype(dtype) | Convert the dtype to numpy dtype
Parameters
----------
dtype
Input dtype
Returns
-------
ret
The converted dtype
| to_numpy_dtype | python | dmlc/gluon-nlp | src/gluonnlp/torch/utils.py | https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/torch/utils.py | Apache-2.0 |
def share_parameters(source, target):
"""Share parameters recursively from source model to target model.
For example, if you want ``dense1`` to share ``dense0``'s weights, you can do::
        dense0 = nn.Linear(20, 20)
        dense1 = nn.Linear(20, 20)
        share_parameters(dense0, dense1)
    which is equivalent to
dense1.weight = dense0.weight
dense1.bias = dense0.bias
Parameters
----------
source : nn.Module
target : nn.Module
"""
def _named_members(module, get_members_fn, prefix='', recurse=True):
r"""Helper method for yielding various names + members of modules.
Unlike upstream torch implementation, this implementation returns
members that are known under multiple names, such as shared
parameters.
"""
modules = module.named_modules(prefix=prefix) if recurse else [(prefix, module)]
for module_prefix, module in modules:
members = get_members_fn(module)
for k, v in members:
if v is None:
continue
name = module_prefix + ('.' if module_prefix else '') + k
yield name, v
source_names = set(n for n, p in _named_members(source, lambda m: m._parameters.items()))
target_names = set(n for n, p in _named_members(target, lambda m: m._parameters.items()))
if not source_names == target_names:
raise ValueError(
'Source and target modules do not have the same set of parameters. '
f'The following parameters are missing from target: "{source_names - target_names}"'
f'The following parameters are missing from source: "{target_names - source_names}"')
for name in source_names:
module_names = name.split('.')
weight_name = module_names.pop()
tmp_source, tmp_target = source, target
for module_name in module_names:
tmp_source = tmp_source._modules[module_name]
tmp_target = tmp_target._modules[module_name]
setattr(tmp_target, weight_name, getattr(tmp_source, weight_name)) | Share parameters recursively from source model to target model.
For example, if you want ``dense1`` to share ``dense0``'s weights, you can do::
        dense0 = nn.Linear(20, 20)
        dense1 = nn.Linear(20, 20)
        share_parameters(dense0, dense1)
    which is equivalent to
dense1.weight = dense0.weight
dense1.bias = dense0.bias
Parameters
----------
source : nn.Module
target : nn.Module
| share_parameters | python | dmlc/gluon-nlp | src/gluonnlp/torch/utils.py | https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/torch/utils.py | Apache-2.0 |
def _named_members(module, get_members_fn, prefix='', recurse=True):
r"""Helper method for yielding various names + members of modules.
Unlike upstream torch implementation, this implementation returns
members that are known under multiple names, such as shared
parameters.
"""
modules = module.named_modules(prefix=prefix) if recurse else [(prefix, module)]
for module_prefix, module in modules:
members = get_members_fn(module)
for k, v in members:
if v is None:
continue
name = module_prefix + ('.' if module_prefix else '') + k
yield name, v | Helper method for yielding various names + members of modules.
Unlike upstream torch implementation, this implementation returns
members that are known under multiple names, such as shared
parameters.
| _named_members | python | dmlc/gluon-nlp | src/gluonnlp/torch/utils.py | https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/torch/utils.py | Apache-2.0 |
def move_to(obj, device=None):
"""
Parameters
----------
obj
Nested torch object
device
The target device
Returns
-------
new_obj
The objects that have been moved to device.
"""
if th.is_tensor(obj):
return obj.to(device)
elif isinstance(obj, dict):
res = {}
for k, v in obj.items():
res[k] = move_to(v, device)
return res
elif isinstance(obj, (list, tuple)):
res = []
for v in obj:
res.append(move_to(v, device))
if isinstance(obj, tuple):
res = tuple(res)
return res
else:
raise TypeError("Invalid type for move_to") |
Parameters
----------
obj
Nested torch object
device
The target device
Returns
-------
new_obj
The objects that have been moved to device.
| move_to | python | dmlc/gluon-nlp | src/gluonnlp/torch/utils.py | https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/torch/utils.py | Apache-2.0 |
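Usage sketch (not from the source; import path inferred from the file path above): move a nested batch onto a target device in one call.

import torch as th
from gluonnlp.torch.utils import move_to

batch = {'inputs': th.zeros(2, 8, dtype=th.long),
         'valid_length': th.tensor([8, 6])}
batch = move_to(batch, th.device('cuda' if th.cuda.is_available() else 'cpu'))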
def _pad_arrs_to_max_length(arrs, pad_val, dtype, batch_dim=0, round_to=None):
"""Inner Implementation of the Pad batchify
Parameters
----------
arrs
List of arrays
pad_val
The padding value
dtype
The type of the tensor
batch_dim
The dimension to insert the batch dimension.
This controls how we should construct the mini-batch.
round_to
        To round the size of the arrays to the closest multiple of round_to.
This helps ensure the shape of the input tensor.
Returns
-------
ret : th.Tensor
The returned tensor
"""
# First step is to convert the arrays to torch tensor
if not isinstance(arrs[0], th.Tensor):
arrs = [th.tensor(ele, dtype=dtype) for ele in arrs]
dtype = arrs[0].dtype if dtype is None else dtype
max_shape = list(arrs[0].shape)
assert 0 <= batch_dim <= arrs[0].ndim
for pad_axis in range(len(max_shape)):
curr_lengths = [ele.shape[pad_axis] for ele in arrs]
max_size = max(curr_lengths)
if round_to is not None:
max_size = round_to * math.ceil(max_size / round_to)
max_shape[pad_axis] = max_size
ret_shape = tuple(max_shape[:batch_dim]) + (len(arrs), ) + tuple(max_shape[batch_dim:])
# Construct the full output
ret = th.full(size=ret_shape, fill_value=pad_val, dtype=dtype)
for i, arr in enumerate(arrs):
slices = [slice(None) for _ in range(len(max_shape))]
for j in range(len(max_shape)):
if arr.shape[j] < max_shape[j]:
slices[j] = slice(0, arr.shape[j])
slices.insert(batch_dim, i)
ret[tuple(slices)] = arr
return ret | Inner Implementation of the Pad batchify
Parameters
----------
arrs
List of arrays
pad_val
The padding value
dtype
The type of the tensor
batch_dim
The dimension to insert the batch dimension.
This controls how we should construct the mini-batch.
round_to
        To round the size of the arrays to the closest multiple of round_to.
This helps ensure the shape of the input tensor.
Returns
-------
ret : th.Tensor
The returned tensor
| _pad_arrs_to_max_length | python | dmlc/gluon-nlp | src/gluonnlp/torch/data/batchify.py | https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/torch/data/batchify.py | Apache-2.0 |
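Usage sketch (not from the source; import path inferred from the file path above): pad three variable-length sequences into one tensor.

import torch as th
from gluonnlp.torch.data.batchify import _pad_arrs_to_max_length

arrs = [th.tensor([1, 2, 3]), th.tensor([4, 5]), th.tensor([6])]
batch = _pad_arrs_to_max_length(arrs, pad_val=0, dtype=th.int64, batch_dim=0, round_to=None)
print(batch)
# tensor([[1, 2, 3],
#         [4, 5, 0],
#         [6, 0, 0]])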
def __call__(self, data):
"""Batchify the input data.
The input can be list of numpy.ndarray, list of numbers or list of
th.Tensor. The arrays will be padded to the largest dimension at `axis` and then
stacked to form the final output.
Parameters
----------
data : List[np.ndarray] or List[List[dtype]] or List[th.Tensor]
List of samples to pad and stack.
Returns
-------
batch_data: th.Tensor
Data in the minibatch.
If batch_dim = 0:
Shape (N, ...)
Otherwise, the N will be inserted to the location of the batch_dim, which will be
Shape (..., N, ...)
"""
_arr_cls = th.Tensor
if isinstance(data[0], (_arr_cls, np.ndarray, list)):
padded_arr = _pad_arrs_to_max_length(data, pad_val=self._pad_val, dtype=self._dtype,
batch_dim=self._axis, round_to=self._round_to)
return padded_arr
else:
raise NotImplementedError(
"Pad() does not support multiple items, use Group(Pad(), Pad(), ...) instead") | Batchify the input data.
The input can be list of numpy.ndarray, list of numbers or list of
th.Tensor. The arrays will be padded to the largest dimension at `axis` and then
stacked to form the final output.
Parameters
----------
data : List[np.ndarray] or List[List[dtype]] or List[th.Tensor]
List of samples to pad and stack.
Returns
-------
batch_data: th.Tensor
Data in the minibatch.
If batch_dim = 0:
Shape (N, ...)
Otherwise, the N will be inserted to the location of the batch_dim, which will be
Shape (..., N, ...)
| __call__ | python | dmlc/gluon-nlp | src/gluonnlp/torch/data/batchify.py | https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/torch/data/batchify.py | Apache-2.0 |
def _stack_arrs(arrs, batch_dim, dtype):
"""
Parameters
----------
arrs
batch_dim
The batch dimension
dtype
torch dtype
Returns
-------
stacked_arr
The resulting stacked array
"""
if isinstance(arrs[0], np.ndarray):
stacked_arr = np.stack(arrs, axis=batch_dim)
return th.as_tensor(stacked_arr, dtype=dtype)
elif isinstance(arrs[0], th.Tensor):
ret = th.stack(arrs, dim=batch_dim)
if dtype is None:
dtype = ret.dtype
if ret.dtype != dtype:
return ret.type(dtype)
else:
return ret
else:
stacked_arr = np.stack([np.array(arr) for arr in arrs], axis=batch_dim)
return th.as_tensor(stacked_arr, dtype=dtype) |
Parameters
----------
arrs
batch_dim
The batch dimension
dtype
torch dtype
Returns
-------
stacked_arr
The resulting stacked array
| _stack_arrs | python | dmlc/gluon-nlp | src/gluonnlp/torch/data/batchify.py | https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/torch/data/batchify.py | Apache-2.0 |
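Usage sketch (not from the source; import path inferred from the file path above).

import torch as th
from gluonnlp.torch.data.batchify import _stack_arrs

arrs = [th.tensor([1, 2]), th.tensor([3, 4]), th.tensor([5, 6])]
print(_stack_arrs(arrs, batch_dim=0, dtype=th.float32))
# tensor([[1., 2.],
#         [3., 4.],
#         [5., 6.]])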
def __call__(self, data):
"""Batchify the input data.
Parameters
----------
data : list
            The samples to batchify. Each sample should contain N attributes.
Returns
-------
ret : tuple
A tuple of length N. Contains the batchified result of each attribute in the input.
"""
assert len(data[0]) == len(self._fn),\
            'The number of attributes in each data sample should contain' \
' {} elements'.format(len(self._fn))
ret = []
for i, ele_fn in enumerate(self._fn):
ret.append(ele_fn([ele[i] for ele in data]))
return tuple(ret) | Batchify the input data.
Parameters
----------
data : list
            The samples to batchify. Each sample should contain N attributes.
Returns
-------
ret : tuple
A tuple of length N. Contains the batchified result of each attribute in the input.
| __call__ | python | dmlc/gluon-nlp | src/gluonnlp/torch/data/batchify.py | https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/torch/data/batchify.py | Apache-2.0 |
def __call__(self, data: t_List[t_Dict]) -> t_Dict:
"""
Parameters
----------
data
The samples to batchify. Each sample should be a dictionary
Returns
-------
ret
The resulting dictionary that stores the merged samples.
"""
ret = dict()
for k, ele_fn in self._fn_dict.items():
ret[k] = ele_fn([ele[k] for ele in data])
return ret |
Parameters
----------
data
The samples to batchify. Each sample should be a dictionary
Returns
-------
ret
The resulting dictionary that stores the merged samples.
| __call__ | python | dmlc/gluon-nlp | src/gluonnlp/torch/data/batchify.py | https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/torch/data/batchify.py | Apache-2.0 |
def __call__(self, data: t_List[t_NamedTuple]) -> t_NamedTuple:
"""Batchify the input data.
Parameters
----------
data
            The samples to batchify. Each sample should be a namedtuple.
Returns
-------
ret
A namedtuple of length N. Contains the batchified result of each attribute in the input.
"""
if not isinstance(data[0], self._container):
raise ValueError('The samples should have the same type as the stored namedtuple.'
' data[0]={}, container={}'.format(data[0], self._container))
ret = []
for i, ele_fn in enumerate(self._fn_l):
ret.append(ele_fn([ele[i] for ele in data]))
return self._container(*ret) | Batchify the input data.
Parameters
----------
data
The samples to batchfy. Each sample should be a namedtuple.
Returns
-------
ret
A namedtuple of length N. Contains the batchified result of each attribute in the input.
| __call__ | python | dmlc/gluon-nlp | src/gluonnlp/torch/data/batchify.py | https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/torch/data/batchify.py | Apache-2.0 |
def forward(self, data, valid_length):
"""
Generate the representation given the inputs.
This is used in training or fine-tuning a bert model.
Parameters
----------
data
- layout = 'NT'
Shape (batch_size, seq_length, C)
- layout = 'TN'
Shape (seq_length, batch_size, C)
valid_length
Shape (batch_size,)
Returns
-------
out
- layout = 'NT'
Shape (batch_size, seq_length, C_out)
- layout = 'TN'
Shape (seq_length, batch_size, C_out)
"""
if self.layout == 'NT':
time_axis, batch_axis = 1, 0
else:
time_axis, batch_axis = 0, 1
# 1. Embed the data
attn_mask = gen_self_attn_mask(data, valid_length, attn_type='full', layout=self.layout)
out = data
all_encodings_outputs = []
additional_outputs = []
for layer_idx in range(self._num_layers):
layer = self.all_layers[layer_idx]
out, attention_weights = layer(out, attn_mask)
# out : [batch_size, seq_len, units] or [seq_len, batch_size, units]
# attention_weights : [batch_size, num_heads, seq_len, seq_len]
if self._output_all_encodings:
out = sequence_mask(out, valid_len=valid_length, axis=time_axis)
all_encodings_outputs.append(out)
if self._output_attention:
additional_outputs.append(attention_weights)
if not self._output_all_encodings:
# if self._output_all_encodings, SequenceMask is already applied above
out = sequence_mask(out, valid_len=valid_length, axis=time_axis)
return out, additional_outputs
else:
return all_encodings_outputs, additional_outputs |
Generate the representation given the inputs.
This is used in training or fine-tuning a bert model.
Parameters
----------
data
- layout = 'NT'
Shape (batch_size, seq_length, C)
- layout = 'TN'
Shape (seq_length, batch_size, C)
valid_length
Shape (batch_size,)
Returns
-------
out
- layout = 'NT'
Shape (batch_size, seq_length, C_out)
- layout = 'TN'
Shape (seq_length, batch_size, C_out)
| forward | python | dmlc/gluon-nlp | src/gluonnlp/torch/models/bert.py | https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/torch/models/bert.py | Apache-2.0 |
def forward(self, inputs, token_types, valid_length):
# pylint: disable=arguments-differ
"""Generate the representation given the inputs.
This is used in training or fine-tuning a bert model.
Parameters
----------
inputs
- layout = 'NT'
Shape (batch_size, seq_length)
- layout = 'TN'
Shape (seq_length, batch_size)
token_types
- layout = 'NT'
Shape (batch_size, seq_length)
- layout = 'TN'
                Shape (seq_length, batch_size)
If the inputs contain two sequences, we will set different token types for the first
sentence and the second sentence.
valid_length :
The valid length of each sequence
Shape (batch_size,)
Returns
-------
contextual_embedding
- layout = 'NT'
Shape (batch_size, seq_length, units).
- layout = 'TN'
Shape (seq_length, batch_size, units).
pooled_output :
This is optional. Shape (batch_size, units)
"""
if token_types is None:
token_types = th.zeros_like(inputs)
initial_embedding = self.get_initial_embedding(inputs, token_types)
prev_out = initial_embedding
outputs = []
if self._compute_layout != self._layout:
# Swap the axes if the compute_layout and layout mismatch
contextual_embeddings, additional_outputs = self.encoder(th.transpose(prev_out, 0, 1),
valid_length)
contextual_embeddings = th.transpose(contextual_embeddings, 0, 1)
else:
contextual_embeddings, additional_outputs = self.encoder(prev_out, valid_length)
outputs.append(contextual_embeddings)
if self.use_pooler:
pooled_out = self.apply_pooling(contextual_embeddings)
outputs.append(pooled_out)
return tuple(outputs) if len(outputs) > 1 else outputs[0] | Generate the representation given the inputs.
This is used in training or fine-tuning a bert model.
Parameters
----------
inputs
- layout = 'NT'
Shape (batch_size, seq_length)
- layout = 'TN'
Shape (seq_length, batch_size)
token_types
- layout = 'NT'
Shape (batch_size, seq_length)
- layout = 'TN'
                Shape (seq_length, batch_size)
If the inputs contain two sequences, we will set different token types for the first
sentence and the second sentence.
valid_length :
The valid length of each sequence
Shape (batch_size,)
Returns
-------
contextual_embedding
- layout = 'NT'
Shape (batch_size, seq_length, units).
- layout = 'TN'
Shape (seq_length, batch_size, units).
pooled_output :
This is optional. Shape (batch_size, units)
| forward | python | dmlc/gluon-nlp | src/gluonnlp/torch/models/bert.py | https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/torch/models/bert.py | Apache-2.0 |
def get_initial_embedding(self, inputs, token_types=None):
"""Get the initial token embeddings that considers the token type and positional embeddings
Parameters
----------
inputs
- layout = 'NT'
Shape (batch_size, seq_length)
- layout = 'TN'
Shape (seq_length, batch_size)
token_types
- layout = 'NT'
Shape (batch_size, seq_length)
- layout = 'TN'
Shape (seq_length, batch_size)
If None, it will be initialized as all zero
Returns
-------
embedding
The initial embedding that will be fed into the encoder
- layout = 'NT'
Shape (batch_size, seq_length, C_emb)
- layout = 'TN'
Shape (seq_length, batch_size, C_emb)
"""
if self.layout == 'NT':
time_axis, batch_axis = 1, 0
else:
time_axis, batch_axis = 0, 1
embedding = self.word_embed(inputs)
if token_types is None:
token_types = th.zeros_like(inputs)
type_embedding = self.token_type_embed(token_types)
embedding = embedding + type_embedding
if self.pos_embed_type is not None:
positional_embedding = self.token_pos_embed(
th.arange(end=inputs.shape[time_axis], device=inputs.device))
positional_embedding = th.unsqueeze(positional_embedding, dim=batch_axis)
embedding = embedding + positional_embedding
# Extra layer normalization plus dropout
embedding = self.embed_layer_norm(embedding)
embedding = self.embed_dropout(embedding)
return embedding | Get the initial token embeddings that considers the token type and positional embeddings
Parameters
----------
inputs
- layout = 'NT'
Shape (batch_size, seq_length)
- layout = 'TN'
Shape (seq_length, batch_size)
token_types
- layout = 'NT'
Shape (batch_size, seq_length)
- layout = 'TN'
Shape (seq_length, batch_size)
If None, it will be initialized as all zero
Returns
-------
embedding
The initial embedding that will be fed into the encoder
- layout = 'NT'
Shape (batch_size, seq_length, C_emb)
- layout = 'TN'
Shape (seq_length, batch_size, C_emb)
| get_initial_embedding | python | dmlc/gluon-nlp | src/gluonnlp/torch/models/bert.py | https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/torch/models/bert.py | Apache-2.0 |
def apply_pooling(self, sequence):
"""Generate the representation given the inputs.
This is used for pre-training or fine-tuning a bert model.
Get the first token of the whole sequence, which is [CLS].
sequence
- layout = 'NT'
Shape (batch_size, sequence_length, units)
- layout = 'TN'
Shape (sequence_length, batch_size, units)
return:
Shape (batch_size, units)
"""
if self.layout == 'NT':
outputs = sequence[:, 0, :]
else:
outputs = sequence[0, :, :]
return th.tanh(self.pooler(outputs)) | Generate the representation given the inputs.
This is used for pre-training or fine-tuning a bert model.
Get the first token of the whole sequence, which is [CLS].
sequence
- layout = 'NT'
Shape (batch_size, sequence_length, units)
- layout = 'TN'
Shape (sequence_length, batch_size, units)
return:
Shape (batch_size, units)
| apply_pooling | python | dmlc/gluon-nlp | src/gluonnlp/torch/models/bert.py | https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/torch/models/bert.py | Apache-2.0 |
def from_cfg(cls, cfg, use_pooler=True) -> 'BertModel':
"""
Parameters
----------
cfg
Configuration
use_pooler
Whether to output the pooled feature
Returns
-------
ret
The constructed BertModel
"""
cfg = BertModel.get_cfg().clone_merge(cfg)
assert cfg.VERSION == 1, 'Wrong version!'
return cls(vocab_size=cfg.MODEL.vocab_size, units=cfg.MODEL.units,
hidden_size=cfg.MODEL.hidden_size, num_layers=cfg.MODEL.num_layers,
num_heads=cfg.MODEL.num_heads, max_length=cfg.MODEL.max_length,
hidden_dropout_prob=cfg.MODEL.hidden_dropout_prob,
attention_dropout_prob=cfg.MODEL.attention_dropout_prob,
num_token_types=cfg.MODEL.num_token_types,
pos_embed_type=cfg.MODEL.pos_embed_type, activation=cfg.MODEL.activation,
layer_norm_eps=cfg.MODEL.layer_norm_eps, use_pooler=use_pooler,
layout=cfg.MODEL.layout, compute_layout=cfg.MODEL.compute_layout) |
Parameters
----------
cfg
Configuration
use_pooler
Whether to output the pooled feature
Returns
-------
ret
The constructed BertModel
| from_cfg | python | dmlc/gluon-nlp | src/gluonnlp/torch/models/bert.py | https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/torch/models/bert.py | Apache-2.0 |
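A small sketch of configuration-driven construction, using only calls that appear in this file (get_cfg, clone_merge, from_cfg); passing None to clone_merge simply returns a clone of the default config.
from gluonnlp.torch.models.bert import BertModel

base_cfg = BertModel.get_cfg()
cfg = base_cfg.clone_merge(None)                     # None -> plain clone (see clone_merge below)
model = BertModel.from_cfg(cfg, use_pooler=False)    # skip the pooler head
print(cfg.MODEL.units, cfg.MODEL.num_layers, cfg.MODEL.num_heads)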
def forward(self, inputs, token_types, valid_length, masked_positions):
"""Getting the scores of the masked positions.
Parameters
----------
inputs
- layout = 'NT'
Shape (batch_size, seq_length)
- layout = 'TN'
Shape (seq_length, batch_size)
token_types
If the inputs contain two sequences, we will set different token types for the first
sentence and the second sentence.
- layout = 'NT'
Shape (batch_size, seq_length)
- layout = 'TN'
Shape (seq_length, batch_size)
valid_length :
The valid length of each sequence
Shape (batch_size,)
masked_positions :
The masked position of the sequence
Shape (batch_size, num_masked_positions).
Returns
-------
contextual_embedding
- layout = 'NT'
Shape (batch_size, seq_length, units).
- layout = 'TN'
Shape (seq_length, batch_size, units)
pooled_out
Shape (batch_size, units)
mlm_scores :
Shape (batch_size, num_masked_positions, vocab_size)
"""
contextual_embeddings, pooled_out = self.backbone_model(inputs, token_types, valid_length)
if self.layout == 'NT':
mlm_features = contextual_embeddings[
th.arange(contextual_embeddings.shape[0]).unsqueeze(1), masked_positions]
else:
contextual_embeddings_t = th.transpose(contextual_embeddings, 0, 1)
mlm_features = contextual_embeddings_t[
th.arange(contextual_embeddings_t.shape[0]).unsqueeze(1), masked_positions]
mlm_scores = self.mlm_decoder(mlm_features)
return contextual_embeddings, pooled_out, mlm_scores | Getting the scores of the masked positions.
Parameters
----------
inputs
- layout = 'NT'
Shape (batch_size, seq_length)
- layout = 'TN'
Shape (seq_length, batch_size)
token_types
If the inputs contain two sequences, we will set different token types for the first
sentence and the second sentence.
- layout = 'NT'
Shape (batch_size, seq_length)
- layout = 'TN'
Shape (seq_length, batch_size)
valid_length :
The valid length of each sequence
Shape (batch_size,)
masked_positions :
The masked position of the sequence
Shape (batch_size, num_masked_positions).
Returns
-------
contextual_embedding
- layout = 'NT'
Shape (batch_size, seq_length, units).
- layout = 'TN'
Shape (seq_length, batch_size, units)
pooled_out
Shape (batch_size, units)
mlm_scores :
Shape (batch_size, num_masked_positions, vocab_size)
| forward | python | dmlc/gluon-nlp | src/gluonnlp/torch/models/bert.py | https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/torch/models/bert.py | Apache-2.0 |
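The masked-position gather above relies on advanced indexing; a stand-alone illustration with toy tensors (all sizes are made up).
import torch as th

batch_size, seq_length, units = 2, 6, 4
contextual_embeddings = th.arange(batch_size * seq_length * units,
                                  dtype=th.float32).reshape(batch_size, seq_length, units)
masked_positions = th.tensor([[1, 4], [0, 3]])    # two masked tokens per sequence

# Row indices broadcast against the per-row masked positions
mlm_features = contextual_embeddings[
    th.arange(batch_size).unsqueeze(1), masked_positions]
print(mlm_features.shape)   # torch.Size([2, 2, 4]) -> (batch_size, num_masked_positions, units)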
def __init__(self, backbone_cfg):
"""
Parameters
----------
backbone_cfg
The cfg of the backbone model
"""
super().__init__()
self.backbone_model = BertModel.from_cfg(backbone_cfg)
# Construct nsp_classifier for next sentence prediction
self.nsp_classifier = th.nn.Linear(out_features=2, in_features=self.backbone_model.units)
self.mlm_decoder = th.nn.Sequential(
th.nn.Linear(out_features=self.backbone_model.units,
in_features=self.backbone_model.units),
get_activation(self.backbone_model.activation),
th.nn.LayerNorm(self.backbone_model.units, eps=self.backbone_model.layer_norm_eps),
th.nn.Linear(out_features=self.backbone_model.vocab_size,
in_features=self.backbone_model.units))
# TODO such weight sharing not supported in torchscript
self.mlm_decoder[-1].weight = self.backbone_model.word_embed.weight |
Parameters
----------
backbone_cfg
The cfg of the backbone model
| __init__ | python | dmlc/gluon-nlp | src/gluonnlp/torch/models/bert.py | https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/torch/models/bert.py | Apache-2.0 |
def forward(self, inputs, token_types, valid_length, masked_positions):
"""Generate the representation given the inputs.
This is used in training or fine-tuning a bert model.
Parameters
----------
inputs
- layout = 'NT'
Shape (batch_size, seq_length)
- layout = 'TN'
Shape (seq_length, batch_size)
token_types
- layout = 'NT'
Shape (batch_size, seq_length)
- layout = 'TN'
Shape (seq_length, batch_size)
If the inputs contain two sequences, we will set different token types for the first
sentence and the second sentence.
valid_length
The valid length of each sequence
Shape (batch_size,)
masked_positions
The masked position of the sequence
Shape (batch_size, num_masked_positions).
Returns
-------
contextual_embedding
- layout = 'NT'
Shape (batch_size, seq_length, units).
- layout = 'TN'
Shape (seq_length, batch_size, units).
pooled_out
Shape (batch_size, units)
nsp_score :
Shape (batch_size, 2)
mlm_scores :
Shape (batch_size, num_masked_positions, vocab_size)
"""
contextual_embeddings, pooled_out = self.backbone_model(inputs, token_types, valid_length)
nsp_score = self.nsp_classifier(pooled_out)
if self.layout == 'NT':
mlm_features = contextual_embeddings[
th.arange(contextual_embeddings.shape[0]).unsqueeze(1), masked_positions]
else:
mlm_features = th.transpose(contextual_embeddings, 0,
1)[th.arange(contextual_embeddings.shape[1]).unsqueeze(1),
masked_positions]
mlm_scores = self.mlm_decoder(mlm_features)
return contextual_embeddings, pooled_out, nsp_score, mlm_scores | Generate the representation given the inputs.
This is used in training or fine-tuning a bert model.
Parameters
----------
inputs
- layout = 'NT'
Shape (batch_size, seq_length)
- layout = 'TN'
Shape (seq_length, batch_size)
token_types
- layout = 'NT'
Shape (batch_size, seq_length)
- layout = 'TN'
Shape (seq_length, batch_size)
If the inputs contain two sequences, we will set different token types for the first
sentence and the second sentence.
valid_length
The valid length of each sequence
Shape (batch_size,)
masked_positions
The masked position of the sequence
Shape (batch_size, num_masked_positions).
Returns
-------
contextual_embedding
- layout = 'NT'
Shape (batch_size, seq_length, units).
- layout = 'TN'
Shape (seq_length, batch_size, units).
pooled_out
Shape (batch_size, units)
nsp_score :
Shape (batch_size, 2)
mlm_scores :
Shape (batch_size, num_masked_positions, vocab_size)
| forward | python | dmlc/gluon-nlp | src/gluonnlp/torch/models/bert.py | https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/torch/models/bert.py | Apache-2.0 |
def __init__(self, backbone_cfg):
"""
Parameters
----------
backbone_cfg
The cfg of the backbone model
"""
super().__init__()
self.backbone_model = BertModel.from_cfg(backbone_cfg)
self.quickthought = th.nn.Sequential(
th.nn.Linear(out_features=self.backbone_model.units,
in_features=self.backbone_model.units),
get_activation(self.backbone_model.activation),
th.nn.LayerNorm(self.backbone_model.units, eps=self.backbone_model.layer_norm_eps))
self.mlm_decoder = th.nn.Sequential(
th.nn.Linear(out_features=self.backbone_model.units,
in_features=self.backbone_model.units),
get_activation(self.backbone_model.activation),
th.nn.LayerNorm(self.backbone_model.units, eps=self.backbone_model.layer_norm_eps),
th.nn.Linear(out_features=self.backbone_model.vocab_size,
in_features=self.backbone_model.units))
# TODO such weight sharing not supported in torchscript
self.mlm_decoder[-1].weight = self.backbone_model.word_embed.weight |
Parameters
----------
backbone_cfg
The cfg of the backbone model
| __init__ | python | dmlc/gluon-nlp | src/gluonnlp/torch/models/bert.py | https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/torch/models/bert.py | Apache-2.0 |
def forward(self, inputs, token_types, valid_length, masked_positions):
"""Generate the representation given the inputs.
This is used in training or fine-tuning a bert model.
Parameters
----------
inputs
- layout = 'NT'
Shape (batch_size, seq_length)
- layout = 'TN'
Shape (seq_length, batch_size)
token_types
- layout = 'NT'
Shape (batch_size, seq_length)
- layout = 'TN'
Shape (seq_length, batch_size)
If the inputs contain two sequences, we will set different token types for the first
sentence and the second sentence.
valid_length
The valid length of each sequence
Shape (batch_size,)
masked_positions
The masked position of the sequence with respect to flattened batch
Shape (N, ) for N masked positions across whole batch.
Returns
-------
contextual_embedding
- layout = 'NT'
Shape (batch_size, seq_length, units).
- layout = 'TN'
Shape (seq_length, batch_size, units).
pooled_out
Shape (batch_size, units)
mlm_scores :
Shape (N, vocab_size)
"""
assert len(inputs) % 2 == 0, 'Model expects QuickThought paired inputs'
contextual_embeddings, pooled_out = self.backbone_model(inputs, token_types, valid_length)
if self.layout == 'NT':
mlm_features = contextual_embeddings.flatten(0, 1)[masked_positions]
else:
mlm_features = th.transpose(contextual_embeddings, 0, 1).flatten(0, 1)[masked_positions]
mlm_scores = self.mlm_decoder(mlm_features)
qt_embeddings = self.quickthought(pooled_out)
qt_similarity = self._cosine_similarity(qt_embeddings[:len(inputs) // 2],
qt_embeddings[len(inputs) // 2:])
return contextual_embeddings, pooled_out, mlm_scores, qt_similarity | Generate the representation given the inputs.
This is used in training or fine-tuning a bert model.
Parameters
----------
inputs
- layout = 'NT'
Shape (batch_size, seq_length)
- layout = 'TN'
Shape (seq_length, batch_size)
token_types
- layout = 'NT'
Shape (batch_size, seq_length)
- layout = 'TN'
Shape (seq_length, batch_size)
If the inputs contain two sequences, we will set different token types for the first
sentence and the second sentence.
valid_length
The valid length of each sequence
Shape (batch_size,)
masked_positions
The masked position of the sequence with respect to flattened batch
Shape (N, ) for N masked positions across whole batch.
Returns
-------
contextual_embedding
- layout = 'NT'
Shape (batch_size, seq_length, units).
- layout = 'TN'
Shape (seq_length, batch_size, units).
pooled_out
Shape (batch_size, units)
mlm_scores :
Shape (N, vocab_size)
| forward | python | dmlc/gluon-nlp | src/gluonnlp/torch/models/bert.py | https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/torch/models/bert.py | Apache-2.0 |
def __init__(self, units: int = 512, hidden_size: int = 2048, num_heads: int = 8,
attention_dropout_prob: float = 0.1, hidden_dropout_prob: float = 0.1,
activation_dropout_prob: float = 0.0, layer_norm_eps: float = 1e-12,
pre_norm: bool = False, use_qkv_bias: bool = True, activation: str = 'relu',
layout='NT'):
"""
Parameters
----------
units
hidden_size
num_heads
attention_dropout_prob
hidden_dropout_prob
activation_dropout_prob
layer_norm_eps
pre_norm
Whether to attach the normalization layer before attention layer
If pre_norm:
data -> norm(data) -> attn -> res(+data) -> ffn
Else:
data -> attn -> norm(res(+data)) -> ffn
use_qkv_bias
Whether to use bias for self attention
activation
The activation
layout
The layout
"""
super().__init__()
self._units = units
self._hidden_size = hidden_size
self._num_heads = num_heads
self._attention_dropout_prob = attention_dropout_prob
self._hidden_dropout_prob = hidden_dropout_prob
self._activation_dropout_prob = activation_dropout_prob
self._pre_norm = pre_norm
self._layout = layout
assert layout in ['TN', 'NT'], 'Invalid layout received = {}. ' \
'Only "TN" and "NT" are accepted!'.format(layout)
assert self._units % self._num_heads == 0, 'units must be divisible by the number of heads'
self.dropout_layer = nn.Dropout(hidden_dropout_prob)
self.attn_qkv = nn.Linear(out_features=3 * units, in_features=units, bias=use_qkv_bias)
self.attention_proj = nn.Linear(out_features=units, in_features=units, bias=True)
attention_layout = 'NTK' if self._layout == 'NT' else 'TNK'
self.attention_cell = \
MultiHeadAttentionCell(
query_units=self._units,
num_heads=self._num_heads,
attention_dropout=self._attention_dropout_prob,
scaled=True,
layout=attention_layout
)
self.layer_norm = nn.LayerNorm(eps=layer_norm_eps, normalized_shape=units)
self.ffn = PositionwiseFFN(units=units, hidden_size=hidden_size,
dropout=hidden_dropout_prob,
activation_dropout=activation_dropout_prob,
layer_norm_eps=layer_norm_eps, activation=activation,
pre_norm=pre_norm) |
Parameters
----------
units
hidden_size
num_heads
attention_dropout_prob
hidden_dropout_prob
activation_dropout_prob
layer_norm_eps
pre_norm
Whether to attach the normalization layer before attention layer
If pre_norm:
data -> norm(data) -> attn -> res(+data) -> ffn
Else:
data -> attn -> norm(res(+data)) -> ffn
use_qkv_bias
Whether to use bias for self attention
activation
The activation
layout
The layout
| __init__ | python | dmlc/gluon-nlp | src/gluonnlp/torch/models/transformer.py | https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/torch/models/transformer.py | Apache-2.0 |
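A minimal sketch of the pre_norm/post_norm residual arrangements described in the docstring above; `sublayer` is a stand-in for the attention or FFN block and is purely illustrative.
import torch as th
from torch import nn

def residual_block(data, sublayer, layer_norm, pre_norm):
    residual = data
    if pre_norm:                       # data -> norm(data) -> sublayer -> res(+data)
        out = sublayer(layer_norm(data)) + residual
    else:                              # data -> sublayer -> norm(res(+data))
        out = layer_norm(sublayer(data) + residual)
    return out

units = 8
x = th.randn(2, 5, units)
ln = nn.LayerNorm(units)
ffn = nn.Linear(units, units)          # stand-in sublayer
print(residual_block(x, ffn, ln, pre_norm=True).shape)    # torch.Size([2, 5, 8])
print(residual_block(x, ffn, ln, pre_norm=False).shape)   # torch.Size([2, 5, 8])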
def forward(self, data, attn_mask):
"""
Parameters
----------
data :
If layout == 'NT'
Shape (batch_size, seq_length, C_in)
Else
Shape (seq_length, batch_size, C_in)
attn_mask :
Shape (batch_size, seq_length, seq_length)
Returns
-------
out :
If layout == 'NT'
Shape (batch_size, seq_length, C_out)
Else
Shape (seq_length, batch_size, C_out)
attn_weight :
Shape (batch_size, seq_length, seq_length)
"""
residual = data
if self._pre_norm:
data = self.layer_norm(data)
query, key, value = th.split(self.attn_qkv(data), self._units, dim=-1)
query = th.reshape(query, query.shape[:2] + (self._num_heads, -1))
key = th.reshape(key, key.shape[:2] + (self._num_heads, -1))
value = th.reshape(value, value.shape[:2] + (self._num_heads, -1))
out, [_, attn_weight] = self.attention_cell(query, key, value, attn_mask)
out = self.attention_proj(out)
out = self.dropout_layer(out)
out = out + residual
if not self._pre_norm:
out = self.layer_norm(out)
out = self.ffn(out)
return out, attn_weight |
Parameters
----------
data :
If layout == 'NT'
Shape (batch_size, seq_length, C_in)
Else
Shape (seq_length, batch_size, C_in)
attn_mask :
Shape (batch_size, seq_length, seq_length)
Returns
-------
out :
If layout == 'NT'
Shape (batch_size, seq_length, C_out)
Else
Shape (seq_length, batch_size, C_out)
attn_weight :
Shape (batch_size, seq_length, seq_length)
| forward | python | dmlc/gluon-nlp | src/gluonnlp/torch/models/transformer.py | https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/torch/models/transformer.py | Apache-2.0 |
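The attn_mask argument above has shape (batch_size, seq_length, seq_length); a hand-rolled equivalent built from valid lengths with plain torch (toy sizes; the library's own mask helpers serve the same purpose).
import torch as th

valid_length = th.tensor([5, 3])
seq_length = 6
steps = th.arange(seq_length)                                   # (seq_length,)
valid = steps.reshape(1, -1) < valid_length.reshape(-1, 1)      # (batch_size, seq_length)
# position i may attend to position j only if both are within the valid length
attn_mask = valid.unsqueeze(1) & valid.unsqueeze(2)             # (batch_size, seq_length, seq_length)
print(attn_mask.shape)    # torch.Size([2, 6, 6])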
def __init__(self, units: int = 512, mem_units: Optional[int] = None, hidden_size: int = 2048,
num_heads: int = 8, activation_dropout: float = 0.0, dropout: float = 0.1,
attention_dropout: float = 0.1, layer_norm_eps: float = 1E-5,
activation: str = 'relu', gated_proj: bool = False, pre_norm: bool = False,
use_qkv_bias: bool = True, layout='NT'):
"""
Parameters
----------
units
mem_units
The number of units in the memory. By default, it is initialized to be the
same as the units.
hidden_size
num_heads
activation_dropout
dropout
attention_dropout
layer_norm_eps
activation
gated_proj
pre_norm
Whether to apply normalization before the attention layer
use_qkv_bias
Whether to use bias for both self attention and contextual attention
layout
Layout of the input
"""
super().__init__()
self._units = units
if mem_units is None:
mem_units = units
self._mem_units = mem_units
self._pre_norm = pre_norm
self._num_heads = num_heads
self._attention_dropout = attention_dropout
self._layout = layout
assert layout in ['TN', 'NT'], 'Invalid layout received = {}. ' \
'Only "TN" and "NT" are accepted!'.format(layout)
attention_layout = 'NTK' if layout == 'NT' else 'TNK'
self.dropout_layer = nn.Dropout(dropout)
if units % num_heads:
raise ValueError('In Transformer, units should be divisible by the number of '
'heads. Received units={}, num_heads={}'.format(units, num_heads))
self.attn_in_qkv = nn.Linear(out_features=3 * units, in_features=units, bias=use_qkv_bias)
self.self_attention = MultiHeadAttentionCell(query_units=units, num_heads=num_heads,
attention_dropout=self._attention_dropout,
layout=attention_layout)
self.proj_in = nn.Linear(out_features=units, in_features=units, bias=True)
self.attn_inter_q = nn.Linear(out_features=units, in_features=units, bias=use_qkv_bias)
self.attn_inter_k = nn.Linear(out_features=units, in_features=mem_units, bias=use_qkv_bias)
self.attn_inter_v = nn.Linear(out_features=units, in_features=mem_units, bias=use_qkv_bias)
self.inter_attention = MultiHeadAttentionCell(query_units=units, num_heads=num_heads,
attention_dropout=self._attention_dropout,
layout=attention_layout)
self.proj_inter = nn.Linear(in_features=units, out_features=units, bias=True)
self.ln_in = nn.LayerNorm(eps=layer_norm_eps, normalized_shape=units)
self.ln_inter = nn.LayerNorm(eps=layer_norm_eps, normalized_shape=units)
self.ffn = PositionwiseFFN(units=units, hidden_size=hidden_size, dropout=dropout,
activation_dropout=activation_dropout,
layer_norm_eps=layer_norm_eps, activation=activation,
gated_proj=gated_proj, pre_norm=pre_norm)
self.init_weights() |
Parameters
----------
units
mem_units
The number of units in the memory. By default, it is initialized to be the
same as the units.
hidden_size
num_heads
activation_dropout
dropout
attention_dropout
layer_norm_eps
activation
gated_proj
pre_norm
Whether to apply normalization before the attention layer
use_qkv_bias
Whether to use bias for both self attention and contextual attention
layout
Layout of the input
| __init__ | python | dmlc/gluon-nlp | src/gluonnlp/torch/models/transformer.py | https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/torch/models/transformer.py | Apache-2.0 |
def forward(self, data, mem, self_causal_mask, mem_attn_mask):
"""
Parameters
----------
data :
- layout = 'NT'
Shape (batch_size, seq_length, C_in)
- layout = 'TN'
Shape (seq_length, batch_size, C_in)
mem :
- layout = 'NT'
Shape (batch_size, mem_length, C_mem)
- layout = 'TN'
Shape (mem_length, batch_size, C_mem)
self_causal_mask :
Shape (batch_size, seq_length, seq_length)
Mask for the causal self-attention.
self_causal_mask[i, j, :] masks the elements that token `j` attends to.
To understand the self-causal attention mask, we can look at the following example:
['I', 'can', 'now', 'use', 'numpy', 'in', 'Gluon@@', 'NLP']
'I': 1, 0, 0, 0, 0, 0, 0, 0
'can': 1, 1, 0, 0, 0, 0, 0, 0
'now': 1, 1, 1, 0, 0, 0, 0, 0
'use': 1, 1, 1, 1, 0, 0, 0, 0
'numpy': 1, 1, 1, 1, 1, 0, 0, 0
'in': 1, 1, 1, 1, 1, 1, 0, 0
'Gluon@@': 1, 1, 1, 1, 1, 1, 1, 0
'NLP': 1, 1, 1, 1, 1, 1, 1, 1
mem_attn_mask :
Shape (batch_size, seq_length, mem_length)
Mask between the decoding input and the memory.
['numpy', 'in', 'Gluon@@', 'NLP']
'I': 1, 1, 1, 1
'can': 1, 1, 1, 1
'now': 1, 1, 1, 1
'use': 1, 1, 1, 1
Returns
-------
out :
- layout = 'NT'
Shape (batch_size, seq_length, C_out)
- layout = 'TN'
Shape (seq_length, batch_size, C_out)
"""
# 1. Get the causal self-attention value
residual = data
if self._pre_norm:
data = self.ln_in(data)
self_query, self_key, self_value = th.split(self.attn_in_qkv(data), self._units, dim=-1)
out, [_, self_attn_weight] = self.self_attention(
self_query.reshape((self_query.shape[0], self_query.shape[1], self._num_heads, -1)),
self_key.reshape((self_key.shape[0], self_key.shape[1], self._num_heads, -1)),
self_value.reshape((self_value.shape[0], self_value.shape[1], self._num_heads, -1)),
self_causal_mask)
out = self.proj_in(out)
out = self.dropout_layer(out)
out = out + residual
if not self._pre_norm:
out = self.ln_in(out)
# 2. Attend to the contextual memory
data = out
residual = data
if self._pre_norm:
data = self.ln_inter(data)
out, [_, context_attn_weight] = self.inter_attention(
th.reshape(self.attn_inter_q(data),
(data.shape[0], data.shape[1], self._num_heads, -1)),
th.reshape(self.attn_inter_k(mem), (mem.shape[0], mem.shape[1], self._num_heads, -1)),
th.reshape(self.attn_inter_v(mem), (mem.shape[0], mem.shape[1], self._num_heads, -1)),
mem_attn_mask)
out = self.proj_inter(out)
out = self.dropout_layer(out)
out = out + residual
if not self._pre_norm:
out = self.ln_inter(out)
# 3. Encode the output via an FFN layer
out = self.ffn(out)
return out |
Parameters
----------
data :
- layout = 'NT'
Shape (batch_size, seq_length, C_in)
- layout = 'TN'
Shape (seq_length, batch_size, C_in)
mem :
- layout = 'NT'
Shape (batch_size, mem_length, C_mem)
- layout = 'TN'
Shape (mem_length, batch_size, C_mem)
self_causal_mask :
Shape (batch_size, seq_length, seq_length)
Mask for the causal self-attention.
self_causal_mask[i, j, :] masks the elements that token `j` attends to.
To understand the self-causal attention mask, we can look at the following example:
['I', 'can', 'now', 'use', 'numpy', 'in', 'Gluon@@', 'NLP']
'I': 1, 0, 0, 0, 0, 0, 0, 0
'can': 1, 1, 0, 0, 0, 0, 0, 0
'now': 1, 1, 1, 0, 0, 0, 0, 0
'use': 1, 1, 1, 1, 0, 0, 0, 0
'numpy': 1, 1, 1, 1, 1, 0, 0, 0
'in': 1, 1, 1, 1, 1, 1, 0, 0
'Gluon@@': 1, 1, 1, 1, 1, 1, 1, 0
'NLP': 1, 1, 1, 1, 1, 1, 1, 1
mem_attn_mask :
Shape (batch_size, seq_length, mem_length)
Mask between the decoding input and the memory.
['numpy', 'in', 'Gluon@@', 'NLP']
'I': 1, 1, 1, 1
'can': 1, 1, 1, 1
'now': 1, 1, 1, 1
'use': 1, 1, 1, 1
Returns
-------
out :
- layout = 'NT'
Shape (batch_size, seq_length, C_out)
- layout = 'TN'
Shape (seq_length, batch_size, C_out)
| forward | python | dmlc/gluon-nlp | src/gluonnlp/torch/models/transformer.py | https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/torch/models/transformer.py | Apache-2.0 |
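The self_causal_mask pattern tabulated in the docstring above is just a lower-triangular matrix broadcast over the batch; a toy construction.
import torch as th

batch_size, seq_length = 1, 8
causal = th.tril(th.ones(seq_length, seq_length, dtype=th.bool))   # row j attends to positions <= j
self_causal_mask = causal.unsqueeze(0).expand(batch_size, -1, -1)  # (batch_size, seq_length, seq_length)
print(self_causal_mask[0].int())    # matches the 8x8 table in the docstring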
def init_states(self, batch_size, device=None, dtype='float32'):
"""Initialize the states required for incremental decoding
Parameters
----------
batch_size
device
dtype
Returns
-------
init_key
- layout = 'NT'
Shape (batch_size, 0, N, C_key)
- layout = 'TN'
Shape (0, batch_size, N, C_key)
init_value :
- layout = 'NT'
Shape (batch_size, 0, N, C_value)
- layout = 'TN'
Shape (0, batch_size, N, C_value)
"""
dtype = to_torch_dtype(dtype)
if self.layout == 'NT':
init_key = th.zeros(
size=(batch_size, 0, self._num_heads, self._units // self._num_heads),
device=device, dtype=dtype)
init_value = th.zeros(
size=(batch_size, 0, self._num_heads, self._units // self._num_heads),
device=device, dtype=dtype)
else:
init_key = th.zeros(
size=(0, batch_size, self._num_heads, self._units // self._num_heads),
device=device, dtype=dtype)
init_value = th.zeros(
size=(0, batch_size, self._num_heads, self._units // self._num_heads),
device=device, dtype=dtype)
return init_key, init_value | Initialize the states required for incremental decoding
Parameters
----------
batch_size
device
dtype
Returns
-------
init_key
- layout = 'NT'
Shape (batch_size, 0, N, C_key)
- layout = 'TN'
Shape (0, batch_size, N, C_key)
init_value :
- layout = 'NT'
Shape (batch_size, 0, N, C_value)
- layout = 'TN'
Shape (0, batch_size, N, C_value)
| init_states | python | dmlc/gluon-nlp | src/gluonnlp/torch/models/transformer.py | https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/torch/models/transformer.py | Apache-2.0 |
def incremental_decode(self, data, states, mem, mem_valid_length, mem_attn_mask=None):
"""Incrementally generate the output given the decoder input.
Parameters
----------
data
Shape (batch_size, C_in)
states
The previous states, contains
1. layout = 'NT':
- prev_multi_key
Shape (batch_size, prev_seq_length, num_heads, C_key)
- prev_multi_value
Shape (batch_size, prev_seq_length, num_heads, C_value)
2. layout = 'TN'
- prev_multi_key
Shape (prev_seq_length, batch_size, num_heads, C_key)
- prev_multi_value
Shape (prev_seq_length, batch_size, num_heads, C_value)
mem
The memory
1. layout = 'NT':
Shape (batch_size, mem_length, C_mem)
2. layout = 'TN'
Shape (mem_length, batch_size, C_mem)
mem_valid_length
Valid length of the memory
Shape (batch_size,)
mem_attn_mask
The attention mask between data and the memory
Has shape (batch_size, 1, mem_length)
Returns
-------
out
Shape (batch_size, C_out)
updated_states
- new_key
Shape (batch_size, prev_seq_length + 1, num_heads, C_key)
- new_value
Shape (batch_size, prev_seq_length + 1, num_heads, C_value)
"""
batch_size = data.shape[0]
if self.layout == 'NT':
time_axis = 1
else:
time_axis = 0
data = data.unsqueeze(time_axis)
residual = data
if self._pre_norm:
data = self.ln_in(data)
# Shape (B, prev_L, #Head, C_K), (B, prev_L, #Head, C_V)
# or (prev_L, B, #Head, C_K), (prev_L, B, #Head, C_V)
prev_key, prev_value = states
if mem_attn_mask is None:
mem_attn_mask = gen_mem_attn_mask(mem, mem_valid_length, data, None, layout=self.layout)
# 1. Get the causal self-attention value, we need to attend to both the current data
# and the previous stored key/values
# Shape (B, 1, 3 * num_heads * C_key)
# or (1, B, 3 * num_heads * C_key)
step_qkv = self.attn_in_qkv(data)
step_query, step_key, step_value = th.split(step_qkv, self._units, dim=-1)
step_query = th.reshape(
step_query, shape=(step_query.shape[0], step_query.shape[1], self._num_heads, -1))
step_key = th.reshape(step_key,
shape=(step_key.shape[0], step_key.shape[1], self._num_heads, -1))
step_value = th.reshape(
step_value, shape=(step_value.shape[0], step_value.shape[1], self._num_heads, -1))
new_key = th.cat([prev_key, step_key], dim=time_axis)
new_value = th.cat([prev_value, step_value], dim=time_axis)
out, [_, attn_weight] = self.self_attention(step_query, new_key, new_value, None)
out = self.proj_in(out)
out = self.dropout_layer(out)
out = out + residual
if not self._pre_norm:
out = self.ln_in(out)
# 2. Attend to the contextual memory
data = out
residual = data
if self._pre_norm:
data = self.ln_inter(data)
out, _ = self.inter_attention(
th.reshape(self.attn_inter_q(data),
shape=(data.shape[0], data.shape[1], self._num_heads, -1)),
th.reshape(self.attn_inter_k(mem),
shape=(mem.shape[0], mem.shape[1], self._num_heads, -1)),
th.reshape(self.attn_inter_v(mem),
shape=(mem.shape[0], mem.shape[1], self._num_heads, -1)), mem_attn_mask)
out = self.proj_inter(out)
out = self.dropout_layer(out)
out = out + residual
if not self._pre_norm:
out = self.ln_inter(out)
# 3. Encode the output via an FFN layer
out = self.ffn(out)
out = th.reshape(out, shape=(batch_size, -1))
return out, (new_key, new_value) | Incrementally generate the output given the decoder input.
Parameters
----------
data
Shape (batch_size, C_in)
states
The previous states, contains
1. layout = 'NT':
- prev_multi_key
Shape (batch_size, prev_seq_length, num_heads, C_key)
- prev_multi_value
Shape (batch_size, prev_seq_length, num_heads, C_value)
2. layout = 'TN'
- prev_multi_key
Shape (prev_seq_length, batch_size, num_heads, C_key)
- prev_multi_value
Shape (prev_seq_length, batch_size, num_heads, C_value)
mem
The memory
1. layout = 'NT':
Shape (batch_size, mem_length, C_mem)
2. layout = 'TN'
Shape (mem_length, batch_size, C_mem)
mem_valid_length
Valid length of the memory
Shape (batch_size,)
mem_attn_mask
The attention mask between data and the memory
Has shape (batch_size, 1, mem_length)
Returns
-------
out
Shape (batch_size, C_out)
updated_states
- new_key
Shape (batch_size, prev_seq_length + 1, num_heads, C_key)
- new_value
Shape (batch_size, prev_seq_length + 1, num_heads, C_value)
| incremental_decode | python | dmlc/gluon-nlp | src/gluonnlp/torch/models/transformer.py | https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/torch/models/transformer.py | Apache-2.0 |
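A step-by-step decoding sketch for init_states/incremental_decode above. The class name TransformerDecoderLayer is assumed for the decoder layer defined in this file, and the memory tensor stands in for real encoder output; everything else follows the documented signatures.
import torch as th
from gluonnlp.torch.models.transformer import TransformerDecoderLayer  # class name assumed

batch_size, units, mem_length = 2, 512, 7
layer = TransformerDecoderLayer(units=units, num_heads=8, layout='NT')
mem = th.randn(batch_size, mem_length, units)            # stand-in for encoder output
mem_valid_length = th.tensor([7, 4])

states = layer.init_states(batch_size, device=mem.device)
step_input = th.randn(batch_size, units)                  # embedding of the current target token
for _ in range(3):                                        # decode three steps
    out, states = layer.incremental_decode(step_input, states, mem, mem_valid_length)
    step_input = out                                      # a real decoder would embed the predicted token
print(out.shape, states[0].shape)                         # (2, 512) and (2, 3, 8, 64)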
def forward(self, data, valid_length, mem_data, mem_valid_length):
"""Run forward
Parameters
----------
data
- layout = 'NT'
Shape (batch_size, seq_length, C_in)
- layout = 'TN'
Shape (seq_length, batch_size, C_in)
valid_length
Shape (batch_size,)
mem_data
- layout = 'NT'
Shape (batch_size, mem_length, C_mem)
- layout = 'TN'
Shape (mem_length, batch_size, C_mem)
mem_valid_length
Shape (batch_size,)
Returns
-------
out
- layout = 'NT'
Shape (batch_size, seq_length, C_out)
- layout = 'TN'
Shape (seq_length, batch_size, C_out)
"""
# 1. Embed the data
out = self.dropout_layer(data)
if self._data_norm:
out = self.ln_data(out)
self_causal_mask = gen_self_attn_mask(data, valid_length, attn_type='causal',
layout=self._layout)
mem_attn_mask = gen_mem_attn_mask(mem_data, mem_valid_length, data, valid_length,
layout=self._layout)
for i in range(self.num_layers):
if self.recurrent:
layer = self.layers[0]
else:
layer = self.layers[i]
out = layer(out, mem_data, self_causal_mask, mem_attn_mask)
if self._pre_norm:
out = self.ln_final(out)
return out | Run forward
Parameters
----------
data
- layout = 'NT'
Shape (batch_size, seq_length, C_in)
- layout = 'TN'
Shape (seq_length, batch_size, C_in)
valid_length
Shape (batch_size,)
mem_data
- layout = 'NT'
Shape (batch_size, mem_length, C_mem)
- layout = 'TN'
Shape (mem_length, batch_size, C_mem)
mem_valid_length
Shape (batch_size,)
Returns
-------
out
- layout = 'NT'
Shape (batch_size, seq_length, C_out)
- layout = 'TN'
Shape (seq_length, batch_size, C_out)
| forward | python | dmlc/gluon-nlp | src/gluonnlp/torch/models/transformer.py | https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/torch/models/transformer.py | Apache-2.0 |
def init_states(self, batch_size, device=None, dtype='float32'):
"""Initialize the states required for incremental decoding
Parameters
----------
batch_size
The batch size
device
The device
dtype
The data type of the states
Returns
-------
states
A list of states, each includes:
- init_key
- layout = 'NT'
Shape (batch_size, 0, N, C_key)
- layout = 'TN'
Shape (0, batch_size, N, C_key)
- init_value :
- layout = 'NT'
Shape (batch_size, 0, N, C_value)
- layout = 'TN'
Shape (0, batch_size, N, C_value)
"""
states = []
for i in range(self.num_layers):
if self.recurrent:
layer = self.layers[0]
else:
layer = self.layers[i]
states.append(layer.init_states(batch_size=batch_size, device=device, dtype=dtype))
return states | Initialize the states required for incremental decoding
Parameters
----------
batch_size
The batch size
device
The device
dtype
The data type of the states
Returns
-------
states
A list of states, each includes:
- init_key
- layout = 'NT'
Shape (batch_size, 0, N, C_key)
- layout = 'TN'
Shape (0, batch_size, N, C_key)
- init_value :
- layout = 'NT'
Shape (batch_size, 0, N, C_value)
- layout = 'TN'
Shape (0, batch_size, N, C_value)
| init_states | python | dmlc/gluon-nlp | src/gluonnlp/torch/models/transformer.py | https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/torch/models/transformer.py | Apache-2.0 |
def incremental_decode(self, data, states, mem, mem_valid_length):
"""Incrementally generate the output given the decoder input.
Parameters
----------
data
Shape (batch_size, C_in)
states
The previous states, contain a list of
1. layout = 'NT'
- prev_multi_key
Shape (batch_size, prev_seq_length, num_heads, C_key)
- prev_multi_value
Shape (batch_size, prev_seq_length, num_heads, C_value)
2. layout = 'TN'
- prev_multi_key
Shape (prev_seq_length, batch_size, num_heads, C_key)
- prev_multi_value
Shape (prev_seq_length, batch_size, num_heads, C_value)
mem
The memory
1. layout = 'NT'
Shape (batch_size, mem_length, C_mem)
2. layout = 'TN'
Shape (mem_length, batch_size, C_mem)
mem_valid_length
Valid length of the memory
Shape (batch_size,)
Returns
-------
out
Shape (batch_size, C_out)
new_states
The updated states, contain a list of
1. layout = 'NT'
- new_key
Shape (batch_size, prev_seq_length + 1, num_heads, C_key)
- new_value
Shape (batch_size, prev_seq_length + 1, num_heads, C_value)
2. layout = 'TN'
- new_key
Shape (prev_seq_length + 1, batch_size, num_heads, C_key)
- new_value
Shape (prev_seq_length + 1, batch_size, num_heads, C_value)
"""
# 1. Embed the data
out = self.dropout_layer(data)
if self._data_norm:
out = self.ln_data(out)
time_axis = 0 if self.layout == 'TN' else 1
mem_length = mem.shape[time_axis]
# Generate the mem_attn_mask
time_steps = th.arange(mem_length, device=data.device) # (mem_length,)
mem_attn_mask = time_steps.reshape((1, 1, -1)) < mem_valid_length.reshape((-1, 1, 1))
new_states = []
for i in range(self.num_layers):
if self.recurrent:
layer = self.layers[0]
else:
layer = self.layers[i]
out, new_state = layer.incremental_decode(out, states[i], mem, mem_valid_length,
mem_attn_mask)
new_states.append(new_state)
if self._pre_norm:
out = self.ln_final(out)
return out, new_states | Incrementally generate the output given the decoder input.
Parameters
----------
data
Shape (batch_size, C_in)
states
The previous states, contain a list of
1. layout = 'NT'
- prev_multi_key
Shape (batch_size, prev_seq_length, num_heads, C_key)
- prev_multi_value
Shape (batch_size, prev_seq_length, num_heads, C_value)
2. layout = 'TN'
- prev_multi_key
Shape (prev_seq_length, batch_size, num_heads, C_key)
- prev_multi_value
Shape (prev_seq_length, batch_size, num_heads, C_value)
mem
The memory
1. layout = 'NT'
Shape (batch_size, mem_length, C_mem)
2. layout = 'TN'
Shape (mem_length, batch_size, C_mem)
mem_valid_length
Valid length of the memory
Shape (batch_size,)
Returns
-------
out
Shape (batch_size, C_out)
new_states
The updated states, contain a list of
1. layout = 'NT'
- new_key
Shape (batch_size, prev_seq_length + 1, num_heads, C_key)
- new_value
Shape (batch_size, prev_seq_length + 1, num_heads, C_value)
2. layout = 'TN'
- new_key
Shape (prev_seq_length + 1, batch_size, num_heads, C_key)
- new_value
Shape (prev_seq_length + 1, batch_size, num_heads, C_value)
| incremental_decode | python | dmlc/gluon-nlp | src/gluonnlp/torch/models/transformer.py | https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/torch/models/transformer.py | Apache-2.0 |
def step(self, closure=None):
"""Performs a single optimization step.
Arguments:
closure (callable, optional): A closure that reevaluates the model
and returns the loss.
"""
loss = None
if closure is not None:
loss = closure()
for group in self.param_groups:
bias_correction = 1 if group['bias_correction'] else 0
beta1, beta2 = group['betas']
grad_averaging = 1 if group['grad_averaging'] else 0
# assume same step across group now to simplify things
# per parameter step can be easily support by making it tensor, or pass list into kernel
if 'step' in group:
group['step'] += 1
else:
group['step'] = 1
# create lists for multi-tensor apply
g_16, q_16, p_16, m_16, v_16 = [], [], [], [], []
g_32, q_32, p_32, m_32, v_32 = [], [], [], [], []
for p in group['params']:
if p.grad is None:
continue
if p.grad.data.is_sparse:
raise RuntimeError(
'FusedLANS does not support sparse gradients, please consider SparseAdam instead'
)
state = self.state[p]
# State initialization
if len(state) == 0:
# Exponential moving average of gradient values
state['exp_avg'] = torch.zeros_like(p.data)
# Exponential moving average of squared gradient values
state['exp_avg_sq'] = torch.zeros_like(p.data)
# Buffer for scaled grad
state['scaled_grad'] = torch.zeros_like(p.data)
if p.dtype == torch.float16:
g_16.append(p.grad.data)
q_16.append(state['scaled_grad'])
p_16.append(p.data)
m_16.append(state['exp_avg'])
v_16.append(state['exp_avg_sq'])
elif p.dtype == torch.float32:
g_32.append(p.grad.data)
q_32.append(state['scaled_grad'])
p_32.append(p.data)
m_32.append(state['exp_avg'])
v_32.append(state['exp_avg_sq'])
else:
raise RuntimeError('FusedLANS only supports fp16 and fp32.')
if (len(g_16) > 0):
multi_tensor_applier(self.multi_tensor_lans, self._dummy_overflow_buf,
[g_16, q_16, p_16, m_16, v_16], group['lr'], beta1, beta2,
group['eps'], group['step'], bias_correction,
group['weight_decay'], grad_averaging, self.adam_w_mode,
group['normalize_grad'])
if (len(g_32) > 0):
multi_tensor_applier(self.multi_tensor_lans, self._dummy_overflow_buf,
[g_32, q_32, p_32, m_32, v_32], group['lr'], beta1, beta2,
group['eps'], group['step'], bias_correction,
group['weight_decay'], grad_averaging, self.adam_w_mode,
group['normalize_grad'])
return loss | Performs a single optimization step.
Arguments:
closure (callable, optional): A closure that reevaluates the model
and returns the loss.
| step | python | dmlc/gluon-nlp | src/gluonnlp/torch/optimizers/fused_lans.py | https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/torch/optimizers/fused_lans.py | Apache-2.0 |
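Where step() fits in a training loop; constructing FusedLANS itself requires the fused multi-tensor kernels, so a stock torch optimizer with the same step() contract stands in here and the FusedLANS line is left as a comment.
import torch as th

model = th.nn.Linear(16, 4)
# optimizer = FusedLANS(model.parameters(), lr=1e-3)       # requires the fused kernels
optimizer = th.optim.Adam(model.parameters(), lr=1e-3)     # stand-in with the same step() contract
x, y = th.randn(8, 16), th.randn(8, 4)
loss = th.nn.functional.mse_loss(model(x), y)
optimizer.zero_grad()
loss.backward()
optimizer.step()        # the FusedLANS.step above slots in exactly here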
def get_warmup_linear_const_decay_poly_schedule(optimizer, total_steps, warmup_ratio=0.002,
const_ratio=0., degree=1.0, last_epoch=-1):
"""Create a schedule with a learning rate that decreases linearly from the
initial lr set in the optimizer to 0, after a warmup period during which it
increases linearly from 0 to the initial lr set in the optimizer and a
constant period.
Args:
optimizer (:class:`~torch.optim.Optimizer`):
The optimizer for which to schedule the learning rate.
total_steps (:obj:`int`):
The total number of training steps.
warmup_ratio (:obj:`float`):
The fraction of total training steps used for the linear warmup phase.
const_ratio (:obj:`float`):
The fraction of total training steps during which the learning rate is held constant after warmup.
degree (:obj:`float`, `optional`, defaults to 1.0):
The power of the polynomial decay after the constant phase; 1.0 gives a linear decay.
last_epoch (:obj:`int`, `optional`, defaults to -1):
The index of the last epoch when resuming training.
Return:
:obj:`torch.optim.lr_scheduler.LambdaLR` with the appropriate schedule.
"""
def lr_lambda(global_step: int):
x = global_step / total_steps
if warmup_ratio == 0.0:
return 1.0
elif x < warmup_ratio:
return x / warmup_ratio
elif x < warmup_ratio + const_ratio:
return 1.0
return ((1.0 - x) / (1.0 - warmup_ratio - const_ratio))**degree
return LambdaLR(optimizer, lr_lambda, last_epoch) | Create a schedule with a learning rate that decreases linearly from the
initial lr set in the optimizer to 0, after a warmup period during which it
increases linearly from 0 to the initial lr set in the optimizer and a
constant period.
Args:
optimizer (:class:`~torch.optim.Optimizer`):
The optimizer for which to schedule the learning rate.
total_steps (:obj:`int`):
The total number of training steps.
warmup_ratio (:obj:`float`):
The fraction of total training steps used for the linear warmup phase.
const_ratio (:obj:`float`):
The fraction of total training steps during which the learning rate is held constant after warmup.
degree (:obj:`float`, `optional`, defaults to 1.0):
The power of the polynomial decay after the constant phase; 1.0 gives a linear decay.
last_epoch (:obj:`int`, `optional`, defaults to -1):
The index of the last epoch when resuming training.
Return:
:obj:`torch.optim.lr_scheduler.LambdaLR` with the appropriate schedule.
| get_warmup_linear_const_decay_poly_schedule | python | dmlc/gluon-nlp | src/gluonnlp/torch/optimizers/schedules.py | https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/torch/optimizers/schedules.py | Apache-2.0 |
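A usage sketch for the schedule above: 10% warmup, 20% constant, then linear decay (degree=1.0); the toy model and step count are illustrative.
import torch as th
from gluonnlp.torch.optimizers.schedules import get_warmup_linear_const_decay_poly_schedule

model = th.nn.Linear(8, 2)
optimizer = th.optim.SGD(model.parameters(), lr=0.1)
total_steps = 100
scheduler = get_warmup_linear_const_decay_poly_schedule(
    optimizer, total_steps=total_steps, warmup_ratio=0.1, const_ratio=0.2)
lrs = []
for _ in range(total_steps):
    optimizer.step()
    scheduler.step()
    lrs.append(optimizer.param_groups[0]['lr'])
print(lrs[4], lrs[20], lrs[-1])    # ramp-up (~0.05), plateau (0.1), decayed to 0.0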
def clone_merge(self, cfg_filename_or_other_cfg):
"""Create a new cfg by cloning and merging with the given cfg
Parameters
----------
cfg_filename_or_other_cfg
Returns
-------
"""
ret = self.clone()
if isinstance(cfg_filename_or_other_cfg, str):
ret.merge_from_file(cfg_filename_or_other_cfg)
return ret
elif isinstance(cfg_filename_or_other_cfg, CfgNode):
ret.merge_from_other_cfg(cfg_filename_or_other_cfg)
return ret
elif cfg_filename_or_other_cfg is None:
return ret
else:
raise TypeError('Type of config path is not supported!') | Create a new cfg by cloning and merging with the given cfg
Parameters
----------
cfg_filename_or_other_cfg
Returns
-------
| clone_merge | python | dmlc/gluon-nlp | src/gluonnlp/utils/config.py | https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/utils/config.py | Apache-2.0 |
def glob(url, separator=','):
"""Return a list of paths matching a pathname pattern.
The pattern may contain simple shell-style wildcards.
Input may also include multiple patterns, separated by separator.
Parameters
----------
url : str
The name of the files
separator : str, default is ','
The separator in url to allow multiple patterns in the input
"""
patterns = [url] if separator is None else url.split(separator)
result = []
for pattern in patterns:
result.extend(_glob.glob(os.path.expanduser(pattern.strip())))
return result | Return a list of paths matching a pathname pattern.
The pattern may contain simple shell-style wildcards.
Input may also include multiple patterns, separated by separator.
Parameters
----------
url : str
The name of the files
separator : str, default is ','
The separator in url to allow multiple patterns in the input
| glob | python | dmlc/gluon-nlp | src/gluonnlp/utils/misc.py | https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/utils/misc.py | Apache-2.0 |
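A usage sketch with hypothetical paths; several patterns can be packed into one string with the default ',' separator.
from gluonnlp.utils.misc import glob

# Both patterns below are placeholders; any shell-style wildcards work.
files = glob('~/corpus/wiki_*.txt, ~/corpus/books/part-??.txt')
print(len(files), 'matching files')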
def file_line_number(path: str) -> int:
"""
Parameters
----------
path
The path to calculate the number of lines in a file.
Returns
-------
ret
The number of lines
"""
ret = 0
with open(path, 'rb') as f:
for _ in f:
ret += 1
return ret |
Parameters
----------
path
The path to calculate the number of lines in a file.
Returns
-------
ret
The number of lines
| file_line_number | python | dmlc/gluon-nlp | src/gluonnlp/utils/misc.py | https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/utils/misc.py | Apache-2.0 |
def md5sum(filename):
"""Calculate the md5sum of a file
Parameters
----------
filename
Name of the file
Returns
-------
ret
The md5sum
"""
with open(filename, mode='rb') as f:
d = hashlib.md5()
for buf in iter(functools.partial(f.read, 1024*100), b''):
d.update(buf)
return d.hexdigest() | Calculate the md5sum of a file
Parameters
----------
filename
Name of the file
Returns
-------
ret
The md5sum
| md5sum | python | dmlc/gluon-nlp | src/gluonnlp/utils/misc.py | https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/utils/misc.py | Apache-2.0 |
def sha1sum(filename):
"""Calculate the sha1sum of a file
Parameters
----------
filename
Name of the file
Returns
-------
ret
The sha1sum
"""
with open(filename, mode='rb') as f:
d = hashlib.sha1()
for buf in iter(functools.partial(f.read, 1024*100), b''):
d.update(buf)
return d.hexdigest() | Calculate the sha1sum of a file
Parameters
----------
filename
Name of the file
Returns
-------
ret
The sha1sum
| sha1sum | python | dmlc/gluon-nlp | src/gluonnlp/utils/misc.py | https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/utils/misc.py | Apache-2.0 |
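A sketch of verifying a downloaded file with sha1sum; the path and expected digest are placeholders.
from gluonnlp.utils.misc import sha1sum

expected_sha1 = '0000000000000000000000000000000000000000'   # placeholder digest
path = 'model_weights.params'                                # placeholder file
if sha1sum(path) != expected_sha1:
    raise ValueError('Checksum mismatch for {}'.format(path))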
def logging_config(folder: Optional[str] = None,
name: Optional[str] = None,
logger: logging.Logger = logging.root,
level: int = logging.INFO,
console_level: int = logging.INFO,
console: bool = True,
overwrite_handler: bool = False) -> str:
"""Config the logging module. It will set the logger to save to the specified file path.
Parameters
----------
folder
The folder to save the log
name
Name of the saved log file
logger
The logger
level
Logging level
console_level
Logging level of the console log
console
Whether to also log to console
overwrite_handler
Whether to overwrite the existing handlers in the logger
Returns
-------
folder
The folder to save the log file.
"""
if name is None:
name = inspect.stack()[-1][1].split('.')[0]
if folder is None:
folder = os.path.join(os.getcwd(), name)
if not os.path.exists(folder):
os.makedirs(folder, exist_ok=True)
need_file_handler = True
need_console_handler = True
# Check all loggers.
if overwrite_handler:
logger.handlers = []
else:
for handler in logger.handlers:
if isinstance(handler, logging.StreamHandler):
need_console_handler = False
logpath = os.path.join(folder, name + ".log")
print("All Logs will be saved to {}".format(logpath))
logger.setLevel(level)
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
if need_file_handler:
logfile = logging.FileHandler(logpath)
logfile.setLevel(level)
logfile.setFormatter(formatter)
logger.addHandler(logfile)
if console and need_console_handler:
# Initialize the console logging
logconsole = logging.StreamHandler()
logconsole.setLevel(console_level)
logconsole.setFormatter(formatter)
logger.addHandler(logconsole)
return folder | Config the logging module. It will set the logger to save to the specified file path.
Parameters
----------
folder
The folder to save the log
name
Name of the saved log file
logger
The logger
level
Logging level
console_level
Logging level of the console log
console
Whether to also log to console
overwrite_handler
Whether to overwrite the existing handlers in the logger
Returns
-------
folder
The folder to save the log file.
| logging_config | python | dmlc/gluon-nlp | src/gluonnlp/utils/misc.py | https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/utils/misc.py | Apache-2.0 |
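A usage sketch; the folder and name values are illustrative, and INFO messages go both to the log file and to the console.
import logging
from gluonnlp.utils.misc import logging_config

folder = logging_config(folder='./pretrain_demo', name='pretrain_demo', console=True)
logging.info('Logs are stored in %s', folder)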