| python_code (stringlengths 0–992k) | repo_name (stringlengths 8–46) | file_path (stringlengths 5–162) |
|---|---|---|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
from collections import OrderedDict
from torch.utils.data import Dataset
from torch.utils.data.dataloader import default_collate
from ..utils import set_seed
class MMDataset(Dataset):
"""
A generic multi-modal dataset.
Args:
`meta_processor`: a meta processor,
handling loading meta data and return video_id and text_id.
`video_processor`: a video processor,
handling e.g., decoding, loading .np files.
`text_processor`: a text processor,
handling e.g., tokenization.
`aligner`: combine the video and text feature
as one training example.
"""
def __init__(
self,
meta_processor,
video_processor,
text_processor,
align_processor,
):
self.split = meta_processor.split
self.meta_processor = meta_processor
self.video_processor = video_processor
self.text_processor = text_processor
self.align_processor = align_processor
def __len__(self):
return len(self.meta_processor)
def __getitem__(self, idx):
if self.split == "test":
set_seed(idx)
video_id, text_id = self.meta_processor[idx]
video_feature = self.video_processor(video_id)
text_feature = self.text_processor(text_id)
output = self.align_processor(video_id, video_feature, text_feature)
# TODO (huxu): the following is for debugging purposes.
output.update({"idx": idx})
return output
def collater(self, samples):
"""This collator is deprecated.
set self.collator = MMDataset.collater.
see collator in FairseqMMDataset.
"""
if len(samples) == 0:
return {}
if isinstance(samples[0], dict):
batch = OrderedDict()
for key in samples[0]:
if samples[0][key] is not None:
batch[key] = default_collate(
[sample[key] for sample in samples])
# if torch.is_tensor(batch[key]):
# print(key, batch[key].size())
# else:
# print(key, len(batch[key]))
return batch
else:
return default_collate(samples)
def print_example(self, output):
print("[one example]", output["video_id"])
if (
hasattr(self.align_processor, "subsampling")
and self.align_processor.subsampling is not None
and self.align_processor.subsampling > 1
):
for key in output:
if torch.is_tensor(output[key]):
output[key] = output[key][0]
# search tokenizer to translate ids back.
tokenizer = None
if hasattr(self.text_processor, "tokenizer"):
tokenizer = self.text_processor.tokenizer
elif hasattr(self.align_processor, "tokenizer"):
tokenizer = self.align_processor.tokenizer
if tokenizer is not None:
caps = output["caps"].tolist()
if isinstance(caps[0], list):
caps = caps[0]
print("caps", tokenizer.decode(caps))
print("caps", tokenizer.convert_ids_to_tokens(caps))
for key, value in output.items():
if torch.is_tensor(value):
if len(value.size()) >= 3: # attention_mask.
print(key, value.size())
print(key, "first", value[0, :, :])
print(key, "last", value[-1, :, :])
else:
print(key, value)
print("[end of one example]")
| EXA-1-master | exa/libraries/fairseq/examples/MMPT/mmpt/datasets/mmdataset.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import random
import numpy as np
import torch
from .shardedtensor import *
from .load_config import *
def set_seed(seed=43211):
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
if torch.backends.cudnn.enabled:
torch.backends.cudnn.benchmark = False
torch.backends.cudnn.deterministic = True
def get_world_size():
if torch.distributed.is_initialized():
world_size = torch.distributed.get_world_size()
else:
world_size = 1
return world_size
def get_local_rank():
return torch.distributed.get_rank() \
if torch.distributed.is_initialized() else 0
def print_on_rank0(func):
local_rank = get_local_rank()
if local_rank == 0:
print("[INFO]", func)
class RetriMeter(object):
"""
Statistics on whether retrieval yields a better pair.
"""
def __init__(self, freq=1024):
self.freq = freq
self.total = 0
self.replace = 0
self.updates = 0
def __call__(self, data):
if isinstance(data, np.ndarray):
self.replace += data.shape[0] - int((data[:, 0] == -1).sum())
self.total += data.shape[0]
elif torch.is_tensor(data):
self.replace += int(data.sum())
self.total += data.size(0)
else:
raise ValueError("unsupported RetriMeter data type.", type(data))
self.updates += 1
if get_local_rank() == 0 and self.updates % self.freq == 0:
print("[INFO]", self)
def __repr__(self):
return "RetriMeter (" + str(self.replace / self.total) \
+ "/" + str(self.replace) + "/" + str(self.total) + ")"
| EXA-1-master | exa/libraries/fairseq/examples/MMPT/mmpt/utils/__init__.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os
import omegaconf
from omegaconf import OmegaConf
def load_config(args=None, config_file=None, overwrite_fairseq=False):
"""TODO (huxu): move fairseq overwrite to another function."""
if args is not None:
config_file = args.taskconfig
config = recursive_config(config_file)
if config.dataset.subsampling is not None:
batch_size = config.fairseq.dataset.batch_size // config.dataset.subsampling
print(
"adjusting batch_size to {} due to subsampling {}.".format(
batch_size, config.dataset.subsampling
)
)
config.fairseq.dataset.batch_size = batch_size
is_test = config.dataset.split is not None and config.dataset.split == "test"
if not is_test:
if (
config.fairseq.checkpoint is None
or config.fairseq.checkpoint.save_dir is None
):
raise ValueError("fairseq save_dir or save_path must be specified.")
save_dir = config.fairseq.checkpoint.save_dir
os.makedirs(save_dir, exist_ok=True)
if config.fairseq.common.tensorboard_logdir is not None:
tb_run_dir = suffix_rundir(
save_dir, config.fairseq.common.tensorboard_logdir
)
config.fairseq.common.tensorboard_logdir = tb_run_dir
print(
"update tensorboard_logdir as", config.fairseq.common.tensorboard_logdir
)
os.makedirs(save_dir, exist_ok=True)
OmegaConf.save(config=config, f=os.path.join(save_dir, "config.yaml"))
if overwrite_fairseq and config.fairseq is not None and args is not None:
# flatten fields.
for group in config.fairseq:
for field in config.fairseq[group]:
print("overwrite args." + field, "as", config.fairseq[group][field])
setattr(args, field, config.fairseq[group][field])
return config
def recursive_config(config_path):
"""allows for stacking of configs in any depth."""
config = OmegaConf.load(config_path)
if config.includes is not None:
includes = config.includes
config.pop("includes")
base_config = recursive_config(includes)
config = OmegaConf.merge(base_config, config)
return config
def suffix_rundir(save_dir, run_dir):
max_id = -1
for search_dir in os.listdir(save_dir):
if search_dir.startswith(run_dir):
splits = search_dir.split("_")
cur_id = int(splits[1]) if len(splits) > 1 else 0
max_id = max(max_id, cur_id)
return os.path.join(save_dir, run_dir + "_" + str(max_id + 1))
def overwrite_dir(config, replace, basedir):
for key in config:
if isinstance(config[key], str) and config[key].startswith(basedir):
config[key] = config[key].replace(basedir, replace)
if isinstance(config[key], omegaconf.dictconfig.DictConfig):
overwrite_dir(config[key], replace, basedir)
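# Illustrative sketch (hypothetical configs, not part of MMPT): mimics the merge
# performed by recursive_config when a config carries an `includes` key; the
# included base is loaded first and the child config overrides it.
_base = OmegaConf.create({"dataset": {"subsampling": 1, "split": "train"}})
_child = OmegaConf.create({"dataset": {"split": "test"}})
_merged = OmegaConf.merge(_base, _child)
assert _merged.dataset.split == "test" and _merged.dataset.subsampling == 1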
| EXA-1-master | exa/libraries/fairseq/examples/MMPT/mmpt/utils/load_config.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os
import pickle
import numpy as np
class ShardedTensor(object):
def __init__(self, data, starts):
self.data = data
self.starts = starts
assert self.starts[0] == 0
assert self.starts[-1] == len(self.data)
assert (self.starts[1:] >= self.starts[:-1]).all()
assert (self.starts > -1).all()
@staticmethod
def from_list(xs):
starts = np.full((len(xs) + 1,), -1, dtype=np.int64)
data = np.concatenate(xs, axis=0)
starts[0] = 0
for i, x in enumerate(xs):
starts[i + 1] = starts[i] + x.shape[0]
assert (starts > -1).all()
return ShardedTensor(data, starts)
def __getitem__(self, i):
return self.data[self.starts[i] : self.starts[i + 1]]
def __len__(self):
return len(self.starts) - 1
def lengths(self):
return self.starts[1:] - self.starts[:-1]
def save(self, path):
np.save(path + "_starts", self.starts)
np.save(path + "_data", self.data)
@staticmethod
def load(path, mmap_mode=None):
starts = np.load(path + "_starts.npy", mmap_mode)
data = np.load(path + "_data.npy", mmap_mode)
return ShardedTensor(data, starts)
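# Illustrative usage sketch (hypothetical data, not part of MMPT): ShardedTensor
# stores variable-length arrays back to back plus per-shard start offsets.
_xs = [np.zeros((2, 4)), np.ones((3, 4))]
_st = ShardedTensor.from_list(_xs)
assert len(_st) == 2
assert _st[1].shape == (3, 4)                     # second shard recovered via offsets
assert (_st.lengths() == np.array([2, 3])).all()  # per-shard lengths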
| EXA-1-master | exa/libraries/fairseq/examples/MMPT/mmpt/utils/shardedtensor.py |
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Copyright (c) Facebook, Inc. All Rights Reserved
import torch
from torch import nn
try:
from transformers.modeling_bert import (
BertPreTrainedModel,
BertModel,
BertEncoder,
BertPredictionHeadTransform,
)
except ImportError:
pass
from ..modules import VideoTokenMLP, MMBertEmbeddings
# --------------- fine-tuning models ---------------
class MMBertForJoint(BertPreTrainedModel):
"""A BertModel with isolated attention mask to separate modality."""
def __init__(self, config):
super().__init__(config)
self.videomlp = VideoTokenMLP(config)
self.bert = MMBertModel(config)
self.init_weights()
def forward(
self,
input_ids=None,
input_video_embeds=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
next_sentence_label=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
separate_forward_split=None,
):
return_dict = (
return_dict if return_dict is not None
else self.config.use_return_dict
)
video_tokens = self.videomlp(input_video_embeds)
outputs = self.bert(
input_ids,
video_tokens,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
separate_forward_split=separate_forward_split,
)
return outputs
class MMBertForTokenClassification(BertPreTrainedModel):
"""A BertModel similar to MMJointUni, with extra wrapper layer
to be fine-tuned from other pretrained MMFusion model."""
def __init__(self, config):
super().__init__(config)
self.videomlp = VideoTokenMLP(config)
self.bert = MMBertModel(config)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
# TODO(huxu): 779 is the number of classes for COIN: move to config?
self.classifier = nn.Linear(config.hidden_size, 779)
self.init_weights()
def forward(
self,
input_ids=None,
input_video_embeds=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
next_sentence_label=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
separate_forward_split=None,
):
return_dict = (
return_dict if return_dict is not None
else self.config.use_return_dict
)
video_tokens = self.videomlp(input_video_embeds)
outputs = self.bert(
input_ids,
video_tokens,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
separate_forward_split=separate_forward_split,
)
return (self.classifier(outputs[0]),)
# ------------ pre-training models ----------------
class MMBertForEncoder(BertPreTrainedModel):
"""A BertModel for Contrastive Learning."""
def __init__(self, config):
super().__init__(config)
self.videomlp = VideoTokenMLP(config)
self.bert = MMBertModel(config)
self.init_weights()
def forward(
self,
input_ids=None,
input_video_embeds=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
return_dict = (
return_dict if return_dict is not None
else self.config.use_return_dict
)
if input_video_embeds is not None:
video_tokens = self.videomlp(input_video_embeds)
else:
video_tokens = None
outputs = self.bert(
input_ids,
video_tokens,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
return outputs
class MMBertForMFMMLM(BertPreTrainedModel):
"""A BertModel with shared prediction head on MFM-MLM."""
def __init__(self, config):
super().__init__(config)
self.videomlp = VideoTokenMLP(config)
self.bert = MMBertModel(config)
self.cls = MFMMLMHead(config)
self.hidden_size = config.hidden_size
self.init_weights()
def get_output_embeddings(self):
return self.cls.predictions.decoder
def forward(
self,
input_ids=None,
input_video_embeds=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
masked_frame_labels=None,
target_video_hidden_states=None,
non_masked_frame_mask=None,
masked_lm_labels=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
return_dict = (
return_dict if return_dict is not None
else self.config.use_return_dict
)
if input_video_embeds is not None:
video_tokens = self.videomlp(input_video_embeds)
else:
video_tokens = None
if target_video_hidden_states is not None:
target_video_hidden_states = self.videomlp(
target_video_hidden_states)
non_masked_frame_hidden_states = video_tokens.masked_select(
non_masked_frame_mask.unsqueeze(-1)
).view(-1, self.hidden_size)
outputs = self.bert(
input_ids,
video_tokens,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
sequence_output = outputs[0]
mfm_scores, prediction_scores = None, None
if masked_frame_labels is not None and masked_lm_labels is not None:
# split the sequence.
text_offset = masked_frame_labels.size(1) + 1 # [CLS]
video_sequence_output = sequence_output[
:, 1:text_offset
] # remove [SEP] as not in video_label.
text_sequence_output = torch.cat(
[sequence_output[:, :1], sequence_output[:, text_offset:]],
dim=1
)
hidden_size = video_sequence_output.size(-1)
selected_video_output = video_sequence_output.masked_select(
masked_frame_labels.unsqueeze(-1)
).view(-1, hidden_size)
# only compute selected tokens during training to speed up.
hidden_size = text_sequence_output.size(-1)
# masked_lm_labels = masked_lm_labels.reshape(-1)
labels_mask = masked_lm_labels != -100
selected_text_output = text_sequence_output.masked_select(
labels_mask.unsqueeze(-1)
).view(-1, hidden_size)
mfm_scores, prediction_scores = self.cls(
selected_video_output,
target_video_hidden_states,
non_masked_frame_hidden_states,
selected_text_output,
)
output = (
mfm_scores,
prediction_scores,
) + outputs
return output
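# Illustrative sketch (hypothetical shapes, not part of MMPT): the masked_select
# pattern used above gathers only the positions that contribute to the MLM loss.
_hidden = torch.randn(2, 5, 4)                    # [batch, seq, hidden]
_labels = torch.tensor([[-100, 7, -100, -100, 3],
                        [12, -100, -100, 5, -100]])
_label_mask = _labels != -100                     # True at supervised positions
_selected = _hidden.masked_select(_label_mask.unsqueeze(-1)).view(-1, 4)
assert _selected.shape == (4, 4)                  # 4 supervised tokens in total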
class BertMFMMLMPredictionHead(nn.Module):
def __init__(self, config):
super().__init__()
self.transform = BertPredictionHeadTransform(config)
# The output weights are the same as the input embeddings, but there is
# an output-only bias for each token.
self.decoder = nn.Linear(
config.hidden_size, config.vocab_size, bias=False)
self.bias = nn.Parameter(torch.zeros(config.vocab_size))
# Need a link between the two variables so that the bias is correctly
# resized with `resize_token_embeddings`
self.decoder.bias = self.bias
def forward(
self,
video_hidden_states=None,
target_video_hidden_states=None,
non_masked_frame_hidden_states=None,
text_hidden_states=None,
):
video_logits, text_logits = None, None
if video_hidden_states is not None:
video_hidden_states = self.transform(video_hidden_states)
non_masked_frame_logits = torch.mm(
video_hidden_states,
non_masked_frame_hidden_states.transpose(1, 0)
)
masked_frame_logits = torch.bmm(
video_hidden_states.unsqueeze(1),
target_video_hidden_states.unsqueeze(-1),
).squeeze(-1)
video_logits = torch.cat(
[masked_frame_logits, non_masked_frame_logits], dim=1
)
if text_hidden_states is not None:
text_hidden_states = self.transform(text_hidden_states)
text_logits = self.decoder(text_hidden_states)
return video_logits, text_logits
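# Illustrative shape check (hypothetical sizes, not part of MMPT): for MFM, each
# masked frame is scored against its own target (bmm) and against all non-masked
# frames (mm); the two logit blocks are then concatenated.
_n_masked, _n_ctx, _dim = 3, 7, 4
_v = torch.randn(_n_masked, _dim)
_target = torch.randn(_n_masked, _dim)
_ctx = torch.randn(_n_ctx, _dim)
_pos = torch.bmm(_v.unsqueeze(1), _target.unsqueeze(-1)).squeeze(-1)  # [n_masked, 1]
_neg = torch.mm(_v, _ctx.transpose(1, 0))                             # [n_masked, n_ctx]
assert torch.cat([_pos, _neg], dim=1).shape == (_n_masked, 1 + _n_ctx)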
class MFMMLMHead(nn.Module):
def __init__(self, config):
super().__init__()
self.predictions = BertMFMMLMPredictionHead(config)
def forward(
self,
video_hidden_states=None,
target_video_hidden_states=None,
non_masked_frame_hidden_states=None,
text_hidden_states=None,
):
video_logits, text_logits = self.predictions(
video_hidden_states,
target_video_hidden_states,
non_masked_frame_hidden_states,
text_hidden_states,
)
return video_logits, text_logits
class MMBertForMTM(MMBertForMFMMLM):
def __init__(self, config):
BertPreTrainedModel.__init__(self, config)
self.videomlp = VideoTokenMLP(config)
self.bert = MMBertModel(config)
self.cls = MTMHead(config)
self.hidden_size = config.hidden_size
self.init_weights()
class BertMTMPredictionHead(nn.Module):
def __init__(self, config):
super().__init__()
self.transform = BertPredictionHeadTransform(config)
self.decoder = nn.Linear(
config.hidden_size, config.vocab_size, bias=False)
def forward(
self,
video_hidden_states=None,
target_video_hidden_states=None,
non_masked_frame_hidden_states=None,
text_hidden_states=None,
):
non_masked_frame_hidden_states = non_masked_frame_hidden_states.transpose(1, 0)
video_logits, text_logits = None, None
if video_hidden_states is not None:
video_hidden_states = self.transform(video_hidden_states)
masked_frame_logits = torch.bmm(
video_hidden_states.unsqueeze(1),
target_video_hidden_states.unsqueeze(-1),
).squeeze(-1)
non_masked_frame_logits = torch.mm(
video_hidden_states,
non_masked_frame_hidden_states
)
video_on_vocab_logits = self.decoder(video_hidden_states)
video_logits = torch.cat([
masked_frame_logits,
non_masked_frame_logits,
video_on_vocab_logits], dim=1)
if text_hidden_states is not None:
text_hidden_states = self.transform(text_hidden_states)
# text first so label does not need to be shifted.
text_on_vocab_logits = self.decoder(text_hidden_states)
text_on_video_logits = torch.mm(
text_hidden_states,
non_masked_frame_hidden_states
)
text_logits = torch.cat([
text_on_vocab_logits,
text_on_video_logits
], dim=1)
return video_logits, text_logits
class MTMHead(nn.Module):
def __init__(self, config):
super().__init__()
self.predictions = BertMTMPredictionHead(config)
def forward(
self,
video_hidden_states=None,
target_video_hidden_states=None,
non_masked_frame_hidden_states=None,
text_hidden_states=None,
):
video_logits, text_logits = self.predictions(
video_hidden_states,
target_video_hidden_states,
non_masked_frame_hidden_states,
text_hidden_states,
)
return video_logits, text_logits
class MMBertModel(BertModel):
"""MMBertModel has MMBertEmbedding to support video tokens."""
def __init__(self, config, add_pooling_layer=True):
super().__init__(config)
# overwrite embedding
self.embeddings = MMBertEmbeddings(config)
self.encoder = MultiLayerAttentionMaskBertEncoder(config)
self.init_weights()
def forward(
self,
input_ids=None,
input_video_embeds=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
separate_forward_split=None,
):
output_attentions = (
output_attentions
if output_attentions is not None
else self.config.output_attentions
)
output_hidden_states = (
output_hidden_states
if output_hidden_states is not None
else self.config.output_hidden_states
)
return_dict = (
return_dict if return_dict is not None
else self.config.use_return_dict
)
if input_ids is not None and inputs_embeds is not None:
raise ValueError(
"You cannot specify both input_ids "
"and inputs_embeds at the same time"
)
elif input_ids is not None:
if input_video_embeds is not None:
input_shape = (
input_ids.size(0),
input_ids.size(1) + input_video_embeds.size(1),
)
else:
input_shape = (
input_ids.size(0),
input_ids.size(1),
)
elif inputs_embeds is not None:
if input_video_embeds is not None:
input_shape = (
inputs_embeds.size(0),
inputs_embeds.size(1) + input_video_embeds.size(1),
)
else:
input_shape = (
inputs_embeds.size(0),
inputs_embeds.size(1),
)
else:
raise ValueError(
"You have to specify either input_ids or inputs_embeds")
device = input_ids.device if input_ids is not None \
else inputs_embeds.device
if attention_mask is None:
attention_mask = torch.ones(input_shape, device=device)
if token_type_ids is None:
token_type_ids = torch.zeros(
input_shape, dtype=torch.long, device=device)
# We can provide a self-attention mask of dimensions
# [batch_size, from_seq_length, to_seq_length]
# ourselves in which case
# we just need to make it broadcastable to all heads.
extended_attention_mask: torch.Tensor = \
self.get_extended_attention_mask(
attention_mask, input_shape, device)
# If a 2D or 3D attention mask is provided for the cross-attention
# we need to make broadcastable to
# [batch_size, num_heads, seq_length, seq_length]
if self.config.is_decoder and encoder_hidden_states is not None:
(
encoder_batch_size,
encoder_sequence_length,
_,
) = encoder_hidden_states.size()
encoder_hidden_shape = (
encoder_batch_size, encoder_sequence_length)
if encoder_attention_mask is None:
encoder_attention_mask = torch.ones(
encoder_hidden_shape, device=device)
encoder_extended_attention_mask = self.invert_attention_mask(
encoder_attention_mask
)
else:
encoder_extended_attention_mask = None
# Prepare head mask if needed
# 1.0 in head_mask indicate we keep the head
# attention_probs has shape bsz x n_heads x N x N
# input head_mask has shape [num_heads] or
# [num_hidden_layers x num_heads]
# and head_mask is converted to shape
# [num_hidden_layers x batch x num_heads x seq_length x seq_length]
head_mask = self.get_head_mask(
head_mask, self.config.num_hidden_layers)
embedding_output = self.embeddings(
input_ids,
input_video_embeds,
position_ids=position_ids,
token_type_ids=token_type_ids,
inputs_embeds=inputs_embeds,
)
if separate_forward_split is not None:
split_embedding_output = \
embedding_output[:, :separate_forward_split]
split_extended_attention_mask = extended_attention_mask[
:, :, :, :separate_forward_split, :separate_forward_split
]
split_encoder_outputs = self.encoder(
split_embedding_output,
attention_mask=split_extended_attention_mask,
head_mask=head_mask,
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=encoder_extended_attention_mask,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
assert (
len(split_encoder_outputs) <= 2
), "we do not support merge on attention for now."
encoder_outputs = []
encoder_outputs.append([split_encoder_outputs[0]])
if len(split_encoder_outputs) == 2:
encoder_outputs.append([])
for _all_hidden_states in split_encoder_outputs[1]:
encoder_outputs[-1].append([_all_hidden_states])
split_embedding_output = \
embedding_output[:, separate_forward_split:]
split_extended_attention_mask = extended_attention_mask[
:, :, :, separate_forward_split:, separate_forward_split:
]
split_encoder_outputs = self.encoder(
split_embedding_output,
attention_mask=split_extended_attention_mask,
head_mask=head_mask,
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=encoder_extended_attention_mask,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
assert (
len(split_encoder_outputs) <= 2
), "we do not support merge on attention for now."
encoder_outputs[0].append(split_encoder_outputs[0])
encoder_outputs[0] = torch.cat(encoder_outputs[0], dim=1)
if len(split_encoder_outputs) == 2:
for layer_idx, _all_hidden_states in enumerate(
split_encoder_outputs[1]
):
encoder_outputs[1][layer_idx].append(_all_hidden_states)
encoder_outputs[1][layer_idx] = torch.cat(
encoder_outputs[1][layer_idx], dim=1
)
encoder_outputs = tuple(encoder_outputs)
else:
encoder_outputs = self.encoder(
embedding_output,
attention_mask=extended_attention_mask,
head_mask=head_mask,
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=encoder_extended_attention_mask,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
sequence_output = encoder_outputs[0]
pooled_output = (
self.pooler(sequence_output) if self.pooler is not None else None
)
return (sequence_output, pooled_output) + encoder_outputs[1:]
def get_extended_attention_mask(self, attention_mask, input_shape, device):
"""This is borrowed from `modeling_utils.py` with the support of
multi-layer attention masks.
The second dim is expected to be number of layers.
See `MMAttentionMaskProcessor`.
Makes broadcastable attention and causal masks so that future
and masked tokens are ignored.
Arguments:
attention_mask (:obj:`torch.Tensor`):
Mask with ones indicating tokens to attend to,
zeros for tokens to ignore.
input_shape (:obj:`Tuple[int]`):
The shape of the input to the model.
device: (:obj:`torch.device`):
The device of the input to the model.
Returns:
:obj:`torch.Tensor` The extended attention mask, \
with the same dtype as :obj:`attention_mask.dtype`.
"""
# We can provide a self-attention mask of dimensions
# [batch_size, from_seq_length, to_seq_length]
# ourselves in which case we just need to make it broadcastable
# to all heads.
if attention_mask.dim() == 4:
extended_attention_mask = attention_mask[:, :, None, :, :]
extended_attention_mask = extended_attention_mask.to(
dtype=self.dtype
) # fp16 compatibility
extended_attention_mask = (1.0 - extended_attention_mask) \
* -10000.0
return extended_attention_mask
else:
return super().get_extended_attention_mask(
attention_mask, input_shape, device
)
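# Illustrative sketch (hypothetical sizes, not part of MMPT): mirrors the 4-D
# branch above, where a per-layer mask [batch, num_layers, from_seq, to_seq]
# becomes additive attention logits with an extra head dimension.
_b, _layers, _seq = 2, 12, 8
_mask = torch.ones(_b, _layers, _seq, _seq)
_ext = _mask[:, :, None, :, :]            # [batch, layers, 1, from, to]
_ext = (1.0 - _ext) * -10000.0            # 0 where attended, -1e4 where masked
assert _ext.shape == (_b, _layers, 1, _seq, _seq)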
class MultiLayerAttentionMaskBertEncoder(BertEncoder):
"""extend BertEncoder with the capability of
multiple layers of attention mask."""
def forward(
self,
hidden_states,
attention_mask=None,
head_mask=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
output_attentions=False,
output_hidden_states=False,
return_dict=False,
):
all_hidden_states = () if output_hidden_states else None
all_attentions = () if output_attentions else None
for i, layer_module in enumerate(self.layer):
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
layer_head_mask = head_mask[i] if head_mask is not None else None
layer_attention_mask = (
attention_mask[:, i, :, :, :]
if attention_mask.dim() == 5
else attention_mask
)
if getattr(self.config, "gradient_checkpointing", False):
def create_custom_forward(module):
def custom_forward(*inputs):
return module(*inputs, output_attentions)
return custom_forward
layer_outputs = torch.utils.checkpoint.checkpoint(
create_custom_forward(layer_module),
hidden_states,
layer_attention_mask,
layer_head_mask,
encoder_hidden_states,
encoder_attention_mask,
)
else:
layer_outputs = layer_module(
hidden_states,
layer_attention_mask,
layer_head_mask,
encoder_hidden_states,
encoder_attention_mask,
output_attentions,
)
hidden_states = layer_outputs[0]
if output_attentions:
all_attentions = all_attentions + (layer_outputs[1],)
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
return tuple(
v
for v in [hidden_states, all_hidden_states, all_attentions]
if v is not None
)
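# Illustrative sketch (toy module, not part of MMPT): the gradient-checkpointing
# wrapper pattern used in the encoder above; activations are recomputed during
# the backward pass instead of being stored.
from torch.utils.checkpoint import checkpoint as _checkpoint

_toy_layer = nn.Linear(4, 4)

def _create_custom_forward(module):
    def _custom_forward(*inputs):
        return module(*inputs)
    return _custom_forward

_x = torch.randn(2, 4, requires_grad=True)
_y = _checkpoint(_create_custom_forward(_toy_layer), _x)
_y.sum().backward()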
| EXA-1-master | exa/libraries/fairseq/examples/MMPT/mmpt/models/transformermodel.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from .mmfusion import *
from .transformermodel import *
from .mmfusionnlg import *
try:
from .fairseqmmmodel import *
except ImportError:
pass
try:
from .expmmfusion import *
except ImportError:
pass
| EXA-1-master | exa/libraries/fairseq/examples/MMPT/mmpt/models/__init__.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from fairseq.models import (
BaseFairseqModel,
register_model,
register_model_architecture
)
@register_model("mmmodel")
class FairseqMMModel(BaseFairseqModel):
"""a fairseq wrapper of model built by `task`."""
@classmethod
def build_model(cls, args, task):
return FairseqMMModel(task.mmtask.model)
def __init__(self, mmmodel):
super().__init__()
self.mmmodel = mmmodel
def forward(self, *args, **kwargs):
return self.mmmodel(*args, **kwargs)
def upgrade_state_dict_named(self, state_dict, name):
super().upgrade_state_dict_named(state_dict, name)
keys_to_delete = []
for key in state_dict:
if key not in self.state_dict():
keys_to_delete.append(key)
for key in keys_to_delete:
print("[INFO]", key, "not used anymore.")
del state_dict[key]
# copy any newly defined parameters.
for key in self.state_dict():
if key not in state_dict:
print("[INFO] adding", key)
state_dict[key] = self.state_dict()[key]
# a dummy arch, we config the model.
@register_model_architecture("mmmodel", "mmarch")
def mmarch(args):
pass
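# Illustrative sketch (plain dicts, not part of MMPT): mimics the checkpoint
# reconciliation in upgrade_state_dict_named above; stale keys are dropped and
# newly defined parameters are copied from the freshly built model.
_current = {"mmmodel.a": 1, "mmmodel.b": 2}     # keys of the instantiated model
_loaded = {"mmmodel.a": 10, "mmmodel.old": 99}  # keys found in the checkpoint
for _key in [k for k in _loaded if k not in _current]:
    del _loaded[_key]                           # "mmmodel.old" not used anymore
for _key in _current:
    if _key not in _loaded:
        _loaded[_key] = _current[_key]          # copy newly defined "mmmodel.b"
assert set(_loaded) == set(_current)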
| EXA-1-master | exa/libraries/fairseq/examples/MMPT/mmpt/models/fairseqmmmodel.py |
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors, Facebook AI Research authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Copyright (c) Facebook, Inc. All Rights Reserved
import torch
from torch.nn import functional as F
from typing import Optional, Iterable
try:
from transformers import BertPreTrainedModel
from transformers.modeling_bert import BertOnlyMLMHead
from transformers.file_utils import ModelOutput
from transformers.modeling_outputs import CausalLMOutput
from transformers.generation_utils import (
BeamHypotheses,
top_k_top_p_filtering
)
except ImportError:
pass
from .mmfusion import MMFusion
from .transformermodel import MMBertModel
from ..modules import VideoTokenMLP
class MMFusionNLG(MMFusion):
def __init__(self, config, **kwargs):
super().__init__(config)
if config.model.max_decode_length is not None:
self.max_length = min(
config.model.max_decode_length,
config.dataset.max_len - config.dataset.max_video_len - 3
)
else:
self.max_length = \
config.dataset.max_len - config.dataset.max_video_len - 3
self.gen_param = config.gen_param if config.gen_param is not None \
else {}
def forward(
self,
caps,
cmasks,
vfeats,
vmasks,
attention_mask,
video_label=None,
text_label=None,
**kwargs
):
"""use pre-trained LM header for generation."""
attention_mask, token_type_ids = self._mm_on_the_fly(
cmasks, vmasks, attention_mask)
outputs = self.mm_encoder(
input_ids=caps,
input_video_embeds=vfeats,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
masked_lm_labels=text_label,
)
return {"logits": outputs[0]}
@torch.no_grad()
def generate(
self,
caps, cmasks, vfeats, vmasks,
attention_mask=None,
bos_token_id=None,
eos_token_id=None,
**kwargs
):
# a simplified interface from
# https://huggingface.co/transformers/v3.4.0/_modules/transformers/generation_utils.html#GenerationMixin.generate
# caps now only have
# [CLS], [SEP] (for video) and [CLS] (as bos_token)
assert caps.size(1) == 3
attention_mask, token_type_ids = self._mm_on_the_fly(
cmasks, vmasks, attention_mask)
output = self.mm_encoder.generate(
input_ids=caps,
input_video_embeds=vfeats,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
bos_token_id=bos_token_id,
eos_token_id=eos_token_id,
max_length=self.max_length,
**self.gen_param
)
return output
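# Illustrative sketch (hypothetical lengths, not part of MMPT): the decode budget
# above reserves 3 positions for special tokens, matching the generation prompt
# of [CLS], [SEP] (closing the video span) and [CLS] (used as bos_token).
_max_len, _max_video_len = 96, 32
_max_decode_length = _max_len - _max_video_len - 3
assert _max_decode_length == 61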
class MMBertForNLG(BertPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.bert = MMBertModel(config)
self.videomlp = VideoTokenMLP(config)
# we do not use `BertGenerationOnlyLMHead`
# because we can reuse the pretrained MLM head.
self.cls = BertOnlyMLMHead(config)
self.hidden_size = config.hidden_size
self.init_weights()
def get_output_embeddings(self):
return self.cls.predictions.decoder
def forward(
self,
input_ids=None,
input_video_embeds=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
masked_lm_labels=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
# similar to MMBertForMFMMLM without MFM.
video_tokens = self.videomlp(input_video_embeds)
outputs = self.bert(
input_ids,
video_tokens,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
sequence_output = outputs[0]
prediction_scores = None
if masked_lm_labels is not None:
text_offset = input_video_embeds.size(1) + 1 # [CLS]
# recover caps format: [CLS] [SEP] text [SEP]
text_sequence_output = torch.cat(
[sequence_output[:, :1], sequence_output[:, text_offset:]],
dim=1
)
# only compute selected tokens during training to speed up.
hidden_size = text_sequence_output.size(-1)
# masked_lm_labels = masked_lm_labels.reshape(-1)
labels_mask = masked_lm_labels != -100
selected_text_output = text_sequence_output.masked_select(
labels_mask.unsqueeze(-1)
).view(-1, hidden_size)
prediction_scores = self.cls(selected_text_output)
if not return_dict:
output = (
prediction_scores,
) + outputs[2:]
return output
# for generation.
text_offset = input_video_embeds.size(1) + 2 # [CLS]
text_sequence_output = sequence_output[:, text_offset:]
prediction_scores = self.cls(text_sequence_output)
return CausalLMOutput(
loss=None,
logits=prediction_scores,
)
def prepare_inputs_for_generation(
self,
input_ids,
input_video_embeds,
attention_mask=None,
token_type_ids=None,
**model_kwargs
):
# must return a dictionary.
seq_len = input_ids.size(1) + input_video_embeds.size(1)
if attention_mask is not None:
if len(attention_mask.size()) == 4:
attention_mask = attention_mask[:, :, :seq_len, :seq_len]
elif len(attention_mask.size()) == 3:
attention_mask = attention_mask[:, :seq_len, :seq_len]
else:
attention_mask = attention_mask[:, :seq_len]
if token_type_ids is not None:
token_type_ids = token_type_ids[:, :seq_len]
return {
"input_ids": input_ids,
"input_video_embeds": input_video_embeds,
"attention_mask": attention_mask,
"token_type_ids": token_type_ids,
}
@torch.no_grad()
def generate(
self,
input_ids: Optional[torch.LongTensor] = None,
decoder_input_ids: Optional[torch.LongTensor] = None,
max_length: Optional[int] = None,
min_length: Optional[int] = None,
do_sample: Optional[bool] = None,
early_stopping: Optional[bool] = None,
num_beams: Optional[int] = None,
temperature: Optional[float] = None,
top_k: Optional[int] = None,
top_p: Optional[float] = None,
repetition_penalty: Optional[float] = None,
bad_words_ids: Optional[Iterable[int]] = None,
bos_token_id: Optional[int] = None,
pad_token_id: Optional[int] = None,
eos_token_id: Optional[int] = None,
length_penalty: Optional[float] = None,
no_repeat_ngram_size: Optional[int] = None,
num_return_sequences: Optional[int] = None,
attention_mask: Optional[torch.LongTensor] = None,
decoder_start_token_id: Optional[int] = None,
use_cache: Optional[bool] = None,
**model_kwargs
) -> torch.LongTensor:
r"""
Generates sequences for models with a language modeling head. The method currently supports greedy decoding,
beam-search decoding, sampling with temperature, sampling with top-k or nucleus sampling.
Adapted in part from `Facebook's XLM beam search code
<https://github.com/facebookresearch/XLM/blob/9e6f6814d17be4fe5b15f2e6c43eb2b2d76daeb4/src/model/transformer.py#L529>`__.
Apart from :obj:`input_ids` and :obj:`attention_mask`, all the arguments below will default to the value of the
attribute of the same name inside the :class:`~transformers.PretrainedConfig` of the model. The default values
indicated are the default values of those configs.
Most of these parameters are explained in more detail in `this blog post
<https://huggingface.co/blog/how-to-generate>`__.
Parameters:
input_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
The sequence used as a prompt for the generation. If :obj:`None` the method initializes
it as an empty :obj:`torch.LongTensor` of shape :obj:`(1,)`.
decoder_input_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
initial input_ids for the decoder of encoder-decoder type models. If :obj:`None` then only
decoder_start_token_id is passed as the first token to the decoder.
max_length (:obj:`int`, `optional`, defaults to 20):
The maximum length of the sequence to be generated.
min_length (:obj:`int`, `optional`, defaults to 10):
The minimum length of the sequence to be generated.
do_sample (:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether or not to use sampling ; use greedy decoding otherwise.
early_stopping (:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether to stop the beam search when at least ``num_beams`` sentences are finished per batch or not.
num_beams (:obj:`int`, `optional`, defaults to 1):
Number of beams for beam search. 1 means no beam search.
temperature (:obj:`float`, `optional`, defaults to 1.0):
The value used to module the next token probabilities.
top_k (:obj:`int`, `optional`, defaults to 50):
The number of highest probability vocabulary tokens to keep for top-k-filtering.
top_p (:obj:`float`, `optional`, defaults to 1.0):
If set to float < 1, only the most probable tokens with probabilities that add up to ``top_p`` or
higher are kept for generation.
repetition_penalty (:obj:`float`, `optional`, defaults to 1.0):
The parameter for repetition penalty. 1.0 means no penalty. See `this paper
<https://arxiv.org/pdf/1909.05858.pdf>`__ for more details.
pad_token_id (:obj:`int`, `optional`):
The id of the `padding` token.
bos_token_id (:obj:`int`, `optional`):
The id of the `beginning-of-sequence` token.
eos_token_id (:obj:`int`, `optional`):
The id of the `end-of-sequence` token.
length_penalty (:obj:`float`, `optional`, defaults to 1.0):
Exponential penalty to the length. 1.0 means no penalty.
Set to values < 1.0 in order to encourage the model to generate shorter sequences, to a value > 1.0 in
order to encourage the model to produce longer sequences.
no_repeat_ngram_size (:obj:`int`, `optional`, defaults to 0):
If set to int > 0, all ngrams of that size can only occur once.
bad_words_ids(:obj:`List[int]`, `optional`):
List of token ids that are not allowed to be generated. In order to get the tokens of the words that
should not appear in the generated text, use :obj:`tokenizer.encode(bad_word, add_prefix_space=True)`.
num_return_sequences(:obj:`int`, `optional`, defaults to 1):
The number of independently computed returned sequences for each element in the batch.
attention_mask (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
Mask to avoid performing attention on padding token indices. Mask values are in ``[0, 1]``, 1 for
tokens that are not masked, and 0 for masked tokens.
If not provided, will default to a tensor the same shape as :obj:`input_ids` that masks the pad token.
`What are attention masks? <../glossary.html#attention-mask>`__
decoder_start_token_id (:obj:`int`, `optional`):
If an encoder-decoder model starts decoding with a different token than `bos`, the id of that token.
use_cache: (:obj:`bool`, `optional`, defaults to :obj:`True`):
Whether or not the model should use the past last key/values attentions (if applicable to the model) to
speed up decoding.
model_kwargs:
Additional model specific kwargs will be forwarded to the :obj:`forward` function of the model.
Return:
:obj:`torch.LongTensor` of shape :obj:`(batch_size * num_return_sequences, sequence_length)`:
The generated sequences. The second dimension (sequence_length) is either equal to :obj:`max_length` or
shorter if all batches finished early due to the :obj:`eos_token_id`.
Examples::
tokenizer = AutoTokenizer.from_pretrained('distilgpt2') # Initialize tokenizer
model = AutoModelWithLMHead.from_pretrained('distilgpt2') # Download model and configuration from S3 and cache.
outputs = model.generate(max_length=40) # do greedy decoding
print('Generated: {}'.format(tokenizer.decode(outputs[0], skip_special_tokens=True)))
tokenizer = AutoTokenizer.from_pretrained('openai-gpt') # Initialize tokenizer
model = AutoModelWithLMHead.from_pretrained('openai-gpt') # Download model and configuration from S3 and cache.
input_context = 'The dog'
input_ids = tokenizer.encode(input_context, return_tensors='pt') # encode input context
outputs = model.generate(input_ids=input_ids, num_beams=5, num_return_sequences=3, temperature=1.5) # generate 3 independent sequences using beam search decoding (5 beams) with sampling from initial context 'The dog'
for i in range(3): # 3 output sequences were generated
print('Generated {}: {}'.format(i, tokenizer.decode(outputs[i], skip_special_tokens=True)))
tokenizer = AutoTokenizer.from_pretrained('distilgpt2') # Initialize tokenizer
model = AutoModelWithLMHead.from_pretrained('distilgpt2') # Download model and configuration from S3 and cache.
input_context = 'The dog'
input_ids = tokenizer.encode(input_context, return_tensors='pt') # encode input context
outputs = model.generate(input_ids=input_ids, max_length=40, temperature=0.7, num_return_sequences=3, do_sample=True) # generate 3 candidates using sampling
for i in range(3): # 3 output sequences were generated
print('Generated {}: {}'.format(i, tokenizer.decode(outputs[i], skip_special_tokens=True)))
tokenizer = AutoTokenizer.from_pretrained('ctrl') # Initialize tokenizer
model = AutoModelWithLMHead.from_pretrained('ctrl') # Download model and configuration from S3 and cache.
input_context = 'Legal My neighbor is' # "Legal" is one of the control codes for ctrl
input_ids = tokenizer.encode(input_context, return_tensors='pt') # encode input context
outputs = model.generate(input_ids=input_ids, max_length=50, temperature=0.7, repetition_penalty=1.2) # generate sequences
print('Generated: {}'.format(tokenizer.decode(outputs[0], skip_special_tokens=True)))
tokenizer = AutoTokenizer.from_pretrained('gpt2') # Initialize tokenizer
model = AutoModelWithLMHead.from_pretrained('gpt2') # Download model and configuration from S3 and cache.
input_context = 'My cute dog'
bad_words_ids = [tokenizer.encode(bad_word, add_prefix_space=True) for bad_word in ['idiot', 'stupid', 'shut up']]
input_ids = tokenizer.encode(input_context, return_tensors='pt') # encode input context
outputs = model.generate(input_ids=input_ids, max_length=100, do_sample=True, bad_words_ids=bad_words_ids) # generate sequences without allowing bad_words to be generated
"""
# We cannot generate if the model does not have a LM head
if self.get_output_embeddings() is None:
raise AttributeError(
"You tried to generate sequences with a model that does not have a LM Head."
"Please use another model class (e.g. `OpenAIGPTLMHeadModel`, `XLNetLMHeadModel`, `GPT2LMHeadModel`, `CTRLLMHeadModel`, `T5WithLMHeadModel`, `TransfoXLLMHeadModel`, `XLMWithLMHeadModel`, `BartForConditionalGeneration` )"
)
max_length = max_length if max_length is not None else self.config.max_length
min_length = min_length if min_length is not None else self.config.min_length
do_sample = do_sample if do_sample is not None else self.config.do_sample
early_stopping = early_stopping if early_stopping is not None else self.config.early_stopping
use_cache = use_cache if use_cache is not None else self.config.use_cache
num_beams = num_beams if num_beams is not None else self.config.num_beams
temperature = temperature if temperature is not None else self.config.temperature
top_k = top_k if top_k is not None else self.config.top_k
top_p = top_p if top_p is not None else self.config.top_p
repetition_penalty = repetition_penalty if repetition_penalty is not None else self.config.repetition_penalty
bos_token_id = bos_token_id if bos_token_id is not None else self.config.bos_token_id
pad_token_id = pad_token_id if pad_token_id is not None else self.config.pad_token_id
eos_token_id = eos_token_id if eos_token_id is not None else self.config.eos_token_id
length_penalty = length_penalty if length_penalty is not None else self.config.length_penalty
no_repeat_ngram_size = (
no_repeat_ngram_size if no_repeat_ngram_size is not None else self.config.no_repeat_ngram_size
)
bad_words_ids = bad_words_ids if bad_words_ids is not None else self.config.bad_words_ids
num_return_sequences = (
num_return_sequences if num_return_sequences is not None else self.config.num_return_sequences
)
decoder_start_token_id = (
decoder_start_token_id if decoder_start_token_id is not None else self.config.decoder_start_token_id
)
if input_ids is not None:
batch_size = input_ids.shape[0] # overridden by the input batch_size
else:
batch_size = 1
assert isinstance(max_length, int) and max_length > 0, "`max_length` should be a strictly positive integer."
assert isinstance(min_length, int) and min_length >= 0, "`min_length` should be a positive integer."
assert isinstance(do_sample, bool), "`do_sample` should be a boolean."
assert isinstance(early_stopping, bool), "`early_stopping` should be a boolean."
assert isinstance(use_cache, bool), "`use_cache` should be a boolean."
assert isinstance(num_beams, int) and num_beams > 0, "`num_beams` should be a strictly positive integer."
assert temperature > 0, "`temperature` should be strictly positive."
assert isinstance(top_k, int) and top_k >= 0, "`top_k` should be a positive integer."
assert 0 <= top_p <= 1, "`top_p` should be between 0 and 1."
assert repetition_penalty >= 1.0, "`repetition_penalty` should be >= 1."
assert input_ids is not None or (
isinstance(bos_token_id, int) and bos_token_id >= 0
), "If input_ids is not defined, `bos_token_id` should be a positive integer."
assert pad_token_id is None or (
isinstance(pad_token_id, int) and (pad_token_id >= 0)
), "`pad_token_id` should be a positive integer."
assert (eos_token_id is None) or (
isinstance(eos_token_id, int) and (eos_token_id >= 0)
), "`eos_token_id` should be a positive integer."
assert length_penalty > 0, "`length_penalty` should be strictly positive."
assert (
isinstance(no_repeat_ngram_size, int) and no_repeat_ngram_size >= 0
), "`no_repeat_ngram_size` should be a positive integer."
assert (
isinstance(num_return_sequences, int) and num_return_sequences > 0
), "`num_return_sequences` should be a strictly positive integer."
assert (
bad_words_ids is None or isinstance(bad_words_ids, list) and isinstance(bad_words_ids[0], list)
), "`bad_words_ids` is either `None` or a list of lists of tokens that should not be generated"
if input_ids is None:
assert isinstance(bos_token_id, int) and bos_token_id >= 0, (
"you should either supply a context to complete as `input_ids` input "
"or a `bos_token_id` (integer >= 0) as a first token to start the generation."
)
input_ids = torch.full(
(batch_size, 1),
bos_token_id,
dtype=torch.long,
device=next(self.parameters()).device,
)
else:
assert input_ids.dim() == 2, "Input prompt should be of shape (batch_size, sequence length)."
# not allow to duplicate outputs when greedy decoding
if do_sample is False:
if num_beams == 1:
# no_beam_search greedy generation conditions
assert (
num_return_sequences == 1
), "Greedy decoding will always produce the same output for num_beams == 1 and num_return_sequences > 1. Please set num_return_sequences = 1"
else:
# beam_search greedy generation conditions
assert (
num_beams >= num_return_sequences
), "Greedy beam search decoding cannot return more sequences than it has beams. Please set num_beams >= num_return_sequences"
# create attention mask if necessary
# TODO (PVP): this should later be handled by the forward fn() in each model in the future see PR 3140
if (attention_mask is None) and (pad_token_id is not None) and (pad_token_id in input_ids):
attention_mask = input_ids.ne(pad_token_id).long()
elif attention_mask is None:
attention_mask = input_ids.new_ones(input_ids.shape)
# set pad_token_id to eos_token_id if not set. Important that this is done after
# attention_mask is created
if pad_token_id is None and eos_token_id is not None:
print(
"Setting `pad_token_id` to {} (first `eos_token_id`) to generate sequence".format(eos_token_id)
)
pad_token_id = eos_token_id
# vocab size
if hasattr(self.config, "vocab_size"):
vocab_size = self.config.vocab_size
elif (
self.config.is_encoder_decoder
and hasattr(self.config, "decoder")
and hasattr(self.config.decoder, "vocab_size")
):
vocab_size = self.config.decoder.vocab_size
else:
raise ValueError("either self.config.vocab_size or self.config.decoder.vocab_size needs to be defined")
# set effective batch size and effective batch multiplier according to do_sample
if do_sample:
effective_batch_size = batch_size * num_return_sequences
effective_batch_mult = num_return_sequences
else:
effective_batch_size = batch_size
effective_batch_mult = 1
if self.config.is_encoder_decoder:
if decoder_start_token_id is None:
# see if BOS token can be used for decoder_start_token_id
if bos_token_id is not None:
decoder_start_token_id = bos_token_id
elif (
hasattr(self.config, "decoder")
and hasattr(self.config.decoder, "bos_token_id")
and self.config.decoder.bos_token_id is not None
):
decoder_start_token_id = self.config.decoder.bos_token_id
else:
raise ValueError(
"decoder_start_token_id or bos_token_id has to be defined for encoder-decoder generation"
)
assert hasattr(self, "get_encoder"), "{} should have a 'get_encoder' function defined".format(self)
assert callable(self.get_encoder), "{} should be a method".format(self.get_encoder)
# get encoder and store encoder outputs
encoder = self.get_encoder()
encoder_outputs: ModelOutput = encoder(input_ids, attention_mask=attention_mask, return_dict=True)
# Expand input ids if num_beams > 1 or num_return_sequences > 1
if num_return_sequences > 1 or num_beams > 1:
# TODO: make this a call-back function.
# input_ids=caps,
# input_video_embeds=vfeats,
# attention_mask=attention_mask,
# token_type_ids=token_type_ids,
input_video_embeds = model_kwargs.pop("input_video_embeds", None)
token_type_ids = model_kwargs.pop("token_type_ids", None)
input_ids_len = input_ids.shape[-1]
input_ids = input_ids.unsqueeze(1).expand(
batch_size, effective_batch_mult * num_beams, input_ids_len)
input_video_embeds_len, input_video_embeds_hidden = input_video_embeds.size(1), input_video_embeds.size(2)
input_video_embeds = input_video_embeds.unsqueeze(1).expand(
batch_size, effective_batch_mult * num_beams, input_video_embeds_len, input_video_embeds_hidden)
attention_mask_from_len, attention_mask_to_len = attention_mask.size(1), attention_mask.size(2)
attention_mask = attention_mask.unsqueeze(1).expand(
batch_size, effective_batch_mult * num_beams, attention_mask_from_len, attention_mask_to_len
)
token_type_ids_len = token_type_ids.size(1)
token_type_ids = token_type_ids.unsqueeze(1).expand(
batch_size, effective_batch_mult * num_beams, token_type_ids_len
)
# contiguous ...
input_ids = input_ids.contiguous().view(
effective_batch_size * num_beams, input_ids_len
) # shape: (batch_size * num_return_sequences * num_beams, cur_len)
input_video_embeds = input_video_embeds.contiguous().view(
effective_batch_size * num_beams, input_video_embeds_len, input_video_embeds_hidden)
attention_mask = attention_mask.contiguous().view(
effective_batch_size * num_beams, attention_mask_from_len, attention_mask_to_len
) # shape: (batch_size * num_return_sequences * num_beams, cur_len)
token_type_ids = token_type_ids.contiguous().view(
effective_batch_size * num_beams, token_type_ids_len
)
model_kwargs["input_video_embeds"] = input_video_embeds
model_kwargs["token_type_ids"] = token_type_ids
if self.config.is_encoder_decoder:
device = next(self.parameters()).device
if decoder_input_ids is not None:
# give initial decoder input ids
input_ids = decoder_input_ids.repeat(effective_batch_size * num_beams, 1).to(device)
else:
# create empty decoder input_ids
input_ids = torch.full(
(effective_batch_size * num_beams, 1),
decoder_start_token_id,
dtype=torch.long,
device=device,
)
cur_len = input_ids.shape[-1]
assert (
batch_size == encoder_outputs.last_hidden_state.shape[0]
), f"expected encoder_outputs.last_hidden_state to have 1st dimension bs={batch_size}, got {encoder_outputs.last_hidden_state.shape[0]} "
# expand batch_idx to assign correct encoder output for expanded input_ids (due to num_beams > 1 and num_return_sequences > 1)
expanded_batch_idxs = (
torch.arange(batch_size)
.view(-1, 1)
.repeat(1, num_beams * effective_batch_mult)
.view(-1)
.to(input_ids.device)
)
# expand encoder_outputs
encoder_outputs["last_hidden_state"] = encoder_outputs.last_hidden_state.index_select(
0, expanded_batch_idxs
)
# save encoder_outputs in `model_kwargs`
model_kwargs["encoder_outputs"] = encoder_outputs
else:
cur_len = input_ids.shape[-1]
assert (
cur_len < max_length
), f"The context has {cur_len} number of tokens, but `max_length` is only {max_length}. Please make sure that `max_length` is bigger than the number of tokens, by setting either `generate(max_length=...,...)` or `config.max_length = ...`"
if num_beams > 1:
output = self._generate_beam_search(
input_ids,
cur_len=cur_len,
max_length=max_length,
min_length=min_length,
do_sample=do_sample,
early_stopping=early_stopping,
temperature=temperature,
top_k=top_k,
top_p=top_p,
repetition_penalty=repetition_penalty,
no_repeat_ngram_size=no_repeat_ngram_size,
bad_words_ids=bad_words_ids,
pad_token_id=pad_token_id,
eos_token_id=eos_token_id,
batch_size=effective_batch_size,
num_return_sequences=num_return_sequences,
length_penalty=length_penalty,
num_beams=num_beams,
vocab_size=vocab_size,
attention_mask=attention_mask,
use_cache=use_cache,
model_kwargs=model_kwargs,
)
else:
output = self._generate_no_beam_search(
input_ids,
cur_len=cur_len,
max_length=max_length,
min_length=min_length,
do_sample=do_sample,
temperature=temperature,
top_k=top_k,
top_p=top_p,
repetition_penalty=repetition_penalty,
no_repeat_ngram_size=no_repeat_ngram_size,
bad_words_ids=bad_words_ids,
pad_token_id=pad_token_id,
eos_token_id=eos_token_id,
batch_size=effective_batch_size,
attention_mask=attention_mask,
use_cache=use_cache,
model_kwargs=model_kwargs,
)
return output
def _generate_beam_search(
self,
input_ids,
cur_len,
max_length,
min_length,
do_sample,
early_stopping,
temperature,
top_k,
top_p,
repetition_penalty,
no_repeat_ngram_size,
bad_words_ids,
pad_token_id,
eos_token_id,
batch_size,
num_return_sequences,
length_penalty,
num_beams,
vocab_size,
attention_mask,
use_cache,
model_kwargs,
):
"""Generate sequences for each example with beam search."""
# generated hypotheses
generated_hyps = [
BeamHypotheses(num_beams, max_length, length_penalty, early_stopping=early_stopping)
for _ in range(batch_size)
]
# scores for each sentence in the beam
beam_scores = torch.zeros((batch_size, num_beams), dtype=torch.float, device=input_ids.device)
# for greedy decoding it is made sure that only tokens of the first beam are considered to avoid sampling the exact same tokens three times
if do_sample is False:
beam_scores[:, 1:] = -1e9
beam_scores = beam_scores.view(-1) # shape (batch_size * num_beams,)
# cache compute states
past = None
# done sentences
done = [False for _ in range(batch_size)]
while cur_len < max_length:
model_inputs = self.prepare_inputs_for_generation(
input_ids, past=past, attention_mask=attention_mask, use_cache=use_cache, **model_kwargs
)
outputs = self(**model_inputs, return_dict=True) # (batch_size * num_beams, cur_len, vocab_size)
next_token_logits = outputs.logits[:, -1, :] # (batch_size * num_beams, vocab_size)
# if model has past, then set the past variable to speed up decoding
if "past_key_values" in outputs:
past = outputs.past_key_values
elif "mems" in outputs:
past = outputs.mems
if self.config.is_encoder_decoder and do_sample is False:
# TODO (PVP) still a bit hacky here - there might be a better solution
next_token_logits = self.adjust_logits_during_generation(
next_token_logits, cur_len=cur_len, max_length=max_length
)
scores = F.log_softmax(next_token_logits, dim=-1) # (batch_size * num_beams, vocab_size)
scores = self.postprocess_next_token_scores(
scores=scores,
input_ids=input_ids,
no_repeat_ngram_size=no_repeat_ngram_size,
bad_words_ids=bad_words_ids,
cur_len=cur_len,
min_length=min_length,
max_length=max_length,
eos_token_id=eos_token_id,
repetition_penalty=repetition_penalty,
batch_size=batch_size,
num_beams=num_beams,
)
assert scores.shape == (batch_size * num_beams, vocab_size), "Shapes of scores: {} != {}".format(
scores.shape, (batch_size * num_beams, vocab_size)
)
if do_sample:
_scores = scores + beam_scores[:, None].expand_as(scores) # (batch_size * num_beams, vocab_size)
# Temperature
if temperature != 1.0:
_scores = _scores / temperature
# Top-p/top-k filtering
_scores = top_k_top_p_filtering(
_scores, top_k=top_k, top_p=top_p, min_tokens_to_keep=2
) # (batch_size * num_beams, vocab_size)
# re-organize to group the beam together to sample from all beam_idxs
_scores = _scores.contiguous().view(
batch_size, num_beams * vocab_size
) # (batch_size, num_beams * vocab_size)
# Sample 2 next tokens for each beam (so we have some spare tokens and match output of greedy beam search)
probs = F.softmax(_scores, dim=-1)
next_tokens = torch.multinomial(probs, num_samples=2 * num_beams) # (batch_size, num_beams * 2)
# Compute next scores
next_scores = torch.gather(_scores, -1, next_tokens) # (batch_size, num_beams * 2)
# sort the sampled vector to make sure that the first num_beams samples are the best
next_scores, next_scores_indices = torch.sort(next_scores, descending=True, dim=1)
next_tokens = torch.gather(next_tokens, -1, next_scores_indices) # (batch_size, num_beams * 2)
else:
next_scores = scores + beam_scores[:, None].expand_as(scores) # (batch_size * num_beams, vocab_size)
                # re-organize to group the beams together (we are keeping the top hypotheses across beams)
next_scores = next_scores.view(
batch_size, num_beams * vocab_size
) # (batch_size, num_beams * vocab_size)
next_scores, next_tokens = torch.topk(next_scores, 2 * num_beams, dim=1, largest=True, sorted=True)
assert next_scores.size() == next_tokens.size() == (batch_size, 2 * num_beams)
# next batch beam content
next_batch_beam = []
# for each sentence
for batch_idx in range(batch_size):
# if we are done with this sentence, add a pad token
if done[batch_idx]:
assert (
len(generated_hyps[batch_idx]) >= num_beams
), "Batch can only be done if at least {} beams have been generated".format(num_beams)
assert (
eos_token_id is not None and pad_token_id is not None
), "generated beams >= num_beams -> eos_token_id and pad_token have to be defined"
next_batch_beam.extend([(0, pad_token_id, 0)] * num_beams) # pad the batch
continue
# next sentence beam content, this will get added to next_batch_beam
next_sent_beam = []
# next tokens for this sentence
for beam_token_rank, (beam_token_id, beam_token_score) in enumerate(
zip(next_tokens[batch_idx], next_scores[batch_idx])
):
# get beam and token IDs
beam_id = beam_token_id // vocab_size
token_id = beam_token_id % vocab_size
effective_beam_id = batch_idx * num_beams + beam_id
# add to generated hypotheses if end of sentence
if (eos_token_id is not None) and (token_id.item() == eos_token_id):
# if beam_token does not belong to top num_beams tokens, it should not be added
is_beam_token_worse_than_top_num_beams = beam_token_rank >= num_beams
if is_beam_token_worse_than_top_num_beams:
continue
generated_hyps[batch_idx].add(
input_ids[effective_beam_id].clone(),
beam_token_score.item(),
)
else:
# add next predicted token since it is not eos_token
next_sent_beam.append((beam_token_score, token_id, effective_beam_id))
# once the beam for next step is full, don't add more tokens to it.
if len(next_sent_beam) == num_beams:
break
# Check if we are done so that we can save a pad step if all(done)
done[batch_idx] = done[batch_idx] or generated_hyps[batch_idx].is_done(
next_scores[batch_idx].max().item(), cur_len
)
# update next beam content
assert len(next_sent_beam) == num_beams, "Beam should always be full"
next_batch_beam.extend(next_sent_beam)
assert len(next_batch_beam) == num_beams * (batch_idx + 1), "We should have added num_beams each step"
# stop when we are done with each sentence
if all(done):
break
# sanity check / prepare next batch
assert len(next_batch_beam) == batch_size * num_beams
beam_scores = beam_scores.new([x[0] for x in next_batch_beam])
beam_tokens = input_ids.new([x[1] for x in next_batch_beam])
beam_idx = input_ids.new([x[2] for x in next_batch_beam])
# re-order batch and update current length
input_ids = input_ids[beam_idx, :]
input_ids = torch.cat([input_ids, beam_tokens.unsqueeze(1)], dim=-1)
cur_len = cur_len + 1
# re-order internal states
if past is not None:
past = self._reorder_cache(past, beam_idx)
# extend attention_mask for new generated input if only decoder
# (huxu): move out since we trim attention_mask by ourselves.
# if self.config.is_encoder_decoder is False:
# attention_mask = torch.cat(
# [attention_mask, attention_mask.new_ones((attention_mask.shape[0], 1))], dim=-1
# )
# finalize all open beam hypotheses and add to generated hypotheses
for batch_idx in range(batch_size):
if done[batch_idx]:
continue
# test that beam scores match previously calculated scores if not eos and batch_idx not done
if eos_token_id is not None and all(
(token_id % vocab_size).item() != eos_token_id for token_id in next_tokens[batch_idx]
):
assert torch.all(
next_scores[batch_idx, :num_beams] == beam_scores.view(batch_size, num_beams)[batch_idx]
), "If batch_idx is not done, final next scores: {} have to equal to accumulated beam_scores: {}".format(
next_scores[:, :num_beams][batch_idx],
beam_scores.view(batch_size, num_beams)[batch_idx],
)
# need to add best num_beams hypotheses to generated hyps
for beam_id in range(num_beams):
effective_beam_id = batch_idx * num_beams + beam_id
final_score = beam_scores[effective_beam_id].item()
final_tokens = input_ids[effective_beam_id]
generated_hyps[batch_idx].add(final_tokens, final_score)
# depending on whether greedy generation is wanted or not define different output_batch_size and output_num_return_sequences_per_batch
output_batch_size = batch_size if do_sample else batch_size * num_return_sequences
output_num_return_sequences_per_batch = 1 if do_sample else num_return_sequences
# select the best hypotheses
sent_lengths = input_ids.new(output_batch_size)
best = []
# retrieve best hypotheses
for i, hypotheses in enumerate(generated_hyps):
sorted_hyps = sorted(hypotheses.beams, key=lambda x: x[0])
for j in range(output_num_return_sequences_per_batch):
effective_batch_idx = output_num_return_sequences_per_batch * i + j
best_hyp = sorted_hyps.pop()[1]
sent_lengths[effective_batch_idx] = len(best_hyp)
best.append(best_hyp)
# prepare for adding eos
sent_max_len = min(sent_lengths.max().item() + 1, max_length)
decoded = input_ids.new(output_batch_size, sent_max_len)
# shorter batches are padded if needed
if sent_lengths.min().item() != sent_lengths.max().item():
assert pad_token_id is not None, "`pad_token_id` has to be defined"
decoded.fill_(pad_token_id)
# fill with hypotheses and eos_token_id if the latter fits in
for i, hypo in enumerate(best):
decoded[i, : sent_lengths[i]] = hypo
if sent_lengths[i] < max_length:
decoded[i, sent_lengths[i]] = eos_token_id
return decoded
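    # Worked example of the flattened-id arithmetic used above (illustrative
    # values only): with vocab_size = 30522 and beam_token_id = 61050,
    #   beam_id  = 61050 // 30522 = 2  (third beam of this sentence)
    #   token_id = 61050 %  30522 = 6  (token id within the vocabulary)
    # and effective_beam_id = batch_idx * num_beams + beam_id selects the row
    # in the (batch_size * num_beams, ...) tensors.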
def _generate_no_beam_search(
self,
input_ids,
cur_len,
max_length,
min_length,
do_sample,
temperature,
top_k,
top_p,
repetition_penalty,
no_repeat_ngram_size,
bad_words_ids,
pad_token_id,
eos_token_id,
batch_size,
attention_mask,
use_cache,
model_kwargs,
):
"""Generate sequences for each example without beam search (num_beams == 1).
        All returned sequences are generated independently.
"""
# length of generated sentences / unfinished sentences
unfinished_sents = input_ids.new(batch_size).fill_(1)
sent_lengths = input_ids.new(batch_size).fill_(max_length)
past = None
while cur_len < max_length:
model_inputs = self.prepare_inputs_for_generation(
input_ids, past=past, attention_mask=attention_mask, use_cache=use_cache, **model_kwargs
)
outputs = self(**model_inputs, return_dict=True)
next_token_logits = outputs.logits[:, -1, :]
scores = self.postprocess_next_token_scores(
scores=next_token_logits,
input_ids=input_ids,
no_repeat_ngram_size=no_repeat_ngram_size,
bad_words_ids=bad_words_ids,
cur_len=cur_len,
min_length=min_length,
max_length=max_length,
eos_token_id=eos_token_id,
repetition_penalty=repetition_penalty,
batch_size=batch_size,
num_beams=1,
)
# if model has past, then set the past variable to speed up decoding
if "past_key_values" in outputs:
past = outputs.past_key_values
elif "mems" in outputs:
past = outputs.mems
if do_sample:
# Temperature (higher temperature => more likely to sample low probability tokens)
if temperature != 1.0:
scores = scores / temperature
# Top-p/top-k filtering
next_token_logscores = top_k_top_p_filtering(scores, top_k=top_k, top_p=top_p)
# Sample
probs = F.softmax(next_token_logscores, dim=-1)
next_token = torch.multinomial(probs, num_samples=1).squeeze(1)
else:
# Greedy decoding
next_token = torch.argmax(next_token_logits, dim=-1)
# print(next_token_logits[0,next_token[0]], next_token_logits[0,eos_token_id])
# update generations and finished sentences
if eos_token_id is not None:
# pad finished sentences if eos_token_id exist
tokens_to_add = next_token * unfinished_sents + (pad_token_id) * (1 - unfinished_sents)
else:
tokens_to_add = next_token
# add token and increase length by one
input_ids = torch.cat([input_ids, tokens_to_add.unsqueeze(-1)], dim=-1)
cur_len = cur_len + 1
if eos_token_id is not None:
eos_in_sents = tokens_to_add == eos_token_id
# if sentence is unfinished and the token to add is eos, sent_lengths is filled with current length
is_sents_unfinished_and_token_to_add_is_eos = unfinished_sents.mul(eos_in_sents.long()).bool()
sent_lengths.masked_fill_(is_sents_unfinished_and_token_to_add_is_eos, cur_len)
# unfinished_sents is set to zero if eos in sentence
unfinished_sents.mul_((~eos_in_sents).long())
            # stop when there is a </s> in each sentence, or if we exceed the maximum length
if unfinished_sents.max() == 0:
break
# extend attention_mask for new generated input if only decoder
# if self.config.is_encoder_decoder is False:
# attention_mask = torch.cat(
# [attention_mask, attention_mask.new_ones((attention_mask.shape[0], 1))], dim=-1
# )
return input_ids
| EXA-1-master | exa/libraries/fairseq/examples/MMPT/mmpt/models/mmfusionnlg.py |
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Copyright (c) Facebook, Inc. All Rights Reserved
import torch
from torch import nn
try:
from transformers import AutoConfig, AutoTokenizer
except ImportError:
pass
from . import transformermodel
class MMPTModel(nn.Module):
"""An e2e wrapper of inference model.
"""
@classmethod
def from_pretrained(cls, config, checkpoint="checkpoint_best.pt"):
import os
from ..utils import recursive_config
from ..tasks import Task
config = recursive_config(config)
mmtask = Task.config_task(config)
checkpoint_path = os.path.join(config.eval.save_path, checkpoint)
mmtask.build_model(checkpoint=checkpoint_path)
# TODO(huxu): make the video encoder configurable.
from ..processors.models.s3dg import S3D
video_encoder = S3D('pretrained_models/s3d_dict.npy', 512)
video_encoder.load_state_dict(
torch.load('pretrained_models/s3d_howto100m.pth'))
from transformers import AutoTokenizer
tokenizer = AutoTokenizer.from_pretrained(
config.dataset.bert_name, use_fast=config.dataset.use_fast
)
from ..processors import Aligner
aligner = Aligner(config.dataset)
return (
MMPTModel(config, mmtask.model, video_encoder),
tokenizer,
aligner
)
def __init__(self, config, model, video_encoder, **kwargs):
super().__init__()
self.max_video_len = config.dataset.max_video_len
self.video_encoder = video_encoder
self.model = model
def forward(self, video_frames, caps, cmasks, return_score=False):
bsz = video_frames.size(0)
assert bsz == 1, "only bsz=1 is supported now."
seq_len = video_frames.size(1)
video_frames = video_frames.view(-1, *video_frames.size()[2:])
vfeats = self.video_encoder(video_frames.permute(0, 4, 1, 2, 3))
vfeats = vfeats['video_embedding']
vfeats = vfeats.view(bsz, seq_len, vfeats.size(-1))
padding = torch.zeros(
bsz, self.max_video_len - seq_len, vfeats.size(-1))
vfeats = torch.cat([vfeats, padding], dim=1)
vmasks = torch.cat([
torch.ones((bsz, seq_len), dtype=torch.bool),
torch.zeros((bsz, self.max_video_len - seq_len), dtype=torch.bool)
],
dim=1
)
output = self.model(caps, cmasks, vfeats, vmasks)
if return_score:
output = {"score": torch.bmm(
output["pooled_video"][:, None, :],
output["pooled_text"][:, :, None]
).squeeze(-1).squeeze(-1)}
return output
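# Illustrative usage sketch (hypothetical paths and helper names; the exact
# config yaml, checkpoint locations and aligner helper depend on the project
# setup and are not guaranteed by this file):
#
#   model, tokenizer, aligner = MMPTModel.from_pretrained(
#       "projects/retri/videoclip/how2.yaml")
#   model.eval()
#   # 2 seconds of video at 30 fps, 224x224 RGB frames, batch size 1.
#   video_frames = torch.randn(1, 2, 30, 224, 224, 3)
#   caps, cmasks = aligner._build_text_seq(
#       tokenizer("someone is cooking", add_special_tokens=False)["input_ids"])
#   with torch.no_grad():
#       out = model(video_frames, caps[None, :], cmasks[None, :],
#                   return_score=True)
#   # out["score"] is the video-text similarity for the single example.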
class MMFusion(nn.Module):
"""a MMPT wrapper class for MMBert style models.
TODO: move isolated mask to a subclass.
"""
def __init__(self, config, **kwargs):
super().__init__()
transformer_config = AutoConfig.from_pretrained(
config.dataset.bert_name)
self.hidden_size = transformer_config.hidden_size
self.is_train = False
if config.dataset.train_path is not None:
self.is_train = True
# 0 means no iso; 1-12 means iso up to that layer.
self.num_hidden_layers = transformer_config.num_hidden_layers
self.last_iso_layer = 0
if config.dataset.num_iso_layer is not None:
self.last_iso_layer = config.dataset.num_iso_layer - 1 + 1
if config.model.mm_encoder_cls is not None:
mm_encoder_cls = getattr(transformermodel, config.model.mm_encoder_cls)
model_config = AutoConfig.from_pretrained(config.dataset.bert_name)
model_config.max_video_len = config.dataset.max_video_len
# TODO: a general way to add parameter for a model.
model_config.use_seg_emb = config.model.use_seg_emb
self.mm_encoder = mm_encoder_cls.from_pretrained(
config.dataset.bert_name, config=model_config)
elif config.model.video_encoder_cls is not None\
and config.model.text_encoder_cls is not None:
video_encoder_cls = getattr(transformermodel, config.model.video_encoder_cls)
model_config = AutoConfig.from_pretrained(config.dataset.bert_name)
model_config.max_video_len = config.dataset.max_video_len
# TODO: make each model a set of config class.
if hasattr(model_config, "num_layers"):
model_config.num_layers = config.model.num_hidden_video_layers
else:
model_config.num_hidden_layers = config.model.num_hidden_video_layers
self.video_encoder = video_encoder_cls.from_pretrained(
config.dataset.bert_name, config=model_config)
# exact same NLP model from Huggingface.
text_encoder_cls = getattr(transformermodel, config.model.text_encoder_cls)
self.text_encoder = text_encoder_cls.from_pretrained(
config.dataset.bert_name)
else:
raise ValueError("the encoder must be either MM or two backbones.")
def forward(
self,
caps,
cmasks,
vfeats,
vmasks,
**kwargs
):
raise NotImplementedError(
"Please derive MMFusion module."
)
def _mm_on_the_fly(
self,
cmasks,
vmasks,
attention_mask
):
"""helper function for mask, seg_ids and token_type_ids."""
if attention_mask is None:
attention_mask = self._mm_attention_mask(cmasks, vmasks)
"""
0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1
| first sequence | second sequence |
"""
token_type_ids = torch.cat(
[
torch.zeros(
(vmasks.size(0), vmasks.size(1) + 2),
dtype=torch.long,
device=vmasks.device,
),
torch.ones(
(cmasks.size(0), cmasks.size(1) - 2),
dtype=torch.long,
device=cmasks.device,
),
],
dim=1,
)
return attention_mask, token_type_ids
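    # Layout sketch for the multi-modal sequence assembled above, where v is
    # the number of video tokens and the caption is [CLS] [SEP] w_1..w_k [SEP]
    # (so cmasks has length t = k + 3):
    #   tokens:          [CLS] v_1 .. v_v [SEP] w_1 .. w_k [SEP]
    #   token_type_ids:    0    0  ..  0    0    1  ..  1    1
    # i.e. segment 0 spans the first v + 2 positions and segment 1 spans the
    # remaining t - 2, matching the 0/1 diagram in the docstring above.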
def _mm_attention_mask(self, cmasks, vmasks):
assert cmasks.size(0) == vmasks.size(0), "{}, {}, {}, {}".format(
str(cmasks.size()),
str(vmasks.size()),
str(cmasks.size(0)),
str(vmasks.size(0)),
)
mm_mask = torch.cat([cmasks[:, :1], vmasks, cmasks[:, 1:]], dim=1)
if self.last_iso_layer == 0:
# hard attention mask.
return mm_mask
else:
# a gpu iso mask; 0 : num_iso_layer is isolated;
# num_iso_layer: are MM-fused.
# make an iso layer
batch_size = cmasks.size(0)
iso_mask = self._make_iso_mask(batch_size, cmasks, vmasks)
mm_mask = mm_mask[:, None, :].repeat(1, mm_mask.size(-1), 1)
iso_mm_masks = []
# hard attention mask.
iso_mask = iso_mask[:, None, :, :].repeat(
1, self.last_iso_layer, 1, 1)
iso_mm_masks.append(iso_mask)
if self.last_iso_layer < self.num_hidden_layers:
mm_mask = mm_mask[:, None, :, :].repeat(
1, self.num_hidden_layers - self.last_iso_layer, 1, 1
)
iso_mm_masks.append(mm_mask)
iso_mm_masks = torch.cat(iso_mm_masks, dim=1)
return iso_mm_masks
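    # Shape note derived from the code above: with seq = v + t being the length
    # of mm_mask, the returned iso_mm_masks stacks one (seq, seq) mask per
    # transformer layer: (batch, last_iso_layer, seq, seq) isolated masks,
    # followed (when last_iso_layer < num_hidden_layers) by
    # (batch, num_hidden_layers - last_iso_layer, seq, seq) fused masks,
    # i.e. (batch, num_hidden_layers, seq, seq) overall.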
def _make_iso_mask(self, batch_size, cmasks, vmasks):
cls_self_mask = torch.cat(
[
torch.ones(
(batch_size, 1), dtype=torch.bool, device=cmasks.device),
torch.zeros(
(batch_size, cmasks.size(1) + vmasks.size(1) - 1),
dtype=torch.bool, device=cmasks.device)
], dim=1)
iso_video_mask = torch.cat(
[
# [CLS] is not used.
torch.zeros(
(batch_size, 1), dtype=torch.bool, device=cmasks.device
),
vmasks,
# assume to be 1.
cmasks[:, 1:2],
# 2 means [CLS] + [SEP]
torch.zeros(
(batch_size, cmasks.size(1) - 2),
dtype=torch.bool,
device=cmasks.device,
),
],
dim=1,
)
iso_text_mask = torch.cat(
[
torch.zeros(
(batch_size, 2 + vmasks.size(1)),
dtype=torch.bool,
device=cmasks.device,
), # [CLS] is not used.
cmasks[:, 2:], # assume to be 1.
],
dim=1,
)
cls_self_mask = cls_self_mask[:, None, :]
iso_video_mask = iso_video_mask[:, None, :].repeat(
1, vmasks.size(1) + 1, 1)
iso_text_mask = iso_text_mask[:, None, :].repeat(
1, cmasks.size(1) - 2, 1)
return torch.cat([cls_self_mask, iso_video_mask, iso_text_mask], dim=1)
def _pooling_vt_layer(
self,
layered_sequence_output,
cmasks,
vmasks
):
layer_idx = self.last_iso_layer \
if self.last_iso_layer > 0 else self.num_hidden_layers
hidden_state = layered_sequence_output[layer_idx]
# also output pooled_video and pooled_text.
batch_size = cmasks.size(0)
# pool the modality.
text_offset = vmasks.size(1) + 2 # [CLS] + [SEP]
# video tokens + [SEP]
video_outputs = hidden_state[:, 1:text_offset]
video_attention_mask = torch.cat(
[
vmasks,
torch.ones(
(batch_size, 1), dtype=torch.bool, device=vmasks.device),
],
dim=1,
)
assert video_outputs.size(1) == video_attention_mask.size(1)
pooled_video = torch.sum(
video_outputs * video_attention_mask.unsqueeze(-1), dim=1
) / video_attention_mask.sum(1, keepdim=True)
# pooled_video = torch.mean(video_outputs[0], dim=1)
# text tokens + [SEP]
text_attention_mask = cmasks[:, 2:]
text_outputs = hidden_state[:, text_offset:]
assert text_outputs.size(1) == text_attention_mask.size(1)
pooled_text = torch.sum(
text_outputs * text_attention_mask.unsqueeze(-1), dim=1
) / text_attention_mask.sum(1, keepdim=True)
return pooled_video, pooled_text
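    # The pooling above is a masked mean over each modality's hidden states:
    #   pooled = sum_i(h_i * m_i) / sum_i(m_i)
    # Tiny example with hypothetical values: hidden states [[1., 3.], [5., 7.]]
    # and mask [1, 0] pool to [1., 3.], since only the first position is valid.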
class MMFusionMFMMLM(MMFusion):
"""forward function for MFM and MLM."""
def forward(
self,
caps,
cmasks,
vfeats,
vmasks,
attention_mask=None,
video_label=None,
text_label=None,
**kwargs
):
        output_hidden_states = not self.is_train
target_vfeats, non_masked_frame_mask = None, None
if video_label is not None:
target_vfeats = vfeats.masked_select(
video_label.unsqueeze(-1)).view(
-1, vfeats.size(-1)
)
# mask video token.
vfeats[video_label] = 0.0
non_masked_frame_mask = vmasks.clone()
non_masked_frame_mask[video_label] = False
attention_mask, token_type_ids = self._mm_on_the_fly(
cmasks, vmasks, attention_mask)
outputs = self.mm_encoder(
input_ids=caps,
input_video_embeds=vfeats,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
masked_frame_labels=video_label,
target_video_hidden_states=target_vfeats,
non_masked_frame_mask=non_masked_frame_mask,
masked_lm_labels=text_label,
output_hidden_states=output_hidden_states,
)
video_logits, text_logits = outputs[0], outputs[1]
if self.is_train: # return earlier for training.
return {
"video_logits": video_logits,
"text_logits": text_logits,
}
pooled_video, pooled_text = self._pooling_vt_layer(
outputs[2], cmasks, vmasks)
return {"pooled_video": pooled_video, "pooled_text": pooled_text}
class MMFusionMTM(MMFusionMFMMLM):
def __init__(self, config, **kwargs):
super().__init__(config)
"""
For reproducibility:
self.mm_encoder will be initialized then discarded.
"""
from .transformermodel import MMBertForMTM
model_config = AutoConfig.from_pretrained(config.dataset.bert_name)
model_config.max_video_len = config.dataset.max_video_len
model_config.use_seg_emb = config.model.use_seg_emb
self.mm_encoder = MMBertForMTM.from_pretrained(
config.dataset.bert_name, config=model_config)
class MMFusionShare(MMFusion):
"""A retrival wrapper using mm_encoder as both video/text backbone.
TODO: move formally.
"""
def forward(
self,
caps,
cmasks,
vfeats,
vmasks,
attention_mask=None,
video_label=None,
text_label=None,
output_hidden_states=False,
**kwargs
):
pooled_video = self.forward_video(
vfeats,
vmasks,
caps,
cmasks,
output_hidden_states
)
pooled_text = self.forward_text(
caps,
cmasks,
output_hidden_states
)
return {"pooled_video": pooled_video, "pooled_text": pooled_text}
def forward_video(
self,
vfeats,
vmasks,
caps,
cmasks,
output_hidden_states=False,
**kwargs
):
input_ids = caps[:, :2]
attention_mask = torch.cat([
cmasks[:, :1],
vmasks,
cmasks[:, 1:2]
], dim=1)
token_type_ids = torch.zeros(
(vmasks.size(0), vmasks.size(1) + 2),
dtype=torch.long,
device=vmasks.device)
outputs = self.mm_encoder(
input_ids=input_ids,
input_video_embeds=vfeats,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
output_hidden_states=True
)
video_outputs = outputs[0]
if output_hidden_states:
return video_outputs
batch_size = cmasks.size(0)
video_attention_mask = torch.cat(
[
torch.zeros(
(batch_size, 1), dtype=torch.bool, device=vmasks.device),
vmasks,
torch.ones(
(batch_size, 1), dtype=torch.bool, device=vmasks.device),
],
dim=1,
)
assert video_outputs.size(1) == video_attention_mask.size(1)
video_attention_mask = video_attention_mask.type(video_outputs.dtype) \
/ video_attention_mask.sum(1, keepdim=True)
pooled_video = torch.bmm(
video_outputs.transpose(2, 1),
video_attention_mask.unsqueeze(2)
).squeeze(-1)
return pooled_video # video_outputs
def forward_text(
self,
caps,
cmasks,
output_hidden_states=False,
**kwargs
):
input_ids = torch.cat([
caps[:, :1], caps[:, 2:],
], dim=1)
attention_mask = torch.cat([
cmasks[:, :1],
cmasks[:, 2:]
], dim=1)
token_type_ids = torch.cat([
torch.zeros(
(cmasks.size(0), 1),
dtype=torch.long,
device=cmasks.device),
torch.ones(
(cmasks.size(0), cmasks.size(1) - 2),
dtype=torch.long,
device=cmasks.device)
], dim=1)
outputs = self.mm_encoder(
input_ids=input_ids,
input_video_embeds=None,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
output_hidden_states=True
)
text_outputs = outputs[0]
if output_hidden_states:
return text_outputs
batch_size = caps.size(0)
# text tokens + [SEP]
text_attention_mask = torch.cat([
torch.zeros(
(batch_size, 1), dtype=torch.bool, device=cmasks.device),
cmasks[:, 2:]
], dim=1)
assert text_outputs.size(1) == text_attention_mask.size(1)
text_attention_mask = text_attention_mask.type(text_outputs.dtype) \
/ text_attention_mask.sum(1, keepdim=True)
pooled_text = torch.bmm(
text_outputs.transpose(2, 1),
text_attention_mask.unsqueeze(2)
).squeeze(-1)
return pooled_text # text_outputs
class MMFusionSeparate(MMFusionShare):
def forward_video(
self,
vfeats,
vmasks,
caps,
cmasks,
output_hidden_states=False,
**kwargs
):
input_ids = caps[:, :2]
attention_mask = torch.cat([
cmasks[:, :1],
vmasks,
cmasks[:, 1:2]
], dim=1)
token_type_ids = torch.zeros(
(vmasks.size(0), vmasks.size(1) + 2),
dtype=torch.long,
device=vmasks.device)
outputs = self.video_encoder(
input_ids=input_ids,
input_video_embeds=vfeats,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
output_hidden_states=True
)
video_outputs = outputs[0]
if output_hidden_states:
return video_outputs
batch_size = cmasks.size(0)
video_attention_mask = torch.cat(
[
torch.zeros(
(batch_size, 1), dtype=torch.bool, device=vmasks.device),
vmasks,
torch.ones(
(batch_size, 1), dtype=torch.bool, device=vmasks.device),
],
dim=1,
)
assert video_outputs.size(1) == video_attention_mask.size(1)
video_attention_mask = video_attention_mask.type(video_outputs.dtype) \
/ video_attention_mask.sum(1, keepdim=True)
pooled_video = torch.bmm(
video_outputs.transpose(2, 1),
video_attention_mask.unsqueeze(2)
).squeeze(-1)
return pooled_video # video_outputs
def forward_text(
self,
caps,
cmasks,
output_hidden_states=False,
**kwargs
):
input_ids = torch.cat([
caps[:, :1], caps[:, 2:],
], dim=1)
attention_mask = torch.cat([
cmasks[:, :1],
cmasks[:, 2:]
], dim=1)
# different from sharing, we use all-0 type.
token_type_ids = torch.zeros(
(cmasks.size(0), cmasks.size(1) - 1),
dtype=torch.long,
device=cmasks.device)
outputs = self.text_encoder(
input_ids=input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
output_hidden_states=True
)
text_outputs = outputs[0]
if output_hidden_states:
return text_outputs
batch_size = caps.size(0)
# text tokens + [SEP]
text_attention_mask = torch.cat([
torch.zeros(
(batch_size, 1), dtype=torch.bool, device=cmasks.device),
cmasks[:, 2:]
], dim=1)
assert text_outputs.size(1) == text_attention_mask.size(1)
text_attention_mask = text_attention_mask.type(text_outputs.dtype) \
/ text_attention_mask.sum(1, keepdim=True)
pooled_text = torch.bmm(
text_outputs.transpose(2, 1),
text_attention_mask.unsqueeze(2)
).squeeze(-1)
return pooled_text # text_outputs
class MMFusionJoint(MMFusion):
"""fine-tuning wrapper for retrival task."""
def forward(
self,
caps,
cmasks,
vfeats,
vmasks,
attention_mask=None,
video_label=None,
text_label=None,
**kwargs
):
# TODO (huxu): other ways to do negative examples; move the following
# into your criterion forward.
output_hidden_states = True
attention_mask, token_type_ids = self._mm_on_the_fly(
cmasks, vmasks, attention_mask)
separate_forward_split = (
None if self.is_train else vmasks.size(1) + 2
) # [CLS] + [SEP]
outputs = self.mm_encoder(
input_ids=caps,
input_video_embeds=vfeats,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
output_hidden_states=output_hidden_states,
separate_forward_split=separate_forward_split,
)
pooled_video, pooled_text = self._pooling_vt_layer(
outputs[2], cmasks, vmasks)
return {"pooled_video": pooled_video, "pooled_text": pooled_text}
class MMFusionActionSegmentation(MMFusion):
"""Fine-tuning wrapper for action segmentation.
TODO: rename this for VLM.
"""
def forward(
self,
caps,
cmasks,
vfeats,
vmasks,
attention_mask=None,
**kwargs
):
        # Action segmentation assumes batch_size=1; flatten the sliding windows into the batch dimension.
caps = caps.view(-1, caps.size(-1))
cmasks = cmasks.view(-1, cmasks.size(-1))
vfeats = vfeats.view(-1, vfeats.size(2), vfeats.size(3))
vmasks = vmasks.view(-1, vmasks.size(-1))
# this may not cover all shapes of attention_mask.
attention_mask = attention_mask.view(
-1, attention_mask.size(2), attention_mask.size(3)) \
if attention_mask is not None else None
# TODO (huxu): other ways to do negative examples; move the following
# into your criterion forward.
output_hidden_states = True
# video forwarding, text is dummy; never use attention_mask.
attention_mask, token_type_ids = self._mm_on_the_fly(
cmasks, vmasks, attention_mask)
logits = self.mm_encoder(
input_ids=caps,
input_video_embeds=vfeats,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
output_hidden_states=output_hidden_states,
)
return {"logits": logits[0][:, 1:vmasks.size(1)+1]}
class MMFusionActionLocalization(MMFusion):
"""fine-tuning model for retrival task."""
def __init__(self, config, **kwargs):
super().__init__(config)
tokenizer = AutoTokenizer.from_pretrained(
config.dataset.bert_name)
self.cls_token_id = tokenizer.cls_token_id
self.sep_token_id = tokenizer.sep_token_id
self.pad_token_id = tokenizer.pad_token_id
def forward(
self,
caps,
cmasks,
vfeats,
vmasks,
attention_mask=None,
**kwargs
):
        # Action localization assumes batch_size=1; squeeze the batch dimension.
caps = caps.squeeze(0)
cmasks = cmasks.squeeze(0)
vfeats = vfeats.squeeze(0)
vmasks = vmasks.squeeze(0)
attention_mask = attention_mask.squeeze(0) if attention_mask is not None else None
# TODO (huxu): other ways to do negative examples; move the following
# into your criterion forward.
output_hidden_states = True
# a len1 dummy video token.
dummy_vfeats = torch.zeros(
(caps.size(0), 1, vfeats.size(-1)), device=vfeats.device, dtype=vfeats.dtype)
dummy_vmasks = torch.ones(
(caps.size(0), 1), dtype=torch.bool,
device=vfeats.device)
dummy_caps = torch.LongTensor(
[[self.cls_token_id, self.sep_token_id,
self.pad_token_id, self.sep_token_id]],
).to(caps.device).repeat(vfeats.size(0), 1)
dummy_cmasks = torch.BoolTensor(
[[0, 1, 0, 1]] # pad are valid for attention.
).to(caps.device).repeat(vfeats.size(0), 1)
# video forwarding, text is dummy; never use attention_mask.
attention_mask, token_type_ids = self._mm_on_the_fly(
dummy_cmasks, vmasks, None)
outputs = self.mm_encoder(
input_ids=dummy_caps,
input_video_embeds=vfeats,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
output_hidden_states=output_hidden_states,
)
layer_idx = self.last_iso_layer \
if self.last_iso_layer > 0 else self.num_hidden_layers
video_seq = outputs[2][layer_idx][:, 1:vmasks.size(1)+1].masked_select(
vmasks.unsqueeze(-1)
).view(-1, self.hidden_size)
# text forwarding, video is dummy
attention_mask, token_type_ids = self._mm_on_the_fly(
cmasks, dummy_vmasks, None)
outputs = self.mm_encoder(
input_ids=caps,
input_video_embeds=dummy_vfeats,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
output_hidden_states=output_hidden_states,
)
_, pooled_text = self._pooling_vt_layer(
outputs[2], cmasks, dummy_vmasks)
# this line is not right.
logits = torch.mm(video_seq, pooled_text.transpose(1, 0))
return {"logits": logits}
# --------------- MMFusionSeparate for end tasks ---------------
class MMFusionSeparateActionSegmentation(MMFusionSeparate):
"""Fine-tuning wrapper for action segmentation."""
def forward(
self,
caps,
cmasks,
vfeats,
vmasks,
attention_mask=None,
**kwargs
):
        # Action segmentation assumes batch_size=1; flatten the sliding windows into the batch dimension.
caps = caps.view(-1, caps.size(-1))
cmasks = cmasks.view(-1, cmasks.size(-1))
vfeats = vfeats.view(-1, vfeats.size(2), vfeats.size(3))
vmasks = vmasks.view(-1, vmasks.size(-1))
logits = self.forward_video(
vfeats,
vmasks,
caps,
cmasks,
output_hidden_states=True
)
return {"logits": logits[:, 1:vmasks.size(1)+1]}
class MMFusionSeparateActionLocalization(MMFusionSeparate):
def __init__(self, config, **kwargs):
super().__init__(config)
tokenizer = AutoTokenizer.from_pretrained(
config.dataset.bert_name)
self.cls_token_id = tokenizer.cls_token_id
self.sep_token_id = tokenizer.sep_token_id
self.pad_token_id = tokenizer.pad_token_id
def forward(
self,
caps,
cmasks,
vfeats,
vmasks,
**kwargs
):
        # Action localization assumes batch_size=1; squeeze the batch dimension.
caps = caps.squeeze(0)
cmasks = cmasks.squeeze(0)
vfeats = vfeats.squeeze(0)
vmasks = vmasks.squeeze(0)
# TODO (huxu): other ways to do negative examples; move the following
# into your criterion forward.
dummy_caps = torch.LongTensor(
[[self.cls_token_id, self.sep_token_id,
self.pad_token_id, self.sep_token_id]],
).to(caps.device).repeat(vfeats.size(0), 1)
dummy_cmasks = torch.BoolTensor(
[[0, 1, 0, 1]] # pad are valid for attention.
).to(caps.device).repeat(vfeats.size(0), 1)
outputs = self.forward_video(
vfeats,
vmasks,
dummy_caps,
dummy_cmasks,
output_hidden_states=True
)
video_seq = outputs[:, 1:vmasks.size(1)+1].masked_select(
vmasks.unsqueeze(-1)
).view(-1, self.hidden_size)
pooled_text = self.forward_text(
caps,
cmasks,
output_hidden_states=False
)
# this line is not right.
logits = torch.mm(video_seq, pooled_text.transpose(1, 0))
return {"logits": logits}
class MMFusionShareActionLocalization(MMFusionShare):
def __init__(self, config, **kwargs):
super().__init__(config)
tokenizer = AutoTokenizer.from_pretrained(
config.dataset.bert_name)
self.cls_token_id = tokenizer.cls_token_id
self.sep_token_id = tokenizer.sep_token_id
self.pad_token_id = tokenizer.pad_token_id
def forward(
self,
caps,
cmasks,
vfeats,
vmasks,
**kwargs
):
        # Action localization assumes batch_size=1; squeeze the batch dimension.
caps = caps.squeeze(0)
cmasks = cmasks.squeeze(0)
vfeats = vfeats.squeeze(0)
vmasks = vmasks.squeeze(0)
# TODO (huxu): other ways to do negative examples; move the following
# into your criterion forward.
dummy_caps = torch.LongTensor(
[[self.cls_token_id, self.sep_token_id,
self.pad_token_id, self.sep_token_id]],
).to(caps.device).repeat(vfeats.size(0), 1)
dummy_cmasks = torch.BoolTensor(
[[0, 1, 0, 1]] # pad are valid for attention.
).to(caps.device).repeat(vfeats.size(0), 1)
outputs = self.forward_video(
vfeats,
vmasks,
dummy_caps,
dummy_cmasks,
output_hidden_states=True
)
video_seq = outputs[:, 1:vmasks.size(1)+1].masked_select(
vmasks.unsqueeze(-1)
).view(-1, self.hidden_size)
pooled_text = self.forward_text(
caps,
cmasks,
output_hidden_states=False
)
# this line is not right.
logits = torch.mm(video_seq, pooled_text.transpose(1, 0))
return {"logits": logits}
| EXA-1-master | exa/libraries/fairseq/examples/MMPT/mmpt/models/mmfusion.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os
import random
import json
import numpy as np
import torch
import pickle
import math
from tqdm import tqdm
class Predictor(object):
"""this base class is used to save predictions to disk
(and being called by a evaluator later).
Predictor has minimum support of single gpu prediction.
"""
def __init__(self, config):
self.pred_dir = None # on-the-fly eval does not save the results.
if hasattr(config, "eval") and config.eval is not None:
self.pred_dir = config.eval.save_path
os.makedirs(self.pred_dir, exist_ok=True)
def __call__(self, outputs):
"""extract the prediction and save it."""
raise NotImplementedError
def predict_loop(self, model, eval_dataloader, output_file=None):
"""on-the-fly prediction on a single gpu."""
self.full_scores = []
model.eval()
model = model.to(0)
with torch.no_grad():
for data in eval_dataloader:
data = self.to_ctx(data)
outputs = model(**data)
outputs.update(data)
self(outputs)
return self.finalize(output_file)
def finalize(self, output_file):
pass
def to_ctx(self, data, ctx=0, dtype=None):
if isinstance(data, dict):
for key in data:
if torch.is_tensor(data[key]):
if dtype is not None and data[key].dtype == torch.float32:
data[key] = data[key].to(dtype)
data[key] = data[key].to(ctx)
return data
else:
raise ValueError("non-dict type of batch is not supported yet.")
class NLGPredictor(Predictor):
"""Predicting Text from MMFusion models."""
"""TODO: make a context."""
def __init__(self, config):
super().__init__(config)
from transformers import AutoTokenizer
self.tokenizer = AutoTokenizer.from_pretrained(
config.dataset.bert_name,
bos_token="[CLS]", eos_token="[SEP]")
self.bos_token_id = self.tokenizer.bos_token_id
self.eos_token_id = self.tokenizer.eos_token_id
def predict_loop(self, model, eval_dataloader, output_file=None):
"""TODO: refactor base classes."""
ctx = 0
outputs = {"outputs": [], "targets": [[]]}
model.eval()
model = model.to(ctx)
with torch.no_grad():
for data in tqdm(eval_dataloader):
data = self.to_ctx(data, ctx)
self(data, model, outputs)
return self.finalize(outputs, output_file)
def __call__(self, data, model, outputs):
data.update({
"bos_token_id": self.bos_token_id,
"eos_token_id": self.eos_token_id
})
output = model.generate(**data)
assert len(output) == len(data["ref"])
for idx, _output in enumerate(output):
generated_text = self.tokenizer.decode(
_output, skip_special_tokens=True)
if generated_text == "":
generated_text = "none"
outputs["outputs"].append(generated_text)
outputs["targets"][0].append(data["ref"][idx])
if random.random() < 0.001:
print("_output", _output)
print("generated_text", generated_text)
print("ref", data["ref"][idx])
def finalize(self, outputs, output_file=None):
if output_file is not None:
with open(os.path.join(
self.pred_dir, output_file + ".json"), "w") as fw:
json.dump(outputs, fw, indent=4)
return outputs
class RetrievalPredictor(Predictor):
"""generated `pooled_video` and `pooled_text`."""
def __init__(self, config):
super().__init__(config)
from transformers import AutoTokenizer
self.tokenizer = AutoTokenizer.from_pretrained(
config.dataset.bert_name)
def predict_loop(
self,
model,
eval_dataloader,
output_file="retrieval.npy"
):
"""on-the-fly prediction on a single gpu."""
full_scores = []
texts = []
model.eval()
model = model.cuda()
with torch.no_grad():
for data in eval_dataloader:
# convert to dict.
if not isinstance(data, dict):
data = {
"caps": data[0],
"cmasks": data[1],
"vfeats": data[2],
"vmasks": data[3],
"video_id": data[4]
}
data = self.to_ctx(data)
outputs = model(**data)
outputs.update(data)
self(outputs, full_scores)
for _cap in data["caps"]:
texts.append(
self.tokenizer.decode(_cap, skip_special_tokens=True)
)
return self.finalize(full_scores, texts, output_file)
def __call__(self, sample, full_scores):
scores = self._get_pooled_outputs(sample)
self._append_scores(scores, full_scores)
def finalize(self, full_scores, texts, output_file=None):
outputs = self._aggregate_scores(full_scores)
if output_file is not None:
np.save(os.path.join(self.pred_dir, output_file + ".npy"), outputs)
return {"outputs": outputs, "texts": texts}
def _get_pooled_outputs(self, outputs):
if "pooled_video" in outputs:
return outputs["pooled_video"], outputs["pooled_text"]
else:
raise ValueError("unknown format of outputs.")
def _append_scores(self, scores, full_scores):
assert len(scores) == 2
if len(full_scores) == 0:
full_scores.append([])
full_scores.append([])
full_scores[0].append(scores[0].cpu().detach().numpy())
full_scores[1].append(scores[1].cpu().detach().numpy())
def _aggregate_scores(self, scores):
assert len(scores) == 2
video_hidden = np.concatenate(scores[0], axis=0)
text_hidden = np.concatenate(scores[1], axis=0)
# clear up.
self.full_scores = []
return np.matmul(text_hidden, video_hidden.T)
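    # The aggregated matrix has shape (num_texts, num_videos): row i holds the
    # similarities of text query i against every pooled video, and the matched
    # pair is expected on the diagonal when texts and videos are aligned 1:1.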
class QAPredictor(Predictor):
"""generated `pooled_video` and `pooled_text`."""
def __init__(self, config):
super().__init__(config)
"""predictor maintains scores and aggregate them."""
def predict_loop(self, model, eval_dataloader, output_file="qa.npy"):
"""on-the-fly prediction on a single gpu."""
self.full_scores = []
model.eval()
model = model.cuda()
with torch.no_grad():
for data in eval_dataloader:
# reshape ans and dup video 5 times.
v_len = data["vfeats"].size(1)
hidden_size = data["vfeats"].size(2)
data["vfeats"] = data["vfeats"].unsqueeze(1).repeat(1, 5, 1, 1).view(-1, v_len, hidden_size)
data["vmasks"] = data["vmasks"].unsqueeze(1).repeat(1, 5, 1).view(-1, v_len)
t_len = data["caps"].size(-1)
data["caps"] = data["caps"].view(-1, t_len)
data["cmasks"] = data["cmasks"].view(-1, t_len)
data = self.to_ctx(data)
outputs = model(**data)
outputs.update(data)
self(outputs)
return self.finalize(output_file)
def __call__(self, sample):
hidden_size = sample["pooled_video"].size(-1)
pooled_video = sample["pooled_video"].view(-1, 5, hidden_size)
pooled_text = sample["pooled_text"].view(-1, 5, hidden_size)
scores = torch.bmm(pooled_video, pooled_text.transpose(2, 1))
scores = scores.argmax(-1)
self._append_scores(scores[:, 0], sample["answers"], self.full_scores)
def finalize(self, output_file=None):
outputs, targets = self._aggregate_scores(self.full_scores)
if output_file is not None:
np.save(os.path.join(self.pred_dir, output_file + ".npy"), outputs)
return {"outputs": outputs, "targets": targets}
def _append_scores(self, scores, answers, full_scores):
if len(full_scores) == 0:
full_scores.append([])
full_scores.append([])
full_scores[0].append(scores.cpu().detach().numpy())
full_scores[1].append(answers.cpu().detach().numpy())
def _aggregate_scores(self, scores):
assert len(scores) == 2
outputs = np.concatenate(scores[0], axis=0)
targets = np.concatenate(scores[1], axis=0)
# clear up.
self.full_scores = []
return outputs, targets
class CrossTaskPredictor(Predictor):
"""
CrossTaskPredictor needs to compute the average of logits
    for overlapping sliding windows.
"""
def __init__(self, config):
super().__init__(config)
self.lsm = torch.nn.LogSoftmax(dim=1)
self.max_video_len = config.dataset.max_video_len
self.sliding_window = config.dataset.sliding_window
self.sliding_window_size = config.dataset.sliding_window_size
self.annotation_path = config.dataset.annotation_path
def predict_loop(self, model, eval_dataloader, output_file="result.pkl"):
"""refactored from line 144:
https://github.com/DmZhukov/CrossTask/blob/master/train.py
"""
ctx = 0
model.eval()
model = model.to(ctx)
# this is not a loss but just compute neg_log_prob.
Y_pred = {}
Y_true = {}
with torch.no_grad():
for batch in eval_dataloader:
self(batch, model, Y_pred, Y_true)
return self.finalize(Y_pred, Y_true, output_file)
def __call__(self, sample, model, Y_pred, Y_true):
# please install dp from `https://github.com/DmZhukov/CrossTask`
from dp import dp
vid, task = sample['video_id'][0], sample['task'][0]
sample = self.to_ctx(sample)
# compute the average logits over sliding windows.
output = model(**sample)
batch_logits = output["logits"].cpu()
video_len = sample["video_len"][0]
# the following version is slow.
logits = torch.zeros((video_len, batch_logits.size(1)))
logits_counts = torch.zeros((video_len, 1), dtype=torch.long)
# use the same loop as aligner to recover.
batch_logit_idx = 0
for window_start in range(0, video_len, self.sliding_window):
video_end = min(video_len - window_start, self.sliding_window_size)
logits[window_start: window_start + video_end] += batch_logits[
batch_logit_idx: batch_logit_idx + video_end]
batch_logit_idx += video_end
logits_counts[window_start: window_start + video_end] += torch.ones((video_end, 1), dtype=torch.long)
if (video_len - window_start) <= self.sliding_window_size:
break
logits /= logits_counts
assert logits.size() == (video_len, batch_logits.size(1)), "{}, {}".format(logits.size(), video_len)
O = self.lsm(logits)
y = np.zeros(O.size(), dtype=np.float32)
dp(y, -O.detach().cpu().numpy())
if task not in Y_pred:
Y_pred[task] = {}
Y_pred[task][vid] = y
annot_path = os.path.join(
self.annotation_path, task+'_'+vid+'.csv')
if os.path.exists(annot_path):
if task not in Y_true:
Y_true[task] = {}
Y_true[task][vid] = self._read_assignment(
*y.shape, annot_path)
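    # Worked example of the sliding-window averaging above (hypothetical
    # sizes): with video_len = 40, sliding_window = 16 and
    # sliding_window_size = 32, the loop visits window_start = 0 (frames 0..31)
    # and window_start = 16 (frames 16..39, then breaks since 40 - 16 <= 32).
    # Frames 16..31 receive two logit contributions and logits_counts == 2
    # there, so the final division yields a per-frame average over the overlap.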
def finalize(self, Y_pred, Y_true, output_file=None):
if output_file is not None:
with open(
os.path.join(self.pred_dir, output_file + ".pkl"),
"wb") as fw:
pickle.dump(
{"Y_pred": Y_pred, "Y_true": Y_true}, fw,
protocol=pickle.HIGHEST_PROTOCOL)
return {"outputs": Y_pred, "targets": Y_true}
def _read_assignment(self, T, K, path):
"""
refactored from https://github.com/DmZhukov/CrossTask/blob/master/data.py
Howto interpret contraints on loss that is going to be minimized:
lambd is a big number;
self.lambd * C is a big number for all valid position (csv stores invalids)
def forward(self, O, Y, C):
return (Y*(self.lambd * C - self.lsm(O))).mean(dim=0).sum()
This will load the csv file and fill-in the step col from start to end rows.
"""
Y = np.zeros([T, K], dtype=np.uint8)
with open(path, 'r') as f:
for line in f:
step, start, end = line.strip().split(',')
start = int(math.floor(float(start)))
end = int(math.ceil(float(end)))
step = int(step) - 1
Y[start:end, step] = 1
return Y
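    # Example of the CSV convention parsed above (values are illustrative):
    # a row "3,12.5,20.1" marks step 3 as active from second 12.5 to 20.1,
    # so Y[12:21, 2] = 1 after flooring the start, ceiling the end and
    # shifting the 1-based step id to a 0-based column index.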
class COINPredictor(Predictor):
"""
COINPredictor is similar to CrossTask on sliding windows.
"""
def __init__(self, config):
super().__init__(config)
self.max_video_len = config.dataset.max_video_len
self.sliding_window = config.dataset.sliding_window
self.sliding_window_size = config.dataset.sliding_window_size
def predict_loop(self, model, eval_dataloader, output_file="result.pkl"):
"""refactored from line 144:
https://github.com/DmZhukov/CrossTask/blob/master/train.py
"""
ctx = 0
model.eval()
model = model.to(ctx)
# this is not a loss but just compute neg_log_prob.
Y_pred = []
Y_true = []
with torch.no_grad():
for batch in eval_dataloader:
self(batch, model, Y_pred, Y_true)
return self.finalize(Y_pred, Y_true, output_file)
def __call__(self, sample, model, Y_pred, Y_true):
sample = self.to_ctx(sample)
# compute the average logits over sliding windows.
output = model(**sample)
logits = self._merge_windows(sample, output)
Y_pred.append(logits.argmax(dim=1))
Y_true.append(sample["video_targets"].squeeze(0).cpu())
def _merge_windows(self, sample, output):
targets = sample["targets"].reshape(-1).cpu()
valid_mask = targets != -100
targets = targets[valid_mask]
batch_logits = output["logits"].cpu()
batch_logits = batch_logits.reshape(-1, batch_logits.size(-1))
batch_logits = batch_logits[valid_mask]
video_len = sample["video_len"][0]
# the following version is slow.
logits = torch.zeros((video_len, batch_logits.size(1)))
logits_counts = torch.zeros((video_len, 1), dtype=torch.long)
# use the same loop as aligner to recover.
batch_logit_idx = 0
for window_start in range(0, video_len, self.sliding_window):
video_end = min(video_len - window_start, self.sliding_window_size)
logits[window_start: window_start + video_end] += batch_logits[
batch_logit_idx: batch_logit_idx + video_end]
batch_logit_idx += video_end
logits_counts[window_start: window_start + video_end] += torch.ones((video_end, 1), dtype=torch.long)
if (video_len - window_start) <= self.sliding_window_size:
break
logits /= logits_counts
assert logits.size() == (video_len, batch_logits.size(1)), "{}, {}".format(logits.size(), video_len)
return logits
def finalize(self, Y_pred, Y_true, output_file=None):
Y_pred = torch.cat(Y_pred, dim=0).numpy()
Y_true = torch.cat(Y_true, dim=0).numpy()
assert len(Y_pred) == len(Y_true)
error_mask = Y_pred != Y_true
print("sample error", Y_pred[error_mask][:10], Y_true[error_mask][:10])
print("sample error", Y_pred[error_mask][10:20], Y_true[error_mask][10:20])
if output_file is not None:
with open(
os.path.join(self.pred_dir, output_file + ".pkl"),
"wb") as fw:
pickle.dump(
{"Y_pred": Y_pred, "Y_true": Y_true}, fw,
protocol=pickle.HIGHEST_PROTOCOL)
return {"outputs": Y_pred, "targets": Y_true}
class COINZSPredictor(COINPredictor):
"""
COINZSPredictor for COIN zero-shot prediction.
"""
def __init__(self, config):
super().__init__(config)
self.dataset_config = config.dataset
def predict_loop(self, model, eval_dataloader, output_file="result.pkl"):
"""refactored from line 144:
https://github.com/DmZhukov/CrossTask/blob/master/train.py
"""
ctx = 0
model.eval()
model = model.to(ctx)
with torch.no_grad():
outputs = eval_dataloader.dataset.meta_processor.meta_text_labels(
self.dataset_config)
outputs = self.to_ctx(outputs, ctx)
label_hidden_states = model.forward_text(**outputs).cpu()
label_sim = label_hidden_states @ label_hidden_states.t()
num_labels = label_sim.size(0)
eye_mask = ~torch.eye(num_labels, dtype=torch.bool)
label_sim = label_sim.masked_select(eye_mask).view(num_labels, num_labels - 1)
lbd = label_sim.max()
# this is not a loss but just compute neg_log_prob.
Y_pred = []
Y_true = []
with torch.no_grad():
for batch in eval_dataloader:
self(batch, label_hidden_states, model, lbd, Y_pred, Y_true)
return self.finalize(Y_pred, Y_true, output_file)
def reshape_subsample(self, sample):
for key in sample:
if torch.is_tensor(sample[key]):
sample[key] = self.flat_subsample(sample[key])
return sample
def flat_subsample(self, tensor):
if len(tensor.size()) > 1 and tensor.size(0) == 1:
tensor = tensor.squeeze(0)
return tensor
def __call__(self, sample, label_hidden_states, model, lbd, Y_pred, Y_true):
sample = self.reshape_subsample(sample)
sample = self.to_ctx(sample)
# compute the average logits over sliding windows.
sample["output_hidden_states"] = True
video_outputs = model.forward_video(**sample).cpu()
output = {"logits": video_outputs[:, 1:sample["vmasks"].size(1)+1] @ label_hidden_states.t()}
logits = self._merge_windows(sample, output)
# logic of zero-shot for sequence labeling.
logits_argmax = logits.argmax(dim=1) + 1 # 0 is "O" label.
logits_max = logits.max(dim=1)[0]
pred = torch.zeros_like(logits_argmax)
label_select = logits_max > lbd # 73 or 74
pred[label_select] = logits_argmax[label_select]
Y_pred.append(pred)
Y_true.append(sample["video_targets"].squeeze(0).cpu())
def finalize(self, Y_pred, Y_true, output_file=None):
Y_pred = torch.cat(Y_pred, dim=0).numpy()
Y_true = torch.cat(Y_true, dim=0).numpy()
assert len(Y_pred) == len(Y_true)
error_mask = Y_pred != Y_true
print("sample error", Y_pred[error_mask][:10], Y_true[error_mask][:10])
print("sample error", Y_pred[error_mask][10:20], Y_true[error_mask][10:20])
if output_file is not None:
with open(
os.path.join(self.pred_dir, output_file + ".pkl"),
"wb") as fw:
pickle.dump(
{"Y_pred": Y_pred, "Y_true": Y_true}, fw,
protocol=pickle.HIGHEST_PROTOCOL)
return {"outputs": Y_pred, "targets": Y_true}
class DiDeMoPredictor(Predictor):
"""reference: https://github.com/LisaAnne/LocalizingMoments/blob/master/utils/eval.py
https://github.com/LisaAnne/LocalizingMoments/blob/master/utils/data_processing.py
"""
def __init__(self, config):
super().__init__(config)
# load targets.
with open(config.dataset.test_path) as data_file:
self.test_data = json.load(data_file)
def predict_loop(self, model, eval_dataloader, output_file="didemo.npy"):
"""
TODO: two solutions here.
"""
import itertools
        # 21 possible moments: 6 single chunks plus C(6, 2) = 15 chunk pairs.
self.possible_segments = [(0,0), (1,1), (2,2), (3,3), (4,4), (5,5)]
for i in itertools.combinations(range(6), 2):
self.possible_segments.append(i)
# pick segments from a video.
"""on-the-fly prediction on a single gpu."""
self.full_scores = []
model.eval()
model = model.cuda()
with torch.no_grad():
for data in eval_dataloader:
# TODO special forwarding logic here.
data = self.to_ctx(data)
data["output_hidden_states"] = True
hidden_video = model.forward_video(**data)
data["output_hidden_states"] = False
pooled_text = model.forward_text(**data)
outputs = {
"hidden_video": hidden_video,
"pooled_text": pooled_text
}
outputs.update(data)
self(outputs)
return self.finalize(output_file)
def __call__(self, sample):
# TODO: make an index select from self.possible_segments.
hidden_video = sample["hidden_video"]
pooled_text = sample["pooled_text"]
vmasks = sample["vmasks"]
# probably maintain valid results here.
hidden_video = hidden_video[:, 1:-1, :]
# probably maintain valid results here.
pooled_video = []
for s, e in self.possible_segments:
pooled_video.append(
torch.mean(
hidden_video[:, int(s*5):int((e+1)*5), :],
dim=1, keepdim=True)
)
pooled_video = torch.cat(pooled_video, dim=1)
scores = torch.bmm(
pooled_video, pooled_text.unsqueeze(-1)).squeeze(-1).cpu()
ranks = scores.argsort(dim=-1, descending=True)
for batch_idx, rank in enumerate(ranks):
rank_of_moment = []
for m_idx, moment in enumerate(rank):
s, e = self.possible_segments[moment.item()]
if torch.any(
vmasks[batch_idx, int(s*5):int((e+1)*5)]
):
rank_of_moment.append((s, e))
self.full_scores.append(rank_of_moment)
def finalize(self, output_file=None):
outputs = self._aggregate_scores(self.full_scores)
if output_file is not None:
np.save(os.path.join(self.pred_dir, output_file + ".npy"), outputs)
return {"outputs": outputs, "targets": self.test_data}
def _aggregate_scores(self, scores):
self.full_scores = []
return scores
| EXA-1-master | exa/libraries/fairseq/examples/MMPT/mmpt/evaluators/predictor.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from .metric import *
from .evaluator import *
# experimental.
try:
from .expmetric import *
except ImportError:
pass
| EXA-1-master | exa/libraries/fairseq/examples/MMPT/mmpt/evaluators/__init__.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import numpy as np
import json
class Metric(object):
def __init__(self, config, metric_names):
self.metric_names = metric_names
def best_metric(self, metric):
return metric[self.metric_names[0]]
def save_metrics(self, fn, metrics):
with open(fn, "w") as fw:
            json.dump(metrics, fw)
def print_computed_metrics(self, metrics):
raise NotImplementedError
class RetrievalMetric(Metric):
"""
this is modified from `howto100m/metrics.py`.
History of changes:
refactor as a class.
add metric_key in __init__
"""
def __init__(self, config, metric_names=["R1", "R5", "R10", "MR"]):
super().__init__(config, metric_names)
self.error = False # TODO(huxu): add to config to print error.
def compute_metrics(self, outputs, texts, **kwargs):
x = outputs
sx = np.sort(-x, axis=1)
d = np.diag(-x)
d = d[:, np.newaxis]
ind = sx - d
ind = np.where(ind == 0)
ind = ind[1]
metrics = {}
metrics["R1"] = float(np.sum(ind == 0)) / len(ind)
metrics["R5"] = float(np.sum(ind < 5)) / len(ind)
metrics["R10"] = float(np.sum(ind < 10)) / len(ind)
metrics["MR"] = np.median(ind) + 1
max_idx = np.argmax(outputs, axis=1)
if self.error:
# print top-20 errors.
error = []
for ex_idx in range(20):
error.append((texts[ex_idx], texts[max_idx[ex_idx]]))
metrics["error"] = error
return metrics
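    # How the rank computation above works on an illustrative 2x2 case:
    #   x = [[0.9, 0.2],
    #        [0.8, 0.4]]
    # the ground-truth scores are the diagonal (0.9 and 0.4); 0.9 is the
    # largest in row 0 (rank 0) while 0.4 is second largest in row 1 (rank 1),
    # so ind = [0, 1] and R1 = 0.5, R5 = R10 = 1.0, MR = 1.5.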
def print_computed_metrics(self, metrics):
r1 = metrics["R1"]
r5 = metrics["R5"]
r10 = metrics["R10"]
mr = metrics["MR"]
print(
"R@1: {:.4f} - R@5: {:.4f} - R@10: {:.4f} - Median R: {}".format(
r1, r5, r10, mr
)
)
if "error" in metrics:
print(metrics["error"])
class DiDeMoMetric(Metric):
"""
History of changes:
python 2.x to python 3.x.
merge utils.py into eval to save one file.
reference: https://github.com/LisaAnne/LocalizingMoments/blob/master/utils/eval.py
Code to evaluate your results on the DiDeMo dataset.
"""
def __init__(self, config, metric_names=["rank1", "rank5", "miou"]):
super().__init__(config, metric_names)
def compute_metrics(self, outputs, targets, **kwargs):
assert len(outputs) == len(targets)
rank1, rank5, miou = self._eval_predictions(outputs, targets)
metrics = {
"rank1": rank1,
"rank5": rank5,
"miou": miou
}
return metrics
def print_computed_metrics(self, metrics):
rank1 = metrics["rank1"]
rank5 = metrics["rank5"]
miou = metrics["miou"]
# print("Average rank@1: %f" % rank1)
# print("Average rank@5: %f" % rank5)
# print("Average iou: %f" % miou)
print(
"Average rank@1: {:.4f} Average rank@5: {:.4f} Average iou: {:.4f}".format(
rank1, rank5, miou
)
)
def _iou(self, pred, gt):
intersection = max(0, min(pred[1], gt[1]) + 1 - max(pred[0], gt[0]))
union = max(pred[1], gt[1]) + 1 - min(pred[0], gt[0])
return float(intersection)/union
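    # Worked example for the inclusive-segment IoU above: pred = (1, 2) and
    # gt = (2, 3) share one chunk (chunk 2) out of three (chunks 1, 2, 3),
    # so intersection = 1, union = 3 and the IoU is 1/3.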
def _rank(self, pred, gt):
return pred.index(tuple(gt)) + 1
def _eval_predictions(self, segments, data):
'''
Inputs:
segments: For each item in the ground truth data, rank possible video segments given the description and video.
        In DiDeMo, there are 21 possible moments extracted for each video, so the list of video segments will be of length 21.
        The first video segment should be the video segment that best corresponds to the text query.
        There are 4180 sentences in the validation data, so when evaluating a model on the val dataset,
        segments should be a list of length 4180, and each item in segments should be a list of length 21.
data: ground truth data
'''
average_ranks = []
average_iou = []
for s, d in zip(segments, data):
pred = s[0]
ious = [self._iou(pred, t) for t in d['times']]
average_iou.append(np.mean(np.sort(ious)[-3:]))
ranks = [self._rank(s, t) for t in d['times'] if tuple(t) in s] # if t in s] is added for s, e not in prediction.
average_ranks.append(np.mean(np.sort(ranks)[:3]))
rank1 = np.sum(np.array(average_ranks) <= 1)/float(len(average_ranks))
rank5 = np.sum(np.array(average_ranks) <= 5)/float(len(average_ranks))
miou = np.mean(average_iou)
# print("Average rank@1: %f" % rank1)
# print("Average rank@5: %f" % rank5)
# print("Average iou: %f" % miou)
return rank1, rank5, miou
class NLGMetric(Metric):
def __init__(
self,
config,
metric_names=[
"Bleu_1", "Bleu_2", "Bleu_3", "Bleu_4",
"METEOR", "ROUGE_L", "CIDEr"
]
):
super().__init__(config, metric_names)
# please install NLGEval from `https://github.com/Maluuba/nlg-eval`
from nlgeval import NLGEval
self.nlg = NLGEval()
def compute_metrics(self, outputs, targets, **kwargs):
return self.nlg.compute_metrics(
hyp_list=outputs, ref_list=targets)
def print_computed_metrics(self, metrics):
Bleu_1 = metrics["Bleu_1"]
Bleu_2 = metrics["Bleu_2"]
Bleu_3 = metrics["Bleu_3"]
Bleu_4 = metrics["Bleu_4"]
METEOR = metrics["METEOR"]
ROUGE_L = metrics["ROUGE_L"]
CIDEr = metrics["CIDEr"]
print(
"Bleu_1: {:.4f} - Bleu_2: {:.4f} - Bleu_3: {:.4f} - Bleu_4: {:.4f} - METEOR: {:.4f} - ROUGE_L: {:.4f} - CIDEr: {:.4f}".format(
Bleu_1, Bleu_2, Bleu_3, Bleu_4, METEOR, ROUGE_L, CIDEr
)
)
class QAMetric(Metric):
def __init__(
self,
config,
metric_names=["acc"]
):
super().__init__(config, metric_names)
def compute_metrics(self, outputs, targets, **kwargs):
from sklearn.metrics import accuracy_score
return {"acc": accuracy_score(targets, outputs)}
def print_computed_metrics(self, metrics):
print("acc: {:.4f}".format(metrics["acc"]))
class COINActionSegmentationMetric(Metric):
"""
    The COIN dataset lists 3 repos for Action Segmentation:
    Action Sets, NeuralNetwork-Viterbi, and TCFPN-ISBA.
    The first and second are the same:
https://github.com/alexanderrichard/action-sets/blob/master/eval.py
Future reference for the third:
`https://github.com/Zephyr-D/TCFPN-ISBA/blob/master/utils/metrics.py`
"""
def __init__(self, config, metric_name=["frame_acc"]):
super().__init__(config, metric_name)
def compute_metrics(self, outputs, targets):
n_frames = 0
n_errors = 0
n_errors = sum(outputs != targets)
n_frames = len(targets)
return {"frame_acc": 1.0 - float(n_errors) / n_frames}
def print_computed_metrics(self, metrics):
fa = metrics["frame_acc"]
print("frame accuracy:", fa)
class CrossTaskMetric(Metric):
def __init__(self, config, metric_names=["recall"]):
super().__init__(config, metric_names)
def compute_metrics(self, outputs, targets, **kwargs):
"""refactored from line 166:
https://github.com/DmZhukov/CrossTask/blob/master/train.py"""
recalls = self._get_recalls(Y_true=targets, Y_pred=outputs)
results = {}
for task, rec in recalls.items():
results[str(task)] = rec
avg_recall = np.mean(list(recalls.values()))
results["recall"] = avg_recall
return results
def print_computed_metrics(self, metrics):
print('Recall: {0:0.3f}'.format(metrics["recall"]))
for task in metrics:
if task != "recall":
print('Task {0}. Recall = {1:0.3f}'.format(
task, metrics[task]))
def _get_recalls(self, Y_true, Y_pred):
"""refactored from
https://github.com/DmZhukov/CrossTask/blob/master/train.py"""
step_match = {task: 0 for task in Y_true.keys()}
step_total = {task: 0 for task in Y_true.keys()}
for task, ys_true in Y_true.items():
ys_pred = Y_pred[task]
for vid in set(ys_pred.keys()).intersection(set(ys_true.keys())):
y_true = ys_true[vid]
y_pred = ys_pred[vid]
step_total[task] += (y_true.sum(axis=0) > 0).sum()
step_match[task] += (y_true*y_pred).sum()
recalls = {
task: step_match[task] / n for task, n in step_total.items()}
return recalls
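"""
# demo (hypothetical sketch): the nested dict layout consumed by _get_recalls,
# assuming Y_true and Y_pred map task -> video id -> (T, K) step-assignment
# arrays (`config` assumed to come from the task yaml).
Y_true = {"task_a": {"vid_0": np.array([[1, 0], [0, 1], [0, 0]])}}
Y_pred = {"task_a": {"vid_0": np.array([[1, 0], [0, 0], [0, 1]])}}
recalls = CrossTaskMetric(config)._get_recalls(Y_true, Y_pred)
# recall for "task_a" = matched step frames / ground-truth steps present = 1 / 2
"""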
class ActionRecognitionMetric(Metric):
def __init__(
self,
config,
metric_names=["acc", "acc_splits", "r1_splits", "r5_splits", "r10_splits"]
):
super().__init__(config, metric_names)
def compute_metrics(self, outputs, targets, splits, **kwargs):
all_video_embd = outputs
labels = targets
split1, split2, split3 = splits
accs = []
r1s = []
r5s = []
r10s = []
for split in range(3):
if split == 0:
s = split1
elif split == 1:
s = split2
else:
s = split3
X_pred = all_video_embd[np.where(s == 2)[0]]
label_test = labels[np.where(s == 2)[0]]
logits = X_pred
X_pred = np.argmax(X_pred, axis=1)
acc = np.sum(X_pred == label_test) / float(len(X_pred))
accs.append(acc)
# compute recall.
sorted_pred = (-logits).argsort(axis=-1)
label_test_sp = label_test.reshape(-1, 1)
r1 = np.mean((sorted_pred[:, :1] == label_test_sp).sum(axis=1), axis=0)
r5 = np.mean((sorted_pred[:, :5] == label_test_sp).sum(axis=1), axis=0)
r10 = np.mean((sorted_pred[:, :10] == label_test_sp).sum(axis=1), axis=0)
r1s.append(r1)
r5s.append(r5)
r10s.append(r10)
return {"acc": accs[0], "acc_splits": accs, "r1_splits": r1s, "r5_splits": r5s, "r10_splits": r10s}
def print_computed_metrics(self, metrics):
for split, acc in enumerate(metrics["acc_splits"]):
print("Top 1 accuracy on split {}: {}; r1 {}; r5 {}; r10 {}".format(
split + 1, acc,
metrics["r1_splits"][split],
metrics["r5_splits"][split],
metrics["r10_splits"][split],
)
)
| EXA-1-master | exa/libraries/fairseq/examples/MMPT/mmpt/evaluators/metric.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os
import glob
import numpy as np
from . import metric as metric_path
from . import predictor as predictor_path
class Evaluator(object):
"""
perform evaluation on a single (downstream) task.
make this both offline and online.
TODO(huxu) saving evaluation results.
"""
def __init__(self, config, eval_dataloader=None):
if config.metric is None:
raise ValueError("config.metric is", config.metric)
metric_cls = getattr(metric_path, config.metric)
self.metric = metric_cls(config)
if config.predictor is None:
raise ValueError("config.predictor is", config.predictor)
predictor_cls = getattr(predictor_path, config.predictor)
self.predictor = predictor_cls(config)
self.eval_dataloader = eval_dataloader
def __call__(self):
try:
print(self.predictor.pred_dir)
for pred_file in glob.glob(
self.predictor.pred_dir + "/*_merged.npy"):
outputs = np.load(pred_file)
results = self.metric.compute_metrics(outputs)
self.metric.print_computed_metrics(results)
outputs = np.load(os.path.join(
self.predictor.pred_dir, "merged.npy"))
results = self.metric.compute_metrics(outputs)
return {"results": results, "metric": self.metric}
except FileNotFoundError:
print("\n[missing]", self.predictor.pred_dir)
return {}
def evaluate(self, model, eval_dataloader=None, output_file="merged"):
if eval_dataloader is None:
eval_dataloader = self.eval_dataloader
outputs = self.predictor.predict_loop(
model, eval_dataloader, output_file)
results = self.metric.compute_metrics(**outputs)
return results
| EXA-1-master | exa/libraries/fairseq/examples/MMPT/mmpt/evaluators/evaluator.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import random
import json
import pickle
from tqdm import tqdm
import os
import numpy as np
class CaptionDedupProcessor(object):
"""remove overlapping of caption sentences(clip).
Some statistics:
caption:
{'t_clip_len': 246.6448431320854,
'video_len': 281.09174795676245,
'clip_tps': 0.8841283727427481,
'video_tps': 0.7821156477732097,
'min_clip_len': 0.0,
'max_clip_len': 398.3,
'mean_clip_len': 3.196580003006861,
'num_clip': 77.15897706301081}
raw_caption:
{'t_clip_len': 238.95908778424115,
'video_len': 267.5914859862507,
'clip_tps': 2.4941363624267963,
'video_tps': 2.258989769647173,
'min_clip_len': 0.0,
'max_clip_len': 398.3,
'mean_clip_len': 3.0537954186814265,
'num_clip': 78.24986779481756}
"""
def __init__(self, pkl_file):
with open(pkl_file, "rb") as fd:
self.data = pickle.load(fd)
self.stat = {
"t_clip_len": [],
"video_len": [],
"clip_tps": [],
"video_tps": [],
"clip_len": [],
}
def __call__(self):
for idx, video_id in enumerate(tqdm(self.data)):
caption = json.loads(self.data[video_id])
caption = self._dedup(caption)
if idx < 4096: # for the first 4096 examples, compute the statistics.
self.save_stat(video_id, caption)
self.data[video_id] = json.dumps(caption)
self.print_stat()
def single(self, video_id):
caption = json.loads(self.data[video_id])
for clip_idx, (start, end, text) in enumerate(
zip(caption["start"], caption["end"], caption["text"])
):
print(start, end, text)
print("@" * 100)
caption = self._dedup(caption)
for clip_idx, (start, end, text) in enumerate(
zip(caption["start"], caption["end"], caption["text"])
):
print(start, end, text)
print("#" * 100)
self.save_stat(video_id, caption)
self.print_stat()
def finalize(self, tgt_fn):
with open(tgt_fn, "wb") as fw:
pickle.dump(self.data, fw, pickle.HIGHEST_PROTOCOL)
def save_stat(self, video_id, caption):
video_fn = os.path.join(
"data/feat/feat_how2_s3d", video_id + ".npy"
)
if os.path.isfile(video_fn):
            with open(video_fn, "rb", 1) as fr:  # small buffering; only the .npy header is read.
version = np.lib.format.read_magic(fr)
shape, fortran, dtype = np.lib.format._read_array_header(fr, version)
video_len = shape[0]
t_clip_len = 0.0
t_tokens = 0
for idx, (start, end, text) in enumerate(
zip(caption["start"], caption["end"], caption["text"])
):
clip_len = (
(end - max(caption["end"][idx - 1], start))
if idx > 0
else end - start
)
t_clip_len += clip_len
t_tokens += len(text.split(" "))
self.stat["clip_len"].append(clip_len)
self.stat["t_clip_len"].append(t_clip_len)
self.stat["video_len"].append(video_len)
self.stat["clip_tps"].append(t_tokens / t_clip_len)
self.stat["video_tps"].append(t_tokens / video_len)
def print_stat(self):
result = {
"t_clip_len": np.mean(self.stat["t_clip_len"]),
"video_len": np.mean(self.stat["video_len"]),
"clip_tps": np.mean(self.stat["clip_tps"]),
"video_tps": np.mean(self.stat["video_tps"]),
"min_clip_len": min(self.stat["clip_len"]),
"max_clip_len": max(self.stat["clip_len"]),
"mean_clip_len": np.mean(self.stat["clip_len"]),
"num_clip": len(self.stat["clip_len"]) / len(self.stat["video_tps"]),
}
print(result)
def _dedup(self, caption):
def random_merge(end_idx, start, end, text, starts, ends, texts):
if random.random() > 0.5:
# print(clip_idx, "[PARTIAL INTO PREV]", end_idx)
# overlapped part goes to the end of previous.
ends[-1] = max(ends[-1], start) # ?
rest_text = text[end_idx:].strip()
if rest_text:
starts.append(max(ends[-1], start))
ends.append(max(end, starts[-1]))
texts.append(rest_text)
else: # goes to the beginning of the current.
# strip the previous.
left_text = texts[-1][:-end_idx].strip()
if left_text:
# print(clip_idx, "[PREV PARTIAL INTO CUR]", end_idx)
ends[-1] = min(ends[-1], start)
texts[-1] = left_text
else:
# print(clip_idx, "[PREV LEFT NOTHING ALL INTO CUR]", end_idx)
starts.pop(-1)
ends.pop(-1)
texts.pop(-1)
starts.append(start)
ends.append(end)
texts.append(text)
starts, ends, texts = [], [], []
for clip_idx, (start, end, text) in enumerate(
zip(caption["start"], caption["end"], caption["text"])
):
if not isinstance(text, str):
continue
text = text.replace("\n", " ").strip()
if len(text) == 0:
continue
starts.append(start)
ends.append(end)
texts.append(text)
break
for clip_idx, (start, end, text) in enumerate(
zip(
caption["start"][clip_idx + 1:],
caption["end"][clip_idx + 1:],
caption["text"][clip_idx + 1:],
)
):
if not isinstance(text, str):
continue
text = text.replace("\n", " ").strip()
if len(text) == 0:
continue
# print(clip_idx, texts[-5:])
# print(clip_idx, start, end, text)
if texts[-1].endswith(text): # subset of prev caption -> merge
# print(clip_idx, "[MERGE INTO PREV]")
ends[-1] = max(ends[-1], end)
elif text.startswith(texts[-1]): # superset of prev caption -> merge
# print(clip_idx, "[PREV MERGE INTO CUR]")
texts[-1] = text
starts[-1] = min(starts[-1], start)
ends[-1] = max(ends[-1], end)
else: # overlapping or non-overlapping.
for end_idx in range(1, len(text) + 1):
if texts[-1].endswith(text[:end_idx]):
random_merge(end_idx, start, end, text, starts, ends, texts)
break
else:
starts.append(start)
ends.append(end)
texts.append(text)
assert (ends[-1] + 0.001) >= starts[-1] and len(
texts[-1]
) > 0, "{} {} {} <- {} {} {}, {} {} {}".format(
str(starts[-1]),
str(ends[-1]),
texts[-1],
caption["start"][clip_idx - 1],
caption["end"][clip_idx - 1],
caption["text"][clip_idx - 1],
str(start),
str(end),
text,
)
return {"start": starts, "end": ends, "text": texts}
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser(description="dedup how2 caption")
parser.add_argument('--how2dir', default="data/how2")
args = parser.parse_args()
raw_caption_json = os.path.join(args.how2dir, "raw_caption.json")
raw_caption_pickle = os.path.join(args.how2dir, "raw_caption.pkl")
raw_caption_dedup_pickle = os.path.join(args.how2dir, "raw_caption_dedup.pkl")
def convert_to_pickle(src_fn, tgt_fn):
with open(src_fn) as fd:
captions = json.load(fd)
for video_id in captions:
captions[video_id] = json.dumps(captions[video_id])
with open(tgt_fn, "wb") as fw:
pickle.dump(captions, fw, pickle.HIGHEST_PROTOCOL)
if not os.path.isfile(raw_caption_pickle):
convert_to_pickle(raw_caption_json, raw_caption_pickle)
deduper = CaptionDedupProcessor(raw_caption_pickle)
deduper()
deduper.finalize(raw_caption_dedup_pickle)
"""
# demo
deduper = CaptionDedupProcessor("data/how2/raw_caption.pkl")
deduper.single("HfIeQ9pzL5U")
"""
| EXA-1-master | exa/libraries/fairseq/examples/MMPT/mmpt/processors/dedupprocessor.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from .how2processor import (
ShardedHow2MetaProcessor,
ShardedVideoProcessor,
ShardedTextProcessor,
VariedLenAligner,
OverlappedAligner
)
class ShardedHow2VideoRetriMetaProcessor(ShardedHow2MetaProcessor):
def __init__(self, config):
super().__init__(config)
self.num_video_per_batch = config.num_video_per_batch
self.cands = [
self.data[batch_offset:batch_offset + self.num_video_per_batch]
for batch_offset in
range(0, (len(self.data) // (8 * self.num_video_per_batch)) * 8 * self.num_video_per_batch, self.num_video_per_batch)]
def __len__(self):
return len(self.cands)
def set_candidates(self, cands):
# no changes on num of batches.
print(len(self.cands), "->", len(cands))
# assert len(self.cands) == len(cands)
self.cands = cands
def __getitem__(self, idx):
video_ids = self.cands[idx]
assert isinstance(video_ids, list)
sharded_video_idxs = []
for video_id in video_ids:
shard_id, video_idx = self.video_id_to_shard[video_id]
sharded_video_idxs.append((video_id, -1, shard_id, video_idx))
return sharded_video_idxs, sharded_video_idxs
class ShardedVideoRetriVideoProcessor(ShardedVideoProcessor):
"""In retrival case the video_id
is a list of tuples: `(shard_id, video_idx)` ."""
def __call__(self, sharded_video_idxs):
assert isinstance(sharded_video_idxs, list)
cand_feats = []
for shared_video_idx in sharded_video_idxs:
feat = super().__call__(shared_video_idx)
cand_feats.append(feat)
return cand_feats
class ShardedVideoRetriTextProcessor(ShardedTextProcessor):
"""In retrival case the video_id
is a list of tuples: `(shard_id, video_idx)` ."""
def __call__(self, sharded_video_idxs):
assert isinstance(sharded_video_idxs, list)
cand_caps = []
for shared_video_idx in sharded_video_idxs:
caps = super().__call__(shared_video_idx)
cand_caps.append(caps)
return cand_caps
class VideoRetriAligner(VariedLenAligner):
    # RetriTask will trim dim-0.
def __call__(self, sharded_video_idxs, video_features, text_features):
from transformers import default_data_collator
batch, video_ids = [], []
for video_id, video_feature, text_feature in \
zip(sharded_video_idxs, video_features, text_features):
sub_batch = super().__call__(video_id, video_feature, text_feature)
batch.append(sub_batch)
if isinstance(video_id, tuple):
video_id = video_id[0]
video_ids.append(video_id)
batch = default_data_collator(batch)
batch["video_id"] = video_ids
return batch
class VideoRetriOverlappedAligner(OverlappedAligner):
    # RetriTask will trim dim-0.
def __call__(self, sharded_video_idxs, video_features, text_features):
from transformers import default_data_collator
batch, video_ids = [], []
for video_id, video_feature, text_feature in \
zip(sharded_video_idxs, video_features, text_features):
sub_batch = super().__call__(video_id, video_feature, text_feature)
batch.append(sub_batch)
if isinstance(video_id, tuple):
video_id = video_id[0]
video_ids.append(video_id)
batch = default_data_collator(batch)
batch["video_id"] = video_ids
return batch
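"""
# demo (hypothetical sketch): how the retrieval meta processor chunks video ids
# into candidate lists, assuming num_video_per_batch=2 and 40 videos.
data = ["v%d" % i for i in range(40)]
kept = (len(data) // (8 * 2)) * 8 * 2            # 32 ids kept (a multiple of 8 batches)
cands = [data[i:i + 2] for i in range(0, kept, 2)]
# -> 16 candidate lists: [["v0", "v1"], ["v2", "v3"], ...]
"""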
| EXA-1-master | exa/libraries/fairseq/examples/MMPT/mmpt/processors/how2retriprocessor.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from .processor import *
from .how2processor import *
from .how2retriprocessor import *
from .dsprocessor import *
try:
from .rawvideoprocessor import *
from .codecprocessor import *
from .webvidprocessor import *
from .expprocessor import *
from .exphow2processor import *
from .exphow2retriprocessor import *
from .expcodecprocessor import *
from .expfeatureencoder import *
from .expdsprocessor import *
except ImportError:
pass
| EXA-1-master | exa/libraries/fairseq/examples/MMPT/mmpt/processors/__init__.py |
# Copyright (c) Facebook, Inc. All Rights Reserved
import numpy as np
import os
import torch
class Processor(object):
"""
A generic processor for video (codec, feature etc.) and text.
"""
def __call__(self, **kwargs):
raise NotImplementedError
class MetaProcessor(Processor):
"""
A meta processor is expected to load the metadata of a dataset:
(e.g., video_ids, or captions).
    You must implement the `__getitem__` method (meta datasets are rather diverse).
"""
def __init__(self, config):
self.split = config.split
def __len__(self):
return len(self.data)
def __getitem__(self, idx):
raise NotImplementedError
def _get_split_path(self, config):
splits = {
"train": config.train_path,
"valid": config.val_path,
"test": config.test_path,
}
if config.split is not None:
return splits[config.split]
return config.train_path
class TextProcessor(Processor):
"""
A generic Text processor: rename this as `withTokenizer`.
tokenize a string of text on-the-fly.
Warning: mostly used for end tasks.
(on-the-fly tokenization is slow for how2.)
TODO(huxu): move this class as a subclass.
"""
def __init__(self, config):
self.bert_name = str(config.bert_name)
self.use_fast = config.use_fast
from transformers import AutoTokenizer
self.tokenizer = AutoTokenizer.from_pretrained(
self.bert_name, use_fast=self.use_fast
)
def __call__(self, text_id):
caption = self.tokenizer(text_id, add_special_tokens=False)
return caption["input_ids"]
class VideoProcessor(Processor):
"""
    A generic video processor: loads a numpy array of video tokens by default.
"""
def __init__(self, config):
self.vfeat_dir = config.vfeat_dir
def __call__(self, video_fn):
if isinstance(video_fn, tuple):
video_fn = video_fn[0]
assert isinstance(video_fn, str)
video_fn = os.path.join(self.vfeat_dir, video_fn + ".npy")
feat = np.load(video_fn)
return feat
class Aligner(object):
"""
    An align processor aligns video and text and outputs a dict of tensors (for a model).
"""
def __init__(self, config):
"""__init__ needs to be light weight for more workers/threads."""
self.split = config.split
self.max_video_len = config.max_video_len
self.max_len = config.max_len
from transformers import AutoTokenizer
tokenizer = AutoTokenizer.from_pretrained(
str(config.bert_name), use_fast=config.use_fast
)
self.cls_token_id = tokenizer.cls_token_id
self.sep_token_id = tokenizer.sep_token_id
self.pad_token_id = tokenizer.pad_token_id
self.mask_token_id = tokenizer.mask_token_id
def __call__(self, video_id, video_feature, text_feature):
raise NotImplementedError
def _build_video_seq(self, video_feature, video_clips=None):
"""
`video_feature`: available video tokens.
`video_clips`: video clip sequence to build.
"""
if not isinstance(video_feature, np.ndarray):
raise ValueError(
"unsupported type of video_feature", type(video_feature)
)
if video_clips is None:
# this is borrowed from DSAligner
video_start = 0
video_end = min(len(video_feature), self.max_video_len)
# the whole sequence is a single clip.
video_clips = {"start": [video_start], "end": [video_end]}
vfeats = np.zeros(
(self.max_video_len, video_feature.shape[1]), dtype=np.float32
)
vmasks = torch.zeros((self.max_video_len,), dtype=torch.bool)
video_len = 0
for start, end in zip(video_clips["start"], video_clips["end"]):
clip_len = min(self.max_video_len - video_len, (end - start))
if clip_len > 0:
vfeats[video_len: video_len + clip_len] = video_feature[
start: start + clip_len
]
vmasks[video_len: video_len + clip_len] = 1
video_len += clip_len
vfeats = torch.from_numpy(vfeats)
return vfeats, vmasks
def _build_text_seq(self, text_feature, text_clip_indexs=None):
"""
`text_feature`: all available clips.
        `text_clip_indexs`: clip sequence to build.
"""
if text_clip_indexs is None:
text_clip_indexs = [0]
full_caps = []
if isinstance(text_feature, dict):
for clip_idx in text_clip_indexs:
full_caps.extend(text_feature["cap"][clip_idx])
else:
full_caps = text_feature
max_text_len = self.max_len - self.max_video_len - 3
full_caps = full_caps[:max_text_len]
full_caps = (
[self.cls_token_id, self.sep_token_id] + full_caps + [self.sep_token_id]
)
text_pad_len = self.max_len - len(full_caps) - self.max_video_len
padded_full_caps = full_caps + [self.pad_token_id] * text_pad_len
caps = torch.LongTensor(padded_full_caps)
cmasks = torch.zeros((len(padded_full_caps),), dtype=torch.bool)
cmasks[: len(full_caps)] = 1
return caps, cmasks
def batch_post_processing(self, batch, video_feature):
return batch
class MMAttentionMask2DProcessor(Processor):
"""text generation requires 2d mask
that is harder to generate by GPU at this stage."""
def __call__(self, vmask, cmask, mtype):
if mtype == "textgen":
return self._build_textgeneration_mask(vmask, cmask)
elif mtype == "videogen":
return self._build_videogeneration_mask(vmask, cmask)
else:
return self._build_mm_mask(vmask, cmask)
def _build_mm_mask(self, vmask, cmask):
mask_1d = torch.cat([cmask[:1], vmask, cmask[1:]], dim=0)
return mask_1d[None, :].repeat(mask_1d.size(0), 1)
def _build_videogeneration_mask(self, vmask, cmask):
# cls_mask is only about text otherwise it will leak generation.
cls_text_mask = torch.cat([
# [CLS]
torch.ones(
(1,), dtype=torch.bool, device=cmask.device),
# video tokens and [SEP] for video.
torch.zeros(
(vmask.size(0) + 1,), dtype=torch.bool, device=cmask.device),
cmask[2:]
], dim=0)
        # concatenate horizontally.
video_len = int(vmask.sum())
video_masks = torch.cat([
# [CLS]
torch.ones(
(video_len, 1), dtype=torch.bool, device=cmask.device
),
torch.tril(
torch.ones(
(video_len, video_len),
dtype=torch.bool, device=cmask.device)),
# video_padding
torch.zeros(
(video_len, vmask.size(0) - video_len),
dtype=torch.bool, device=cmask.device
),
# [SEP] for video (unused).
torch.zeros(
(video_len, 1), dtype=torch.bool, device=cmask.device
),
cmask[2:].unsqueeze(0).repeat(video_len, 1)
], dim=1)
text_masks = cls_text_mask[None, :].repeat(
cmask.size(0) - 2, 1)
video_padding_masks = cls_text_mask[None, :].repeat(
vmask.size(0) - video_len, 1)
return torch.cat([
cls_text_mask[None, :],
video_masks,
video_padding_masks,
torch.cat([cmask[:1], vmask, cmask[1:]], dim=0)[None,:],
text_masks
], dim=0)
def _build_textgeneration_mask(self, vmask, cmask):
# cls_mask is only about video otherwise it will leak generation.
cls_video_mask = torch.cat([
# [CLS]
torch.ones(
(1,), dtype=torch.bool, device=cmask.device),
vmask,
# [SEP]
torch.ones((1,), dtype=torch.bool, device=cmask.device),
torch.zeros(
(cmask.size(0)-2,), dtype=torch.bool, device=cmask.device)
], dim=0)
        # concatenate horizontally.
text_len = int(cmask[2:].sum())
text_masks = torch.cat([
# [CLS]
torch.ones(
(text_len, 1), dtype=torch.bool, device=cmask.device
),
vmask.unsqueeze(0).repeat(text_len, 1),
# [SEP] for video.
torch.ones(
(text_len, 1), dtype=torch.bool, device=cmask.device
),
torch.tril(
torch.ones(
(text_len, text_len),
dtype=torch.bool, device=cmask.device)),
# padding.
torch.zeros(
(text_len, cmask.size(0) - text_len - 2),
dtype=torch.bool, device=cmask.device
)
], dim=1)
cls_video_masks = cls_video_mask[None, :].repeat(
vmask.size(0) + 2, 1)
text_padding_masks = cls_video_mask[None, :].repeat(
cmask.size(0) - text_len - 2, 1)
return torch.cat([
cls_video_masks, text_masks, text_padding_masks], dim=0)
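"""
# demo (hypothetical sketch): a 2D "textgen" attention mask for a toy input with
# 3 video slots (2 valid) and a text sequence of length 5
# ([CLS], [SEP], two valid text positions, one pad).
vmask = torch.tensor([True, True, False])
cmask = torch.tensor([True, True, True, True, False])
mask2d = MMAttentionMask2DProcessor()(vmask, cmask, "textgen")
# mask2d.shape == (8, 8), i.e. (len(vmask) + len(cmask)) on each side; every text
# row attends to [CLS], the valid video tokens, the video [SEP] and earlier text only.
"""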
| EXA-1-master | exa/libraries/fairseq/examples/MMPT/mmpt/processors/processor.py |
# Copyright (c) Facebook, Inc. All Rights Reserved
"""
Processors for all downstream (ds) tasks.
"""
import json
import os
import pickle
import random
import math
import numpy as np
import torch
from collections import defaultdict
from .processor import (
MetaProcessor,
VideoProcessor,
TextProcessor,
Aligner,
MMAttentionMask2DProcessor,
)
from .how2processor import TextGenerationProcessor
# ------------- A General Aligner for all downstream tasks-----------------
class DSAligner(Aligner):
"""
Downstream (DS) aligner shared by all datasets.
"""
def __call__(self, video_id, video_feature, text_feature, wps=0.7):
# random sample a starting sec for video.
video_start = 0
video_end = min(len(video_feature), self.max_video_len)
# the whole sequence is a single clip.
video_clips = {"start": [video_start], "end": [video_end]}
text_feature = {
"cap": [text_feature],
"start": [video_start],
"end": [len(text_feature) / wps],
}
text_clip_indexs = [0]
vfeats, vmasks = self._build_video_seq(
video_feature, video_clips
)
caps, cmasks = self._build_text_seq(
text_feature, text_clip_indexs
)
return {
"caps": caps,
"cmasks": cmasks,
"vfeats": vfeats,
"vmasks": vmasks,
"video_id": video_id,
}
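"""
# demo (hypothetical sketch): DSAligner wraps a flat token list into the clip
# format expected by Aligner, assuming wps=0.7 (words per second) to fake a
# clip duration for downstream datasets without timestamps.
token_ids = [2023, 2003, 1037, 7953]          # 4 tokens
pseudo_end = len(token_ids) / 0.7             # ~5.7 "seconds"
text_feature = {"cap": [token_ids], "start": [0], "end": [pseudo_end]}
"""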
class NLGTextProcessor(TextProcessor):
"""
Also return the original text as ref.
"""
def __call__(self, text_id):
return super().__call__(text_id), text_id
class DSNLGAligner(DSAligner):
"""extend with the capability of 2d mask for generation."""
def __init__(self, config):
super().__init__(config)
self.attnmasker = MMAttentionMask2DProcessor()
from transformers import AutoTokenizer
tokenizer = AutoTokenizer.from_pretrained(
            str(config.bert_name), use_fast=config.use_fast,
bos_token="[CLS]", eos_token="[SEP]"
)
self.tokenizer = tokenizer
self.bos_token_id = tokenizer.bos_token_id
self.eos_token_id = tokenizer.eos_token_id
self.textgen = TextGenerationProcessor(tokenizer)
def __call__(self, video_id, video_feature, text_feature):
output = super().__call__(video_id, video_feature, text_feature[0])
if self.split == "test":
# output.update({"ref": text_feature[1]})
output.update({"ref": self.tokenizer.decode(
output["caps"], skip_special_tokens=True)})
text_label = output["caps"]
cmasks = torch.BoolTensor([1] * text_label.size(0))
caps = torch.LongTensor([
self.cls_token_id,
self.sep_token_id,
self.bos_token_id])
else:
caps, text_label = self.textgen(output["caps"])
cmasks = output["cmasks"]
attention_mask = self.attnmasker(
output["vmasks"], cmasks, "textgen")
output.update({
"caps": caps,
"cmasks": cmasks,
"text_label": text_label,
"attention_mask": attention_mask,
})
return output
# -------------------- MSRVTT ------------------------
class MSRVTTMetaProcessor(MetaProcessor):
"""MSRVTT dataset.
reference: `howto100m/msrvtt_dataloader.py`
"""
def __init__(self, config):
super().__init__(config)
import pandas as pd
data = pd.read_csv(self._get_split_path(config))
# TODO: add a text1ka flag.
if config.split == "train" \
and config.full_test_path is not None \
and config.jsfusion_path is not None:
            # add testing videos from full_test_path not used by jsfusion.
additional_data = pd.read_csv(config.full_test_path)
jsfusion_data = pd.read_csv(config.jsfusion_path)
for video_id in additional_data["video_id"]:
if video_id not in jsfusion_data["video_id"].values:
data = data.append(
{"video_id": video_id}, ignore_index=True)
if config.dup is not None and config.split == "train":
data = data.append([data] * (config.dup - 1), ignore_index=True)
self.data = data
def __len__(self):
return len(self.data)
def __getitem__(self, idx):
"""slightly modify with if condition to combine train/test."""
vid, sentence = None, None
vid = self.data["video_id"].values[idx]
if "sentence" in self.data: # for testing.
sentence = self.data["sentence"].values[idx]
else: # for training.
sentence = vid
return vid, sentence
class MSRVTTTextProcessor(TextProcessor):
"""MSRVTT dataset.
reference: `msrvtt_dataloader.py` `MSRVTT_TrainDataLoader`.
TODO (huxu): add max_words.
"""
def __init__(self, config):
super().__init__(config)
self.sentences = None
if config.json_path is not None and config.split == "train":
with open(config.json_path) as fd:
self.data = json.load(fd)
self.sentences = defaultdict(list)
for s in self.data["sentences"]:
self.sentences[s["video_id"]].append(s["caption"])
def __call__(self, text_id):
if self.sentences is not None:
rind = random.randint(0, len(self.sentences[text_id]) - 1)
sentence = self.sentences[text_id][rind]
else:
sentence = text_id
caption = self.tokenizer(sentence, add_special_tokens=False)
return caption["input_ids"]
class MSRVTTNLGTextProcessor(MSRVTTTextProcessor):
"""TODO: change dsaligner and merge to avoid any NLG text processor."""
def __call__(self, text_id):
if self.sentences is not None:
rind = random.randint(0, len(self.sentences[text_id]) - 1)
sentence = self.sentences[text_id][rind]
else:
sentence = text_id
caption = self.tokenizer(sentence, add_special_tokens=False)
return caption["input_ids"], sentence
class MSRVTTQAMetaProcessor(MetaProcessor):
"""MSRVTT-QA: retrieval-based multi-choice QA from JSFusion dataset.
For simplicity, we use the train retrieval model.
reference: `https://github.com/yj-yu/lsmdc`
"""
def __init__(self, config):
super().__init__(config)
import pandas as pd
csv_data = pd.read_csv(self._get_split_path(config), sep="\t")
data = []
for video_id, a1, a2, a3, a4, a5, answer in zip(
csv_data["vid_key"].values,
csv_data["a1"].values,
csv_data["a2"].values,
csv_data["a3"].values,
csv_data["a4"].values,
csv_data["a5"].values,
csv_data["answer"].values):
video_id = video_id.replace("msr", "video")
data.append((video_id, (answer, [a1, a2, a3, a4, a5])))
self.data = data
def __len__(self):
return len(self.data)
def __getitem__(self, idx):
return self.data[idx]
class MSRVTTQATextProcessor(TextProcessor):
"""MSRVTT-QA dataset.
text_ans is of format `(answer, [a1, a2, a3, a4, a5])`.
"""
def __call__(self, text_ans):
for ans_idx, ans in enumerate(text_ans[1]):
if isinstance(ans, str):
text_ans[1][ans_idx] = self.tokenizer(ans, add_special_tokens=False)["input_ids"]
return text_ans
class MSRVTTQAAligner(DSAligner):
"""MSRVTT dataset.
similar to sample in how2.
we call __call__ multiple times.
"""
def __call__(self, video_id, video_feature, text_feature, wps=0.7):
caps = []
cmasks = []
answer = text_feature[0]
for ans_idx, _text_feature in enumerate(text_feature[1]):
output = super().__call__(
video_id, video_feature, _text_feature, wps)
caps.append(output["caps"])
cmasks.append(output["cmasks"])
output.update({
"caps": torch.stack(caps),
"cmasks": torch.stack(cmasks),
"answers": torch.LongTensor([answer]),
})
return output
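"""
# demo (hypothetical sketch): per-example output of MSRVTTQAAligner, assuming
# 5 candidate answers and max_len / max_video_len taken from the task yaml.
# output["caps"]    -> LongTensor of shape (5, max_len - max_video_len)
# output["cmasks"]  -> BoolTensor of the same shape
# output["answers"] -> LongTensor([answer_index]) for the correct candidate
"""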
# -------------------- Youcook -----------------------
class YoucookMetaProcessor(MetaProcessor):
"""Youcook dataset.
reference: `howto100m/youcook_dataloader.py`
    note that the data can differ from the original because
    (1) some videos already in Howto100m are removed, and
    (2) stop words are removed from the captions.
TODO (huxu): make a flag to load the original caption.
(see youcookii_annotations_trainval.json).
The max_video_len can be 264 and text can be 64 tokens.
In reality we may not need that long. see projects/task/youcook.yaml
"""
def __init__(self, config):
super().__init__(config)
vfeat_dir = config.vfeat_dir
print(self._get_split_path(config))
with open(self._get_split_path(config), "rb") as fd:
data = pickle.load(fd)
all_valid_video_ids = set(
[os.path.splitext(fn)[0] for fn in os.listdir(vfeat_dir)]
)
recs = []
video_ids = set()
valid_video_ids = set()
for rec in data: # filter videos not available.
udl_idx = rec["id"].rindex("_")
video_id = rec["id"][:udl_idx]
video_ids.add(video_id)
if video_id in all_valid_video_ids:
valid_video_ids.add(video_id)
recs.append(rec)
print("total video_ids in .pkl", len(video_ids))
print("valid video_ids in .pkl", len(valid_video_ids))
print("please verify {train,val}_list.txt")
data = recs
self.data = data
with open(config.trainval_annotation) as fd:
self.youcook_annotation = json.load(fd)["database"]
if config.use_annotation_text is True:
print("using text in annotation.")
self.use_annotation_caption = True
else:
self.use_annotation_caption = False
def __getitem__(self, idx):
def _get_video_and_caption(rec):
vid = rec["id"]
udl_idx = vid.rindex("_")
video_id, clip_id = vid[:udl_idx], int(vid[udl_idx + 1:])
clip = self.youcook_annotation[video_id]["annotations"][clip_id]
start, end = clip["segment"]
if self.use_annotation_caption:
caption = clip["sentence"]
else:
caption = rec["caption"]
return (video_id, start, end), caption
rec = self.data[idx]
video_info, text_info = _get_video_and_caption(rec)
return video_info, text_info
class YoucookVideoProcessor(VideoProcessor):
"""video_fn is a tuple of (video_id, start, end) now."""
def __call__(self, video_fn):
video_id, start, end = video_fn
feat = np.load(os.path.join(self.vfeat_dir, video_id + ".npy"))
return feat[start:end]
class YoucookNLGMetaProcessor(MetaProcessor):
"""NLG uses the original split:
`train_list.txt` and `val_list.txt`
"""
def __init__(self, config):
super().__init__(config)
vfeat_dir = config.vfeat_dir
print(self._get_split_path(config))
with open(self._get_split_path(config)) as fd:
video_ids = [
line.strip().split("/")[1] for line in fd.readlines()]
print("total video_ids in train/val_list.txt", len(video_ids))
all_valid_video_ids = set(
[os.path.splitext(fn)[0] for fn in os.listdir(vfeat_dir)]
)
video_ids = [
video_id for video_id in video_ids
if video_id in all_valid_video_ids]
print("valid video_ids in train/val_list.txt", len(video_ids))
with open(config.trainval_annotation) as fd:
self.youcook_annotation = json.load(fd)["database"]
data = []
for video_id in video_ids:
for clip in self.youcook_annotation[video_id]["annotations"]:
start, end = clip["segment"]
caption = clip["sentence"]
data.append(((video_id, start, end), caption))
self.data = data
def __getitem__(self, idx):
return self.data[idx]
# --------------------- CrossTask -------------------------
class CrossTaskMetaProcessor(MetaProcessor):
def __init__(self, config):
super().__init__(config)
np.random.seed(0) # deterministic random split.
task_vids = self._get_vids(
config.train_csv_path,
config.vfeat_dir,
config.annotation_path)
val_vids = self._get_vids(
config.val_csv_path,
config.vfeat_dir,
config.annotation_path)
# filter out those task and vids appear in val_vids.
task_vids = {
task: [
vid for vid in vids
if task not in val_vids or vid not in val_vids[task]]
for task, vids in task_vids.items()}
primary_info = self._read_task_info(config.primary_path)
test_tasks = set(primary_info['steps'].keys())
# if args.use_related:
related_info = self._read_task_info(config.related_path)
task_steps = {**primary_info['steps'], **related_info['steps']}
n_steps = {**primary_info['n_steps'], **related_info['n_steps']}
# else:
# task_steps = primary_info['steps']
# n_steps = primary_info['n_steps']
all_tasks = set(n_steps.keys())
# filter and keep task in primary or related.
task_vids = {
task: vids for task, vids in task_vids.items()
if task in all_tasks}
# vocab-by-step matrix (A) and vocab (M)
# (huxu): we do not use BoW.
# A, M = self._get_A(task_steps, share="words")
train_vids, test_vids = self._random_split(
task_vids, test_tasks, config.n_train)
print("train_num_videos", sum(len(vids) for vids in train_vids.values()))
print("test_num_videos", sum(len(vids) for vids in test_vids.values()))
# added by huxu to automatically determine the split.
split_map = {
"train": train_vids,
"valid": test_vids,
"test": test_vids
}
task_vids = split_map[config.split]
self.vids = []
for task, vids in task_vids.items():
self.vids.extend([(task, vid) for vid in vids])
self.task_steps = task_steps
self.n_steps = n_steps
def __getitem__(self, idx):
task, vid = self.vids[idx]
n_steps = self.n_steps[task]
steps = self.task_steps[task]
assert len(steps) == n_steps
return (task, vid, steps, n_steps), (task, vid, steps, n_steps)
def __len__(self):
return len(self.vids)
def _random_split(self, task_vids, test_tasks, n_train):
train_vids = {}
test_vids = {}
for task, vids in task_vids.items():
if task in test_tasks and len(vids) > n_train:
train_vids[task] = np.random.choice(
vids, n_train, replace=False).tolist()
test_vids[task] = [
vid for vid in vids if vid not in train_vids[task]]
else:
train_vids[task] = vids
return train_vids, test_vids
def _get_vids(self, path, vfeat_dir, annotation_path):
"""refactored from
https://github.com/DmZhukov/CrossTask/blob/master/data.py
changes: add `vfeat_dir` to check if the video is available.
add `annotation_path` to check if the video is available.
"""
task_vids = {}
with open(path, 'r') as f:
for line in f:
task, vid, url = line.strip().split(',')
# double check the video is available.
if not os.path.exists(
os.path.join(vfeat_dir, vid + ".npy")):
continue
# double check the annotation is available.
if not os.path.exists(os.path.join(
annotation_path,
task + "_" + vid + ".csv")):
continue
if task not in task_vids:
task_vids[task] = []
task_vids[task].append(vid)
return task_vids
def _read_task_info(self, path):
titles = {}
urls = {}
n_steps = {}
steps = {}
with open(path, 'r') as f:
idx = f.readline()
while idx != '':
idx = idx.strip()
titles[idx] = f.readline().strip()
urls[idx] = f.readline().strip()
n_steps[idx] = int(f.readline().strip())
steps[idx] = f.readline().strip().split(',')
next(f)
idx = f.readline()
return {
'title': titles,
'url': urls,
'n_steps': n_steps,
'steps': steps
}
def _get_A(self, task_steps, share="words"):
raise ValueError("running get_A is not allowed for BERT.")
"""Step-to-component matrices."""
if share == 'words':
# share words
task_step_comps = {
task: [step.split(' ') for step in steps]
for task, steps in task_steps.items()}
elif share == 'task_words':
# share words within same task
task_step_comps = {
task: [[task+'_'+tok for tok in step.split(' ')] for step in steps]
for task, steps in task_steps.items()}
elif share == 'steps':
# share whole step descriptions
task_step_comps = {
task: [[step] for step in steps] for task, steps in task_steps.items()}
else:
# no sharing
task_step_comps = {
task: [[task+'_'+step] for step in steps]
for task, steps in task_steps.items()}
# BERT tokenizer here?
vocab = []
for task, steps in task_step_comps.items():
for step in steps:
vocab.extend(step)
vocab = {comp: m for m, comp in enumerate(set(vocab))}
M = len(vocab)
A = {}
for task, steps in task_step_comps.items():
K = len(steps)
a = torch.zeros(M, K)
for k, step in enumerate(steps):
a[[vocab[comp] for comp in step], k] = 1
a /= a.sum(dim=0)
A[task] = a
return A, M
class CrossTaskVideoProcessor(VideoProcessor):
def __call__(self, video_fn):
task, vid, steps, n_steps = video_fn
video_fn = os.path.join(self.vfeat_dir, vid + ".npy")
feat = np.load(video_fn)
return feat
class CrossTaskTextProcessor(TextProcessor):
def __call__(self, text_id):
task, vid, steps, n_steps = text_id
step_ids = []
for step_str in steps:
step_ids.append(
self.tokenizer(step_str, add_special_tokens=False)["input_ids"]
)
return step_ids
class CrossTaskAligner(Aligner):
"""
    TODO: the formulation of this task is not yet clear; finish this later.
"""
def __init__(self, config):
super().__init__(config)
self.annotation_path = config.annotation_path
self.sliding_window = config.sliding_window
self.sliding_window_size = config.sliding_window_size
def __call__(self, video_id, video_feature, text_feature):
task, vid, steps, n_steps = video_id
annot_path = os.path.join(
self.annotation_path, task + '_' + vid + '.csv')
video_len = len(video_feature)
labels = torch.from_numpy(self._read_assignment(
video_len, n_steps, annot_path)).float()
vfeats, vmasks, targets = [], [], []
# sliding window on video features and targets.
for window_start in range(0, video_len, self.sliding_window):
video_start = 0
video_end = min(video_len - window_start, self.sliding_window_size)
video_clip = {"start": [video_start], "end": [video_end]}
vfeat, vmask = self._build_video_seq(
video_feature[window_start: window_start + video_end],
video_clip
)
target = labels[window_start: window_start + video_end]
assert len(vfeat) >= len(target), "{},{}".format(len(vfeat), len(target))
# TODO: randomly drop all zero targets for training ?
# if self.split == "train" and target.sum() == 0:
# continue
vfeats.append(vfeat)
vmasks.append(vmask)
targets.append(target)
if (video_len - window_start) <= self.sliding_window_size:
break
vfeats = torch.stack(vfeats)
vmasks = torch.stack(vmasks)
targets = torch.cat(targets, dim=0)
caps, cmasks = [], []
for step in text_feature:
step_text_feature = {"start": [0], "end": [1], "cap": [step]}
step_text_clip_index = [0]
cap, cmask = self._build_text_seq(
step_text_feature, step_text_clip_index
)
caps.append(cap)
cmasks.append(cmask)
caps = torch.stack(caps)
cmasks = torch.stack(cmasks)
return {
"caps": caps,
"cmasks": cmasks,
"vfeats": vfeats, # X for original code.
"vmasks": vmasks,
"targets": targets,
"video_id": vid,
"task": task,
"video_len": video_len # for later checking.
}
def _read_assignment(self, T, K, path):
"""
refactored from https://github.com/DmZhukov/CrossTask/blob/master/data.py
        How to interpret the constraints on the loss that is going to be minimized:
        lambd is a big number;
        self.lambd * C is a big number for all valid positions (the csv stores the invalid ones)
        def forward(self, O, Y, C):
            return (Y*(self.lambd * C - self.lsm(O))).mean(dim=0).sum()
        This loads the csv file and fills in the step column from the start row to the end row.
"""
Y = np.zeros([T, K], dtype=np.uint8)
with open(path, 'r') as f:
for line in f:
step, start, end = line.strip().split(',')
start = int(math.floor(float(start)))
end = int(math.ceil(float(end)))
step = int(step) - 1
Y[start:end, step] = 1
return Y
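"""
# demo (hypothetical sketch): the (T, K) assignment matrix built by
# _read_assignment for a 6-second video with 2 steps, assuming a csv with
# "step,start,end" rows such as:
#   1,0.5,2.2
#   2,3.0,5.0
# floor/ceil expand each row and step ids become 0-based columns:
#   Y[0:3, 0] = 1   (step 1 covers seconds 0..2)
#   Y[3:5, 1] = 1   (step 2 covers seconds 3..4)
"""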
# --------------------- COIN -------------------------
class MetaTextBinarizer(Aligner):
def __call__(self, text_feature):
text_feature = {
"cap": [text_feature],
"start": [0.],
"end": [100.],
}
text_clip_indexs = [0]
caps, cmasks = self._build_text_seq(
text_feature, text_clip_indexs
)
return {"caps": caps, "cmasks": cmasks}
class COINActionSegmentationMetaProcessor(MetaProcessor):
split_map = {
"train": "training",
"valid": "testing",
"test": "testing",
}
def __init__(self, config):
super().__init__(config)
with open(self._get_split_path(config)) as fr:
database = json.load(fr)["database"]
id2label = {}
data = []
# filter the data by split.
for video_id, rec in database.items():
# always use testing to determine label_set
if rec["subset"] == "testing":
for segment in rec["annotation"]:
id2label[int(segment["id"])] = segment["label"]
# text_labels is used for ZS setting
self.text_labels = ["none"] * len(id2label)
for label_id in id2label:
self.text_labels[label_id-1] = id2label[label_id]
id2label[0] = "O"
print("num of labels", len(id2label))
for video_id, rec in database.items():
if not os.path.isfile(os.path.join(config.vfeat_dir, video_id + ".npy")):
continue
if rec["subset"] == COINActionSegmentationMetaProcessor.split_map[self.split]:
starts, ends, labels = [], [], []
for segment in rec["annotation"]:
start, end = segment["segment"]
label = int(segment["id"])
starts.append(start)
ends.append(end)
labels.append(label)
data.append(
(video_id, {"start": starts, "end": ends, "label": labels}))
self.data = data
def meta_text_labels(self, config):
from transformers import default_data_collator
from ..utils import get_local_rank
text_processor = TextProcessor(config)
binarizer = MetaTextBinarizer(config)
# TODO: add prompts to .yaml.
text_labels = [label for label in self.text_labels]
if get_local_rank() == 0:
print(text_labels)
outputs = []
for text_label in text_labels:
text_feature = text_processor(text_label)
outputs.append(binarizer(text_feature))
return default_data_collator(outputs)
def __getitem__(self, idx):
return self.data[idx]
class COINActionSegmentationTextProcessor(TextProcessor):
def __call__(self, text_label):
return text_label
class COINActionSegmentationAligner(Aligner):
def __init__(self, config):
super().__init__(config)
self.sliding_window = config.sliding_window
self.sliding_window_size = config.sliding_window_size
def __call__(self, video_id, video_feature, text_feature):
starts, ends, label_ids = text_feature["start"], text_feature["end"], text_feature["label"]
# sliding window.
video_len = len(video_feature)
vfeats, vmasks, targets = [], [], []
# sliding window on video features and targets.
for window_start in range(0, video_len, self.sliding_window):
video_start = 0
video_end = min(video_len - window_start, self.sliding_window_size)
video_clip = {"start": [video_start], "end": [video_end]}
vfeat, vmask = self._build_video_seq(
video_feature[window_start: window_start + video_end],
video_clip
)
# covers video length only.
target = torch.full_like(vmask, -100, dtype=torch.long)
target[vmask] = 0
for start, end, label_id in zip(starts, ends, label_ids):
if (window_start < end) and (start < (window_start + video_end)):
start_offset = max(0, math.floor(start) - window_start)
end_offset = min(video_end, math.ceil(end) - window_start)
target[start_offset:end_offset] = label_id
vfeats.append(vfeat)
vmasks.append(vmask)
targets.append(target)
if (video_len - window_start) <= self.sliding_window_size:
break
vfeats = torch.stack(vfeats)
vmasks = torch.stack(vmasks)
targets = torch.stack(targets)
video_targets = torch.full((video_len,), 0)
for start, end, label_id in zip(starts, ends, label_ids):
start_offset = max(0, math.floor(start))
end_offset = min(video_len, math.ceil(end))
video_targets[start_offset:end_offset] = label_id
caps = torch.LongTensor(
[[self.cls_token_id, self.sep_token_id,
self.pad_token_id, self.sep_token_id]],
).repeat(vfeats.size(0), 1)
cmasks = torch.BoolTensor(
[[0, 1, 0, 1]] # pad are valid for attention.
).repeat(vfeats.size(0), 1)
return {
"caps": caps,
"cmasks": cmasks,
"vfeats": vfeats, # X for original code.
"vmasks": vmasks,
"targets": targets,
"video_id": video_id,
"video_len": video_len, # for later checking.
"video_targets": video_targets
}
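"""
# demo (hypothetical sketch): sliding-window coverage in
# COINActionSegmentationAligner, assuming sliding_window=16,
# sliding_window_size=32 and a 40-second video.
# window_start=0  -> video_end=32 (covers seconds 0..31)
# window_start=16 -> video_end=24 (covers seconds 16..39), then the loop breaks
# because (video_len - window_start) <= sliding_window_size.
"""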
class DiDeMoMetaProcessor(MetaProcessor):
"""reference: https://github.com/LisaAnne/LocalizingMoments/blob/master/utils/eval.py
https://github.com/LisaAnne/LocalizingMoments/blob/master/utils/data_processing.py
"""
def __init__(self, config):
super().__init__(config)
assert "test" in self._get_split_path(config), "DiDeMo only supports zero-shot testing for now."
with open(self._get_split_path(config)) as data_file:
json_data = json.load(data_file)
data = []
for record in json_data:
data.append((record["video"], record["description"]))
self.data = data
def __len__(self):
return len(self.data)
def __getitem__(self, idx):
return self.data[idx]
class DiDeMoTextProcessor(TextProcessor):
"""reference: https://github.com/LisaAnne/LocalizingMoments/blob/master/utils/eval.py
https://github.com/LisaAnne/LocalizingMoments/blob/master/utils/data_processing.py
"""
def __call__(self, text):
return self.tokenizer(text, add_special_tokens=False)["input_ids"]
class DiDeMoAligner(DSAligner):
"""
check video length.
"""
def __call__(self, video_id, video_feature, text_feature):
# print(video_feature.shape[0])
return super().__call__(video_id, video_feature, text_feature)
| EXA-1-master | exa/libraries/fairseq/examples/MMPT/mmpt/processors/dsprocessor.py |
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Copyright (c) Facebook, Inc. All Rights Reserved
import torch
import math
import pickle
import random
import os
import numpy as np
from collections import deque
from typing import Optional, Tuple, List
from .processor import (
Processor,
MetaProcessor,
TextProcessor,
Aligner,
MMAttentionMask2DProcessor
)
from ..utils import ShardedTensor
class How2MetaProcessor(MetaProcessor):
def __init__(self, config):
super().__init__(config)
path = self._get_split_path(config)
with open(path) as fd:
self.data = [line.strip() for line in fd]
def __getitem__(self, idx):
video_id = self.data[idx]
return video_id, video_id
class ShardedHow2MetaProcessor(How2MetaProcessor):
def __init__(self, config):
super().__init__(config)
self.split = str(config.split)
self.vfeat_dir = config.vfeat_dir
self._init_shard()
def _init_shard(self):
if self.split == "train":
meta_fn = os.path.join(self.vfeat_dir, "train" + "_meta.pkl")
with open(meta_fn, "rb") as fr:
meta = pickle.load(fr)
elif self.split == "valid":
meta_fn = os.path.join(self.vfeat_dir, "val" + "_meta.pkl")
with open(meta_fn, "rb") as fr:
meta = pickle.load(fr)
elif self.split == "test":
print("use how2 val as test.")
meta_fn = os.path.join(self.vfeat_dir, "val" + "_meta.pkl")
with open(meta_fn, "rb") as fr:
meta = pickle.load(fr)
else:
raise ValueError("unsupported for MetaProcessor:", self.split)
video_id_to_shard = {}
for shard_id in meta:
for video_idx, video_id in enumerate(meta[shard_id]):
video_id_to_shard[video_id] = (shard_id, video_idx)
self.video_id_to_shard = video_id_to_shard
def __getitem__(self, idx):
video_id, video_id = super().__getitem__(idx)
shard_id, shard_idx = self.video_id_to_shard[video_id]
meta = (video_id, idx, shard_id, shard_idx)
return meta, meta
class ShardedVideoProcessor(Processor):
"""
mmaped shards of numpy video features.
"""
def __init__(self, config):
self.split = str(config.split)
self.vfeat_dir = config.vfeat_dir
def __call__(self, video_id):
_, _, shard_id, video_idx = video_id
if self.split == "train":
shard = ShardedTensor.load(
os.path.join(self.vfeat_dir, "train" + "_" + str(shard_id)),
"r"
)
elif self.split == "valid":
shard = ShardedTensor.load(
os.path.join(self.vfeat_dir, "val" + "_" + str(shard_id)),
"r"
)
elif self.split == "test":
shard = ShardedTensor.load(
os.path.join(self.vfeat_dir, "val" + "_" + str(shard_id)),
"r"
)
else:
raise ValueError("unknown split", self.split)
feat = shard[video_idx]
return feat
class ShardedTextProcessor(Processor):
def __init__(self, config):
self.tfeat_dir = str(config.tfeat_dir)
self.split = str(config.split)
def __call__(self, video_id):
_, _, shard_id, shard_idx = video_id
if self.split == "train":
target_path = self.tfeat_dir + "train" + "_" + str(shard_id)
elif self.split == "valid":
target_path = self.tfeat_dir + "val" + "_" + str(shard_id)
elif self.split == "test":
target_path = self.tfeat_dir + "val" + "_" + str(shard_id)
else:
raise ValueError("unknown split", self.split)
startend = ShardedTensor.load(
target_path + ".startends", "r")[shard_idx]
cap_ids = ShardedTensor.load(
target_path + ".caps_ids", "r")[shard_idx]
cap = []
for clip_idx in range(len(cap_ids)):
clip = cap_ids[clip_idx]
cap.append(clip[clip != -1].tolist())
start, end = startend[:, 0].tolist(), startend[:, 1].tolist()
return {"start": start, "end": end, "cap": cap}
class FixedLenAligner(Aligner):
"""
In the model we assume text is on the left (closer to BERT formulation)
and video is on the right.
We fix the total length of text + video.
max_video_len is in number of secs.
max_text_len is in number of tokens.
special tokens formats:
we use the format [CLS] [SEP] text tokens [SEP] [PAD] ...
    [CLS] will be split out into:
[CLS] video tokens [SEP] text tokens [SEP] [PAD] ...
token_type_ids will be generated by the model (for now).
0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1
| first sequence | second sequence |
so each sequence owns a [SEP] token for no-ops.
"""
def __init__(self, config):
super().__init__(config)
self.text_clip_sampler = TextClipSamplingProcessor(
self.max_len - self.max_video_len - 3
)
"""
decide subsampling:
`config.subsampling` will change batch_size in trainer.
`config.clip_per_video` (used by RetriTask) doesn't
change batch_size in trainer.
"""
subsampling = config.subsampling \
if config.subsampling is not None else None
if config.clip_per_video is not None:
subsampling = config.clip_per_video
self.subsampling = subsampling
def _get_text_maxlen(self):
# use max text len
return self.text_clip_sampler.max_text_len
def __call__(self, video_id, video_feature, text_feature):
from transformers import default_data_collator
video_idx = video_id[1]
if self.subsampling is not None and self.subsampling >= 1:
batch = []
for _ in range(self.subsampling):
centerclip_idx = random.randint(
0, len(text_feature["start"]) - 1)
batch.append(
self.sampling(
video_idx,
video_feature,
text_feature,
centerclip_idx,
self._get_text_maxlen()
))
batch = self.batch_post_processing(batch, video_feature)
batch = default_data_collator(batch)
        else:
            raise ValueError(
                "dataset.subsampling must be >= 1 for efficient video loading.")
batch["video_id"] = video_id if isinstance(video_id, str) \
else video_id[0]
# e2e: make sure frame ids is into tensor.
assert torch.is_tensor(batch["vfeats"])
return batch
def sampling(
self,
video_idx,
video_feature,
text_feature,
centerclip_idx=None,
sampled_max_text_len=None,
):
text_clip_indexs = self.text_clip_sampler(
text_feature, centerclip_idx,
sampled_max_text_len
)
if isinstance(video_feature, np.ndarray):
video_len = len(video_feature)
else:
video_len = math.ceil(text_feature["end"][-1])
video_end = min(
math.ceil(text_feature["end"][text_clip_indexs[-1]]),
video_len
)
video_start = max(
min(
math.floor(text_feature["start"][text_clip_indexs[0]]),
video_end),
0
)
video_clips = {"start": [video_start], "end": [video_end]}
# tensorize.
vfeats, vmasks = self._build_video_seq(
video_feature, video_clips
)
caps, cmasks = self._build_text_seq(
text_feature, text_clip_indexs
)
text_start = text_clip_indexs[0]
text_end = text_clip_indexs[-1] + 1
return {
"caps": caps,
"cmasks": cmasks,
"vfeats": vfeats,
"vmasks": vmasks,
"video_start": video_start,
"video_end": video_end,
"text_start": text_start,
"text_end": text_end,
}
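"""
# demo (hypothetical sketch): with dataset.subsampling=2, FixedLenAligner.__call__
# samples two center clips from the same video and collates them, so a single
# __getitem__ already yields a small batch:
#   batch["caps"]   -> (2, max_len - max_video_len)
#   batch["vfeats"] -> (2, max_video_len, vfeat_dim)
#   batch["vmasks"] -> (2, max_video_len)
"""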
class VariedLenAligner(FixedLenAligner):
def __init__(self, config):
super().__init__(config)
self.sampled_min_len = config.sampled_min_len
self.sampled_max_len = config.sampled_max_len
def _get_text_maxlen(self):
return random.randint(self.sampled_min_len, self.sampled_max_len)
class StartClipAligner(VariedLenAligner):
def sampling(
self,
video_idx,
video_feature,
text_feature,
centerclip_idx=None,
sampled_max_text_len=None,
):
return super().sampling(
video_idx, video_feature, text_feature, 0)
class OverlappedAligner(VariedLenAligner):
"""video clip and text clip has overlappings
but may not be the same start/end."""
def __init__(self, config):
super().__init__(config)
self.sampled_video_min_len = config.sampled_video_min_len
self.sampled_video_max_len = config.sampled_video_max_len
self.video_clip_sampler = VideoClipSamplingProcessor()
def _get_video_maxlen(self):
return random.randint(
self.sampled_video_min_len, self.sampled_video_max_len)
def sampling(
self,
video_idx,
video_feature,
text_feature,
centerclip_idx=None,
sampled_max_text_len=None,
):
text_clip_indexs = self.text_clip_sampler(
text_feature, centerclip_idx,
sampled_max_text_len
)
if isinstance(video_feature, np.ndarray):
video_len = len(video_feature)
else:
video_len = math.ceil(text_feature["end"][-1])
low = math.floor(text_feature["start"][text_clip_indexs[0]])
high = math.ceil(text_feature["end"][text_clip_indexs[-1]])
if low < high:
center = random.randint(low, high)
else:
center = int((low + high) // 2)
center = max(0, min(video_feature.shape[0] - 1, center))
assert 0 <= center < video_feature.shape[0]
video_clips = self.video_clip_sampler(
video_len, self._get_video_maxlen(), center
)
video_start = video_clips["start"][0]
video_end = video_clips["end"][0]
# tensorize.
vfeats, vmasks = self._build_video_seq(
video_feature, video_clips
)
caps, cmasks = self._build_text_seq(
text_feature, text_clip_indexs
)
text_start = text_clip_indexs[0]
text_end = text_clip_indexs[-1] + 1
return {
"caps": caps,
"cmasks": cmasks,
"vfeats": vfeats,
"vmasks": vmasks,
"video_start": video_start,
"video_end": video_end,
"text_start": text_start,
"text_end": text_end,
}
class MFMMLMAligner(FixedLenAligner):
"""
`FixedLenAligner` with Masked Language Model and Masked Frame Model.
"""
def __init__(self, config):
super().__init__(config)
keep_prob = config.keep_prob if config.keep_prob is not None else 1.0
self.text_clip_sampler = TextClipSamplingProcessor(
self.max_len - self.max_video_len - 3, keep_prob
)
self.sampled_min_len = config.sampled_min_len
self.sampled_max_len = config.sampled_max_len
self.masked_token_sampler = TextMaskingProcessor(config)
self.mm_type = config.mm_type \
if config.mm_type is not None else "full"
self.attnmasker = MMAttentionMask2DProcessor() \
if self.mm_type == "textgen" else None
self.masked_frame_sampler = FrameMaskingProcessor(config)
self.lazy_vfeat_mask = (
False if config.lazy_vfeat_mask is None else config.lazy_vfeat_mask
)
self.mm_prob = config.mm_prob if config.mm_prob is not None else 0.
def __call__(self, video_id, video_feature, text_feature):
from transformers import default_data_collator
if self.subsampling is not None and self.subsampling > 1:
batch = []
for _ in range(self.subsampling):
centerclip_idx = random.randint(
0, len(text_feature["start"]) - 1)
sampled_max_text_len = random.randint(
self.sampled_min_len, self.sampled_max_len
)
batch.append(
self.sampling(
video_id,
video_feature,
text_feature,
centerclip_idx,
sampled_max_text_len,
)
)
batch = self.batch_post_processing(batch, video_feature)
batch = default_data_collator(batch)
else:
batch = self.sampling(video_id, video_feature, text_feature)
batch = self.batch_post_processing(batch, video_feature)
batch["video_id"] = video_id if isinstance(video_id, str) \
else video_id[0]
return batch
def sampling(
self,
video_id,
video_feature,
text_feature,
centerclip_idx=None,
sampled_max_text_len=None,
):
output = FixedLenAligner.sampling(self,
video_id, video_feature, text_feature,
centerclip_idx, sampled_max_text_len)
masking_text, masking_video = None, None
if random.random() < self.mm_prob:
if random.random() > 0.5:
masking_text, masking_video = self.mm_type, "no"
else:
masking_text, masking_video = "no", "full"
video_feats = output["vfeats"] if not self.lazy_vfeat_mask else None
video_label = self.masked_frame_sampler(
output["vmasks"], masking_video, vfeats=video_feats)
caps, text_label = self.masked_token_sampler(
output["caps"], masking_text)
output.update({
"caps": caps,
"video_label": video_label,
"text_label": text_label,
})
if self.attnmasker is not None:
attention_mask = self.attnmasker(
output["vmasks"], output["cmasks"], masking_text)
output.update({
"attention_mask": attention_mask
})
return output
class FrameMaskingProcessor(Processor):
def __init__(self, config):
self.mfm_probability = 0.15
if config.mfm_probability is not None:
self.mfm_probability = config.mfm_probability
def __call__(self, vmasks, modality_masking=None, vfeats=None):
"""
        We perform lazy masking to save data transfer time.
        By default it only generates `video_label`; the MFM model
        performs the actual masking.
        Return: `video_label` is a binary mask.
"""
video_label = vmasks.clone()
if modality_masking is not None:
if modality_masking == "full":
probability_matrix = torch.full(video_label.shape, 1.)
elif modality_masking == "no":
probability_matrix = torch.full(video_label.shape, 0.)
elif modality_masking == "inverse":
probability_matrix = torch.full(
video_label.shape, 1. - self.mfm_probability)
else:
raise ValueError("unknown modality masking.", modality_masking)
else:
probability_matrix = torch.full(
video_label.shape, self.mfm_probability)
masked_indices = torch.bernoulli(probability_matrix).bool()
# We only compute loss on masked tokens
video_label[~masked_indices] = 0
if vfeats is not None:
vfeats[video_label, :] = 0.0
return video_label
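# A hypothetical, minimal usage sketch of the frame masker above, kept in
# comments so module behavior is unchanged. The `config` object and its
# `mfm_probability` field are assumptions that mirror the aligner configs.
#
#   sampler = FrameMaskingProcessor(config)       # e.g. config.mfm_probability = 0.15
#   vmasks = torch.ones(32, dtype=torch.bool)     # 32 valid video tokens
#   vfeats = torch.randn(32, 512)
#   video_label = sampler(vmasks, vfeats=vfeats)  # bool mask, ~15% True positions
#   # rows of `vfeats` selected by `video_label` are zeroed in place; the MFM
#   # head later computes the loss only on those positions.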
class TextGenerationProcessor(Processor):
def __init__(self, tokenizer):
self.bos_token_id = tokenizer.bos_token_id
self.pad_token_id = tokenizer.pad_token_id
def __call__(self, inputs):
labels = inputs.clone()
# [CLS] [SEP] for video
labels[:2] = -100
# keep [SEP] for text.
pad_mask = labels == self.pad_token_id
labels[pad_mask] = -100
inputs[2:] = torch.cat([
torch.LongTensor([self.bos_token_id]),
inputs[2:-1]])
inputs[pad_mask] = self.pad_token_id
assert len(inputs) == len(labels)
return inputs, labels
class TextMaskingProcessor(Processor):
def __init__(self, config):
"""this function is borrowed from
`transformers/data/data_collator.DataCollatorForLanguageModeling`"""
self.mlm_probability = 0.15
if config.mlm_probability is not None:
self.mlm_probability = config.mlm_probability
self.bert_name = config.bert_name
# [CLS] is used as bos_token and [SEP] is used as eos_token.
# https://huggingface.co/transformers/master/model_doc/bertgeneration.html
from transformers import AutoTokenizer
self.tokenizer = AutoTokenizer.from_pretrained(
self.bert_name, bos_token="[CLS]", eos_token="[SEP]")
self.textgen = TextGenerationProcessor(self.tokenizer)
def __call__(
self, inputs: torch.Tensor,
modality_masking=None,
special_tokens_mask: Optional[torch.Tensor] = None
) -> Tuple[torch.Tensor, torch.Tensor]:
"""
expand modality_masking into
None: traditional bert masking.
"no": no masking.
"full": all [MASK] token for generation.
"gen": autoregressive generation.
"""
"""
Prepare masked tokens inputs/labels for masked language modeling:
80% MASK, 10% random, 10% original.
"""
labels = inputs.clone()
# We sample a few tokens in each sequence for MLM training
# (with probability `self.mlm_probability`)
if modality_masking is not None:
if modality_masking == "full":
probability_matrix = torch.full(labels.shape, 1.)
elif modality_masking == "no":
probability_matrix = torch.full(labels.shape, 0.)
elif modality_masking.startswith("textgen"):
# [CLS] [SEP] <s> ...
inputs, labels = self.textgen(inputs)
if "mask" not in modality_masking:
return inputs, labels
inputs = self.mask_input(inputs, special_tokens_mask)
return inputs, labels
elif modality_masking == "mask":
inputs = self.mask_input(inputs, special_tokens_mask)
labels = torch.full(inputs.shape, -100)
return inputs, labels
elif modality_masking == "inverse":
probability_matrix = torch.full(labels.shape, 1. - self.mlm_probability)
else:
raise ValueError("unknown modality masking.", modality_masking)
else:
probability_matrix = torch.full(labels.shape, self.mlm_probability)
if special_tokens_mask is None:
special_tokens_mask = self.get_special_tokens_mask(
labels.tolist(), already_has_special_tokens=True
)
special_tokens_mask = torch.tensor(
special_tokens_mask, dtype=torch.bool)
else:
special_tokens_mask = special_tokens_mask.bool()
probability_matrix.masked_fill_(special_tokens_mask, value=0.0)
masked_indices = torch.bernoulli(probability_matrix).bool()
labels[~masked_indices] = -100 # We only compute loss on masked tokens
# 80% of the time,
# we replace masked input tokens with tokenizer.mask_token ([MASK])
indices_replaced = (
torch.bernoulli(
torch.full(labels.shape, 0.8)).bool() & masked_indices
)
inputs[indices_replaced] = self.tokenizer.convert_tokens_to_ids(
self.tokenizer.mask_token
)
# 10% of the time, we replace masked input tokens with random word
indices_random = (
torch.bernoulli(torch.full(labels.shape, 0.5)).bool()
& masked_indices
& ~indices_replaced
)
random_words = torch.randint(
len(self.tokenizer), labels.shape, dtype=torch.long
)
inputs[indices_random] = random_words[indices_random]
# The rest of the time (10% of the time) we keep the masked input
# tokens unchanged
return inputs, labels
def mask_input(self, inputs, special_tokens_mask=None):
# the following is new with masked autoregressive.
probability_matrix = torch.full(
inputs.shape, self.mlm_probability)
if special_tokens_mask is None:
special_tokens_mask = self.get_special_tokens_mask(
inputs.tolist(), already_has_special_tokens=True
)
special_tokens_mask = torch.tensor(
special_tokens_mask, dtype=torch.bool)
else:
special_tokens_mask = special_tokens_mask.bool()
probability_matrix.masked_fill_(special_tokens_mask, value=0.0)
masked_indices = torch.bernoulli(probability_matrix).bool()
indices_replaced = (
torch.bernoulli(
torch.full(inputs.shape, 0.8)).bool() & masked_indices
)
inputs[indices_replaced] = self.tokenizer.convert_tokens_to_ids(
self.tokenizer.mask_token
)
# 10% of the time, we replace masked input tokens with random word
indices_random = (
torch.bernoulli(torch.full(inputs.shape, 0.5)).bool()
& masked_indices
& ~indices_replaced
)
random_words = torch.randint(
len(self.tokenizer), inputs.shape, dtype=torch.long
)
inputs[indices_random] = random_words[indices_random]
return inputs
def get_special_tokens_mask(
self, token_ids_0: List[int],
token_ids_1: Optional[List[int]] = None,
already_has_special_tokens: bool = False
) -> List[int]:
"""
        Note: the version from transformers does not treat pad
        as a special token.
"""
if already_has_special_tokens:
if token_ids_1 is not None:
raise ValueError(
"You should not supply a second sequence if"
"the provided sequence of "
"ids is already formated with special tokens "
"for the model."
)
return list(map(lambda x: 1 if x in [
self.tokenizer.sep_token_id,
self.tokenizer.cls_token_id,
self.tokenizer.pad_token_id] else 0, token_ids_0))
if token_ids_1 is not None:
return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
return [1] + ([0] * len(token_ids_0)) + [1]
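# A hypothetical sketch of the masking modes above (comments only). The token
# ids and the `config` fields (`bert_name`, `mlm_probability`) are assumptions.
#
#   masker = TextMaskingProcessor(config)          # e.g. bert-base-uncased, p=0.15
#   caps = torch.LongTensor([101, 102, 2023, 2003, 1037, 7953, 102, 0, 0])
#   # default BERT-style MLM: ~15% of non-special tokens get labels; each of
#   # those is replaced by [MASK] 80% of the time, a random id 10%, kept 10%.
#   inputs, labels = masker(caps.clone())
#   # "full": every non-special position is selected before the 80/10/10 step.
#   inputs, labels = masker(caps.clone(), modality_masking="full")
#   # "textgen": inputs are shifted right with a [BOS] after [CLS] [SEP];
#   # labels keep the original ids (-100 on the leading specials and padding).
#   inputs, labels = masker(caps.clone(), modality_masking="textgen")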
class TextClipSamplingProcessor(Processor):
def __init__(self, max_text_len, keep_prob=1.0):
self.max_text_len = max_text_len
        self.max_video_len = 256  # always holds.
self.keep_prob = keep_prob
def __call__(
self,
text_feature,
centerclip_idx=None,
sampled_max_text_len=None,
sampled_max_video_len=None,
):
# Let's use all caps for now and see if 256 can cover all of them.
if sampled_max_text_len is not None:
max_text_len = sampled_max_text_len
else:
max_text_len = self.max_text_len
if sampled_max_video_len is not None:
max_video_len = sampled_max_video_len
else:
max_video_len = self.max_video_len
t_num_clips = len(text_feature["start"])
if centerclip_idx is None:
centerclip_idx = random.randint(0, t_num_clips - 1)
start_idx, end_idx = centerclip_idx, centerclip_idx + 1
text_clip_indexs = deque()
text_clip_indexs.append(start_idx)
text_len = len(text_feature["cap"][start_idx])
video_len = max(
0,
text_feature["end"][start_idx]
- text_feature["start"][start_idx],
)
while (
(start_idx > 0 or end_idx < t_num_clips)
and text_len < max_text_len
and video_len < max_video_len
):
if random.random() > 0.5 and end_idx < t_num_clips:
# skip the next one?
if random.random() > self.keep_prob and (end_idx + 1) < t_num_clips:
end_idx = end_idx + 1
text_clip_indexs.append(end_idx)
text_len += len(text_feature["cap"][end_idx])
end_idx += 1
elif start_idx > 0:
if random.random() > self.keep_prob and (start_idx - 1) > 0:
start_idx = start_idx - 1
start_idx -= 1
text_clip_indexs.insert(0, start_idx)
text_len += len(text_feature["cap"][start_idx])
else:
if end_idx < t_num_clips:
if random.random() > self.keep_prob and (end_idx + 1) < t_num_clips:
end_idx = end_idx + 1
text_clip_indexs.append(end_idx)
text_len += len(text_feature["cap"][end_idx])
end_idx += 1
else:
return text_clip_indexs
video_len = max(
0,
text_feature["end"][text_clip_indexs[-1]]
- text_feature["start"][text_clip_indexs[0]],
)
return text_clip_indexs
class VideoClipSamplingProcessor(Processor):
def __call__(self, video_len, max_video_len, center):
"""
`video_len`: length of the video.
        `max_video_len`: maximum video tokens allowed in a sequence.
`center`: initial starting index.
"""
assert center >= 0 and center < video_len
t_clip_len = 0
start, end = center, center
while (start > 0 or end < video_len) and t_clip_len < max_video_len:
# decide the direction to grow.
if start <= 0:
end += 1
elif end >= video_len:
start -= 1
elif random.random() > 0.5:
end += 1
else:
start -= 1
t_clip_len += 1
return {"start": [start], "end": [end]}
class How2MILNCEAligner(FixedLenAligner):
"""reference: `antoine77340/MIL-NCE_HowTo100M/video_loader.py`"""
def __init__(self, config):
super().__init__(config)
self.num_candidates = 4
self.min_time = 5.0
self.num_sec = 3.2
        # self.num_sec = num_frames / fps = 16 / 5 = 3.2
        # self.num_frames = 16
def sampling(
self,
video_id,
video_feature,
text_feature,
centerclip_idx=None, # will be ignored.
sampled_max_text_len=None # will be ignored.
):
text, start, end = self._get_text(text_feature)
video = self._get_video(video_feature, start, end)
vfeats = torch.zeros((self.max_video_len, video_feature.shape[1]))
vmasks = torch.zeros((self.max_video_len,), dtype=torch.bool)
vfeats[: video.shape[0]] = torch.from_numpy(np.array(video))
vmasks[: video.shape[0]] = 1
caps, cmasks = [], []
for words in text:
cap, cmask = self._build_text_seq(text_feature, words)
caps.append(cap)
cmasks.append(cmask)
caps = torch.stack(caps)
cmasks = torch.stack(cmasks)
# video of shape: (video_len)
# text of shape (num_candidates, max_text_len)
return {
"caps": caps,
"cmasks": cmasks,
"vfeats": vfeats,
"vmasks": vmasks,
# "video_id": video_id,
}
def _get_video(self, video_feature, start, end):
start_seek = random.randint(start, int(max(start, end - self.num_sec)))
# duration = self.num_sec + 0.1
return video_feature[start_seek : int(start_seek + self.num_sec)]
def _get_text(self, cap):
ind = random.randint(0, len(cap["start"]) - 1)
if self.num_candidates == 1:
words = [ind]
else:
words = []
cap_start = self._find_nearest_candidates(cap, ind)
for i in range(self.num_candidates):
words.append([max(0, min(len(cap["cap"]) - 1, cap_start + i))])
start, end = cap["start"][ind], cap["end"][ind]
# TODO: May need to be improved for edge cases.
# expand the min time.
if end - start < self.min_time:
diff = self.min_time - end + start
start = max(0, start - diff / 2)
end = start + self.min_time
return words, int(start), int(end)
def _find_nearest_candidates(self, caption, ind):
"""find the range of the clips."""
start, end = ind, ind
#diff = caption["end"][end] - caption["start"][start]
n_candidate = 1
while n_candidate < self.num_candidates:
# the first clip
if start == 0:
return 0
# we add () in the following condition to fix the bug.
elif end == (len(caption["start"]) - 1):
return start - (self.num_candidates - n_candidate)
elif (caption["end"][end] - caption["start"][start - 1]) < (
caption["end"][end + 1] - caption["start"][start]
):
start -= 1
else:
end += 1
n_candidate += 1
return start
class PKLJSONStrTextProcessor(TextProcessor):
"""`caption.json` from howto100m are preprocessed as a
dict `[video_id, json_str]`.
Json parsing tokenization are conducted on-the-fly and cached into dict.
"""
def __init__(self, config, max_clip_text_len=96):
print("[Warning] PKLJSONStrTextProcessor is slow for num_workers > 0.")
self.caption_pkl_path = str(config.caption_pkl_path)
with open(self.caption_pkl_path, "rb") as fd:
self.data = pickle.load(fd)
self.max_clip_text_len = max_clip_text_len
from transformers import AutoTokenizer
self.tokenizer = AutoTokenizer.from_pretrained(
str(config.bert_name), use_fast=config.use_fast
)
def __call__(self, video_id):
caption = self.data[video_id]
if isinstance(caption, str):
import json
caption = json.loads(caption)
cap = []
for clip_idx, text_clip in enumerate(caption["text"]):
clip_ids = []
if isinstance(text_clip, str):
clip_ids = self.tokenizer(
text_clip[: self.max_clip_text_len],
add_special_tokens=False
)["input_ids"]
cap.append(clip_ids)
caption["cap"] = cap
caption.pop("text") # save space.
self.data[video_id] = caption
return caption
| EXA-1-master | exa/libraries/fairseq/examples/MMPT/mmpt/processors/how2processor.py |
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Contains a PyTorch definition for Gated Separable 3D network (S3D-G)
with a text module for computing joint text-video embedding from raw text
and video input. The following code will enable you to load the HowTo100M
pretrained S3D Text-Video model from:
A. Miech, J.-B. Alayrac, L. Smaira, I. Laptev, J. Sivic and A. Zisserman,
End-to-End Learning of Visual Representations from Uncurated Instructional Videos.
https://arxiv.org/abs/1912.06430.
S3D-G was proposed by:
S. Xie, C. Sun, J. Huang, Z. Tu and K. Murphy,
Rethinking Spatiotemporal Feature Learning For Video Understanding.
https://arxiv.org/abs/1712.04851.
Tensorflow code: https://github.com/tensorflow/models/blob/master/research/slim/nets/s3dg.py
The S3D architecture was slightly modified with a space to depth trick for TPU
optimization.
"""
import torch as th
import torch.nn.functional as F
import torch.nn as nn
import os
import numpy as np
import re
class InceptionBlock(nn.Module):
def __init__(
self,
input_dim,
num_outputs_0_0a,
num_outputs_1_0a,
num_outputs_1_0b,
num_outputs_2_0a,
num_outputs_2_0b,
num_outputs_3_0b,
gating=True,
):
super(InceptionBlock, self).__init__()
self.conv_b0 = STConv3D(input_dim, num_outputs_0_0a, [1, 1, 1])
self.conv_b1_a = STConv3D(input_dim, num_outputs_1_0a, [1, 1, 1])
self.conv_b1_b = STConv3D(
num_outputs_1_0a, num_outputs_1_0b, [3, 3, 3], padding=1, separable=True
)
self.conv_b2_a = STConv3D(input_dim, num_outputs_2_0a, [1, 1, 1])
self.conv_b2_b = STConv3D(
num_outputs_2_0a, num_outputs_2_0b, [3, 3, 3], padding=1, separable=True
)
self.maxpool_b3 = th.nn.MaxPool3d((3, 3, 3), stride=1, padding=1)
self.conv_b3_b = STConv3D(input_dim, num_outputs_3_0b, [1, 1, 1])
self.gating = gating
self.output_dim = (
num_outputs_0_0a + num_outputs_1_0b + num_outputs_2_0b + num_outputs_3_0b
)
if gating:
self.gating_b0 = SelfGating(num_outputs_0_0a)
self.gating_b1 = SelfGating(num_outputs_1_0b)
self.gating_b2 = SelfGating(num_outputs_2_0b)
self.gating_b3 = SelfGating(num_outputs_3_0b)
def forward(self, input):
"""Inception block
"""
b0 = self.conv_b0(input)
b1 = self.conv_b1_a(input)
b1 = self.conv_b1_b(b1)
b2 = self.conv_b2_a(input)
b2 = self.conv_b2_b(b2)
b3 = self.maxpool_b3(input)
b3 = self.conv_b3_b(b3)
if self.gating:
b0 = self.gating_b0(b0)
b1 = self.gating_b1(b1)
b2 = self.gating_b2(b2)
b3 = self.gating_b3(b3)
return th.cat((b0, b1, b2, b3), dim=1)
class SelfGating(nn.Module):
def __init__(self, input_dim):
super(SelfGating, self).__init__()
self.fc = nn.Linear(input_dim, input_dim)
def forward(self, input_tensor):
"""Feature gating as used in S3D-G.
"""
spatiotemporal_average = th.mean(input_tensor, dim=[2, 3, 4])
weights = self.fc(spatiotemporal_average)
weights = th.sigmoid(weights)
return weights[:, :, None, None, None] * input_tensor
class STConv3D(nn.Module):
def __init__(
self, input_dim, output_dim, kernel_size, stride=1, padding=0, separable=False
):
super(STConv3D, self).__init__()
self.separable = separable
self.relu = nn.ReLU(inplace=True)
assert len(kernel_size) == 3
if separable and kernel_size[0] != 1:
spatial_kernel_size = [1, kernel_size[1], kernel_size[2]]
temporal_kernel_size = [kernel_size[0], 1, 1]
if isinstance(stride, list) and len(stride) == 3:
spatial_stride = [1, stride[1], stride[2]]
temporal_stride = [stride[0], 1, 1]
else:
spatial_stride = [1, stride, stride]
temporal_stride = [stride, 1, 1]
if isinstance(padding, list) and len(padding) == 3:
spatial_padding = [0, padding[1], padding[2]]
temporal_padding = [padding[0], 0, 0]
else:
spatial_padding = [0, padding, padding]
temporal_padding = [padding, 0, 0]
if separable:
self.conv1 = nn.Conv3d(
input_dim,
output_dim,
kernel_size=spatial_kernel_size,
stride=spatial_stride,
padding=spatial_padding,
bias=False,
)
self.bn1 = nn.BatchNorm3d(output_dim)
self.conv2 = nn.Conv3d(
output_dim,
output_dim,
kernel_size=temporal_kernel_size,
stride=temporal_stride,
padding=temporal_padding,
bias=False,
)
self.bn2 = nn.BatchNorm3d(output_dim)
else:
self.conv1 = nn.Conv3d(
input_dim,
output_dim,
kernel_size=kernel_size,
stride=stride,
padding=padding,
bias=False,
)
self.bn1 = nn.BatchNorm3d(output_dim)
def forward(self, input):
out = self.relu(self.bn1(self.conv1(input)))
if self.separable:
out = self.relu(self.bn2(self.conv2(out)))
return out
class MaxPool3dTFPadding(th.nn.Module):
def __init__(self, kernel_size, stride=None, padding="SAME"):
super(MaxPool3dTFPadding, self).__init__()
if padding == "SAME":
padding_shape = self._get_padding_shape(kernel_size, stride)
self.padding_shape = padding_shape
self.pad = th.nn.ConstantPad3d(padding_shape, 0)
self.pool = th.nn.MaxPool3d(kernel_size, stride, ceil_mode=True)
def _get_padding_shape(self, filter_shape, stride):
def _pad_top_bottom(filter_dim, stride_val):
pad_along = max(filter_dim - stride_val, 0)
pad_top = pad_along // 2
pad_bottom = pad_along - pad_top
return pad_top, pad_bottom
padding_shape = []
for filter_dim, stride_val in zip(filter_shape, stride):
pad_top, pad_bottom = _pad_top_bottom(filter_dim, stride_val)
padding_shape.append(pad_top)
padding_shape.append(pad_bottom)
depth_top = padding_shape.pop(0)
depth_bottom = padding_shape.pop(0)
padding_shape.append(depth_top)
padding_shape.append(depth_bottom)
return tuple(padding_shape)
def forward(self, inp):
inp = self.pad(inp)
out = self.pool(inp)
return out
class Sentence_Embedding(nn.Module):
def __init__(
self,
embd_dim,
num_embeddings=66250,
word_embedding_dim=300,
token_to_word_path="dict.npy",
max_words=16,
output_dim=2048,
):
super(Sentence_Embedding, self).__init__()
self.word_embd = nn.Embedding(num_embeddings, word_embedding_dim)
self.fc1 = nn.Linear(word_embedding_dim, output_dim)
self.fc2 = nn.Linear(output_dim, embd_dim)
self.word_to_token = {}
self.max_words = max_words
token_to_word = np.load(token_to_word_path)
for i, t in enumerate(token_to_word):
self.word_to_token[t] = i + 1
def _zero_pad_tensor_token(self, tensor, size):
if len(tensor) >= size:
return tensor[:size]
else:
zero = th.zeros(size - len(tensor)).long()
return th.cat((tensor, zero), dim=0)
def _split_text(self, sentence):
w = re.findall(r"[\w']+", str(sentence))
return w
def _words_to_token(self, words):
words = [
self.word_to_token[word] for word in words if word in self.word_to_token
]
if words:
we = self._zero_pad_tensor_token(th.LongTensor(words), self.max_words)
return we
else:
return th.zeros(self.max_words).long()
def _words_to_ids(self, x):
split_x = [self._words_to_token(self._split_text(sent.lower())) for sent in x]
return th.stack(split_x, dim=0)
def forward(self, x):
x = self._words_to_ids(x)
x = self.word_embd(x)
x = F.relu(self.fc1(x))
x = th.max(x, dim=1)[0]
x = self.fc2(x)
return {'text_embedding': x}
class S3D(nn.Module):
def __init__(self, dict_path, num_classes=512, gating=True, space_to_depth=True):
super(S3D, self).__init__()
self.num_classes = num_classes
self.gating = gating
self.space_to_depth = space_to_depth
if space_to_depth:
self.conv1 = STConv3D(
24, 64, [2, 4, 4], stride=1, padding=(1, 2, 2), separable=False
)
else:
self.conv1 = STConv3D(
3, 64, [3, 7, 7], stride=2, padding=(1, 3, 3), separable=False
)
self.conv_2b = STConv3D(64, 64, [1, 1, 1], separable=False)
self.conv_2c = STConv3D(64, 192, [3, 3, 3], padding=1, separable=True)
self.gating = SelfGating(192)
self.maxpool_2a = MaxPool3dTFPadding(
kernel_size=(1, 3, 3), stride=(1, 2, 2), padding="SAME"
)
self.maxpool_3a = MaxPool3dTFPadding(
kernel_size=(1, 3, 3), stride=(1, 2, 2), padding="SAME"
)
self.mixed_3b = InceptionBlock(192, 64, 96, 128, 16, 32, 32)
self.mixed_3c = InceptionBlock(
self.mixed_3b.output_dim, 128, 128, 192, 32, 96, 64
)
self.maxpool_4a = MaxPool3dTFPadding(
kernel_size=(3, 3, 3), stride=(2, 2, 2), padding="SAME"
)
self.mixed_4b = InceptionBlock(
self.mixed_3c.output_dim, 192, 96, 208, 16, 48, 64
)
self.mixed_4c = InceptionBlock(
self.mixed_4b.output_dim, 160, 112, 224, 24, 64, 64
)
self.mixed_4d = InceptionBlock(
self.mixed_4c.output_dim, 128, 128, 256, 24, 64, 64
)
self.mixed_4e = InceptionBlock(
self.mixed_4d.output_dim, 112, 144, 288, 32, 64, 64
)
self.mixed_4f = InceptionBlock(
self.mixed_4e.output_dim, 256, 160, 320, 32, 128, 128
)
self.maxpool_5a = self.maxPool3d_5a_2x2 = MaxPool3dTFPadding(
kernel_size=(2, 2, 2), stride=(2, 2, 2), padding="SAME"
)
self.mixed_5b = InceptionBlock(
self.mixed_4f.output_dim, 256, 160, 320, 32, 128, 128
)
self.mixed_5c = InceptionBlock(
self.mixed_5b.output_dim, 384, 192, 384, 48, 128, 128
)
self.fc = nn.Linear(self.mixed_5c.output_dim, num_classes)
self.text_module = Sentence_Embedding(num_classes,
token_to_word_path=dict_path)
def _space_to_depth(self, input):
"""3D space to depth trick for TPU optimization.
"""
B, C, T, H, W = input.shape
input = input.view(B, C, T // 2, 2, H // 2, 2, W // 2, 2)
input = input.permute(0, 3, 5, 7, 1, 2, 4, 6)
input = input.contiguous().view(B, 8 * C, T // 2, H // 2, W // 2)
return input
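    # Shape walk-through of the reshape above, with illustrative sizes:
    # (B, C, T, H, W) = (1, 3, 32, 224, 224)
    #   -> view    (1, 3, 16, 2, 112, 2, 112, 2)
    #   -> permute (1, 2, 2, 2, 3, 16, 112, 112)
    #   -> view    (1, 24, 16, 112, 112)   # 8 * C channels, half T, H and W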
def forward(self, inputs):
"""Defines the S3DG base architecture."""
if self.space_to_depth:
inputs = self._space_to_depth(inputs)
net = self.conv1(inputs)
if self.space_to_depth:
# we need to replicate 'SAME' tensorflow padding
net = net[:, :, 1:, 1:, 1:]
net = self.maxpool_2a(net)
net = self.conv_2b(net)
net = self.conv_2c(net)
if self.gating:
net = self.gating(net)
net = self.maxpool_3a(net)
net = self.mixed_3b(net)
net = self.mixed_3c(net)
net = self.maxpool_4a(net)
net = self.mixed_4b(net)
net = self.mixed_4c(net)
net = self.mixed_4d(net)
net = self.mixed_4e(net)
net = self.mixed_4f(net)
net = self.maxpool_5a(net)
net = self.mixed_5b(net)
net = self.mixed_5c(net)
net = th.mean(net, dim=[2, 3, 4])
return {'video_embedding': self.fc(net), 'mixed_5c': net}
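# A hypothetical, minimal loading sketch (in comments). The checkpoint paths
# follow the ones hard-coded in this repo's feature-extractor `model.py`; the
# input tensor and the caption are made up.
#
#   net = S3D("pretrained_models/s3d_dict.npy", num_classes=512)
#   net.load_state_dict(th.load("pretrained_models/s3d_howto100m.pth"))
#   net.eval()
#   video = th.rand(1, 3, 32, 224, 224)              # B x C x T x H x W in [0, 1]
#   with th.no_grad():
#       video_emb = net(video)["video_embedding"]     # (1, 512)
#       text_emb = net.text_module(
#           ["mix the flour and water"])["text_embedding"]  # (1, 512)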
| EXA-1-master | exa/libraries/fairseq/examples/MMPT/mmpt/processors/models/s3dg.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os
import numpy as np
import pickle
import time
try:
import faiss
except ImportError:
pass
from collections import defaultdict
from ..utils import get_local_rank, print_on_rank0
class VectorRetriever(object):
"""
    How2 Video Retriever.
Reference usage of FAISS:
https://github.com/fairinternal/fairseq-py/blob/paraphrase_pretraining/fairseq/data/multilingual_faiss_dataset.py
"""
def __init__(self, hidden_size, cent, db_type, examples_per_cent_to_train):
if db_type == "flatl2":
quantizer = faiss.IndexFlatL2(hidden_size) # the other index
self.db = faiss.IndexIVFFlat(
quantizer, hidden_size, cent, faiss.METRIC_L2)
elif db_type == "pq":
self.db = faiss.index_factory(
hidden_size, f"IVF{cent}_HNSW32,PQ32"
)
else:
raise ValueError("unknown type of db", db_type)
self.train_thres = cent * examples_per_cent_to_train
self.train_cache = []
self.train_len = 0
self.videoid_to_vectoridx = {}
self.vectoridx_to_videoid = None
self.make_direct_maps_done = False
def make_direct_maps(self):
faiss.downcast_index(self.db).make_direct_map()
def __len__(self):
return self.db.ntotal
def save(self, out_dir):
faiss.write_index(
self.db,
os.path.join(out_dir, "faiss_idx")
)
with open(
os.path.join(
out_dir, "videoid_to_vectoridx.pkl"),
"wb") as fw:
pickle.dump(
self.videoid_to_vectoridx, fw,
protocol=pickle.HIGHEST_PROTOCOL
)
def load(self, out_dir):
fn = os.path.join(out_dir, "faiss_idx")
self.db = faiss.read_index(fn)
with open(
os.path.join(out_dir, "videoid_to_vectoridx.pkl"), "rb") as fr:
self.videoid_to_vectoridx = pickle.load(fr)
def add(self, hidden_states, video_ids, last=False):
assert len(hidden_states) == len(video_ids), "{}, {}".format(
str(len(hidden_states)), str(len(video_ids)))
assert len(hidden_states.shape) == 2
assert hidden_states.dtype == np.float32
valid_idx = []
for idx, video_id in enumerate(video_ids):
if video_id not in self.videoid_to_vectoridx:
valid_idx.append(idx)
self.videoid_to_vectoridx[video_id] = \
len(self.videoid_to_vectoridx)
hidden_states = hidden_states[valid_idx]
if not self.db.is_trained:
self.train_cache.append(hidden_states)
self.train_len += hidden_states.shape[0]
if self.train_len < self.train_thres:
return
self.finalize_training()
else:
self.db.add(hidden_states)
def finalize_training(self):
hidden_states = np.concatenate(self.train_cache, axis=0)
del self.train_cache
local_rank = get_local_rank()
if local_rank == 0:
start = time.time()
print("training db on", self.train_thres, "/", self.train_len)
self.db.train(hidden_states[:self.train_thres])
if local_rank == 0:
print("training db for", time.time() - start)
self.db.add(hidden_states)
def search(
self,
query_hidden_states,
orig_dist,
):
if len(self.videoid_to_vectoridx) != self.db.ntotal:
raise ValueError(
"cannot search: size mismatch in-between index and db",
len(self.videoid_to_vectoridx),
self.db.ntotal
)
if self.vectoridx_to_videoid is None:
self.vectoridx_to_videoid = {
self.videoid_to_vectoridx[videoid]: videoid
for videoid in self.videoid_to_vectoridx
}
assert len(self.vectoridx_to_videoid) \
== len(self.videoid_to_vectoridx)
# MultilingualFaissDataset uses the following; not sure the purpose.
# faiss.ParameterSpace().set_index_parameter(self.db, "nprobe", 10)
queried_dist, index = self.db.search(query_hidden_states, 1)
queried_dist, index = queried_dist[:, 0], index[:, 0]
outputs = np.array(
[self.vectoridx_to_videoid[_index]
if _index != -1 else (-1, -1, -1) for _index in index],
dtype=np.int32)
outputs[queried_dist <= orig_dist] = -1
return outputs
def search_by_video_ids(
self,
video_ids,
retri_factor
):
if len(self.videoid_to_vectoridx) != self.db.ntotal:
raise ValueError(
len(self.videoid_to_vectoridx),
self.db.ntotal
)
if not self.make_direct_maps_done:
self.make_direct_maps()
if self.vectoridx_to_videoid is None:
self.vectoridx_to_videoid = {
self.videoid_to_vectoridx[videoid]: videoid
for videoid in self.videoid_to_vectoridx
}
assert len(self.vectoridx_to_videoid) \
== len(self.videoid_to_vectoridx)
query_hidden_states = []
vector_ids = []
for video_id in video_ids:
vector_id = self.videoid_to_vectoridx[video_id]
vector_ids.append(vector_id)
query_hidden_state = self.db.reconstruct(vector_id)
query_hidden_states.append(query_hidden_state)
query_hidden_states = np.stack(query_hidden_states)
# MultilingualFaissDataset uses the following; not sure the reason.
# faiss.ParameterSpace().set_index_parameter(self.db, "nprobe", 10)
_, index = self.db.search(query_hidden_states, retri_factor)
outputs = []
for sample_idx, sample in enumerate(index):
# the first video_id is always the video itself.
cands = [video_ids[sample_idx]]
for vector_idx in sample:
if vector_idx >= 0 \
and vector_ids[sample_idx] != vector_idx:
cands.append(
self.vectoridx_to_videoid[vector_idx]
)
outputs.append(cands)
return outputs
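# A hypothetical end-to-end sketch of the retriever above (comments only;
# sizes, ids and the flat-L2 settings are made up, and FAISS must be installed):
#
#   retri = VectorRetriever(hidden_size=768, cent=32, db_type="flatl2",
#                           examples_per_cent_to_train=48)
#   feats = np.random.rand(2048, 768).astype("float32")
#   ids = ["vid%05d" % i for i in range(2048)]
#   retri.add(feats, ids)   # trains the IVF index once 32 * 48 vectors arrive,
#                           # then keeps adding to the database
#   cands = retri.search_by_video_ids(["vid00000"], retri_factor=8)
#   # cands[0][0] is the query id itself, followed by retrieved neighbours.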
class VectorRetrieverDM(VectorRetriever):
"""
with direct map.
    How2 Video Retriever.
Reference usage of FAISS:
https://github.com/fairinternal/fairseq-py/blob/paraphrase_pretraining/fairseq/data/multilingual_faiss_dataset.py
"""
def __init__(
self,
hidden_size,
cent,
db_type,
examples_per_cent_to_train
):
super().__init__(
hidden_size, cent, db_type, examples_per_cent_to_train)
self.make_direct_maps_done = False
def make_direct_maps(self):
faiss.downcast_index(self.db).make_direct_map()
self.make_direct_maps_done = True
def search(
self,
query_hidden_states,
orig_dist,
):
if len(self.videoid_to_vectoridx) != self.db.ntotal:
raise ValueError(
len(self.videoid_to_vectoridx),
self.db.ntotal
)
if not self.make_direct_maps_done:
self.make_direct_maps()
if self.vectoridx_to_videoid is None:
self.vectoridx_to_videoid = {
self.videoid_to_vectoridx[videoid]: videoid
for videoid in self.videoid_to_vectoridx
}
assert len(self.vectoridx_to_videoid) \
== len(self.videoid_to_vectoridx)
# MultilingualFaissDataset uses the following; not sure the reason.
# faiss.ParameterSpace().set_index_parameter(self.db, "nprobe", 10)
queried_dist, index = self.db.search(query_hidden_states, 1)
outputs = []
for sample_idx, sample in enumerate(index):
# and queried_dist[sample_idx] < thres \
if sample >= 0 \
and queried_dist[sample_idx] < orig_dist[sample_idx]:
outputs.append(self.vectoridx_to_videoid[sample])
else:
outputs.append(None)
return outputs
def search_by_video_ids(
self,
video_ids,
retri_factor=8
):
if len(self.videoid_to_vectoridx) != self.db.ntotal:
raise ValueError(
len(self.videoid_to_vectoridx),
self.db.ntotal
)
if not self.make_direct_maps_done:
self.make_direct_maps()
if self.vectoridx_to_videoid is None:
self.vectoridx_to_videoid = {
self.videoid_to_vectoridx[videoid]: videoid
for videoid in self.videoid_to_vectoridx
}
assert len(self.vectoridx_to_videoid) \
== len(self.videoid_to_vectoridx)
query_hidden_states = []
vector_ids = []
for video_id in video_ids:
vector_id = self.videoid_to_vectoridx[video_id]
vector_ids.append(vector_id)
query_hidden_state = self.db.reconstruct(vector_id)
query_hidden_states.append(query_hidden_state)
query_hidden_states = np.stack(query_hidden_states)
# MultilingualFaissDataset uses the following; not sure the reason.
# faiss.ParameterSpace().set_index_parameter(self.db, "nprobe", 10)
_, index = self.db.search(query_hidden_states, retri_factor)
outputs = []
for sample_idx, sample in enumerate(index):
# the first video_id is always the video itself.
cands = [video_ids[sample_idx]]
for vector_idx in sample:
if vector_idx >= 0 \
and vector_ids[sample_idx] != vector_idx:
cands.append(
self.vectoridx_to_videoid[vector_idx]
)
outputs.append(cands)
return outputs
class MMVectorRetriever(VectorRetrieverDM):
"""
    multimodal vector retriever:
    text retrieves video or video retrieves text.
"""
def __init__(self, hidden_size, cent, db_type, examples_per_cent_to_train):
super().__init__(
hidden_size, cent, db_type, examples_per_cent_to_train)
video_db = self.db
super().__init__(
hidden_size, cent, db_type, examples_per_cent_to_train)
text_db = self.db
self.db = {"video": video_db, "text": text_db}
self.video_to_videoid = defaultdict(list)
def __len__(self):
assert self.db["video"].ntotal == self.db["text"].ntotal
return self.db["video"].ntotal
def make_direct_maps(self):
faiss.downcast_index(self.db["video"]).make_direct_map()
faiss.downcast_index(self.db["text"]).make_direct_map()
def save(self, out_dir):
faiss.write_index(
self.db["video"],
os.path.join(out_dir, "video_faiss_idx")
)
faiss.write_index(
self.db["text"],
os.path.join(out_dir, "text_faiss_idx")
)
with open(
os.path.join(
out_dir, "videoid_to_vectoridx.pkl"),
"wb") as fw:
pickle.dump(
self.videoid_to_vectoridx, fw,
protocol=pickle.HIGHEST_PROTOCOL
)
def load(self, out_dir):
fn = os.path.join(out_dir, "video_faiss_idx")
video_db = faiss.read_index(fn)
fn = os.path.join(out_dir, "text_faiss_idx")
text_db = faiss.read_index(fn)
self.db = {"video": video_db, "text": text_db}
with open(
os.path.join(out_dir, "videoid_to_vectoridx.pkl"), "rb") as fr:
self.videoid_to_vectoridx = pickle.load(fr)
self.video_to_videoid = defaultdict(list)
def add(self, hidden_states, video_ids):
"""hidden_states is a pair `(video, text)`"""
assert len(hidden_states) == len(video_ids), "{}, {}".format(
str(len(hidden_states)), str(len(video_ids)))
assert len(hidden_states.shape) == 3
assert len(self.video_to_videoid) == 0
valid_idx = []
for idx, video_id in enumerate(video_ids):
if video_id not in self.videoid_to_vectoridx:
valid_idx.append(idx)
self.videoid_to_vectoridx[video_id] = \
len(self.videoid_to_vectoridx)
batch_size = hidden_states.shape[0]
hidden_states = hidden_states[valid_idx]
hidden_states = np.transpose(hidden_states, (1, 0, 2)).copy()
if not self.db["video"].is_trained:
self.train_cache.append(hidden_states)
train_len = batch_size * len(self.train_cache)
if train_len < self.train_thres:
return
hidden_states = np.concatenate(self.train_cache, axis=1)
del self.train_cache
self.db["video"].train(hidden_states[0, :self.train_thres])
self.db["text"].train(hidden_states[1, :self.train_thres])
self.db["video"].add(hidden_states[0])
self.db["text"].add(hidden_states[1])
def get_clips_by_video_id(self, video_id):
if not self.video_to_videoid:
            # use a distinct loop variable so the `video_id` argument is not shadowed.
            for clip_video_id, video_clip, text_clip in self.videoid_to_vectoridx:
                self.video_to_videoid[clip_video_id].append(
                    (clip_video_id, video_clip, text_clip))
return self.video_to_videoid[video_id]
def search(
self,
video_ids,
target_modality,
retri_factor=8
):
if len(self.videoid_to_vectoridx) != len(self):
raise ValueError(
len(self.videoid_to_vectoridx),
len(self)
)
if not self.make_direct_maps_done:
self.make_direct_maps()
if self.vectoridx_to_videoid is None:
self.vectoridx_to_videoid = {
self.videoid_to_vectoridx[videoid]: videoid
for videoid in self.videoid_to_vectoridx
}
assert len(self.vectoridx_to_videoid) \
== len(self.videoid_to_vectoridx)
src_modality = "text" if target_modality == "video" else "video"
query_hidden_states = []
vector_ids = []
for video_id in video_ids:
vector_id = self.videoid_to_vectoridx[video_id]
vector_ids.append(vector_id)
query_hidden_state = self.db[src_modality].reconstruct(vector_id)
query_hidden_states.append(query_hidden_state)
query_hidden_states = np.stack(query_hidden_states)
# MultilingualFaissDataset uses the following; not sure the reason.
# faiss.ParameterSpace().set_index_parameter(self.db, "nprobe", 10)
_, index = self.db[target_modality].search(
query_hidden_states, retri_factor)
outputs = []
for sample_idx, sample in enumerate(index):
cands = []
for vector_idx in sample:
if vector_idx >= 0:
cands.append(
self.vectoridx_to_videoid[vector_idx]
)
outputs.append(cands)
return outputs
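# A hypothetical cross-modal retrieval sketch for the class above (comments
# only; the shapes and clip metadata follow the asserts in `add`, everything
# else is made up):
#
#   retri = MMVectorRetriever(hidden_size=768, cent=32, db_type="flatl2",
#                             examples_per_cent_to_train=48)
#   # hidden_states: (N, 2, 768) pairs of (video, text) clip vectors;
#   # video_ids: (video_id, (v_start, v_end), (t_start, t_end)) tuples.
#   retri.add(hidden_states, video_ids)
#   # retrieve text clips for a few video clips:
#   cands = retri.search(video_ids[:4], target_modality="text", retri_factor=8)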
| EXA-1-master | exa/libraries/fairseq/examples/MMPT/mmpt/modules/retri.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from .mm import *
try:
from .expmm import *
except ImportError:
pass
| EXA-1-master | exa/libraries/fairseq/examples/MMPT/mmpt/modules/__init__.py |
# Copyright (c) Facebook, Inc. All Rights Reserved
import torch
import os
import numpy as np
import pickle
from . import retri
from ..utils import get_local_rank
class VectorPool(object):
"""
Base class of retrieval space.
"""
def __init__(self, config):
from transformers import AutoConfig
self.hidden_size = AutoConfig.from_pretrained(
config.dataset.bert_name).hidden_size
self.retriever_cls = getattr(retri, config.retriever_cls)
def __call__(self, sample, **kwargs):
raise NotImplementedError
def build_retriver(
self,
retriever_cls=None,
hidden_size=None,
centroids=512,
db_type="flatl2",
examples_per_cent_to_train=48
):
"""merge results from multiple gpus and return a retriver.."""
self.retriver = retriever_cls(
hidden_size, centroids, db_type, examples_per_cent_to_train)
return self.retriver
def __repr__(self):
if hasattr(self, "retriver"):
retriver_name = str(len(self.retriver))
else:
retriver_name = "no retriver field yet"
return self.__class__.__name__ \
+ "(" + retriver_name + ")"
class VideoVectorPool(VectorPool):
"""
average clips of a video as video representation.
"""
def __init__(self, config):
super().__init__(config)
self.build_retriver(self.retriever_cls, self.hidden_size)
def __call__(self, sample, subsampling, **kwargs):
hidden_states = (
sample["pooled_video"] + sample["pooled_text"]) / 2.
hidden_states = hidden_states.view(
-1, subsampling,
hidden_states.size(-1))
hidden_states = torch.mean(hidden_states, dim=1)
hidden_states = hidden_states.cpu().detach().numpy()
video_ids = []
for offset_idx, video_id in enumerate(sample["video_id"]):
if isinstance(video_id, tuple) and len(video_id) == 3:
# a sharded video_id.
video_id = video_id[0]
video_ids.append(video_id)
assert len(video_ids) == len(hidden_states)
self.retriver.add(
hidden_states.astype("float32"),
video_ids
)
class DistributedVectorPool(VectorPool):
"""
support sync of multiple gpus/nodes.
"""
def __init__(self, config):
super().__init__(config)
self.out_dir = os.path.join(
config.fairseq.checkpoint.save_dir,
"retri")
os.makedirs(self.out_dir, exist_ok=True)
self.hidden_states = []
self.video_ids = []
def build_retriver(
self,
retriever_cls=None,
hidden_size=None,
centroids=4096,
db_type="flatl2",
examples_per_cent_to_train=48
):
if retriever_cls is None:
retriever_cls = self.retriever_cls
if hidden_size is None:
hidden_size = self.hidden_size
"""merge results from multiple gpus and return a retriver.."""
if torch.distributed.is_initialized():
self.save()
# sync saving.
torch.distributed.barrier()
world_size = torch.distributed.get_world_size()
else:
world_size = 1
self.retriver = retriever_cls(
hidden_size, centroids, db_type, examples_per_cent_to_train)
# each gpu process has its own retriever.
for local_rank in range(world_size):
if get_local_rank() == 0:
print("load local_rank", local_rank)
hidden_states, video_ids = self.load(local_rank)
hidden_states = hidden_states.astype("float32")
self.retriver.add(hidden_states, video_ids)
return self.retriver
def load(self, local_rank):
hidden_states = np.load(
os.path.join(
self.out_dir,
"hidden_state" + str(local_rank) + ".npy"
)
)
with open(
os.path.join(
self.out_dir, "video_id" + str(local_rank) + ".pkl"),
"rb") as fr:
video_ids = pickle.load(fr)
return hidden_states, video_ids
def save(self):
hidden_states = np.vstack(self.hidden_states)
assert len(hidden_states) == len(self.video_ids), "{}, {}".format(
len(hidden_states),
len(self.video_ids)
)
local_rank = torch.distributed.get_rank() \
if torch.distributed.is_initialized() else 0
np.save(
os.path.join(
self.out_dir,
"hidden_state" + str(local_rank) + ".npy"),
hidden_states)
with open(
os.path.join(
self.out_dir,
"video_id" + str(local_rank) + ".pkl"),
"wb") as fw:
pickle.dump(
self.video_ids,
fw,
protocol=pickle.HIGHEST_PROTOCOL
)
class DistributedVideoVectorPool(DistributedVectorPool):
"""
average clips of a video as video representation.
"""
def __call__(self, sample, subsampling, **kwargs):
hidden_states = (
sample["pooled_video"] + sample["pooled_text"]) / 2.
hidden_states = hidden_states.view(
-1, subsampling,
hidden_states.size(-1))
hidden_states = torch.mean(hidden_states, dim=1)
hidden_states = hidden_states.cpu().detach().numpy()
video_ids = []
for offset_idx, video_id in enumerate(sample["video_id"]):
if isinstance(video_id, tuple) and len(video_id) == 3:
# a sharded video_id.
video_id = video_id[0]
video_ids.append(video_id)
assert len(video_ids) == len(hidden_states)
self.hidden_states.append(hidden_states)
self.video_ids.extend(video_ids)
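# A hypothetical sketch of how a pool is filled during evaluation (comments
# only). The `config` fields follow the constructors above, `eval_batches` is
# assumed, and each `sample` mirrors the keys used in `__call__`.
#
#   pool = DistributedVideoVectorPool(config)
#   for sample in eval_batches:           # dicts with pooled_video/pooled_text
#       pool(sample, subsampling=16)      # and video_id, on the current GPU
#   retriver = pool.build_retriver()      # saves, syncs and merges all ranks
#   cands = retriver.search_by_video_ids(["video0"], retri_factor=8)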
# ------------ the following are deprecated --------------
class TextClipVectorPool(VectorPool):
def __init__(self, config):
from transformers import AutoConfig
hidden_size = AutoConfig.from_pretrained(
config.dataset.bert_name).hidden_size
retriever_cls = getattr(retri, config.retriever_cls)
self.build_retriver(retriever_cls, hidden_size)
def __call__(self, sample, **kwargs):
clip_meta = sample["clip_meta"].cpu()
assert torch.all(torch.le(clip_meta[:, 4], clip_meta[:, 5]))
text_meta = [tuple(item.tolist()) for item in clip_meta[:, 3:]]
if hasattr(self, "retriver"):
# build_retriver is called.
self.retriver.add(
sample["pooled_text"].cpu().numpy().astype("float32"),
text_meta
)
else:
raise NotImplementedError
class MMClipVectorPool(VectorPool):
"""
Multimodal Clip-level vector pool.
"""
def __init__(self, out_dir):
"""use hidden_states to store `(video, text)`."""
"""use video_ids to store `(video_id, start, end)`."""
super().__init__(out_dir)
def __call__(self, sample, **kwargs):
pooled_video = sample["pooled_video"].cpu().unsqueeze(1).numpy()
pooled_text = sample["pooled_text"].cpu().unsqueeze(1).numpy()
self.hidden_states.append(
np.concatenate([pooled_video, pooled_text], axis=1)
)
video_starts = sample["video_start"].cpu()
video_ends = sample["video_end"].cpu()
assert torch.all(torch.le(video_starts, video_ends))
text_starts = sample["text_start"].cpu()
text_ends = sample["text_end"].cpu()
assert torch.all(torch.le(text_starts, text_ends))
subsample_size = sample["pooled_video"].size(0) // len(sample["video_id"])
video_ids = [video_id for video_id in sample["video_id"]
for _ in range(subsample_size)
]
for video_id, video_start, video_end, text_start, text_end in zip(
video_ids, video_starts, video_ends, text_starts, text_ends):
self.video_ids.append((
video_id,
(int(video_start), int(video_end)),
(int(text_start), int(text_end))
))
| EXA-1-master | exa/libraries/fairseq/examples/MMPT/mmpt/modules/vectorpool.py |
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Copyright (c) Facebook, Inc. All Rights Reserved
import torch
from torch import nn
try:
from transformers.modeling_bert import (
BertEmbeddings,
ACT2FN,
)
except ImportError:
pass
class VideoTokenMLP(nn.Module):
def __init__(self, config):
super().__init__()
input_dim = config.input_dim if hasattr(config, "input_dim") else 512
self.linear1 = nn.Linear(input_dim, config.hidden_size)
self.LayerNorm = nn.LayerNorm(config.hidden_size)
self.activation = ACT2FN[config.hidden_act]
self.linear2 = nn.Linear(config.hidden_size, config.hidden_size)
def forward(self, hidden_states):
hidden_states = self.linear1(hidden_states)
hidden_states = self.activation(hidden_states)
hidden_states = self.LayerNorm(hidden_states)
hidden_states = self.linear2(hidden_states)
return hidden_states
class MMBertEmbeddings(BertEmbeddings):
def __init__(self, config):
super().__init__(config)
self.max_video_len = config.max_video_len
if hasattr(config, "use_seg_emb") and config.use_seg_emb:
"""the original VLM paper uses seg_embeddings for temporal space.
although not used it changed the randomness of initialization.
we keep it for reproducibility.
"""
self.seg_embeddings = nn.Embedding(256, config.hidden_size)
def forward(
self,
input_ids,
input_video_embeds,
token_type_ids=None,
position_ids=None,
inputs_embeds=None,
):
input_tensor = input_ids if input_ids is not None else inputs_embeds
if input_video_embeds is not None:
input_shape = (
input_tensor.size(0),
input_tensor.size(1) + input_video_embeds.size(1),
)
else:
input_shape = (input_tensor.size(0), input_tensor.size(1))
if position_ids is None:
"""
            Automatically skip position embeddings for the text-only case.
            use cases:
            (1) action localization and segmentation:
                feed in a len-1 dummy video token; the text part needs to
                skip input_video_embeds.size(1) to get the right
                position_ids for the video [SEP] and the remaining text tokens.
(2) MMFusionShare for two forward passings:
in `forward_text`: input_video_embeds is None.
need to skip video [SEP] token.
# video_len + 1: [CLS] + video_embed
# self.max_video_len + 1: [SEP] for video.
# self.max_video_len + 2: [SEP] for video.
# self.max_video_len + input_ids.size(1): rest for text.
"""
if input_video_embeds is not None:
video_len = input_video_embeds.size(1)
starting_offset = self.max_video_len + 1 # video [SEP]
ending_offset = self.max_video_len + input_ids.size(1)
else:
video_len = 0
starting_offset = self.max_video_len + 2 # first text token.
ending_offset = self.max_video_len + input_ids.size(1) + 1
position_ids = torch.cat([
self.position_ids[:, :video_len + 1],
self.position_ids[:, starting_offset:ending_offset]
], dim=1)
if token_type_ids is None:
token_type_ids = torch.zeros(
input_shape, dtype=torch.long, device=self.position_ids.device
)
"""
the format of input_ids is [CLS] [SEP] caption [SEP] padding.
the goal is to build [CLS] video tokens [SEP] caption [SEP] .
"""
if inputs_embeds is None:
inputs_embeds = self.word_embeddings(input_ids)
if input_video_embeds is not None:
inputs_mm_embeds = torch.cat([
inputs_embeds[:, :1], input_video_embeds, inputs_embeds[:, 1:]
], dim=1)
else:
# text only for `MMFusionShare`.
inputs_mm_embeds = inputs_embeds
position_embeddings = self.position_embeddings(position_ids)
token_type_embeddings = self.token_type_embeddings(token_type_ids)
embeddings = inputs_mm_embeds + position_embeddings
embeddings += token_type_embeddings
embeddings = self.LayerNorm(embeddings)
embeddings = self.dropout(embeddings)
return embeddings
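# Shape sketch for the concatenation above (illustrative sizes, in comments):
# with input_ids of shape (B, 32) laid out as [CLS] [SEP] caption [SEP] pad,
# input_video_embeds of shape (B, 30, hidden) and max_video_len >= 30, the
# returned embeddings have shape (B, 1 + 30 + 31, hidden), i.e.
# [CLS] video tokens [SEP] caption [SEP] padding.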
class AlignHead(nn.Module):
"""this will load pre-trained weights for NSP, which is desirable."""
def __init__(self, config):
super().__init__()
self.seq_relationship = nn.Linear(config.hidden_size, 2)
def forward(self, dropout_pooled_output):
logits = self.seq_relationship(dropout_pooled_output)
return logits
| EXA-1-master | exa/libraries/fairseq/examples/MMPT/mmpt/modules/mm.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import numpy as np
import os
import pickle
from mmpt.utils import ShardedTensor
class Shard(object):
def __init__(
self,
vfeat_dir,
tfeat_dir,
target_dir,
file_paths,
shard_size=4096
):
self.vfeat_dir = vfeat_dir
self.tfeat_dir = tfeat_dir
self.target_dir = target_dir
self.video_ids = {}
for split, file_path in zip(["train", "val"], file_paths):
with open(file_path) as fr:
self.video_ids[split] = [
line.strip() for line in fr.readlines()]
self.shard_size = shard_size
def __call__(self, split="train"):
for split in ["train", "val"]:
meta = {}
for shard_idx, shard_offset in enumerate(
range(0, len(self.video_ids[split]), self.shard_size)
):
print(shard_idx)
meta_shard = []
video_shard = []
for video_id in self.video_ids[split][shard_offset:shard_offset+self.shard_size]:
meta_shard.append(video_id)
npy_file = os.path.join(self.vfeat_dir, video_id + ".npy")
video_shard.append(np.load(npy_file))
meta[shard_idx] = meta_shard
video_shard = ShardedTensor.from_list(video_shard)
target_path = os.path.join(
self.target_dir, split + "_" + str(shard_idx))
video_shard.save(target_path)
target_path = os.path.join(self.target_dir, split + "_meta")
with open(target_path + ".pkl", "wb") as fw:
pickle.dump(meta, fw, pickle.HIGHEST_PROTOCOL)
if __name__ == "__main__":
shard = Shard(
"data/feat/feat_how2_s3d",
"data/how2/raw_caption_dedup.bert-base-uncased",
"data/feat/feat_how2_s3d_shard_small",
["data/how2/how2_s3d_train.lst", "data/how2/how2_s3d_val.lst"]
)
shard()
| EXA-1-master | exa/libraries/fairseq/examples/MMPT/scripts/video_feature_extractor/shard_feature.py |
# Copyright Howto100M authors.
# Copyright (c) Facebook, Inc. All Rights Reserved
import torch as th
import pandas as pd
import os
import numpy as np
import ffmpeg
import random
from torch.utils.data import Dataset
class VideoLoader(Dataset):
"""modified from how2's video_feature_extractor."""
def __init__(
self,
csv=None,
video_dict=None,
framerate=1,
size=112,
centercrop=False,
hflip=False,
**kwargs
):
if csv is None and video_dict is None:
raise ValueError("csv and video_dict cannot be both None.")
if csv is not None:
self.csv = pd.read_csv(csv)
if video_dict is not None:
self.csv = pd.DataFrame.from_dict(video_dict)
self.centercrop = centercrop
self.size = size
self.framerate = framerate
self.hflip = hflip
def __len__(self):
return len(self.csv)
def _get_video_dim(self, video_path):
probe = ffmpeg.probe(video_path)
video_stream = next((stream for stream in probe['streams']
if stream['codec_type'] == 'video'), None)
width = int(video_stream['width'])
height = int(video_stream['height'])
return height, width
def _get_video_info(self, video_path):
probe = ffmpeg.probe(video_path)
video_stream = next((stream for stream in probe['streams']
if stream['codec_type'] == 'video'), None)
return video_stream
def _get_output_dim(self, h, w):
if isinstance(self.size, tuple) and len(self.size) == 2:
return self.size
elif h >= w:
return int(h * self.size / w), self.size
else:
return self.size, int(w * self.size / h)
def __getitem__(self, idx):
video_path = self.csv['video_path'].values[idx]
output_file = self.csv['feature_path'].values[idx]
return self._decode(output_file, video_path)
def _decode(self, output_file, video_path):
if not(os.path.isfile(output_file)) and os.path.isfile(video_path):
try:
h, w = self._get_video_dim(video_path)
except Exception:
print('ffprobe failed at: {}'.format(video_path))
return {'video': th.zeros(1), 'input': video_path,
'output': output_file}
try:
os.makedirs(os.path.dirname(output_file), exist_ok=True)
height, width = self._get_output_dim(h, w)
cmd = (
ffmpeg
.input(video_path)
.filter('fps', fps=self.framerate)
.filter('scale', width, height)
)
if self.hflip:
cmd = cmd.filter('hflip')
if self.centercrop:
x = int((width - self.size) / 2.0)
y = int((height - self.size) / 2.0)
cmd = cmd.crop(x, y, self.size, self.size)
video = self._run(cmd, output_file)
except Exception:
video = th.zeros(1)
else:
video = th.zeros(1)
return {'video': video, 'input': video_path, 'output': output_file}
def _run(self, cmd, output_file):
out, _ = (
cmd.output('pipe:', format='rawvideo', pix_fmt='rgb24')
.run(capture_stdout=True, quiet=True)
)
if self.centercrop and isinstance(self.size, int):
height, width = self.size, self.size
video = np.frombuffer(out, np.uint8).reshape([-1, height, width, 3])
video = th.from_numpy(video.astype('float32'))
return video.permute(0, 3, 1, 2)
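# A hypothetical decode call (comments only). The csv name is made up and is
# assumed to have `video_path` and `feature_path` columns, as expected by
# `__getitem__`; ffmpeg-python must be installed.
#
#   loader = VideoLoader(csv="my_videos.csv", framerate=1, size=224,
#                        centercrop=True)
#   batch = loader[0]
#   # batch["video"] is a float tensor of shape (num_frames, 3, 224, 224), or
#   # a zero tensor when decoding failed or the output file already exists.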
class VideoVerifier(VideoLoader):
def __getitem__(self, idx):
video_path = self.csv['video_path'].values[idx]
try:
return self._get_video_info(video_path)
except Exception:
# print('ffprobe failed at: {}'.format(video_path))
return None
class VideoCompressor(VideoLoader):
def __init__(
self,
csv=None,
video_dict=None,
framerate=1,
size=112,
centercrop=False,
hflip=False,
crf=32,
**kwargs
):
super().__init__(
csv,
video_dict,
framerate,
size,
centercrop,
hflip
)
self.crf = crf
def _run(self, cmd, output_file):
out, _ = (
cmd.output(filename=output_file, crf=self.crf)
.run(quiet=True)
)
video = None
return video
class VideoDownloader(VideoCompressor):
"""download"""
def __getitem__(self, idx):
video_path = self.csv['video_path'].values[idx]
output_file = self.csv['feature_path'].values[idx]
if not(os.path.isfile(output_file)):
os.makedirs(os.path.dirname(output_file), exist_ok=True)
cmd = "wget -O" + output_file + " " + video_path
# import subprocess
# subprocess.check_output(
# cmd,
# stderr=subprocess.STDOUT, shell=True)
os.system(cmd)
return {'video': None, 'input': video_path, 'output': output_file}
class AvKeyframeVideoCompressor(VideoLoader):
"""extract keyframes from a video and save it as jpg.
TODO: consider to merge with `CodecProcessor`.
"""
def __init__(
self,
csv=None,
video_dict=None,
framerate=1,
size=112,
centercrop=False,
max_num_frames=5,
**kwargs
):
super().__init__(csv, video_dict, framerate, size, centercrop)
self.max_num_frames = max_num_frames
def _get_video_dim(self, video_fn):
"""decord cannot probe the size of a video, we use pyav instead."""
import av
with av.open(video_fn) as container:
height = container.streams.video[0].codec_context.height
width = container.streams.video[0].codec_context.width
return height, width
def _get_output_dim(self, height, width):
"""
        keep the shorter side at `self.size` and stretch the other.
"""
if height >= width:
return int(height * self.size / width), self.size
else:
return self.size, int(width * self.size / height)
def __getitem__(self, idx):
import av
video_path = self.csv['video_path'].values[idx]
output_file = self.csv['feature_path'].values[idx]
if not(os.path.isdir(output_file)) and os.path.isfile(video_path):
try:
h, w = self._get_video_dim(video_path)
except Exception:
print('probe failed at: {}'.format(video_path))
return {'video': th.zeros(1), 'input': video_path,
'output': output_file}
try:
height, width = self._get_output_dim(h, w)
# new for av.
with av.open(video_path) as container:
container.streams.video[0].thread_type = "AUTO"
container.streams.video[0].codec_context.height = height
container.streams.video[0].codec_context.width = width
if self.framerate == 0: # keyframe.
container.streams.video[0].codec_context.skip_frame = 'NONKEY'
frames = []
for frame in container.decode(video=0):
frames.append(frame)
frames = random.sample(frames, self.max_num_frames)
os.makedirs(output_file, exist_ok=True)
for frame in frames:
frame.to_image().save(
os.path.join(
output_file,
"%04d.jpg" % frame.index))
except Exception:
print('extract failed at: {}'.format(video_path))
return {'video': th.zeros(1), 'input': video_path,
'output': output_file}
video = th.zeros(1)
return {'video': video, 'input': video_path, 'output': output_file}
| EXA-1-master | exa/libraries/fairseq/examples/MMPT/scripts/video_feature_extractor/videoreader.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os
import urllib.parse
import json
import pandas as pd
from tqdm import tqdm
# TODO: extend to other datasets.
supported_formats = {}
class PathBuilder(object):
@classmethod
def build(cls, video_dirs, feature_dir, ext, shards=0, split=None):
meta_fn = os.path.join(feature_dir, "meta_plan.json")
os.makedirs(feature_dir, exist_ok=True)
if os.path.isfile(meta_fn):
with open(meta_fn) as fr:
meta = json.load(fr)
return meta
print("searching videos...")
video_id_to_path = {}
for video_dir in video_dirs.split(","):
            # TODO: add support for recursive listdir.
if video_dir in supported_formats:
supported_formats[video_dir].load(video_dir, video_id_to_path)
else:
for idx, fn in enumerate(tqdm(os.listdir(video_dir))):
video_fn = os.path.join(video_dir, fn)
if os.path.isfile(video_fn):
video_id = os.path.splitext(fn)[0]
video_id_to_path[video_id] = video_fn
elif os.path.isdir(video_fn):
# shards of folders.
shard_dir = video_fn
for idx, fn in enumerate(os.listdir(shard_dir)):
video_fn = os.path.join(shard_dir, fn)
if os.path.isfile(video_fn):
video_id = os.path.splitext(fn)[0]
video_id_to_path[video_id] = video_fn
video_path, feature_path = [], []
valid_ext = set()
for idx, video_id in enumerate(video_id_to_path):
video_path.append(video_id_to_path[video_id])
            if ext is None:
                # use the original file ext for format compatibility.
                path = urllib.parse.urlparse(video_id_to_path[video_id]).path
                ext = os.path.splitext(path)[1]
if ext not in valid_ext:
valid_ext.add(ext)
print("adding", ext)
if shards:
shard_id = str(idx % shards)
feature_fn = os.path.join(
feature_dir, shard_id, video_id + ext)
else:
feature_fn = os.path.join(
feature_dir, video_id + ext)
feature_path.append(feature_fn)
print("targeting", len(feature_path), "videos")
meta = {
"video_path": video_path, "feature_path": feature_path}
with open(meta_fn, "w") as fw:
json.dump(meta, fw)
if split is not None:
splits = split.split("/")
assert len(splits) == 2
cur, total = int(splits[0]), int(splits[1])
assert cur < total
import math
chunk = math.ceil(len(meta["video_path"]) / total)
start = cur * chunk
end = (cur + 1) * chunk
meta = {
"video_path": meta["video_path"][start:end],
"feature_path": meta["feature_path"][start:end]
}
return meta
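# Added usage note (illustrative numbers): for split="0/4" over 10 discovered
# videos, chunk = math.ceil(10 / 4) = 3, so shard 0 keeps entries [0:3],
# shard 1 keeps [3:6], shard 2 keeps [6:9] and shard 3 keeps the remaining
# [9:10]; every shard reads the same cached meta_plan.json.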
| EXA-1-master | exa/libraries/fairseq/examples/MMPT/scripts/video_feature_extractor/pathbuilder.py |
# Copyright (c) Howto100M authors and Facebook, Inc. All Rights Reserved
import torch as th
from torch import nn
class GlobalAvgPool(nn.Module):
def __init__(self):
super(GlobalAvgPool, self).__init__()
def forward(self, x):
return th.mean(x, dim=[-2, -1])
def get_model(args):
assert args.type in ['2d', '3d', 'vmz', 's3d', 'vae']
if args.type == '2d':
print('Loading 2D-ResNet-152 ...')
import torchvision.models as models
model = models.resnet152(pretrained=True)
model = nn.Sequential(*list(model.children())[:-2], GlobalAvgPool())
model = model.cuda()
elif args.type == 'vmz':
print('Loading VMZ ...')
from vmz34 import r2plus1d_34
model = r2plus1d_34(pretrained_path=args.vmz_model_path, pretrained_num_classes=487)
model = model.cuda()
elif args.type == 's3d':
        # reuse a single S3D model rather than duplicating another copy for feature extraction.
from mmpt.processors.models.s3dg import S3D
model = S3D('pretrained_models/s3d_dict.npy', 512)
model.load_state_dict(th.load('pretrained_models/s3d_howto100m.pth'))
model = model.cuda()
elif args.type == '3d':
print('Loading 3D-ResneXt-101 ...')
from videocnn.models import resnext
model = resnext.resnet101(
num_classes=400,
shortcut_type='B',
cardinality=32,
sample_size=112,
sample_duration=16,
last_fc=False)
model = model.cuda()
model_data = th.load(args.resnext101_model_path)
model.load_state_dict(model_data)
elif args.type == 'vae':
from openaivae import OpenAIParallelDiscreteVAE
model = OpenAIParallelDiscreteVAE()
model = model.cuda()
else:
raise ValueError("model not supported yet.")
model.eval()
print('loaded')
return model
| EXA-1-master | exa/libraries/fairseq/examples/MMPT/scripts/video_feature_extractor/model.py |
# Copyright Howto100m authors.
# Copyright (c) Facebook, Inc. All Rights Reserved
import torch as th
class Normalize(object):
def __init__(self, mean, std):
self.mean = th.FloatTensor(mean).view(1, 3, 1, 1)
self.std = th.FloatTensor(std).view(1, 3, 1, 1)
def __call__(self, tensor):
tensor = (tensor - self.mean) / (self.std + 1e-8)
return tensor
class Preprocessing(object):
def __init__(self, type):
self.type = type
if type == '2d':
self.norm = Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
elif type == '3d':
self.norm = Normalize(mean=[110.6, 103.2, 96.3], std=[1.0, 1.0, 1.0])
elif type == 'vmz':
self.norm = Normalize(mean=[110.201, 100.64, 95.997], std=[58.1489, 56.4701, 55.3324])
def _zero_pad(self, tensor, size):
n = size - len(tensor) % size
if n == size:
return tensor
else:
z = th.zeros(n, tensor.shape[1], tensor.shape[2], tensor.shape[3])
return th.cat((tensor, z), 0)
def __call__(self, tensor):
if self.type == '2d':
tensor = tensor / 255.0
tensor = self.norm(tensor)
elif self.type == 'vmz':
#tensor = self._zero_pad(tensor, 8)
tensor = self._zero_pad(tensor, 10)
tensor = self.norm(tensor)
#tensor = tensor.view(-1, 8, 3, 112, 112)
tensor = tensor.view(-1, 10, 3, 112, 112)
tensor = tensor.transpose(1, 2)
elif self.type == '3d':
tensor = self._zero_pad(tensor, 16)
tensor = self.norm(tensor)
tensor = tensor.view(-1, 16, 3, 112, 112)
tensor = tensor.transpose(1, 2)
elif self.type == 's3d':
tensor = tensor / 255.0
tensor = self._zero_pad(tensor, 30)
tensor = tensor.view(-1, 30, 3, 224, 224) # N x 30 x 3 x H x W
tensor = tensor.transpose(1, 2) # N x 3 x 30 x H x W
# for vae do nothing
return tensor
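# Added shape sketch (illustrative): in the '3d' branch a clip of shape
# (37, 3, 112, 112) is zero-padded to 48 frames, normalized, reshaped to
# (3, 16, 3, 112, 112) and transposed to (3, 3, 16, 112, 112), i.e. three
# 16-frame chunks in the N x C x T x H x W layout the 3D CNN expects.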
| EXA-1-master | exa/libraries/fairseq/examples/MMPT/scripts/video_feature_extractor/preprocessing.py |
# Copyright Howto100M authors.
# Copyright (c) Facebook, Inc. All Rights Reserved
import torch as th
import torch.nn.functional as F
import math
import numpy as np
import argparse
from torch.utils.data import DataLoader
from model import get_model
from preprocessing import Preprocessing
from random_sequence_shuffler import RandomSequenceSampler
from tqdm import tqdm
from pathbuilder import PathBuilder
from videoreader import VideoLoader
parser = argparse.ArgumentParser(description='Easy video feature extractor')
parser.add_argument('--vdir', type=str)
parser.add_argument('--fdir', type=str)
parser.add_argument('--hflip', type=int, default=0)
parser.add_argument('--batch_size', type=int, default=64,
help='batch size')
parser.add_argument('--type', type=str, default='2d',
help='CNN type')
parser.add_argument('--half_precision', type=int, default=0,
help='output half precision float')
parser.add_argument('--num_decoding_thread', type=int, default=4,
help='Num parallel thread for video decoding')
parser.add_argument('--l2_normalize', type=int, default=1,
help='l2 normalize feature')
parser.add_argument('--resnext101_model_path', type=str, default='model/resnext101.pth',
help='Resnext model path')
parser.add_argument('--vmz_model_path', type=str, default='model/r2plus1d_34_clip8_ig65m_from_scratch-9bae36ae.pth',
help='vmz model path')
args = parser.parse_args()
# TODO: refactor all args into config. (current code is from different people.)
CONFIGS = {
"2d": {
"fps": 1,
"size": 224,
"centercrop": False,
"shards": 0,
},
"3d": {
"fps": 24,
"size": 112,
"centercrop": True,
"shards": 0,
},
"s3d": {
"fps": 30,
"size": 224,
"centercrop": True,
"shards": 0,
},
"vmz": {
"fps": 24,
"size": 112,
"centercrop": True,
"shards": 0,
},
"vae": {
"fps": 2,
"size": 256,
"centercrop": True,
"shards": 100,
}
}
config = CONFIGS[args.type]
video_dirs = args.vdir
feature_dir = args.fdir
video_dict = PathBuilder.build(video_dirs, feature_dir, ".npy", config["shards"])
dataset = VideoLoader(
video_dict=video_dict,
framerate=config["fps"],
size=config["size"],
centercrop=config["centercrop"],
hflip=args.hflip
)
n_dataset = len(dataset)
sampler = RandomSequenceSampler(n_dataset, 10)
loader = DataLoader(
dataset,
batch_size=1,
shuffle=False,
num_workers=args.num_decoding_thread,
sampler=sampler if n_dataset > 10 else None,
)
preprocess = Preprocessing(args.type)
model = get_model(args)
with th.no_grad():
    for k, data in tqdm(enumerate(loader), total=len(loader), ascii=True):
input_file = data['input'][0]
output_file = data['output'][0]
if len(data['video'].shape) > 3:
video = data['video'].squeeze()
if len(video.shape) == 4:
video = preprocess(video)
n_chunk = len(video)
if args.type == 'vmz':
n_chunk = math.ceil(n_chunk/float(3))
features = th.cuda.FloatTensor(n_chunk, 512).fill_(0)
elif args.type == 's3d':
features = th.cuda.FloatTensor(n_chunk, 512).fill_(0)
elif args.type == "vae":
features = th.cuda.LongTensor(n_chunk, 1024).fill_(0)
else:
features = th.cuda.FloatTensor(n_chunk, 2048).fill_(0)
n_iter = int(math.ceil(n_chunk / float(args.batch_size)))
for i in range(n_iter):
factor = 1
if args.type == 'vmz':
factor = 3
min_ind = factor * i * args.batch_size
max_ind = factor * (i + 1) * args.batch_size
video_batch = video[min_ind:max_ind:factor].cuda()
if args.type == '2d':
batch_features = model(video_batch) # (51, 487), (51, 512)
elif args.type == 's3d':
batch_features = model(video_batch)
batch_features = batch_features['video_embedding']
elif args.type == "vae":
# image_code.
batch_features = model(video_batch)
else:
batch_pred, batch_features = model(video_batch) # (51, 487), (51, 512)
if args.l2_normalize:
batch_features = F.normalize(batch_features, dim=1)
features[i*args.batch_size:(i+1)*args.batch_size] = batch_features
features = features.cpu().numpy()
if args.half_precision:
if args.type == "vae":
features = features.astype(np.int16)
else:
features = features.astype('float16')
else:
if args.type == "vae":
features = features.astype(np.int32)
else:
features = features.astype('float32')
np.save(output_file, features)
else:
print('Video {} error.'.format(input_file))
| EXA-1-master | exa/libraries/fairseq/examples/MMPT/scripts/video_feature_extractor/extract.py |
# Copyright (c) Facebook, Inc. All Rights Reserved
import numpy as np
from torch.utils.data.sampler import Sampler
class RandomSequenceSampler(Sampler):
def __init__(self, n_sample, seq_len):
self.n_sample = n_sample
self.seq_len = seq_len
def _pad_ind(self, ind):
zeros = np.zeros(self.seq_len - self.n_sample % self.seq_len)
ind = np.concatenate((ind, zeros))
return ind
def __iter__(self):
idx = np.arange(self.n_sample)
if self.n_sample % self.seq_len != 0:
idx = self._pad_ind(idx)
idx = np.reshape(idx, (-1, self.seq_len))
np.random.shuffle(idx)
idx = np.reshape(idx, (-1))
return iter(idx.astype(int))
    def __len__(self):
        # match __iter__, which pads only when n_sample % seq_len != 0.
        pad = (self.seq_len - self.n_sample % self.seq_len) % self.seq_len
        return self.n_sample + pad
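# Added example (not part of the original extractor): a minimal sketch showing
# that indices are shuffled in contiguous blocks of `seq_len`, with the tail
# padded by zeros; the numbers below are arbitrary.
if __name__ == "__main__":
    sampler = RandomSequenceSampler(n_sample=7, seq_len=3)
    print(list(sampler))  # e.g. [3, 4, 5, 0, 1, 2, 6, 0, 0] (block order varies)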
| EXA-1-master | exa/libraries/fairseq/examples/MMPT/scripts/video_feature_extractor/random_sequence_shuffler.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import pickle
import os
import argparse
import numpy as np
from torch.utils.data import Dataset, DataLoader
from mmpt.processors import PKLJSONStrTextProcessor
from mmpt.utils import ShardedTensor, recursive_config
class TokenizerDataset(Dataset):
def __init__(self, config):
self.text_processor = PKLJSONStrTextProcessor(config)
self.video_ids = list(self.text_processor.data.keys())
def __getitem__(self, idx):
video_id = self.video_ids[idx]
return video_id, self.text_processor(video_id)
def __len__(self):
return len(self.video_ids)
def numpify(shard_idx, video_ids, captions, target_dir, split, prefix, max_cap_len=32):
startends = []
caps_ids = []
for video_id in video_ids:
caption = captions[video_id]
startend = []
cap_ids = []
for start, end, cap in zip(
caption["start"], caption["end"], caption["cap"]):
startend.append(np.array([start, end]).astype("float32"))
cap_id = np.full((max_cap_len,), -1, dtype=np.int32)
cap = cap[:max_cap_len]
cap_id[:len(cap)] = cap
cap_ids.append(cap_id)
startends.append(np.stack(startend))
caps_ids.append(np.stack(cap_ids))
startends = ShardedTensor.from_list(startends)
target_path = os.path.join(
target_dir,
prefix + split + "_" + str(shard_idx)
)
print("save to", target_path)
startends.save(target_path + ".startends")
caps_ids = ShardedTensor.from_list(caps_ids)
caps_ids.save(target_path + ".caps_ids")
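# Added note (made-up token ids): with max_cap_len=32, a 5-token caption such
# as [101, 2023, 2003, 1037, 102] becomes a length-32 int32 vector
# [101, 2023, 2003, 1037, 102, -1, ..., -1]; longer captions are truncated to
# 32 tokens before padding, and the per-caption [start, end] timestamps are
# stacked into a parallel float32 array.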
def sharding(config, out_file):
with open(out_file, "rb") as fr:
captions = pickle.load(fr)
target_dir = config.target_dir
prefix = os.path.basename(
os.path.splitext(config.caption_pkl_path)[0]
) + "." + config.bert_name + "."
for split in ["train", "val"]:
target_path = os.path.join(target_dir, split + "_meta")
with open(target_path + ".pkl", "rb") as fr:
meta = pickle.load(fr)
print("load meta", target_path, len(meta))
for shard_id in meta:
numpify(
shard_id, meta[shard_id], captions,
target_dir, split, prefix
)
def tokenize(config, out_file):
def collator(samples):
return samples
dataset = TokenizerDataset(config)
data = {}
for idx, batch in enumerate(
DataLoader(dataset, collate_fn=collator, num_workers=16)):
for video_id, caption in batch:
data[video_id] = caption
if idx % 5000 == 0:
print(idx)
with open(out_file, "wb") as fw:
pickle.dump(data, fw, pickle.HIGHEST_PROTOCOL)
def main(args):
config = recursive_config(args.config).dataset
out_file = os.path.splitext(config.caption_pkl_path)[0] \
+ "." + config.bert_name + ".pkl"
if not os.path.isfile(out_file):
tokenize(config, out_file)
sharding(config, out_file)
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="pretokenize (raw_)caption.json into pkl.")
parser.add_argument('config', type=str)
args = parser.parse_args()
main(args)
| EXA-1-master | exa/libraries/fairseq/examples/MMPT/scripts/text_token_extractor/pretokenization.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os
import glob
import argparse
import pprint
import omegaconf
from omegaconf import OmegaConf
from torch.utils.data import DataLoader
from mmpt.utils import load_config, set_seed
from mmpt.evaluators import Evaluator
from mmpt.evaluators import predictor as predictor_path
from mmpt.tasks import Task
from mmpt import processors
from mmpt.datasets import MMDataset
def get_dataloader(config):
meta_processor_cls = getattr(processors, config.dataset.meta_processor)
video_processor_cls = getattr(processors, config.dataset.video_processor)
text_processor_cls = getattr(processors, config.dataset.text_processor)
aligner_cls = getattr(processors, config.dataset.aligner)
meta_processor = meta_processor_cls(config.dataset)
video_processor = video_processor_cls(config.dataset)
text_processor = text_processor_cls(config.dataset)
aligner = aligner_cls(config.dataset)
test_data = MMDataset(
meta_processor,
video_processor,
text_processor,
aligner,
)
print("test_len", len(test_data))
output = test_data[0]
test_data.print_example(output)
test_dataloader = DataLoader(
test_data,
batch_size=config.fairseq.dataset.batch_size,
shuffle=False,
num_workers=6,
collate_fn=test_data.collater,
)
return test_dataloader
def main(args):
config = load_config(args)
if isinstance(config, omegaconf.dictconfig.DictConfig):
print(OmegaConf.to_yaml(config))
else:
pp = pprint.PrettyPrinter(indent=4)
        pp.pprint(config)
mmtask = Task.config_task(config)
mmtask.build_model()
test_dataloader = get_dataloader(config)
checkpoint_search_path = os.path.dirname(config.eval.save_path)
results = []
prefix = os.path.basename(args.taskconfig)
if prefix.startswith("test"):
# loop all checkpoint for datasets without validation set.
if "best" not in config.fairseq.common_eval.path:
print("eval each epoch.")
for checkpoint in glob.glob(checkpoint_search_path + "/checkpoint*"):
model = mmtask.load_checkpoint(checkpoint)
ckpt = os.path.basename(checkpoint)
evaluator = Evaluator(config)
output = evaluator.evaluate(
model, test_dataloader, ckpt + "_merged")
results.append((checkpoint, output))
# use the one specified by the config lastly.
model = mmtask.load_checkpoint(config.fairseq.common_eval.path)
evaluator = Evaluator(config)
output = evaluator.evaluate(model, test_dataloader)
results.append((config.fairseq.common_eval.path, output))
best_result = None
best_metric = 0.
for checkpoint, result in results:
print(checkpoint)
evaluator.metric.print_computed_metrics(result)
best_score = evaluator.metric.best_metric(result)
if best_score > best_metric:
best_result = (checkpoint, result)
best_metric = best_score
print("best results:")
print(best_result[0])
evaluator.metric.print_computed_metrics(best_result[1])
elif prefix.startswith("vis"):
model = mmtask.load_checkpoint(config.fairseq.common_eval.path)
predictor_cls = getattr(predictor_path, config.predictor)
predictor = predictor_cls(config)
predictor.predict_loop(model, test_dataloader, mmtask, None)
else:
raise ValueError("unknown prefix of the config file", args.taskconfig)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("taskconfig", type=str)
args = parser.parse_args()
main(args)
| EXA-1-master | exa/libraries/fairseq/examples/MMPT/mmpt_cli/predict.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os
from mmpt.utils import recursive_config
class BaseJob(object):
def __init__(self, yaml_file, dryrun=False):
self.yaml_file = yaml_file
self.config = recursive_config(yaml_file)
self.dryrun = dryrun
def submit(self, **kwargs):
raise NotImplementedError
def _normalize_cmd(self, cmd_list):
cmd_list = list(cmd_list)
yaml_index = cmd_list.index("[yaml]")
cmd_list[yaml_index] = self.yaml_file
return cmd_list
class LocalJob(BaseJob):
CMD_CONFIG = {
"local_single": [
"fairseq-train", "[yaml]", "--user-dir", "mmpt",
"--task", "mmtask", "--arch", "mmarch",
"--criterion", "mmloss",
],
"local_small": [
"fairseq-train", "[yaml]", "--user-dir", "mmpt",
"--task", "mmtask", "--arch", "mmarch",
"--criterion", "mmloss",
"--distributed-world-size", "2"
],
"local_big": [
"fairseq-train", "[yaml]", "--user-dir", "mmpt",
"--task", "mmtask", "--arch", "mmarch",
"--criterion", "mmloss",
"--distributed-world-size", "8"
],
"local_predict": ["python", "mmpt_cli/predict.py", "[yaml]"],
}
def __init__(self, yaml_file, job_type=None, dryrun=False):
super().__init__(yaml_file, dryrun)
if job_type is None:
self.job_type = "local_single"
if self.config.task_type is not None:
self.job_type = self.config.task_type
else:
self.job_type = job_type
if self.job_type in ["local_single", "local_small"]:
if self.config.fairseq.dataset.batch_size > 32:
print("decreasing batch_size to 32 for local testing?")
def submit(self):
cmd_list = self._normalize_cmd(LocalJob.CMD_CONFIG[self.job_type])
if "predict" not in self.job_type:
# append fairseq args.
from mmpt.utils import load_config
config = load_config(config_file=self.yaml_file)
for field in config.fairseq:
for key in config.fairseq[field]:
if key in ["fp16", "reset_optimizer", "reset_dataloader", "reset_meters"]: # a list of binary flag.
param = ["--" + key.replace("_", "-")]
else:
if key == "lr":
value = str(config.fairseq[field][key][0])
elif key == "adam_betas":
value = "'"+str(config.fairseq[field][key])+"'"
else:
value = str(config.fairseq[field][key])
param = [
"--" + key.replace("_", "-"),
value
]
cmd_list.extend(param)
print("launching", " ".join(cmd_list))
if not self.dryrun:
os.system(" ".join(cmd_list))
return JobStatus("12345678")
class JobStatus(object):
def __init__(self, job_id):
self.job_id = job_id
def __repr__(self):
return self.job_id
def __str__(self):
return self.job_id
def done(self):
return False
def running(self):
return False
def result(self):
if self.done():
return "{} is done.".format(self.job_id)
else:
return "{} is running.".format(self.job_id)
def stderr(self):
return self.result()
def stdout(self):
return self.result()
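# Added illustration (hypothetical config values): LocalJob.submit flattens the
# fairseq section of the yaml into CLI flags, e.g. dataset.batch_size: 16 and
# optimization.lr: [0.0001] become "--batch-size 16 --lr 0.0001"; keys treated
# as binary flags (fp16, reset_optimizer, ...) are emitted bare ("--fp16"), and
# adam_betas is wrapped in quotes so the shell passes it as a single argument.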
| EXA-1-master | exa/libraries/fairseq/examples/MMPT/mmpt_cli/localjob.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from . import rxf_src # noqa
| EXA-1-master | exa/libraries/fairseq/examples/rxf/__init__.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from . import label_smoothed_cross_entropy_r3f, sentence_prediction_r3f # noqa
| EXA-1-master | exa/libraries/fairseq/examples/rxf/rxf_src/__init__.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import math
import torch
import torch.nn.functional as F
from fairseq import utils
from fairseq.criterions import FairseqCriterion, register_criterion
@register_criterion("sentence_prediction_r3f")
class SentencePredictionR3F(FairseqCriterion):
def __init__(
self,
task,
eps,
r3f_lambda,
noise_type,
classification_head_name,
regression_target,
):
super().__init__(task)
self.eps = eps
self.r3f_lambda = r3f_lambda
self.noise_type = noise_type
self.classification_head_name = classification_head_name
self.regression_target = regression_target
if self.noise_type in {"normal"}:
self.noise_sampler = torch.distributions.normal.Normal(
loc=0.0, scale=self.eps
)
elif self.noise_type == "uniform":
self.noise_sampler = torch.distributions.uniform.Uniform(
low=-self.eps, high=self.eps
)
else:
raise Exception(f"unrecognized noise type {self.noise_type}")
@staticmethod
def add_args(parser):
# fmt: off
parser.add_argument('--eps', type=float, default=1e-5,
help='noise eps')
parser.add_argument('--r3f-lambda', type=float, default=1.0,
help='lambda for combining logistic loss and noisy KL loss')
parser.add_argument('--noise-type', type=str, default='uniform',
choices=['normal', 'uniform'],
help='type of noises for RXF methods')
parser.add_argument('--classification-head-name',
default='sentence_classification_head',
help='name of the classification head to use')
parser.add_argument('--regression-target', action='store_true')
# fmt: on
def _get_symm_kl(self, noised_logits, input_logits):
return (
F.kl_div(
F.log_softmax(noised_logits, dim=-1, dtype=torch.float32),
F.softmax(input_logits, dim=-1, dtype=torch.float32),
None,
None,
"sum",
)
+ F.kl_div(
F.log_softmax(input_logits, dim=-1, dtype=torch.float32),
F.softmax(noised_logits, dim=-1, dtype=torch.float32),
None,
None,
"sum",
)
) / noised_logits.size(0)
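    # Added note: the two F.kl_div terms above form the symmetric KL
    #   D(p, q) = KL(p || q) + KL(q || p)
    # between the clean and noised output distributions, averaged over the
    # batch dimension; forward() rescales it by sample_size and adds
    # r3f_lambda * symm_kl on top of the classification loss.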
def forward(self, model, sample, reduce=True):
"""Compute the loss for the given sample.
Returns a tuple with three elements:
1) the loss
2) the sample size, which is used as the denominator for the gradient
3) logging outputs to display while training
"""
assert (
hasattr(model, "classification_heads")
and self.classification_head_name in model.classification_heads
), "model must provide sentence classification head for --criterion=sentence_prediction"
token_embeddings = model.encoder.sentence_encoder.embed_tokens(
sample["net_input"]["src_tokens"]
)
input_logits, _ = model(
**sample["net_input"],
features_only=True,
classification_head_name=self.classification_head_name,
token_embeddings=token_embeddings,
)
if model.training and self.noise_sampler:
noise = self.noise_sampler.sample(sample_shape=token_embeddings.shape).to(
token_embeddings
)
noised_embeddings = token_embeddings.detach().clone() + noise
noised_logits, _ = model(
**sample["net_input"],
features_only=True,
classification_head_name=self.classification_head_name,
token_embeddings=noised_embeddings,
)
symm_kl = self._get_symm_kl(noised_logits, input_logits)
else:
symm_kl = 0
targets = model.get_targets(sample, [input_logits]).view(-1)
sample_size = targets.numel()
if not self.regression_target:
loss = F.nll_loss(
F.log_softmax(input_logits, dim=-1, dtype=torch.float32),
targets,
reduction="sum",
)
if model.training:
symm_kl = symm_kl * sample_size
loss = loss + self.r3f_lambda * symm_kl
else:
logits = input_logits.squeeze().float()
targets = targets.float()
loss = F.mse_loss(logits, targets, reduction="sum")
logging_output = {
"loss": utils.item(loss.data) if reduce else loss.data,
"ntokens": sample["ntokens"],
"nsentences": sample_size,
"sample_size": sample_size,
}
if not self.regression_target:
preds = input_logits.max(dim=1)[1]
logging_output.update(ncorrect=(preds == targets).sum().item())
if model.training and self.noise_sampler:
logging_output.update(
symm_kl=utils.item(symm_kl.data) if reduce else symm_kl.data
)
return loss, sample_size, logging_output
@staticmethod
def aggregate_logging_outputs(logging_outputs):
"""Aggregate logging outputs from data parallel training."""
loss_sum = sum(log.get("loss", 0) for log in logging_outputs)
symm_kl_sum = sum(log.get("symm_kl", 0) for log in logging_outputs)
ntokens = sum(log.get("ntokens", 0) for log in logging_outputs)
nsentences = sum(log.get("nsentences", 0) for log in logging_outputs)
sample_size = sum(log.get("sample_size", 0) for log in logging_outputs)
agg_output = {
"loss": loss_sum / sample_size / math.log(2),
"symm_kl": symm_kl_sum / sample_size,
"ntokens": ntokens,
"nsentences": nsentences,
"sample_size": sample_size,
}
if len(logging_outputs) > 0 and "ncorrect" in logging_outputs[0]:
ncorrect = sum(log.get("ncorrect", 0) for log in logging_outputs)
agg_output.update(accuracy=ncorrect / nsentences)
if sample_size != ntokens:
agg_output["nll_loss"] = loss_sum / ntokens / math.log(2)
return agg_output
| EXA-1-master | exa/libraries/fairseq/examples/rxf/rxf_src/sentence_prediction_r3f.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import math
import torch
import torch.nn.functional as F
from fairseq import utils
from fairseq.logging import metrics
from fairseq.criterions import FairseqCriterion, register_criterion
from fairseq.criterions.label_smoothed_cross_entropy import label_smoothed_nll_loss
@register_criterion("label_smoothed_cross_entropy_r3f")
class LabelSmoothedCrossEntropyR3FCriterion(FairseqCriterion):
def __init__(
self, task, sentence_avg, label_smoothing, eps, r3f_lambda, noise_type
):
super().__init__(task)
self.sentence_avg = sentence_avg
self.label_smoothing = label_smoothing
self.eps = eps
self.r3f_lambda = r3f_lambda
self.noise_type = noise_type
if self.noise_type in {"normal"}:
self.noise_sampler = torch.distributions.normal.Normal(
loc=0.0, scale=self.eps
)
elif self.noise_type == "uniform":
self.noise_sampler = torch.distributions.uniform.Uniform(
low=-self.eps, high=self.eps
)
else:
raise Exception(f"unrecognized noise type {self.noise_type}")
@staticmethod
def add_args(parser):
"""Add criterion-specific arguments to the parser."""
# fmt: off
parser.add_argument('--label-smoothing', default=0., type=float, metavar='D',
help='epsilon for label smoothing, 0 means no label smoothing')
parser.add_argument('--eps', type=float, default=1e-5,
help='noise eps')
parser.add_argument('--r3f-lambda', type=float, default=1.0,
help='lambda for combining logistic loss and noisy KL loss')
parser.add_argument('--noise-type', type=str, default='normal',
choices=['normal', 'uniform'],
help='type of noises')
# fmt: on
def _get_symm_kl(self, noised_logits, input_logits):
return (
F.kl_div(
F.log_softmax(noised_logits, dim=-1, dtype=torch.float32),
F.softmax(input_logits, dim=-1, dtype=torch.float32),
None,
None,
"sum",
)
+ F.kl_div(
F.log_softmax(input_logits, dim=-1, dtype=torch.float32),
F.softmax(noised_logits, dim=-1, dtype=torch.float32),
None,
None,
"sum",
)
) / noised_logits.size(0)
def forward(self, model, sample, reduce=True):
"""Compute the loss for the given sample.
Returns a tuple with three elements:
1) the loss
2) the sample size, which is used as the denominator for the gradient
3) logging outputs to display while training
"""
token_embeddings = model.encoder.embed_tokens(sample["net_input"]["src_tokens"])
input_logits, extra = model(**sample["net_input"])
loss, nll_loss = self.compute_loss(
model, (input_logits, extra), sample, reduce=reduce
)
sample_size = (
sample["target"].size(0) if self.sentence_avg else sample["ntokens"]
)
if model.training:
noise = self.noise_sampler.sample(sample_shape=token_embeddings.shape).to(
token_embeddings
)
noised_embeddings = token_embeddings.clone() + noise
noised_logits, _ = model(
**sample["net_input"], token_embeddings=noised_embeddings
)
symm_kl = self._get_symm_kl(noised_logits, input_logits)
if model.training:
symm_kl = symm_kl * sample_size
loss = loss + self.r3f_lambda * symm_kl
logging_output = {
"loss": loss.data,
"nll_loss": nll_loss.data,
"ntokens": sample["ntokens"],
"nsentences": sample["target"].size(0),
"sample_size": sample_size,
}
if model.training:
logging_output.update(
symm_kl=utils.item(symm_kl.data) if reduce else symm_kl.data
)
return loss, sample_size, logging_output
def compute_loss(self, model, net_output, sample, reduce=True):
lprobs = model.get_normalized_probs(net_output, log_probs=True)
lprobs = lprobs.view(-1, lprobs.size(-1))
target = model.get_targets(sample, net_output).view(-1, 1)
loss, nll_loss = label_smoothed_nll_loss(
lprobs,
target,
self.label_smoothing,
ignore_index=self.padding_idx,
reduce=reduce,
)
return loss, nll_loss
@staticmethod
def reduce_metrics(logging_outputs) -> None:
"""Aggregate logging outputs from data parallel training."""
loss_sum = sum(log.get("loss", 0) for log in logging_outputs)
nll_loss_sum = sum(log.get("nll_loss", 0) for log in logging_outputs)
ntokens = sum(log.get("ntokens", 0) for log in logging_outputs)
sample_size = sum(log.get("sample_size", 0) for log in logging_outputs)
symm_kl_sum = sum(log.get("symm_kl", 0) for log in logging_outputs)
metrics.log_scalar("symm_kl", symm_kl_sum / sample_size, sample_size, round=3)
metrics.log_scalar(
"loss", loss_sum / sample_size / math.log(2), sample_size, round=3
)
metrics.log_scalar(
"nll_loss", nll_loss_sum / ntokens / math.log(2), ntokens, round=3
)
metrics.log_derived(
"ppl", lambda meters: utils.get_perplexity(meters["nll_loss"].avg)
)
@staticmethod
def logging_outputs_can_be_summed() -> bool:
"""
Whether the logging outputs returned by `forward` can be summed
        across workers prior to calling `reduce_metrics`. Setting this
        to True will improve distributed training speed.
"""
return True
| EXA-1-master | exa/libraries/fairseq/examples/rxf/rxf_src/label_smoothed_cross_entropy_r3f.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from . import transformer_xl_model, truncated_bptt_lm_task # noqa
| EXA-1-master | exa/libraries/fairseq/examples/truncated_bptt/__init__.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
import os
from dataclasses import dataclass, field
from typing import List, Optional, Tuple
import torch
from fairseq import utils
from fairseq.data import (
Dictionary,
TokenBlockDataset,
data_utils,
iterators,
)
from fairseq.dataclass import FairseqDataclass
from fairseq.distributed import utils as dist_utils
from fairseq.tasks import FairseqTask, register_task
from omegaconf import II
logger = logging.getLogger(__name__)
@dataclass
class TruncatedBPTTLMConfig(FairseqDataclass):
data: str = field(default="???", metadata={"help": "path to data directory"})
tokens_per_sample: int = field(
default=1024, metadata={"help": "max number of tokens per sequence"},
)
batch_size: int = II("dataset.batch_size")
# Some models use *max_target_positions* to know how many positional
# embeddings to learn. We use II(...) to make it default to
# *tokens_per_sample*, but in principle there could be more positional
# embeddings than tokens in a single batch. This may also be irrelevant for
# custom model implementations.
max_target_positions: int = II("task.tokens_per_sample")
# these will be populated automatically if not provided
data_parallel_rank: Optional[int] = None
data_parallel_size: Optional[int] = None
@register_task("truncated_bptt_lm", dataclass=TruncatedBPTTLMConfig)
class TruncatedBPTTLMTask(FairseqTask):
def __init__(self, cfg: TruncatedBPTTLMConfig):
super().__init__(cfg)
if cfg.data_parallel_rank is None or cfg.data_parallel_size is None:
if torch.distributed.is_initialized():
cfg.data_parallel_rank = dist_utils.get_data_parallel_rank()
cfg.data_parallel_size = dist_utils.get_data_parallel_world_size()
else:
cfg.data_parallel_rank = 0
cfg.data_parallel_size = 1
# load the dictionary
paths = utils.split_paths(cfg.data)
assert len(paths) > 0
self.dictionary = Dictionary.load(os.path.join(paths[0], "dict.txt"))
logger.info("dictionary: {} types".format(len(self.dictionary)))
def load_dataset(self, split, epoch=1, combine=False, **kwargs):
"""Load a given dataset split (e.g., train, valid, test)"""
# support sharded datasets
paths = utils.split_paths(self.cfg.data)
assert len(paths) > 0
data_path = paths[(epoch - 1) % len(paths)]
split_path = os.path.join(data_path, split)
# each element of *data* will be a tensorized line from the original
# text dataset, similar to ``open(split_path).readlines()``
data = data_utils.load_indexed_dataset(
split_path, self.dictionary, combine=combine
)
if data is None:
raise FileNotFoundError(
"Dataset not found: {} ({})".format(split, split_path)
)
# this is similar to ``data.view(-1).split(tokens_per_sample)``
data = TokenBlockDataset(
data,
data.sizes,
block_size=self.cfg.tokens_per_sample,
pad=None, # unused
eos=None, # unused
break_mode="none",
)
self.datasets[split] = TruncatedBPTTDataset(
data=data,
bsz_per_shard=self.cfg.batch_size,
shard_id=self.cfg.data_parallel_rank,
num_shards=self.cfg.data_parallel_size,
)
def dataset(self, split):
return self.datasets[split]
def get_batch_iterator(
self,
dataset,
num_workers=0,
epoch=1,
data_buffer_size=0,
skip_remainder_batch=False,
**kwargs
):
return iterators.EpochBatchIterator(
dataset=dataset,
collate_fn=self._collate_fn,
num_workers=num_workers,
epoch=epoch,
buffer_size=data_buffer_size,
# we don't use the batching functionality from EpochBatchIterator;
# instead every item in *dataset* is a whole batch
batch_sampler=[[i] for i in range(len(dataset))],
disable_shuffling=True,
skip_remainder_batch=skip_remainder_batch,
)
def _collate_fn(self, items: List[List[torch.Tensor]]):
        # we don't use fairseq's batching functionality, so we expect a single
        # item of the form Tuple[int, List[torch.Tensor]]
assert len(items) == 1
# item will have shape B x T (the last batch may have length < T)
id, item = items[0]
item = data_utils.collate_tokens(item, pad_idx=self.source_dictionary.pad())
B, T = item.size()
# shift item one position over and append a padding token for the target
target = torch.nn.functional.pad(
item[:, 1:], (0, 1, 0, 0), value=self.target_dictionary.pad()
)
# fairseq expects batches to have the following structure
return {
"id": torch.tensor([id] * item.size(0)),
"net_input": {"src_tokens": item,},
"target": target,
"nsentences": item.size(0),
"ntokens": item.numel(),
}
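        # Added illustration (made-up token ids): for item = [[5, 6, 7, 8]]
        # (B=1, T=4) with pad id 1, src_tokens stays [5, 6, 7, 8] and target
        # becomes [6, 7, 8, 1] -- the LM target is the input shifted left by
        # one position with a pad token appended at the end.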
def build_dataset_for_inference(
self, src_tokens: List[torch.Tensor], src_lengths: List[int], **kwargs
) -> torch.utils.data.Dataset:
eos = self.source_dictionary.eos()
dataset = TokenBlockDataset(
src_tokens,
src_lengths,
block_size=None, # ignored for "eos" break mode
pad=self.source_dictionary.pad(),
eos=eos,
break_mode="eos",
)
class Dataset(torch.utils.data.Dataset):
def __getitem__(self, i):
item = dataset[i]
if item[-1] == eos:
# remove eos to support generating with a prefix
item = item[:-1]
return (i, [item])
def __len__(self):
return len(dataset)
return Dataset()
def inference_step(
self, generator, models, sample, prefix_tokens=None, constraints=None
):
with torch.no_grad():
if constraints is not None:
raise NotImplementedError
# SequenceGenerator doesn't use *src_tokens* directly, we need to
# pass the *prefix_tokens* argument instead.
if prefix_tokens is None and sample["net_input"]["src_tokens"].nelement():
prefix_tokens = sample["net_input"]["src_tokens"]
# begin generation with the end-of-sentence token
bos_token = self.source_dictionary.eos()
return generator.generate(
models, sample, prefix_tokens=prefix_tokens, bos_token=bos_token
)
def eval_lm_dataloader(
self,
dataset,
max_tokens: Optional[int] = 36000,
batch_size: Optional[int] = None,
max_positions: Optional[int] = None,
num_shards: int = 1,
shard_id: int = 0,
num_workers: int = 1,
data_buffer_size: int = 10,
context_window: int = 0,
):
if context_window > 0:
raise NotImplementedError(
"Transformer-XL doesn't need --context-window, try "
"--model-overrides '{\"mem_len\":42}' instead "
)
return self.get_batch_iterator(
dataset=dataset,
max_tokens=max_tokens,
max_sentences=batch_size,
max_positions=max_positions,
ignore_invalid_inputs=True,
num_shards=num_shards,
shard_id=shard_id,
num_workers=num_workers,
data_buffer_size=data_buffer_size,
).next_epoch_itr(shuffle=False)
@property
def source_dictionary(self):
return self.dictionary
@property
def target_dictionary(self):
return self.dictionary
class TruncatedBPTTDataset(torch.utils.data.Dataset):
def __init__(
self,
data: List[torch.Tensor], # ordered list of items
        bsz_per_shard,  # number of items processed per GPU per forward
shard_id, # current GPU ID
num_shards, # number of GPUs
):
super().__init__()
self.data = data
def batchify(data, bsz):
# Work out how cleanly we can divide the dataset into bsz parts.
nbatch = data.size(0) // bsz
# Trim off any extra elements that wouldn't cleanly fit (remainders).
data = data.narrow(0, 0, nbatch * bsz)
# Evenly divide the data across the bsz batches.
data = data.view(bsz, -1).contiguous()
return data
# total number of sequences processed by all GPUs in each forward pass
global_batch_size = bsz_per_shard * num_shards
"""
With a 16 item dataset, bsz_per_shard=2 and num_shards=3,
*indices* might look like:
indices = [[0, 1],
[2, 3],
[4, 5],
[6, 7],
[8, 9],
[10, 11]]
The size of the TruncatedBPTTDataset instance will be 2,
and shard 1 will see items:
[(0, [data[4], data[6]]),
(1, [data[5], data[7]])]
"""
indices = batchify(torch.arange(len(data)), global_batch_size)
assert indices.size(0) == global_batch_size
self.my_indices = indices[
shard_id * bsz_per_shard : (shard_id + 1) * bsz_per_shard
]
assert self.my_indices.size(0) == bsz_per_shard
def __len__(self):
return self.my_indices.size(1)
def __getitem__(self, i) -> Tuple[int, List[torch.Tensor]]:
return (i, [self.data[idx] for idx in self.my_indices[:, i]])
| EXA-1-master | exa/libraries/fairseq/examples/truncated_bptt/truncated_bptt_lm_task.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
from dataclasses import dataclass, field
from typing import Dict, List, Optional
import torch
from fairseq.dataclass import FairseqDataclass
from fairseq.models import (
FairseqIncrementalDecoder,
FairseqLanguageModel,
register_model,
)
from fairseq.modules.checkpoint_activations import checkpoint_wrapper
from omegaconf import II
logger = logging.getLogger(__name__)
@dataclass
class TransformerXLConfig(FairseqDataclass):
# defaults come from the original Transformer-XL code
cutoffs: List[int] = field(default_factory=lambda: [20000, 40000, 200000])
d_model: int = 500
n_head: int = 10
d_head: int = 50
d_inner: int = 1000
div_val: int = 1
n_layer: int = 12
mem_len: int = 0
clamp_len: int = -1
same_length: bool = False
dropout: float = 0.0
dropatt: float = 0.0
checkpoint_activations: bool = False
offload_activations: bool = False
max_target_positions: int = II("task.max_target_positions")
@register_model("transformer_xl", dataclass=TransformerXLConfig)
class TransformerXLLanguageModel(FairseqLanguageModel):
@classmethod
def build_model(cls, cfg: TransformerXLConfig, task):
return cls(TransformerXLDecoder(cfg, task))
class TransformerXLDecoder(FairseqIncrementalDecoder):
def __init__(self, cfg, task):
try:
from transformers.models.transfo_xl import (
TransfoXLConfig,
TransfoXLLMHeadModel,
)
except ImportError:
from transformers.configuration_transfo_xl import TransfoXLConfig
from transformers.modeling_transfo_xl import TransfoXLLMHeadModel
super().__init__(task.target_dictionary)
self.cfg = cfg
# remove any cutoffs larger than the vocab size
cutoffs = [
cutoff for cutoff in cfg.cutoffs if cutoff < len(task.target_dictionary)
]
config = TransfoXLConfig(
vocab_size=len(task.target_dictionary),
cutoffs=cutoffs,
d_model=cfg.d_model,
d_embed=cfg.d_model,
n_head=cfg.n_head,
d_head=cfg.d_head,
d_inner=cfg.d_inner,
div_val=cfg.div_val,
n_layer=cfg.n_layer,
mem_len=cfg.mem_len,
clamp_len=cfg.clamp_len,
same_length=cfg.same_length,
dropout=cfg.dropout,
dropatt=cfg.dropatt,
)
logger.info(config)
self.model = TransfoXLLMHeadModel(config)
if cfg.checkpoint_activations or cfg.offload_activations:
for i in range(len(self.model.transformer.layers)):
self.model.transformer.layers[i] = checkpoint_wrapper(
self.model.transformer.layers[i],
offload_to_cpu=cfg.offload_activations,
)
# TODO: may save mem to wrap(layer.pos_ff.CoreNet[3])
self._mems = None
def forward(
self,
src_tokens,
src_lengths=None, # unused
incremental_state: Optional[Dict[str, List[torch.Tensor]]] = None,
encoder_out=None,
):
if incremental_state is not None: # used during inference
mems = self.get_incremental_state(incremental_state, "mems")
src_tokens = src_tokens[:, -1:] # only keep the most recent token
else:
mems = self._mems
output = self.model(
input_ids=src_tokens,
mems=mems,
return_dict=False,
)
if len(output) >= 2:
if incremental_state is not None:
self.set_incremental_state(incremental_state, "mems", output[1])
else:
self._mems = output[1]
return (output[0],)
def max_positions(self):
return self.cfg.max_target_positions
def reorder_incremental_state(
self,
incremental_state: Dict[str, Dict[str, Optional[torch.Tensor]]],
new_order: torch.Tensor,
):
"""Reorder incremental state.
This will be called when the order of the input has changed from the
previous time step. A typical use case is beam search, where the input
order changes between time steps based on the selection of beams.
"""
mems = self.get_incremental_state(incremental_state, "mems")
if mems is not None:
new_mems = [mems_i.index_select(1, new_order) for mems_i in mems]
self.set_incremental_state(incremental_state, "mems", new_mems)
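        # Added illustration: during beam search with new_order = tensor([1, 0]),
        # each cached memory tensor (roughly [mem_len, batch, d_model] in the
        # HuggingFace implementation) has its batch dimension (dim=1) permuted,
        # so the history of beam 1 moves into slot 0 and vice versa.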
| EXA-1-master | exa/libraries/fairseq/examples/truncated_bptt/transformer_xl_model.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from . import tasks, criterions, models # noqa
| EXA-1-master | exa/libraries/fairseq/examples/speech_text_joint_to_text/__init__.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
import os
from argparse import Namespace
from pathlib import Path
import torch
from fairseq.data import (
encoders,
Dictionary,
ResamplingDataset,
TransformEosLangPairDataset,
ConcatDataset,
)
from fairseq.data.iterators import GroupedEpochBatchIterator
from fairseq.data.audio.multi_modality_dataset import (
MultiModalityDataset,
LangPairMaskDataset,
ModalityDatasetItem,
)
from fairseq.data.audio.speech_to_text_dataset import (
SpeechToTextDataset,
SpeechToTextDatasetCreator,
)
from fairseq.data.audio.speech_to_text_joint_dataset import (
S2TJointDataConfig,
SpeechToTextJointDatasetCreator,
)
from fairseq.tasks import register_task
from fairseq.tasks.speech_to_text import SpeechToTextTask
from fairseq.tasks.translation import load_langpair_dataset
logger = logging.getLogger(__name__)
LANG_TAG_TEMPLATE = "<lang:{}>"
@register_task("speech_text_joint_to_text")
class SpeechTextJointToTextTask(SpeechToTextTask):
"""
    Task for joint speech-to-text and text-to-text training.
"""
@classmethod
def add_args(cls, parser):
"""Add task-specific arguments to the parser."""
super(SpeechTextJointToTextTask, cls).add_args(parser)
###
parser.add_argument(
"--parallel-text-data",
default="",
help="path to parallel text data directory",
)
parser.add_argument(
"--max-tokens-text",
type=int,
metavar="N",
help="maximum tokens for encoder text input ",
)
parser.add_argument(
"--max-positions-text",
type=int,
metavar="N",
default=400,
help="maximum tokens for per encoder text input ",
)
parser.add_argument(
"--langpairs",
default=None,
metavar="S",
help='language pairs for text training, separated with ","',
)
parser.add_argument(
"--speech-sample-ratio",
default=1,
type=float,
metavar="N",
help="Multiple Ratio for speech dataset with transcripts ",
)
parser.add_argument(
"--text-sample-ratio",
default=1,
type=float,
metavar="N",
help="Multiple Ratio for text set ",
)
parser.add_argument(
"--update-mix-data",
action="store_true",
help="use mixed data in one update when update-freq > 1",
)
parser.add_argument(
"--load-speech-only", action="store_true", help="load speech data only",
)
parser.add_argument(
"--mask-text-ratio",
type=float,
metavar="V",
default=0.0,
help="mask V source tokens for text only mode",
)
parser.add_argument(
"--mask-text-type",
default="random",
choices=["random", "tail"],
help="mask text typed",
)
parser.add_argument(
"--noise-token",
default="",
help="noise token for masking src text tokens if mask-text-ratio > 0",
)
parser.add_argument(
"--infer-target-lang",
default="",
metavar="S",
help="target language for inference",
)
def __init__(self, args, src_dict, tgt_dict, infer_tgt_lang_id=None):
super().__init__(args, tgt_dict)
self.src_dict = src_dict
self.data_cfg = S2TJointDataConfig(Path(args.data) / args.config_yaml)
assert self.tgt_dict.pad() == self.src_dict.pad()
assert self.tgt_dict.eos() == self.src_dict.eos()
self.speech_only = args.load_speech_only
self._infer_tgt_lang_id = infer_tgt_lang_id
@classmethod
def setup_task(cls, args, **kwargs):
"""Setup the task (e.g., load dictionaries)."""
data_cfg = S2TJointDataConfig(Path(args.data) / args.config_yaml)
tgt_dict_path = Path(args.data) / data_cfg.vocab_filename
src_dict_path = Path(args.data) / data_cfg.src_vocab_filename
if (not os.path.isfile(src_dict_path)) or (not os.path.isfile(tgt_dict_path)):
raise FileNotFoundError("Dict not found: {}".format(args.data))
src_dict = Dictionary.load(src_dict_path.as_posix())
tgt_dict = Dictionary.load(tgt_dict_path.as_posix())
print("| src dictionary: {} types".format(len(src_dict)))
print("| tgt dictionary: {} types".format(len(tgt_dict)))
if args.parallel_text_data != "":
if not os.path.isabs(args.parallel_text_data):
args.parallel_text_data = os.path.join(
args.data, args.parallel_text_data
)
if args.langpairs is None:
raise Exception(
"Could not infer language pair, please provide it explicitly"
)
infer_tgt_lang_id = None
if args.infer_target_lang != "" and data_cfg.prepend_tgt_lang_tag_no_change:
tgt_lang_tag = SpeechToTextDataset.LANG_TAG_TEMPLATE.format(
args.infer_target_lang
)
infer_tgt_lang_id = tgt_dict.index(tgt_lang_tag)
assert infer_tgt_lang_id != tgt_dict.unk()
return cls(args, src_dict, tgt_dict, infer_tgt_lang_id=infer_tgt_lang_id)
def load_langpair_dataset(
self, prepend_tgt_lang_tag=False, sampling_alpha=1.0, epoch=0
):
lang_pairs = []
text_dataset = None
split = "train"
for lp in self.args.langpairs.split(","):
src, tgt = lp.split("-")
text_dataset = load_langpair_dataset(
self.args.parallel_text_data,
split,
src,
self.src_dict,
tgt,
self.tgt_dict,
combine=True,
dataset_impl=None,
upsample_primary=1,
left_pad_source=False,
left_pad_target=False,
max_source_positions=self.args.max_positions_text,
max_target_positions=self.args.max_target_positions,
load_alignments=False,
truncate_source=False,
)
if prepend_tgt_lang_tag:
# TODO
text_dataset = TransformEosLangPairDataset(
text_dataset,
src_eos=self.src_dict.eos(),
tgt_bos=self.tgt_dict.eos(), # 'prev_output_tokens' starts with eos
new_tgt_bos=self.tgt_dict.index(LANG_TAG_TEMPLATE.format(tgt)),
)
lang_pairs.append(text_dataset)
if len(lang_pairs) > 1:
if sampling_alpha != 1.0:
size_ratios = SpeechToTextDatasetCreator.get_size_ratios(
self.args.langpairs.split(","),
[len(s) for s in lang_pairs],
alpha=sampling_alpha,
)
lang_pairs = [
ResamplingDataset(d, size_ratio=r, epoch=epoch, replace=(r >= 1.0))
for d, r in zip(lang_pairs, size_ratios)
]
return ConcatDataset(lang_pairs)
return text_dataset
def inference_step(
self, generator, models, sample, prefix_tokens=None, constraints=None
):
with torch.no_grad():
return generator.generate(
models,
sample,
prefix_tokens=prefix_tokens,
constraints=constraints,
bos_token=self._infer_tgt_lang_id,
)
def build_src_tokenizer(self, args):
logger.info(f"src-pre-tokenizer: {self.data_cfg.src_pre_tokenizer}")
return encoders.build_tokenizer(Namespace(**self.data_cfg.src_pre_tokenizer))
def build_src_bpe(self, args):
logger.info(f"tokenizer: {self.data_cfg.src_bpe_tokenizer}")
return encoders.build_bpe(Namespace(**self.data_cfg.src_bpe_tokenizer))
def load_dataset(self, split, epoch=1, combine=False, **kwargs):
"""Load a given dataset split.
Args:
split (str): name of the split (e.g., train, valid, test)
"""
is_train_split = split.startswith("train")
pre_tokenizer = self.build_tokenizer(self.args)
bpe_tokenizer = self.build_bpe(self.args)
src_pre_tokenizer = self.build_src_tokenizer(self.args)
src_bpe_tokenizer = self.build_src_bpe(self.args)
ast_dataset = SpeechToTextJointDatasetCreator.from_tsv(
self.args.data,
self.data_cfg,
split,
self.tgt_dict,
src_dict=None if self.speech_only else self.src_dict,
pre_tokenizer=pre_tokenizer,
bpe_tokenizer=bpe_tokenizer,
src_pre_tokenizer=src_pre_tokenizer,
src_bpe_tokenizer=src_bpe_tokenizer,
is_train_split=is_train_split,
epoch=epoch,
seed=self.args.seed,
)
noise_token_id = -1
text_dataset = None
if self.args.parallel_text_data != "" and is_train_split:
text_dataset = self.load_langpair_dataset(
self.data_cfg.prepend_tgt_lang_tag_no_change, 1.0, epoch=epoch,
)
if self.args.mask_text_ratio > 0:
# add mask
noise_token_id = (
self.src_dict.unk()
if self.args.noise_token == ""
else self.src_dict.index(self.args.noise_token)
)
text_dataset = LangPairMaskDataset(
text_dataset,
src_bos=self.src_dict.bos(),
src_eos=self.src_dict.eos(),
noise_id=noise_token_id,
mask_ratio=self.args.mask_text_ratio,
mask_type=self.args.mask_text_type,
)
if text_dataset is not None:
mdsets = [
ModalityDatasetItem(
"sup_speech",
ast_dataset,
(self.args.max_source_positions, self.args.max_target_positions),
self.args.max_tokens,
self.args.batch_size,
),
ModalityDatasetItem(
"text",
text_dataset,
(self.args.max_positions_text, self.args.max_target_positions),
self.args.max_tokens_text
if self.args.max_tokens_text is not None
else self.args.max_tokens,
self.args.batch_size,
),
]
ast_dataset = MultiModalityDataset(mdsets)
self.datasets[split] = ast_dataset
@property
def target_dictionary(self):
"""Return the :class:`~fairseq.data.Dictionary` for the language
model."""
return self.tgt_dict
@property
def source_dictionary(self):
"""Return the source :class:`~fairseq.data.Dictionary` (if applicable
for this task)."""
return None if self.speech_only else self.src_dict
def get_batch_iterator(
self,
dataset,
max_tokens=None,
max_sentences=None,
max_positions=None,
ignore_invalid_inputs=False,
required_batch_size_multiple=1,
seed=1,
num_shards=1,
shard_id=0,
num_workers=0,
epoch=0,
data_buffer_size=0,
disable_iterator_cache=False,
skip_remainder_batch=False,
grouped_shuffling=False,
update_epoch_batch_itr=False,
):
if not isinstance(dataset, MultiModalityDataset):
return super(SpeechTextJointToTextTask, self).get_batch_iterator(
dataset,
max_tokens,
max_sentences,
max_positions,
ignore_invalid_inputs,
required_batch_size_multiple,
seed,
num_shards,
shard_id,
num_workers,
epoch,
data_buffer_size,
disable_iterator_cache,
skip_remainder_batch=skip_remainder_batch,
update_epoch_batch_itr=update_epoch_batch_itr,
)
mult_ratio = [self.args.speech_sample_ratio, self.args.text_sample_ratio]
assert len(dataset.datasets) == 2
# initialize the dataset with the correct starting epoch
dataset.set_epoch(epoch)
batch_samplers = dataset.get_batch_samplers(
mult_ratio, required_batch_size_multiple, seed
)
# return a reusable, sharded iterator
epoch_iter = GroupedEpochBatchIterator(
dataset=dataset,
collate_fn=dataset.collater,
batch_samplers=batch_samplers,
seed=seed,
num_shards=num_shards,
shard_id=shard_id,
num_workers=num_workers,
epoch=epoch,
mult_rate=1 if self.args.update_mix_data else max(self.args.update_freq),
buffer_size=data_buffer_size,
skip_remainder_batch=skip_remainder_batch,
)
self.dataset_to_epoch_iter[dataset] = {} # refresh it every epoch
return epoch_iter
| EXA-1-master | exa/libraries/fairseq/examples/speech_text_joint_to_text/tasks/speech_text_joint.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import importlib
import os
| EXA-1-master | exa/libraries/fairseq/examples/speech_text_joint_to_text/tasks/__init__.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
import os
import re
from argparse import Namespace
from pathlib import Path
from fairseq.data import ConcatDataset, Dictionary, encoders
from fairseq.data.audio.multi_modality_dataset import (
FileAudioDatasetWrapper,
ModalityDatasetItem,
MultiModalityDataset,
)
from fairseq.data.audio.speech_to_text_joint_dataset import (
S2TJointDataConfig,
SpeechToTextJointDatasetCreator,
)
from fairseq.data.iterators import GroupedEpochBatchIterator
from fairseq.tasks import register_task
from .pair_denoising import PairedDenoisingTask
logger = logging.getLogger(__name__)
@register_task("speech_text_joint_denoising")
class SpeechTextJointDenoisingPreTask(PairedDenoisingTask):
"""
Joint denoising training task for speech and text.
"""
SIL_TOKEN = "sil"
@classmethod
def add_args(cls, parser):
PairedDenoisingTask.add_args(parser)
# set max tokens and position
parser.add_argument(
"--max-text-tokens",
type=int,
metavar="N",
default=1024,
help="maximum samples for encoder text input ",
)
parser.add_argument(
"--max-speech-tokens",
type=int,
metavar="N",
default=50000,
help="maximum samples for encoder speech input ",
)
parser.add_argument(
"--max-speech-positions",
type=int,
metavar="N",
default=400,
help="maximum tokens for per encoder text input ",
)
parser.add_argument(
"--max-sample-size",
type=int,
metavar="N",
default=32000,
help="max sample size to crop to for batching (unsupervised speech) ",
)
parser.add_argument(
"--min-sample-size",
type=int,
metavar="N",
default=4000,
help="min sample size to crop to for batching (unsupervised speech) ",
)
# set mini-batch ratio for different modalities/subtasks
# s2p
parser.add_argument(
"--supervised-speech-sample-ratio",
default="1",
type=str,
metavar="N",
help="Multiple Ratio for speech dataset with transcripts ",
)
# s2t
parser.add_argument(
"--supervised-speech-s2s-sample-ratio",
default="1",
type=str,
metavar="N",
help="Multiple Ratio for speech dataset with transcripts ",
)
# ssl
parser.add_argument(
"--unsupervised-speech-sample-ratio",
default="1",
type=str,
metavar="N",
help="Multiple Ratio for speech dataset without transcripts ",
)
# t2t with monolingual data (masking)
parser.add_argument(
"--text-sample-ratio",
default="1",
type=str,
metavar="N",
help="Multiple Ratio for text set ",
)
# t2t with parallel data (no masking)
parser.add_argument(
"--bitext-sample-ratio",
default="1",
type=str,
metavar="N",
help="Multiple Ratio for text set (bitext) ",
)
# train_subset = "train", 'valid' or so
# parallel data is loaded according to string lang_pairs and lang_pairs_no_mask from args.data
# (un)supervised speech is loaded from args.(un)sup_speech_{train,valid}_subset
parser.add_argument(
"--sup-speech-data", default="", help="path to supervised speech data"
)
parser.add_argument(
"--sup-speech-train-subset",
default="",
help="supervised speech training subsets",
)
parser.add_argument(
"--sup-speech-valid-subset",
default="",
help="supervised speech validation subsets",
)
parser.add_argument(
"--config-yaml",
default="config.yaml",
help="supervised speech configuration yaml file",
)
parser.add_argument(
"--sup-speech-s2s-data", default="", help="path to supervised speech data"
)
parser.add_argument(
"--sup-speech-s2s-train-subset",
default="",
help="supervised speech training subsets",
)
parser.add_argument(
"--sup-speech-s2s-valid-subset",
default="",
help="supervised speech validation subsets",
)
parser.add_argument(
"--config-s2s-yaml",
default="config.yaml",
help="supervised speech configuration yaml file",
)
parser.add_argument(
"--unsup-speech-train-data",
default="",
help="path to unsupervised speech training data (tsv)",
)
parser.add_argument(
"--unsup-speech-valid-data",
default="",
help="path to unsupervised speech valid data (tsv)",
)
parser.add_argument(
"--sample-rate",
type=int,
metavar="N",
default=16000,
help="input audio sampling rate",
)
parser.add_argument(
"--no-emb-update-unsup",
default=False,
action="store_true",
help="no update for output embedding during unsupervised_speech mode",
)
parser.add_argument("--same-data-update", default=False, action="store_true")
# used for sup_speech_ali
parser.add_argument(
"--use-sup-speech-ctc",
default=False,
action="store_true",
help="use speech_sup_ctc instead of speech_sup_ali",
)
@classmethod
def setup_task(cls, args, **kwargs):
"""Setup the task."""
paths = args.data.split(":")
assert len(paths) > 0
src_dict = Dictionary.load(
os.path.join(paths[0], "src_dict.txt")
) # assume all languages share a source dictionary
tgt_dict = Dictionary.load(
os.path.join(paths[0], "tgt_dict.txt")
) # assume all languages share a target dictionary
lang_pairs = args.lang_pairs + "," + args.lang_pairs_bitext
lang_pairs = re.sub(",$", "", re.sub("^,", "", lang_pairs))
if lang_pairs != "":
src_langs = [lp.split("-")[0] for lp in lang_pairs.split(",")]
tgt_langs = [lp.split("-")[1] for lp in lang_pairs.split(",")]
else:
src_langs = []
tgt_langs = []
if args.add_src_lang_token:
for lang in src_langs:
assert (
src_dict.index(PairedDenoisingTask.LANG_TAG_TEMPLATE.format(lang))
!= src_dict.unk()
)
if args.add_tgt_lang_token:
for lang in tgt_langs:
assert (
tgt_dict.index(PairedDenoisingTask.LANG_TAG_TEMPLATE.format(lang))
!= tgt_dict.unk()
)
logger.info("source dictionary: {} types".format(len(src_dict)))
logger.info("target dictionary: {} types".format(len(tgt_dict)))
if not hasattr(args, "shuffle_instance"):
args.shuffle_instance = False
return cls(args, src_dict, tgt_dict)
def __init__(self, args, src_dict, tgt_dict):
super().__init__(args, src_dict, tgt_dict)
self.data_cfg = S2TJointDataConfig(
Path(args.sup_speech_data) / args.config_yaml
)
logger.info(
f"load supervised speech data configure from {Path(args.sup_speech_data) / args.config_yaml}"
)
self.data_s2s_cfg = (
S2TJointDataConfig(Path(args.sup_speech_s2s_data) / args.config_s2s_yaml)
if args.sup_speech_s2s_train_subset != ""
else None
)
if self.data_s2s_cfg is not None:
logger.info(
f"load supervised sequece to sequence speech data configure from {Path(args.sup_speech_s2s_data) / args.config_yaml}"
)
def parse_data_ratio(sample_ratio):
ratios = sample_ratio.split(",")
if len(ratios) == 1:
return [float(ratios[0])]
epoch_ratios = []
for item in ratios:
ep, r = item.split(":")
ep = int(ep)
r = float(r)
assert ep > 0 # epoch is 1 based
assert ep >= len(epoch_ratios)
if len(epoch_ratios) == 0:
epoch_ratios.append(
r
)  # epoch_ratios[0] is not used (epochs are 1-based), but we set it to the first value to keep things simple.
while len(epoch_ratios) < ep:
epoch_ratios.append(epoch_ratios[-1])
epoch_ratios.append(r)
return epoch_ratios
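# Illustrative traces of parse_data_ratio (comments only, not executed):
#   parse_data_ratio("0.5")         -> [0.5]
#   parse_data_ratio("1:1.0,3:0.5") -> [1.0, 1.0, 1.0, 0.5]
# i.e. the list is indexed by the 1-based epoch (index 0 is a placeholder):
# epochs 1-2 sample at ratio 1.0, epoch 3 and later at 0.5 (get_sample_ratio
# below falls back to the last entry once the epoch exceeds the list length).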
self.sup_ratio = parse_data_ratio(args.supervised_speech_sample_ratio)
self.sup_s2s_ratio = parse_data_ratio(args.supervised_speech_s2s_sample_ratio)
self.text_ratio = parse_data_ratio(args.text_sample_ratio)
self.bitext_ratio = parse_data_ratio(args.bitext_sample_ratio)
self.unsup_ratio = parse_data_ratio(args.unsupervised_speech_sample_ratio)
self.sample_mode = None
def build_model(self, args):
args.input_feat_per_channel = self.data_cfg.input_feat_per_channel
args.input_channels = self.data_cfg.input_channels
return super().build_model(args)
def build_tokenizer(self, data_cfg, msg=""):
logger.info(f"pre-tokenizer {msg}: {data_cfg.pre_tokenizer}")
return encoders.build_tokenizer(Namespace(**data_cfg.pre_tokenizer))
def build_bpe(self, data_cfg, msg=""):
logger.info(f"tokenizer {msg}: {data_cfg.bpe_tokenizer}")
return encoders.build_bpe(Namespace(**data_cfg.bpe_tokenizer))
@classmethod
def resolve_data_type(cls, split, use_sup_speech_ctc):
if len(split.split("_")) == 1:
# default case, train or valid
is_train = split
dtype = "text"
else:
is_train, dtype = split.split("_", 1)
is_train = True if is_train == "train" else False
if dtype == "sup_speech":
dtype = "sup_speech_ctc" if use_sup_speech_ctc else "sup_speech_ali"
assert dtype in (
"text",
"bitext",
"sup_speech_ali",
"sup_speech_s2s",
"unsup_speech",
"sup_speech_ctc",
), f"failed resolving {split} (it resulted into: {dtype} ; is_train={is_train})"
return is_train, dtype
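# Examples of how split names resolve (comments only):
#   resolve_data_type("train", use_sup_speech_ctc=False)           -> (True, "text")
#   resolve_data_type("valid_bitext", use_sup_speech_ctc=False)    -> (False, "bitext")
#   resolve_data_type("valid_sup_speech", use_sup_speech_ctc=True) -> (False, "sup_speech_ctc")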
def create_modalitydatasetitem(self, dtype, dataset):
dsitem = None
if dtype in ("text", "bitext"):
dsitem = ModalityDatasetItem(
dtype,
dataset,
(self.args.max_source_positions, self.args.max_target_positions),
self.args.max_text_tokens,
self.args.batch_size,
)
elif dtype in ("sup_speech_ctc", "sup_speech_ali", "sup_speech_s2s"):
dsitem = ModalityDatasetItem(
dtype,
dataset,
(self.args.max_speech_positions, self.args.max_target_positions),
self.args.max_speech_tokens,
self.args.batch_size,
)
elif dtype == "unsup_speech":
dsitem = ModalityDatasetItem(
dtype, dataset, 1e8, self.args.max_speech_tokens, self.args.batch_size
)
else:
raise ValueError(f"{dtype} is not supported")
return dsitem
def load_dataset(self, split, epoch=1, combine=False, **kwargs):
def _get_sup_src_tgt_dict(src_dict, tgt_dict, use_s2s_sup_decoder):
if use_s2s_sup_decoder:
return None, tgt_dict
# use src_dict as tgt_dict here, since we use the source dictionary as the target for forced alignment
return None, src_dict
is_train, dtype = self.resolve_data_type(split, self.args.use_sup_speech_ctc)
# Note we use --add-tgt-lang-token instead of data_cfg.prepend_tgt_lang_tag_no_change to set target language tag in the text dataset
# Verify that add_tgt_lang_token and prepend_tgt_lang_tag_no_change are consistent
# Note we use --multilang-sampling-alpha instead of data_cfg.sampling_text_alpha to set text data sampling
if is_train:
msets = []
# train split, load everything into one
if self.lang_pairs != "":
text_dataset = self.load_dataset_only(
"train", self.lang_pairs, epoch=epoch, combine=combine
)
dsitem = self.create_modalitydatasetitem("text", text_dataset)
msets.append(dsitem)
if self.lang_pairs_bitext != "": # load bitext
bitext_dataset = self.load_dataset_only(
"train_bitext",
self.lang_pairs_bitext,
do_mask=False,
epoch=epoch,
combine=combine,
)
dsitem = self.create_modalitydatasetitem("bitext", bitext_dataset)
msets.append(dsitem)
if self.args.sup_speech_train_subset != "":
pre_tokenizer = self.build_tokenizer(self.data_cfg)
bpe_tokenizer = self.build_bpe(self.data_cfg)
append_eos = True
sup_speech_type = "sup_speech_ali"
if self.args.use_sup_speech_ctc:
# CTC mode
sup_speech_type = "sup_speech_ctc"
append_eos = False # CTC doesn't need eos in the target
src_dict, tgt_dict = _get_sup_src_tgt_dict(
self.src_dict, self.tgt_dict, False
)
sup_speech_dataset = SpeechToTextJointDatasetCreator.from_tsv(
self.args.sup_speech_data,
self.data_cfg,
self.args.sup_speech_train_subset,
tgt_dict=tgt_dict,
src_dict=src_dict,
pre_tokenizer=pre_tokenizer,
bpe_tokenizer=bpe_tokenizer,
src_pre_tokenizer=None,
src_bpe_tokenizer=None,
is_train_split=is_train,
epoch=epoch,
seed=self.args.seed,
append_eos=append_eos,
)
dsitem = self.create_modalitydatasetitem(
sup_speech_type, sup_speech_dataset
)
msets.append(dsitem)
if self.args.sup_speech_s2s_train_subset != "":
pre_tokenizer = self.build_tokenizer(self.data_s2s_cfg, msg="(s2s)")
bpe_tokenizer = self.build_bpe(self.data_s2s_cfg, msg="(s2s)")
# make sure self.data_cfg.prepend_tgt_lang_tag_no_change == self.args.add_tgt_lang_token
src_dict, tgt_dict = _get_sup_src_tgt_dict(
self.src_dict, self.tgt_dict, True
)
sup_speech_s2s_dataset = SpeechToTextJointDatasetCreator.from_tsv(
self.args.sup_speech_s2s_data,
self.data_s2s_cfg,
self.args.sup_speech_s2s_train_subset,
tgt_dict=tgt_dict,
src_dict=src_dict,
pre_tokenizer=pre_tokenizer,
bpe_tokenizer=bpe_tokenizer,
src_pre_tokenizer=None,
src_bpe_tokenizer=None,
is_train_split=is_train,
epoch=epoch,
seed=self.args.seed,
)
dsitem = self.create_modalitydatasetitem(
"sup_speech_s2s", sup_speech_s2s_dataset
)
msets.append(dsitem)
if self.args.unsup_speech_train_data != "":
unsup_speech_dataset = FileAudioDatasetWrapper(
self.args.unsup_speech_train_data,
self.args.sample_rate,
max_sample_size=self.args.max_sample_size,
min_sample_size=self.args.min_sample_size,
normalize=False,
)
dsitem = self.create_modalitydatasetitem(
"unsup_speech", unsup_speech_dataset
)
msets.append(dsitem)
pre_train_dataset = MultiModalityDataset(msets)
self.datasets[split] = pre_train_dataset
else: # validation split, load them for each type of data
if dtype == "text":
text_dataset = self.load_dataset_only(
split, self.lang_pairs, epoch=epoch, combine=combine
)
dsitem = self.create_modalitydatasetitem("text", text_dataset)
self.datasets[split] = MultiModalityDataset([dsitem])
elif dtype == "bitext":
bitext_dataset = self.load_dataset_only(
split,
self.lang_pairs_bitext,
do_mask=False,
epoch=epoch,
combine=combine,
)
dsitem = self.create_modalitydatasetitem("bitext", bitext_dataset)
self.datasets[split] = MultiModalityDataset([dsitem])
elif dtype in ("sup_speech_ctc", "sup_speech_ali"):
assert self.args.sup_speech_valid_subset != ""
pre_tokenizer = self.build_tokenizer(self.data_cfg)
bpe_tokenizer = self.build_bpe(self.data_cfg)
append_eos = True
if dtype == "sup_speech_ctc":
# CTC mode
append_eos = False # CTC doesn't need eos
assert self.args.use_sup_speech_ctc
datasets = []
for split_name in self.args.sup_speech_valid_subset.split(","):
src_dict, tgt_dict = _get_sup_src_tgt_dict(
self.src_dict, self.tgt_dict, False
)
datasets.append(
SpeechToTextJointDatasetCreator.from_tsv(
self.args.sup_speech_data,
self.data_cfg,
split_name,
tgt_dict=tgt_dict,
src_dict=src_dict,
pre_tokenizer=pre_tokenizer,
bpe_tokenizer=bpe_tokenizer,
src_pre_tokenizer=None,
src_bpe_tokenizer=None,
is_train_split=is_train,
epoch=epoch,
seed=self.args.seed,
append_eos=append_eos,
)
)
dset = datasets[0] if len(datasets) == 1 else ConcatDataset(datasets)
dsitem = self.create_modalitydatasetitem(dtype, dset)
self.datasets[split] = MultiModalityDataset([dsitem])
elif dtype == "sup_speech_s2s":
assert self.args.sup_speech_s2s_valid_subset != ""
pre_tokenizer = self.build_tokenizer(self.data_s2s_cfg)
bpe_tokenizer = self.build_bpe(self.data_s2s_cfg)
datasets = []
for split_name in self.args.sup_speech_s2s_valid_subset.split(","):
src_dict, tgt_dict = _get_sup_src_tgt_dict(
self.src_dict, self.tgt_dict, True
)
datasets.append(
SpeechToTextJointDatasetCreator.from_tsv(
self.args.sup_speech_s2s_data,
self.data_s2s_cfg,
split_name,
tgt_dict=tgt_dict,
src_dict=src_dict,
pre_tokenizer=pre_tokenizer,
bpe_tokenizer=bpe_tokenizer,
src_pre_tokenizer=None,
src_bpe_tokenizer=None,
is_train_split=is_train,
epoch=epoch,
seed=self.args.seed,
)
)
dset = datasets[0] if len(datasets) == 1 else ConcatDataset(datasets)
dsitem = self.create_modalitydatasetitem("sup_speech_s2s", dset)
self.datasets[split] = MultiModalityDataset([dsitem])
elif dtype == "unsup_speech":
assert self.args.unsup_speech_valid_data != ""
unsup_speech_dataset = FileAudioDatasetWrapper(
self.args.unsup_speech_valid_data,
self.args.sample_rate,
max_sample_size=self.args.max_sample_size,
min_sample_size=self.args.min_sample_size,
normalize=False,
)
dsitem = self.create_modalitydatasetitem(
"unsup_speech", unsup_speech_dataset
)
self.datasets[split] = MultiModalityDataset([dsitem])
else:
raise ValueError(f"Unsupported type {dtype}")
def get_sample_ratio(self, epoch):
sup_ratio = (
self.sup_ratio[epoch] if len(self.sup_ratio) > epoch else self.sup_ratio[-1]
)
sup_s2s_ratio = (
self.sup_s2s_ratio[epoch]
if len(self.sup_s2s_ratio) > epoch
else self.sup_s2s_ratio[-1]
)
unsup_ratio = (
self.unsup_ratio[epoch]
if len(self.unsup_ratio) > epoch
else self.unsup_ratio[-1]
)
text_ratio = (
self.text_ratio[epoch]
if len(self.text_ratio) > epoch
else self.text_ratio[-1]
)
bitext_ratio = (
self.bitext_ratio[epoch]
if len(self.bitext_ratio) > epoch
else self.bitext_ratio[-1]
)
return text_ratio, bitext_ratio, sup_ratio, sup_s2s_ratio, unsup_ratio
def get_batch_iterator(
self,
dataset,
max_tokens=None,
max_sentences=None,
max_positions=None,
ignore_invalid_inputs=False,
required_batch_size_multiple=1,
seed=1,
num_shards=1,
shard_id=0,
num_workers=0,
epoch=0,
data_buffer_size=0,
disable_iterator_cache=False,
skip_remainder_batch=False,
grouped_shuffling=False,
update_epoch_batch_itr=False,
):
assert isinstance(dataset, MultiModalityDataset)
if len(dataset.id_to_mode) == 1:
max_positions = dataset.max_positions[0]
max_tokens = dataset.max_tokens[0]
max_sentences = dataset.max_sentences[0]
return super().get_batch_iterator(
dataset,
max_tokens,
max_sentences,
max_positions,
ignore_invalid_inputs,
required_batch_size_multiple,
seed,
num_shards,
shard_id,
num_workers,
epoch,
data_buffer_size,
disable_iterator_cache,
skip_remainder_batch=skip_remainder_batch,
)
mult_ratio = []
(
text_ratio,
bitext_ratio,
sup_ratio,
sup_s2s_ratio,
unsup_ratio,
) = self.get_sample_ratio(epoch)
for mode in dataset.id_to_mode:
if mode in ("sup_speech_ctc", "sup_speech_ali"):
mult_ratio.append(sup_ratio)
elif mode == "sup_speech_s2s":
mult_ratio.append(sup_s2s_ratio)
elif mode == "text":
mult_ratio.append(text_ratio)
elif mode == "bitext":
mult_ratio.append(bitext_ratio)
elif mode == "unsup_speech":
mult_ratio.append(unsup_ratio)
# initialize the dataset with the correct starting epoch
dataset.set_epoch(epoch)
batch_samplers = dataset.get_batch_samplers(
mult_ratio, required_batch_size_multiple, seed
)
# return a reusable, sharded iterator
epoch_iter = GroupedEpochBatchIterator(
dataset=dataset,
collate_fn=dataset.collater,
batch_samplers=batch_samplers,
seed=seed,
num_shards=num_shards,
shard_id=shard_id,
num_workers=num_workers,
epoch=epoch,
mult_rate=max(self.args.update_freq) if self.args.same_data_update else 1,
buffer_size=data_buffer_size,
skip_remainder_batch=skip_remainder_batch,
)
self.dataset_to_epoch_iter[dataset] = {} # refresh it every epoch
return epoch_iter
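# Hypothetical flag combination for this task (example values only): sample text
# twice as often as supervised speech and halve the unsupervised-speech ratio
# from epoch 5 on, keeping each parameter update within a single modality:
#   --text-sample-ratio 2 --supervised-speech-sample-ratio 1 \
#   --unsupervised-speech-sample-ratio 1:1.0,5:0.5 --same-data-update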
| EXA-1-master | exa/libraries/fairseq/examples/speech_text_joint_to_text/tasks/speech_text_denoise_pretrain.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import itertools
import logging
import os
import re
import numpy as np
import torch
from examples.speech_text_joint_to_text.data.pair_denoising_dataset import (
LanguagePairDenoisingDataset,
)
from fairseq import utils
from fairseq.data import (
ConcatDataset,
Dictionary,
LanguagePairDataset,
ResamplingDataset,
TransformEosConcatLangPairDataset,
TransformEosLangPairDataset,
data_utils,
indexed_dataset,
)
from fairseq.data.encoders.utils import get_whole_word_mask
from fairseq.tasks import register_task
from fairseq.tasks.translation import TranslationTask
logger = logging.getLogger(__name__)
def gen_whole_word_mask(args, dictionary):
def is_beginning_of_word(i):
if i < dictionary.nspecial:
# special elements are always considered beginnings
return True
tok = dictionary[i]
if tok.startswith("madeupword"):
return True
if tok in ["<unk>", "<s>", "</s>", "<pad>"]:
return True
return tok.startswith("\u2581")
if args.use_mask_whole_words:
mask_whole_words = torch.ByteTensor(
list(map(is_beginning_of_word, range(len(dictionary))))
)
else:
# every token will be treated as a word-leading token, since no bpe model is loaded for phoneme tokens
return get_whole_word_mask(args, dictionary)
return mask_whole_words
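# Worked example (comments only): for a sentencepiece-style vocabulary such as
# ["<s>", "<pad>", "</s>", "<unk>", "\u2581the", "cat", "\u2581sat"], the special
# symbols and the tokens starting with "\u2581" are flagged as word beginnings,
# so with --use-mask-whole-words the returned ByteTensor is [1, 1, 1, 1, 1, 0, 1];
# without the flag the generic bpe-based get_whole_word_mask fallback is used.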
@register_task("paired_denoising")
class PairedDenoisingTask(TranslationTask):
LANG_TAG_TEMPLATE = "<lang:{}>" # Tag for language (target)
@staticmethod
def add_args(parser):
TranslationTask.add_args(parser)
# bart setting
parser.add_argument(
"--mask",
default=0.0,
type=float,
help="fraction of words/subwords that will be masked",
)
parser.add_argument(
"--mask-random",
default=0.0,
type=float,
help="instead of using [MASK], use random token this often",
)
parser.add_argument(
"--insert",
default=0.0,
type=float,
help="insert this percentage of additional random tokens",
)
parser.add_argument(
"--poisson-lambda",
default=3.0,
type=float,
help="randomly shuffle sentences for this proportion of inputs",
)
parser.add_argument(
"--mask-length",
default="span-poisson",
type=str,
choices=["subword", "word", "span-poisson"],
help="mask length to choose",
)
parser.add_argument(
"--replace-length",
default=1,
type=int,
help="when masking N tokens, replace with 0, 1, or N tokens (use -1 for N)",
)
# multi-lingual
parser.add_argument(
"--multilang-sampling-alpha",
type=float,
default=1.0,
help="smoothing alpha for sample ratios across multiple datasets",
)
parser.add_argument(
"--lang-pairs",
default="",
metavar="PAIRS",
help="comma-separated list of language pairs (in training order): phnen-en,phnfr-fr,phnit-it. Do masking",
)
parser.add_argument(
"--lang-pairs-bitext",
default="",
metavar="PAIRS",
help="comma-separated list of language pairs (in training order): en-de,en-fr,de-fr. No masking",
)
parser.add_argument("--add-src-lang-token", default=False, action="store_true")
parser.add_argument("--add-tgt-lang-token", default=False, action="store_true")
parser.add_argument(
"--no-whole-word-mask-langs",
type=str,
default="",
metavar="N",
help="languages without spacing between words dont support whole word masking",
)
parser.add_argument(
"--use-mask-whole-words", default=False, action="store_true"
)
@classmethod
def setup_task(cls, args, **kwargs):
"""Setup the task."""
paths = args.data.split(":")
assert len(paths) > 0
src_dict = Dictionary.load(
os.path.join(paths[0], "src_dict.txt")
) # assume all languages share a source dictionary
tgt_dict = Dictionary.load(
os.path.join(paths[0], "tgt_dict.txt")
) # assume all languages share a target dictionary
lang_pairs = args.lang_pairs + "," + args.lang_pairs_bitext
lang_pairs = re.sub(",$", "", re.sub("^,", "", lang_pairs))
src_langs = [lp.split("-")[0] for lp in lang_pairs.split(",")]
tgt_langs = [lp.split("-")[1] for lp in lang_pairs.split(",")]
if args.add_src_lang_token:
for lang in src_langs:
assert (
src_dict.index(PairedDenoisingTask.LANG_TAG_TEMPLATE.format(lang))
!= src_dict.unk()
)
if args.add_tgt_lang_token:
for lang in tgt_langs:
assert (
tgt_dict.index(PairedDenoisingTask.LANG_TAG_TEMPLATE.format(lang))
!= tgt_dict.unk()
)
logger.info("source dictionary: {} types".format(len(src_dict)))
logger.info("target dictionary: {} types".format(len(tgt_dict)))
if not hasattr(args, "shuffle_instance"):
args.shuffle_instance = False
return cls(args, src_dict, tgt_dict)
def __init__(self, args, src_dict, tgt_dict):
super().__init__(args, src_dict, tgt_dict)
# check mask token
self.mask_idx = self.src_dict.index("<mask>")
assert self.mask_idx != self.src_dict.unk()
self.lang_pairs = args.lang_pairs
self.lang_pairs_bitext = args.lang_pairs_bitext
self.args = args
@classmethod
def language_pair_denoising_dataset(
cls,
data_path,
do_mask,
split,
src,
src_dict,
tgt,
tgt_dict,
mask_idx,
mask_whole_words,
seed,
args,
dataset_impl,
combine=False,
left_pad_source=True,
left_pad_target=False,
max_source_positions=1024,
max_target_positions=1024,
shuffle=True,
src_lang_id=None,
tgt_lang_id=None,
):
def split_exists(split, src, tgt, lang, data_path):
filename = os.path.join(
data_path, "{}.{}-{}.{}".format(split, src, tgt, lang)
)
return indexed_dataset.dataset_exists(filename, impl=dataset_impl)
src_datasets = []
tgt_datasets = []
for k in itertools.count():
split_k = split + (str(k) if k > 0 else "")
# infer langcode
if split_exists(split_k, src, tgt, src, data_path):
prefix = os.path.join(data_path, "{}.{}-{}.".format(split_k, src, tgt))
elif split_exists(split_k, tgt, src, src, data_path):
prefix = os.path.join(data_path, "{}.{}-{}.".format(split_k, tgt, src))
else:
if k > 0:
break
else:
raise FileNotFoundError(
"Dataset not found: {} ({})".format(split, data_path)
)
src_dataset = data_utils.load_indexed_dataset(
prefix + src, src_dict, dataset_impl
)
src_datasets.append(src_dataset)
tgt_dataset = data_utils.load_indexed_dataset(
prefix + tgt, tgt_dict, dataset_impl
)
if tgt_dataset is not None:
tgt_datasets.append(tgt_dataset)
logger.info(
"{} {} {}-{} {} examples".format(
data_path, split_k, src, tgt, len(src_datasets[-1])
)
)
if not combine:
break
assert len(src_datasets) == len(tgt_datasets) or len(tgt_datasets) == 0
if len(src_datasets) == 1:
src_dataset = src_datasets[0]
tgt_dataset = tgt_datasets[0] if len(tgt_datasets) > 0 else None
else:
sample_ratios = [1] * len(src_datasets)
src_dataset = ConcatDataset(src_datasets, sample_ratios)
if len(tgt_datasets) > 0:
tgt_dataset = ConcatDataset(tgt_datasets, sample_ratios)
else:
tgt_dataset = None
eos = None
tgt_dataset_sizes = tgt_dataset.sizes if tgt_dataset is not None else None
if not do_mask:
return LanguagePairDataset(
src_dataset,
src_dataset.sizes,
src_dict,
tgt_dataset,
tgt_dataset_sizes,
tgt_dict,
left_pad_source=left_pad_source,
left_pad_target=left_pad_target,
eos=eos,
shuffle=shuffle,
src_lang_id=src_lang_id,
tgt_lang_id=tgt_lang_id,
)
return LanguagePairDenoisingDataset(
src_dataset,
src_dataset.sizes,
src_dict,
tgt_dataset,
tgt_dataset_sizes,
tgt_dict,
mask_idx,
mask_whole_words,
seed,
args,
left_pad_source=left_pad_source,
left_pad_target=left_pad_target,
eos=eos,
shuffle=shuffle,
src_lang_id=src_lang_id,
tgt_lang_id=tgt_lang_id,
)
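# Expected on-disk layout (inferred from split_exists above): indexed datasets
# named "{split}.{src}-{tgt}.{lang}" under data_path, e.g. for the pair
# "phnen-en" and split "train":
#   train.phnen-en.phnen.{bin,idx}   # source (phoneme) side
#   train.phnen-en.en.{bin,idx}      # target side
# Additional shards train1.*, train2.*, ... are concatenated when combine=True.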
def _get_sample_prob(self, dataset_lens):
"""
Get smoothed sampling probability by language. This helps low-resource
languages by upsampling them.
"""
prob = dataset_lens / dataset_lens.sum()
smoothed_prob = prob ** self.args.multilang_sampling_alpha
smoothed_prob = smoothed_prob / smoothed_prob.sum()
return smoothed_prob
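# Numeric example (comments only): with dataset_lens = [100, 900] and
# --multilang-sampling-alpha 0.5, prob = [0.1, 0.9] is smoothed to
# [sqrt(0.1), sqrt(0.9)] and renormalized to [0.25, 0.75], i.e. the smaller
# dataset is upsampled from 10% to 25% of the drawn samples.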
def resample_datasets(self, lang_datasets, lang_pairs_all, epoch):
# For train subset, additionally up or down sample languages.
if self.args.multilang_sampling_alpha == 1.0:
return lang_datasets
dataset_lengths = np.array(
[len(d) for d in lang_datasets],
dtype=float,
)
sample_probs = self._get_sample_prob(dataset_lengths)
logger.info(
"Sample probability by language pair: {}".format(
{
lp: "{0:.4f}".format(sample_probs[id])
for id, lp in enumerate(lang_pairs_all)
}
)
)
size_ratio = (sample_probs * dataset_lengths.sum()) / dataset_lengths
logger.info(
"Up/Down Sampling ratio by language: {}".format(
{
lp: "{0:.2f}".format(size_ratio[id])
for id, lp in enumerate(lang_pairs_all)
}
)
)
resampled_lang_datasets = [
ResamplingDataset(
lang_datasets[i],
size_ratio=size_ratio[i],
seed=self.args.seed,
epoch=epoch,
replace=size_ratio[i] >= 1.0,
)
for i, d in enumerate(lang_datasets)
]
return resampled_lang_datasets
def load_dataset_only(
self, split, lang_pairs, do_mask=True, epoch=1, combine=False
):
paths = utils.split_paths(self.args.data)
assert len(paths) > 0
data_path = paths[(epoch - 1) % len(paths)]
# TODO unk token will be considered as first word too, though it might be an unknown phoneme within a word
# get_whole_word_mask returns a tensor (size V by 1 ) to indicate if a token is a word start token
mask_whole_src_words = gen_whole_word_mask(self.args, self.src_dict)
language_without_segmentations = self.args.no_whole_word_mask_langs.split(",")
lang_datasets = []
eos_bos = []
lang_pairs = lang_pairs.split(",") if lang_pairs != "" else []
assert len(lang_pairs) > 0
for lp in lang_pairs:
src, tgt = lp.split("-")
lang_mask_whole_src_words = (
mask_whole_src_words
if src not in language_without_segmentations
else None
)
end_token = (
self.source_dictionary.index(
PairedDenoisingTask.LANG_TAG_TEMPLATE.format(src)
)
if self.args.add_src_lang_token
else None
)
bos_token = (
self.target_dictionary.index(
PairedDenoisingTask.LANG_TAG_TEMPLATE.format(tgt)
)
if self.args.add_tgt_lang_token
else None
)
src_lang_id = None
if self.args.add_src_lang_token or self.args.add_tgt_lang_token:
eos_bos.append((end_token, bos_token))
dataset = PairedDenoisingTask.language_pair_denoising_dataset(
data_path,
do_mask,
split,
src,
self.source_dictionary,
tgt,
self.target_dictionary,
self.mask_idx,
lang_mask_whole_src_words,
self.args.seed,
self.args,
self.args.dataset_impl,
combine=combine,
left_pad_source=utils.eval_bool(self.args.left_pad_source),
left_pad_target=utils.eval_bool(self.args.left_pad_target),
max_source_positions=self.args.max_source_positions,
max_target_positions=self.args.max_target_positions,
src_lang_id=src_lang_id,
)
lang_datasets.append(dataset)
if len(lang_datasets) == 0:
return
elif len(lang_datasets) == 1:
dataset = lang_datasets[0]
if self.args.add_src_lang_token or self.args.add_tgt_lang_token:
end_token, bos_token = eos_bos[0]
dataset = TransformEosLangPairDataset(
dataset,
src_eos=self.source_dictionary.eos(),
new_src_eos=end_token,
tgt_bos=self.target_dictionary.eos(),
new_tgt_bos=bos_token,
)
else:
end_tokens = [item[0] for item in eos_bos if item[0] is not None]
bos_tokens = [item[1] for item in eos_bos if item[1] is not None]
lang_datasets = self.resample_datasets(lang_datasets, lang_pairs, epoch)
dataset = TransformEosConcatLangPairDataset(
lang_datasets,
self.source_dictionary.eos(),
self.target_dictionary.eos(),
new_src_eos=end_tokens,
new_tgt_bos=bos_tokens,
)
return dataset
# split in (train, valid, test, ...)
def load_dataset(self, split, epoch=1, combine=False, **kwargs):
self.datasets[split] = self.load_dataset_only(
split, self.lang_pairs, epoch=epoch, combine=combine
)
| EXA-1-master | exa/libraries/fairseq/examples/speech_text_joint_to_text/tasks/pair_denoising.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
from collections import namedtuple
import torch
import torch.nn as nn
from fairseq import checkpoint_utils
from fairseq import utils
from fairseq.models import (
FairseqEncoder,
FairseqDecoder,
FairseqEncoderDecoderModel,
register_model,
register_model_architecture,
)
from fairseq.models.fairseq_encoder import EncoderOut
from fairseq.models.speech_to_text import (
TransformerDecoder,
S2TTransformerEncoder,
)
from fairseq.models.transformer import TransformerEncoder
from fairseq.modules import (
TransformerEncoderLayer,
GradMultiply,
LayerNorm,
)
logger = logging.getLogger(__name__)
class SpeechEoSEncoder(FairseqEncoder):
def __init__(self, encoder, eos_num, feat_dim, adapter_type="None", adapter_dim=0):
super().__init__(None)
self.encoder = encoder
self.eos_num = eos_num # downsampling rate for speech input feature
self.eos_emb = (
nn.Parameter(torch.zeros(1, feat_dim), requires_grad=True)
if eos_num > 0
else None
)
self.adapter = self.add_adapter(adapter_type, adapter_dim)
def add_adapter(self, adapter_type, adapter_dim):
def _make_identity(linear, eps=1e-5):
assert isinstance(linear, nn.Linear)
linear.weight.data.mul_(eps)
linear.weight.data.fill_diagonal_(1.0)
if linear.bias is not None:
linear.bias.data.mul_(eps)
adapter = None
if adapter_type == "Linear":
assert adapter_dim > 0
adapter = nn.Sequential(
nn.Linear(adapter_dim, adapter_dim), LayerNorm(adapter_dim)
)
# initialize the adapter as identity matrix first
_make_identity(adapter[0])
elif adapter_type == "MLP":
assert adapter_dim > 0
# assume the model is pre-norm model
adapter = nn.Sequential(
nn.Linear(adapter_dim, 2 * adapter_dim),
nn.ReLU(),
nn.Linear(2 * adapter_dim, adapter_dim),
LayerNorm(adapter_dim),
)
_make_identity(adapter[0])
_make_identity(adapter[2])
return adapter
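# Note on _make_identity (comments only): after weight.mul_(eps) and
# fill_diagonal_(1.0) the Linear weight is the identity plus eps-scaled noise
# off the diagonal, and the bias is scaled to ~0, so a freshly added adapter
# initially passes the pre-trained encoder output through nearly unchanged
# (up to the trailing LayerNorm).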
def add_eos(self, src_tokens, src_lengths):
bsz, max_seq_len, fdim = src_tokens.size()
if self.eos_num > 0:
src_token_eos = torch.zeros(
[bsz, max_seq_len + self.eos_num, fdim],
dtype=src_tokens.dtype,
device=src_tokens.device,
)
src_token_eos[:, :max_seq_len] = src_tokens
for bi in range(bsz):
src_token_eos[bi][
src_lengths[bi] : src_lengths[bi] + self.eos_num
] = self.eos_emb.expand(self.eos_num, fdim)
src_lengths = src_lengths + self.eos_num
src_tokens = src_token_eos
return src_tokens, src_lengths
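# Shape example (comments only): for src_tokens of shape (B=2, T=100, F=80),
# src_lengths = [100, 60] and eos_num = 4, the output is (2, 104, 80); the
# learned eos embedding is written at frames 100..103 and 60..63 respectively,
# and src_lengths becomes [104, 64].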
def apply_adapter(self, enc_out):
if self.adapter is None:
return enc_out
rst = self.adapter(enc_out.encoder_out)
if enc_out.encoder_padding_mask is not None:
rst.masked_fill_(
enc_out.encoder_padding_mask.transpose(0, 1).unsqueeze(-1), 0
)
return EncoderOut(
encoder_out=rst,
encoder_padding_mask=enc_out.encoder_padding_mask,
encoder_embedding=enc_out.encoder_embedding,
encoder_states=enc_out.encoder_states,
src_tokens=enc_out.src_tokens,
src_lengths=enc_out.src_lengths,
)
def forward(self, src_tokens, src_lengths=None, return_all_hiddens=False, **kwargs):
"""
src_tokens: padded tensor (B, T, C * feat)
src_lengths: tensor of original lengths of input utterances (B,)
"""
src_tokens, src_lengths = self.add_eos(src_tokens, src_lengths)
enc_out = self.encoder(src_tokens, src_lengths, return_all_hiddens)
enc_out = self.apply_adapter(enc_out)
return enc_out
def reorder_encoder_out(self, encoder_out, new_order):
return self.encoder.reorder_encoder_out(encoder_out, new_order)
class DualInputEncoder(FairseqEncoder):
def __init__(
self,
args,
spch_encoder,
text_encoder,
dictionary,
cross_attentive_loss_before_last_layer=-1,
):
super().__init__(dictionary)
self.spch_encoder = spch_encoder
self.text_encoder = text_encoder
self.enc_grad_mult = args.enc_grad_mult
self.cross_attentive_loss_before_last_layer = (
cross_attentive_loss_before_last_layer
)
self.use_cross_attentive_loss = (
False if cross_attentive_loss_before_last_layer <= -1 else True
)
self.enc2_along_grad_mult = args.enc2_along_grad_mult
@classmethod
def set_shared_layer(cls, share_level, src_layer, tgt_layer):
"""
share parameters from tgt_layer to src_layer
share_level:
0: share everything
1: share everything but different model
2: share weight but not bias, layernorm
"""
if share_level == 0:
return tgt_layer
if isinstance(src_layer, nn.Linear):
return tgt_layer
if isinstance(src_layer, TransformerEncoderLayer):
assert src_layer.embed_dim == tgt_layer.embed_dim
assert src_layer.normalize_before == tgt_layer.normalize_before
if share_level == 1:
src_layer.fc1 = tgt_layer.fc1
src_layer.fc2 = tgt_layer.fc2
src_layer.self_attn = tgt_layer.self_attn
src_layer.final_layer_norm = tgt_layer.final_layer_norm
src_layer.self_attn_layer_norm = tgt_layer.self_attn_layer_norm
src_layer.layernorm_embedding = tgt_layer.layernorm_embedding
else:
src_layer.fc1.weight = tgt_layer.fc1.weight
src_layer.fc2.weight = tgt_layer.fc2.weight
src_layer.self_attn.k_proj.weight = tgt_layer.self_attn.k_proj.weight
src_layer.self_attn.v_proj.weight = tgt_layer.self_attn.v_proj.weight
src_layer.self_attn.q_proj.weight = tgt_layer.self_attn.q_proj.weight
src_layer.self_attn.out_proj.weight = (
tgt_layer.self_attn.out_proj.weight
)
else:
if share_level == 1:
return tgt_layer
return src_layer
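# Minimal standalone sketch of share level 2 (plain torch behaviour, not
# fairseq-specific); shown as a comment:
#   a, b = nn.Linear(4, 4), nn.Linear(4, 4)
#   a.weight = b.weight   # same Parameter object; gradients accumulate jointly
#   assert a.weight is b.weight and a.bias is not b.bias
# i.e. the weights are tied while biases and layer norms remain separate.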
@classmethod
def build_spch_encoder(cls, args):
cfg = {
"input_feat_per_channel": args.input_feat_per_channel,
"input_channels": args.input_channels,
"conv_kernel_sizes": args.conv_kernel_sizes,
"conv_channels": args.conv_channels,
"encoder_embed_dim": args.encoder_embed_dim,
"encoder_ffn_embed_dim": args.encoder_ffn_embed_dim,
"encoder_layers": args.speech_encoder_layers,
"encoder_layerdrop": args.encoder_layerdrop,
"encoder_attention_heads": args.encoder_attention_heads,
"max_source_positions": args.max_source_positions,
"dropout": args.dropout,
"encoder_normalize_before": args.encoder_normalize_before,
"activation_dropout": args.activation_dropout,
"attention_dropout": args.attention_dropout,
"activation_fn": args.activation_fn,
"layernorm_embedding": args.layernorm_embedding,
"no_token_positional_embeddings": args.no_token_positional_embeddings,
"no_scale_embedding": args.no_scale_embedding,
"quant_noise_pq": args.quant_noise_pq,
"encoder_freezing_updates": 0,
}
model_args = namedtuple("args", cfg.keys())(*cfg.values())
spch_encoder = S2TTransformerEncoder(model_args)
if args.add_speech_eos:
spch_encoder = SpeechEoSEncoder(
spch_encoder,
2 * len(args.conv_kernel_sizes.split(",")),
args.input_feat_per_channel,
adapter_type=getattr(args, "speech_encoder_adapter_type", "None"),
adapter_dim=args.encoder_embed_dim,
)
return spch_encoder
@classmethod
def build_text_encoder(cls, args, src_dictionary, spch_encoder):
if args.encoder_shared_layers > 0:
mx_shared_layers = (
args.speech_encoder_layers
if args.speech_encoder_layers < args.text_encoder_layers
else args.text_encoder_layers
)
args.encoder_shared_layers = (
args.encoder_shared_layers
if args.encoder_shared_layers <= mx_shared_layers
else mx_shared_layers
)
cfg = {
"encoder_embed_dim": args.encoder_text_embed_dim,
"encoder_ffn_embed_dim": args.encoder_ffn_embed_dim,
"encoder_layers": args.text_encoder_layers,
"encoder_layerdrop": args.encoder_layerdrop,
"encoder_attention_heads": args.encoder_attention_heads,
"encoder_learned_pos": args.encoder_learned_pos,
"max_source_positions": args.max_source_positions,
"dropout": args.dropout,
"encoder_normalize_before": args.encoder_normalize_before,
"activation_dropout": args.activation_dropout,
"attention_dropout": args.attention_dropout,
"activation_fn": args.activation_fn,
"adaptive_input": args.adaptive_input,
"no_token_positional_embeddings": args.no_token_positional_embeddings,
"no_scale_embedding": args.no_scale_embedding,
"quant_noise_pq": args.quant_noise_pq,
}
model_args = namedtuple("args", cfg.keys())(*cfg.values())
enc_emb = nn.Embedding(
len(src_dictionary), model_args.encoder_embed_dim, src_dictionary.pad()
)
text_encoder = TransformerEncoder(model_args, src_dictionary, enc_emb)
if args.add_speech_eos:
spch_encoder = spch_encoder.encoder
if args.encoder_shared_layers > 0:
text_encoder.layer_norm = cls.set_shared_layer(
args.encoder_shared_layer_level,
text_encoder.layer_norm,
spch_encoder.layer_norm,
)
for i, ly in enumerate(
spch_encoder.transformer_layers[-args.encoder_shared_layers :]
):
ly_id = i + args.text_encoder_layers - args.encoder_shared_layers
if not isinstance(text_encoder.layers[ly_id], type(ly)):
if text_encoder.layers[ly_id]._get_name() not in ('TransformerEncoderLayerBase', 'TransformerEncoderLayer'):
raise ValueError("The shared layers are expected from the same class")
text_encoder.layers[ly_id] = cls.set_shared_layer(
args.encoder_shared_layer_level,
text_encoder.layers[ly_id],
ly,
)
return text_encoder
def mult_rst_grad(self, rst, ratio):
assert isinstance(rst, dict) # instead of EncoderOut
assert len(rst["encoder_out"]) == 1
rst["encoder_out"][0] = GradMultiply.apply(rst["encoder_out"][0], ratio)
return rst
def process_attentive_loss_states(self, rst, interstates):
assert isinstance(rst, dict) # instead of EncoderOut
rst["encoder_states"] = interstates
return rst
def forward(
self,
src_tokens,
src_lengths=None,
src_txt_tokens=None,
src_txt_lengths=None,
**kwargs
):
"""
Args:
src_tokens: padded tensor (B, T, C * feat)
src_lengths: tensor of original lengths of input utterances (speech) (B,)
src_txt_tokens: padded tensor (B, T)
src_txt_lengths: tensor of original lengths of input utterances (text) (B,)
"""
# src_tokens only: inference
# src_tokens, src_lengths: speech only training
# src_txt_tokens, src_txt_lengths: text only training
# all valid: speech + text training
if src_tokens is None and src_txt_tokens is None:
raise ValueError(
"src_tokens and src_txt_tokens cannot be None at the same time"
)
ret1 = None
ret2 = None
return_all_hiddens = False
if src_tokens is not None:
if (
self.use_cross_attentive_loss and src_txt_tokens is not None
):  # the self.training check is omitted so attention scores are also computed during validation
return_all_hiddens = True
ret1 = self.spch_encoder(
src_tokens, src_lengths, return_all_hiddens=return_all_hiddens
)
if self.use_cross_attentive_loss and src_txt_tokens is not None:
assert self.cross_attentive_loss_before_last_layer < len(
ret1["encoder_states"]
)
ret1 = self.process_attentive_loss_states(
ret1,
ret1["encoder_states"][
-self.cross_attentive_loss_before_last_layer - 1
],
)
if src_txt_tokens is not None:
ret2 = self.text_encoder(
src_txt_tokens, src_txt_lengths, return_all_hiddens=return_all_hiddens
)
if return_all_hiddens:
if self.cross_attentive_loss_before_last_layer == len(
self.text_encoder.layers
):
text_embedding, _ = self.text_encoder.forward_embedding(
src_txt_tokens
)
text_embedding = text_embedding.transpose(0, 1)
ret2 = self.process_attentive_loss_states(ret2, text_embedding)
else:
assert self.cross_attentive_loss_before_last_layer < len(
self.text_encoder.layers
)
ret2 = self.process_attentive_loss_states(
ret2,
ret2["encoder_states"][
-self.cross_attentive_loss_before_last_layer - 1
],
)
def merge_output(rst1, rst2):
if rst1 is None:
if not (self.enc2_along_grad_mult == 1.0 or self.training):
rst2 = self.mult_rst_grad(rst2, self.enc2_along_grad_mult)
return rst2
if rst2 is None:
return rst1
if self.enc_grad_mult != 1.0 and self.training:
rst1 = self.mult_rst_grad(rst1, self.enc_grad_mult)
rst2 = self.mult_rst_grad(rst2, self.enc_grad_mult)
rst = (rst1, rst2)
return rst
return merge_output(ret1, ret2)
def reorder_encoder_out(self, encoder_out, new_order):
assert self.training is False # used for inference only
return self.spch_encoder.reorder_encoder_out(encoder_out, new_order)
# TransformerMultiInputDecoder: take one or two encoder inputs
class TransformerMultiInputDecoder(FairseqDecoder):
def __init__(
self,
dictionary,
spch_decoder,
text_decoder,
compute_cross_attentive_loss=False,
cross_attentive_loss_with_norm=True,
cross_attentive_loss_reverse=False,
):
super().__init__(dictionary)
self.spch_decoder = spch_decoder
self.text_decoder = text_decoder
self.compute_cross_attentive_loss = compute_cross_attentive_loss
self.cross_attentive_loss_with_norm = cross_attentive_loss_with_norm
self.cross_attentive_loss_reverse = cross_attentive_loss_reverse
@classmethod
def share_spchdecoder(cls, task_args, text_decoder, spch_decoder):
if task_args.decoder_shared_layer_level == 0:
return text_decoder
assert text_decoder.embed_tokens == spch_decoder.embed_tokens
spch_decoder.project_in_dim = text_decoder.project_in_dim
spch_decoder.embed_positions = text_decoder.embed_positions
spch_decoder.layernorm_embedding = text_decoder.layernorm_embedding
spch_decoder.project_out_dim = text_decoder.project_out_dim
spch_decoder.adaptive_softmax = text_decoder.adaptive_softmax
if task_args.decoder_shared_layer_level == 1:
spch_decoder.output_projection = text_decoder.output_projection
spch_decoder.layer_norm = text_decoder.layer_norm
else: # 2
spch_decoder.output_projection.weight = (
text_decoder.output_projection.weight
)
for i, ly in enumerate(text_decoder.layers):
sly = spch_decoder.layers[i]
sly.self_attn = ly.self_attn
sly.self_attn_layer_norm = ly.self_attn_layer_norm
# sly.encoder_attn = ly.encoder_attn
if (
task_args.decoder_shared_layer_level == 1
): # share everything, but under different models
sly.encoder_attn = ly.encoder_attn
sly.encoder_attn_layer_norm = ly.encoder_attn_layer_norm
sly.fc1 = ly.fc1
sly.fc2 = ly.fc2
sly.final_layer_norm = ly.final_layer_norm
else: # task_args.decoder_shared_layer_level == 2: #separated encoder_attn_layer_norm and bias
sly.encoder_attn.k_proj.weight = ly.encoder_attn.k_proj.weight
sly.encoder_attn.v_proj.weight = ly.encoder_attn.v_proj.weight
sly.encoder_attn.q_proj.weight = ly.encoder_attn.q_proj.weight
sly.encoder_attn.out_proj.weight = ly.encoder_attn.out_proj.weight
sly.fc1.weight = ly.fc1.weight
sly.fc2.weight = ly.fc2.weight
return spch_decoder
def cross_attentive_loss(
self, teacher_states, student_states, teacher_masking, student_masking, eps=1e-6
):
x = teacher_states.transpose(0, 1) # from T X B X D to B X T X D
y = student_states.transpose(0, 1)
if self.cross_attentive_loss_with_norm:
x = x / (x.norm(dim=2, keepdim=True) + eps)
y = y / (y.norm(dim=2, keepdim=True) + eps)
dim = x.size(-1)
# lengths: batch X seqLen
sim_scores_xy = torch.bmm(x, y.transpose(1, 2)) # batch X lenx X leny ]
if y.dtype == torch.float16:
sim_scores_xy = sim_scores_xy.float()
y = y.float()
x = x.float()
if teacher_masking != []:
assert len(teacher_masking) == 1
sim_scores_xy = sim_scores_xy.masked_fill(
teacher_masking[0].unsqueeze(-1), float("-inf")
)
if student_masking != []:
sim_scores_xy = sim_scores_xy.masked_fill(
student_masking[0].unsqueeze(1), float("-inf")
)
# do masking
y_weights = utils.softmax(sim_scores_xy, dim=-1)
if teacher_masking != []:
y_weights = y_weights.masked_fill(teacher_masking[0].unsqueeze(-1), 0)
x_reconstruct_from_y = torch.bmm(y_weights, y)
sim_scores_xx = torch.bmm(x, x.transpose(1, 2)) # batch X lenx X lenx ]
x_weights = utils.softmax(sim_scores_xx, dim=-1)
if teacher_masking != []:
x_weights = x_weights.masked_fill(teacher_masking[0].unsqueeze(-1), 0)
# no gradient for teacher state
x_reconstruct_from_x = torch.bmm(x_weights, x).detach()
cost = (x_reconstruct_from_x - x_reconstruct_from_y).norm(dim=2)
if teacher_masking != []:
cost = cost.masked_fill(teacher_masking[0], 0)
if not self.cross_attentive_loss_with_norm:
cost = cost / dim
return cost
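# In symbols (comments only): for each teacher position i,
#   cost_i = || sum_j softmax_j(x_i . x_j) * x_j - sum_j softmax_j(x_i . y_j) * y_j ||_2
# where x are the (detached) teacher states and y the student states, i.e. the
# teacher reconstructed from itself is compared with the teacher reconstructed
# by attending over the student; with cross_attentive_loss_with_norm the states
# are L2-normalized first, otherwise the cost is divided by the hidden dimension.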
def forward(
self,
prev_output_tokens,
encoder_out,
incremental_state=None,
has_txt_input=False,
**kwargs
):
"""
Args:
prev_output_tokens (LongTensor): previous decoder outputs of shape
`(batch, tgt_len)`, for input feeding/teacher forcing. If there are
two or more inputs during training, they share the same prev_output_tokens
encoder_out (tuple[Tensor]): output from the encoder, used for
encoder-side attention. It is a tuple if there are multiple inputs,
and a single encoder output otherwise
incremental_state ([dict]): dictionary used for storing state during
:ref:`Incremental decoding`. It is only valid for inference, only from single
input
Returns:
tuple:
- the last decoder layer's output of shape `(batch, tgt_len,
vocab)`. If there are N inputs, the batch is N times bigger than for a single input
- the last decoder layer's attention weights of shape `(batch,
tgt_len, src_len)`
"""
assert not isinstance(encoder_out, EncoderOut)
if isinstance(encoder_out, tuple):  # training with multiple inputs
rst = []
assert len(encoder_out) == 2
for i, eo in enumerate(encoder_out):
assert incremental_state is None
if i == 0:
rst.append(
self.spch_decoder(prev_output_tokens, eo, incremental_state)
)
else:
rst.append(
self.text_decoder(prev_output_tokens, eo, incremental_state)
)
dec_out = torch.cat([r[0] for r in rst], dim=0)
attn_cost = None
if self.compute_cross_attentive_loss:
assert isinstance(encoder_out[0], dict)
if self.cross_attentive_loss_reverse:
attn_cost = self.cross_attentive_loss(
teacher_states=encoder_out[1]["encoder_states"], # text_states
student_states=encoder_out[0]["encoder_states"], # spch_states
teacher_masking=encoder_out[1]["encoder_padding_mask"],
student_masking=encoder_out[0]["encoder_padding_mask"],
)
else:
attn_cost = self.cross_attentive_loss(
teacher_states=encoder_out[0]["encoder_states"], # spch_states
student_states=encoder_out[1]["encoder_states"], # text_states
teacher_masking=encoder_out[0]["encoder_padding_mask"],
student_masking=encoder_out[1]["encoder_padding_mask"],
)
return (dec_out, {"attn_cost": attn_cost})
else: # inference or training with one input
if has_txt_input:
return self.text_decoder(
prev_output_tokens, encoder_out, incremental_state
)
return self.spch_decoder(prev_output_tokens, encoder_out, incremental_state)
# Note:
# dual input transformer:
# encoder: S2TTransformerEncoder for speech + TransformerEncoder for text
# decoder: TransformerDecoder for text
@register_model("dual_input_s2t_transformer")
class DualInputS2TTransformerModel(FairseqEncoderDecoderModel):
def __init__(self, encoder, decoder):
super().__init__(encoder, decoder)
self.num_updates = 0
def max_positions(self):
return None # it is provided in task
@staticmethod
def add_args(parser):
"""Add model-specific arguments to the parser."""
# encoder 1: S2TTransformerEncoder for speech
parser.add_argument(
"--conv-kernel-sizes",
type=str,
metavar="N",
help="kernel sizes of Conv1d subsampling layers",
)
parser.add_argument(
"--conv-channels",
type=int,
metavar="N",
help="# of channels in Conv1d subsampling layers",
)
parser.add_argument(
"--enc-output-dim",
type=int,
metavar="N",
help="""
encoder output dimension; can be None. If specified, the transformer
output is projected to the specified dimension""",
)
# standard Transformer
parser.add_argument(
"--activation-fn",
type=str,
default="relu",
choices=utils.get_available_activation_fns(),
help="activation function to use",
)
parser.add_argument(
"--dropout", type=float, metavar="D", help="dropout probability"
)
parser.add_argument(
"--attention-dropout",
type=float,
metavar="D",
help="dropout probability for attention weights",
)
parser.add_argument(
"--activation-dropout",
"--relu-dropout",
type=float,
metavar="D",
help="dropout probability after activation in FFN.",
)
parser.add_argument(
"--encoder-embed-dim",
type=int,
metavar="N",
help="encoder embedding dimension",
)
parser.add_argument(
"--encoder-text-embed-dim",
type=int,
metavar="N",
help="encoder text embedding dimension",
)
parser.add_argument(
"--encoder-ffn-embed-dim",
type=int,
metavar="N",
help="encoder embedding dimension for FFN",
)
parser.add_argument(
"--encoder-attention-heads",
type=int,
metavar="N",
help="num encoder attention heads",
)
parser.add_argument(
"--decoder-embed-dim",
type=int,
metavar="N",
help="decoder embedding dimension",
)
parser.add_argument(
"--decoder-ffn-embed-dim",
type=int,
metavar="N",
help="decoder embedding dimension for FFN",
)
parser.add_argument(
"--decoder-layers", type=int, metavar="N", help="num decoder layers"
)
parser.add_argument(
"--decoder-attention-heads",
type=int,
metavar="N",
help="num decoder attention heads",
)
parser.add_argument(
"--layernorm-embedding",
action="store_true",
help="add layernorm to embedding",
)
parser.add_argument(
"--no-scale-embedding",
action="store_true",
help="if True, dont scale embeddings",
)
# non-standard transformer parameters
parser.add_argument(
"--speech-encoder-layers",
type=int,
metavar="N",
help="num speech encoder layers",
)
parser.add_argument(
"--text-encoder-layers",
type=int,
metavar="N",
help="num text encoder layers",
)
parser.add_argument(
"--encoder-shared-layers",
type=int,
metavar="N",
help="num shared encoder layers",
)
parser.add_argument(
"--encoder-shared-layer-level",
type=int,
metavar="N",
default=0,
choices=[0, 1, 2],
help="share layer level 0: all share 1: all share with separate model 2: share weight but not bias and layernorm",
)
parser.add_argument(
"--decoder-shared-layer-level",
default=0,
choices=[0, 1, 2],
type=int,
metavar="N",
help="0: share everything; 1: share everything with different model 2: no share layer_norm and bias",
)
###
parser.add_argument(
"--text-input-cost-ratio",
type=float,
default=1.0,
metavar="V",
help="text input cost ratio relative to speech input cost",
)
parser.add_argument(
"--init-scale",
type=float,
default=1.0,
metavar="V",
help="scale the initial weight by given factor",
)
parser.add_argument(
"--enc-grad-mult",
type=float,
metavar="V",
default=1.0,
help="multiply enc1 and enc2 gradient by V",
)
parser.add_argument(
"--enc2-along-grad-mult",
type=float,
metavar="V",
default=1.0,
help="multiply enc2 gradient by V if only enc2 is used",
)
parser.add_argument(
"--load-pretrain-encoder",
type=str,
default="",
metavar="EXPR",
help=""" path to the pretrained encoder """,
)
parser.add_argument(
"--load-pretrain-speech-encoder",
type=str,
default="",
metavar="EXPR",
help=""" path to the pretrained speech encoder """,
)
parser.add_argument(
"--load-pretrain-text-encoder",
type=str,
default="",
metavar="EXPR",
help=""" path to the pretrained text encoder """,
)
parser.add_argument(
"--load-pretrain-text-encoder-last",
type=str,
default="",
metavar="EXPR",
help=""" path to the pretrained text encoder """,
)
parser.add_argument(
"--load-pretrain-decoder",
type=str,
metavar="EXPR",
default="",
help=""" path to the pretrained encoder """,
)
parser.add_argument(
"--add-speech-eos",
action="store_true",
help="add eos token at the end of input feature",
)
parser.add_argument(
"--speech-encoder-adapter-type",
type=str,
metavar="EXPR",
default="None",
choices=["None", "Linear", "MLP"],
help="add speech encoder adapter",
)
@classmethod
def build_encoder(cls, args, task):
spch_encoder = DualInputEncoder.build_spch_encoder(args)
text_encoder = DualInputEncoder.build_text_encoder(
args, task.src_dict, spch_encoder
)
cross_attentive_loss_before_last_layer = (
0 if getattr(args, "attentive_cost_regularization", 0.0) > 0.0 else -1
)
encoder = DualInputEncoder(
args,
spch_encoder,
text_encoder,
task.src_dict,
cross_attentive_loss_before_last_layer,
)
if args.init_scale != 1.0:
with torch.no_grad():
for param in encoder.parameters():
param.data.mul_(args.init_scale)
if args.load_pretrain_text_encoder != "":
checkpoint_utils.load_pretrained_component_from_model(
text_encoder, args.load_pretrain_text_encoder
)
if args.load_pretrain_speech_encoder != "":
if hasattr(spch_encoder, "encoder"):
checkpoint_utils.load_pretrained_component_from_model(
spch_encoder.encoder, args.load_pretrain_speech_encoder
)
else:
checkpoint_utils.load_pretrained_component_from_model(
spch_encoder, args.load_pretrain_speech_encoder
)
if (
args.load_pretrain_text_encoder_last != ""
): # if share encoder, speech encoder parameters will be used.
# It provides a chance to use pre-trained mt encoder instead
checkpoint_utils.load_pretrained_component_from_model(
text_encoder, args.load_pretrain_text_encoder_last
)
if args.load_pretrain_encoder != "":
checkpoint_utils.load_pretrained_component_from_model(
encoder, args.load_pretrain_encoder
)
return encoder
@classmethod
def build_decoder(cls, args, task):
dec_cfg = {
"decoder_layerdrop": args.decoder_layerdrop,
"share_decoder_input_output_embed": args.share_decoder_input_output_embed,
"decoder_embed_dim": args.decoder_embed_dim,
"max_target_positions": args.max_target_positions,
"dropout": args.dropout,
"encoder_learned_pos": args.encoder_learned_pos,
"decoder_learned_pos": args.decoder_learned_pos,
"layernorm_embedding": args.layernorm_embedding,
"decoder_normalize_before": args.decoder_normalize_before,
"activation_dropout": args.activation_dropout,
"attention_dropout": args.attention_dropout,
"decoder_ffn_embed_dim": args.decoder_ffn_embed_dim,
"decoder_layers": args.decoder_layers,
"decoder_attention_heads": args.decoder_attention_heads,
"decoder_output_dim": args.decoder_embed_dim,
"no_scale_embedding": args.no_scale_embedding,
"adaptive_input": args.adaptive_input,
"quant_noise_pq": args.quant_noise_pq,
"adaptive_softmax_cutoff": args.adaptive_softmax_cutoff,
"tie_adaptive_weights": args.tie_adaptive_weights,
"no_token_positional_embeddings": args.no_token_positional_embeddings,
"encoder": {"embed_dim":args.encoder_embed_dim}
}
dec_cfg = namedtuple("args", dec_cfg.keys())(*dec_cfg.values())
dec_emb = nn.Embedding(
len(task.target_dictionary),
args.decoder_embed_dim,
task.target_dictionary.pad(),
)
compute_cross_attentive_loss = (
True if getattr(args, "attentive_cost_regularization", 0.0) > 0.0 else False
)
cross_attentive_loss_without_norm = getattr(
args, "attentive_cost_without_normalize", False
)
cross_attentive_loss_reverse = (
False # getattr(args, "attentive_cost_reverse", False)
)
text_decoder = TransformerDecoder(dec_cfg, task.target_dictionary, dec_emb)
spch_decoder = TransformerDecoder(dec_cfg, task.target_dictionary, dec_emb)
spch_decoder = TransformerMultiInputDecoder.share_spchdecoder(
args, text_decoder, spch_decoder
)
decoder = TransformerMultiInputDecoder(
dictionary=task.target_dictionary,
spch_decoder=spch_decoder,
text_decoder=text_decoder,
compute_cross_attentive_loss=compute_cross_attentive_loss,
cross_attentive_loss_with_norm=True
if not cross_attentive_loss_without_norm
else False,
cross_attentive_loss_reverse=cross_attentive_loss_reverse,
)
if args.init_scale != 1.0:
with torch.no_grad():
for param in decoder.parameters():
param.data.mul_(args.init_scale)
if args.load_pretrain_decoder != "":
try:
checkpoint_utils.load_pretrained_component_from_model(
decoder, args.load_pretrain_decoder
)
except RuntimeError:
checkpoint_utils.load_pretrained_component_from_model(
decoder.text_decoder, args.load_pretrain_decoder
)
if args.decoder_shared_layer_level > 0:
checkpoint_utils.load_pretrained_component_from_model(
decoder.spch_decoder, args.load_pretrain_decoder
)
return decoder
@classmethod
def build_model(cls, args, task):
"""Build a new model instance."""
# make sure that all args are properly defaulted
# (in case there are any new ones)
dualinputs2ttransformer_base(args)
encoder = cls.build_encoder(args, task)
decoder = cls.build_decoder(args, task)
return cls(encoder, decoder)
def get_normalized_probs(self, net_output, log_probs, sample=None):
# net_output['encoder_out'] is a (B, T, D) tensor
lprobs = super().get_normalized_probs(net_output, log_probs, sample)
lprobs.batch_first = True
return lprobs
def set_num_updates(self, num_updates):
"""Set the number of parameters updates."""
super().set_num_updates(num_updates)
self.num_updates = num_updates
def forward(
self,
src_tokens,
src_lengths,
prev_output_tokens,
use_encoder_outputs=False,
src_txt_tokens=None,
src_txt_lengths=None,
mode="sup_speech",
**kwargs
):
"""
Run the forward pass for an encoder-decoder model.
First feed a batch of source tokens through the encoder. Then, feed the
encoder output and previous decoder outputs (i.e., teacher forcing) to
the decoder to produce the next outputs::
encoder_out = self.encoder(src_tokens, src_lengths)
return self.decoder(prev_output_tokens, encoder_out)
Args:
src_tokens (LongTensor): tokens in the source language of shape
`(batch, src_len)`
src_lengths (LongTensor): source sentence lengths of shape `(batch)`
prev_output_tokens (LongTensor): previous decoder outputs of shape
`(batch, tgt_len)`, for teacher forcing
mode = 'sup_speech' or 'text'
Returns:
tuple:
- the decoder's output of shape `(batch, tgt_len, vocab)`
- a dictionary with any model-specific outputs
"""
if mode == "text":
assert src_txt_tokens is None
src_txt_tokens = src_tokens
src_txt_lengths = src_lengths
src_tokens = None
src_lengths = None
encoder_out = self.encoder(
src_tokens,
src_lengths=src_lengths,
src_txt_tokens=src_txt_tokens,
src_txt_lengths=src_txt_lengths,
**kwargs
)
has_txt_input = True if src_txt_tokens is not None else False
decoder_out = self.decoder(
prev_output_tokens,
encoder_out=encoder_out,
has_txt_input=has_txt_input,
**kwargs
)
if use_encoder_outputs:
return decoder_out, encoder_out
return decoder_out
@register_model_architecture(
"dual_input_s2t_transformer", "dualinputs2ttransformer_base"
)
def dualinputs2ttransformer_base(args):
args.encoder_freezing_updates = getattr(args, "encoder_freezing_updates", 0)
# Convolutional subsampler
args.input_feat_per_channel = getattr(args, "input_feat_per_channel", 80)
args.conv_kernel_sizes = getattr(args, "conv_kernel_sizes", "5,5")
args.conv_channels = getattr(args, "conv_channels", 1024)
# Transformer
args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 512)
args.encoder_text_embed_dim = getattr(
args, "encoder_text_embed_dim", args.encoder_embed_dim
)
args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 2048)
args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 8)
args.encoder_normalize_before = getattr(args, "encoder_normalize_before", True)
args.encoder_layerdrop = getattr(args, "encoder_layerdrop", 0)
args.encoder_learned_pos = getattr(args, "encoder_learned_pos", False)
args.decoder_embed_dim = getattr(args, "decoder_embed_dim", args.encoder_embed_dim)
args.decoder_ffn_embed_dim = getattr(
args, "decoder_ffn_embed_dim", args.encoder_ffn_embed_dim
)
args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 8)
args.decoder_normalize_before = getattr(args, "decoder_normalize_before", True)
args.decoder_learned_pos = getattr(args, "decoder_learned_pos", False)
args.dropout = getattr(args, "dropout", 0.1)
args.attention_dropout = getattr(args, "attention_dropout", args.dropout)
args.activation_dropout = getattr(args, "activation_dropout", args.dropout)
args.activation_fn = getattr(args, "activation_fn", "relu")
args.adaptive_softmax_cutoff = getattr(args, "adaptive_softmax_cutoff", None)
args.adaptive_softmax_dropout = getattr(args, "adaptive_softmax_dropout", 0)
args.tie_adaptive_weights = getattr(args, "tie_adaptive_weights", False)
args.share_decoder_input_output_embed = getattr(
args, "share_decoder_input_output_embed", False
)
args.no_token_positional_embeddings = getattr(
args, "no_token_positional_embeddings", False
)
args.adaptive_input = getattr(args, "adaptive_input", False)
args.decoder_layerdrop = getattr(args, "decoder_layerdrop", 0.0)
args.decoder_output_dim = getattr(
args, "decoder_output_dim", args.decoder_embed_dim
)
args.layernorm_embedding = getattr(args, "layernorm_embedding", False)
args.no_scale_embedding = getattr(args, "no_scale_embedding", False)
args.quant_noise_pq = getattr(args, "quant_noise_pq", 0)
args.speech_encoder_layers = getattr(args, "speech_encoder_layers", 10)
args.text_encoder_layers = getattr(args, "text_encoder_layers", 6)
args.encoder_shared_layers = getattr(args, "encoder_shared_layers", 0)
args.decoder_layers = getattr(args, "decoder_layers", 6)
args.add_speech_eos = getattr(args, "add_speech_eos", False)
@register_model_architecture("dual_input_s2t_transformer", "dualinputs2ttransformer_s")
def dualinputs2ttransformer_s(args):
args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 256)
args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 256 * 4)
args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 4)
args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 4)
args.dropout = getattr(args, "dropout", 0.1)
args.speech_encoder_layers = getattr(args, "speech_encoder_layers", 7)
args.text_encoder_layers = getattr(args, "text_encoder_layers", 7)
args.decoder_layers = getattr(args, "decoder_layers", 7)
dualinputs2ttransformer_base(args)
@register_model_architecture("dual_input_s2t_transformer", "dualinputs2ttransformer_m")
def dualinputs2ttransformer_m(args):
args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 512)
args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 512 * 4)
args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 8)
args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 8)
args.dropout = getattr(args, "dropout", 0.15)
args.speech_encoder_layers = getattr(args, "speech_encoder_layers", 10)
args.text_encoder_layers = getattr(args, "text_encoder_layers", 6)
args.decoder_layers = getattr(args, "decoder_layers", 6)
dualinputs2ttransformer_base(args)
@register_model_architecture("dual_input_s2t_transformer", "dualinputs2ttransformer_b")
def dualinputs2ttransformer_b(args):
args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 768)
args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 768 * 4)
args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 12)
args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 12)
args.dropout = getattr(args, "dropout", 0.15)
args.speech_encoder_layers = getattr(args, "speech_encoder_layers", 12)
args.text_encoder_layers = getattr(args, "text_encoder_layers", 6)
args.decoder_layers = getattr(args, "decoder_layers", 6)
dualinputs2ttransformer_base(args)
@register_model_architecture("dual_input_s2t_transformer", "dualinputs2ttransformer_l")
def dualinputs2ttransformer_l(args):
args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 1024)
args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 1024 * 4)
args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 16)
args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 16)
args.dropout = getattr(args, "dropout", 0.2)
args.speech_encoder_layers = getattr(args, "speech_encoder_layers", 12)
args.text_encoder_layers = getattr(args, "text_encoder_layers", 6)
args.decoder_layers = getattr(args, "decoder_layers", 6)
dualinputs2ttransformer_base(args)
| EXA-1-master | exa/libraries/fairseq/examples/speech_text_joint_to_text/models/s2t_dualinputtransformer.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
from collections import OrderedDict, namedtuple
import torch.nn as nn
from fairseq import checkpoint_utils, utils
from fairseq.checkpoint_utils import load_checkpoint_to_cpu
from fairseq.file_io import PathManager
from fairseq.models import register_model, register_model_architecture
from fairseq.models.speech_to_text import (
SpeechWavTransformerEncoder,
StackedSpeechWavTransformerEncoder,
TransformerDecoder,
)
from fairseq.models.transformer import TransformerEncoder
from .s2t_dualinputtransformer import (
DualInputEncoder,
DualInputS2TTransformerModel,
TransformerMultiInputDecoder,
)
logger = logging.getLogger(__name__)
@register_model("dual_input_wav_transformer")
class DualInputWavTransformerModel(DualInputS2TTransformerModel):
def __init__(self, encoder, decoder):
super().__init__(encoder, decoder)
@staticmethod
def add_args(parser):
def add_transformer_args(parser):
            # We can't reuse TransformerModel.add_args(parser) here: it defines
            # max-source-positions, which would clash with the option already added by tasks/speech_to_text.py
# Transformer
parser.add_argument(
"--activation-fn",
type=str,
default="relu",
choices=utils.get_available_activation_fns(),
help="activation function to use",
)
parser.add_argument(
"--dropout", type=float, metavar="D", help="dropout probability"
)
parser.add_argument(
"--attention-dropout",
type=float,
metavar="D",
help="dropout probability for attention weights",
)
parser.add_argument(
"--activation-dropout",
"--relu-dropout",
type=float,
metavar="D",
help="dropout probability after activation in FFN.",
)
parser.add_argument(
"--encoder-embed-dim",
type=int,
metavar="N",
help="encoder embedding dimension",
)
parser.add_argument(
"--encoder-ffn-embed-dim",
type=int,
metavar="N",
help="encoder embedding dimension for FFN",
)
parser.add_argument(
"--encoder-layers", type=int, metavar="N", help="num encoder layers"
)
parser.add_argument(
"--encoder-attention-heads",
type=int,
metavar="N",
help="num encoder attention heads",
)
parser.add_argument(
"--encoder-normalize-before",
action="store_true",
help="apply layernorm before each encoder block",
)
parser.add_argument(
"--decoder-embed-dim",
type=int,
metavar="N",
help="decoder embedding dimension",
)
parser.add_argument(
"--decoder-ffn-embed-dim",
type=int,
metavar="N",
help="decoder embedding dimension for FFN",
)
parser.add_argument(
"--decoder-layers", type=int, metavar="N", help="num decoder layers"
)
parser.add_argument(
"--decoder-attention-heads",
type=int,
metavar="N",
help="num decoder attention heads",
)
parser.add_argument(
"--decoder-normalize-before",
action="store_true",
help="apply layernorm before each decoder block",
)
parser.add_argument(
"--share-decoder-input-output-embed",
action="store_true",
help="share decoder input and output embeddings",
)
parser.add_argument(
"--layernorm-embedding",
action="store_true",
help="add layernorm to embedding",
)
parser.add_argument(
"--no-scale-embedding",
action="store_true",
help="if True, dont scale embeddings",
)
parser.add_argument(
"--encoder-learned-pos",
action="store_true",
help="use learned positional embeddings",
)
parser.add_argument(
"--decoder-learned-pos",
action="store_true",
help="use learned positional embeddings",
)
add_transformer_args(parser)
SpeechWavTransformerEncoder.add_args(parser)
parser.add_argument(
"--load-pretrained-speech-text-encoder",
type=str,
default="",
metavar="EXPR",
help=""" path to the pretrained speech text encoder from SpeechTextPreTrainModel """,
)
parser.add_argument(
"--load-pretrained-wav2vec-encoder",
type=str,
default="",
metavar="EXPR",
help=""" path to the pretrained speech text encoder from wav2vec """,
)
parser.add_argument(
"--load-pretrained-speech-text-decoder",
type=str,
default="",
metavar="EXPR",
help=""" path to the pretrained speech text decoder from SpeechTextPreTrainModel """,
)
parser.add_argument(
"--load-pretrained-text-decoder",
type=str,
default="",
metavar="EXPR",
help=""" path to the pretrained text decoder """,
)
parser.add_argument(
"--load-init-encoder",
type=str,
default="",
metavar="EXPR",
help=""" path to load seed encoder model """,
)
parser.add_argument(
"--load-init-decoder",
type=str,
default="",
metavar="EXPR",
help=""" path to load seed decoder model """,
)
parser.add_argument(
"--text-input-cost-ratio",
type=float,
default=1.0,
metavar="V",
help="text input cost ratio relative to speech input cost",
)
parser.add_argument(
"--enc-grad-mult",
type=float,
metavar="V",
default=1.0,
help="multiply enc1 and enc2 gradient by V",
)
parser.add_argument(
"--enc2-along-grad-mult",
type=float,
metavar="V",
default=1.0,
help="multiply enc2 gradient by V if only enc2 is used",
)
parser.add_argument(
"--no-strict-check-pretrain-model",
action="store_true",
help="Don't apply strict model check for the pretrained model",
)
parser.add_argument(
"--stacked-encoder",
action="store_true",
help="stack speech and text encoders",
)
@classmethod
def update_transformer_encoder_cfg(cls, args, update_dict):
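        # Clone the argparse namespace into a plain dict, apply the overrides in
        # update_dict, and wrap the result in an immutable namedtuple that the
        # encoder constructors can read like an args object.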
cfg = dict(args._get_kwargs())
for fkey in update_dict.keys():
cfg[fkey] = update_dict[fkey]
cfg.pop("_name", None) # remove keys start with _
model_args = namedtuple("args", cfg.keys())(*cfg.values())
return model_args
@classmethod
def build_text_encoder(cls, args, src_dictionary):
enc_emb = nn.Embedding(
len(src_dictionary), args.encoder_embed_dim, src_dictionary.pad()
)
model_args = cls.update_transformer_encoder_cfg(
args,
{
"encoder_layers": args.text_encoder_layers,
"max_source_positions": args.max_positions_text,
},
)
text_encoder = TransformerEncoder(model_args, src_dictionary, enc_emb)
return text_encoder
@classmethod
def build_speech_encoder(cls, args):
model_args = cls.update_transformer_encoder_cfg(
args, {"encoder_layers": args.speech_encoder_layers}
)
speech_encoder = SpeechWavTransformerEncoder(model_args)
return speech_encoder
@classmethod
def check_args(cls, condition, is_strict, msg):
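        # Compare the current config against a pretrained checkpoint: raise on
        # mismatch when strict checking is enabled, otherwise only log a warning.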
if condition:
return
if is_strict:
raise ValueError(msg)
        logger.warning(msg)
@classmethod
def build_encoder(cls, args, task):
# text_encoder = cls.build_text_encoder(args, task.source_dictionary )
text_encoder = cls.build_text_encoder(args, task.src_dict)
speech_encoder = cls.build_speech_encoder(args)
if args.load_pretrained_wav2vec_encoder:
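            # Map wav2vec 2.0 checkpoint sub-modules onto the matching
            # SpeechWavTransformerEncoder sub-modules before copying their weights.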
component_pairs = (
("feature_extractor", speech_encoder.subsample),
("post_extract_proj", speech_encoder.feat_proj),
("layer_norm", speech_encoder.feat_layer_norm),
("encoder.pos_conv", speech_encoder.embed_positions),
("encoder.layers", speech_encoder.layers),
("encoder.layer_norm", speech_encoder.layer_norm),
("mask_emb", speech_encoder.mask_emb),
)
state = cls.load_pretrained_speech_text_components(
args.load_pretrained_wav2vec_encoder, component_pairs
)
cls.check_args(
args.encoder_normalize_before
== state["cfg"]["model"]["layer_norm_first"],
not args.no_strict_check_pretrain_model,
f"encoder_normalize_before {args.encoder_normalize_before} doesn't match with the pretrained model",
)
cls.check_args(
args.activation_fn == state["cfg"]["model"]["activation_fn"],
not args.no_strict_check_pretrain_model,
f"activation_fn {args.activation_fn} doesn't match with the pretrained model",
)
if getattr(args, "stacked_encoder", False):
if args.encoder_shared_text_layers_from_begin > 0:
raise ValueError(
"We can not stack encoders and share encoders at the same time!"
)
speech_encoder = StackedSpeechWavTransformerEncoder(
speech_encoder, text_encoder.layers, text_encoder.layer_norm
)
else:
cls.share_speech_text_encoder(
speech_encoder, text_encoder, args.encoder_shared_text_layers_from_begin
)
cross_attentive_loss_before_last_layer = (
0 if getattr(args, "attentive_cost_regularization", 0.0) > 0.0 else -1
)
encoder = DualInputEncoder(
args,
speech_encoder,
text_encoder,
task.src_dict,
cross_attentive_loss_before_last_layer,
)
if args.load_pretrained_speech_text_encoder:
component_pairs = (
("encoder.sup_s2s_speech_encoder", encoder.spch_encoder),
("encoder.text_encoder", encoder.text_encoder),
)
cls.load_pretrained_speech_text_components(
args.load_pretrained_speech_text_encoder, component_pairs
)
if getattr(args, "load_init_encoder", "") != "":
checkpoint_utils.load_pretrained_component_from_model(
encoder, args.load_init_encoder
)
return encoder
@classmethod
def build_text_decoder(cls, args, tgt_dictionary, dec_emb_share=None):
dec_emb = (
nn.Embedding(
len(tgt_dictionary), args.decoder_embed_dim, tgt_dictionary.pad()
)
if dec_emb_share is None
else dec_emb_share
)
text_decoder = TransformerDecoder(args, tgt_dictionary, dec_emb)
return text_decoder
@classmethod
def build_decoder(cls, args, task):
text_decoder = cls.build_text_decoder(args, task.target_dictionary)
compute_cross_attentive_loss = (
True if getattr(args, "attentive_cost_regularization", 0.0) > 0.0 else False
)
cross_attentive_loss_without_norm = getattr(
args, "attentive_cost_without_normalize", False
)
cross_attentive_loss_reverse = (
False # getattr(args, "attentive_cost_reverse", False)
)
if getattr(args, "load_pretrained_text_decoder", "") != "":
checkpoint_utils.load_pretrained_component_from_model(
text_decoder, args.load_pretrained_text_decoder
)
if args.load_pretrained_speech_text_decoder:
component_pairs = (("decoder.text_decoder", text_decoder),)
cls.load_pretrained_speech_text_components(
args.load_pretrained_speech_text_decoder, component_pairs
)
decoder = TransformerMultiInputDecoder(
dictionary=task.target_dictionary,
spch_decoder=text_decoder,
text_decoder=text_decoder,
compute_cross_attentive_loss=compute_cross_attentive_loss,
cross_attentive_loss_with_norm=True
if not cross_attentive_loss_without_norm
else False,
cross_attentive_loss_reverse=cross_attentive_loss_reverse,
)
if getattr(args, "load_init_decoder", "") != "":
checkpoint_utils.load_pretrained_component_from_model(
decoder, args.load_init_decoder
)
return decoder
@classmethod
def load_pretrained_speech_text_components(cls, checkpoint, component_pairs):
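        # Copy weights from `checkpoint` into each (prefix, module) pair: plain
        # parameters are copied directly, modules load the matching prefixed
        # sub-state-dict with strict=True.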
if not PathManager.exists(checkpoint):
raise IOError("Model file not found: {}".format(checkpoint))
state = load_checkpoint_to_cpu(checkpoint)
for component_type, component in component_pairs:
if isinstance(component, nn.parameter.Parameter):
component.data.copy_(state["model"][component_type])
else:
component_state_dict = OrderedDict()
for key in state["model"].keys():
if key.startswith(component_type):
component_subkey = key[len(component_type) + 1 :]
component_state_dict[component_subkey] = state["model"][key]
component.load_state_dict(component_state_dict, strict=True)
return state
@classmethod
def share_speech_text_encoder(
cls, speech_encoder, text_encoder, shared_layers_from_begin
):
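        # Alias the first `shared_layers_from_begin` text-encoder layers to the
        # corresponding speech-encoder layers (aligned against the top
        # len(text_encoder.layers) speech layers), so both encoders share weights.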
if shared_layers_from_begin > 0:
num_text_encoder_layers = len(text_encoder.layers)
assert len(speech_encoder.layers) >= shared_layers_from_begin
assert num_text_encoder_layers >= shared_layers_from_begin
assert len(speech_encoder.layers) >= num_text_encoder_layers
for i, ly in enumerate(
speech_encoder.layers[
-num_text_encoder_layers : -num_text_encoder_layers
+ shared_layers_from_begin
]
):
assert isinstance(text_encoder.layers[i], type(ly))
text_encoder.layers[i] = ly
@register_model_architecture(
"dual_input_wav_transformer", "dualinputs2twavtransformer_base"
)
def dualinputs2twavtransformer_base(args):
# speech masking
args.dropout_input = getattr(args, "dropout_input", 0)
args.dropout_features = getattr(args, "dropout_features", 0)
args.speech_mask_length = getattr(args, "speech_mask_length", 10)
args.speech_mask_prob = getattr(args, "speech_mask_prob", 0.65)
args.speech_mask_selection = getattr(args, "speech_mask_selection", "static")
args.speech_mask_other = getattr(args, "speech_mask_other", 0)
args.speech_mask_min_space = getattr(args, "speech_mask_min_space", 1)
args.speech_no_mask_overlap = getattr(args, "speech_no_mask_overlap", False)
args.speech_conv_bias = getattr(args, "speech_conv_bias", False)
args.speech_extractor_mode = getattr(args, "speech_extractor_mode", "default")
args.no_strict_check_pretrain_model = getattr(
args, "no_strict_check_pretrain_model", False
)
args.speech_mask_channel_length = getattr(args, "speech_mask_channel_length", 10)
args.speech_mask_channel_prob = getattr(args, "speech_mask_channel_prob", 0.0)
args.speech_mask_channel_selection = getattr(
args, "speech_mask_channel_selection", "static"
)
args.speech_mask_channel_other = getattr(args, "speech_mask_channel_other", 0)
args.speech_mask_channel_min_space = getattr(
args, "speech_mask_channel_min_space", 1
)
args.speech_no_mask_channel_overlap = getattr(
args, "speech_no_mask_channel_overlap", False
)
args.no_scale_feature = getattr(args, "", False)
args.feature_grad_mult = getattr(args, "feature_grad_mult", 0.0) # 0.1
# Transformer
args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 768)
args.encoder_ffn_embed_dim = getattr(
args, "encoder_ffn_embed_dim", args.encoder_embed_dim * 4
)
args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 12)
args.encoder_normalize_before = getattr(args, "encoder_normalize_before", False)
args.encoder_layerdrop = getattr(args, "encoder_layerdrop", 0.1)
args.encoder_learned_pos = getattr(args, "encoder_learned_pos", False)
args.decoder_embed_dim = getattr(args, "decoder_embed_dim", args.encoder_embed_dim)
args.decoder_ffn_embed_dim = getattr(
args, "decoder_ffn_embed_dim", args.encoder_ffn_embed_dim
)
args.decoder_attention_heads = getattr(
args, "decoder_attention_heads", args.encoder_attention_heads
)
args.decoder_normalize_before = getattr(args, "decoder_normalize_before", False)
args.decoder_learned_pos = getattr(args, "decoder_learned_pos", False)
args.dropout = getattr(args, "dropout", 0.1)
args.attention_dropout = getattr(args, "attention_dropout", 0)
args.activation_dropout = getattr(args, "activation_dropout", args.dropout)
args.activation_fn = getattr(args, "activation_fn", "relu") # gelu?
args.adaptive_softmax_cutoff = getattr(args, "adaptive_softmax_cutoff", None)
args.adaptive_softmax_dropout = getattr(args, "adaptive_softmax_dropout", 0)
args.tie_adaptive_weights = getattr(args, "tie_adaptive_weights", False)
args.share_decoder_input_output_embed = getattr(
args, "share_decoder_input_output_embed", False
)
args.no_token_positional_embeddings = getattr(
args, "no_token_positional_embeddings", False
)
args.adaptive_input = getattr(args, "adaptive_input", False)
args.decoder_layerdrop = getattr(args, "decoder_layerdrop", 0.0)
args.decoder_output_dim = getattr(
args, "decoder_output_dim", args.decoder_embed_dim
)
args.layernorm_embedding = getattr(args, "layernorm_embedding", False)
args.no_scale_embedding = getattr(args, "no_scale_embedding", False)
args.quant_noise_pq = getattr(args, "quant_noise_pq", 0)
args.speech_encoder_layers = getattr(args, "speech_encoder_layers", 12)
args.text_encoder_layers = getattr(args, "text_encoder_layers", 6)
args.encoder_shared_text_layers_from_begin = getattr(
args, "encoder_shared_text_layers_from_begin", 6
)
args.decoder_layers = getattr(args, "decoder_layers", 6)
@register_model_architecture(
"dual_input_wav_transformer", "dualinputs2twavtransformer_base_stack"
)
def dualinputs2twavtransformer_base_stack(args):
args.speech_encoder_layers = getattr(args, "speech_encoder_layers", 6)
args.text_encoder_layers = getattr(args, "text_encoder_layers", 6)
args.encoder_shared_text_layers_from_begin = getattr(
args, "encoder_shared_text_layers_from_begin", 0
)
args.decoder_layers = getattr(args, "decoder_layers", 6)
args.stacked_encoder = getattr(args, "stacked_encoder", True)
args.layernorm_embedding = getattr(args, "layernorm_embedding", True)
dualinputs2twavtransformer_base(args)
@register_model_architecture(
"dual_input_wav_transformer", "dualinputs2twavtransformer_large"
)
def dualinputs2twavtransformer_large(args):
args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 1024)
args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 16)
args.speech_encoder_layers = getattr(args, "speech_encoder_layers", 24)
args.text_encoder_layers = getattr(args, "text_encoder_layers", 12)
args.encoder_shared_text_layers_from_begin = getattr(
args, "encoder_shared_text_layers_from_begin", 12
)
args.decoder_layers = getattr(args, "decoder_layers", 12)
dualinputs2twavtransformer_base(args)
| EXA-1-master | exa/libraries/fairseq/examples/speech_text_joint_to_text/models/s2t_dualinputwavtransformer.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import importlib
import os
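# Auto-import every model module in this directory so the @register_model
# decorators run (standard fairseq examples pattern; the package path below is
# an assumption based on that convention).
for file in os.listdir(os.path.dirname(__file__)):
    if file.endswith(".py") and not file.startswith("_"):
        model_name = file[: file.find(".py")]
        importlib.import_module(
            "examples.speech_text_joint_to_text.models." + model_name
        )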
| EXA-1-master | exa/libraries/fairseq/examples/speech_text_joint_to_text/models/__init__.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import copy
import torch.nn as nn
from fairseq import checkpoint_utils
from fairseq import utils
from fairseq.data.data_utils import lengths_to_padding_mask
from fairseq.models import (
register_model,
register_model_architecture,
FairseqEncoder,
)
from fairseq.models.speech_to_text import Wav2VecEncoderWithAdaptor
from fairseq.models.speech_to_text.xm_transformer import (
set_default_adaptor_args,
set_default_w2v_encoder_args,
need_finetuning
)
from fairseq.models.transformer import TransformerEncoder, TransformerDecoder
from fairseq.models.wav2vec import TransformerSentenceEncoderLayer
from fairseq.utils import safe_hasattr
from .s2t_dualinputtransformer import (
DualInputS2TTransformerModel,
TransformerMultiInputDecoder,
DualInputEncoder,
)
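# Wrapper that re-exposes a wav2vec TransformerSentenceEncoderLayer with the
# interface expected by the mbart encoder layer list: forward() returns only the
# hidden states instead of an (x, attn) tuple.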
class TransformerSentenceEncoderLayerStd(TransformerSentenceEncoderLayer):
def __init__(self, sent_enc_layer):
super(TransformerSentenceEncoderLayer, self).__init__()
self.embedding_dim = sent_enc_layer.embedding_dim
self.dropout = sent_enc_layer.dropout
self.activation_dropout = sent_enc_layer.activation_dropout
# Initialize blocks
self.activation_fn = sent_enc_layer.activation_fn
self.self_attn = sent_enc_layer.self_attn
self.dropout1 = sent_enc_layer.dropout1
self.dropout2 = sent_enc_layer.dropout2
self.dropout3 = sent_enc_layer.dropout3
self.layer_norm_first = sent_enc_layer.layer_norm_first
# layer norm associated with the self attention layer
self.self_attn_layer_norm = sent_enc_layer.self_attn_layer_norm
self.fc1 = sent_enc_layer.fc1
self.fc2 = sent_enc_layer.fc2
# layer norm associated with the position wise feed-forward NN
self.final_layer_norm = sent_enc_layer.final_layer_norm
def forward(
self,
x,
self_attn_mask=None,
self_attn_padding_mask=None,
need_weights=None,
att_args=None,
):
x, attn = super().forward(
x, self_attn_mask, self_attn_padding_mask, need_weights, att_args
)
return x
# TODO retire SharedEncoder
class SharedEncoder(FairseqEncoder):
def __init__(self, wav2vec_enc, mbart_enc, adaptor, shared_layers):
super().__init__(None)
self.w2v_encoder = wav2vec_enc
self.shared_layers = self.w2v_encoder.w2v_model.encoder.layers[-shared_layers:]
self.w2v_encoder.w2v_model.encoder.layers = (
self.w2v_encoder.w2v_model.encoder.layers[:-shared_layers]
)
self.adaptor = adaptor
if self.shared_layers[-1].layer_norm_first:
self.final_layer_norm = mbart_enc.layer_norm
else:
mbart_enc.layer_norm = None
self.final_layer_norm = None
shared_layer_from = len(mbart_enc.layers) - shared_layers
if shared_layer_from < 0:
shared_layer_from = 0
for layer_id, layer in enumerate(self.shared_layers):
mbart_enc.layers[
shared_layer_from + layer_id
] = TransformerSentenceEncoderLayerStd(layer)
def forward(self, src_tokens, src_lengths=None, **kwargs):
padding_mask = lengths_to_padding_mask(src_lengths)
if not padding_mask.any():
padding_mask = None
out = self.w2v_encoder.forward(src_tokens, padding_mask, tbc=True)
x = out["encoder_out"]
enc_padding_mask = None
if out["encoder_padding_mask"] is not None:
enc_padding_mask = out["encoder_padding_mask"].transpose(
0, 1
) # T X B --> B X T
x, enc_padding_mask = self.adaptor(x, enc_padding_mask)
for layer in self.shared_layers:
x, _ = layer(x, enc_padding_mask)
if self.final_layer_norm is not None:
x = self.final_layer_norm(x)
return {
"encoder_out": [x], # T x B x C
"encoder_padding_mask": [enc_padding_mask]
if enc_padding_mask is not None
else [], # B x T
"encoder_embedding": [], # B x T x C
"encoder_states": [], # List[T x B x C]
"src_tokens": [],
"src_lengths": [],
}
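# Encoder that stacks: wav2vec speech encoder -> length adaptor -> mbart encoder
# layers (plus the mbart final layer norm), optionally dropping the top
# `drop_w2v_layers` wav2vec layers first.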
class StackedWav2VecEncoderWithAdaptor(FairseqEncoder):
def __init__(
self,
wav2vec_enc,
mbart_enc_layers,
mbart_layer_norm,
adaptor,
drop_w2v_layers=0,
):
super().__init__(None)
self.w2v_encoder = wav2vec_enc
self.adaptor = adaptor
self.mbart_encoder_layers = mbart_enc_layers
self.final_layer_norm = mbart_layer_norm
if drop_w2v_layers > 0:
self.w2v_encoder.w2v_model.encoder.layers = (
self.w2v_encoder.w2v_model.encoder.layers[:-drop_w2v_layers]
)
def forward(self, src_tokens, src_lengths=None, return_all_hiddens=False, **kwargs):
padding_mask = lengths_to_padding_mask(src_lengths)
if not padding_mask.any():
padding_mask = None
out = self.w2v_encoder.forward(src_tokens, padding_mask, tbc=True)
x = out["encoder_out"]
enc_padding_mask = None
if out["padding_mask"] is not None:
enc_padding_mask = out["padding_mask"] # B X T
x, enc_padding_mask = self.adaptor(x, enc_padding_mask)
encoder_states = []
for layer in self.mbart_encoder_layers:
x = layer(x, enc_padding_mask)
if return_all_hiddens:
encoder_states.append(x)
if self.final_layer_norm is not None:
x = self.final_layer_norm(x)
return {
"encoder_out": [x], # T x B x C
"encoder_padding_mask": [enc_padding_mask]
if enc_padding_mask is not None
else [], # B x T
"encoder_embedding": [], # B x T x C
"encoder_states": encoder_states, # List[T x B x C]
"src_tokens": [],
"src_lengths": [],
}
def reorder_encoder_out(self, encoder_out, new_order):
new_encoder_out = (
[]
if len(encoder_out["encoder_out"]) == 0
else [x.index_select(1, new_order) for x in encoder_out["encoder_out"]]
)
new_encoder_padding_mask = (
[]
if len(encoder_out["encoder_padding_mask"]) == 0
else [
x.index_select(0, new_order)
for x in encoder_out["encoder_padding_mask"]
]
)
new_encoder_embedding = (
[]
if len(encoder_out["encoder_embedding"]) == 0
else [
x.index_select(0, new_order) for x in encoder_out["encoder_embedding"]
]
)
encoder_states = encoder_out["encoder_states"]
if len(encoder_states) > 0:
for idx, state in enumerate(encoder_states):
encoder_states[idx] = state.index_select(1, new_order)
return {
"encoder_out": new_encoder_out, # T x B x C
"encoder_padding_mask": new_encoder_padding_mask, # B x T
"encoder_embedding": new_encoder_embedding, # B x T x C
"encoder_states": encoder_states, # List[T x B x C]
"src_tokens": [], # B x T
"src_lengths": [], # B x 1
}
# Note:
# dual input transformer:
# encoder: wav2vec for speech + mbart encoder for text
# decoder: mbart decoder for text
@register_model("dual_input_xm_transformer")
class DualInputXMTransformerModel(DualInputS2TTransformerModel):
def __init__(self, encoder, decoder):
super().__init__(encoder, decoder)
@staticmethod
def add_args(parser):
"""Add model-specific arguments to the parser."""
# wav2vec encoder
Wav2VecEncoderWithAdaptor.add_args(parser)
# add_decoder_args(parser)
# mbart Transformer
parser.add_argument(
"--activation-fn",
type=str,
default="relu",
choices=utils.get_available_activation_fns(),
help="activation function to use",
)
parser.add_argument(
"--mbart-dropout", type=float, metavar="D", help="dropout probability"
)
parser.add_argument(
"--mbart-attention-dropout",
type=float,
metavar="D",
help="dropout probability for attention weights",
)
parser.add_argument(
"--mbart-activation-dropout",
type=float,
metavar="D",
help="dropout probability after activation in FFN.",
)
parser.add_argument(
"--encoder-embed-dim",
type=int,
metavar="N",
help="encoder embedding dimension",
)
parser.add_argument(
"--encoder-ffn-embed-dim",
type=int,
metavar="N",
help="encoder embedding dimension for FFN",
)
parser.add_argument(
"--encoder-layers", type=int, metavar="N", help="num encoder layers"
)
parser.add_argument(
"--encoder-attention-heads",
type=int,
metavar="N",
help="num encoder attention heads",
)
parser.add_argument(
"--encoder-normalize-before",
action="store_true",
help="apply layernorm before each encoder block",
)
parser.add_argument(
"--decoder-embed-dim",
type=int,
metavar="N",
help="decoder embedding dimension",
)
parser.add_argument(
"--decoder-ffn-embed-dim",
type=int,
metavar="N",
help="decoder embedding dimension for FFN",
)
parser.add_argument(
"--decoder-layers", type=int, metavar="N", help="num decoder layers"
)
parser.add_argument(
"--decoder-attention-heads",
type=int,
metavar="N",
help="num decoder attention heads",
)
parser.add_argument(
"--decoder-normalize-before",
action="store_true",
help="apply layernorm before each decoder block",
)
parser.add_argument(
"--layernorm-embedding",
action="store_true",
help="add layernorm to embedding",
)
parser.add_argument(
"--no-scale-embedding",
action="store_true",
help="if True, dont scale embeddings",
)
parser.add_argument(
"--load-pretrained-mbart-from",
type=str,
metavar="STR",
help="model to take text encoder decoder weights from (for initialization)",
)
# parser.add_argument("--finetune-w2v-params", type=str, metavar="STR",
# help="comma-separated param strings to finetune.")
parser.add_argument(
"--finetune-mbart-decoder-params",
type=str,
metavar="STR",
help="comma-separated param strings to finetune.",
)
parser.add_argument(
"--finetune-mbart-encoder-params",
type=str,
metavar="STR",
help="comma-separated param strings to finetune.",
)
parser.add_argument(
"--skip-encoder-projection",
action="store_true",
help="skip the projection layer in encoder",
)
parser.add_argument(
"--enc-grad-mult",
type=float,
metavar="V",
default=1.0,
help="multiply enc1 and enc2 gradient by V",
)
parser.add_argument(
"--enc2-along-grad-mult",
type=float,
metavar="V",
default=1.0,
help="multiply enc2 gradient by V if only enc2 is used",
)
parser.add_argument(
"--text-input-cost-ratio",
type=float,
default=1.0,
metavar="V",
help="text input cost ratio relative to speech input cost",
)
parser.add_argument(
"--stack-w2v-mbart-encoder",
action="store_true",
help="stack w2v and mbart encoder",
)
parser.add_argument(
"--stack-w2v-mbart-nonorm-encoder",
action="store_true",
help="stack w2v and mbart encoder",
)
parser.add_argument(
"--no-final-norm-decoder", action="store_true", help="no layer norm"
)
parser.add_argument(
"--drop-w2v-layers",
type=int,
default=0,
metavar="N",
help="drop w2v encoder layers",
)
parser.add_argument(
"--share-w2v-text-encoder",
action="store_true",
help="share w2v encoder layers with text encoder",
)
parser.add_argument(
"--shared-w2v-layers",
type=int,
default=0,
metavar="N",
help="shared encoder layers from w2v encoder",
)
@classmethod
def build_encoder(cls, args, task):
_args = copy.deepcopy(args)
_args.dropout = args.mbart_dropout
_args.attention_dropout = args.mbart_attention_dropout
_args.activation_dropout = args.mbart_activation_dropout
_args.max_source_positions = 1024
enc_emb = nn.Embedding(
len(task.src_dict), _args.encoder_embed_dim, task.src_dict.pad()
)
text_encoder = TransformerEncoder(_args, task.src_dict, enc_emb)
spch_encoder = Wav2VecEncoderWithAdaptor(args)
if getattr(args, "load_pretrained_mbart_from", None):
text_encoder = checkpoint_utils.load_pretrained_component_from_model(
component=text_encoder, checkpoint=args.load_pretrained_mbart_from
)
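        # Three ways to combine the wav2vec and mbart encoders: stack them (with
        # or without the final mbart layer norm), or move the top wav2vec layers
        # into the mbart encoder so they are shared.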
if getattr(args, "stack_w2v_mbart_encoder", False):
assert getattr(args, "share_w2v_text_encoder", False) is False
spch_encoder = StackedWav2VecEncoderWithAdaptor(
spch_encoder.w2v_encoder,
text_encoder.layers,
text_encoder.layer_norm,
spch_encoder.adaptor,
args.drop_w2v_layers,
)
elif getattr(args, "stack_w2v_mbart_nonorm_encoder", False):
text_encoder.layer_norm = None
spch_encoder = StackedWav2VecEncoderWithAdaptor(
spch_encoder.w2v_encoder,
text_encoder.layers,
text_encoder.layer_norm,
spch_encoder.adaptor,
args.drop_w2v_layers,
)
elif getattr(args, "share_w2v_text_encoder", False):
spch_encoder = SharedEncoder(
spch_encoder.w2v_encoder,
text_encoder,
spch_encoder.adaptor,
args.shared_w2v_layers,
)
for k, p in spch_encoder.named_parameters():
# Freeze pretrained models by default
if safe_hasattr(
args, "finetune_w2v_params"
) and need_finetuning(args.finetune_w2v_params, k):
p.requires_grad = True
else:
p.requires_grad = False
for k, p in text_encoder.named_parameters():
# Freeze pretrained models by default
if safe_hasattr(
args, "finetune_mbart_encoder_params"
) and need_finetuning(
args.finetune_mbart_encoder_params, k
):
p.requires_grad = True
else:
p.requires_grad = False
cross_attentive_loss_before_last_layer = (
0 if getattr(args, "attentive_cost_regularization", 0.0) > 0.0 else -1
)
encoder = DualInputEncoder(
args,
spch_encoder,
text_encoder,
task.src_dict,
cross_attentive_loss_before_last_layer,
)
return encoder
@classmethod
def build_decoder(cls, args, task):
_args = copy.deepcopy(args)
_args.dropout = args.mbart_dropout
_args.attention_dropout = args.mbart_attention_dropout
_args.activation_dropout = args.mbart_activation_dropout
_args.max_target_positions = 1024
dec_emb = nn.Embedding(
len(task.tgt_dict), _args.encoder_embed_dim, task.tgt_dict.pad()
)
decoder = TransformerDecoder(_args, task.tgt_dict, dec_emb)
if getattr(args, "load_pretrained_mbart_from", None):
decoder = checkpoint_utils.load_pretrained_component_from_model(
component=decoder, checkpoint=args.load_pretrained_mbart_from
)
if getattr(args, "no_final_norm_decoder", False):
decoder.layer_norm = None
for k, p in decoder.named_parameters():
# Freeze pretrained models by default
if safe_hasattr(
args, "finetune_mbart_decoder_params"
) and need_finetuning(
args.finetune_mbart_decoder_params, k
):
p.requires_grad = True
else:
p.requires_grad = False
compute_cross_attentive_loss = (
True if getattr(args, "attentive_cost_regularization", 0.0) > 0.0 else False
)
cross_attentive_loss_without_norm = getattr(
args, "attentive_cost_without_normalize", False
)
cross_attentive_loss_reverse = (
False # getattr(args, "attentive_cost_reverse", False)
)
decoder = TransformerMultiInputDecoder(
dictionary=task.target_dictionary,
spch_decoder=decoder,
text_decoder=decoder,
compute_cross_attentive_loss=compute_cross_attentive_loss,
cross_attentive_loss_with_norm=True
if not cross_attentive_loss_without_norm
else False,
cross_attentive_loss_reverse=cross_attentive_loss_reverse,
)
return decoder
@classmethod
def build_model(cls, args, task):
"""Build a new model instance."""
# make sure that all args are properly defaulted
# (in case there are any new ones)
dualinputxmtransformer_base(args)
encoder = cls.build_encoder(args, task)
decoder = cls.build_decoder(args, task)
return cls(encoder, decoder)
@register_model_architecture("dual_input_xm_transformer", "dualinputxmtransformer_base")
def dualinputxmtransformer_base(args):
# wav2vec encoder
set_default_w2v_encoder_args(args)
set_default_adaptor_args(args)
# mbart model
args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 1024)
args.encoder_ffn_embed_dim = getattr(
args, "encoder_ffn_embed_dim", 4 * args.encoder_embed_dim
)
args.encoder_layers = getattr(args, "encoder_layers", 12)
args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 16)
args.encoder_normalize_before = getattr(args, "encoder_normalize_before", True)
args.encoder_layerdrop = getattr(args, "encoder_layerdrop", 0)
args.encoder_learned_pos = getattr(args, "encoder_learned_pos", True)
args.decoder_embed_path = getattr(args, "decoder_embed_path", None)
args.decoder_embed_dim = getattr(args, "decoder_embed_dim", 1024)
args.decoder_ffn_embed_dim = getattr(args, "decoder_ffn_embed_dim", 4 * 1024)
args.decoder_layers = getattr(args, "decoder_layers", 12)
args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 16)
args.decoder_normalize_before = getattr(args, "decoder_normalize_before", True)
args.decoder_learned_pos = getattr(args, "decoder_learned_pos", True)
args.decoder_layerdrop = getattr(args, "decoder_layerdrop", 0.0)
args.adaptive_input = getattr(args, "adaptive_input", False)
args.mbart_attention_dropout = getattr(args, "mbart_attention_dropout", 0.0)
args.mbart_activation_dropout = getattr(args, "mbart_activation_dropout", 0.0)
args.mbart_dropout = getattr(args, "mbart_dropout", 0.1)
args.adaptive_softmax_cutoff = getattr(args, "adaptive_softmax_cutoff", None)
args.adaptive_softmax_dropout = getattr(args, "adaptive_softmax_dropout", 0)
args.share_decoder_input_output_embed = getattr(
args, "share_decoder_input_output_embed", True
)
args.no_token_positional_embeddings = getattr(
args, "no_token_positional_embeddings", False
)
args.decoder_output_dim = getattr(
args, "decoder_output_dim", args.decoder_embed_dim
)
args.decoder_input_dim = getattr(args, "decoder_input_dim", args.decoder_embed_dim)
args.no_scale_embedding = getattr(args, "no_scale_embedding", False)
args.quant_noise_pq = getattr(args, "quant_noise_pq", 0)
args.layernorm_embedding = getattr(args, "layernorm_embedding", True)
args.activation_fn = getattr(args, "activation_fn", "gelu")
args.pooler_activation_fn = getattr(args, "pooler_activation_fn", "tanh")
args.pooler_dropout = getattr(args, "pooler_dropout", 0.0)
| EXA-1-master | exa/libraries/fairseq/examples/speech_text_joint_to_text/models/s2t_dualinputxmtransformer.py |
#!/usr/bin/env python3
import logging
from collections import OrderedDict, namedtuple
from typing import Dict, Optional
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch import Tensor
from fairseq import checkpoint_utils, utils
from fairseq.file_io import PathManager
from fairseq.models import (
FairseqDecoder,
FairseqEncoderDecoderModel,
register_model,
register_model_architecture,
)
from fairseq.models.speech_to_text import (
MultiInputDecoder,
MultiModalityEncoder,
SpeechWavTransformerEncoder,
StackedSpeechWavTransformerEncoder,
)
from fairseq.models.transformer import (
TransformerDecoder,
TransformerEncoder,
TransformerModel,
)
logger = logging.getLogger(__name__)
class SpeechTextPreTrainEncoder(MultiModalityEncoder):
def __init__(
self,
dictionary,
sup_speech_encoder,
sup_s2s_speech_encoder,
unsup_speech_encoder,
text_encoder,
):
super().__init__(dictionary)
self.sup_speech_encoder = sup_speech_encoder
self.sup_s2s_speech_encoder = sup_s2s_speech_encoder
self.unsup_speech_encoder = unsup_speech_encoder
self.text_encoder = text_encoder
@classmethod
def update_transformer_encoder_cfg(cls, args, update_dict):
cfg = dict(args._get_kwargs())
for fkey in update_dict.keys():
cfg[fkey] = update_dict[fkey]
cfg.pop("_name", None) # remove keys start with _
model_args = namedtuple("args", cfg.keys())(*cfg.values())
return model_args
@classmethod
def build_text_encoder(cls, args, src_dictionary):
enc_emb = nn.Embedding(
len(src_dictionary), args.encoder_embed_dim, src_dictionary.pad()
)
model_args = cls.update_transformer_encoder_cfg(
args, {"encoder_layers": args.text_encoder_layers}
)
text_encoder = TransformerEncoder(model_args, src_dictionary, enc_emb)
return text_encoder
@classmethod
def build_speech_encoder(cls, args):
model_args = cls.update_transformer_encoder_cfg(
args,
{
"encoder_layers": args.speech_encoder_layers,
"speech_mask_prob": args.speech_sup_mask_prob,
},
)
speech_encoder = SpeechWavTransformerEncoder(model_args)
return speech_encoder
@classmethod
def share_layers(cls, src_layers, tgt_layers): # share layer but not dropout
# share parameters in src_layers with tgt_layers
assert len(src_layers) == len(tgt_layers)
for i, ly in enumerate(src_layers):
tly = tgt_layers[i]
tly.self_attn = ly.self_attn
tly.self_attn_layer_norm = ly.self_attn_layer_norm
tly.activation_fn = ly.activation_fn
tly.normalize_before = ly.normalize_before
tly.fc1 = ly.fc1
tly.fc2 = ly.fc2
tly.final_layer_norm = ly.final_layer_norm
if hasattr(tly, "encoder_attn"):
tly.encoder_attn = ly.encoder_attn
tly.encoder_attn_layer_norm = ly.encoder_attn_layer_norm
return tgt_layers
@classmethod
def build_unsup_speech_encoder(cls, args, sup_speech_encoder):
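        # The unsupervised encoder shares all transformer blocks and the feature
        # extractor with the supervised speech encoder, but uses its own masking
        # probability and dropout settings, and masking is always applied.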
model_args = cls.update_transformer_encoder_cfg(
args,
{
"encoder_layers": args.speech_encoder_layers,
"speech_mask_prob": args.speech_unsup_mask_prob,
"encoder_layerdrop": 0.0,
"decoder_layerdrop": 0.0,
"dropout": args.speech_unsup_dropout,
"activation_dropout": args.speech_unsup_dropout,
"attention_dropout": 0.0,
"dropout_features": args.speech_unsup_feature_dropout,
"dropout_input": args.speech_unsup_feature_dropout,
},
)
unsup_speech_encoder = SpeechWavTransformerEncoder(model_args, alway_mask=True)
unsup_speech_encoder.layer_norm = sup_speech_encoder.layer_norm
unsup_speech_encoder.layers = cls.share_layers(
sup_speech_encoder.layers, unsup_speech_encoder.layers
)
unsup_speech_encoder.mask_emb = sup_speech_encoder.mask_emb
unsup_speech_encoder.embed_positions = sup_speech_encoder.embed_positions
unsup_speech_encoder.feat_layer_norm = sup_speech_encoder.feat_layer_norm
unsup_speech_encoder.feat_proj = sup_speech_encoder.feat_proj
unsup_speech_encoder.subsample = sup_speech_encoder.subsample
return unsup_speech_encoder
@classmethod
def build_encoder(cls, args, dictionary):
text_encoder = cls.build_text_encoder(args, dictionary)
if getattr(args, "load_pretrained_mbart_encoder_from", None):
text_encoder = checkpoint_utils.load_pretrained_component_from_model(
component=text_encoder,
checkpoint=args.load_pretrained_mbart_encoder_from,
)
speech_encoder = cls.build_speech_encoder(args)
if getattr(args, "load_pretrained_feature_extractor_from", None):
def load_feature_extractor(component, checkpoint):
if not PathManager.exists(checkpoint):
raise IOError("Model file not found: {}".format(checkpoint))
state = checkpoint_utils.load_checkpoint_to_cpu(checkpoint)
component_state_dict = OrderedDict()
component_prefix = "feature_extractor"
for key in state["model"].keys():
if key.startswith(component_prefix):
component_subkey = key[len(component_prefix) + 1 :]
component_state_dict[component_subkey] = state["model"][key]
component.load_state_dict(component_state_dict, strict=True)
return component
speech_encoder.subsample = load_feature_extractor(
speech_encoder.subsample, args.load_pretrained_feature_extractor_from
)
speech_s2s_encoder = speech_encoder
unsup_speech_encoder = cls.build_unsup_speech_encoder(args, speech_encoder)
if getattr(args, "stacked_encoder", "none") != "none":
if args.encoder_shared_text_layers_from_begin > 0:
raise ValueError(
"We can not stack encoders and share encoders at the same time!"
)
speech_s2s_encoder = StackedSpeechWavTransformerEncoder(
speech_encoder, text_encoder.layers, text_encoder.layer_norm
)
if args.stacked_encoder == "all":
speech_encoder = speech_s2s_encoder
unsup_speech_encoder = StackedSpeechWavTransformerEncoder(
unsup_speech_encoder, text_encoder.layers, text_encoder.layer_norm
)
else:
cls.share_speech_text_encoder(
speech_encoder, text_encoder, args.encoder_shared_text_layers_from_begin
)
return SpeechTextPreTrainEncoder(
dictionary,
speech_encoder,
speech_s2s_encoder,
unsup_speech_encoder,
text_encoder,
)
@classmethod
def share_speech_text_encoder(
cls, speech_encoder, text_encoder, shared_layers_from_begin
):
if shared_layers_from_begin > 0:
num_text_encoder_layers = len(text_encoder.layers)
assert len(speech_encoder.layers) >= shared_layers_from_begin
assert num_text_encoder_layers >= shared_layers_from_begin
assert len(speech_encoder.layers) >= num_text_encoder_layers
for i, ly in enumerate(
speech_encoder.layers[
-num_text_encoder_layers : -num_text_encoder_layers
+ shared_layers_from_begin
]
):
assert isinstance(text_encoder.layers[i], type(ly))
text_encoder.layers[i] = ly
def select_encoder(self, mode, **kwargs):
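        # Dispatch by batch mode: supervised speech (CTC / frame-alignment / s2s)
        # runs the speech encoders with features_only=True, unsupervised speech
        # uses the masked-prediction encoder, and text/bitext uses the text encoder.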
if mode in ("speech", "sup_speech_ctc", "sup_speech_ali", "sup_speech_s2s"):
kwargs["features_only"] = True
if mode == "sup_speech_s2s":
return self.sup_s2s_speech_encoder, kwargs
return self.sup_speech_encoder, kwargs
elif mode == "unsup_speech":
kwargs["features_only"] = False
return self.unsup_speech_encoder, kwargs
elif mode in ("text", "bitext"):
return self.text_encoder, kwargs
else:
raise NotImplementedError(f"{mode} is not supported")
return None, kwargs
def forward(self, src_tokens, src_lengths=None, mode="", alignment=None, **kwargs):
return super().forward(src_tokens, src_lengths, mode, **kwargs)
# SpeechDummyDecoder works as an extension of encoder, so we could fit encoder only training into seq2seq training
class SpeechDummyDecoder(FairseqDecoder):
def __init__(
self,
dictionary,
output_embedding,
no_emb_update_unsup=False,
use_output_proj=False,
):
super().__init__(dictionary)
self.output_embedding = output_embedding
num_embedding, num_dim = self.output_embedding.weight.size()
self.out_proj = (
None if use_output_proj is False else nn.Linear(num_dim, num_dim)
)
self.no_emb_update_unsup = no_emb_update_unsup
def extend_alignment(self, alignment, src_lengths, prev_output_tokens):
# alignment: B X N
        # src_lengths: B (source lengths, in encoder frames)
# prev_output_tokens: B X (N + 1)
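        # Expand the per-token alignment boundaries (given as fractions of the
        # source length) into a frame-level pseudo-target: every encoder frame
        # inside a token's span is labelled with that target token.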
tgt_tokens = prev_output_tokens[
:, 1:
] # remove the leading start of sentence token
ext_alignment = (
torch.ones(len(src_lengths), src_lengths.max(), device=src_lengths.device)
.long()
.fill_(self.dictionary.pad())
)
for bs in range(src_lengths.size(0)):
tgt_length = tgt_tokens[bs].ne(self.dictionary.pad()).sum().item()
assert tgt_length == sum(alignment[bs].ne(1)) + 1
src_st = 0
for i in range(tgt_length):
tok = tgt_tokens[bs][i]
src_ed = (alignment[bs][i] * src_lengths[bs]).int().item()
ext_alignment[bs][src_st:src_ed].fill_(tok)
src_st = src_ed
return ext_alignment
def forward(
self,
prev_output_tokens,
encoder_out,
incremental_state=None,
mode="speech",
alignment=None,
**kwargs,
):
"""
Args:
prev_output_tokens (LongTensor): previous decoder outputs of shape
`(batch, tgt_len)`, for teacher forcing
encoder_out (optional): output from the encoder, used for
encoder-side attention
incremental_state (dict): dictionary used for storing state during
:ref:`Incremental decoding`
            mode (str, optional): data mode of the current batch, one of
                "speech"/"sup_speech_ctc", "sup_speech_ali" or "unsup_speech"
                (default: "speech").
            alignment (optional): normalized token-to-frame alignment used to
                build frame-level pseudo targets in "sup_speech_ali" mode.
Returns:
sup_speech_ctc:
dictionary{"logits": logits, "padding_mask": padding_mask}
sup_speech_ali and unsup_speech:
tuple:
                    - the output logits of shape `(batch, src_len, vocab)`
- a dictionary with any model-specific outputs
"""
emb_weight = self.output_embedding.weight
if (
mode == "unsup_speech" and self.no_emb_update_unsup
): # no gradient for embedding here
emb_weight = emb_weight.detach()
enc_out = (
encoder_out["encoder_out"][0]
if self.out_proj is None
else self.out_proj(encoder_out["encoder_out"][0])
)
logits = F.linear(enc_out, emb_weight, None).transpose(0, 1) # B X T X C
others = None
if mode in (
"speech",
"sup_speech_ctc",
): # speech data with label, do forcealignment
if len(encoder_out["encoder_padding_mask"]) > 0:
padding_mask = encoder_out["encoder_padding_mask"][0]
                logits = logits.masked_fill(padding_mask.unsqueeze(-1), float("-inf"))
else:
seq_len, bsz = encoder_out["encoder_out"][0].size()[:2]
padding_mask = torch.zeros(
bsz, seq_len, device=encoder_out["encoder_out"][0].device
).bool()
return {"x": logits, "padding_mask": padding_mask}
elif mode == "sup_speech_ali":
src_lengths = None
if len(encoder_out["encoder_padding_mask"]) > 0:
src_lengths = (1 - encoder_out["encoder_padding_mask"][0].long()).sum(
-1
)
else:
seq_len, bsz = encoder_out["encoder_out"][0].size()[:2]
src_lengths = (
torch.ones(bsz, device=encoder_out["encoder_out"][0].device).long()
* seq_len
)
assert alignment is not None
alignment = self.extend_alignment(
alignment, src_lengths, prev_output_tokens
)
others = {"pseudo_target_tokens": alignment}
elif mode == "unsup_speech":
enc_out_ori = (
encoder_out["encoder_unmasked_out"][0]
if self.out_proj is None
else self.out_proj(encoder_out["encoder_unmasked_out"][0])
)
logits_ori = F.linear(enc_out_ori, emb_weight, None).transpose(0, 1)
if len(encoder_out["encoder_padding_mask"]) > 0:
encoder_padding_mask = encoder_out["encoder_padding_mask"][0]
                logits_ori = logits_ori.masked_fill(
                    encoder_padding_mask.unsqueeze(-1), float("-inf")
                )
pseudo_labels = utils.log_softmax(logits_ori, dim=-1)
others = {
"pseudo_target_logprobs": pseudo_labels,
"padding_mask": encoder_out["encoder_padding_mask"], # B X T
"mask_indices": encoder_out[
"mask_indices"
], # True for masked frames B X T
}
return logits, others
def get_normalized_probs(
self,
net_output: Dict[str, Tensor],
log_probs: bool,
sample: Optional[Dict[str, Tensor]] = None,
):
return self.get_normalized_probs_scriptable(
(net_output["x"], None), log_probs, sample
)
class SpeechTextPreTrainDecoder(MultiInputDecoder):
def __init__(self, dictionary, speech_decoder, text_decoder):
super().__init__(dictionary)
self.speech_decoder = speech_decoder
self.text_decoder = text_decoder
def select_decoder(self, mode, **kwargs):
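        # Route each mode to a decoder: the dummy speech decoder handles CTC,
        # frame-alignment and unsupervised speech batches, while text, bitext and
        # supervised speech-to-text (s2s) batches use the real text decoder.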
if mode == "unsup_speech":
kwargs["mode"] = mode
return self.speech_decoder, kwargs
if mode in ("text", "bitext"):
return self.text_decoder, kwargs
if mode in ("speech", "sup_speech_ctc", "sup_speech_ali"):
kwargs["mode"] = mode
return self.speech_decoder, kwargs
if mode in ("speech", "sup_speech_s2s"):
if "alignment" in kwargs:
del kwargs["alignment"]
return self.text_decoder, kwargs
raise NotImplementedError(f"{mode} is not supported")
return None, kwargs
def get_normalized_probs(
self,
net_output,
log_probs,
sample=None,
):
"""Get normalized probabilities (or log probs) from a net's output."""
if isinstance(net_output, dict):
return self.speech_decoder.get_normalized_probs(
net_output, log_probs, sample
)
return self.text_decoder.get_normalized_probs(net_output, log_probs, sample)
@classmethod
def build_text_decoder(cls, args, tgt_dictionary, dec_emb_share=None):
dec_emb = (
nn.Embedding(
len(tgt_dictionary), args.decoder_embed_dim, tgt_dictionary.pad()
)
if dec_emb_share is None
else dec_emb_share
)
text_decoder = TransformerDecoder(args, tgt_dictionary, dec_emb)
return text_decoder
@classmethod
def build_dummy_speech_decoder(cls, args, dictionary, dec_emb_share=None):
dec_emb = (
nn.Embedding(len(dictionary), args.decoder_embed_dim, dictionary.pad())
if dec_emb_share is None
else dec_emb_share
)
speech_decoder = SpeechDummyDecoder(
dictionary,
dec_emb,
no_emb_update_unsup=getattr(args, "no_emb_update_unsup", False),
use_output_proj=getattr(args, "use_decoder_output_proj", False),
)
return speech_decoder
@classmethod
def build_decoder(
cls, args, text_dictionary, speech_dictionary, speech_output_embedding
):
text_decoder = cls.build_text_decoder(args, text_dictionary)
speech_decoder = cls.build_dummy_speech_decoder(
args, speech_dictionary, speech_output_embedding
)
if getattr(args, "load_pretrained_mbart_decoder_from", None):
text_decoder = checkpoint_utils.load_pretrained_component_from_model(
component=text_decoder,
checkpoint=args.load_pretrained_mbart_decoder_from,
)
return SpeechTextPreTrainDecoder(text_dictionary, speech_decoder, text_decoder)
@register_model("speech_text_pretrain_bart")
class SpeechTextPreTrainModel(FairseqEncoderDecoderModel):
def __init__(self, encoder, decoder):
super().__init__(encoder, decoder)
self.num_updates = 0
def forward(
self, src_tokens, src_lengths, prev_output_tokens, src_lang_ids=None, **kwargs
):
if src_lang_ids is not None:
encoder_out = self.encoder(
src_tokens, src_lengths=src_lengths, src_lang_ids=src_lang_ids, **kwargs
)
else:
encoder_out = self.encoder(src_tokens, src_lengths=src_lengths, **kwargs)
decoder_out = self.decoder(
prev_output_tokens, encoder_out=encoder_out, **kwargs
)
return decoder_out
def max_positions(self):
return None # it is provided in task
def get_targets(self, sample, net_output):
mode = sample["net_input"]["mode"]
if mode == "unsup_speech":
return {"target_logprobs": net_output[1]["pseudo_target_logprobs"]}
if mode == "sup_speech_ali":
return net_output[1]["pseudo_target_tokens"]
return sample["target"]
def get_normalized_probs(
self,
net_output,
log_probs,
sample=None,
):
# net_output['encoder_out'] is a (B, T, D) tensor
lprobs = self.get_normalized_probs_scriptable(net_output, log_probs, sample)
lprobs.batch_first = True
return lprobs
@staticmethod
def add_args(parser):
TransformerModel.add_args(parser)
SpeechWavTransformerEncoder.add_args(parser)
parser.add_argument(
"--speech-sup-mask-prob",
type=float,
help="probability of replacing a token with mask (sup-speech)",
)
parser.add_argument(
"--speech-unsup-mask-prob",
type=float,
help="probability of replacing a token with mask (unsup-speech)",
)
parser.add_argument(
"--load-pretrained-mbart-encoder-from",
type=str,
metavar="STR",
help="model to take text encoder weights from (for initialization)",
)
parser.add_argument(
"--load-pretrained-mbart-decoder-from",
type=str,
metavar="STR",
help="model to take text decoder weights from (for initialization)",
)
parser.add_argument(
"--load-pretrained-feature-extractor-from",
type=str,
metavar="STR",
help="model to take feature extractor weights from (for initialization)",
)
parser.add_argument(
"--speech-unsup-dropout",
type=float,
default=0,
help="dropout for unsupervised speech encoder",
)
parser.add_argument(
"--speech-unsup-feature-dropout",
type=float,
default=0,
help="dropout for unsupervised speech feature encoder",
)
parser.add_argument(
"--encoder-shared-text-layers-from-begin",
type=int,
help="number of text encoder layers shared with speech encoder (from first layer)",
)
parser.add_argument(
"--stacked-encoder",
default="none",
choices=["none", "s2s", "all"],
help="stack speech and text encoders",
)
parser.add_argument("--use-decoder-output-proj", action="store_true")
@classmethod
def build_model(cls, args, task):
encoder = SpeechTextPreTrainEncoder.build_encoder(args, task.src_dict)
decoder = SpeechTextPreTrainDecoder.build_decoder(
args, task.tgt_dict, task.src_dict, encoder.text_encoder.embed_tokens
)
model = SpeechTextPreTrainModel(encoder, decoder)
return model
def upgrade_state_dict(self, state_dict):
"""Upgrade old state dicts to work with newer code."""
if "decoder.speech_decoder.output_projection.weight" in state_dict:
del state_dict["decoder.speech_decoder.output_projection.weight"]
self.upgrade_state_dict_named(state_dict, "")
@register_model_architecture(
"speech_text_pretrain_bart", "speech_text_pretrain_bart_base"
)
def speech_text_pretrain_bart_base(args):
# speech masking
args.dropout_input = getattr(args, "dropout_input", 0)
args.dropout_features = getattr(args, "dropout_features", 0)
args.speech_mask_length = getattr(args, "speech_mask_length", 10)
args.speech_mask_prob = getattr(args, "speech_mask_prob", 0.65)
args.speech_sup_mask_prob = getattr(args, "speech_sup_mask_prob", 0.3)
args.speech_unsup_mask_prob = getattr(
args, "speech_unsup_mask_prob", args.speech_mask_prob
)
args.speech_mask_selection = getattr(args, "speech_mask_selection", "static")
args.speech_mask_other = getattr(args, "speech_mask_other", 0)
args.speech_mask_min_space = getattr(args, "speech_mask_min_space", 1)
args.speech_no_mask_overlap = getattr(args, "speech_no_mask_overlap", False)
args.speech_mask_channel_length = getattr(args, "speech_mask_channel_length", 10)
args.speech_mask_channel_prob = getattr(args, "speech_mask_channel_prob", 0.0)
args.speech_mask_channel_selection = getattr(
args, "speech_mask_channel_selection", "static"
)
args.speech_mask_channel_other = getattr(args, "speech_mask_channel_other", 0)
args.speech_mask_channel_min_space = getattr(
args, "speech_mask_channel_min_space", 1
)
args.speech_no_mask_channel_overlap = getattr(
args, "speech_no_mask_channel_overlap", False
)
args.no_scale_feature = getattr(args, "", False)
args.feature_grad_mult = getattr(args, "feature_grad_mult", 1.0) # 0.1
# Transformer
args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 768)
args.encoder_ffn_embed_dim = getattr(
args, "encoder_ffn_embed_dim", args.encoder_embed_dim * 4
)
args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 12)
args.encoder_normalize_before = getattr(args, "encoder_normalize_before", False)
args.encoder_layerdrop = getattr(args, "encoder_layerdrop", 0)
args.encoder_learned_pos = getattr(args, "encoder_learned_pos", False)
args.speech_conv_bias = getattr(args, "speech_conv_bias", False)
args.decoder_embed_dim = getattr(args, "decoder_embed_dim", args.encoder_embed_dim)
args.decoder_ffn_embed_dim = getattr(
args, "decoder_ffn_embed_dim", args.encoder_ffn_embed_dim
)
args.decoder_attention_heads = getattr(
args, "decoder_attention_heads", args.encoder_attention_heads
)
args.decoder_normalize_before = getattr(args, "decoder_normalize_before", False)
args.decoder_learned_pos = getattr(args, "decoder_learned_pos", False)
args.dropout = getattr(args, "dropout", 0.1)
args.attention_dropout = getattr(args, "attention_dropout", args.dropout)
args.activation_dropout = getattr(args, "activation_dropout", 0.0)
args.activation_fn = getattr(args, "activation_fn", "relu") # gelu?
args.adaptive_softmax_cutoff = getattr(args, "adaptive_softmax_cutoff", None)
args.adaptive_softmax_dropout = getattr(args, "adaptive_softmax_dropout", 0)
args.speech_unsup_dropout = getattr(args, "speech_unsup_dropout", 0)
args.speech_unsup_feature_dropout = getattr(args, "speech_unsup_feature_dropout", 0)
args.tie_adaptive_weights = getattr(args, "tie_adaptive_weights", False)
args.share_decoder_input_output_embed = getattr(
args, "share_decoder_input_output_embed", False
)
args.no_token_positional_embeddings = getattr(
args, "no_token_positional_embeddings", False
)
args.adaptive_input = getattr(args, "adaptive_input", False)
args.decoder_layerdrop = getattr(args, "decoder_layerdrop", 0.0)
args.decoder_output_dim = getattr(
args, "decoder_output_dim", args.decoder_embed_dim
)
args.layernorm_embedding = getattr(args, "layernorm_embedding", False)
args.no_scale_embedding = getattr(args, "no_scale_embedding", False)
args.quant_noise_pq = getattr(args, "quant_noise_pq", 0)
args.speech_encoder_layers = getattr(args, "speech_encoder_layers", 12)
args.text_encoder_layers = getattr(args, "text_encoder_layers", 6)
args.encoder_shared_text_layers_from_begin = getattr(
args, "encoder_shared_text_layers_from_begin", 6
)
args.decoder_layers = getattr(args, "decoder_layers", 6)
args.no_emb_update_unsup = getattr(args, "no_emb_update_unsup", False)
@register_model_architecture(
"speech_text_pretrain_bart", "speech_text_pretrain_bart_base_stack"
)
def speech_text_pretrain_bart_base_stack(args):
args.speech_encoder_layers = getattr(args, "speech_encoder_layers", 6)
args.text_encoder_layers = getattr(args, "text_encoder_layers", 6)
args.encoder_shared_text_layers_from_begin = getattr(
args, "encoder_shared_text_layers_from_begin", 0
)
args.stacked_encoder = getattr(args, "stacked_encoder", "all")
args.layernorm_embedding = getattr(args, "layernorm_embedding", True)
speech_text_pretrain_bart_base(args)
@register_model_architecture(
"speech_text_pretrain_bart", "speech_text_pretrain_bart_large"
)
def speech_text_pretrain_bart_large(args):
args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 1024)
args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 16)
args.speech_encoder_layers = getattr(args, "speech_encoder_layers", 24)
args.text_encoder_layers = getattr(args, "text_encoder_layers", 12)
args.encoder_shared_text_layers_from_begin = getattr(
args, "encoder_shared_text_layers_from_begin", 12
)
args.decoder_layers = getattr(args, "decoder_layers", 12)
args.dropout = getattr(args, "dropout", 0.3)
speech_text_pretrain_bart_base(args)
@register_model_architecture(
"speech_text_pretrain_bart", "speech_text_pretrain_bart_large_stack"
)
def speech_text_pretrain_bart_large_stack(args):
args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 1024)
args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 16)
args.speech_encoder_layers = getattr(args, "speech_encoder_layers", 6)
args.text_encoder_layers = getattr(args, "text_encoder_layers", 12)
args.encoder_shared_text_layers_from_begin = getattr(
args, "encoder_shared_text_layers_from_begin", 0
)
args.decoder_layers = getattr(args, "decoder_layers", 12)
args.stacked_encoder = getattr(args, "stacked_encoder", "s2s")
args.layernorm_embedding = getattr(args, "layernorm_embedding", True)
speech_text_pretrain_bart_base(args)
| EXA-1-master | exa/libraries/fairseq/examples/speech_text_joint_to_text/models/joint_speech_text_pretrain_transformer.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import re
from collections import OrderedDict
import torch
from fairseq.file_io import PathManager
def is_update(param_name, module_name):
if module_name in param_name:
return True
return False
def load_checkpoint(src_cpt):
with PathManager.open(src_cpt, "rb") as f:
state_src = torch.load(
f,
map_location=(
lambda s, _: torch.serialization.default_restore_location(s, "cpu")
),
)
return state_src
def save_checkpoint(tgt_cpt, states):
with PathManager.open(tgt_cpt, "wb") as f:
torch.save(
states,
f,
)
# convert the jointly pre-trained model into a standard BART-style model
# by keeping only the text encoder/decoder weights
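# Example of the renaming below (parameter names are illustrative): a key such as
# "encoder.text_encoder.layers.0.self_attn.k_proj.weight" becomes
# "encoder.layers.0.self_attn.k_proj.weight" and
# "decoder.text_decoder.embed_tokens.weight" becomes "decoder.embed_tokens.weight";
# keys outside the text encoder/decoder are dropped.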
def main():
parser = argparse.ArgumentParser()
# fmt: off
parser.add_argument('--input-model', required=True,
help='Input checkpoint file path.')
parser.add_argument('--output-model', required=True,
help='output checkpoint file path.')
# fmt: on
args = parser.parse_args()
print(args)
states = load_checkpoint(args.input_model)
model = states["model"]
new_model = OrderedDict()
for key in model.keys():
if re.search("^encoder.text_encoder", key):
new_key = re.sub("encoder.text_encoder", "encoder", key)
new_model[new_key] = model[key]
elif re.search("^decoder.text_decoder", key):
new_key = re.sub("decoder.text_decoder", "decoder", key)
new_model[new_key] = model[key]
states["model"] = new_model
save_checkpoint(args.output_model, states)
if __name__ == "__main__":
main()
| EXA-1-master | exa/libraries/fairseq/examples/speech_text_joint_to_text/scripts/convert_model.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import itertools
import logging
import re
import time
from g2p_en import G2p
logger = logging.getLogger(__name__)
FAIL_SENT = "FAILED_SENTENCE"
def parse():
parser = argparse.ArgumentParser()
parser.add_argument("--data-path", type=str, required=True)
parser.add_argument("--out-path", type=str, required=True)
parser.add_argument("--lower-case", action="store_true")
parser.add_argument("--do-filter", action="store_true")
parser.add_argument("--use-word-start", action="store_true")
parser.add_argument("--dup-vowel", default=1, type=int)
parser.add_argument("--dup-consonant", default=1, type=int)
parser.add_argument("--no-punc", action="store_true")
parser.add_argument("--reserve-word", type=str, default="")
parser.add_argument(
"--reserve-first-column",
action="store_true",
help="first column is sentence id",
)
###
parser.add_argument("--parallel-process-num", default=1, type=int)
parser.add_argument("--logdir", default="")
args = parser.parse_args()
return args
def process_sent(sent, g2p, res_wrds, args):
sents = pre_process_sent(sent, args.do_filter, args.lower_case, res_wrds)
pho_seqs = [do_g2p(g2p, s, res_wrds, i == 0) for i, s in enumerate(sents)]
pho_seq = (
[FAIL_SENT]
if [FAIL_SENT] in pho_seqs
else list(itertools.chain.from_iterable(pho_seqs))
)
if args.no_punc:
pho_seq = remove_punc(pho_seq)
if args.dup_vowel > 1 or args.dup_consonant > 1:
pho_seq = dup_pho(pho_seq, args.dup_vowel, args.dup_consonant)
if args.use_word_start:
pho_seq = add_word_start(pho_seq)
return " ".join(pho_seq)
def remove_punc(sent):
ns = []
regex = re.compile("[^a-zA-Z0-9 ]")
for p in sent:
if (not regex.search(p)) or p == FAIL_SENT:
if p == " " and (len(ns) == 0 or ns[-1] == " "):
continue
ns.append(p)
return ns
def do_g2p(g2p, sent, res_wrds, is_first_sent):
if sent in res_wrds:
pho_seq = [res_wrds[sent]]
else:
pho_seq = g2p(sent)
if not is_first_sent:
pho_seq = [" "] + pho_seq # add space to separate
return pho_seq
def pre_process_sent(sent, do_filter, lower_case, res_wrds):
if do_filter:
sent = re.sub("-", " ", sent)
sent = re.sub("—", " ", sent)
if len(res_wrds) > 0:
wrds = sent.split()
wrds = ["SPLIT_ME " + w + " SPLIT_ME" if w in res_wrds else w for w in wrds]
sents = [x.strip() for x in " ".join(wrds).split("SPLIT_ME") if x.strip() != ""]
else:
sents = [sent]
if lower_case:
sents = [s.lower() if s not in res_wrds else s for s in sents]
return sents
def dup_pho(sent, dup_v_num, dup_c_num):
"""
    duplicate phonemes, as defined in cmudict
http://www.speech.cs.cmu.edu/cgi-bin/cmudict
"""
if dup_v_num == 1 and dup_c_num == 1:
return sent
ns = []
for p in sent:
ns.append(p)
if re.search(r"\d$", p):
for i in range(1, dup_v_num):
ns.append(f"{p}-{i}P")
elif re.search(r"\w", p):
for i in range(1, dup_c_num):
ns.append(f"{p}-{i}P")
return ns
def add_word_start(sent):
ns = []
do_add = True
ws = "▁"
for p in sent:
if do_add:
p = ws + p
do_add = False
if p == " ":
do_add = True
else:
ns.append(p)
return ns
def load_reserve_word(reserve_word):
if reserve_word == "":
return []
with open(reserve_word, "r") as fp:
res_wrds = [x.strip().split() for x in fp.readlines() if x.strip() != ""]
assert sum([0 if len(x) == 2 else 1 for x in res_wrds]) == 0
res_wrds = dict(res_wrds)
return res_wrds
def process_sents(sents, args):
g2p = G2p()
out_sents = []
res_wrds = load_reserve_word(args.reserve_word)
for sent in sents:
col1 = ""
if args.reserve_first_column:
col1, sent = sent.split(None, 1)
sent = process_sent(sent, g2p, res_wrds, args)
if args.reserve_first_column and col1 != "":
sent = f"{col1} {sent}"
out_sents.append(sent)
return out_sents
def main():
args = parse()
out_sents = []
with open(args.data_path, "r") as fp:
sent_list = [x.strip() for x in fp.readlines()]
if args.parallel_process_num > 1:
try:
import submitit
except ImportError:
            logger.warning(
                "submitit is not found; only one job will be used to process the data"
)
submitit = None
if args.parallel_process_num == 1 or submitit is None:
out_sents = process_sents(sent_list, args)
else:
# process sentences with parallel computation
lsize = len(sent_list) // args.parallel_process_num + 1
executor = submitit.AutoExecutor(folder=args.logdir)
executor.update_parameters(timeout_min=1000, cpus_per_task=4)
jobs = []
for i in range(args.parallel_process_num):
job = executor.submit(
process_sents, sent_list[lsize * i : lsize * (i + 1)], args
)
jobs.append(job)
is_running = True
while is_running:
time.sleep(5)
is_running = sum([job.done() for job in jobs]) < len(jobs)
out_sents = list(itertools.chain.from_iterable([job.result() for job in jobs]))
with open(args.out_path, "w") as fp:
fp.write("\n".join(out_sents) + "\n")
if __name__ == "__main__":
main()
| EXA-1-master | exa/libraries/fairseq/examples/speech_text_joint_to_text/scripts/g2p_encode.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import copy
import math
import re
import torch
from fairseq.data import data_utils
from fairseq.data.language_pair_dataset import LanguagePairDataset
# Part of the code is modified from DenoisingDataset
# compared with DenoisingDataset, no permute_sentences or documents (rotate_ratio, permute_sentence_ratio)
class LanguagePairDenoisingDataset(LanguagePairDataset):
def __init__(
self,
src,
src_sizes,
src_dict,
tgt,
tgt_sizes,
tgt_dict,
mask_idx,
mask_whole_words,
seed,
args,
left_pad_source=True,
left_pad_target=False,
shuffle=True,
input_feeding=True,
remove_eos_from_source=False,
append_eos_to_target=False,
align_dataset=None,
constraints=None,
append_bos=False,
eos=None,
num_buckets=0,
src_lang_id=None,
tgt_lang_id=None,
pad_to_multiple=1,
):
super().__init__(
src,
src_sizes,
src_dict,
tgt,
tgt_sizes,
tgt_dict,
left_pad_source,
left_pad_target,
shuffle,
input_feeding,
remove_eos_from_source,
append_eos_to_target,
align_dataset,
constraints,
append_bos,
eos,
num_buckets,
src_lang_id,
tgt_lang_id,
pad_to_multiple,
)
self.mask_idx = mask_idx
self.mask_whole_word = mask_whole_words
self.mask_ratio = args.mask
self.random_ratio = args.mask_random
self.insert_ratio = args.insert
self.replace_length = args.replace_length
if self.replace_length not in [-1, 0, 1]:
raise ValueError(f"invalid arg: replace_length={self.replace_length}")
if args.mask_length not in ["subword", "word", "span-poisson"]:
raise ValueError(f"invalid arg: mask-length={args.mask_length}")
if args.mask_length == "subword" and args.replace_length not in [0, 1]:
raise ValueError("if using subwords, use replace-length=1 or 0")
self.mask_span_distribution = None
if args.mask_length == "span-poisson":
# Text infilling: "A number of text spans are sampled, with span lengths drawn from a Poisson distribution (λ = 3). Each span is replaced with a single [MASK] token. 0-length spans correspond to the insertion of [MASK] tokens."
_lambda = args.poisson_lambda
lambda_to_the_k = 1
e_to_the_minus_lambda = math.exp(-_lambda)
k_factorial = 1
ps = []
for k in range(0, 128):
ps.append(e_to_the_minus_lambda * lambda_to_the_k / k_factorial)
lambda_to_the_k *= _lambda
k_factorial *= k + 1
if ps[-1] < 0.0000001:
break
ps = torch.FloatTensor(ps)
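            # `ps` now holds the Poisson(poisson_lambda) pmf,
            # p_k = exp(-lambda) * lambda^k / k!, truncated once the tail mass
            # drops below 1e-7 (or at k = 128); span lengths are sampled from
            # this truncated distribution.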
self.mask_span_distribution = torch.distributions.Categorical(ps)
self.epoch = 0
self.seed = seed
def _is_phoneme(x):
if re.search("<lang:", x) or x in (
"<mask>",
"<sil>",
"<pad>",
"<s>",
"</s>",
"<unk>",
):
return False
return True
self.voc_valid_ids = torch.LongTensor(
[i for i, x in enumerate(self.src_dict.symbols) if _is_phoneme(x)]
)
self.voc_valid_size = self.voc_valid_ids.size(0)
@property
def can_reuse_epoch_itr_across_epochs(self):
return False
def set_epoch(self, epoch, **unused):
self.epoch = epoch
def __getitem__(self, index):
tgt_item = self.tgt[index] if self.tgt is not None else None
src_item = copy.deepcopy(self.src[index])
with data_utils.numpy_seed(self.seed, self.epoch, index):
source = src_item
assert source[-1] == self.eos
if self.mask_ratio > 0:
source = self.add_whole_word_mask(source, self.mask_ratio)
if self.insert_ratio > 0:
source = self.add_insertion_noise(source, self.insert_ratio)
src_item = source
if self.append_eos_to_target:
eos = self.tgt_dict.eos() if self.tgt_dict else self.src_dict.eos()
if self.tgt and self.tgt[index][-1] != eos:
tgt_item = torch.cat([self.tgt[index], torch.LongTensor([eos])])
if self.append_bos:
bos = self.tgt_dict.bos() if self.tgt_dict else self.src_dict.bos()
if self.tgt and self.tgt[index][0] != bos:
tgt_item = torch.cat([torch.LongTensor([bos]), self.tgt[index]])
bos = self.src_dict.bos()
if src_item[0] != bos:
src_item = torch.cat([torch.LongTensor([bos]), src_item])
if self.remove_eos_from_source:
eos = self.src_dict.eos()
if src_item[-1] == eos:
src_item = src_item[:-1]
example = {
"id": index,
"source": src_item,
"target": tgt_item,
}
if self.align_dataset is not None:
example["alignment"] = self.align_dataset[index]
if self.constraints is not None:
example["constraints"] = self.constraints[index]
if self.src_lang_id is not None:
example["src_lang_id"] = self.src_lang_id
if self.tgt_lang_id is not None:
example["tgt_lang_id"] = self.tgt_lang_id
return example
# following functions are borrowed from denoising_dataset
def word_starts(self, source):
if self.mask_whole_word is not None:
is_word_start = self.mask_whole_word.gather(0, source)
else:
is_word_start = torch.ones(source.size())
is_word_start[0] = 0
is_word_start[-1] = 0
return is_word_start
def add_whole_word_mask(self, source, p):
is_word_start = self.word_starts(source)
num_to_mask = int(math.ceil(is_word_start.float().sum() * p))
num_inserts = 0
if num_to_mask == 0:
return source
if self.mask_span_distribution is not None:
lengths = self.mask_span_distribution.sample(sample_shape=(num_to_mask,))
# Make sure we have enough to mask
cum_length = torch.cumsum(lengths, 0)
while cum_length[-1] < num_to_mask:
lengths = torch.cat(
[
lengths,
self.mask_span_distribution.sample(sample_shape=(num_to_mask,)),
],
dim=0,
)
cum_length = torch.cumsum(lengths, 0)
# Trim to masking budget
i = 0
while cum_length[i] < num_to_mask:
i += 1
lengths[i] = num_to_mask - (0 if i == 0 else cum_length[i - 1])
num_to_mask = i + 1
lengths = lengths[:num_to_mask]
# Handle 0-length mask (inserts) separately
lengths = lengths[lengths > 0]
num_inserts = num_to_mask - lengths.size(0)
num_to_mask -= num_inserts
if num_to_mask == 0:
return self.add_insertion_noise(source, num_inserts / source.size(0))
assert (lengths > 0).all()
else:
lengths = torch.ones((num_to_mask,)).long()
assert is_word_start[-1] == 0
word_starts = is_word_start.nonzero(as_tuple=False)
indices = word_starts[
torch.randperm(word_starts.size(0))[:num_to_mask]
].squeeze(1)
mask_random = torch.FloatTensor(num_to_mask).uniform_() < self.random_ratio
source_length = source.size(0)
assert source_length - 1 not in indices
to_keep = torch.ones(source_length, dtype=torch.bool)
is_word_start[
-1
] = 255 # acts as a long length, so spans don't go over the end of doc
if self.replace_length == 0:
to_keep[indices] = 0
else:
# keep index, but replace it with [MASK]
source[indices] = self.mask_idx
source[indices[mask_random]] = self.voc_valid_ids[
torch.randint(0, self.voc_valid_size - 1, size=(mask_random.sum(),))
]
if self.mask_span_distribution is not None:
assert len(lengths.size()) == 1
assert lengths.size() == indices.size()
lengths -= 1
while indices.size(0) > 0:
assert lengths.size() == indices.size()
lengths -= is_word_start[indices + 1].long()
uncompleted = lengths >= 0
indices = indices[uncompleted] + 1
mask_random = mask_random[uncompleted]
lengths = lengths[uncompleted]
if self.replace_length != -1:
# delete token
to_keep[indices] = 0
else:
# keep index, but replace it with [MASK]
source[indices] = self.mask_idx
source[indices[mask_random]] = self.voc_valid_ids[
torch.randint(
0, self.voc_valid_size - 1, size=(mask_random.sum(),)
)
]
else:
# A bit faster when all lengths are 1
while indices.size(0) > 0:
uncompleted = is_word_start[indices + 1] == 0
indices = indices[uncompleted] + 1
mask_random = mask_random[uncompleted]
if self.replace_length != -1:
# delete token
to_keep[indices] = 0
else:
# keep index, but replace it with [MASK]
source[indices] = self.mask_idx
source[indices[mask_random]] = self.voc_valid_ids[
torch.randint(
0, self.voc_valid_size - 1, size=(mask_random.sum(),)
)
]
assert source_length - 1 not in indices
source = source[to_keep]
if num_inserts > 0:
source = self.add_insertion_noise(source, num_inserts / source.size(0))
return source
def add_insertion_noise(self, tokens, p):
if p == 0.0:
return tokens
num_tokens = len(tokens)
n = int(math.ceil(num_tokens * p))
noise_indices = torch.randperm(num_tokens + n - 2)[:n] + 1
noise_mask = torch.zeros(size=(num_tokens + n,), dtype=torch.bool)
noise_mask[noise_indices] = 1
result = torch.LongTensor(n + len(tokens)).fill_(-1)
num_random = int(math.ceil(n * self.random_ratio))
result[noise_indices[num_random:]] = self.mask_idx
result[noise_indices[:num_random]] = self.voc_valid_ids[
torch.randint(0, self.voc_valid_size - 1, size=(num_random,))
]
result[~noise_mask] = tokens
assert (result >= 0).all()
return result
| EXA-1-master | exa/libraries/fairseq/examples/speech_text_joint_to_text/data/pair_denoising_dataset.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
from fairseq import utils
from fairseq.criterions import register_criterion
from fairseq.criterions.label_smoothed_cross_entropy import (
LabelSmoothedCrossEntropyCriterion,
LabelSmoothedCrossEntropyCriterionConfig,
label_smoothed_nll_loss,
)
@register_criterion(
"speech_text_pretrain_cross_entropy",
dataclass=LabelSmoothedCrossEntropyCriterionConfig,
)
class SpeechTextPreTrainCrossEntCriterion(LabelSmoothedCrossEntropyCriterion):
def __init__(self, task, sentence_avg, label_smoothing, report_accuracy=False):
super().__init__(
task, sentence_avg, label_smoothing, report_accuracy=report_accuracy
)
def forward(self, model, sample, reduce=True):
net_output = model(**sample["net_input"])
loss, nll_loss, nsentences, ntokens, n_correct = self.compute_loss(
model, net_output, sample, reduce=reduce
)
sample_size = nsentences if self.sentence_avg else ntokens
logging_output = {
"loss": loss.data,
"nll_loss": nll_loss.data,
"ntokens": ntokens,
"nsentences": nsentences,
"sample_size": sample_size,
}
if self.report_accuracy:
logging_output["n_correct"] = utils.item(n_correct)
logging_output["total"] = utils.item(ntokens)
return loss, sample_size, logging_output
def get_lprobs_and_target(self, model, net_output, sample):
lprobs = model.get_normalized_probs(net_output, log_probs=True)
target = model.get_targets(sample, net_output)
assert self.ignore_prefix_size == 0
if self.ignore_prefix_size > 0:
if getattr(lprobs, "batch_first", False):
lprobs = lprobs[:, self.ignore_prefix_size :, :].contiguous()
target = target[:, self.ignore_prefix_size :].contiguous()
else:
lprobs = lprobs[self.ignore_prefix_size :, :, :].contiguous()
target = target[self.ignore_prefix_size :, :].contiguous()
return lprobs, target
def compute_loss(self, model, net_output, sample, reduce=True):
lprobs, target = self.get_lprobs_and_target(model, net_output, sample)
n_correct = 0
if isinstance(target, dict):
t_lprobs = target["target_logprobs"]
if not lprobs.batch_first:
lprobs = lprobs.transpose(0, 1)
t_lprobs = t_lprobs.transpose(0, 1)
nsentences, seq_len = lprobs.size()[:2]
ntokens = nsentences * seq_len
t_probs = t_lprobs.exp()
mask_indices = (
net_output[1]["mask_indices"][0]
if len(net_output[1]["mask_indices"]) > 0
else None
)
            # mask_indices is True for masked frames
if mask_indices is not None: # B X T
t_probs = t_probs.masked_fill(mask_indices.eq(False).unsqueeze(-1), 0)
ntokens = mask_indices.int().sum()
t_probs = t_probs.detach()
t_lprobs = t_lprobs.detach()
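            # soft-target loss over the masked frames: sum of
            # p_teacher * (log p_teacher - log p_model), i.e. KL(teacher || model)
            # with the detached teacher distribution supplied in the target dict.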
loss = (
-(t_probs * (lprobs - t_lprobs)).sum()
if reduce
else -(t_probs * (lprobs - t_lprobs)).sum(-1, keepdim=True)
)
nll_loss = loss
else:
nsentences = target.size(0)
mask = target.ne(self.padding_idx)
loss, nll_loss = label_smoothed_nll_loss(
lprobs.view(-1, lprobs.size(-1)),
target.view(-1),
self.eps,
ignore_index=self.padding_idx,
reduce=reduce,
)
n_correct = torch.sum(
lprobs.argmax(-1).masked_select(mask).eq(target.masked_select(mask))
)
ntokens = torch.sum(mask)
return loss, nll_loss, nsentences, ntokens, n_correct
| EXA-1-master | exa/libraries/fairseq/examples/speech_text_joint_to_text/criterions/multi_modality_cross_entropy.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import math
import torch
import torch.nn.functional as F
from fairseq import utils
from fairseq.criterions import FairseqCriterion, register_criterion
from fairseq.criterions.label_smoothed_cross_entropy import label_smoothed_nll_loss
from fairseq.logging import metrics
@register_criterion("guided_label_smoothed_cross_entropy_with_accuracy")
class GuidedCrossEntAccCriterion(FairseqCriterion):
def __init__(
self,
task,
sentence_avg,
guide_alpha,
text_input_cost_ratio,
label_smoothing,
disable_text_guide_update_num=0,
attentive_cost_regularization=0,
):
"""
        guide_alpha: alpha to interpolate nll and kd loss
text_input_cost_ratio: loss ratio for text only input data
label_smoothing: label smoothing ratio
disable_text_guide_update_num: only use nll loss for the first N updates
        attentive_cost_regularization: ratio of attentive cost
"""
super().__init__(task)
self.alpha = guide_alpha
self.attn_beta = attentive_cost_regularization
self.sentence_avg = sentence_avg
self.eps = label_smoothing
self.text_input_cost_ratio = text_input_cost_ratio
self.disable_update_num = disable_text_guide_update_num
assert self.alpha >= 0 and self.alpha <= 1.0
@staticmethod
def add_args(parser):
"""Add criterion-specific arguments to the parser."""
# fmt: off
parser.add_argument('--label-smoothing', default=0., type=float, metavar='D',
help='epsilon for label smoothing, 0 means no label smoothing')
# fmt: off
parser.add_argument('--guide-alpha', default=0., type=float, metavar='D',
help='alpha to merge kd cost from text to speech input with ce loss')
# fmt: off
parser.add_argument('--disable-text-guide-update-num', default=0, type=int, metavar='D',
help='disable guided target from text for the first N updates.')
parser.add_argument("--attentive-cost-regularization", default=0.0, type=float, metavar='D',
help="use encoder attentive loss regularization with cost ratio D")
parser.add_argument("--attentive-cost-without-normalize", action='store_true',
help="Don't do normalization during attentive cost computation")
def forward(self, model, sample, reduce=True):
reduction = 'sum' if reduce else 'none'
net_input = sample["net_input"]
net_output = model(**net_input)
attn_cost = None
lprobs = model.get_normalized_probs(net_output, log_probs=True)
        is_dual_input = net_input['src_tokens'] is not None and net_input.get('src_txt_tokens') is not None
target = model.get_targets(sample, net_output)
src_token_num = 0
if is_dual_input:
# lprobs_spch from speech encoder and lprobs_text from text encoder
lprobs_spch, lprobs_text = torch.chunk(lprobs, 2)
lprobs_spch.batch_first = lprobs.batch_first
lprobs_text.batch_first = lprobs.batch_first
speech_loss, speech_nll_loss, speech_correct, speech_total = \
self.guide_loss_and_acc(model, lprobs_spch, lprobs_text, target, reduce=(reduction == 'sum'))
text_loss, text_nll_loss, text_correct, text_total = self.compute_loss_and_acc(model, lprobs_text, target, reduction=reduction)
loss = (speech_loss + text_loss)
nll_loss = (speech_nll_loss + text_nll_loss)
correct = speech_correct + text_correct
total = speech_total + text_total
attn_cost = net_output[1].get('attn_cost')
if attn_cost is not None:
# attn_cost is batch_first and padding tokens have been masked already
src_token_num = attn_cost.ne(0).sum()
attn_cost = attn_cost.sum()
loss = loss + attn_cost * self.attn_beta
else:
attn_cost = 0
else:
loss, nll_loss, correct, total = self.compute_loss_and_acc(model, lprobs, target, reduction=reduction)
if sample["net_input"]['src_tokens'] is None: # text input only
loss = loss * self.text_input_cost_ratio
speech_loss = None
speech_nll_loss = None
sample_size, logging_output = self.get_logging_output(
sample, loss, nll_loss, correct, total, src_token_num, speech_loss, speech_nll_loss, attn_cost, is_dual_input
)
return loss, sample_size, logging_output
def compute_loss_and_acc(self, model, lprobs, target, reduction='sum'):
if not lprobs.batch_first:
lprobs = lprobs.transpose(0, 1)
lprobs = lprobs.view(-1, lprobs.size(-1)) # -> (B x T) x C
target = target.view(-1)
loss, nll_loss = label_smoothed_nll_loss(
lprobs, target, self.eps, ignore_index=self.padding_idx, reduce=(reduction == 'sum'),
)
mask = target.ne(self.padding_idx)
correct = torch.sum(lprobs.argmax(1).masked_select(mask).eq(target.masked_select(mask)))
total = torch.sum(mask)
return loss, nll_loss, correct, total
def guide_loss_and_acc(self, model, lprobs, lprobs_teacher, target, reduce=True):
""" lprobs_teacher is used as guide for lprobs """
if self.alpha == 0.0 or model.num_updates < self.disable_update_num:
return self.compute_loss_and_acc(model, lprobs, target, reduction=('sum' if reduce else 'none'))
if not lprobs.batch_first:
lprobs = lprobs.transpose(0, 1)
lprobs_teacher = lprobs_teacher.transpose(0, 1)
lprobs = lprobs.view(-1, lprobs.size(-1)).float() # -> (B x T) x C
lprobs_teacher = lprobs_teacher.view(-1, lprobs_teacher.size(-1)).float() # -> (B x T) x C
target = target.view(-1)
loss = F.nll_loss(lprobs, target, ignore_index=self.padding_idx, reduction='sum' if reduce else 'none')
nll_loss = loss
probs_teacher = lprobs_teacher.exp().masked_fill_(target.unsqueeze(-1).eq(self.padding_idx), 0)
probs_teacher = probs_teacher.detach()
guide_loss = -(probs_teacher*lprobs).sum() if reduce else -(probs_teacher*lprobs).sum(-1, keepdim=True)
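        # guide_loss is the cross-entropy of the (speech-input) student lprobs
        # under the detached text-input teacher distribution; it is interpolated
        # with the hard-label NLL below using guide_alpha.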
loss = self.alpha*guide_loss + (1.0 - self.alpha)*loss
mask = target.ne(self.padding_idx)
correct = torch.sum(lprobs.argmax(1).masked_select(mask).eq(target.masked_select(mask)))
total = torch.sum(mask)
return loss, nll_loss, correct, total
def get_logging_output(
self,
sample,
loss,
nll_loss,
correct,
total,
src_token_num=0,
speech_loss=None,
speech_nll_loss=None,
attn_cost=None,
is_dual_input=False,
):
sample_size = (
sample["target"].size(0) if self.sentence_avg else sample["ntokens"]
)
mul_size = 2 if is_dual_input else 1
logging_output = {
"loss": utils.item(loss.data), # * sample['ntokens'],
"nll_loss": utils.item(nll_loss.data), # * sample['ntokens'],
"ntokens": sample["ntokens"]*mul_size,
"nsentences": sample["target"].size(0)*mul_size,
"sample_size": sample_size*mul_size,
"correct": utils.item(correct.data),
"total": utils.item(total.data),
"src_token_num": utils.item(src_token_num.data) if src_token_num > 0 else 0,
"nframes": torch.sum(sample["net_input"]["src_lengths"]).item(),
}
if speech_loss is not None:
logging_output["speech_loss"] = utils.item(speech_loss.data)
logging_output["speech_nll_loss"] = utils.item(speech_nll_loss.data)
logging_output["sample_size_speech_cost"] = sample_size
logging_output["speech_attn_loss"] = attn_cost
return sample_size*mul_size, logging_output
@staticmethod
def aggregate_logging_outputs(logging_outputs):
"""Aggregate logging outputs from data parallel training."""
correct_sum = sum(log.get("correct", 0) for log in logging_outputs)
total_sum = sum(log.get("total", 0) for log in logging_outputs)
src_token_sum = sum(log.get("src_token_num", 0) for log in logging_outputs)
loss_sum = sum(log.get("loss", 0) for log in logging_outputs)
nll_loss_sum = sum(log.get("nll_loss", 0) for log in logging_outputs)
ntokens = sum(log.get("ntokens", 0) for log in logging_outputs)
nsentences = sum(log.get("nsentences", 0) for log in logging_outputs)
sample_size = sum(log.get("sample_size", 0) for log in logging_outputs)
nframes = sum(log.get("nframes", 0) for log in logging_outputs)
speech_loss_sum = sum(log.get("speech_loss", 0) for log in logging_outputs)
speech_nll_loss_sum = sum(log.get("speech_nll_loss", 0) for log in logging_outputs)
speech_attn_loss_sum = sum(log.get("speech_attn_loss", 0) for log in logging_outputs)
sample_size_speech = sum(log.get("sample_size_speech_cost", 0) for log in logging_outputs)
agg_output = {
"loss": loss_sum / sample_size / math.log(2) if sample_size > 0 else 0.0,
"nll_loss": nll_loss_sum / sample_size / math.log(2) if sample_size > 0 else 0.0,
# if args.sentence_avg, then sample_size is nsentences, and loss
# is per-sentence loss; else sample_size is ntokens, and the loss
# becomes per-output token loss
"speech_loss": speech_loss_sum / sample_size_speech / math.log(2) if sample_size_speech > 0 else 0.0,
"speech_nll_loss": speech_nll_loss_sum / sample_size_speech / math.log(2) if sample_size_speech > 0 else 0.0,
"speech_attn_loss": speech_attn_loss_sum / src_token_sum / math.log(2) if src_token_sum > 0 else 0.0,
"ntokens": ntokens,
"nsentences": nsentences,
"nframes": nframes,
"sample_size": sample_size,
"acc": correct_sum * 100.0 / total_sum if total_sum > 0 else 0.0,
"correct": correct_sum,
"total": total_sum,
"src_token_num": src_token_sum,
            # total is the number of valid (non-padding) tokens
}
return agg_output
@classmethod
def reduce_metrics(cls, logging_outputs):
"""Aggregate logging outputs from data parallel training."""
agg_logging_outputs = cls.aggregate_logging_outputs(logging_outputs)
for k, v in agg_logging_outputs.items():
if k in {'nsentences', 'ntokens', 'sample_size'}:
continue
metrics.log_scalar(k, v, round=3)
| EXA-1-master | exa/libraries/fairseq/examples/speech_text_joint_to_text/criterions/text_guide_cross_entropy_acc.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import importlib
import os
for file in os.listdir(os.path.dirname(__file__)):
if file.endswith(".py") and not file.startswith("_"):
criterion_name = file[: file.find(".py")]
importlib.import_module(
"examples.speech_text_joint_to_text.criterions." + criterion_name
)
| EXA-1-master | exa/libraries/fairseq/examples/speech_text_joint_to_text/criterions/__init__.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
import math
from dataclasses import dataclass, field
from fairseq import utils
from fairseq.logging import metrics
from fairseq.criterions import FairseqCriterion, register_criterion
from fairseq.criterions.ctc import CtcCriterion, CtcCriterionConfig
from fairseq.criterions.label_smoothed_cross_entropy import (
LabelSmoothedCrossEntropyCriterionConfig,
)
from fairseq.logging.meters import safe_round
from .multi_modality_cross_entropy import SpeechTextPreTrainCrossEntCriterion
logger = logging.getLogger(__name__)
@dataclass
class SpeechTextPreTrainCompoundCriterionConfig(
LabelSmoothedCrossEntropyCriterionConfig
):
zero_infinity: bool = field(
default=False,
metadata={"help": "zero inf loss when source length <= target length"},
)
post_process: str = field(
default="none",
metadata={
"help": "how to post process predictions into words. can be letter, "
"wordpiece, BPE symbols, etc. "
"See fairseq.data.data_utils.post_process() for full list of options"
},
)
@register_criterion(
"speech_text_pretrain_compound", dataclass=SpeechTextPreTrainCompoundCriterionConfig
)
class SpeechTextPreTrainCompoundCriterion(FairseqCriterion):
def __init__(
self,
task,
sentence_avg,
label_smoothing,
report_accuracy=False,
zero_infinity=False,
post_process=None,
):
super().__init__(task)
self.xent = SpeechTextPreTrainCrossEntCriterion(
task, sentence_avg, label_smoothing, report_accuracy
)
cfg_dict = {
"zero_infinity": zero_infinity,
"sentence_avg": sentence_avg,
"post_process": post_process,
}
cfg_ctc = CtcCriterionConfig(**cfg_dict)
self.ctc = CtcCriterion(cfg_ctc, task)
def forward(self, model, sample, reduce=True):
mode = sample["net_input"]["mode"]
if mode == "sup_speech_ctc": # CTC
sample["net_input"][
"src_lengths"
] = None # get downsampled src_lengths from padding_mask
loss, sample_size, logging_output = self.ctc(model, sample, reduce)
logging_output["mode"] = SpeechTextPreTrainCompoundCriterion.mode2value(
"CTC"
)
else:
loss, sample_size, logging_output = self.xent(model, sample, reduce)
logging_output["mode"] = SpeechTextPreTrainCompoundCriterion.mode2value(
"xent"
)
return loss, sample_size, logging_output
@staticmethod
def logging_outputs_can_be_summed() -> bool:
"""
Whether the logging outputs returned by `forward` can be summed
across workers prior to calling `reduce_metrics`. Setting this
        to True will improve distributed training speed.
"""
return True
@staticmethod
    def mode2value(mode):  # encode the mode as a prime so logging outputs can be summed
if mode == "CTC":
return 907 # prime number
if mode == "xent":
return 887 # prime number
return 0
@staticmethod
def value2mode(value):
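        # `value` may be a multiple of the prime tag (907 or 887) when logging
        # outputs have been summed across workers, so the mode is recovered by
        # divisibility rather than equality.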
if value % 907 == 0:
return "CTC"
if value % 887 == 0:
return "xent"
        raise ValueError("Unknown mode")
@staticmethod
def reduce_metrics(logging_outputs) -> None:
"""Aggregate logging outputs from data parallel training."""
def _get_mode(logging_outputs):
mds = [
SpeechTextPreTrainCompoundCriterion.value2mode(log["mode"])
for log in logging_outputs
]
            if any(m != mds[0] for m in mds):
raise ValueError("mode in one mini-batch is expected to be the same!")
return mds[0]
log_mode = _get_mode(logging_outputs)
if log_mode == "xent":
return SpeechTextPreTrainCrossEntCriterion.reduce_metrics(logging_outputs)
# ctc loss
loss_sum = utils.item(sum(log.get("loss", 0) for log in logging_outputs))
ntokens = utils.item(sum(log.get("ntokens", 0) for log in logging_outputs))
nsentences = utils.item(
sum(log.get("nsentences", 0) for log in logging_outputs)
)
sample_size = utils.item(
sum(log.get("sample_size", 0) for log in logging_outputs)
)
metrics.log_scalar(
"ctc_loss", loss_sum / sample_size / math.log(2), sample_size, round=3
)
metrics.log_scalar("ctc_ntokens", ntokens)
metrics.log_scalar("ctc_nsentences", nsentences)
if sample_size != ntokens:
metrics.log_scalar(
"ctc_nll_loss", loss_sum / ntokens / math.log(2), ntokens, round=3
)
c_errors = sum(log.get("c_errors", 0) for log in logging_outputs)
metrics.log_scalar("_c_errors", c_errors)
c_total = sum(log.get("c_total", 0) for log in logging_outputs)
metrics.log_scalar("_c_total", c_total)
w_errors = sum(log.get("w_errors", 0) for log in logging_outputs)
metrics.log_scalar("_w_errors", w_errors)
wv_errors = sum(log.get("wv_errors", 0) for log in logging_outputs)
metrics.log_scalar("_wv_errors", wv_errors)
w_total = sum(log.get("w_total", 0) for log in logging_outputs)
metrics.log_scalar("_w_total", w_total)
if c_total > 0:
metrics.log_derived(
"uer",
lambda meters: safe_round(
meters["_c_errors"].sum * 100.0 / meters["_c_total"].sum, 3
)
if meters["_c_total"].sum > 0
else float("nan"),
)
if w_total > 0:
metrics.log_derived(
"wer",
lambda meters: safe_round(
meters["_w_errors"].sum * 100.0 / meters["_w_total"].sum, 3
)
if meters["_w_total"].sum > 0
else float("nan"),
)
metrics.log_derived(
"raw_wer",
lambda meters: safe_round(
meters["_wv_errors"].sum * 100.0 / meters["_w_total"].sum, 3
)
if meters["_w_total"].sum > 0
else float("nan"),
)
| EXA-1-master | exa/libraries/fairseq/examples/speech_text_joint_to_text/criterions/multi_modality_compound.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
from fairseq.optim.amp_optimizer import AMPOptimizer
from fairseq.tasks import register_task
from fairseq.tasks.speech_to_text import SpeechToTextTask
from .data.speech_to_text_dataset_with_domain import SpeechToTextDatasetCreatorWithDomain
from .loss.attention_head_selection import HeadSelectionLoss
@register_task("speech_to_text_head_selection")
class SpeechToTextHeadSelectionTask(SpeechToTextTask):
@classmethod
def add_args(cls, parser):
SpeechToTextTask.add_args(parser)
parser.add_argument(
"--task-type",
type=str,
default="lang",
help="task type for head selection, lang or domain"
)
parser.add_argument(
"--kl-weight",
type=float,
default=0.0,
help="the weight of KL loss"
)
def __init__(self, args, tgt_dict):
super().__init__(args, tgt_dict)
self.task_type = args.task_type
assert self.task_type in ["lang", "domain"], "invalid task_type: {}, should be either lang or domain".format(self.task_type)
self.map_task_to_id(args.train_subset)
        self.encoder_head_prior = float(args.encoder_attention_heads) / args.total_encoder_attention_heads
        self.decoder_head_prior = float(args.decoder_attention_heads) / args.total_decoder_attention_heads
self.kl_loss = HeadSelectionLoss(args)
def map_task_to_id(self, train_subset):
src_lang_set, tgt_lang_set, domain_set = set(), set(), set()
for split in train_subset.split(","):
seq = split.split("_")
assert len(seq) == 4, "subset {} should be in the format of train_src_tgt_domain".format(split)
_, src_lang, tgt_lang, domain = seq
src_lang_set.add(src_lang)
tgt_lang_set.add(tgt_lang)
domain_set.add(domain)
src_langs = sorted(src_lang_set)
tgt_langs = sorted(tgt_lang_set)
domains = sorted(domain_set)
self.src_lang_map = {src_lang: i for (i, src_lang) in enumerate(src_langs)}
self.tgt_lang_map = {tgt_lang: i for (i, tgt_lang) in enumerate(tgt_langs)}
self.domain_map = {domain: i for (i, domain) in enumerate(domains)}
if self.task_type == "lang":
self.encoder_tasks = len(self.src_lang_map)
self.decoder_tasks = len(self.tgt_lang_map)
elif self.task_type == "domain":
self.encoder_tasks = len(self.domain_map)
self.decoder_tasks = len(self.domain_map)
def load_dataset(self, split, epoch=1, combine=False, **kwargs):
is_train_split = split.startswith("train")
pre_tokenizer = self.build_tokenizer(self.args)
bpe_tokenizer = self.build_bpe(self.args)
self.datasets[split] = SpeechToTextDatasetCreatorWithDomain.from_tsv(
self.args.data,
self.data_cfg,
split,
self.tgt_dict,
pre_tokenizer,
bpe_tokenizer,
is_train_split=is_train_split,
epoch=epoch,
seed=self.args.seed,
src_lang_map=self.src_lang_map,
tgt_lang_map=self.tgt_lang_map,
domain_map=self.domain_map,
speaker_to_id=self.speaker_to_id
)
def build_model(self, args):
args.encoder_tasks = self.encoder_tasks
args.decoder_tasks = self.decoder_tasks
return super(SpeechToTextHeadSelectionTask, self).build_model(args)
def get_sample_sizes(self, sample, task_ids, num_tasks):
"""
task_ids: (bsz,)
get sample sizes for each task
"""
bsz = task_ids.size(0)
mat = torch.zeros((num_tasks, bsz), device=task_ids.device)
mat[task_ids, torch.arange(bsz)] = 1.0
ntokens = torch.sum(sample['target'] != 1, dim=-1)
sample_sizes = torch.matmul(mat, ntokens.float())
return sample_sizes
def train_step(
self, sample, model, criterion, optimizer, update_num, ignore_grad=False
):
model.train()
model.set_num_updates(update_num)
# task ids
if self.task_type == "lang":
encoder_task_ids = sample["src_lang_ids"]
decoder_task_ids = sample["tgt_lang_ids"]
elif self.task_type == "domain":
encoder_task_ids = sample["domain_ids"]
decoder_task_ids = sample["domain_ids"]
model.encoder.set_task_ids(encoder_task_ids)
model.decoder.set_task_ids(decoder_task_ids)
with torch.autograd.profiler.record_function("forward"):
with torch.cuda.amp.autocast(enabled=(isinstance(optimizer, AMPOptimizer))):
loss, sample_size, logging_output = criterion(model, sample)
# KL loss
if self.args.encoder_attn_head_select:
sample_sizes = self.get_sample_sizes(sample, encoder_task_ids, self.encoder_tasks)
loss += self.kl_loss(
model.encoder.attn_head_selector.head_samples,
sample_sizes,
self.encoder_head_prior
)
if self.args.decoder_self_attn_head_select:
sample_sizes = self.get_sample_sizes(sample, decoder_task_ids, self.decoder_tasks)
loss += self.kl_loss(
model.decoder.self_attn_head_selector.head_samples,
sample_sizes,
self.decoder_head_prior
)
if self.args.dec_enc_attn_head_select:
sample_sizes = self.get_sample_sizes(sample, decoder_task_ids, self.decoder_tasks)
loss += self.kl_loss(
                        model.decoder.enc_attn_head_selector.head_samples,
sample_sizes,
self.decoder_head_prior
)
if ignore_grad:
loss *= 0
with torch.autograd.profiler.record_function("backward"):
optimizer.backward(loss)
return loss, sample_size, logging_output
def valid_step(self, sample, model, criterion):
model.eval()
# task ids
if self.task_type == "lang":
encoder_task_ids = sample["src_lang_ids"]
decoder_task_ids = sample["tgt_lang_ids"]
elif self.task_type == "domain":
encoder_task_ids = sample["domain_ids"]
decoder_task_ids = sample["domain_ids"]
model.encoder.set_task_ids(encoder_task_ids)
model.decoder.set_task_ids(decoder_task_ids)
with torch.no_grad():
loss, sample_size, logging_output = criterion(model, sample)
return loss, sample_size, logging_output
def inference_step(
self, generator, models, sample, prefix_tokens=None, constraints=None
):
with torch.no_grad():
# task ids
if self.task_type == "lang":
encoder_task_ids = sample["src_lang_ids"][:1]
decoder_task_ids = sample["tgt_lang_ids"][:1]
elif self.task_type == "domain":
encoder_task_ids = sample["domain_ids"][:1]
decoder_task_ids = sample["domain_ids"][:1]
for model in models:
model.encoder.set_task_ids(encoder_task_ids)
model.decoder.set_task_ids(decoder_task_ids)
return generator.generate(
models, sample, prefix_tokens=prefix_tokens, constraints=constraints
)
| EXA-1-master | exa/libraries/fairseq/examples/attention_head_selection/src/speech_to_text_head_selection.py |
| EXA-1-master | exa/libraries/fairseq/examples/attention_head_selection/src/__init__.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import math
import torch
from torch.nn.modules.loss import _Loss
class HeadSelectionLoss(_Loss):
def __init__(self, args):
super().__init__()
self.args = args
self.kl_weight = getattr(args, "kl_weight", 0.0)
def forward(self, head_samples, sample_sizes, prior=0.5, eps=1e-7):
"""
        head_samples: (num_tasks, num_layers, num_heads)
sample_sizes: (num_tasks, )
"""
kl_loss = (head_samples * (torch.log(head_samples + eps) - math.log(prior))).sum(-1).sum(-1)
kl_loss /= (torch.numel(head_samples) / head_samples.size(0))
kl_loss = self.kl_weight * torch.matmul(kl_loss, sample_sizes)
return kl_loss
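if __name__ == "__main__":
    # Minimal usage sketch, not part of the original module: the Namespace,
    # shapes and numbers below are hypothetical and only illustrate the call.
    from argparse import Namespace
    _loss_fn = HeadSelectionLoss(Namespace(kl_weight=0.1))
    # 3 tasks, 6 layers, 8 candidate heads; entries are selection probabilities in (0, 1)
    _head_samples = torch.sigmoid(torch.randn(3, 6, 8))
    _sample_sizes = torch.tensor([100.0, 40.0, 60.0])  # tokens seen per task
    print(_loss_fn(_head_samples, _sample_sizes, prior=0.5).item())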
| EXA-1-master | exa/libraries/fairseq/examples/attention_head_selection/src/loss/attention_head_selection.py |
| EXA-1-master | exa/libraries/fairseq/examples/attention_head_selection/src/loss/__init__.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
from typing import Dict, List, Optional
from pathlib import Path
import torch.nn as nn
from torch import Tensor
from fairseq import checkpoint_utils
from fairseq.models import register_model, register_model_architecture
from fairseq.utils import safe_hasattr
from fairseq.models.speech_to_text.s2t_transformer import (
S2TTransformerModel,
S2TTransformerEncoder,
TransformerDecoderScriptable
)
from fairseq.models.speech_to_text.s2t_transformer import base_architecture as s2t_base_architecture
from ..modules.attn_head_selector import AttnHeadSelector
from ..modules.head_selection_transformer_layer import HeadSelectionTransformerEncoderLayer
from .head_selection_transformer import HeadSelectionTransformerDecoder
logger = logging.getLogger(__name__)
@register_model("head_selection_s2t_transformer")
class HeadSelectionS2TTransformerModel(S2TTransformerModel):
"""
Head selection implemented in S2TTransformer
"""
def __init__(self, encoder, decoder):
super().__init__(encoder, decoder)
@staticmethod
def add_args(parser):
S2TTransformerModel.add_args(parser)
# encoder head selection
parser.add_argument(
"--encoder-attn-head-select",
action="store_true",
default=False,
help="encoder head selection"
)
parser.add_argument(
"--total-encoder-attention-heads",
type=int,
help="total number of encoder attention heads"
)
# decoder self attention selection
parser.add_argument(
"--decoder-self-attn-head-select",
action="store_true",
default=False,
help="decoder self-attention head selection"
)
# decoder-encoder attention selection
parser.add_argument(
"--dec-enc-attn-head-select",
action="store_true",
default=False,
help="decoder-encoder attention head selection"
)
parser.add_argument(
"--total-decoder-attention-heads",
type=int,
help="total number of decoder attention heads"
)
# selection strategy
parser.add_argument(
"--attn-head-select-strategy",
type=str,
help="attention head selection strategy, subset or group"
)
@classmethod
def build_encoder(cls, args):
if safe_hasattr(args, "encoder_attn_head_select") and args.encoder_attn_head_select:
encoder = HeadSelectionS2TTransformerEncoder(args)
else:
encoder = S2TTransformerEncoder(args)
pretraining_path = getattr(args, "load_pretrained_encoder_from", None)
if pretraining_path is not None:
if not Path(pretraining_path).exists():
logger.warning(
f"skipped pretraining because {pretraining_path} does not exist"
)
else:
encoder = checkpoint_utils.load_pretrained_component_from_model(
component=encoder, checkpoint=pretraining_path
)
logger.info(f"loaded pretrained encoder from: {pretraining_path}")
return encoder
@classmethod
def build_decoder(cls, args, task, embed_tokens):
if (safe_hasattr(args, "decoder_self_attn_head_select") and args.decoder_self_attn_head_select) or (safe_hasattr(args, "dec_enc_attn_head_select") and args.dec_enc_attn_head_select):
return HeadSelectionTransformerDecoderScriptable(args, task.target_dictionary, embed_tokens)
else:
return TransformerDecoderScriptable(args, task.target_dictionary, embed_tokens)
class HeadSelectionS2TTransformerEncoder(S2TTransformerEncoder):
def __init__(self, args):
super().__init__(args)
self.attn_head_selector = AttnHeadSelector(
args.encoder_tasks,
args.encoder_layers,
args.total_encoder_attention_heads,
args.encoder_attention_heads,
args.attn_head_select_strategy,
)
self.task_ids = None
self.transformer_layers = nn.ModuleList([
HeadSelectionTransformerEncoderLayer(args, layer_idx, attn_head_selector=self.attn_head_selector) for layer_idx in range(args.encoder_layers)
])
def set_task_ids(self, task_ids):
self.task_ids = task_ids
def _forward(self, src_tokens, src_lengths, return_all_hiddens=False):
self.attn_head_selector.head_select(self.task_ids)
return super()._forward(src_tokens, src_lengths, return_all_hiddens)
class HeadSelectionTransformerDecoderScriptable(HeadSelectionTransformerDecoder):
def extract_features(
self,
prev_output_tokens,
encoder_out: Optional[Dict[str, List[Tensor]]] = None,
incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]] = None,
full_context_alignment: bool = False,
alignment_layer: Optional[int] = None,
alignment_heads: Optional[int] = None,
):
# call scriptable method from parent class
x, _ = self.extract_features_scriptable(
prev_output_tokens,
encoder_out,
incremental_state,
full_context_alignment,
alignment_layer,
alignment_heads,
)
return x, None
@register_model_architecture(model_name="head_selection_s2t_transformer", arch_name="head_selection_s2t_transformer")
def base_architecture(args):
s2t_base_architecture(args)
args.encoder_attn_head_select = getattr(args, "encoder_attn_head_select", False)
args.decoder_self_attn_head_select = getattr(args, "decoder_self_attn_head_select", False)
args.dec_enc_attn_head_select = getattr(args, "dec_enc_attn_head_select", False)
args.total_encoder_attention_heads = getattr(args, "total_encoder_attention_heads", 8)
args.total_decoder_attention_heads = getattr(args, "total_decoder_attention_heads", 8)
args.attn_head_select_strategy = getattr(args, "attn_head_select_strategy", "group")
@register_model_architecture("head_selection_s2t_transformer", "head_selection_s2t_transformer_s")
def head_selection_s2t_transformer_s(args):
args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 256)
args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 256 * 8)
args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 4)
args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 4)
args.dropout = getattr(args, "dropout", 0.1)
base_architecture(args)
| EXA-1-master | exa/libraries/fairseq/examples/attention_head_selection/src/models/head_selection_s2t_transformer.py |
| EXA-1-master | exa/libraries/fairseq/examples/attention_head_selection/src/models/__init__.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from typing import Any, List, Dict, Optional
import torch
import torch.nn as nn
from torch import Tensor
from fairseq.utils import safe_hasattr
from fairseq.models.transformer import (
TransformerModel,
TransformerEncoder,
TransformerDecoder
)
from ..modules.attn_head_selector import AttnHeadSelector
from ..modules.head_selection_transformer_layer import (
HeadSelectionTransformerEncoderLayer,
HeadSelectionTransformerDecoderLayer
)
class HeadSelectionTransformerModel(TransformerModel):
def __init__(self, args, encoder, decoder):
super().__init__(args, encoder, decoder)
@staticmethod
def add_args(parser):
TransformerModel.add_args(parser)
# encoder head selection
parser.add_argument(
"--encoder-attn-head-select",
action="store_true",
default=False,
help="encoder head selection"
)
parser.add_argument(
"--total-encoder-attention-heads",
type=int,
help="total number of encoder attention heads"
)
# decoder self attention
parser.add_argument(
"--decoder-self-attn-head-select",
action="store_true",
default=False,
help="decoder self-attention head selection"
)
# decoder-encoder attention
parser.add_argument(
"--dec-enc-attn-head-select",
action="store_true",
default=False,
help="decoder-encoder attention head selection"
)
parser.add_argument(
"--total-decoder-attention-heads",
type=int,
help="total number of decoder attention heads"
)
# selection strategy
parser.add_argument(
"--attn-head-select-strategy",
type=str,
help="attention head selection strategy, subset or group"
)
@classmethod
def build_encoder(cls, args, src_dict, embed_tokens):
if safe_hasattr(args, "encoder_attn_head_select") and args.encoder_attn_head_select:
return HeadSelectionTransformerEncoder(
args, src_dict, embed_tokens
)
else:
return TransformerEncoder(args, src_dict, embed_tokens)
@classmethod
def build_decoder(cls, args, tgt_dict, embed_tokens):
if (safe_hasattr(args, "decoder_self_attn_head_select") and args.decoder_self_attn_head_select) or (safe_hasattr(args, "dec_enc_attn_head_select") and args.dec_enc_attn_head_select):
return HeadSelectionTransformerDecoder(
args, tgt_dict, embed_tokens
)
else:
return TransformerDecoder(args, tgt_dict, embed_tokens)
class HeadSelectionTransformerEncoder(TransformerEncoder):
def __init__(self, args, dictionary, embed_tokens):
self.num_tasks = args.encoder_tasks
self.num_layers = args.encoder_layers
self.total_num_heads = args.total_encoder_attention_heads
self.num_heads = args.encoder_attention_heads
self.select_strategy = args.attn_head_select_strategy
super().__init__(args, dictionary, embed_tokens)
self.attn_head_selector = AttnHeadSelector(
self.num_tasks,
self.num_layers,
self.total_num_heads,
self.num_heads,
self.select_strategy
)
self.task_ids = None
self.layers = nn.ModuleList(
[self.build_encoder_layer(args, i) for i in range(args.encoder_layers)]
)
def set_task_ids(self, task_ids):
self.task_ids = task_ids
def build_encoder_layer(self, args, layer_idx=None):
return HeadSelectionTransformerEncoderLayer(
args,
layer_idx,
attn_head_selector=self.attn_head_selector
)
def forward(
self,
src_tokens,
src_lengths: Optional[torch.Tensor] = None,
return_all_hiddens: bool = False,
token_embeddings: Optional[torch.Tensor] = None,
):
self.attn_head_selector.head_select(self.task_ids)
return super().forward(src_tokens, src_lengths, return_all_hiddens, token_embeddings)
class HeadSelectionTransformerDecoder(TransformerDecoder):
def __init__(
self,
args,
dictionary,
embed_tokens,
no_encoder_attn=False,
output_projection=None,
):
self.num_tasks = args.decoder_tasks
self.num_layers = args.decoder_layers
self.total_num_heads = args.total_decoder_attention_heads
self.num_heads = args.decoder_attention_heads
self.select_strategy = args.attn_head_select_strategy
super().__init__(
args, dictionary, embed_tokens,
no_encoder_attn=no_encoder_attn,
output_projection=output_projection
)
self.self_attn_head_selector = None
self.enc_attn_head_selector = None
if safe_hasattr(args, "decoder_self_attn_head_select") and args.decoder_self_attn_head_select:
self.self_attn_head_selector = AttnHeadSelector(
self.num_tasks,
self.num_layers,
self.total_num_heads,
self.num_heads,
self.select_strategy
)
if safe_hasattr(args, "dec_enc_attn_head_select") and args.dec_enc_attn_head_select:
self.enc_attn_head_selector = AttnHeadSelector(
self.num_tasks,
self.num_layers,
self.total_num_heads,
self.num_heads,
self.select_strategy
)
self.task_ids = None
self.layers = nn.ModuleList(
[
self.build_head_selection_decoder_layer(args, no_encoder_attn, idx) for idx in range(args.decoder_layers)
]
)
def set_task_ids(self, task_ids):
self.task_ids = task_ids
def build_head_selection_decoder_layer(self, args, no_encoder_attn=False, layer_idx=None):
return HeadSelectionTransformerDecoderLayer(
args,
layer_idx,
self.self_attn_head_selector,
self.enc_attn_head_selector,
no_encoder_attn=no_encoder_attn
)
def forward(
self,
prev_output_tokens,
encoder_out: Optional[Dict[str, List[Tensor]]] = None,
incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]] = None,
features_only: bool = False,
full_context_alignment: bool = False,
alignment_layer: Optional[int] = None,
alignment_heads: Optional[int] = None,
src_lengths: Optional[Any] = None,
return_all_hiddens: bool = False,
):
if self.self_attn_head_selector is not None:
self.self_attn_head_selector.head_select(self.task_ids)
if self.enc_attn_head_selector is not None:
self.enc_attn_head_selector.head_select(self.task_ids)
return super().forward(
prev_output_tokens=prev_output_tokens,
encoder_out=encoder_out,
incremental_state=incremental_state,
features_only=features_only,
full_context_alignment=full_context_alignment,
alignment_layer=alignment_layer,
alignment_heads=alignment_heads,
src_lengths=src_lengths,
return_all_hiddens=return_all_hiddens
)
| EXA-1-master | exa/libraries/fairseq/examples/attention_head_selection/src/models/head_selection_transformer.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from typing import Optional, Tuple
import torch
from torch import Tensor
from torch.nn.functional import (
linear, softmax, dropout, pad,
has_torch_function,
handle_torch_function,
_in_projection_packed,
)
import math
import warnings
def _scaled_dot_product_attention(
q: Tensor,
k: Tensor,
v: Tensor,
attn_mask: Optional[Tensor] = None,
dropout_p: float = 0.0,
bsz: int = 1,
subset_heads: Optional[Tensor] = None,
subset_weights: Optional[Tensor] = None,
) -> Tuple[Tensor, Tensor]:
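    # Scaled dot-product attention computed over all heads at once
    # (B = bsz * total_num_heads, as set up by the caller). When `subset_heads`
    # (bsz x num_selected_heads) is given, the per-example selected heads are
    # gathered from the mixed output, re-weighted by `subset_weights`, and
    # flattened back to (bsz * num_selected_heads, Nt, E); the returned
    # attention weights are gathered the same way.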
B, Nt, E = q.shape
q = q / math.sqrt(E)
# B: bsz * total_num_heads
# (B, Nt, E) x (B, E, Ns) -> (B, Nt, Ns)
attn = torch.bmm(q, k.transpose(-2, -1))
if attn_mask is not None:
attn += attn_mask
attn = softmax(attn, dim=-1)
if dropout_p > 0.0:
attn = dropout(attn, p=dropout_p)
if subset_heads is None:
# (B, Nt, Ns) x (B, Ns, E) -> (B, Nt, E)
output = torch.bmm(attn, v)
else:
mixed_output = torch.bmm(attn, v).contiguous().view(bsz, -1, Nt, E)
output = torch.stack(
[mixed_output[torch.arange(bsz), subset_heads[:, col], :, :] for col in range(subset_heads.size(1))],
dim=1
)
output = output * subset_weights.unsqueeze(2).unsqueeze(3)
output = output.contiguous().view(-1, Nt, E)
if subset_heads is not None:
_, Nt, Ns = attn.size()
mixed_attn = attn.view(bsz, -1, Nt, Ns)
attn = torch.stack(
[mixed_attn[torch.arange(bsz), subset_heads[:, col], :, :] for col in range(subset_heads.size(1))], dim=1
)
return output, attn
def _in_projection(
q: Tensor,
k: Tensor,
v: Tensor,
w_q: Tensor,
w_k: Tensor,
w_v: Tensor,
b_q: Optional[Tensor] = None,
b_k: Optional[Tensor] = None,
b_v: Optional[Tensor] = None,
) -> Tuple[Tensor, Tensor, Tensor]:
return linear(q, w_q, b_q), linear(k, w_k, b_k), linear(v, w_v, b_v)
def multi_head_attention_forward(
query: Tensor,
key: Tensor,
value: Tensor,
embed_dim_to_check: int,
total_num_heads: int,
num_heads: int,
in_proj_weight: Tensor,
in_proj_bias: Optional[Tensor],
bias_k: Optional[Tensor],
bias_v: Optional[Tensor],
add_zero_attn: bool,
dropout_p: float,
out_proj_weight: Tensor,
out_proj_bias: Optional[Tensor],
training: bool = True,
key_padding_mask: Optional[Tensor] = None,
need_weights: bool = True,
attn_mask: Optional[Tensor] = None,
use_separate_proj_weight: bool = False,
q_proj_weight: Optional[Tensor] = None,
k_proj_weight: Optional[Tensor] = None,
v_proj_weight: Optional[Tensor] = None,
static_k: Optional[Tensor] = None,
static_v: Optional[Tensor] = None,
subset_heads: Optional[Tensor] = None,
subset_weights: Optional[Tensor] = None,
):
tens_ops = (query, key, value, in_proj_weight, in_proj_bias, bias_k, bias_v, out_proj_weight, out_proj_bias)
if has_torch_function(tens_ops):
return handle_torch_function(
multi_head_attention_forward,
tens_ops,
query,
key,
value,
embed_dim_to_check,
total_num_heads,
num_heads,
in_proj_weight,
in_proj_bias,
bias_k,
bias_v,
add_zero_attn,
dropout_p,
out_proj_weight,
out_proj_bias,
training=training,
key_padding_mask=key_padding_mask,
need_weights=need_weights,
attn_mask=attn_mask,
use_separate_proj_weight=use_separate_proj_weight,
q_proj_weight=q_proj_weight,
k_proj_weight=k_proj_weight,
v_proj_weight=v_proj_weight,
static_k=static_k,
static_v=static_v,
subset_heads=subset_heads,
subset_weights=subset_weights
)
# set up shape vars
tgt_len, bsz, embed_dim = query.shape
src_len, _, _ = key.shape
assert embed_dim == embed_dim_to_check, \
f"was expecting embedding dimension of {embed_dim_to_check}, but got {embed_dim}"
if isinstance(embed_dim, torch.Tensor):
# embed_dim can be a tensor when JIT tracing
head_dim = embed_dim.div(num_heads, rounding_mode='trunc')
else:
head_dim = embed_dim // num_heads
assert head_dim * num_heads == embed_dim, f"embed_dim {embed_dim} not divisible by num_heads {num_heads}"
if use_separate_proj_weight:
# allow MHA to have different embedding dimensions when separate projection weights are used
assert key.shape[:2] == value.shape[:2], \
f"key's sequence and batch dims {key.shape[:2]} do not match value's {value.shape[:2]}"
else:
assert key.shape == value.shape, f"key shape {key.shape} does not match value shape {value.shape}"
#
# compute in-projection
#
if not use_separate_proj_weight:
q, k, v = _in_projection_packed(query, key, value, in_proj_weight, in_proj_bias)
else:
assert q_proj_weight is not None, "use_separate_proj_weight is True but q_proj_weight is None"
assert k_proj_weight is not None, "use_separate_proj_weight is True but k_proj_weight is None"
assert v_proj_weight is not None, "use_separate_proj_weight is True but v_proj_weight is None"
if in_proj_bias is None:
b_q = b_k = b_v = None
else:
b_q, b_k, b_v = in_proj_bias.chunk(3)
q, k, v = _in_projection(query, key, value, q_proj_weight, k_proj_weight, v_proj_weight, b_q, b_k, b_v)
# prep attention mask
if attn_mask is not None:
if attn_mask.dtype == torch.uint8:
warnings.warn("Byte tensor for attn_mask in nn.MultiheadAttention is deprecated. Use bool tensor instead.")
attn_mask = attn_mask.to(torch.bool)
else:
assert attn_mask.is_floating_point() or attn_mask.dtype == torch.bool, \
f"Only float, byte, and bool types are supported for attn_mask, not {attn_mask.dtype}"
# ensure attn_mask's dim is 3
if attn_mask.dim() == 2:
correct_2d_size = (tgt_len, src_len)
if attn_mask.shape != correct_2d_size:
raise RuntimeError(f"The shape of the 2D attn_mask is {attn_mask.shape}, but should be {correct_2d_size}.")
attn_mask = attn_mask.unsqueeze(0)
elif attn_mask.dim() == 3:
correct_3d_size = (bsz * total_num_heads, tgt_len, src_len)
if attn_mask.shape != correct_3d_size:
raise RuntimeError(f"The shape of the 3D attn_mask is {attn_mask.shape}, but should be {correct_3d_size}.")
else:
raise RuntimeError(f"attn_mask's dimension {attn_mask.dim()} is not supported")
# prep key padding mask
if key_padding_mask is not None and key_padding_mask.dtype == torch.uint8:
warnings.warn("Byte tensor for key_padding_mask in nn.MultiheadAttention is deprecated. Use bool tensor instead.")
key_padding_mask = key_padding_mask.to(torch.bool)
# add bias along batch dimension (currently second)
if bias_k is not None and bias_v is not None:
assert static_k is None, "bias cannot be added to static key."
assert static_v is None, "bias cannot be added to static value."
k = torch.cat([k, bias_k.repeat(1, bsz, 1)])
v = torch.cat([v, bias_v.repeat(1, bsz, 1)])
if attn_mask is not None:
attn_mask = pad(attn_mask, (0, 1))
if key_padding_mask is not None:
key_padding_mask = pad(key_padding_mask, (0, 1))
else:
assert bias_k is None
assert bias_v is None
#
# reshape q, k, v for multihead attention and make em batch first
#
q = q.contiguous().view(tgt_len, bsz * total_num_heads, head_dim).transpose(0, 1)
if static_k is None:
k = k.contiguous().view(k.shape[0], bsz * total_num_heads, head_dim).transpose(0, 1)
else:
# TODO finish disentangling control flow so we don't do in-projections when statics are passed
assert static_k.size(0) == bsz * total_num_heads, \
f"expecting static_k.size(0) of {bsz * total_num_heads}, but got {static_k.size(0)}"
assert static_k.size(2) == head_dim, \
f"expecting static_k.size(2) of {head_dim}, but got {static_k.size(2)}"
k = static_k
if static_v is None:
v = v.contiguous().view(v.shape[0], bsz * total_num_heads, head_dim).transpose(0, 1)
else:
# TODO finish disentangling control flow so we don't do in-projections when statics are passed
assert static_v.size(0) == bsz * total_num_heads, \
f"expecting static_v.size(0) of {bsz * total_num_heads}, but got {static_v.size(0)}"
assert static_v.size(2) == head_dim, \
f"expecting static_v.size(2) of {head_dim}, but got {static_v.size(2)}"
v = static_v
# add zero attention along batch dimension (now first)
if add_zero_attn:
zero_attn_shape = (bsz * total_num_heads, 1, head_dim)
k = torch.cat([k, torch.zeros(zero_attn_shape, dtype=k.dtype, device=k.device)], dim=1)
v = torch.cat([v, torch.zeros(zero_attn_shape, dtype=v.dtype, device=v.device)], dim=1)
if attn_mask is not None:
attn_mask = pad(attn_mask, (0, 1))
if key_padding_mask is not None:
key_padding_mask = pad(key_padding_mask, (0, 1))
# update source sequence length after adjustments
src_len = k.size(1)
# merge key padding and attention masks
if key_padding_mask is not None:
assert key_padding_mask.shape == (bsz, src_len), \
f"expecting key_padding_mask shape of {(bsz, src_len)}, but got {key_padding_mask.shape}"
key_padding_mask = key_padding_mask.view(bsz, 1, 1, src_len). \
expand(-1, total_num_heads, -1, -1).reshape(bsz * total_num_heads, 1, src_len)
if attn_mask is None:
attn_mask = key_padding_mask
elif attn_mask.dtype == torch.bool:
attn_mask = attn_mask.logical_or(key_padding_mask)
else:
attn_mask = attn_mask.masked_fill(key_padding_mask, float("-inf"))
# convert mask to float
if attn_mask is not None and attn_mask.dtype == torch.bool:
new_attn_mask = torch.zeros_like(attn_mask, dtype=torch.float)
new_attn_mask.masked_fill_(attn_mask, float("-inf"))
attn_mask = new_attn_mask
# adjust dropout probability
if not training:
dropout_p = 0.0
#
# (deep breath) calculate attention and out projection
#
attn_output, attn_output_weights = _scaled_dot_product_attention(q, k, v, attn_mask, dropout_p, bsz, subset_heads, subset_weights)
attn_output = attn_output.transpose(0, 1).contiguous().view(tgt_len, bsz, embed_dim)
attn_output = linear(attn_output, out_proj_weight, out_proj_bias)
if need_weights:
# average attention weights over heads
attn_output_weights = attn_output_weights.view(bsz, num_heads, tgt_len, src_len)
return attn_output, attn_output_weights.sum(dim=1) / num_heads
else:
return attn_output, None
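# Shape walkthrough for the head-selection path above (descriptive note, not
# part of the upstream module; it assumes subset_heads/subset_weights are given,
# as they are when called from MultiheadAttentionSelection):
#   q, k, v enter _scaled_dot_product_attention as (bsz * total_num_heads, len, head_dim).
#   subset_heads is (bsz, num_heads) with indices into the pool of total_num_heads
#   heads, and subset_weights is (bsz, num_heads) with straight-through weights.
#   torch.bmm(attn, v) is viewed as (bsz, total_num_heads, Nt, E), the selected
#   heads are gathered column by column into (bsz, num_heads, Nt, E), scaled by
#   subset_weights, and flattened back to (bsz * num_heads, Nt, E).
#   The returned attention weights keep the 4-D shape (bsz, num_heads, Nt, Ns),
#   which is what the need_weights branch above reshapes and averages.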
| EXA-1-master | exa/libraries/fairseq/examples/attention_head_selection/src/modules/multihead_functional.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from typing import Dict, Optional, Tuple
import torch
from fairseq import utils
from fairseq.modules.quant_noise import quant_noise
from torch import Tensor, nn
from torch.nn import Parameter
from fairseq.modules.multihead_attention import MultiheadAttention
from ..modules.multihead_functional import multi_head_attention_forward
class MultiheadAttentionSelection(MultiheadAttention):
def __init__(
self,
embed_dim,
total_num_heads,
num_heads,
kdim=None,
vdim=None,
dropout=0.0,
bias=True,
add_bias_kv=False,
add_zero_attn=False,
self_attention=False,
encoder_decoder_attention=False,
q_noise=0.0,
qn_block_size=8,
layer_idx=0,
attn_head_selector=None
):
super().__init__(
embed_dim,
num_heads,
kdim=kdim,
vdim=vdim,
dropout=dropout,
bias=bias,
add_bias_kv=add_bias_kv,
add_zero_attn=add_zero_attn,
self_attention=self_attention,
encoder_decoder_attention=encoder_decoder_attention,
q_noise=q_noise,
qn_block_size=qn_block_size,
)
self.layer_idx = layer_idx
self.attn_head_selector = attn_head_selector
self.total_num_heads = total_num_heads
self.total_embed_dim = self.head_dim * total_num_heads
self.k_proj = quant_noise(
nn.Linear(self.kdim, self.total_embed_dim, bias=bias), q_noise, qn_block_size
)
self.v_proj = quant_noise(
nn.Linear(self.vdim, self.total_embed_dim, bias=bias), q_noise, qn_block_size
)
self.q_proj = quant_noise(
nn.Linear(embed_dim, self.total_embed_dim, bias=bias), q_noise, qn_block_size
)
if add_bias_kv:
self.bias_k = Parameter(torch.Tensor(1, 1, self.total_embed_dim))
self.bias_v = Parameter(torch.Tensor(1, 1, self.total_embed_dim))
else:
self.bias_k = self.bias_v = None
self.reset_parameters()
def forward(
self,
query,
key: Optional[Tensor],
value: Optional[Tensor],
key_padding_mask: Optional[Tensor] = None,
incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]] = None,
need_weights: bool = True,
static_kv: bool = False,
attn_mask: Optional[Tensor] = None,
before_softmax: bool = False,
need_head_weights: bool = False,
# subset_heads: Optional[Tensor] = None,
# subset_weights: Optional[Tensor] = None
) -> Tuple[Tensor, Optional[Tensor]]:
if need_head_weights:
need_weights = True
is_tpu = query.device.type == "xla"
subset_heads, subset_weights = self.attn_head_selector(self.layer_idx)
tgt_len, bsz, embed_dim = query.size()
src_len = tgt_len
assert list(query.size()) == [tgt_len, bsz, self.embed_dim]
if key is not None:
src_len, key_bsz, _ = key.size()
if not torch.jit.is_scripting():
assert key_bsz == bsz
assert value is not None
                assert (src_len, bsz) == value.shape[:2]
if (
not self.onnx_trace
and not is_tpu # don't use PyTorch version on TPUs
and incremental_state is None
and not static_kv
# A workaround for quantization to work. Otherwise JIT compilation
# treats bias in linear module as method.
and not torch.jit.is_scripting()
):
assert key is not None and value is not None
return multi_head_attention_forward(
query,
key,
value,
self.embed_dim,
self.total_num_heads,
self.num_heads,
torch.empty([0]),
torch.cat((self.q_proj.bias, self.k_proj.bias, self.v_proj.bias)),
self.bias_k,
self.bias_v,
self.add_zero_attn,
self.dropout_module.p,
self.out_proj.weight,
self.out_proj.bias,
self.training or self.dropout_module.apply_during_inference,
key_padding_mask,
need_weights,
attn_mask,
use_separate_proj_weight=True,
q_proj_weight=self.q_proj.weight,
k_proj_weight=self.k_proj.weight,
v_proj_weight=self.v_proj.weight,
subset_heads=subset_heads,
subset_weights=subset_weights
)
if incremental_state is not None:
saved_state = self._get_input_buffer(incremental_state)
if saved_state is not None and "prev_key" in saved_state:
# previous time steps are cached - no need to recompute
# key and value if they are static
if static_kv:
assert self.encoder_decoder_attention and not self.self_attention
key = value = None
else:
saved_state = None
if self.self_attention:
q = self.q_proj(query)
k = self.k_proj(query)
v = self.v_proj(query)
elif self.encoder_decoder_attention:
# encoder-decoder attention
q = self.q_proj(query)
if key is None:
assert value is None
k = v = None
else:
k = self.k_proj(key)
v = self.v_proj(key)
else:
assert key is not None and value is not None
q = self.q_proj(query)
k = self.k_proj(key)
v = self.v_proj(value)
q *= self.scaling
if self.bias_k is not None:
assert self.bias_v is not None
k = torch.cat([k, self.bias_k.repeat(1, bsz, 1)])
v = torch.cat([v, self.bias_v.repeat(1, bsz, 1)])
if attn_mask is not None:
attn_mask = torch.cat(
[attn_mask, attn_mask.new_zeros(attn_mask.size(0), 1)], dim=1
)
if key_padding_mask is not None:
key_padding_mask = torch.cat(
[
key_padding_mask,
key_padding_mask.new_zeros(key_padding_mask.size(0), 1),
],
dim=1,
)
q = (
q.contiguous()
.view(tgt_len, bsz * self.total_num_heads, self.head_dim)
.transpose(0, 1)
)
if k is not None:
k = (
k.contiguous()
.view(-1, bsz * self.total_num_heads, self.head_dim)
.transpose(0, 1)
)
if v is not None:
v = (
v.contiguous()
.view(-1, bsz * self.total_num_heads, self.head_dim)
.transpose(0, 1)
)
if saved_state is not None:
# saved states are stored with shape (bsz, num_heads, seq_len, head_dim)
if "prev_key" in saved_state:
_prev_key = saved_state["prev_key"]
assert _prev_key is not None
prev_key = _prev_key.view(bsz * self.total_num_heads, -1, self.head_dim)
if static_kv:
k = prev_key
else:
assert k is not None
k = torch.cat([prev_key, k], dim=1)
src_len = k.size(1)
if "prev_value" in saved_state:
_prev_value = saved_state["prev_value"]
assert _prev_value is not None
prev_value = _prev_value.view(bsz * self.total_num_heads, -1, self.head_dim)
if static_kv:
v = prev_value
else:
assert v is not None
v = torch.cat([prev_value, v], dim=1)
prev_key_padding_mask: Optional[Tensor] = None
if "prev_key_padding_mask" in saved_state:
prev_key_padding_mask = saved_state["prev_key_padding_mask"]
assert k is not None and v is not None
key_padding_mask = MultiheadAttention._append_prev_key_padding_mask(
key_padding_mask=key_padding_mask,
prev_key_padding_mask=prev_key_padding_mask,
batch_size=bsz,
src_len=k.size(1),
static_kv=static_kv,
)
saved_state["prev_key"] = k.view(bsz, self.total_num_heads, -1, self.head_dim)
saved_state["prev_value"] = v.view(bsz, self.total_num_heads, -1, self.head_dim)
saved_state["prev_key_padding_mask"] = key_padding_mask
# In this branch incremental_state is never None
assert incremental_state is not None
incremental_state = self._set_input_buffer(incremental_state, saved_state)
assert k is not None
assert k.size(1) == src_len
# This is part of a workaround to get around fork/join parallelism
# not supporting Optional types.
if key_padding_mask is not None and key_padding_mask.dim() == 0:
key_padding_mask = None
if key_padding_mask is not None:
assert key_padding_mask.size(0) == bsz
assert key_padding_mask.size(1) == src_len
if self.add_zero_attn:
assert v is not None
src_len += 1
k = torch.cat([k, k.new_zeros((k.size(0), 1) + k.size()[2:])], dim=1)
v = torch.cat([v, v.new_zeros((v.size(0), 1) + v.size()[2:])], dim=1)
if attn_mask is not None:
attn_mask = torch.cat(
[attn_mask, attn_mask.new_zeros(attn_mask.size(0), 1)], dim=1
)
if key_padding_mask is not None:
key_padding_mask = torch.cat(
[
key_padding_mask,
torch.zeros(key_padding_mask.size(0), 1).type_as(
key_padding_mask
),
],
dim=1,
)
attn_weights = torch.bmm(q, k.transpose(1, 2))
attn_weights = self.apply_sparse_mask(attn_weights, tgt_len, src_len, bsz)
assert list(attn_weights.size()) == [bsz * self.total_num_heads, tgt_len, src_len]
if attn_mask is not None:
attn_mask = attn_mask.unsqueeze(0)
if self.onnx_trace:
attn_mask = attn_mask.repeat(attn_weights.size(0), 1, 1)
attn_weights += attn_mask
if key_padding_mask is not None:
# don't attend to padding symbols
attn_weights = attn_weights.view(bsz, self.total_num_heads, tgt_len, src_len)
if not is_tpu:
attn_weights = attn_weights.masked_fill(
key_padding_mask.unsqueeze(1).unsqueeze(2).to(torch.bool),
float("-inf"),
)
else:
attn_weights = attn_weights.transpose(0, 2)
attn_weights = attn_weights.masked_fill(key_padding_mask, float("-inf"))
attn_weights = attn_weights.transpose(0, 2)
            attn_weights = attn_weights.view(bsz * self.total_num_heads, tgt_len, src_len)
if before_softmax:
return attn_weights, v
attn_weights_float = utils.softmax(
attn_weights, dim=-1, onnx_trace=self.onnx_trace
)
attn_weights = attn_weights_float.type_as(attn_weights)
attn_probs = self.dropout_module(attn_weights)
assert v is not None
# evaluation
if subset_heads is not None and subset_heads.numel() == 1:
subset_heads = subset_heads.repeat(bsz)
subset_weights = subset_weights.repeat(bsz)
if subset_heads is None:
attn = torch.bmm(attn_probs, v)
else:
# training with head selection
mixed_attn = torch.bmm(attn_probs, v).contiguous().view(bsz, self.total_num_heads, tgt_len, self.head_dim)
attn = torch.stack(
[mixed_attn[torch.arange(bsz), subset_heads[:, col], :, :] for col in range(subset_heads.size(1))], dim=1
)
attn = attn * subset_weights.unsqueeze(2).unsqueeze(3)
attn = attn.contiguous().view(bsz * self.num_heads, tgt_len, self.head_dim)
assert list(attn.size()) == [bsz * self.num_heads, tgt_len, self.head_dim]
if self.onnx_trace and attn.size(1) == 1:
# when ONNX tracing a single decoder step (sequence length == 1)
# the transpose is a no-op copy before view, thus unnecessary
attn = attn.contiguous().view(tgt_len, bsz, embed_dim)
else:
attn = attn.transpose(0, 1).contiguous().view(tgt_len, bsz, embed_dim)
attn = self.out_proj(attn)
attn_weights: Optional[Tensor] = None
if need_weights:
if subset_heads is None:
attn_weights = attn_weights_float.view(
bsz, self.num_heads, tgt_len, src_len
).transpose(1, 0)
else:
mixed_attn_weights = attn_weights_float.view(
bsz, self.total_num_heads, tgt_len, src_len
)
attn_weights = torch.stack(
[mixed_attn_weights[torch.arange(bsz), subset_heads[:, col], :, :] for col in range(subset_heads.size(1))], dim=1
).transpose(1, 0)
if not need_head_weights:
# average attention weights over heads
attn_weights = attn_weights.mean(dim=0)
return attn, attn_weights
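# Minimal construction sketch (sizes are hypothetical; AttnHeadSelector is the
# selector module shipped alongside this example):
#
#   selector = AttnHeadSelector(num_tasks=2, num_layers=6,
#                               total_num_heads=16, num_heads=8)
#   selector.head_select(task_ids=torch.tensor([0, 1]))
#   attn = MultiheadAttentionSelection(
#       embed_dim=512, total_num_heads=16, num_heads=8,
#       dropout=0.1, self_attention=True,
#       layer_idx=0, attn_head_selector=selector,
#   )
#
# Each forward call then queries the selector for (subset_heads, subset_weights)
# of its layer and mixes only the selected subset of the 16 shared heads.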
| EXA-1-master | exa/libraries/fairseq/examples/attention_head_selection/src/modules/multihead_attention_selection.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from fairseq.utils import safe_getattr
from fairseq.modules import TransformerEncoderLayer, TransformerDecoderLayer
from ..modules.multihead_attention_selection import MultiheadAttentionSelection
class HeadSelectionTransformerEncoderLayer(TransformerEncoderLayer):
def __init__(self, args, layer_idx, attn_head_selector=None):
super().__init__(args)
self.layer_idx = layer_idx
self.self_attn = self.build_self_attention_selection(
self.embed_dim, args, attn_head_selector
)
def build_self_attention_selection(self, embed_dim, args, attn_head_selector=None):
return MultiheadAttentionSelection(
embed_dim,
args.total_encoder_attention_heads,
args.encoder_attention_heads,
dropout=args.attention_dropout,
self_attention=True,
q_noise=self.quant_noise,
qn_block_size=self.quant_noise_block_size,
layer_idx=self.layer_idx,
attn_head_selector=attn_head_selector
)
class HeadSelectionTransformerDecoderLayer(TransformerDecoderLayer):
def __init__(
self,
args,
layer_idx,
self_attn_head_selector=None,
enc_attn_head_selector=None,
no_encoder_attn=False,
add_bias_kv=False,
add_zero_attn=False,
):
self.layer_idx = layer_idx
super().__init__(args, no_encoder_attn, add_bias_kv, add_zero_attn)
if self_attn_head_selector is not None:
self.self_attn = self.build_self_attention_selection(
self.embed_dim, args,
self_attn_head_selector=self_attn_head_selector,
add_bias_kv=add_bias_kv,
add_zero_attn=add_zero_attn
)
if enc_attn_head_selector is not None:
self.encoder_attn = self.build_encoder_attention_selection(
self.embed_dim, args,
enc_attn_head_selector=enc_attn_head_selector
)
def build_self_attention_selection(
self, embed_dim, args, self_attn_head_selector=None,
add_bias_kv=False, add_zero_attn=False
):
return MultiheadAttentionSelection(
embed_dim,
args.total_decoder_attention_heads,
args.decoder_attention_heads,
dropout=args.attention_dropout,
add_bias_kv=add_bias_kv,
add_zero_attn=add_zero_attn,
self_attention=not safe_getattr(args, "cross_self_attention"),
q_noise=self.quant_noise,
qn_block_size=self.quant_noise_block_size,
layer_idx=self.layer_idx,
attn_head_selector=self_attn_head_selector,
)
def build_encoder_attention_selection(self, embed_dim, args, enc_attn_head_selector=None):
return MultiheadAttentionSelection(
embed_dim,
args.total_decoder_attention_heads,
args.decoder_attention_heads,
kdim=args.encoder_embed_dim,
vdim=args.encoder_embed_dim,
dropout=args.attention_dropout,
encoder_decoder_attention=True,
q_noise=self.quant_noise,
qn_block_size=self.quant_noise_block_size,
layer_idx=self.layer_idx,
attn_head_selector=enc_attn_head_selector,
)
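# Descriptive note (not from the upstream file): compared to the base layers,
# these read two extra fields from `args` -- `total_encoder_attention_heads`
# and `total_decoder_attention_heads` -- i.e. the size of the shared head pool
# from which each task selects encoder_attention_heads / decoder_attention_heads
# heads.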
| EXA-1-master | exa/libraries/fairseq/examples/attention_head_selection/src/modules/head_selection_transformer_layer.py |
EXA-1-master | exa/libraries/fairseq/examples/attention_head_selection/src/modules/__init__.py |
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
import torch.nn as nn
import math
class AttnHeadSelector(nn.Module):
"""
Latent variable modeling of attention head selection
"""
def __init__(
self, num_tasks, num_layers,
total_num_heads, num_heads,
select_strategy="group",
head_select_temp=5.0
):
super(AttnHeadSelector, self).__init__()
self.num_tasks = num_tasks
self.num_layers = num_layers
self.total_num_heads = total_num_heads
self.num_heads = num_heads
self.select_strategy = select_strategy
self.temp = head_select_temp
self.head_logits = torch.nn.Parameter(
torch.Tensor(self.num_tasks, self.num_layers, total_num_heads),
requires_grad=True
)
nn.init.uniform_(
self.head_logits, a=math.log(0.01),
b=math.log(1.0)
)
def gumbel_sample(self, logits, tau=1.0):
gumbels1 = -torch.empty_like(logits, memory_format=torch.legacy_contiguous_format).exponential_().log()
gumbels2 = -torch.empty_like(logits, memory_format=torch.legacy_contiguous_format).exponential_().log()
gumbels1 = (logits + gumbels1 - gumbels2) / tau
y_soft = gumbels1.sigmoid()
return y_soft
def subset_select(self, y_soft, topk, dim=-1):
top_values, top_inds = torch.topk(y_soft, k=topk, dim=dim)
top_ret = 1.0 - top_values.detach() + top_values
return top_inds.detach(), top_ret
    def group_select(self, y_soft, topk, dim=-1):
# top_values: (num_tasks, num_layers, topk)
top_values, top_inds = torch.max(
y_soft.view(self.num_tasks, self.num_layers, -1, topk), dim=2
)
top_inds = top_inds * topk + torch.arange(topk, device=top_inds.device).unsqueeze(0).unsqueeze(1)
top_ret = 1.0 - top_values.detach() + top_values
return top_inds.detach(), top_ret
def head_select(self, task_ids=None):
# gumbel_sample
self.head_samples = self.gumbel_sample(self.head_logits, tau=self.temp)
# head select
if self.select_strategy == "subset":
self.subset_heads, self.subset_weights = self.subset_select(
self.head_samples,
topk=self.num_heads,
)
elif self.select_strategy == "group":
            self.subset_heads, self.subset_weights = self.group_select(
self.head_samples,
topk=self.num_heads,
)
else:
raise ValueError("{} is not supported".format(self.select_strategy))
self.batch_subset = self.subset_heads[task_ids, :, :]
self.batch_weights = self.subset_weights[task_ids, :, :]
def forward(self, layer_idx):
assert layer_idx is not None
batch_subset = self.batch_subset[:, layer_idx, :]
batch_weights = self.batch_weights[:, layer_idx, :]
return batch_subset, batch_weights
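# Minimal usage sketch (added for illustration; the sizes below are hypothetical).
# head_select() samples one head subset per task, then each attention layer
# queries its own slice through forward(layer_idx).
if __name__ == "__main__":
    selector = AttnHeadSelector(
        num_tasks=3, num_layers=6, total_num_heads=16, num_heads=8,
        select_strategy="group", head_select_temp=5.0,
    )
    task_ids = torch.tensor([0, 2, 1])  # one task id per sentence in the batch
    selector.head_select(task_ids)
    heads, weights = selector(layer_idx=0)
    print(heads.shape, weights.shape)  # torch.Size([3, 8]) torch.Size([3, 8])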
| EXA-1-master | exa/libraries/fairseq/examples/attention_head_selection/src/modules/attn_head_selector.py |
EXA-1-master | exa/libraries/fairseq/examples/attention_head_selection/src/data/__init__.py |
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
from pathlib import Path
from typing import Dict, List, Optional
from dataclasses import dataclass
import torch
from fairseq.data import (
ConcatDataset,
Dictionary,
FairseqDataset,
ResamplingDataset
)
from fairseq.data.audio.data_cfg import S2TDataConfig
from fairseq.data.audio.speech_to_text_dataset import (
SpeechToTextDatasetItem,
SpeechToTextDataset,
SpeechToTextDatasetCreator
)
logger = logging.getLogger(__name__)
@dataclass
class SpeechToTextDatasetItemWithDomain(SpeechToTextDatasetItem):
src_lang_id: Optional[torch.Tensor] = None
tgt_lang_id: Optional[torch.Tensor] = None
domain_id: Optional[torch.Tensor] = None
class SpeechToTextDatasetWithDomain(SpeechToTextDataset):
def __init__(
self,
split: str,
is_train_split: bool,
cfg: S2TDataConfig,
audio_paths: List[str],
n_frames: List[int],
src_texts: Optional[List[str]] = None,
tgt_texts: Optional[List[str]] = None,
speakers: Optional[List[str]] = None,
src_langs: Optional[List[str]] = None,
tgt_langs: Optional[List[str]] = None,
ids: Optional[List[str]] = None,
tgt_dict: Optional[Dictionary] = None,
pre_tokenizer=None,
bpe_tokenizer=None,
n_frames_per_step=1,
speaker_to_id=None,
src_lang_ids: Optional[List[int]] = None,
tgt_lang_ids: Optional[List[int]] = None,
domain_ids: Optional[List[int]] = None
):
super().__init__(
split, is_train_split, cfg, audio_paths, n_frames,
src_texts, tgt_texts, speakers, src_langs, tgt_langs,
ids, tgt_dict, pre_tokenizer, bpe_tokenizer,
n_frames_per_step, speaker_to_id
)
assert src_lang_ids is None or len(src_lang_ids) == self.n_samples
assert tgt_lang_ids is None or len(tgt_lang_ids) == self.n_samples
assert domain_ids is None or len(domain_ids) == self.n_samples
self.src_lang_ids = src_lang_ids
self.tgt_lang_ids = tgt_lang_ids
self.domain_ids = domain_ids
def __getitem__(self, index: int) -> SpeechToTextDatasetItemWithDomain:
item = super().__getitem__(index)
src_lang_id = self.src_lang_ids[index]
tgt_lang_id = self.tgt_lang_ids[index]
domain_id = self.domain_ids[index]
return SpeechToTextDatasetItemWithDomain(
index=item.index, source=item.source,
target=item.target, speaker_id=item.speaker_id,
src_lang_id=src_lang_id,
tgt_lang_id=tgt_lang_id,
domain_id=domain_id
)
def collater(
        self, samples: List[SpeechToTextDatasetItemWithDomain], return_order: bool = False
) -> Dict:
if len(samples) == 0:
return {}
out = super().collater(samples, return_order=True)
order = out["order"]
src_lang_ids = torch.tensor([x.src_lang_id for x in samples], dtype=torch.long).index_select(0, order)
tgt_lang_ids = torch.tensor([x.tgt_lang_id for x in samples], dtype=torch.long).index_select(0, order)
domain_ids = torch.tensor([x.domain_id for x in samples], dtype=torch.long).index_select(0, order)
out["src_lang_ids"] = src_lang_ids
out["tgt_lang_ids"] = tgt_lang_ids
out["domain_ids"] = domain_ids
if not return_order:
del out["order"]
return out
class SpeechToTextDatasetCreatorWithDomain(SpeechToTextDatasetCreator):
KEY_SRC_LANG_ID, KEY_TGT_LANG_ID = "src_lang_id", "tgt_lang_id"
KEY_DOMAIN_ID = "domain_id"
# default values
DEFAULT_SRC_LANG_ID, DEFAULT_TGT_LANG_ID, DEFAULT_DOMAIN_ID = 0, 0, 0
@classmethod
def _from_list(
cls,
split_name: str,
is_train_split,
samples: List[Dict],
cfg: S2TDataConfig,
tgt_dict,
pre_tokenizer,
bpe_tokenizer,
n_frames_per_step,
speaker_to_id
) -> SpeechToTextDatasetWithDomain:
audio_root = Path(cfg.audio_root)
ids = [s[cls.KEY_ID] for s in samples]
audio_paths = [(audio_root / s[cls.KEY_AUDIO]).as_posix() for s in samples]
n_frames = [int(s[cls.KEY_N_FRAMES]) for s in samples]
tgt_texts = [s[cls.KEY_TGT_TEXT] for s in samples]
src_texts = [s.get(cls.KEY_SRC_TEXT, cls.DEFAULT_SRC_TEXT) for s in samples]
speakers = [s.get(cls.KEY_SPEAKER, cls.DEFAULT_SPEAKER) for s in samples]
src_langs = [s.get(cls.KEY_SRC_LANG, cls.DEFAULT_LANG) for s in samples]
tgt_langs = [s.get(cls.KEY_TGT_LANG, cls.DEFAULT_LANG) for s in samples]
src_lang_ids = [s.get(cls.KEY_SRC_LANG_ID, cls.DEFAULT_SRC_LANG_ID) for s in samples]
tgt_lang_ids = [s.get(cls.KEY_TGT_LANG_ID, cls.DEFAULT_TGT_LANG_ID) for s in samples]
domain_ids = [s.get(cls.KEY_DOMAIN_ID, cls.DEFAULT_DOMAIN_ID) for s in samples]
return SpeechToTextDatasetWithDomain(
split_name,
is_train_split,
cfg,
audio_paths,
n_frames,
src_texts=src_texts,
tgt_texts=tgt_texts,
speakers=speakers,
src_langs=src_langs,
tgt_langs=tgt_langs,
ids=ids,
tgt_dict=tgt_dict,
pre_tokenizer=pre_tokenizer,
bpe_tokenizer=bpe_tokenizer,
n_frames_per_step=n_frames_per_step,
speaker_to_id=speaker_to_id,
src_lang_ids=src_lang_ids,
tgt_lang_ids=tgt_lang_ids,
domain_ids=domain_ids
)
@classmethod
def _load_samples_from_tsv(
cls,
root: str,
split: str,
src_lang_map,
tgt_lang_map,
domain_map
):
# metadata from split
_, src_lang, tgt_lang, domain = split.split("_")
src_lang_id = src_lang_map[src_lang]
tgt_lang_id = tgt_lang_map[tgt_lang]
domain_id = domain_map[domain]
samples = SpeechToTextDatasetCreator._load_samples_from_tsv(root, split)
for s in samples:
s.update({
cls.KEY_SRC_LANG_ID: src_lang_id,
cls.KEY_TGT_LANG_ID: tgt_lang_id,
cls.KEY_DOMAIN_ID: domain_id
})
return samples
@classmethod
def _from_tsv(
cls,
root: str,
cfg: S2TDataConfig,
split: str,
tgt_dict,
is_train_split: bool,
pre_tokenizer,
bpe_tokenizer,
n_frames_per_step,
speaker_to_id,
src_lang_map: Dict[str, int],
tgt_lang_map: Dict[str, int],
domain_map: Dict[str, int]
    ) -> SpeechToTextDatasetWithDomain:
samples = cls._load_samples_from_tsv(
root, split, src_lang_map,
tgt_lang_map, domain_map
)
return cls._from_list(
split, is_train_split, samples, cfg, tgt_dict, pre_tokenizer,
bpe_tokenizer, n_frames_per_step, speaker_to_id
)
@classmethod
def from_tsv(
cls,
root: str,
cfg: S2TDataConfig,
splits: str,
tgt_dict,
pre_tokenizer,
bpe_tokenizer,
is_train_split: bool,
epoch: int,
seed: int,
src_lang_map: Dict[str, int],
tgt_lang_map: Dict[str, int],
domain_map: Dict[str, int],
n_frames_per_step: int = 1,
speaker_to_id=None
) -> SpeechToTextDatasetWithDomain:
datasets = [
cls._from_tsv(
root, cfg, split, tgt_dict, is_train_split, pre_tokenizer, bpe_tokenizer, n_frames_per_step, speaker_to_id, src_lang_map, tgt_lang_map, domain_map
)
for split in splits.split(",")
]
if is_train_split and len(datasets) > 1 and cfg.sampling_alpha != 1.0:
# temperature-based sampling
size_ratios = cls.get_size_ratios(datasets, alpha=cfg.sampling_alpha)
datasets = [
ResamplingDataset(
d, size_ratio=r, seed=seed, epoch=epoch, replace=(r >= 1.0)
)
for r, d in zip(size_ratios, datasets)
]
return ConcatDataset(datasets) if len(datasets) > 1 else datasets[0]
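# Note on split naming (descriptive; the example names are hypothetical):
# _load_samples_from_tsv derives the language and domain ids from the split name
# itself, which must contain exactly four "_"-separated fields, e.g.
# "train_en_de_med" -> (prefix="train", src_lang="en", tgt_lang="de", domain="med"),
# where "en", "de" and "med" are keys of src_lang_map, tgt_lang_map and domain_map.
# Multiple such splits can be passed to from_tsv as a comma-separated string.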
| EXA-1-master | exa/libraries/fairseq/examples/attention_head_selection/src/data/speech_to_text_dataset_with_domain.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
Helper script to pre-compute embeddings for a flashlight (previously called wav2letter++) dataset
"""
import argparse
import glob
import os
from shutil import copy
import h5py
import numpy as np
import soundfile as sf
import torch
import tqdm
import fairseq
from torch import nn
def read_audio(fname):
""" Load an audio file and return PCM along with the sample rate """
wav, sr = sf.read(fname)
assert sr == 16e3
return wav, 16e3
class PretrainedWav2VecModel(nn.Module):
def __init__(self, fname):
super().__init__()
model, cfg, task = fairseq.checkpoint_utils.load_model_ensemble_and_task([fname])
model = model[0]
model.eval()
self.model = model
def forward(self, x):
with torch.no_grad():
z = self.model.feature_extractor(x)
if isinstance(z, tuple):
z = z[0]
c = self.model.feature_aggregator(z)
return z, c
class EmbeddingWriterConfig(argparse.ArgumentParser):
def __init__(self):
super().__init__("Pre-compute embeddings for flashlight datasets")
kwargs = {"action": "store", "type": str, "required": True}
self.add_argument("--input", "-i", help="Input Directory", **kwargs)
self.add_argument("--output", "-o", help="Output Directory", **kwargs)
self.add_argument("--model", help="Path to model checkpoint", **kwargs)
self.add_argument("--split", help="Dataset Splits", nargs="+", **kwargs)
self.add_argument(
"--ext", default="wav", required=False, help="Audio file extension"
)
self.add_argument(
"--no-copy-labels",
action="store_true",
help="Do not copy label files. Useful for large datasets, use --targetdir in flashlight then.",
)
self.add_argument(
"--use-feat",
action="store_true",
help="Use the feature vector ('z') instead of context vector ('c') for features",
)
self.add_argument("--gpu", help="GPU to use", default=0, type=int)
class Prediction:
""" Lightweight wrapper around a fairspeech embedding model """
def __init__(self, fname, gpu=0):
self.gpu = gpu
self.model = PretrainedWav2VecModel(fname).cuda(gpu)
def __call__(self, x):
x = torch.from_numpy(x).float().cuda(self.gpu)
with torch.no_grad():
z, c = self.model(x.unsqueeze(0))
return z.squeeze(0).cpu().numpy(), c.squeeze(0).cpu().numpy()
class H5Writer:
""" Write features as hdf5 file in flashlight compatible format """
def __init__(self, fname):
self.fname = fname
os.makedirs(os.path.dirname(self.fname), exist_ok=True)
def write(self, data):
channel, T = data.shape
with h5py.File(self.fname, "w") as out_ds:
data = data.T.flatten()
out_ds["features"] = data
out_ds["info"] = np.array([16e3 // 160, T, channel])
class EmbeddingDatasetWriter(object):
"""Given a model and a flashlight dataset, pre-compute and store embeddings
Args:
input_root, str :
Path to the flashlight dataset
output_root, str :
Desired output directory. Will be created if non-existent
split, str :
Dataset split
"""
def __init__(
self,
input_root,
output_root,
split,
model_fname,
extension="wav",
gpu=0,
verbose=False,
use_feat=False,
):
assert os.path.exists(model_fname)
self.model_fname = model_fname
self.model = Prediction(self.model_fname, gpu)
self.input_root = input_root
self.output_root = output_root
self.split = split
self.verbose = verbose
self.extension = extension
self.use_feat = use_feat
assert os.path.exists(self.input_path), "Input path '{}' does not exist".format(
self.input_path
)
def _progress(self, iterable, **kwargs):
if self.verbose:
return tqdm.tqdm(iterable, **kwargs)
return iterable
def require_output_path(self, fname=None):
path = self.get_output_path(fname)
os.makedirs(path, exist_ok=True)
@property
def input_path(self):
return self.get_input_path()
@property
def output_path(self):
return self.get_output_path()
def get_input_path(self, fname=None):
if fname is None:
return os.path.join(self.input_root, self.split)
return os.path.join(self.get_input_path(), fname)
def get_output_path(self, fname=None):
if fname is None:
return os.path.join(self.output_root, self.split)
return os.path.join(self.get_output_path(), fname)
def copy_labels(self):
self.require_output_path()
labels = list(
filter(
lambda x: self.extension not in x, glob.glob(self.get_input_path("*"))
)
)
for fname in tqdm.tqdm(labels):
copy(fname, self.output_path)
@property
def input_fnames(self):
return sorted(glob.glob(self.get_input_path("*.{}".format(self.extension))))
def __len__(self):
return len(self.input_fnames)
def write_features(self):
paths = self.input_fnames
fnames_context = map(
lambda x: os.path.join(
self.output_path, x.replace("." + self.extension, ".h5context")
),
map(os.path.basename, paths),
)
for name, target_fname in self._progress(
zip(paths, fnames_context), total=len(self)
):
wav, sr = read_audio(name)
z, c = self.model(wav)
feat = z if self.use_feat else c
writer = H5Writer(target_fname)
writer.write(feat)
def __repr__(self):
return "EmbeddingDatasetWriter ({n_files} files)\n\tinput:\t{input_root}\n\toutput:\t{output_root}\n\tsplit:\t{split})".format(
n_files=len(self), **self.__dict__
)
if __name__ == "__main__":
args = EmbeddingWriterConfig().parse_args()
for split in args.split:
writer = EmbeddingDatasetWriter(
input_root=args.input,
output_root=args.output,
split=split,
model_fname=args.model,
gpu=args.gpu,
extension=args.ext,
use_feat=args.use_feat,
)
print(writer)
writer.require_output_path()
print("Writing Features...")
writer.write_features()
print("Done.")
if not args.no_copy_labels:
print("Copying label data...")
writer.copy_labels()
print("Done.")
| EXA-1-master | exa/libraries/fairseq/examples/wav2vec/wav2vec_featurize.py |
EXA-1-master | exa/libraries/fairseq/examples/wav2vec/__init__.py |
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
Data pre-processing: build vocabularies and binarize training data.
"""
import argparse
import glob
import os
import random
import soundfile
def get_parser():
parser = argparse.ArgumentParser()
parser.add_argument(
"root", metavar="DIR", help="root directory containing flac files to index"
)
parser.add_argument(
"--valid-percent",
default=0.01,
type=float,
metavar="D",
help="percentage of data to use as validation set (between 0 and 1)",
)
parser.add_argument(
"--dest", default=".", type=str, metavar="DIR", help="output directory"
)
parser.add_argument(
"--ext", default="flac", type=str, metavar="EXT", help="extension to look for"
)
parser.add_argument("--seed", default=42, type=int, metavar="N", help="random seed")
parser.add_argument(
"--path-must-contain",
default=None,
type=str,
metavar="FRAG",
help="if set, path must contain this substring for a file to be included in the manifest",
)
return parser
def main(args):
assert args.valid_percent >= 0 and args.valid_percent <= 1.0
if not os.path.exists(args.dest):
os.makedirs(args.dest)
dir_path = os.path.realpath(args.root)
search_path = os.path.join(dir_path, "**/*." + args.ext)
rand = random.Random(args.seed)
valid_f = (
open(os.path.join(args.dest, "valid.tsv"), "w")
if args.valid_percent > 0
else None
)
with open(os.path.join(args.dest, "train.tsv"), "w") as train_f:
print(dir_path, file=train_f)
if valid_f is not None:
print(dir_path, file=valid_f)
for fname in glob.iglob(search_path, recursive=True):
file_path = os.path.realpath(fname)
if args.path_must_contain and args.path_must_contain not in file_path:
continue
frames = soundfile.info(fname).frames
dest = train_f if rand.random() > args.valid_percent else valid_f
print(
"{}\t{}".format(os.path.relpath(file_path, dir_path), frames), file=dest
)
if valid_f is not None:
valid_f.close()
if __name__ == "__main__":
parser = get_parser()
args = parser.parse_args()
main(args)
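# Example invocation (paths below are placeholders):
#   python wav2vec_manifest.py /path/to/waves --dest /path/to/manifest \
#       --ext flac --valid-percent 0.01
# This writes train.tsv (and valid.tsv when --valid-percent > 0); each file
# starts with the root directory followed by "<relative path>\t<num frames>" lines.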
| EXA-1-master | exa/libraries/fairseq/examples/wav2vec/wav2vec_manifest.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
Helper script to pre-compute embeddings for a flashlight (previously called wav2letter++) dataset
"""
import argparse
import os
def main():
parser = argparse.ArgumentParser()
parser.add_argument("tsv")
parser.add_argument("--output-dir", required=True)
parser.add_argument("--output-name", required=True)
args = parser.parse_args()
os.makedirs(args.output_dir, exist_ok=True)
transcriptions = {}
with open(args.tsv, "r") as tsv, open(
os.path.join(args.output_dir, args.output_name + ".ltr"), "w"
) as ltr_out, open(
os.path.join(args.output_dir, args.output_name + ".wrd"), "w"
) as wrd_out:
root = next(tsv).strip()
for line in tsv:
line = line.strip()
dir = os.path.dirname(line)
if dir not in transcriptions:
parts = dir.split(os.path.sep)
trans_path = f"{parts[-2]}-{parts[-1]}.trans.txt"
path = os.path.join(root, dir, trans_path)
assert os.path.exists(path)
texts = {}
with open(path, "r") as trans_f:
for tline in trans_f:
items = tline.strip().split()
texts[items[0]] = " ".join(items[1:])
transcriptions[dir] = texts
part = os.path.basename(line).split(".")[0]
assert part in transcriptions[dir]
print(transcriptions[dir][part], file=wrd_out)
print(
" ".join(list(transcriptions[dir][part].replace(" ", "|"))) + " |",
file=ltr_out,
)
if __name__ == "__main__":
main()
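# Example invocation (paths below are placeholders):
#   python libri_labels.py /path/to/manifest/train.tsv \
#       --output-dir /path/to/labels --output-name train
# This emits train.wrd (word transcripts) and train.ltr (letter targets with
# "|" marking word boundaries), one line per manifest entry.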
| EXA-1-master | exa/libraries/fairseq/examples/wav2vec/libri_labels.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
Helper script to pre-compute embeddings for a flashlight (previously called wav2letter++) dataset
"""
import argparse
import glob
import os
import os.path as osp
import pprint
import soundfile as sf
import torch
import fairseq
from torch import nn
from torch.utils.data import DataLoader
try:
import tqdm
except ImportError:
print("Install tqdm to use --log-format=tqdm")
class FilesDataset:
def __init__(self, files, labels):
self.files = files
if labels and osp.exists(labels):
with open(labels, "r") as lbl_f:
self.labels = [line.rstrip() for line in lbl_f]
else:
self.labels = labels
def __len__(self):
return len(self.files)
def __getitem__(self, index):
fname = self.files[index]
wav, sr = sf.read(fname)
assert sr == 16000
wav = torch.from_numpy(wav).float()
lbls = None
if self.labels:
if isinstance(self.labels, str):
lbl_file = osp.splitext(fname)[0] + "." + self.labels
with open(lbl_file, "r") as lblf:
lbls = lblf.readline()
assert lbls is not None
else:
lbls = self.labels[index]
return wav, lbls
def collate(self, batch):
return batch
class ArgTypes:
@staticmethod
def existing_path(arg):
arg = str(arg)
assert osp.exists(arg), f"File {arg} does not exist"
return arg
@staticmethod
def mkdir(arg):
arg = str(arg)
os.makedirs(arg, exist_ok=True)
return arg
class DatasetWriter:
def __init__(self):
self.args = self.load_config()
pprint.pprint(self.args.__dict__)
self.model = self.load_model()
def __getattr__(self, attr):
return getattr(self.args, attr)
def read_manifest(self, fname):
with open(fname, "r") as fp:
lines = fp.read().split("\n")
root = lines.pop(0).strip()
fnames = [
osp.join(root, line.split("\t")[0]) for line in lines if len(line) > 0
]
return fnames
def process_splits(self):
if self.args.shard is not None or self.args.num_shards is not None:
assert self.args.shard is not None and self.args.num_shards is not None
for split in self.splits:
print(split)
if self.extension == "tsv":
datadir = osp.join(self.data_dir, f"{split}.{self.extension}")
print("Reading manifest file: ", datadir)
files = self.read_manifest(datadir)
else:
datadir = osp.join(self.data_dir, split, f"**/*.{self.extension}")
files = glob.glob(datadir, recursive=True)
assert len(files) > 0
if self.args.shard is not None:
files = files[self.args.shard :: self.args.num_shards]
lbls = []
with open(self.data_file(split), "w") as srcf:
for line, lbl in self.iterate(files):
print(line, file=srcf)
if self.args.labels:
lbls.append(lbl + "\n")
if self.args.labels:
assert all(a is not None for a in lbls)
with open(self.lbl_file(split), "w") as lblf:
lblf.writelines(lbls)
def iterate(self, files):
data = self.load_data(files)
for samples in tqdm.tqdm(data, total=len(files) // 32):
for wav, lbl in samples:
x = wav.unsqueeze(0).float().cuda()
div = 1
while x.size(-1) // div > self.args.max_size:
div += 1
xs = x.chunk(div, dim=-1)
result = []
for x in xs:
torch.cuda.empty_cache()
x = self.model.feature_extractor(x)
if self.quantize_location == "encoder":
with torch.no_grad():
_, idx = self.model.vector_quantizer.forward_idx(x)
idx = idx.squeeze(0).cpu()
else:
with torch.no_grad():
z = self.model.feature_aggregator(x)
_, idx = self.model.vector_quantizer.forward_idx(z)
idx = idx.squeeze(0).cpu()
result.append(idx)
idx = torch.cat(result, dim=0)
yield " ".join("-".join(map(str, a.tolist())) for a in idx), lbl
def lbl_file(self, name):
shard_part = "" if self.args.shard is None else f".{self.args.shard}"
return osp.join(self.output_dir, f"{name}.lbl{shard_part}")
def data_file(self, name):
shard_part = "" if self.args.shard is None else f".{self.args.shard}"
return osp.join(self.output_dir, f"{name}.src{shard_part}")
def var_file(self):
return osp.join(self.output_dir, f"vars.pt")
def load_config(self):
parser = argparse.ArgumentParser("Vector Quantized wav2vec features")
# Model Arguments
parser.add_argument("--checkpoint", type=ArgTypes.existing_path, required=True)
parser.add_argument("--data-parallel", action="store_true")
# Output Arguments
parser.add_argument("--output-dir", type=ArgTypes.mkdir, required=True)
# Data Arguments
parser.add_argument("--data-dir", type=ArgTypes.existing_path, required=True)
parser.add_argument("--splits", type=str, nargs="+", required=True)
parser.add_argument("--extension", type=str, required=True)
parser.add_argument("--labels", type=str, required=False)
parser.add_argument("--shard", type=int, default=None)
parser.add_argument("--num-shards", type=int, default=None)
parser.add_argument("--max-size", type=int, default=1300000)
# Logger Arguments
parser.add_argument(
"--log-format", type=str, choices=["none", "simple", "tqdm"]
)
return parser.parse_args()
def load_data(self, fnames):
dataset = FilesDataset(fnames, self.args.labels)
loader = DataLoader(
dataset, batch_size=32, collate_fn=dataset.collate, num_workers=8
)
return loader
def load_model(self):
model, cfg, task = fairseq.checkpoint_utils.load_model_ensemble_and_task([self.checkpoint])
model = model[0]
self.quantize_location = getattr(cfg.model, "vq", "encoder")
model.eval().float()
model.cuda()
if self.data_parallel:
model = nn.DataParallel(model)
return model
def __call__(self):
self.process_splits()
if hasattr(self.model.feature_extractor, "vars") and (
self.args.shard is None or self.args.shard == 0
):
vars = (
self.model.feature_extractor.vars.view(
self.model.feature_extractor.banks,
self.model.feature_extractor.num_vars,
-1,
)
.cpu()
.detach()
)
print("writing learned latent variable embeddings: ", vars.shape)
torch.save(vars, self.var_file())
if __name__ == "__main__":
write_data = DatasetWriter()
write_data()
print("Done.")
| EXA-1-master | exa/libraries/fairseq/examples/wav2vec/vq-wav2vec_featurize.py |
EXA-1-master | exa/libraries/fairseq/examples/wav2vec/unsupervised/__init__.py |
|
#!/usr/bin/env python3 -u
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
Run inference for pre-processed data with a trained model.
"""
import ast
from collections import namedtuple
from dataclasses import dataclass, field
from enum import Enum, auto
import hydra
from hydra.core.config_store import ConfigStore
import logging
import math
import os
from omegaconf import OmegaConf
from typing import Optional
import sys
import editdistance
import torch
from hydra.core.hydra_config import HydraConfig
from fairseq import checkpoint_utils, progress_bar, tasks, utils
from fairseq.data.data_utils import post_process
from fairseq.dataclass.configs import FairseqDataclass, FairseqConfig
from fairseq.logging.meters import StopwatchMeter
from omegaconf import open_dict
from examples.speech_recognition.kaldi.kaldi_decoder import KaldiDecoderConfig
logging.root.setLevel(logging.INFO)
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
logger = logging.getLogger(__name__)
class DecoderType(Enum):
VITERBI = auto()
KENLM = auto()
FAIRSEQ = auto()
KALDI = auto()
@dataclass
class UnsupGenerateConfig(FairseqDataclass):
fairseq: FairseqConfig = FairseqConfig()
lm_weight: float = field(
default=2.0,
metadata={"help": "language model weight"},
)
w2l_decoder: DecoderType = field(
default=DecoderType.VITERBI,
metadata={"help": "type of decoder to use"},
)
kaldi_decoder_config: Optional[KaldiDecoderConfig] = None
lexicon: Optional[str] = field(
default=None,
metadata={
"help": "path to lexicon. This is also used to 'phonemize' for unsupvised param tuning"
},
)
lm_model: Optional[str] = field(
default=None,
metadata={"help": "path to language model (kenlm or fairseq)"},
)
decode_stride: Optional[float] = field(
default=None,
metadata={"help": "changing the decoding frequency of the generator"},
)
unit_lm: bool = field(
default=False,
metadata={"help": "whether to use unit lm"},
)
beam_threshold: float = field(
default=50.0,
metadata={"help": "beam score threshold"},
)
beam_size_token: float = field(
default=100.0,
metadata={"help": "max tokens per beam"},
)
beam: int = field(
default=5,
metadata={"help": "decoder beam size"},
)
nbest: int = field(
default=1,
metadata={"help": "number of results to return"},
)
word_score: float = field(
default=1.0,
metadata={"help": "word score to add at end of word"},
)
unk_weight: float = field(
default=-math.inf,
metadata={"help": "unknown token weight"},
)
sil_weight: float = field(
default=0.0,
metadata={"help": "silence token weight"},
)
targets: Optional[str] = field(
default=None,
metadata={"help": "extension of ground truth labels to compute UER"},
)
results_path: Optional[str] = field(
default=None,
metadata={"help": "where to store results"},
)
post_process: Optional[str] = field(
default=None,
metadata={"help": "how to post process results"},
)
vocab_usage_power: float = field(
default=2,
metadata={"help": "for unsupervised param tuning"},
)
viterbi_transcript: Optional[str] = field(
default=None,
metadata={"help": "for unsupervised param tuning"},
)
min_lm_ppl: float = field(
default=0,
metadata={"help": "for unsupervised param tuning"},
)
min_vt_uer: float = field(
default=0,
metadata={"help": "for unsupervised param tuning"},
)
blank_weight: float = field(
default=0,
metadata={"help": "value to add or set for blank emission"},
)
blank_mode: str = field(
default="set",
metadata={
"help": "can be add or set, how to modify blank emission with blank weight"
},
)
sil_is_blank: bool = field(
default=False,
metadata={"help": "if true, <SIL> token is same as blank token"},
)
unsupervised_tuning: bool = field(
default=False,
metadata={
"help": "if true, returns a score based on unsupervised param selection metric instead of UER"
},
)
is_ax: bool = field(
default=False,
metadata={
"help": "if true, assumes we are using ax for tuning and returns a tuple for ax to consume"
},
)
def get_dataset_itr(cfg, task):
return task.get_batch_iterator(
dataset=task.dataset(cfg.fairseq.dataset.gen_subset),
max_tokens=cfg.fairseq.dataset.max_tokens,
max_sentences=cfg.fairseq.dataset.batch_size,
max_positions=(sys.maxsize, sys.maxsize),
ignore_invalid_inputs=cfg.fairseq.dataset.skip_invalid_size_inputs_valid_test,
required_batch_size_multiple=cfg.fairseq.dataset.required_batch_size_multiple,
num_shards=cfg.fairseq.dataset.num_shards,
shard_id=cfg.fairseq.dataset.shard_id,
num_workers=cfg.fairseq.dataset.num_workers,
data_buffer_size=cfg.fairseq.dataset.data_buffer_size,
).next_epoch_itr(shuffle=False)
def process_predictions(
cfg: UnsupGenerateConfig,
hypos,
tgt_dict,
target_tokens,
res_files,
):
retval = []
word_preds = []
transcriptions = []
dec_scores = []
for i, hypo in enumerate(hypos[: min(len(hypos), cfg.nbest)]):
if torch.is_tensor(hypo["tokens"]):
tokens = hypo["tokens"].int().cpu()
tokens = tokens[tokens >= tgt_dict.nspecial]
hyp_pieces = tgt_dict.string(tokens)
else:
hyp_pieces = " ".join(hypo["tokens"])
if "words" in hypo and len(hypo["words"]) > 0:
hyp_words = " ".join(hypo["words"])
else:
hyp_words = post_process(hyp_pieces, cfg.post_process)
to_write = {}
if res_files is not None:
to_write[res_files["hypo.units"]] = hyp_pieces
to_write[res_files["hypo.words"]] = hyp_words
tgt_words = ""
if target_tokens is not None:
if isinstance(target_tokens, str):
tgt_pieces = tgt_words = target_tokens
else:
tgt_pieces = tgt_dict.string(target_tokens)
tgt_words = post_process(tgt_pieces, cfg.post_process)
if res_files is not None:
to_write[res_files["ref.units"]] = tgt_pieces
to_write[res_files["ref.words"]] = tgt_words
if not cfg.fairseq.common_eval.quiet:
logger.info(f"HYPO {i}:" + hyp_words)
if tgt_words:
logger.info("TARGET:" + tgt_words)
if "am_score" in hypo and "lm_score" in hypo:
logger.info(
f"DECODER AM SCORE: {hypo['am_score']}, DECODER LM SCORE: {hypo['lm_score']}, DECODER SCORE: {hypo['score']}"
)
elif "score" in hypo:
logger.info(f"DECODER SCORE: {hypo['score']}")
logger.info("___________________")
hyp_words_arr = hyp_words.split()
tgt_words_arr = tgt_words.split()
retval.append(
(
editdistance.eval(hyp_words_arr, tgt_words_arr),
len(hyp_words_arr),
len(tgt_words_arr),
hyp_pieces,
hyp_words,
)
)
word_preds.append(hyp_words_arr)
transcriptions.append(to_write)
dec_scores.append(-hypo.get("score", 0)) # negate cuz kaldi returns NLL
if len(retval) > 1:
best = None
for r, t in zip(retval, transcriptions):
if best is None or r[0] < best[0][0]:
best = r, t
for dest, tran in best[1].items():
print(tran, file=dest)
dest.flush()
return best[0]
assert len(transcriptions) == 1
for dest, tran in transcriptions[0].items():
print(tran, file=dest)
return retval[0]
def prepare_result_files(cfg: UnsupGenerateConfig):
def get_res_file(file_prefix):
if cfg.fairseq.dataset.num_shards > 1:
file_prefix = f"{cfg.fairseq.dataset.shard_id}_{file_prefix}"
path = os.path.join(
cfg.results_path,
"{}{}.txt".format(
cfg.fairseq.dataset.gen_subset,
file_prefix,
),
)
return open(path, "w", buffering=1)
if not cfg.results_path:
return None
return {
"hypo.words": get_res_file(""),
"hypo.units": get_res_file("_units"),
"ref.words": get_res_file("_ref"),
"ref.units": get_res_file("_ref_units"),
"hypo.nbest.words": get_res_file("_nbest_words"),
}
def optimize_models(cfg: UnsupGenerateConfig, use_cuda, models):
"""Optimize ensemble for generation"""
for model in models:
model.eval()
if cfg.fairseq.common.fp16:
model.half()
if use_cuda:
model.cuda()
GenResult = namedtuple(
"GenResult",
[
"count",
"errs_t",
"gen_timer",
"lengths_hyp_unit_t",
"lengths_hyp_t",
"lengths_t",
"lm_score_t",
"num_feats",
"num_sentences",
"num_symbols",
"vt_err_t",
"vt_length_t",
],
)
def generate(cfg: UnsupGenerateConfig, models, saved_cfg, use_cuda):
task = tasks.setup_task(cfg.fairseq.task)
saved_cfg.task.labels = cfg.fairseq.task.labels
task.load_dataset(cfg.fairseq.dataset.gen_subset, task_cfg=saved_cfg.task)
# Set dictionary
tgt_dict = task.target_dictionary
logger.info(
"| {} {} {} examples".format(
cfg.fairseq.task.data,
cfg.fairseq.dataset.gen_subset,
len(task.dataset(cfg.fairseq.dataset.gen_subset)),
)
)
# Load dataset (possibly sharded)
itr = get_dataset_itr(cfg, task)
# Initialize generator
gen_timer = StopwatchMeter()
def build_generator(cfg: UnsupGenerateConfig):
w2l_decoder = cfg.w2l_decoder
if w2l_decoder == DecoderType.VITERBI:
from examples.speech_recognition.w2l_decoder import W2lViterbiDecoder
return W2lViterbiDecoder(cfg, task.target_dictionary)
elif w2l_decoder == DecoderType.KENLM:
from examples.speech_recognition.w2l_decoder import W2lKenLMDecoder
return W2lKenLMDecoder(cfg, task.target_dictionary)
elif w2l_decoder == DecoderType.FAIRSEQ:
from examples.speech_recognition.w2l_decoder import W2lFairseqLMDecoder
return W2lFairseqLMDecoder(cfg, task.target_dictionary)
elif w2l_decoder == DecoderType.KALDI:
from examples.speech_recognition.kaldi.kaldi_decoder import KaldiDecoder
assert cfg.kaldi_decoder_config is not None
return KaldiDecoder(
cfg.kaldi_decoder_config,
cfg.beam,
)
else:
raise NotImplementedError(
"only wav2letter decoders with (viterbi, kenlm, fairseqlm) options are supported at the moment but found "
+ str(w2l_decoder)
)
generator = build_generator(cfg)
kenlm = None
fairseq_lm = None
if cfg.lm_model is not None:
import kenlm
kenlm = kenlm.Model(cfg.lm_model)
num_sentences = 0
if cfg.results_path is not None and not os.path.exists(cfg.results_path):
os.makedirs(cfg.results_path)
res_files = prepare_result_files(cfg)
errs_t = 0
lengths_hyp_t = 0
lengths_hyp_unit_t = 0
lengths_t = 0
count = 0
num_feats = 0
all_hyp_pieces = []
all_hyp_words = []
num_symbols = (
len([s for s in tgt_dict.symbols if not s.startswith("madeup")])
- tgt_dict.nspecial
)
targets = None
if cfg.targets is not None:
tgt_path = os.path.join(
cfg.fairseq.task.data, cfg.fairseq.dataset.gen_subset + "." + cfg.targets
)
if os.path.exists(tgt_path):
with open(tgt_path, "r") as f:
targets = f.read().splitlines()
viterbi_transcript = None
if cfg.viterbi_transcript is not None and len(cfg.viterbi_transcript) > 0:
logger.info(f"loading viterbi transcript from {cfg.viterbi_transcript}")
with open(cfg.viterbi_transcript, "r") as vf:
viterbi_transcript = vf.readlines()
viterbi_transcript = [v.rstrip().split() for v in viterbi_transcript]
gen_timer.start()
start = 0
end = len(itr)
hypo_futures = None
if cfg.w2l_decoder == DecoderType.KALDI:
logger.info("Extracting features")
hypo_futures = []
samples = []
with progress_bar.build_progress_bar(cfg.fairseq.common, itr) as t:
for i, sample in enumerate(t):
if "net_input" not in sample or i < start or i >= end:
continue
if "padding_mask" not in sample["net_input"]:
sample["net_input"]["padding_mask"] = None
hypos, num_feats = gen_hypos(
generator, models, num_feats, sample, task, use_cuda
)
hypo_futures.append(hypos)
samples.append(sample)
itr = list(zip(hypo_futures, samples))
start = 0
end = len(itr)
logger.info("Finished extracting features")
with progress_bar.build_progress_bar(cfg.fairseq.common, itr) as t:
for i, sample in enumerate(t):
if i < start or i >= end:
continue
if hypo_futures is not None:
hypos, sample = sample
hypos = [h.result() for h in hypos]
else:
if "net_input" not in sample:
continue
hypos, num_feats = gen_hypos(
generator, models, num_feats, sample, task, use_cuda
)
for i, sample_id in enumerate(sample["id"].tolist()):
if targets is not None:
target_tokens = targets[sample_id]
elif "target" in sample or "target_label" in sample:
toks = (
sample["target"][i, :]
if "target_label" not in sample
else sample["target_label"][i, :]
)
target_tokens = utils.strip_pad(toks, tgt_dict.pad()).int().cpu()
else:
target_tokens = None
# Process top predictions
(
errs,
length_hyp,
length,
hyp_pieces,
hyp_words,
) = process_predictions(
cfg,
hypos[i],
tgt_dict,
target_tokens,
res_files,
)
errs_t += errs
lengths_hyp_t += length_hyp
lengths_hyp_unit_t += (
len(hyp_pieces) if len(hyp_pieces) > 0 else len(hyp_words)
)
lengths_t += length
count += 1
all_hyp_pieces.append(hyp_pieces)
all_hyp_words.append(hyp_words)
num_sentences += (
sample["nsentences"] if "nsentences" in sample else sample["id"].numel()
)
lm_score_sum = 0
if kenlm is not None:
if cfg.unit_lm:
lm_score_sum = sum(kenlm.score(w) for w in all_hyp_pieces)
else:
lm_score_sum = sum(kenlm.score(w) for w in all_hyp_words)
elif fairseq_lm is not None:
lm_score_sum = sum(fairseq_lm.score([h.split() for h in all_hyp_words])[0])
vt_err_t = 0
vt_length_t = 0
if viterbi_transcript is not None:
unit_hyps = []
if cfg.targets is not None and cfg.lexicon is not None:
lex = {}
with open(cfg.lexicon, "r") as lf:
for line in lf:
items = line.rstrip().split()
lex[items[0]] = items[1:]
for h in all_hyp_pieces:
hyp_ws = []
for w in h.split():
assert w in lex, w
hyp_ws.extend(lex[w])
unit_hyps.append(hyp_ws)
else:
unit_hyps.extend([h.split() for h in all_hyp_words])
vt_err_t = sum(
editdistance.eval(vt, h) for vt, h in zip(viterbi_transcript, unit_hyps)
)
vt_length_t = sum(len(h) for h in viterbi_transcript)
if res_files is not None:
for r in res_files.values():
r.close()
gen_timer.stop(lengths_hyp_t)
return GenResult(
count,
errs_t,
gen_timer,
lengths_hyp_unit_t,
lengths_hyp_t,
lengths_t,
lm_score_sum,
num_feats,
num_sentences,
num_symbols,
vt_err_t,
vt_length_t,
)
def gen_hypos(generator, models, num_feats, sample, task, use_cuda):
sample = utils.move_to_cuda(sample) if use_cuda else sample
if "features" in sample["net_input"]:
sample["net_input"]["dense_x_only"] = True
num_feats += (
sample["net_input"]["features"].shape[0]
* sample["net_input"]["features"].shape[1]
)
hypos = task.inference_step(generator, models, sample, None)
return hypos, num_feats
def main(cfg: UnsupGenerateConfig, model=None):
if (
cfg.fairseq.dataset.max_tokens is None
and cfg.fairseq.dataset.batch_size is None
):
cfg.fairseq.dataset.max_tokens = 1024000
use_cuda = torch.cuda.is_available() and not cfg.fairseq.common.cpu
task = tasks.setup_task(cfg.fairseq.task)
overrides = ast.literal_eval(cfg.fairseq.common_eval.model_overrides)
if cfg.fairseq.task._name == "unpaired_audio_text":
overrides["model"] = {
"blank_weight": cfg.blank_weight,
"blank_mode": cfg.blank_mode,
"blank_is_sil": cfg.sil_is_blank,
"no_softmax": True,
"segmentation": {
"type": "NONE",
},
}
else:
overrides["model"] = {
"blank_weight": cfg.blank_weight,
"blank_mode": cfg.blank_mode,
}
if cfg.decode_stride:
overrides["model"]["generator_stride"] = cfg.decode_stride
if model is None:
# Load ensemble
logger.info("| loading model(s) from {}".format(cfg.fairseq.common_eval.path))
models, saved_cfg = checkpoint_utils.load_model_ensemble(
cfg.fairseq.common_eval.path.split("\\"),
arg_overrides=overrides,
task=task,
suffix=cfg.fairseq.checkpoint.checkpoint_suffix,
strict=(cfg.fairseq.checkpoint.checkpoint_shard_count == 1),
num_shards=cfg.fairseq.checkpoint.checkpoint_shard_count,
)
optimize_models(cfg, use_cuda, models)
else:
models = [model]
saved_cfg = cfg.fairseq
with open_dict(saved_cfg.task):
saved_cfg.task.shuffle = False
saved_cfg.task.sort_by_length = False
gen_result = generate(cfg, models, saved_cfg, use_cuda)
wer = None
if gen_result.lengths_t > 0:
wer = gen_result.errs_t * 100.0 / gen_result.lengths_t
logger.info(f"WER: {wer}")
lm_ppl = float("inf")
if gen_result.lm_score_t != 0 and gen_result.lengths_hyp_t > 0:
hyp_len = gen_result.lengths_hyp_t
lm_ppl = math.pow(
10, -gen_result.lm_score_t / (hyp_len + gen_result.num_sentences)
)
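# lm_score_t is a sum of log10 LM scores, so perplexity is recovered as
# 10^(-score / count); num_sentences is added to the hypothesis length so
# that each sentence contributes one </s> token. For example, a score sum of
# -2000 over 950 hypothesis tokens and 50 sentences gives 10^(2000/1000) = 100.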
logger.info(f"LM PPL: {lm_ppl}")
logger.info(
"| Processed {} sentences ({} tokens) in {:.1f}s ({:.2f}"
" sentences/s, {:.2f} tokens/s)".format(
gen_result.num_sentences,
gen_result.gen_timer.n,
gen_result.gen_timer.sum,
gen_result.num_sentences / gen_result.gen_timer.sum,
1.0 / gen_result.gen_timer.avg,
)
)
vt_diff = None
if gen_result.vt_length_t > 0:
vt_diff = gen_result.vt_err_t / gen_result.vt_length_t
vt_diff = max(cfg.min_vt_uer, vt_diff)
lm_ppl = max(cfg.min_lm_ppl, lm_ppl)
if not cfg.unsupervised_tuning:
weighted_score = wer
else:
weighted_score = math.log(lm_ppl) * (vt_diff or 1.0)
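# Unsupervised selection heuristic: rank runs by log(LM perplexity) scaled by
# the UER against the viterbi transcript (falling back to 1.0 when none was
# provided); lower is better. When unsupervised_tuning is off, the plain WER
# computed above is used as the selection score instead.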
res = (
f"| Generate {cfg.fairseq.dataset.gen_subset} with beam={cfg.beam}, "
f"lm_weight={cfg.kaldi_decoder_config.acoustic_scale if cfg.kaldi_decoder_config else cfg.lm_weight}, "
f"word_score={cfg.word_score}, sil_weight={cfg.sil_weight}, blank_weight={cfg.blank_weight}, "
f"WER: {wer}, LM_PPL: {lm_ppl}, num feats: {gen_result.num_feats}, "
f"length: {gen_result.lengths_hyp_t}, UER to viterbi: {(vt_diff or 0) * 100}, score: {weighted_score}"
)
logger.info(res)
# print(res)
return task, weighted_score
@hydra.main(
config_path=os.path.join("../../..", "fairseq", "config"), config_name="config"
)
def hydra_main(cfg):
with open_dict(cfg):
# make hydra logging work with ddp (see https://github.com/facebookresearch/hydra/issues/1126)
cfg.job_logging_cfg = OmegaConf.to_container(
HydraConfig.get().job_logging, resolve=True
)
cfg = OmegaConf.create(
OmegaConf.to_container(cfg, resolve=False, enum_to_str=False)
)
OmegaConf.set_struct(cfg, True)
logger.info(cfg)
utils.import_user_module(cfg.fairseq.common)
_, score = main(cfg)
if cfg.is_ax:
return score, None
return score
def cli_main():
try:
from hydra._internal.utils import get_args
cfg_name = get_args().config_name or "config"
except:
logger.warning("Failed to get config name from hydra args")
cfg_name = "config"
cs = ConfigStore.instance()
cs.store(name=cfg_name, node=UnsupGenerateConfig)
hydra_main()
if __name__ == "__main__":
cli_main()
| EXA-1-master | exa/libraries/fairseq/examples/wav2vec/unsupervised/w2vu_generate.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from .unpaired_audio_text import UnpairedAudioText
__all__ = [
"UnpairedAudioText",
]
| EXA-1-master | exa/libraries/fairseq/examples/wav2vec/unsupervised/tasks/__init__.py |
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the LICENSE file in
# the root directory of this source tree. An additional grant of patent rights
# can be found in the PATENTS file in the same directory.
from dataclasses import dataclass, field
import logging
import math
import os
from typing import Optional
import torch
from fairseq.logging import metrics
from fairseq.tasks import FairseqTask, register_task
from ..data import ExtractedFeaturesDataset, RandomInputDataset
from fairseq.data import (
Dictionary,
data_utils,
StripTokenDataset,
)
from fairseq.dataclass import FairseqDataclass
from fairseq.distributed.utils import get_data_parallel_world_size
from omegaconf import MISSING
from examples.speech_recognition.kaldi.kaldi_decoder import (
KaldiDecoder,
KaldiDecoderConfig,
)
logger = logging.getLogger(__name__)
@dataclass
class DecodingConfig(FairseqDataclass):
kenlm_path: Optional[str] = None
lm_weight: float = 0
blank_weight: float = 0
@dataclass
class UnpairedAudioTextConfig(FairseqDataclass):
data: str = field(
default=MISSING, metadata={"help": "path to data directory containing audio"}
)
text_data: str = field(
default=MISSING, metadata={"help": "path to data directory containing text"}
)
max_length: Optional[int] = None
labels: Optional[str] = field(
default=None,
metadata={"help": "extension of the label file to load, used for fine-tuning"},
)
aux_target_postfix: Optional[str] = field(
default=None,
metadata={"help": "auxiliary target filename extension"},
)
unfiltered: bool = field(
default=False, metadata={"help": "load data with _unfiltered suffix"}
)
ctc_eval: bool = field(
default=False, metadata={"help": "eval UER as if computed by CTC"}
)
sort_by_length: bool = field(
default=True, metadata={"help": "sort examples by length of audio timesteps"}
)
shuffle: bool = field(default=True, metadata={"help": "shuffle examples"})
append_eos: bool = field(default=False, metadata={"help": "append eos"})
uppercase: Optional[bool] = field(
default=False, metadata={"help": "uppercase for LM score computation"}
)
skipwords: Optional[str] = field(
default="",
metadata={
"help": "comma-separated words to be removed for LM score computation"
},
)
kenlm_path: Optional[str] = None
vocab_usage_power: float = 2
word_decoder_config: Optional[KaldiDecoderConfig] = None
word_kenlm_path: Optional[str] = None
decoding_config: DecodingConfig = DecodingConfig()
@register_task("unpaired_audio_text", dataclass=UnpairedAudioTextConfig)
class UnpairedAudioText(FairseqTask):
""" """
cfg: UnpairedAudioTextConfig
def __init__(
self,
cfg: UnpairedAudioTextConfig,
source_dictionary=None,
target_dictionary=None,
):
super().__init__(cfg)
self._target_dictionary = target_dictionary
self._source_dictionary = source_dictionary
self.num_symbols = (
len([s for s in target_dictionary.symbols if not s.startswith("madeup")])
- target_dictionary.nspecial
)
self.sil_id = (
target_dictionary.index("<SIL>") if "<SIL>" in target_dictionary else -1
)
self.kenlm = None
if cfg.kenlm_path is not None:
import kenlm
self.kenlm = kenlm.Model(cfg.kenlm_path)
self.word_kenlm = None
if cfg.word_kenlm_path is not None:
import kenlm
self.word_kenlm = kenlm.Model(cfg.word_kenlm_path)
self.uppercase = cfg.uppercase
self.skipwords = set(cfg.skipwords.split(","))
def str_postprocess(s):
s = " ".join(w for w in s.split() if w not in self.skipwords)
s = s.upper() if self.uppercase else s
return s
self.str_postprocess = str_postprocess
self.compute_lm_score = lambda s: self.kenlm.score(self.str_postprocess(s))
self.compute_word_score = None
if cfg.word_decoder_config is not None:
self.kaldi_decoder = KaldiDecoder(cfg.word_decoder_config, beam=10)
def compute_word_score(logits, padding):
res = self.kaldi_decoder.decode(logits, padding)
for r in res:
r = r.result()
assert len(r) == 1
r = r[0]
yield r["score"], r["words"]
self.compute_word_score = compute_word_score
@classmethod
def setup_task(cls, cfg: UnpairedAudioTextConfig, **kwargs):
"""Setup the task (e.g., load dictionaries).
Args:
cfg (AudioPretrainingConfig): configuration of this task
"""
dict_path = os.path.join(cfg.text_data, "dict.txt")
if os.path.exists(dict_path):
target_dictionary = Dictionary.load(dict_path)
else:
dict_path = os.path.join(cfg.data, f"dict.{cfg.labels}.txt")
target_dictionary = Dictionary.load(dict_path)
return cls(cfg, target_dictionary=target_dictionary)
def optimizer_step(self, optimizer, model, update_num):
if hasattr(model, "get_groups_for_update"):
groups = model.get_groups_for_update(update_num)
optimizer.step(groups={groups})
else:
optimizer.step()
def valid_step(self, sample, model, criterion):
res = model(
**sample["net_input"],
dense_x_only=True,
)
dense_x = res["logits"]
padding_mask = res["padding_mask"]
word_scores = None
if self.compute_word_score is not None:
word_scores = self.compute_word_score(dense_x.cpu(), padding_mask.cpu())
z = dense_x.argmax(-1)
z[padding_mask] = self.target_dictionary.pad()
vocab_seen = torch.zeros(self.num_symbols, dtype=torch.bool)
import editdistance
c_err = 0
c_len = 0
pred_c_len = 0
lm_score_sum = 0
for i, (x, t, id) in enumerate(
zip(
z,
sample["target"] if "target" in sample else [None] * len(z),
sample["id"],
)
):
if t is not None:
t = t[(t >= self.target_dictionary.nspecial)]
x = x[
(x >= self.target_dictionary.nspecial)
& (x < (self.num_symbols + self.target_dictionary.nspecial))
]
if self.sil_id >= 0:
x = x[x != self.sil_id]
vocab_seen[x - self.target_dictionary.nspecial] = True
pred_units_arr = x
if self.cfg.ctc_eval:
pred_units_arr = pred_units_arr.unique_consecutive()
pred_units_arr = pred_units_arr[pred_units_arr != 0]
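# Mirrors greedy CTC decoding: collapse consecutive repeats, then drop the
# blank symbol (index 0), e.g. [5, 5, 0, 3, 3, 3, 0] -> [5, 3].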
if id == 0:
if t is not None:
logger.info(f"REF: {self.target_dictionary.string(t)}")
logger.info(f"HYP: {self.target_dictionary.string(pred_units_arr)}")
if self.kenlm is not None:
if t is not None:
ref_lm_s = self.compute_lm_score(
self.target_dictionary.string(t)
)
logger.info(
f"LM [REF]: {ref_lm_s}, {math.pow(10, -ref_lm_s / (len(t) + 1))}"
)
hyp_lm_s = self.compute_lm_score(
self.target_dictionary.string(pred_units_arr)
)
logger.info(
f"LM [HYP]: {hyp_lm_s}, {math.pow(10, -hyp_lm_s / (len(pred_units_arr) + 1))}"
)
pred_units_arr = pred_units_arr.tolist()
pred_c_len += len(pred_units_arr)
if t is not None:
t = t.tolist()
c_err += editdistance.eval(pred_units_arr, t)
c_len += len(t)
else:
c_len = pred_c_len
if self.kenlm is not None:
pred_str = self.target_dictionary.string(pred_units_arr)
lm_score = self.compute_lm_score(pred_str)
lm_score_sum += lm_score
kaldi_score_sum = 0
word_lm_sum = 0
num_words = 0
if word_scores is not None:
for score, words in word_scores:
kaldi_score_sum += score
num_words += len(words)
if self.word_kenlm is not None:
word_lm_sum += self.kenlm.score(" ".join(words))
try:
world_size = get_data_parallel_world_size()
except:
world_size = 1
logging_output = {
"loss": c_err,
"_num_char_errors": c_err,
"_num_chars": c_len,
"_num_pred_chars": pred_c_len,
"ntokens": c_len,
"nsentences": z.size(0),
"sample_size": c_len,
"_world_size": world_size,
"_lm_score_sum": lm_score_sum,
"_kaldi_score_sum": kaldi_score_sum,
"_word_lm_sum": word_lm_sum,
"_num_words": num_words,
"_vocab_seen": vocab_seen,
}
return c_err, c_len, logging_output
def load_dataset(self, split: str, task_cfg: FairseqDataclass = None, **kwargs):
data_path = self.cfg.data
task_cfg = task_cfg or self.cfg
has_unpaired_text = os.path.exists(
os.path.join(self.cfg.text_data, f"{split}.idx")
)
self.datasets[split] = ExtractedFeaturesDataset(
path=data_path,
split=split,
min_length=3,
max_length=task_cfg.max_length,
labels=None if has_unpaired_text else task_cfg.labels,
label_dict=self.target_dictionary,
shuffle=getattr(task_cfg, "shuffle", True),
sort_by_length=task_cfg.sort_by_length,
aux_target_postfix=task_cfg.aux_target_postfix,
)
logger.info(f"split {split} has unpaired text? {has_unpaired_text}")
if has_unpaired_text:
text_dataset = data_utils.load_indexed_dataset(
os.path.join(self.cfg.text_data, split), self.target_dictionary
)
text_dataset = StripTokenDataset(text_dataset, self.target_dictionary.eos())
self.datasets[split] = RandomInputDataset(
self.datasets[split],
text_dataset,
["random_label"],
add_to_input=True,
pad_idx=self.target_dictionary.pad(),
)
@property
def source_dictionary(self):
return self._source_dictionary
@property
def target_dictionary(self):
"""Return the :class:`~fairseq.data.Dictionary` for the language
model."""
return self._target_dictionary
def max_positions(self):
"""Maximum input length supported by the encoder."""
return None
def reduce_metrics(self, logging_outputs, criterion):
super().reduce_metrics(logging_outputs, criterion)
zero = torch.scalar_tensor(0.0)
num_char_errors = sum(
log.get("_num_char_errors", zero) for log in logging_outputs
)
num_chars = sum(log.get("_num_chars", zero) for log in logging_outputs)
num_word_errors = sum(
log.get("_num_word_errors", zero) for log in logging_outputs
)
num_words = sum(log.get("_num_words", zero) for log in logging_outputs)
num_pred_chars = sum(
log.get("_num_pred_chars", zero) for log in logging_outputs
)
lm_score_sum = sum(log.get("_lm_score_sum", zero) for log in logging_outputs)
vocab_seen = (
sum(log.get("_vocab_seen", zero) for log in logging_outputs)
.bool()
.sum()
.item()
)
kaldi_score_sum = sum(
log.get("_kaldi_score_sum", zero) for log in logging_outputs
)
word_lm_sum = sum(log.get("_word_lm_sum", zero) for log in logging_outputs)
metrics.log_scalar_sum("_num_char_errors", num_char_errors)
metrics.log_scalar_sum("_num_chars", num_chars)
metrics.log_scalar_sum("_num_word_errors", num_word_errors)
metrics.log_scalar_sum("_num_words", num_words)
metrics.log_scalar_sum("lm_score_sum", lm_score_sum)
metrics.log_scalar_sum("num_pred_chars", num_pred_chars)
if self.cfg.word_kenlm_path is not None:
metrics.log_scalar_sum("kaldi_score_sum", kaldi_score_sum)
metrics.log_scalar_sum("word_lm_sum", word_lm_sum)
if num_chars > 0:
metrics.log_derived(
"uer",
lambda meters: meters["_num_char_errors"].sum
* 100.0
/ meters["_num_chars"].sum
if meters["_num_chars"].sum > 0
else float("nan"),
)
if lm_score_sum < 0 and vocab_seen > 0:
metrics.log_scalar("vocab_seen_pct", vocab_seen / self.num_symbols)
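# weighted_lm_ppl is the label-free validation proxy used to pick checkpoints:
# LM perplexity of the predictions divided by the fraction of the vocabulary
# actually emitted, raised to vocab_usage_power. This penalizes degenerate
# solutions that obtain a good LM score by producing only a few frequent units.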
metrics.log_derived(
"weighted_lm_ppl",
lambda meters: math.pow(
10,
-meters["lm_score_sum"].sum
/ (
meters["num_pred_chars"].sum + meters["nsentences"].sum
), # account for </s>
)
/ meters["vocab_seen_pct"].avg ** self.cfg.vocab_usage_power,
)
metrics.log_derived(
"lm_ppl",
lambda meters: math.pow(
10,
-meters["lm_score_sum"].sum
/ (
meters["num_pred_chars"].sum + meters["nsentences"].sum
), # account for </s>
),
)
else:
metrics.log_derived("weighted_lm_ppl", lambda meters: float("inf"))
if num_words > 0:
if word_lm_sum != 0:
metrics.log_derived(
"word_lm_ppl",
lambda meters: math.pow(
10,
-meters["word_lm_sum"].sum
/ (
meters["_num_words"].sum + meters["nsentences"].sum
), # account for </s>
),
)
metrics.log_derived(
"weighted_word_lm_ppl",
lambda meters: math.pow(
10,
-meters["word_lm_sum"].sum
/ (
meters["_num_words"].sum + meters["nsentences"].sum
), # account for </s>
)
/ meters["vocab_seen_pct"].avg ** self.cfg.vocab_usage_power,
)
if self.cfg.word_kenlm_path is not None:
metrics.log_derived(
"kaldi_score",
lambda meters: meters["kaldi_score_sum"].sum
/ meters["nsentences"].sum,
)
def build_model(self, cfg: FairseqDataclass, from_checkpoint=False):
model = super().build_model(cfg)
return model
| EXA-1-master | exa/libraries/fairseq/examples/wav2vec/unsupervised/tasks/unpaired_audio_text.py |
import kaldi_io
import numpy as np
import os
def get_parser():
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("w2v_dir", help="wav2vec feature and text directory")
parser.add_argument("tar_root", help="output data directory in kaldi's format")
parser.add_argument("split", help="name of the subset")
parser.add_argument("--label", default="", help="if specified, copy labels too")
return parser
def main():
parser = get_parser()
args = parser.parse_args()
tar_dir = os.path.join(args.tar_root, args.split)
os.makedirs(tar_dir, exist_ok=True)
lengths_path = os.path.join(args.w2v_dir, f"{args.split}.lengths")
with open(lengths_path) as f:
lengths = [int(line.rstrip()) for line in f]
offsets = [0] + np.cumsum(lengths[:-1]).tolist()
feats = np.load(
os.path.join(args.w2v_dir, f"{args.split}.npy"),
mmap_mode="r"
)
assert feats.shape[0] == sum(lengths), \
f"lengths mismatch {feats.shape[0]} != {sum(lengths)}"
ark_path = os.path.join(tar_dir, "feats.ark")
scp_path = os.path.join(tar_dir, "feats.scp")
wspec = f"ark:| copy-feats --compress=true ark:- ark,scp:{ark_path},{scp_path}"
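# kaldi_io pipes each matrix into Kaldi's copy-feats, which writes a
# compressed archive (feats.ark) together with an index (feats.scp) so the
# features can be consumed by standard Kaldi recipes.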
with kaldi_io.open_or_fd(wspec, "wb") as f:
for idx, (offset, length) in enumerate(zip(offsets, lengths)):
feat = feats[offset:offset+length]
kaldi_io.write_mat(f, feat, key=f"utt{idx:010d}")
u2s_path = os.path.join(tar_dir, "utt2spk")
s2u_path = os.path.join(tar_dir, "spk2utt")
with open(u2s_path, "w") as f_u2s, open(s2u_path, "w") as f_s2u:
for idx in range(len(lengths)):
f_u2s.write(f"utt{idx:010d} utt{idx:010d}\n")
f_s2u.write(f"utt{idx:010d} utt{idx:010d}\n")
if bool(args.label):
lab_path = os.path.join(args.w2v_dir, f"{args.split}.{args.label}")
txt_path = os.path.join(tar_dir, "text")
with open(lab_path) as f_lab, open(txt_path, "w") as f_txt:
for idx, line in enumerate(f_lab):
f_txt.write(f"utt{idx:010d} {line}")
if __name__ == "__main__":
main()
| EXA-1-master | exa/libraries/fairseq/examples/wav2vec/unsupervised/kaldi_self_train/st/local/prepare_data_from_w2v.py |
"""
Implement unsupervised metric for decoding hyperparameter selection:
$$ score = log(LM_PPL) * max(ViterbiUER, min_vt_uer) $$
"""
import argparse
import logging
import math
import sys
import kenlm
import editdistance
from g2p_en import G2p
logging.root.setLevel(logging.INFO)
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
logger = logging.getLogger(__name__)
def get_parser():
parser = argparse.ArgumentParser()
parser.add_argument("ref_tra", help="reference pseudo labels")
parser.add_argument("hyp_tra", help="decoded pseudo labels to be assessed")
parser.add_argument("--kenlm_path", default="/checkpoint/abaevski/data/speech/libri/librispeech_lm_novox.phnc_o5.bin", help="")
parser.add_argument("--uppercase", action="store_true", help="")
parser.add_argument("--skipwords", default="", help="")
parser.add_argument("--gt_tra", default="", help="ground truth pseudo labels for computing oracle WER")
parser.add_argument("--min_vt_uer", default=0.0, type=float)
parser.add_argument("--phonemize", action="store_true", help="phonemize word hypotheses, used when reference is phone transcript")
parser.add_argument("--phonemize_lexicon", default="", type=str, help="use a lexicon for phonemizing")
return parser
def load_tra(tra_path):
with open(tra_path, "r") as f:
uid_to_tra = {}
for line in f:
toks = line.rstrip().split()
uid, tra = toks[0], " ".join(toks[1:])
uid_to_tra[uid] = tra
logger.debug(f"loaded {len(uid_to_tra)} utterances from {tra_path}")
return uid_to_tra
def load_lex(lex_path):
with open(lex_path, "r") as f:
w2p = {}
for line in f:
w, p = line.rstrip().split(None, 1)
w2p[w] = p.split()
return w2p
def compute_wer(ref_uid_to_tra, hyp_uid_to_tra, g2p, g2p_dict):
d_cnt = 0
w_cnt = 0
w_cnt_h = 0
for uid in hyp_uid_to_tra:
ref = ref_uid_to_tra[uid].split()
if g2p_dict is not None:
hyp = []
for word in hyp_uid_to_tra[uid].split():
if word in g2p_dict:
hyp = hyp + g2p_dict[word]
else:
logger.warning(f"{word} not in g2p_dict")
elif g2p is not None:
hyp = g2p(hyp_uid_to_tra[uid])
hyp = [p for p in hyp if p != "'" and p != " "]
hyp = [p[:-1] if p[-1].isnumeric() else p for p in hyp]
else:
hyp = hyp_uid_to_tra[uid].split()
logger.debug((
f"======================\n"
f"HYP: {' '.join(hyp)}\n"
f"REF: {' '.join(ref)}"
))
d_cnt += editdistance.eval(ref, hyp)
w_cnt += len(ref)
w_cnt_h += len(hyp)
wer = float(d_cnt) / w_cnt
logger.debug((
f"wer = {wer*100:.2f}%; num. of ref words = {w_cnt}; "
f"num. of hyp words = {w_cnt_h}; num. of sentences = {len(ref_uid_to_tra)}"
))
return wer
def compute_lm_ppl(hyp_uid_to_tra, score_fn):
lm_score = 0.
w_cnt = 0
for hyp in hyp_uid_to_tra.values():
cur_score = score_fn(hyp)
cur_cnt = len(hyp.split()) + 1 # plus one for </s>
lm_score += cur_score
w_cnt += cur_cnt
logger.debug((
f"======================\n"
f"score sum/avg = {cur_score:.2f}/{cur_score/cur_cnt:.2f}\n"
f"hyp = {hyp}"
))
lm_ppl = math.pow(10, -lm_score / w_cnt)
logger.debug(f"lm ppl = {lm_ppl:.2f}; num. of words = {w_cnt}")
return lm_ppl
def main():
args = get_parser().parse_args()
logger.debug(f"Args: {args}")
ref_uid_to_tra = load_tra(args.ref_tra)
hyp_uid_to_tra = load_tra(args.hyp_tra)
assert not bool(set(hyp_uid_to_tra.keys()) - set(ref_uid_to_tra.keys()))
lm = kenlm.Model(args.kenlm_path)
skipwords = set(args.skipwords.split(","))
def compute_lm_score(s):
s = " ".join(w for w in s.split() if w not in skipwords)
s = s.upper() if args.uppercase else s
return lm.score(s)
g2p, g2p_dict = None, None
if args.phonemize:
if args.phonemize_lexicon:
g2p_dict = load_lex(args.phonemize_lexicon)
else:
g2p = G2p()
wer = compute_wer(ref_uid_to_tra, hyp_uid_to_tra, g2p, g2p_dict)
lm_ppl = compute_lm_ppl(hyp_uid_to_tra, compute_lm_score)
gt_wer = -math.inf
if args.gt_tra:
gt_uid_to_tra = load_tra(args.gt_tra)
gt_wer = compute_wer(gt_uid_to_tra, hyp_uid_to_tra, None, None)
score = math.log(lm_ppl) * max(wer, args.min_vt_uer)
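# Lower scores are better. For example, lm_ppl = 20 with wer = 0.30 gives
# log(20) * 0.30 ~= 0.90, while the same perplexity with wer = 0.80 gives
# ~= 2.40 (math.log is the natural log here).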
logging.info(f"{args.hyp_tra}: score={score:.4f}; wer={wer*100:.2f}%; lm_ppl={lm_ppl:.4f}; gt_wer={gt_wer*100:.2f}%")
if __name__ == "__main__":
main()
| EXA-1-master | exa/libraries/fairseq/examples/wav2vec/unsupervised/kaldi_self_train/st/local/unsup_select.py |
import sys
for idx, line in enumerate(sys.stdin):
print(f"utt{idx:010d} {line}", end='') | EXA-1-master | exa/libraries/fairseq/examples/wav2vec/unsupervised/kaldi_self_train/st/local/copy_aligned_text.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from dataclasses import dataclass
from enum import Enum, auto
import math
import numpy as np
from typing import Tuple, List, Optional, Dict
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch import autograd
from fairseq import checkpoint_utils, utils
from fairseq.dataclass import FairseqDataclass
from fairseq.models import BaseFairseqModel, register_model
from fairseq.modules import (
SamePad,
TransposeLast,
)
class SegmentationType(Enum):
NONE = auto()
RANDOM = auto()
UNIFORM_RANDOM = auto()
UNIFORM_RANDOM_JOIN = auto()
JOIN = auto()
@dataclass
class SegmentationConfig(FairseqDataclass):
type: SegmentationType = SegmentationType.NONE
subsample_rate: float = 0.25
mean_pool: bool = True
mean_pool_join: bool = False
remove_zeros: bool = False
@dataclass
class Wav2vec_UConfig(FairseqDataclass):
discriminator_kernel: int = 3
discriminator_dilation: int = 1
discriminator_dim: int = 256
discriminator_causal: bool = True
discriminator_linear_emb: bool = False
discriminator_depth: int = 1
discriminator_max_pool: bool = False
discriminator_act_after_linear: bool = False
discriminator_dropout: float = 0.0
discriminator_spectral_norm: bool = False
discriminator_weight_norm: bool = False
generator_kernel: int = 4
generator_dilation: int = 1
generator_stride: int = 1
generator_pad: int = -1
generator_bias: bool = False
generator_dropout: float = 0.0
generator_batch_norm: int = 0
generator_residual: bool = False
blank_weight: float = 0
blank_mode: str = "add"
blank_is_sil: bool = False
no_softmax: bool = False
smoothness_weight: float = 0.0
smoothing: float = 0.0
smoothing_one_sided: bool = False
gradient_penalty: float = 0.0
probabilistic_grad_penalty_slicing: bool = False
code_penalty: float = 0.0
mmi_weight: float = 0.0
target_dim: int = 64
target_downsample_rate: int = 2
gumbel: bool = False
hard_gumbel: bool = True
temp: Tuple[float, float, float] = (2, 0.1, 0.99995)
input_dim: int = 128
segmentation: SegmentationConfig = SegmentationConfig()
class Segmenter(nn.Module):
cfg: SegmentationConfig
def __init__(self, cfg: SegmentationConfig):
super().__init__()
self.cfg = cfg
self.subsample_rate = cfg.subsample_rate
def pre_segment(self, dense_x, dense_padding_mask):
return dense_x, dense_padding_mask
def logit_segment(self, logits, padding_mask):
return logits, padding_mask
class RandomSegmenter(Segmenter):
def pre_segment(self, dense_x, dense_padding_mask):
target_num = math.ceil(dense_x.size(1) * self.subsample_rate)
ones = torch.ones(dense_x.shape[:-1], device=dense_x.device)
indices, _ = ones.multinomial(target_num).sort(dim=-1)
indices_ld = indices.unsqueeze(-1).expand(-1, -1, dense_x.size(-1))
dense_x = dense_x.gather(1, indices_ld)
dense_padding_mask = dense_padding_mask.gather(1, index=indices)
return dense_x, dense_padding_mask
class UniformRandomSegmenter(Segmenter):
def pre_segment(self, dense_x, dense_padding_mask):
bsz, tsz, fsz = dense_x.shape
target_num = math.ceil(tsz * self.subsample_rate)
rem = tsz % target_num
if rem > 0:
dense_x = F.pad(dense_x, [0, 0, 0, target_num - rem])
dense_padding_mask = F.pad(
dense_padding_mask, [0, target_num - rem], value=True
)
dense_x = dense_x.view(bsz, target_num, -1, fsz)
dense_padding_mask = dense_padding_mask.view(bsz, target_num, -1)
if self.cfg.mean_pool:
dense_x = dense_x.mean(dim=-2)
dense_padding_mask = dense_padding_mask.all(dim=-1)
else:
ones = torch.ones((bsz, dense_x.size(2)), device=dense_x.device)
indices = ones.multinomial(1)
indices = indices.unsqueeze(-1).expand(-1, target_num, -1)
indices_ld = indices.unsqueeze(-1).expand(-1, -1, -1, fsz)
dense_x = dense_x.gather(2, indices_ld).reshape(bsz, -1, fsz)
dense_padding_mask = dense_padding_mask.gather(2, index=indices).reshape(
bsz, -1
)
return dense_x, dense_padding_mask
class JoinSegmenter(Segmenter):
def logit_segment(self, logits, padding_mask):
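# Collapses runs of identical argmax predictions into one segment per run,
# e.g. predictions [7, 7, 7, 2, 2, 9] become three segments. During training a
# random frame within each run is kept (unless mean_pool_join is set); at
# inference the logits of each run are averaged.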
preds = logits.argmax(dim=-1)
if padding_mask.any():
preds[padding_mask] = -1 # mark pad
uniques = []
bsz, tsz, csz = logits.shape
for p in preds:
uniques.append(
p.cpu().unique_consecutive(return_inverse=True, return_counts=True)
)
new_tsz = max(u[0].numel() for u in uniques)
new_logits = logits.new_zeros(bsz, new_tsz, csz)
new_pad = padding_mask.new_zeros(bsz, new_tsz)
for b in range(bsz):
u, idx, c = uniques[b]
keep = u != -1
if self.cfg.remove_zeros:
keep.logical_and_(u != 0)
if self.training and not self.cfg.mean_pool_join:
u[0] = 0
u[1:] = c.cumsum(0)[:-1]
m = c > 1
r = torch.rand(m.sum())
o = (c[m] * r).long()
u[m] += o
new_logits[b, : u.numel()] = logits[b, u]
else:
new_logits[b].index_add_(
dim=0, index=idx.to(new_logits.device), source=logits[b]
)
new_logits[b, : c.numel()] /= c.unsqueeze(-1).to(new_logits.device)
new_sz = keep.sum()
if not keep.all():
kept_logits = new_logits[b, : c.numel()][keep]
new_logits[b, :new_sz] = kept_logits
if new_sz < new_tsz:
pad = new_tsz - new_sz
new_logits[b, -pad:] = 0
new_pad[b, -pad:] = True
return new_logits, new_pad
class UniformRandomJoinSegmenter(UniformRandomSegmenter, JoinSegmenter):
pass
SEGMENT_FACTORY = {
SegmentationType.NONE: Segmenter,
SegmentationType.RANDOM: RandomSegmenter,
SegmentationType.UNIFORM_RANDOM: UniformRandomSegmenter,
SegmentationType.UNIFORM_RANDOM_JOIN: UniformRandomJoinSegmenter,
SegmentationType.JOIN: JoinSegmenter,
}
class Discriminator(nn.Module):
def __init__(self, dim, cfg: Wav2vec_UConfig):
super().__init__()
inner_dim = cfg.discriminator_dim
kernel = cfg.discriminator_kernel
dilation = cfg.discriminator_dilation
self.max_pool = cfg.discriminator_max_pool
if cfg.discriminator_causal:
padding = kernel - 1
else:
padding = kernel // 2
def make_conv(in_d, out_d, k, p=0, has_dilation=True):
conv = nn.Conv1d(
in_d,
out_d,
kernel_size=k,
padding=p,
dilation=dilation if has_dilation else 1,
)
if cfg.discriminator_spectral_norm:
conv = nn.utils.spectral_norm(conv)
elif cfg.discriminator_weight_norm:
conv = nn.utils.weight_norm(conv)
return conv
inner_net = [
nn.Sequential(
make_conv(inner_dim, inner_dim, kernel, padding),
SamePad(kernel_size=kernel, causal=cfg.discriminator_causal),
nn.Dropout(cfg.discriminator_dropout),
nn.GELU(),
)
for _ in range(cfg.discriminator_depth - 1)
] + [
make_conv(inner_dim, 1, kernel, padding, has_dilation=False),
SamePad(kernel_size=kernel, causal=cfg.discriminator_causal),
]
if cfg.discriminator_linear_emb:
emb_net = [make_conv(dim, inner_dim, 1)]
else:
emb_net = [
make_conv(dim, inner_dim, kernel, padding),
SamePad(kernel_size=kernel, causal=cfg.discriminator_causal),
]
if cfg.discriminator_act_after_linear:
emb_net.append(nn.GELU())
self.net = nn.Sequential(
*emb_net,
nn.Dropout(cfg.discriminator_dropout),
*inner_net,
)
def forward(self, x, padding_mask):
x = x.transpose(1, 2) # BTC -> BCT
x = self.net(x)
x = x.transpose(1, 2)
x_sz = x.size(1)
if padding_mask is not None and padding_mask.any() and padding_mask.dim() > 1:
padding_mask = padding_mask[:, : x.size(1)]
x[padding_mask] = float("-inf") if self.max_pool else 0
x_sz = x_sz - padding_mask.sum(dim=-1)
x = x.squeeze(-1)
if self.max_pool:
x, _ = x.max(dim=-1)
else:
x = x.sum(dim=-1)
x = x / x_sz
return x
class Generator(nn.Module):
def __init__(self, input_dim, output_dim, cfg: Wav2vec_UConfig):
super().__init__()
self.cfg = cfg
self.output_dim = output_dim
self.stride = cfg.generator_stride
self.dropout = nn.Dropout(cfg.generator_dropout)
self.batch_norm = cfg.generator_batch_norm != 0
self.residual = cfg.generator_residual
padding = (
cfg.generator_kernel // 2 if cfg.generator_pad < 0 else cfg.generator_pad
)
self.proj = nn.Sequential(
TransposeLast(),
nn.Conv1d(
input_dim,
output_dim,
kernel_size=cfg.generator_kernel,
stride=cfg.generator_stride,
dilation=cfg.generator_dilation,
padding=padding,
bias=cfg.generator_bias,
),
TransposeLast(),
)
if self.batch_norm:
self.bn = nn.BatchNorm1d(input_dim)
self.bn.weight.data.fill_(cfg.generator_batch_norm)
if self.residual:
self.in_proj = nn.Linear(input_dim, input_dim)
def forward(self, dense_x, tokens, dense_padding_mask):
result = {}
if self.batch_norm:
dense_x = self.bn_padded_data(dense_x, dense_padding_mask)
if self.residual:
inter_x = self.in_proj(self.dropout(dense_x))
dense_x = dense_x + inter_x
result["inter_x"] = inter_x
dense_x = self.dropout(dense_x)
dense_x = self.proj(dense_x)
if self.stride > 1:
dense_padding_mask = dense_padding_mask[:, :: self.stride]
if dense_padding_mask.size(1) != dense_x.size(1):
new_padding = dense_padding_mask.new_zeros(dense_x.shape[:-1])
diff = new_padding.size(1) - dense_padding_mask.size(1)
if diff > 0:
new_padding[:, diff:] = dense_padding_mask
else:
assert diff < 0
new_padding = dense_padding_mask[:, :diff]
dense_padding_mask = new_padding
token_x = None
if tokens is not None:
token_x = dense_x.new_zeros(tokens.numel(), self.output_dim)
token_x.scatter_(1, tokens.view(-1, 1).long(), 1)
token_x = token_x.view(tokens.shape + (self.output_dim,))
result["dense_x"] = dense_x
result["token_x"] = token_x
result["dense_padding_mask"] = dense_padding_mask
return result
def bn_padded_data(self, feature, padding_mask):
normed_feature = feature.clone()
normed_feature[~padding_mask] = self.bn(
feature[~padding_mask].unsqueeze(-1)
).squeeze(-1)
return normed_feature
@register_model("wav2vec_u", dataclass=Wav2vec_UConfig)
class Wav2vec_U(BaseFairseqModel):
def calc_gradient_penalty(self, real_data, fake_data):
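# Standard WGAN-GP style penalty: interpolate between real (one-hot text
# tokens) and fake (generator output) samples and penalize the squared
# deviation of the discriminator's gradient norm from 1 at those points.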
b_size = min(real_data.size(0), fake_data.size(0))
t_size = min(real_data.size(1), fake_data.size(1))
if self.cfg.probabilistic_grad_penalty_slicing:
def get_slice(data, dim, target_size):
size = data.size(dim)
diff = size - target_size
if diff <= 0:
return data
start = np.random.randint(0, diff + 1)
return data.narrow(dim=dim, start=start, length=target_size)
real_data = get_slice(real_data, 0, b_size)
real_data = get_slice(real_data, 1, t_size)
fake_data = get_slice(fake_data, 0, b_size)
fake_data = get_slice(fake_data, 1, t_size)
else:
real_data = real_data[:b_size, :t_size]
fake_data = fake_data[:b_size, :t_size]
alpha = torch.rand(real_data.size(0), 1, 1)
alpha = alpha.expand(real_data.size())
alpha = alpha.to(real_data.device)
interpolates = alpha * real_data + ((1 - alpha) * fake_data)
disc_interpolates = self.discriminator(interpolates, None)
gradients = autograd.grad(
outputs=disc_interpolates,
inputs=interpolates,
grad_outputs=torch.ones(disc_interpolates.size(), device=real_data.device),
create_graph=True,
retain_graph=True,
only_inputs=True,
)[0]
gradient_penalty = (gradients.norm(2, dim=1) - 1) ** 2
return gradient_penalty
def set_num_updates(self, num_updates):
super().set_num_updates(num_updates)
self.update_num = num_updates
self.curr_temp = max(
self.max_temp * self.temp_decay ** num_updates, self.min_temp
)
def discrim_step(self, num_updates):
return num_updates % 2 == 1
def get_groups_for_update(self, num_updates):
return "discriminator" if self.discrim_step(num_updates) else "generator"
def __init__(self, cfg: Wav2vec_UConfig, target_dict):
super().__init__()
self.cfg = cfg
self.zero_index = target_dict.index("<SIL>") if "<SIL>" in target_dict else 0
self.smoothness_weight = cfg.smoothness_weight
output_size = len(target_dict)
self.pad = target_dict.pad()
self.eos = target_dict.eos()
self.smoothing = cfg.smoothing
self.smoothing_one_sided = cfg.smoothing_one_sided
self.no_softmax = cfg.no_softmax
self.gumbel = cfg.gumbel
self.hard_gumbel = cfg.hard_gumbel
self.last_acc = None
self.gradient_penalty = cfg.gradient_penalty
self.code_penalty = cfg.code_penalty
self.mmi_weight = cfg.mmi_weight
self.blank_weight = cfg.blank_weight
self.blank_mode = cfg.blank_mode
self.blank_index = target_dict.index("<SIL>") if cfg.blank_is_sil else 0
assert self.blank_index != target_dict.unk()
self.discriminator = Discriminator(output_size, cfg)
for p in self.discriminator.parameters():
p.param_group = "discriminator"
self.pca_A = self.pca_b = None
d = cfg.input_dim
self.segmenter = SEGMENT_FACTORY[cfg.segmentation.type](cfg.segmentation)
self.generator = Generator(d, output_size, cfg)
for p in self.generator.parameters():
p.param_group = "generator"
for p in self.segmenter.parameters():
p.param_group = "generator"
self.max_temp, self.min_temp, self.temp_decay = cfg.temp
self.curr_temp = self.max_temp
self.update_num = 0
if self.mmi_weight > 0:
self.target_downsample_rate = cfg.target_downsample_rate
self.decoder = nn.Linear(d, cfg.target_dim)
for p in self.decoder.parameters():
p.param_group = "generator"
@classmethod
def build_model(cls, cfg, task):
return cls(cfg, task.target_dictionary)
def get_logits(
self,
net_output: Optional[Dict[str, List[Optional[torch.Tensor]]]],
normalize: bool = False,
):
logits = net_output["logits"]
if self.blank_weight != 0:
if self.blank_mode == "add":
logits[..., self.blank_index] += self.blank_weight
elif self.blank_mode == "set":
logits[..., self.blank_index] = self.blank_weight
else:
raise Exception(f"invalid blank mode {self.blank_mode}")
padding = net_output["padding_mask"]
if padding.any():
logits[padding] = float("-inf")
logits[padding][..., self.blank_index] = float("inf")
if normalize:
logits = utils.log_softmax(logits.float(), dim=-1)
return logits.transpose(0, 1)
def get_normalized_probs(
self,
net_output: Tuple[
torch.Tensor, Optional[Dict[str, List[Optional[torch.Tensor]]]]
],
log_probs: bool,
sample: Optional[Dict[str, torch.Tensor]] = None,
):
logits = self.get_logits(net_output)
probs = super().get_normalized_probs(logits, log_probs, sample)
# BTC -> TBC for ctc
probs = probs.transpose(0, 1)
return probs
def normalize(self, dense_x):
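# Computes two diversity measures before the (optionally Gumbel) softmax:
# code_perplexity from hard argmax counts and prob_perplexity from the
# averaged soft distribution. Both equal the number of classes under uniform
# usage and approach 1 when a single class dominates; prob_perplexity feeds
# the code penalty term in forward().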
bsz, tsz, csz = dense_x.shape
if dense_x.numel() == 0:
raise Exception(dense_x.shape)
_, k = dense_x.max(-1)
hard_x = (
dense_x.new_zeros(bsz * tsz, csz)
.scatter_(-1, k.view(-1, 1), 1.0)
.view(-1, csz)
)
hard_probs = torch.mean(hard_x.float(), dim=0)
code_perplexity = torch.exp(
-torch.sum(hard_probs * torch.log(hard_probs + 1e-7), dim=-1)
)
avg_probs = torch.softmax(dense_x.reshape(-1, csz).float(), dim=-1).mean(dim=0)
prob_perplexity = torch.exp(
-torch.sum(avg_probs * torch.log(avg_probs + 1e-7), dim=-1)
)
if not self.no_softmax:
if self.training and self.gumbel:
dense_x = F.gumbel_softmax(
dense_x.float(), tau=self.curr_temp, hard=self.hard_gumbel
).type_as(dense_x)
else:
dense_x = dense_x.softmax(-1)
return dense_x, code_perplexity, prob_perplexity
def forward(
self,
features,
padding_mask,
random_label=None,
dense_x_only=False,
segment=True,
aux_target=None,
):
if segment:
features, padding_mask = self.segmenter.pre_segment(features, padding_mask)
orig_size = features.size(0) * features.size(1) - padding_mask.sum()
gen_result = self.generator(features, random_label, padding_mask)
orig_dense_x, token_x = gen_result["dense_x"], gen_result["token_x"]
orig_dense_padding_mask = gen_result["dense_padding_mask"]
if segment:
dense_x, dense_padding_mask = self.segmenter.logit_segment(
orig_dense_x, orig_dense_padding_mask
)
else:
dense_x = orig_dense_x
dense_padding_mask = orig_dense_padding_mask
dense_logits = dense_x
prob_perplexity = None
code_perplexity = None
if not (self.no_softmax and dense_x_only):
dense_x, code_perplexity, prob_perplexity = self.normalize(dense_logits)
if dense_x_only or self.discriminator is None:
return {
"logits": dense_x,
"padding_mask": dense_padding_mask,
}
token_padding_mask = random_label == self.pad
dense_y = self.discriminator(dense_x, dense_padding_mask)
token_y = self.discriminator(token_x, token_padding_mask)
sample_size = features.size(0)
d_step = self.discrim_step(self.update_num)
fake_smooth = self.smoothing
real_smooth = self.smoothing
if self.smoothing_one_sided:
fake_smooth = 0
zero_loss = None
smoothness_loss = None
code_pen = None
mmi_loss = None
if d_step:
loss_dense = F.binary_cross_entropy_with_logits(
dense_y,
dense_y.new_ones(dense_y.shape) - fake_smooth,
reduction="sum",
)
loss_token = F.binary_cross_entropy_with_logits(
token_y,
token_y.new_zeros(token_y.shape) + real_smooth,
reduction="sum",
)
if self.training and self.gradient_penalty > 0:
grad_pen = self.calc_gradient_penalty(token_x, dense_x)
grad_pen = grad_pen.sum() * self.gradient_penalty
else:
grad_pen = None
else:
grad_pen = None
loss_token = None
loss_dense = F.binary_cross_entropy_with_logits(
dense_y,
dense_y.new_zeros(dense_y.shape) + fake_smooth,
reduction="sum",
)
num_vars = dense_x.size(-1)
if prob_perplexity is not None:
code_pen = (num_vars - prob_perplexity) / num_vars
code_pen = code_pen * sample_size * self.code_penalty
if self.smoothness_weight > 0:
smoothness_loss = F.mse_loss(
dense_logits[:, :-1], dense_logits[:, 1:], reduction="none"
)
smoothness_loss[dense_padding_mask[:, 1:]] = 0
smoothness_loss = (
smoothness_loss.mean() * sample_size * self.smoothness_weight
)
if (self.mmi_weight > 0) and (aux_target is not None):
inter_x = self.decoder(gen_result["inter_x"])
if self.target_downsample_rate > 1:
aux_target = aux_target[:, :: self.target_downsample_rate]
max_t_len = min(aux_target.shape[1], inter_x.shape[1])
mmi_loss = F.cross_entropy(
inter_x[:, :max_t_len].transpose(1, 2),
aux_target[:, :max_t_len],
ignore_index=-1,
reduction="none",
)
mmi_loss = mmi_loss.mean() * mmi_loss.shape[0] * self.mmi_weight
result = {
"losses": {
"grad_pen": grad_pen,
"code_pen": code_pen,
"smoothness": smoothness_loss,
"mmi": mmi_loss,
},
"temp": self.curr_temp,
"code_ppl": code_perplexity,
"prob_ppl": prob_perplexity,
"d_steps": int(d_step),
"sample_size": sample_size,
}
suff = "_d" if d_step else "_g"
result["losses"]["dense" + suff] = loss_dense
result["losses"]["token" + suff] = loss_token
return result
| EXA-1-master | exa/libraries/fairseq/examples/wav2vec/unsupervised/models/wav2vec_u.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from .wav2vec_u import Wav2vec_U
__all__ = [
"Wav2vec_U",
]
| EXA-1-master | exa/libraries/fairseq/examples/wav2vec/unsupervised/models/__init__.py |
#!/usr/bin/env python3 -u
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import sys
from copy import deepcopy
from scipy.signal import lfilter
import numpy as np
from tqdm import tqdm
import soundfile as sf
import os.path as osp
def get_parser():
parser = argparse.ArgumentParser(description="compute vad segments")
parser.add_argument(
"--rvad-home",
"-r",
help="path to rvad home (see https://github.com/zhenghuatan/rVADfast)",
required=True,
)
return parser
def rvad(speechproc, path):
winlen, ovrlen, pre_coef, nfilter, nftt = 0.025, 0.01, 0.97, 20, 512
ftThres = 0.5
vadThres = 0.4
opts = 1
data, fs = sf.read(path)
assert fs == 16_000, "sample rate must be 16khz"
ft, flen, fsh10, nfr10 = speechproc.sflux(data, fs, winlen, ovrlen, nftt)
# --spectral flatness --
pv01 = np.zeros(ft.shape[0])
pv01[np.less_equal(ft, ftThres)] = 1
pitch = deepcopy(ft)
pvblk = speechproc.pitchblockdetect(pv01, pitch, nfr10, opts)
# --filtering--
ENERGYFLOOR = np.exp(-50)
b = np.array([0.9770, -0.9770])
a = np.array([1.0000, -0.9540])
fdata = lfilter(b, a, data, axis=0)
# --pass 1--
noise_samp, noise_seg, n_noise_samp = speechproc.snre_highenergy(
fdata, nfr10, flen, fsh10, ENERGYFLOOR, pv01, pvblk
)
# sets noisy segments to zero
for j in range(n_noise_samp):
fdata[range(int(noise_samp[j, 0]), int(noise_samp[j, 1]) + 1)] = 0
vad_seg = speechproc.snre_vad(
fdata, nfr10, flen, fsh10, ENERGYFLOOR, pv01, pvblk, vadThres
)
return vad_seg, data
def main():
parser = get_parser()
args = parser.parse_args()
sys.path.append(args.rvad_home)
import speechproc
stride = 160
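# rVAD emits one speech/non-speech decision per 10 ms frame, which at a
# 16 kHz sampling rate corresponds to a hop of 160 samples; the stride is
# used below to convert frame indices back to sample offsets.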
lines = sys.stdin.readlines()
root = lines[0].rstrip()
for fpath in tqdm(lines[1:]):
path = osp.join(root, fpath.split()[0])
vads, wav = rvad(speechproc, path)
start = None
vad_segs = []
for i, v in enumerate(vads):
if start is None and v == 1:
start = i * stride
elif start is not None and v == 0:
vad_segs.append((start, i * stride))
start = None
if start is not None:
vad_segs.append((start, len(wav)))
print(" ".join(f"{v[0]}:{v[1]}" for v in vad_segs))
if __name__ == "__main__":
main()
| EXA-1-master | exa/libraries/fairseq/examples/wav2vec/unsupervised/scripts/vads.py |
#!/usr/bin/env python3 -u
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import os
import os.path as osp
import numpy as np
import tqdm
import torch
import sys
import faiss
import torch.nn.functional as F
from wav2vec_cluster_faiss import parse_faiss_specs, Wav2VecFeatureReader
def get_parser():
parser = argparse.ArgumentParser(description="apply clusters")
# fmt: off
parser.add_argument('data', help='location of tsv files')
parser.add_argument('--split', help='split to process', required=True)
parser.add_argument('--labels', help='extension of the label files to load (e.g. phn)', default="phn")
parser.add_argument('--path', help='path to pca and centroids', required=True)
parser.add_argument('--checkpoint', type=str, help='checkpoint for wav2vec model (if using wav2vec features)', required=True)
parser.add_argument('--layer', '-l', type=int, help='which layer to read', default=14)
parser.add_argument('--max-tsz', type=int, help='batch kmeans up to this much', default=14)
# fmt: on
return parser
def get_iterator(args):
label_path = osp.join(args.data, f"{args.split}.{args.labels}")
if osp.exists(label_path):
lp = open(label_path, "r")
else:
lp = None
with open(osp.join(args.data, f"{args.split}.tsv"), "r") as fp:
lines = fp.read().split("\n")
root = lines.pop(0).strip()
files = [line.rstrip() for line in lines if len(line) > 0]
if lp is not None:
lbls = [line.rstrip() for line in lp]
else:
lbls = [None] * len(files)
num = len(files)
reader = Wav2VecFeatureReader(args.checkpoint, args.layer)
def iterate():
for fname, lbl in zip(files, lbls):
file = osp.join(root, fname.split("\t")[0])
feats = reader.get_feats(file)
yield feats.data, fname, lbl
return iterate, num, root
def main():
parser = get_parser()
args = parser.parse_args()
spec = osp.basename(args.path)
try:
faiss_spec = parse_faiss_specs(spec.rstrip("/"))[0]
except:
print(spec)
raise
print("Faiss Spec:", faiss_spec, file=sys.stderr)
if faiss_spec.pca:
A = torch.from_numpy(np.load(osp.join(args.path, "pca_A.npy"))).cuda()
b = torch.from_numpy(np.load(osp.join(args.path, "pca_b.npy"))).cuda()
print("Loaded PCA", file=sys.stderr)
centroids = np.load(osp.join(args.path, "centroids.npy"))
print("Loaded centroids", centroids.shape, file=sys.stderr)
res = faiss.StandardGpuResources()
index_flat = (
faiss.IndexFlatL2(centroids.shape[1])
if not faiss_spec.sphere
else faiss.IndexFlatIP(centroids.shape[1])
)
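# Spherical k-means keeps centroids (approximately) unit-norm, so
# nearest-centroid search uses inner product (cosine similarity); otherwise
# plain Euclidean distance is used.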
faiss_index = faiss.index_cpu_to_gpu(res, 0, index_flat)
faiss_index.add(centroids)
generator, num, root = get_iterator(args)
iterator = generator()
had_labels = False
label_path = osp.join(args.path, f"{args.split}.{args.labels}")
with torch.no_grad():
with open(osp.join(args.path, f"{args.split}.src"), "w") as fp, open(
osp.join(args.path, f"{args.split}.tsv"), "w"
) as pp, open(label_path, "w") as lp:
print(root, file=pp)
for f, fname, lbl in tqdm.tqdm(iterator, total=num):
if faiss_spec.pca:
f = torch.mm(f, A) + b
if faiss_spec.norm:
f = F.normalize(f, p=2, dim=-1)
f = f.cpu().numpy()
_, z = faiss_index.search(f, 1)
print(" ".join(str(x.item()) for x in z), file=fp)
print(fname, file=pp)
if lbl is not None:
print(lbl, file=lp)
had_labels = True
if not had_labels:
os.remove(label_path)
if __name__ == "__main__":
main()
| EXA-1-master | exa/libraries/fairseq/examples/wav2vec/unsupervised/scripts/wav2vec_apply_cluster_faiss.py |
#!/usr/bin/env python3 -u
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import gc
import os
import os.path as osp
import random
import numpy as np
import tqdm
import torch
from collections import namedtuple
import faiss
import fairseq
import soundfile as sf
def get_parser():
parser = argparse.ArgumentParser(
description="compute kmeans codebook from wav2vec features"
)
# fmt: off
parser.add_argument('data', help='location of tsv files')
parser.add_argument('--save-dir', help='where to save the output', required=True)
parser.add_argument('--checkpoint', type=str, help='checkpoint for wav2vec model (if using wav2vec features)', required=True)
parser.add_argument('--sample-pct', '-r', type=float, help='percentage of timesteps to sample', default=0)
parser.add_argument('--layer', '-l', type=int, help='which layer to read', default=14)
parser.add_argument('--faiss-specs', '-f', type=str,
help='faiss index specs; separated by space '
'format is: PCAx_NORM_CLUSx_SPHERICAL -> '
'PCAx if exists first apply PCA '
'NORM if exists, normalize the vector by L2 norm '
'CLUSx must exist, cluster to x clusters '
'SPHERICAL if exists, apply spherical kmeans',
default='l2')
# fmt: on
return parser
faiss_spec = namedtuple("faiss_spec", ["pca", "norm", "n_clus", "sphere", "spec_str"])
def parse_faiss_specs(specs_str):
specs = []
for ss in specs_str.split():
comps = ss.split("_")
pca = 0
norm = False
n_clus = 0
sphere = False
for c in comps:
if c.startswith("PCA"):
pca = int(c[3:])
elif c == "NORM":
norm = True
elif c.startswith("CLUS"):
n_clus = int(c[4:])
elif c == "SPHERICAL":
sphere = True
assert n_clus > 0
specs.append(
faiss_spec(pca=pca, norm=norm, n_clus=n_clus, sphere=sphere, spec_str=ss)
)
return specs
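# Example: "PCA512_NORM_CLUS128_SPHERICAL" parses to
# faiss_spec(pca=512, norm=True, n_clus=128, sphere=True,
#            spec_str="PCA512_NORM_CLUS128_SPHERICAL").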
class Wav2VecFeatureReader(object):
def __init__(self, cp_file, layer):
state = fairseq.checkpoint_utils.load_checkpoint_to_cpu(cp_file)
self.layer = layer
if "cfg" in state:
w2v_args = state["cfg"]
task = fairseq.tasks.setup_task(w2v_args.task)
model = task.build_model(w2v_args.model)
else:
w2v_args = state["args"]
task = fairseq.tasks.setup_task(w2v_args)
model = task.build_model(w2v_args)
model.load_state_dict(state["model"], strict=True)
model.eval()
model.cuda()
self.model = model
def read_audio(self, fname):
"""Load an audio file and return its PCM samples (16 kHz input is required)"""
wav, sr = sf.read(fname)
assert sr == 16e3
return wav
def get_feats(self, loc):
x = self.read_audio(loc)
with torch.no_grad():
source = torch.from_numpy(x).view(1, -1).float().cuda()
res = self.model(
source=source, mask=False, features_only=True, layer=self.layer
)
return res["layer_results"][self.layer][0].squeeze(1)
def get_iterator(args):
with open(args.data, "r") as fp:
lines = fp.read().split("\n")
root = lines.pop(0).strip()
files = [osp.join(root, line.split("\t")[0]) for line in lines if len(line) > 0]
if getattr(args, "sample_pct", 0) > 0:
files = random.sample(files, int(args.sample_pct * len(files)))
num = len(files)
reader = Wav2VecFeatureReader(args.checkpoint, args.layer)
def iterate():
for fname in files:
feats = reader.get_feats(fname)
yield feats.cpu().numpy()
return iterate, num
def main():
parser = get_parser()
args = parser.parse_args()
faiss_specs = parse_faiss_specs(args.faiss_specs)
print("Faiss Specs:", faiss_specs)
feat_path = osp.join(args.save_dir, "features")
if osp.exists(feat_path + ".npy"):
feats = np.load(feat_path + ".npy")
else:
generator, num = get_iterator(args)
iterator = generator()
feats = []
for f in tqdm.tqdm(iterator, total=num):
feats.append(f)
del iterator
del generator
feats = np.concatenate(feats)
print(feats.shape)
os.makedirs(args.save_dir, exist_ok=True)
# np.save(feat_path, feats)
gc.collect()
torch.cuda.empty_cache()
reload = False
for spec in faiss_specs:
print("Processing spec", spec)
if reload:
print("Reloading...")
del feats
gc.collect()
feats = np.load(feat_path + ".npy")
save_path = osp.join(args.save_dir, spec.spec_str)
os.makedirs(save_path, exist_ok=True)
d = feats.shape[-1]
x = feats
if spec.pca > 0:
print("Computing PCA")
pca = faiss.PCAMatrix(d, spec.pca)
pca.train(x)
d = spec.pca
b = faiss.vector_to_array(pca.b)
A = faiss.vector_to_array(pca.A).reshape(pca.d_out, pca.d_in)
np.save(osp.join(save_path, "pca_A"), A.T)
np.save(osp.join(save_path, "pca_b"), b)
print("Applying PCA")
x = pca.apply_py(x)
if spec.norm:
reload = spec.pca <= 0
print("Normalizing")
faiss.normalize_L2(x)
print("Computing kmeans")
kmeans = faiss.Kmeans(
d,
spec.n_clus,
niter=50,
verbose=True,
spherical=spec.sphere,
max_points_per_centroid=feats.shape[0],
gpu=True,
nredo=3,
)
kmeans.train(x)
np.save(osp.join(save_path, "centroids"), kmeans.centroids)
del kmeans
del x
gc.collect()
if __name__ == "__main__":
main()
| EXA-1-master | exa/libraries/fairseq/examples/wav2vec/unsupervised/scripts/wav2vec_cluster_faiss.py |
#!/usr/bin/env python3 -u
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os
import argparse
import sys
parser = argparse.ArgumentParser()
parser.add_argument("--tsv", required=True, type=str)
parser.add_argument("--no-skip", action="store_true")
parser.add_argument("--keep", action="store_true")
params = parser.parse_args()
def get_fname(line):
p = os.path.basename(line.split("\t")[0])
p = os.path.splitext(p)[0]
return p
# filenames present in the reference tsv (excluded by default, kept when --keep is set)
seen = set()
with open(params.tsv) as f:
if not params.no_skip:
root = next(f).rstrip()
for line in f:
seen.add(get_fname(line))
for i, line in enumerate(sys.stdin):
exists = get_fname(line) in seen
keep = (exists and params.keep) or (not exists and not params.keep)
if i == 0 or keep:
print(line, end="")
| EXA-1-master | exa/libraries/fairseq/examples/wav2vec/unsupervised/scripts/filter_tsv.py |
#!/usr/bin/env python3 -u
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import os
import os.path as osp
import numpy as np
import tqdm
import torch
import random
from shutil import copyfile
from npy_append_array import NpyAppendArray
def get_parser():
parser = argparse.ArgumentParser(
description="pools features over consecutive frames that share a cluster label and stores them in the target dir"
)
# fmt: off
parser.add_argument('source', help='directory with features')
parser.add_argument('--split', help='which split to read', required=True)
parser.add_argument('--save-dir', help='where to save the output', required=True)
parser.add_argument('--cluster-dir', help='where the clusters are')
parser.add_argument('--pooling', type=str, default='mean', choices=['mean', 'sample'], help='how to pool')
# fmt: on
return parser
def main():
parser = get_parser()
args = parser.parse_args()
source_path = osp.join(args.source, args.split)
cluster_path = osp.join(args.cluster_dir, args.split + ".src")
print(f"data path: {source_path}")
features = np.load(source_path + ".npy", mmap_mode="r")
sizes = []
offsets = []
offset = 0
with open(source_path + ".lengths", "r") as len_f:
for line in len_f:
length = int(line.rstrip())
sizes.append(length)
offsets.append(offset)
offset += length
clusters = []
with open(cluster_path, "r") as cf:
for line in cf:
line = line.rstrip()
items = line.split()
items = list(map(int, items))
clusters.append(items)
os.makedirs(args.save_dir, exist_ok=True)
save_path = osp.join(args.save_dir, args.split)
copyfile(source_path + ".tsv", save_path + ".tsv")
if os.path.exists(source_path + ".phn"):
copyfile(source_path + ".phn", save_path + ".phn")
if os.path.exists(osp.join(args.source, "dict.phn.txt")):
copyfile(
osp.join(args.source, "dict.phn.txt"),
osp.join(args.save_dir, "dict.phn.txt"),
)
if os.path.exists(source_path + ".wrd"):
copyfile(source_path + ".wrd", save_path + ".wrd")
if osp.exists(save_path + ".npy"):
os.remove(save_path + ".npy")
npaa = NpyAppendArray(save_path + ".npy")
def merge(feats, clust):
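# Pools features over consecutive frames that were assigned the same cluster,
# e.g. cluster ids [4, 4, 4, 9, 9, 2] yield three output vectors: the mean of
# each run, or a randomly sampled frame of the run when --pooling sample is used.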
feats = torch.from_numpy(feats.copy())
clust = torch.LongTensor(clust)
_, counts = clust.unique_consecutive(return_counts=True)
curr = 0
merged = []
for c in counts:
c = c.item()
start = curr
end = curr + c
curr += c
if args.pooling == "mean":
new_x = feats[start:end].mean(dim=0)
elif args.pooling == "sample":
new_x = feats[start + int(random.random() * c)]
else:
raise NotImplementedError()
merged.append(new_x)
return torch.stack(merged, dim=0).numpy()
with open(save_path + ".lengths", "w") as l_f:
for size, offset, clust in tqdm.tqdm(
zip(sizes, offsets, clusters), total=len(sizes)
):
end = size + offset
feats = features[offset:end]
feats = merge(feats, clust)
print(len(feats), file=l_f)
npaa.append(feats)
if __name__ == "__main__":
main()
| EXA-1-master | exa/libraries/fairseq/examples/wav2vec/unsupervised/scripts/merge_clusters.py |
#!/usr/bin/env python3 -u
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import sys
def main():
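    # Inverse of wrd_to_ltr.py: "h e l l o | w o r l d |" -> "hello world".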
for line in sys.stdin:
print(line.replace(" ", "").replace("|", " ").strip())
if __name__ == "__main__":
main()
| EXA-1-master | exa/libraries/fairseq/examples/wav2vec/unsupervised/scripts/ltr_to_wrd.py |
#!/usr/bin/env python3 -u
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import numpy as np
import sys
def get_parser():
parser = argparse.ArgumentParser(
        description="converts words to phones, optionally inserting silences between and around words"
)
parser.add_argument(
"--sil-prob",
"-s",
type=float,
default=0,
help="probability of inserting silence between each word",
)
parser.add_argument(
"--surround",
action="store_true",
help="if set, surrounds each example with silence",
)
parser.add_argument(
"--lexicon",
help="lexicon to convert to phones",
required=True,
)
return parser
def main():
parser = get_parser()
args = parser.parse_args()
sil_prob = args.sil_prob
surround = args.surround
sil = "<SIL>"
wrd_to_phn = {}
with open(args.lexicon, "r") as lf:
for line in lf:
items = line.rstrip().split()
assert len(items) > 1, line
assert items[0] not in wrd_to_phn, items
wrd_to_phn[items[0]] = items[1:]
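    # Convert each stdin line word by word via the lexicon; lines with any
    # out-of-lexicon word are skipped. Silence is inserted between words with
    # probability --sil-prob and around the utterance when --surround is set.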
for line in sys.stdin:
words = line.strip().split()
if not all(w in wrd_to_phn for w in words):
continue
phones = []
if surround:
phones.append(sil)
sample_sil_probs = None
if sil_prob > 0 and len(words) > 1:
sample_sil_probs = np.random.random(len(words) - 1)
for i, w in enumerate(words):
phones.extend(wrd_to_phn[w])
if (
sample_sil_probs is not None
and i < len(sample_sil_probs)
and sample_sil_probs[i] < sil_prob
):
phones.append(sil)
if surround:
phones.append(sil)
print(" ".join(phones))
if __name__ == "__main__":
main()
| EXA-1-master | exa/libraries/fairseq/examples/wav2vec/unsupervised/scripts/phonemize_with_sil.py |
#!/usr/bin/env python3 -u
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import os
import os.path as osp
import math
import numpy as np
import tqdm
import torch
import torch.nn.functional as F
from shutil import copyfile
from npy_append_array import NpyAppendArray
def get_parser():
parser = argparse.ArgumentParser(
description="mean pools representations by compressing uniform splits of the data"
)
# fmt: off
parser.add_argument('source', help='directory with features')
parser.add_argument('--split', help='which split to read', required=True)
parser.add_argument('--save-dir', help='where to save the output', required=True)
    parser.add_argument('--subsample-rate', type=float, default=0.5, help='target length as a fraction of the original length')
    parser.add_argument('--remove-extra', action='store_true', help='if set, trims frames that cannot be pooled evenly; otherwise pads by repeating the last frame')
# fmt: on
return parser
def main():
parser = get_parser()
args = parser.parse_args()
source_path = osp.join(args.source, args.split)
print(f"data path: {source_path}")
features = np.load(source_path + ".npy", mmap_mode="r")
os.makedirs(args.save_dir, exist_ok=True)
save_path = osp.join(args.save_dir, args.split)
copyfile(source_path + ".tsv", save_path + ".tsv")
if os.path.exists(source_path + ".phn"):
copyfile(source_path + ".phn", save_path + ".phn")
if os.path.exists(source_path + ".wrd"):
copyfile(source_path + ".wrd", save_path + ".wrd")
if os.path.exists(osp.join(args.source, "dict.phn.txt")):
copyfile(
osp.join(args.source, "dict.phn.txt"),
osp.join(args.save_dir, "dict.phn.txt"),
)
if osp.exists(save_path + ".npy"):
os.remove(save_path + ".npy")
npaa = NpyAppendArray(save_path + ".npy")
with open(source_path + ".lengths", "r") as lf:
lengths = lf.readlines()
fsz = features.shape[-1]
start = 0
with torch.no_grad():
with open(save_path + ".lengths", "w") as lengths_out:
for length in tqdm.tqdm(lengths):
length = int(length)
end = start + length
feats = features[start:end]
start += length
x = torch.from_numpy(feats).cuda()
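                # Reshape the utterance into `target_num` equal chunks and mean-pool
                # each chunk; if the length is not divisible, either trim trailing
                # frames and emit one fewer chunk (--remove-extra) or right-pad by
                # repeating the last frame.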
target_num = math.ceil(length * args.subsample_rate)
rem = length % target_num
if rem > 0:
if args.remove_extra:
to_rem = target_num - rem
target_num -= 1
x = x[:-to_rem]
else:
to_add = target_num - rem
x = F.pad(x, [0, 0, 0, to_add])
x[-to_add:] = x[-to_add - 1]
x = x.view(target_num, -1, fsz)
x = x.mean(dim=-2)
print(target_num, file=lengths_out)
npaa.append(x.cpu().numpy())
if __name__ == "__main__":
main()
| EXA-1-master | exa/libraries/fairseq/examples/wav2vec/unsupervised/scripts/mean_pool.py |
#!/usr/bin/env python3 -u
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import sys
def main():
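    # "hello world" -> "h e l l o | w o r l d |": space-separated letters with
    # "|" marking word boundaries (the letter-level .ltr transcript format).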
for line in sys.stdin:
print(" ".join(list(line.strip().replace(" ", "|"))) + " |")
if __name__ == "__main__":
main()
| EXA-1-master | exa/libraries/fairseq/examples/wav2vec/unsupervised/scripts/wrd_to_ltr.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import regex
import sys
def main():
filter_r = regex.compile(r"[^\p{L}\p{N}\p{M}\' \-]")
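    # Keep letters, digits, combining marks, apostrophes, spaces and hyphens;
    # everything else is replaced by a space, then whitespace is collapsed.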
for line in sys.stdin:
line = line.strip()
line = filter_r.sub(" ", line)
line = " ".join(line.split())
print(line)
if __name__ == "__main__":
main()
| EXA-1-master | exa/libraries/fairseq/examples/wav2vec/unsupervised/scripts/normalize_text.py |
#!/usr/bin/env python3 -u
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
Implement unsupervised metric for decoding hyperparameter selection:
    $$ alpha * LM_PPL + ViterbiUER(%) * 100 $$
"""
import argparse
import logging
import sys
import editdistance
logging.root.setLevel(logging.INFO)
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
logger = logging.getLogger(__name__)
def get_parser():
parser = argparse.ArgumentParser()
parser.add_argument("-s", "--hypo", help="hypo transcription", required=True)
parser.add_argument(
"-r", "--reference", help="reference transcription", required=True
)
return parser
def compute_wer(ref_uid_to_tra, hyp_uid_to_tra, g2p):
d_cnt = 0
w_cnt = 0
w_cnt_h = 0
for uid in hyp_uid_to_tra:
ref = ref_uid_to_tra[uid].split()
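        # If a g2p backend is supplied, phonemize the hypothesis and drop
        # apostrophes, spaces and trailing stress digits (e.g. "AH0" -> "AH").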
if g2p is not None:
hyp = g2p(hyp_uid_to_tra[uid])
hyp = [p for p in hyp if p != "'" and p != " "]
hyp = [p[:-1] if p[-1].isnumeric() else p for p in hyp]
else:
hyp = hyp_uid_to_tra[uid].split()
d_cnt += editdistance.eval(ref, hyp)
w_cnt += len(ref)
w_cnt_h += len(hyp)
wer = float(d_cnt) / w_cnt
logger.debug(
(
f"wer = {wer * 100:.2f}%; num. of ref words = {w_cnt}; "
f"num. of hyp words = {w_cnt_h}; num. of sentences = {len(ref_uid_to_tra)}"
)
)
return wer
def main():
args = get_parser().parse_args()
errs = 0
count = 0
with open(args.hypo, "r") as hf, open(args.reference, "r") as rf:
for h, r in zip(hf, rf):
h = h.rstrip().split()
r = r.rstrip().split()
errs += editdistance.eval(r, h)
count += len(r)
logger.info(f"UER: {errs / count * 100:.2f}%")
def load_tra(tra_path):
    with open(tra_path, "r") as f:
        uid_to_tra = {}
        for line in f:
            uid, tra = line.split(None, 1)
            uid_to_tra[uid] = tra
    logger.debug(f"loaded {len(uid_to_tra)} utterances from {tra_path}")
    return uid_to_tra
if __name__ == "__main__":
    main()
| EXA-1-master | exa/libraries/fairseq/examples/wav2vec/unsupervised/scripts/wer.py |
#!/usr/bin/env python3 -u
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import sys
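# Prefix every stdin line with a zero-padded utterance id, e.g. "utt0000000042 <text>".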
for idx, line in enumerate(sys.stdin):
print(f"utt{idx:010d} {line}", end="")
| EXA-1-master | exa/libraries/fairseq/examples/wav2vec/unsupervised/scripts/copy_labels.py |
#!/usr/bin/env python3 -u
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import os
import os.path as osp
import numpy as np
import faiss
def get_parser():
parser = argparse.ArgumentParser(
description="compute a pca matrix given an array of numpy features"
)
# fmt: off
parser.add_argument('data', help='numpy file containing features')
parser.add_argument('--output', help='where to save the pca matrix', required=True)
parser.add_argument('--dim', type=int, help='dim for pca reduction', required=True)
parser.add_argument('--eigen-power', type=float, default=0, help='eigen power, -0.5 for whitening')
return parser
def main():
parser = get_parser()
args = parser.parse_args()
print("Reading features")
x = np.load(args.data, mmap_mode="r")
print("Computing PCA")
pca = faiss.PCAMatrix(x.shape[-1], args.dim, args.eigen_power)
pca.train(x)
b = faiss.vector_to_array(pca.b)
A = faiss.vector_to_array(pca.A).reshape(pca.d_out, pca.d_in)
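    # faiss returns A with shape (d_out, d_in); save it transposed along with the
    # bias b so the projection can be applied downstream as `x @ A + b`.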
os.makedirs(args.output, exist_ok=True)
prefix = str(args.dim)
if args.eigen_power != 0:
prefix += f"_{args.eigen_power}"
np.save(osp.join(args.output, f"{prefix}_pca_A"), A.T)
np.save(osp.join(args.output, f"{prefix}_pca_b"), b)
if __name__ == "__main__":
main()
| EXA-1-master | exa/libraries/fairseq/examples/wav2vec/unsupervised/scripts/pca.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import fasttext as ft
import os
import regex
import sys
def get_parser():
parser = argparse.ArgumentParser(
description="reads text from stdin and outputs normalized, lid-filtered version to stdout"
)
parser.add_argument(
"--fasttext-model",
help="path to fasttext model",
default="lid.187.bin",
)
parser.add_argument("--lang", help="language id", required=True)
parser.add_argument(
"--lid-threshold",
type=float,
help="threshold for this lang id probability",
default=0.4,
)
return parser
def main():
parser = get_parser()
args = parser.parse_args()
filter_r = regex.compile(r"[^\p{L}\p{N}\p{M}\' \-]")
lg = args.lang.lower()
lg_label = f"__label__{lg}"
thresh = args.lid_threshold
if os.path.exists(args.fasttext_model):
model = ft.load_model(args.fasttext_model)
else:
print(
f"fasttext language id model {args.fasttext_model} not found. Proceeding without language filtering. "
f"To enable language filtering, please download the latest language id model "
f"from https://fasttext.cc/docs/en/language-identification.html",
file=sys.stderr,
)
model = None
for line in sys.stdin:
line = line.strip()
line = filter_r.sub(" ", line)
line = " ".join(line.split())
if model is not None:
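            # keep the line only if the target language is the top prediction or
            # its probability clears --lid-threshold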
lid, prob = model.predict(line, k=100)
try:
target_idx = lid.index(lg_label)
except ValueError:
continue
if target_idx == 0 or prob[target_idx] >= thresh:
print(line)
else:
print(line)
if __name__ == "__main__":
main()
| EXA-1-master | exa/libraries/fairseq/examples/wav2vec/unsupervised/scripts/normalize_and_filter_text.py |