python_code | repo_name | file_path |
---|---|---|
# Copyright (c) Facebook, Inc. All Rights Reserved
import torch
import os
import numpy as np
import pickle
from . import retri
from ..utils import get_local_rank
class VectorPool(object):
"""
Base class of retrieval space.
"""
def __init__(self, config):
from transformers import AutoConfig
self.hidden_size = AutoConfig.from_pretrained(
config.dataset.bert_name).hidden_size
self.retriever_cls = getattr(retri, config.retriever_cls)
def __call__(self, sample, **kwargs):
raise NotImplementedError
def build_retriver(
self,
retriever_cls=None,
hidden_size=None,
centroids=512,
db_type="flatl2",
examples_per_cent_to_train=48
):
"""merge results from multiple gpus and return a retriver.."""
self.retriver = retriever_cls(
hidden_size, centroids, db_type, examples_per_cent_to_train)
return self.retriver
def __repr__(self):
if hasattr(self, "retriver"):
retriver_name = str(len(self.retriver))
else:
retriver_name = "no retriver field yet"
return self.__class__.__name__ \
+ "(" + retriver_name + ")"
class VideoVectorPool(VectorPool):
"""
average clips of a video as video representation.
"""
def __init__(self, config):
super().__init__(config)
self.build_retriver(self.retriever_cls, self.hidden_size)
def __call__(self, sample, subsampling, **kwargs):
hidden_states = (
sample["pooled_video"] + sample["pooled_text"]) / 2.
hidden_states = hidden_states.view(
-1, subsampling,
hidden_states.size(-1))
hidden_states = torch.mean(hidden_states, dim=1)
hidden_states = hidden_states.cpu().detach().numpy()
video_ids = []
for offset_idx, video_id in enumerate(sample["video_id"]):
if isinstance(video_id, tuple) and len(video_id) == 3:
# a sharded video_id.
video_id = video_id[0]
video_ids.append(video_id)
assert len(video_ids) == len(hidden_states)
self.retriver.add(
hidden_states.astype("float32"),
video_ids
)
class DistributedVectorPool(VectorPool):
"""
support sync of multiple gpus/nodes.
"""
def __init__(self, config):
super().__init__(config)
self.out_dir = os.path.join(
config.fairseq.checkpoint.save_dir,
"retri")
os.makedirs(self.out_dir, exist_ok=True)
self.hidden_states = []
self.video_ids = []
def build_retriver(
self,
retriever_cls=None,
hidden_size=None,
centroids=4096,
db_type="flatl2",
examples_per_cent_to_train=48
):
if retriever_cls is None:
retriever_cls = self.retriever_cls
if hidden_size is None:
hidden_size = self.hidden_size
"""merge results from multiple gpus and return a retriver.."""
if torch.distributed.is_initialized():
self.save()
# sync saving.
torch.distributed.barrier()
world_size = torch.distributed.get_world_size()
else:
world_size = 1
self.retriver = retriever_cls(
hidden_size, centroids, db_type, examples_per_cent_to_train)
# each gpu process has its own retriever.
for local_rank in range(world_size):
if get_local_rank() == 0:
print("load local_rank", local_rank)
hidden_states, video_ids = self.load(local_rank)
hidden_states = hidden_states.astype("float32")
self.retriver.add(hidden_states, video_ids)
return self.retriver
def load(self, local_rank):
hidden_states = np.load(
os.path.join(
self.out_dir,
"hidden_state" + str(local_rank) + ".npy"
)
)
with open(
os.path.join(
self.out_dir, "video_id" + str(local_rank) + ".pkl"),
"rb") as fr:
video_ids = pickle.load(fr)
return hidden_states, video_ids
def save(self):
hidden_states = np.vstack(self.hidden_states)
assert len(hidden_states) == len(self.video_ids), "{}, {}".format(
len(hidden_states),
len(self.video_ids)
)
local_rank = torch.distributed.get_rank() \
if torch.distributed.is_initialized() else 0
np.save(
os.path.join(
self.out_dir,
"hidden_state" + str(local_rank) + ".npy"),
hidden_states)
with open(
os.path.join(
self.out_dir,
"video_id" + str(local_rank) + ".pkl"),
"wb") as fw:
pickle.dump(
self.video_ids,
fw,
protocol=pickle.HIGHEST_PROTOCOL
)
class DistributedVideoVectorPool(DistributedVectorPool):
"""
average clips of a video as video representation.
"""
def __call__(self, sample, subsampling, **kwargs):
hidden_states = (
sample["pooled_video"] + sample["pooled_text"]) / 2.
hidden_states = hidden_states.view(
-1, subsampling,
hidden_states.size(-1))
hidden_states = torch.mean(hidden_states, dim=1)
hidden_states = hidden_states.cpu().detach().numpy()
video_ids = []
for offset_idx, video_id in enumerate(sample["video_id"]):
if isinstance(video_id, tuple) and len(video_id) == 3:
# a sharded video_id.
video_id = video_id[0]
video_ids.append(video_id)
assert len(video_ids) == len(hidden_states)
self.hidden_states.append(hidden_states)
self.video_ids.extend(video_ids)
# ------------ the following are deprecated --------------
class TextClipVectorPool(VectorPool):
def __init__(self, config):
from transformers import AutoConfig
hidden_size = AutoConfig.from_pretrained(
config.dataset.bert_name).hidden_size
retriever_cls = getattr(retri, config.retriever_cls)
self.build_retriver(retriever_cls, hidden_size)
def __call__(self, sample, **kwargs):
clip_meta = sample["clip_meta"].cpu()
assert torch.all(torch.le(clip_meta[:, 4], clip_meta[:, 5]))
text_meta = [tuple(item.tolist()) for item in clip_meta[:, 3:]]
if hasattr(self, "retriver"):
# build_retriver is called.
self.retriver.add(
sample["pooled_text"].cpu().numpy().astype("float32"),
text_meta
)
else:
raise NotImplementedError
class MMClipVectorPool(VectorPool):
"""
Multimodal Clip-level vector pool.
"""
def __init__(self, out_dir):
"""use hidden_states to store `(video, text)`."""
"""use video_ids to store `(video_id, start, end)`."""
super().__init__(out_dir)
def __call__(self, sample, **kwargs):
pooled_video = sample["pooled_video"].cpu().unsqueeze(1).numpy()
pooled_text = sample["pooled_text"].cpu().unsqueeze(1).numpy()
self.hidden_states.append(
np.concatenate([pooled_video, pooled_text], axis=1)
)
video_starts = sample["video_start"].cpu()
video_ends = sample["video_end"].cpu()
assert torch.all(torch.le(video_starts, video_ends))
text_starts = sample["text_start"].cpu()
text_ends = sample["text_end"].cpu()
assert torch.all(torch.le(text_starts, text_ends))
subsample_size = sample["pooled_video"].size(0) // len(sample["video_id"])
video_ids = [video_id for video_id in sample["video_id"]
for _ in range(subsample_size)
]
for video_id, video_start, video_end, text_start, text_end in zip(
video_ids, video_starts, video_ends, text_starts, text_ends):
self.video_ids.append((
video_id,
(int(video_start), int(video_end)),
(int(text_start), int(text_end))
))
| EXA-1-master | exa/models/unilm-master/edgelm/examples/MMPT/mmpt/modules/vectorpool.py |
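A minimal usage sketch of the clip-averaging step performed by `VideoVectorPool.__call__` (and its distributed variant); the batch sizes below are toy values chosen for illustration, not taken from the dataset.

import torch

batch_videos, subsampling, hidden_size = 2, 4, 8   # illustrative sizes only
pooled_video = torch.randn(batch_videos * subsampling, hidden_size)
pooled_text = torch.randn(batch_videos * subsampling, hidden_size)

# average the two modalities, regroup clips per video, then mean-pool over clips.
hidden_states = (pooled_video + pooled_text) / 2.
hidden_states = hidden_states.view(-1, subsampling, hidden_size)
video_vectors = torch.mean(hidden_states, dim=1)
assert video_vectors.shape == (batch_videos, hidden_size)  # one vector per video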
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Copyright (c) Facebook, Inc. All Rights Reserved
import torch
from torch import nn
try:
from transformers.modeling_bert import (
BertEmbeddings,
ACT2FN,
)
except ImportError:
pass
class VideoTokenMLP(nn.Module):
def __init__(self, config):
super().__init__()
input_dim = config.input_dim if hasattr(config, "input_dim") else 512
self.linear1 = nn.Linear(input_dim, config.hidden_size)
self.LayerNorm = nn.LayerNorm(config.hidden_size)
self.activation = ACT2FN[config.hidden_act]
self.linear2 = nn.Linear(config.hidden_size, config.hidden_size)
def forward(self, hidden_states):
hidden_states = self.linear1(hidden_states)
hidden_states = self.activation(hidden_states)
hidden_states = self.LayerNorm(hidden_states)
hidden_states = self.linear2(hidden_states)
return hidden_states
class MMBertEmbeddings(BertEmbeddings):
def __init__(self, config):
super().__init__(config)
self.max_video_len = config.max_video_len
if hasattr(config, "use_seg_emb") and config.use_seg_emb:
"""the original VLM paper uses seg_embeddings for temporal space.
although not used it changed the randomness of initialization.
we keep it for reproducibility.
"""
self.seg_embeddings = nn.Embedding(256, config.hidden_size)
def forward(
self,
input_ids,
input_video_embeds,
token_type_ids=None,
position_ids=None,
inputs_embeds=None,
):
input_tensor = input_ids if input_ids is not None else inputs_embeds
if input_video_embeds is not None:
input_shape = (
input_tensor.size(0),
input_tensor.size(1) + input_video_embeds.size(1),
)
else:
input_shape = (input_tensor.size(0), input_tensor.size(1))
if position_ids is None:
"""
Auto skip position embeddings for text only case.
use cases:
(1) action localization and segmentation:
feed in len-1 dummy video token needs text part to
skip input_video_embeds.size(1) for the right
position_ids for video [SEP] and rest text tokens.
(2) MMFusionShare for two forward passings:
in `forward_text`: input_video_embeds is None.
need to skip video [SEP] token.
# video_len + 1: [CLS] + video_embed
# self.max_video_len + 1: [SEP] for video.
# self.max_video_len + 2: [SEP] for video.
# self.max_video_len + input_ids.size(1): rest for text.
"""
if input_video_embeds is not None:
video_len = input_video_embeds.size(1)
starting_offset = self.max_video_len + 1 # video [SEP]
ending_offset = self.max_video_len + input_ids.size(1)
else:
video_len = 0
starting_offset = self.max_video_len + 2 # first text token.
ending_offset = self.max_video_len + input_ids.size(1) + 1
position_ids = torch.cat([
self.position_ids[:, :video_len + 1],
self.position_ids[:, starting_offset:ending_offset]
], dim=1)
if token_type_ids is None:
token_type_ids = torch.zeros(
input_shape, dtype=torch.long, device=self.position_ids.device
)
"""
the format of input_ids is [CLS] [SEP] caption [SEP] padding.
the goal is to build [CLS] video tokens [SEP] caption [SEP] .
"""
if inputs_embeds is None:
inputs_embeds = self.word_embeddings(input_ids)
if input_video_embeds is not None:
inputs_mm_embeds = torch.cat([
inputs_embeds[:, :1], input_video_embeds, inputs_embeds[:, 1:]
], dim=1)
else:
# text only for `MMFusionShare`.
inputs_mm_embeds = inputs_embeds
position_embeddings = self.position_embeddings(position_ids)
token_type_embeddings = self.token_type_embeddings(token_type_ids)
embeddings = inputs_mm_embeds + position_embeddings
embeddings += token_type_embeddings
embeddings = self.LayerNorm(embeddings)
embeddings = self.dropout(embeddings)
return embeddings
class AlignHead(nn.Module):
"""this will load pre-trained weights for NSP, which is desirable."""
def __init__(self, config):
super().__init__()
self.seq_relationship = nn.Linear(config.hidden_size, 2)
def forward(self, dropout_pooled_output):
logits = self.seq_relationship(dropout_pooled_output)
return logits
| EXA-1-master | exa/models/unilm-master/edgelm/examples/MMPT/mmpt/modules/mm.py |
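A small self-contained sketch of the embedding splice done in `MMBertEmbeddings.forward`, where video embeddings are inserted between the [CLS] embedding and the remaining text-token embeddings; all tensor sizes here are made up.

import torch

bsz, text_len, video_len, hidden = 2, 6, 3, 8        # toy dimensions
inputs_embeds = torch.randn(bsz, text_len, hidden)   # [CLS] [SEP] caption [SEP] pad
input_video_embeds = torch.randn(bsz, video_len, hidden)

inputs_mm_embeds = torch.cat(
    [inputs_embeds[:, :1], input_video_embeds, inputs_embeds[:, 1:]], dim=1
)
assert inputs_mm_embeds.shape == (bsz, text_len + video_len, hidden)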
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import numpy as np
import os
import pickle
from mmpt.utils import ShardedTensor
class Shard(object):
def __init__(
self,
vfeat_dir,
tfeat_dir,
target_dir,
file_paths,
shard_size=4096
):
self.vfeat_dir = vfeat_dir
self.tfeat_dir = tfeat_dir
self.target_dir = target_dir
self.video_ids = {}
for split, file_path in zip(["train", "val"], file_paths):
with open(file_path) as fr:
self.video_ids[split] = [
line.strip() for line in fr.readlines()]
self.shard_size = shard_size
def __call__(self, split="train"):
for split in ["train", "val"]:
meta = {}
for shard_idx, shard_offset in enumerate(
range(0, len(self.video_ids[split]), self.shard_size)
):
print(shard_idx)
meta_shard = []
video_shard = []
for video_id in self.video_ids[split][shard_offset:shard_offset+self.shard_size]:
meta_shard.append(video_id)
npy_file = os.path.join(self.vfeat_dir, video_id + ".npy")
video_shard.append(np.load(npy_file))
meta[shard_idx] = meta_shard
video_shard = ShardedTensor.from_list(video_shard)
target_path = os.path.join(
self.target_dir, split + "_" + str(shard_idx))
video_shard.save(target_path)
target_path = os.path.join(self.target_dir, split + "_meta")
with open(target_path + ".pkl", "wb") as fw:
pickle.dump(meta, fw, pickle.HIGHEST_PROTOCOL)
if __name__ == "__main__":
shard = Shard(
"data/feat/feat_how2_s3d",
"data/how2/raw_caption_dedup.bert-base-uncased",
"data/feat/feat_how2_s3d_shard_small",
["data/how2/how2_s3d_train.lst", "data/how2/how2_s3d_val.lst"]
)
shard()
| EXA-1-master | exa/models/unilm-master/edgelm/examples/MMPT/scripts/video_feature_extractor/shard_feature.py |
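For reference, a toy illustration of the sharding loop in `Shard.__call__`: video ids are grouped into fixed-size shards by offset (the id names below are hypothetical).

video_ids = ["vid%02d" % i for i in range(10)]
shard_size = 4
for shard_idx, shard_offset in enumerate(range(0, len(video_ids), shard_size)):
    shard = video_ids[shard_offset:shard_offset + shard_size]
    print(shard_idx, shard)  # shards 0 and 1 hold 4 ids each, shard 2 the last 2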
# Copyright Howto100M authors.
# Copyright (c) Facebook, Inc. All Rights Reserved
import torch as th
import pandas as pd
import os
import numpy as np
import ffmpeg
import random
from torch.utils.data import Dataset
class VideoLoader(Dataset):
"""modified from how2's video_feature_extractor."""
def __init__(
self,
csv=None,
video_dict=None,
framerate=1,
size=112,
centercrop=False,
hflip=False,
**kwargs
):
if csv is None and video_dict is None:
raise ValueError("csv and video_dict cannot be both None.")
if csv is not None:
self.csv = pd.read_csv(csv)
if video_dict is not None:
self.csv = pd.DataFrame.from_dict(video_dict)
self.centercrop = centercrop
self.size = size
self.framerate = framerate
self.hflip = hflip
def __len__(self):
return len(self.csv)
def _get_video_dim(self, video_path):
probe = ffmpeg.probe(video_path)
video_stream = next((stream for stream in probe['streams']
if stream['codec_type'] == 'video'), None)
width = int(video_stream['width'])
height = int(video_stream['height'])
return height, width
def _get_video_info(self, video_path):
probe = ffmpeg.probe(video_path)
video_stream = next((stream for stream in probe['streams']
if stream['codec_type'] == 'video'), None)
return video_stream
def _get_output_dim(self, h, w):
if isinstance(self.size, tuple) and len(self.size) == 2:
return self.size
elif h >= w:
return int(h * self.size / w), self.size
else:
return self.size, int(w * self.size / h)
def __getitem__(self, idx):
video_path = self.csv['video_path'].values[idx]
output_file = self.csv['feature_path'].values[idx]
return self._decode(output_file, video_path)
def _decode(self, output_file, video_path):
if not(os.path.isfile(output_file)) and os.path.isfile(video_path):
try:
h, w = self._get_video_dim(video_path)
except Exception:
print('ffprobe failed at: {}'.format(video_path))
return {'video': th.zeros(1), 'input': video_path,
'output': output_file}
try:
os.makedirs(os.path.dirname(output_file), exist_ok=True)
height, width = self._get_output_dim(h, w)
cmd = (
ffmpeg
.input(video_path)
.filter('fps', fps=self.framerate)
.filter('scale', width, height)
)
if self.hflip:
cmd = cmd.filter('hflip')
if self.centercrop:
x = int((width - self.size) / 2.0)
y = int((height - self.size) / 2.0)
cmd = cmd.crop(x, y, self.size, self.size)
video = self._run(cmd, output_file)
except Exception:
video = th.zeros(1)
else:
video = th.zeros(1)
return {'video': video, 'input': video_path, 'output': output_file}
def _run(self, cmd, output_file):
out, _ = (
cmd.output('pipe:', format='rawvideo', pix_fmt='rgb24')
.run(capture_stdout=True, quiet=True)
)
if self.centercrop and isinstance(self.size, int):
height, width = self.size, self.size
video = np.frombuffer(out, np.uint8).reshape([-1, height, width, 3])
video = th.from_numpy(video.astype('float32'))
return video.permute(0, 3, 1, 2)
class VideoVerifier(VideoLoader):
def __getitem__(self, idx):
video_path = self.csv['video_path'].values[idx]
try:
return self._get_video_info(video_path)
except Exception:
# print('ffprobe failed at: {}'.format(video_path))
return None
class VideoCompressor(VideoLoader):
def __init__(
self,
csv=None,
video_dict=None,
framerate=1,
size=112,
centercrop=False,
hflip=False,
crf=32,
**kwargs
):
super().__init__(
csv,
video_dict,
framerate,
size,
centercrop,
hflip
)
self.crf = crf
def _run(self, cmd, output_file):
out, _ = (
cmd.output(filename=output_file, crf=self.crf)
.run(quiet=True)
)
video = None
return video
class VideoDownloader(VideoCompressor):
"""download"""
def __getitem__(self, idx):
video_path = self.csv['video_path'].values[idx]
output_file = self.csv['feature_path'].values[idx]
if not(os.path.isfile(output_file)):
os.makedirs(os.path.dirname(output_file), exist_ok=True)
cmd = "wget -O" + output_file + " " + video_path
# import subprocess
# subprocess.check_output(
# cmd,
# stderr=subprocess.STDOUT, shell=True)
os.system(cmd)
return {'video': None, 'input': video_path, 'output': output_file}
class AvKeyframeVideoCompressor(VideoLoader):
"""extract keyframes from a video and save it as jpg.
TODO: consider to merge with `CodecProcessor`.
"""
def __init__(
self,
csv=None,
video_dict=None,
framerate=1,
size=112,
centercrop=False,
max_num_frames=5,
**kwargs
):
super().__init__(csv, video_dict, framerate, size, centercrop)
self.max_num_frames = max_num_frames
def _get_video_dim(self, video_fn):
"""decord cannot probe the size of a video, we use pyav instead."""
import av
with av.open(video_fn) as container:
height = container.streams.video[0].codec_context.height
width = container.streams.video[0].codec_context.width
return height, width
def _get_output_dim(self, height, width):
"""
keep the shorter side at `self.size` and stretch the other.
"""
if height >= width:
return int(height * self.size / width), self.size
else:
return self.size, int(width * self.size / height)
def __getitem__(self, idx):
import av
video_path = self.csv['video_path'].values[idx]
output_file = self.csv['feature_path'].values[idx]
if not(os.path.isdir(output_file)) and os.path.isfile(video_path):
try:
h, w = self._get_video_dim(video_path)
except Exception:
print('probe failed at: {}'.format(video_path))
return {'video': th.zeros(1), 'input': video_path,
'output': output_file}
try:
height, width = self._get_output_dim(h, w)
# new for av.
with av.open(video_path) as container:
container.streams.video[0].thread_type = "AUTO"
container.streams.video[0].codec_context.height = height
container.streams.video[0].codec_context.width = width
if self.framerate == 0: # keyframe.
container.streams.video[0].codec_context.skip_frame = 'NONKEY'
frames = []
for frame in container.decode(video=0):
frames.append(frame)
frames = random.sample(frames, self.max_num_frames)
os.makedirs(output_file, exist_ok=True)
for frame in frames:
frame.to_image().save(
os.path.join(
output_file,
"%04d.jpg" % frame.index))
except Exception:
print('extract failed at: {}'.format(video_path))
return {'video': th.zeros(1), 'input': video_path,
'output': output_file}
video = th.zeros(1)
return {'video': video, 'input': video_path, 'output': output_file}
| EXA-1-master | exa/models/unilm-master/edgelm/examples/MMPT/scripts/video_feature_extractor/videoreader.py |
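A standalone, hedged sketch of the ffmpeg-python decode path used by `VideoLoader._decode`/`_run`; the file name `example.mp4` and the 1 fps / 224 px settings are placeholders, and the cropping/hflip branches are omitted.

import ffmpeg
import numpy as np

probe = ffmpeg.probe("example.mp4")
stream = next(s for s in probe["streams"] if s["codec_type"] == "video")
h, w = int(stream["height"]), int(stream["width"])
size = 224
height, width = (int(h * size / w), size) if h >= w else (size, int(w * size / h))

out, _ = (
    ffmpeg.input("example.mp4")
    .filter("fps", fps=1)
    .filter("scale", width, height)
    .output("pipe:", format="rawvideo", pix_fmt="rgb24")
    .run(capture_stdout=True, quiet=True)
)
frames = np.frombuffer(out, np.uint8).reshape([-1, height, width, 3])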
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os
import urllib.parse
import json
import pandas as pd
from tqdm import tqdm
# TODO: extending to other datasets.
supported_formats = {}
class PathBuilder(object):
@classmethod
def build(cls, video_dirs, feature_dir, ext, shards=0, split=None):
meta_fn = os.path.join(feature_dir, "meta_plan.json")
os.makedirs(feature_dir, exist_ok=True)
if os.path.isfile(meta_fn):
with open(meta_fn) as fr:
meta = json.load(fr)
return meta
print("searching videos...")
video_id_to_path = {}
for video_dir in video_dirs.split(","):
# TODO: add supports of recursive listdir.
if video_dir in supported_formats:
supported_formats[video_dir].load(video_dir, video_id_to_path)
else:
for idx, fn in enumerate(tqdm(os.listdir(video_dir))):
video_fn = os.path.join(video_dir, fn)
if os.path.isfile(video_fn):
video_id = os.path.splitext(fn)[0]
video_id_to_path[video_id] = video_fn
elif os.path.isdir(video_fn):
# shards of folders.
shard_dir = video_fn
for idx, fn in enumerate(os.listdir(shard_dir)):
video_fn = os.path.join(shard_dir, fn)
if os.path.isfile(video_fn):
video_id = os.path.splitext(fn)[0]
video_id_to_path[video_id] = video_fn
video_path, feature_path = [], []
valid_ext = set()
for idx, video_id in enumerate(video_id_to_path):
video_path.append(video_id_to_path[video_id])
if ext is None:
# use original file ext for format compatibility.
path = urllib.parse.urlparse(video_id_to_path[video_id]).path
ext = os.path.splitext(path)[1]
if ext not in valid_ext:
valid_ext.add(ext)
print("adding", ext)
if shards:
shard_id = str(idx % shards)
feature_fn = os.path.join(
feature_dir, shard_id, video_id + ext)
else:
feature_fn = os.path.join(
feature_dir, video_id + ext)
feature_path.append(feature_fn)
print("targeting", len(feature_path), "videos")
meta = {
"video_path": video_path, "feature_path": feature_path}
with open(meta_fn, "w") as fw:
json.dump(meta, fw)
if split is not None:
splits = split.split("/")
assert len(splits) == 2
cur, total = int(splits[0]), int(splits[1])
assert cur < total
import math
chunk = math.ceil(len(meta["video_path"]) / total)
start = cur * chunk
end = (cur + 1) * chunk
meta = {
"video_path": meta["video_path"][start:end],
"feature_path": meta["feature_path"][start:end]
}
return meta
| EXA-1-master | exa/models/unilm-master/edgelm/examples/MMPT/scripts/video_feature_extractor/pathbuilder.py |
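A toy illustration of the `split` slicing at the end of `PathBuilder.build` (e.g. split="1/3" selects the second of three contiguous chunks of the planned paths); the paths below are made up.

import math

video_path = ["v%d.mp4" % i for i in range(10)]
cur, total = 1, 3                        # parsed from split="1/3"
chunk = math.ceil(len(video_path) / total)
start, end = cur * chunk, (cur + 1) * chunk
print(video_path[start:end])             # ['v4.mp4', 'v5.mp4', 'v6.mp4', 'v7.mp4']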
# Copyright (c) Howto100M authors and Facebook, Inc. All Rights Reserved
import torch as th
from torch import nn
class GlobalAvgPool(nn.Module):
def __init__(self):
super(GlobalAvgPool, self).__init__()
def forward(self, x):
return th.mean(x, dim=[-2, -1])
def get_model(args):
assert args.type in ['2d', '3d', 'vmz', 's3d', 'vae']
if args.type == '2d':
print('Loading 2D-ResNet-152 ...')
import torchvision.models as models
model = models.resnet152(pretrained=True)
model = nn.Sequential(*list(model.children())[:-2], GlobalAvgPool())
model = model.cuda()
elif args.type == 'vmz':
print('Loading VMZ ...')
from vmz34 import r2plus1d_34
model = r2plus1d_34(pretrained_path=args.vmz_model_path, pretrained_num_classes=487)
model = model.cuda()
elif args.type == 's3d':
# we use one copy of s3d instead of duplicating another one for feature extraction.
from mmpt.processors.models.s3dg import S3D
model = S3D('pretrained_models/s3d_dict.npy', 512)
model.load_state_dict(th.load('pretrained_models/s3d_howto100m.pth'))
model = model.cuda()
elif args.type == '3d':
print('Loading 3D-ResneXt-101 ...')
from videocnn.models import resnext
model = resnext.resnet101(
num_classes=400,
shortcut_type='B',
cardinality=32,
sample_size=112,
sample_duration=16,
last_fc=False)
model = model.cuda()
model_data = th.load(args.resnext101_model_path)
model.load_state_dict(model_data)
elif args.type == 'vae':
from openaivae import OpenAIParallelDiscreteVAE
model = OpenAIParallelDiscreteVAE()
model = model.cuda()
else:
raise ValueError("model not supported yet.")
model.eval()
print('loaded')
return model
| EXA-1-master | exa/models/unilm-master/edgelm/examples/MMPT/scripts/video_feature_extractor/model.py |
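A quick check (toy tensor) of what `GlobalAvgPool` computes: a mean over the last two (spatial) dimensions of a CNN feature map.

import torch as th

feat = th.randn(2, 2048, 7, 7)            # N x C x H x W, illustrative shape
pooled = th.mean(feat, dim=[-2, -1])      # same op as GlobalAvgPool.forward
assert pooled.shape == (2, 2048)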
# Copyright Howto100m authors.
# Copyright (c) Facebook, Inc. All Rights Reserved
import torch as th
class Normalize(object):
def __init__(self, mean, std):
self.mean = th.FloatTensor(mean).view(1, 3, 1, 1)
self.std = th.FloatTensor(std).view(1, 3, 1, 1)
def __call__(self, tensor):
tensor = (tensor - self.mean) / (self.std + 1e-8)
return tensor
class Preprocessing(object):
def __init__(self, type):
self.type = type
if type == '2d':
self.norm = Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
elif type == '3d':
self.norm = Normalize(mean=[110.6, 103.2, 96.3], std=[1.0, 1.0, 1.0])
elif type == 'vmz':
self.norm = Normalize(mean=[110.201, 100.64, 95.997], std=[58.1489, 56.4701, 55.3324])
def _zero_pad(self, tensor, size):
n = size - len(tensor) % size
if n == size:
return tensor
else:
z = th.zeros(n, tensor.shape[1], tensor.shape[2], tensor.shape[3])
return th.cat((tensor, z), 0)
def __call__(self, tensor):
if self.type == '2d':
tensor = tensor / 255.0
tensor = self.norm(tensor)
elif self.type == 'vmz':
#tensor = self._zero_pad(tensor, 8)
tensor = self._zero_pad(tensor, 10)
tensor = self.norm(tensor)
#tensor = tensor.view(-1, 8, 3, 112, 112)
tensor = tensor.view(-1, 10, 3, 112, 112)
tensor = tensor.transpose(1, 2)
elif self.type == '3d':
tensor = self._zero_pad(tensor, 16)
tensor = self.norm(tensor)
tensor = tensor.view(-1, 16, 3, 112, 112)
tensor = tensor.transpose(1, 2)
elif self.type == 's3d':
tensor = tensor / 255.0
tensor = self._zero_pad(tensor, 30)
tensor = tensor.view(-1, 30, 3, 224, 224) # N x 30 x 3 x H x W
tensor = tensor.transpose(1, 2) # N x 3 x 30 x H x W
# for vae do nothing
return tensor
| EXA-1-master | exa/models/unilm-master/edgelm/examples/MMPT/scripts/video_feature_extractor/preprocessing.py |
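A self-contained sketch of the pad-and-reshape pattern used by the '3d' branch of `Preprocessing.__call__`: frames are zero-padded to a multiple of the clip length and regrouped as (clips, channels, clip_len, H, W). The frame count is arbitrary.

import torch as th

frames = th.randn(37, 3, 112, 112)        # N x C x H x W decoded frames (toy N)
clip_len = 16
n = clip_len - len(frames) % clip_len     # same logic as _zero_pad
if n != clip_len:
    frames = th.cat((frames, th.zeros(n, 3, 112, 112)), 0)
clips = frames.view(-1, clip_len, 3, 112, 112).transpose(1, 2)
assert clips.shape == (3, 3, 16, 112, 112)  # ceil(37 / 16) = 3 clips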
# Copyright Howto100M authors.
# Copyright (c) Facebook, Inc. All Rights Reserved
import torch as th
import torch.nn.functional as F
import math
import numpy as np
import argparse
from torch.utils.data import DataLoader
from model import get_model
from preprocessing import Preprocessing
from random_sequence_shuffler import RandomSequenceSampler
from tqdm import tqdm
from pathbuilder import PathBuilder
from videoreader import VideoLoader
parser = argparse.ArgumentParser(description='Easy video feature extractor')
parser.add_argument('--vdir', type=str)
parser.add_argument('--fdir', type=str)
parser.add_argument('--hflip', type=int, default=0)
parser.add_argument('--batch_size', type=int, default=64,
help='batch size')
parser.add_argument('--type', type=str, default='2d',
help='CNN type')
parser.add_argument('--half_precision', type=int, default=0,
help='output half precision float')
parser.add_argument('--num_decoding_thread', type=int, default=4,
help='Num parallel thread for video decoding')
parser.add_argument('--l2_normalize', type=int, default=1,
help='l2 normalize feature')
parser.add_argument('--resnext101_model_path', type=str, default='model/resnext101.pth',
help='Resnext model path')
parser.add_argument('--vmz_model_path', type=str, default='model/r2plus1d_34_clip8_ig65m_from_scratch-9bae36ae.pth',
help='vmz model path')
args = parser.parse_args()
# TODO: refactor all args into config. (current code is from different people.)
CONFIGS = {
"2d": {
"fps": 1,
"size": 224,
"centercrop": False,
"shards": 0,
},
"3d": {
"fps": 24,
"size": 112,
"centercrop": True,
"shards": 0,
},
"s3d": {
"fps": 30,
"size": 224,
"centercrop": True,
"shards": 0,
},
"vmz": {
"fps": 24,
"size": 112,
"centercrop": True,
"shards": 0,
},
"vae": {
"fps": 2,
"size": 256,
"centercrop": True,
"shards": 100,
}
}
config = CONFIGS[args.type]
video_dirs = args.vdir
feature_dir = args.fdir
video_dict = PathBuilder.build(video_dirs, feature_dir, ".npy", config["shards"])
dataset = VideoLoader(
video_dict=video_dict,
framerate=config["fps"],
size=config["size"],
centercrop=config["centercrop"],
hflip=args.hflip
)
n_dataset = len(dataset)
sampler = RandomSequenceSampler(n_dataset, 10)
loader = DataLoader(
dataset,
batch_size=1,
shuffle=False,
num_workers=args.num_decoding_thread,
sampler=sampler if n_dataset > 10 else None,
)
preprocess = Preprocessing(args.type)
model = get_model(args)
with th.no_grad():
for k, data in tqdm(enumerate(loader), total=loader.__len__(), ascii=True):
input_file = data['input'][0]
output_file = data['output'][0]
if len(data['video'].shape) > 3:
video = data['video'].squeeze()
if len(video.shape) == 4:
video = preprocess(video)
n_chunk = len(video)
if args.type == 'vmz':
n_chunk = math.ceil(n_chunk/float(3))
features = th.cuda.FloatTensor(n_chunk, 512).fill_(0)
elif args.type == 's3d':
features = th.cuda.FloatTensor(n_chunk, 512).fill_(0)
elif args.type == "vae":
features = th.cuda.LongTensor(n_chunk, 1024).fill_(0)
else:
features = th.cuda.FloatTensor(n_chunk, 2048).fill_(0)
n_iter = int(math.ceil(n_chunk / float(args.batch_size)))
for i in range(n_iter):
factor = 1
if args.type == 'vmz':
factor = 3
min_ind = factor * i * args.batch_size
max_ind = factor * (i + 1) * args.batch_size
video_batch = video[min_ind:max_ind:factor].cuda()
if args.type == '2d':
batch_features = model(video_batch) # (51, 487), (51, 512)
elif args.type == 's3d':
batch_features = model(video_batch)
batch_features = batch_features['video_embedding']
elif args.type == "vae":
# image_code.
batch_features = model(video_batch)
else:
batch_pred, batch_features = model(video_batch) # (51, 487), (51, 512)
if args.l2_normalize:
batch_features = F.normalize(batch_features, dim=1)
features[i*args.batch_size:(i+1)*args.batch_size] = batch_features
features = features.cpu().numpy()
if args.half_precision:
if args.type == "vae":
features = features.astype(np.int16)
else:
features = features.astype('float16')
else:
if args.type == "vae":
features = features.astype(np.int32)
else:
features = features.astype('float32')
np.save(output_file, features)
else:
print('Video {} error.'.format(input_file))
| EXA-1-master | exa/models/unilm-master/edgelm/examples/MMPT/scripts/video_feature_extractor/extract.py |
# Copyright (c) Facebook, Inc. All Rights Reserved
import numpy as np
from torch.utils.data.sampler import Sampler
class RandomSequenceSampler(Sampler):
def __init__(self, n_sample, seq_len):
self.n_sample = n_sample
self.seq_len = seq_len
def _pad_ind(self, ind):
zeros = np.zeros(self.seq_len - self.n_sample % self.seq_len)
ind = np.concatenate((ind, zeros))
return ind
def __iter__(self):
idx = np.arange(self.n_sample)
if self.n_sample % self.seq_len != 0:
idx = self._pad_ind(idx)
idx = np.reshape(idx, (-1, self.seq_len))
np.random.shuffle(idx)
idx = np.reshape(idx, (-1))
return iter(idx.astype(int))
def __len__(self):
return self.n_sample + (self.seq_len - self.n_sample % self.seq_len)
| EXA-1-master | exa/models/unilm-master/edgelm/examples/MMPT/scripts/video_feature_extractor/random_sequence_shuffler.py |
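A toy walk-through of `RandomSequenceSampler`: runs of `seq_len` consecutive indices stay together while the runs themselves are shuffled; the numbers below are illustrative.

import numpy as np

n_sample, seq_len = 10, 4
idx = np.arange(n_sample)
idx = np.concatenate((idx, np.zeros(seq_len - n_sample % seq_len)))  # pad to 12
idx = np.reshape(idx, (-1, seq_len))  # rows: [0..3], [4..7], [8, 9, 0, 0]
np.random.shuffle(idx)                # shuffles whole rows, not single indices
order = np.reshape(idx, (-1)).astype(int)
print(order)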
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import pickle
import os
import argparse
import numpy as np
from torch.utils.data import Dataset, DataLoader
from mmpt.processors import PKLJSONStrTextProcessor
from mmpt.utils import ShardedTensor, recursive_config
class TokenizerDataset(Dataset):
def __init__(self, config):
self.text_processor = PKLJSONStrTextProcessor(config)
self.video_ids = list(self.text_processor.data.keys())
def __getitem__(self, idx):
video_id = self.video_ids[idx]
return video_id, self.text_processor(video_id)
def __len__(self):
return len(self.video_ids)
def numpify(shard_idx, video_ids, captions, target_dir, split, prefix, max_cap_len=32):
startends = []
caps_ids = []
for video_id in video_ids:
caption = captions[video_id]
startend = []
cap_ids = []
for start, end, cap in zip(
caption["start"], caption["end"], caption["cap"]):
startend.append(np.array([start, end]).astype("float32"))
cap_id = np.full((max_cap_len,), -1, dtype=np.int32)
cap = cap[:max_cap_len]
cap_id[:len(cap)] = cap
cap_ids.append(cap_id)
startends.append(np.stack(startend))
caps_ids.append(np.stack(cap_ids))
startends = ShardedTensor.from_list(startends)
target_path = os.path.join(
target_dir,
prefix + split + "_" + str(shard_idx)
)
print("save to", target_path)
startends.save(target_path + ".startends")
caps_ids = ShardedTensor.from_list(caps_ids)
caps_ids.save(target_path + ".caps_ids")
def sharding(config, out_file):
with open(out_file, "rb") as fr:
captions = pickle.load(fr)
target_dir = config.target_dir
prefix = os.path.basename(
os.path.splitext(config.caption_pkl_path)[0]
) + "." + config.bert_name + "."
for split in ["train", "val"]:
target_path = os.path.join(target_dir, split + "_meta")
with open(target_path + ".pkl", "rb") as fr:
meta = pickle.load(fr)
print("load meta", target_path, len(meta))
for shard_id in meta:
numpify(
shard_id, meta[shard_id], captions,
target_dir, split, prefix
)
def tokenize(config, out_file):
def collator(samples):
return samples
dataset = TokenizerDataset(config)
data = {}
for idx, batch in enumerate(
DataLoader(dataset, collate_fn=collator, num_workers=16)):
for video_id, caption in batch:
data[video_id] = caption
if idx % 5000 == 0:
print(idx)
with open(out_file, "wb") as fw:
pickle.dump(data, fw, pickle.HIGHEST_PROTOCOL)
def main(args):
config = recursive_config(args.config).dataset
out_file = os.path.splitext(config.caption_pkl_path)[0] \
+ "." + config.bert_name + ".pkl"
if not os.path.isfile(out_file):
tokenize(config, out_file)
sharding(config, out_file)
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="pretokenize (raw_)caption.json into pkl.")
parser.add_argument('config', type=str)
args = parser.parse_args()
main(args)
| EXA-1-master | exa/models/unilm-master/edgelm/examples/MMPT/scripts/text_token_extractor/pretokenization.py |
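A small sketch of the fixed-width caption packing done in `numpify()`: token ids are written into a -1 filled array of length `max_cap_len`, truncating longer captions. The ids and the shorter `max_cap_len` are hypothetical.

import numpy as np

max_cap_len = 8                       # the script above uses 32
cap = [101, 2023, 2003, 1037, 102]    # made-up token ids
cap_id = np.full((max_cap_len,), -1, dtype=np.int32)
cap = cap[:max_cap_len]
cap_id[:len(cap)] = cap
print(cap_id)                         # [ 101 2023 2003 1037  102   -1   -1   -1]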
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os
import glob
import argparse
import pprint
import omegaconf
from omegaconf import OmegaConf
from torch.utils.data import DataLoader
from mmpt.utils import load_config, set_seed
from mmpt.evaluators import Evaluator
from mmpt.evaluators import predictor as predictor_path
from mmpt.tasks import Task
from mmpt import processors
from mmpt.datasets import MMDataset
def get_dataloader(config):
meta_processor_cls = getattr(processors, config.dataset.meta_processor)
video_processor_cls = getattr(processors, config.dataset.video_processor)
text_processor_cls = getattr(processors, config.dataset.text_processor)
aligner_cls = getattr(processors, config.dataset.aligner)
meta_processor = meta_processor_cls(config.dataset)
video_processor = video_processor_cls(config.dataset)
text_processor = text_processor_cls(config.dataset)
aligner = aligner_cls(config.dataset)
test_data = MMDataset(
meta_processor,
video_processor,
text_processor,
aligner,
)
print("test_len", len(test_data))
output = test_data[0]
test_data.print_example(output)
test_dataloader = DataLoader(
test_data,
batch_size=config.fairseq.dataset.batch_size,
shuffle=False,
num_workers=6,
collate_fn=test_data.collater,
)
return test_dataloader
def main(args):
config = load_config(args)
if isinstance(config, omegaconf.dictconfig.DictConfig):
print(OmegaConf.to_yaml(config))
else:
pp = pprint.PrettyPrinter(indent=4)
pp.pprint(config)
mmtask = Task.config_task(config)
mmtask.build_model()
test_dataloader = get_dataloader(config)
checkpoint_search_path = os.path.dirname(config.eval.save_path)
results = []
prefix = os.path.basename(args.taskconfig)
if prefix.startswith("test"):
# loop all checkpoint for datasets without validation set.
if "best" not in config.fairseq.common_eval.path:
print("eval each epoch.")
for checkpoint in glob.glob(checkpoint_search_path + "/checkpoint*"):
model = mmtask.load_checkpoint(checkpoint)
ckpt = os.path.basename(checkpoint)
evaluator = Evaluator(config)
output = evaluator.evaluate(
model, test_dataloader, ckpt + "_merged")
results.append((checkpoint, output))
# use the one specified by the config lastly.
model = mmtask.load_checkpoint(config.fairseq.common_eval.path)
evaluator = Evaluator(config)
output = evaluator.evaluate(model, test_dataloader)
results.append((config.fairseq.common_eval.path, output))
best_result = None
best_metric = 0.
for checkpoint, result in results:
print(checkpoint)
evaluator.metric.print_computed_metrics(result)
best_score = evaluator.metric.best_metric(result)
if best_score > best_metric:
best_result = (checkpoint, result)
best_metric = best_score
print("best results:")
print(best_result[0])
evaluator.metric.print_computed_metrics(best_result[1])
elif prefix.startswith("vis"):
model = mmtask.load_checkpoint(config.fairseq.common_eval.path)
predictor_cls = getattr(predictor_path, config.predictor)
predictor = predictor_cls(config)
predictor.predict_loop(model, test_dataloader, mmtask, None)
else:
raise ValueError("unknown prefix of the config file", args.taskconfig)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("taskconfig", type=str)
args = parser.parse_args()
main(args)
| EXA-1-master | exa/models/unilm-master/edgelm/examples/MMPT/mmpt_cli/predict.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os
from mmpt.utils import recursive_config
class BaseJob(object):
def __init__(self, yaml_file, dryrun=False):
self.yaml_file = yaml_file
self.config = recursive_config(yaml_file)
self.dryrun = dryrun
def submit(self, **kwargs):
raise NotImplementedError
def _normalize_cmd(self, cmd_list):
cmd_list = list(cmd_list)
yaml_index = cmd_list.index("[yaml]")
cmd_list[yaml_index] = self.yaml_file
return cmd_list
class LocalJob(BaseJob):
CMD_CONFIG = {
"local_single": [
"fairseq-train", "[yaml]", "--user-dir", "mmpt",
"--task", "mmtask", "--arch", "mmarch",
"--criterion", "mmloss",
],
"local_small": [
"fairseq-train", "[yaml]", "--user-dir", "mmpt",
"--task", "mmtask", "--arch", "mmarch",
"--criterion", "mmloss",
"--distributed-world-size", "2"
],
"local_big": [
"fairseq-train", "[yaml]", "--user-dir", "mmpt",
"--task", "mmtask", "--arch", "mmarch",
"--criterion", "mmloss",
"--distributed-world-size", "8"
],
"local_predict": ["python", "mmpt_cli/predict.py", "[yaml]"],
}
def __init__(self, yaml_file, job_type=None, dryrun=False):
super().__init__(yaml_file, dryrun)
if job_type is None:
self.job_type = "local_single"
if self.config.task_type is not None:
self.job_type = self.config.task_type
else:
self.job_type = job_type
if self.job_type in ["local_single", "local_small"]:
if self.config.fairseq.dataset.batch_size > 32:
print("decreasing batch_size to 32 for local testing?")
def submit(self):
cmd_list = self._normalize_cmd(LocalJob.CMD_CONFIG[self.job_type])
if "predict" not in self.job_type:
# append fairseq args.
from mmpt.utils import load_config
config = load_config(config_file=self.yaml_file)
for field in config.fairseq:
for key in config.fairseq[field]:
if key in ["fp16", "reset_optimizer", "reset_dataloader", "reset_meters"]: # a list of binary flag.
param = ["--" + key.replace("_", "-")]
else:
if key == "lr":
value = str(config.fairseq[field][key][0])
elif key == "adam_betas":
value = "'"+str(config.fairseq[field][key])+"'"
else:
value = str(config.fairseq[field][key])
param = [
"--" + key.replace("_", "-"),
value
]
cmd_list.extend(param)
print("launching", " ".join(cmd_list))
if not self.dryrun:
os.system(" ".join(cmd_list))
return JobStatus("12345678")
class JobStatus(object):
def __init__(self, job_id):
self.job_id = job_id
def __repr__(self):
return self.job_id
def __str__(self):
return self.job_id
def done(self):
return False
def running(self):
return False
def result(self):
if self.done():
return "{} is done.".format(self.job_id)
else:
return "{} is running.".format(self.job_id)
def stderr(self):
return self.result()
def stdout(self):
return self.result()
| EXA-1-master | exa/models/unilm-master/edgelm/examples/MMPT/mmpt_cli/localjob.py |
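A simplified, stand-alone sketch of the flag-building loop in `LocalJob.submit()`: nested config keys become `--key-with-dashes value` pairs and a few keys are treated as binary flags. The example dict is invented and the special handling of `lr`/`adam_betas` is omitted.

def config_to_flags(fairseq_cfg, binary_flags=("fp16", "reset_optimizer")):
    cmd = []
    for field in fairseq_cfg:
        for key, value in fairseq_cfg[field].items():
            if key in binary_flags:
                cmd.append("--" + key.replace("_", "-"))
            else:
                cmd.extend(["--" + key.replace("_", "-"), str(value)])
    return cmd

print(config_to_flags({"optimization": {"max_epoch": 10, "fp16": True}}))
# ['--max-epoch', '10', '--fp16']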
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from . import rxf_src # noqa
| EXA-1-master | exa/models/unilm-master/edgelm/examples/rxf/__init__.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from . import label_smoothed_cross_entropy_r3f, sentence_prediction_r3f # noqa
| EXA-1-master | exa/models/unilm-master/edgelm/examples/rxf/rxf_src/__init__.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import math
import torch
import torch.nn.functional as F
from fairseq import utils
from fairseq.criterions import FairseqCriterion, register_criterion
@register_criterion("sentence_prediction_r3f")
class SentencePredictionR3F(FairseqCriterion):
def __init__(
self,
task,
eps,
r3f_lambda,
noise_type,
classification_head_name,
regression_target,
):
super().__init__(task)
self.eps = eps
self.r3f_lambda = r3f_lambda
self.noise_type = noise_type
self.classification_head_name = classification_head_name
self.regression_target = regression_target
if self.noise_type in {"normal"}:
self.noise_sampler = torch.distributions.normal.Normal(
loc=0.0, scale=self.eps
)
elif self.noise_type == "uniform":
self.noise_sampler = torch.distributions.uniform.Uniform(
low=-self.eps, high=self.eps
)
else:
raise Exception(f"unrecognized noise type {self.noise_type}")
@staticmethod
def add_args(parser):
# fmt: off
parser.add_argument('--eps', type=float, default=1e-5,
help='noise eps')
parser.add_argument('--r3f-lambda', type=float, default=1.0,
help='lambda for combining logistic loss and noisy KL loss')
parser.add_argument('--noise-type', type=str, default='uniform',
choices=['normal', 'uniform'],
help='type of noises for RXF methods')
parser.add_argument('--classification-head-name',
default='sentence_classification_head',
help='name of the classification head to use')
parser.add_argument('--regression-target', action='store_true')
# fmt: on
def _get_symm_kl(self, noised_logits, input_logits):
return (
F.kl_div(
F.log_softmax(noised_logits, dim=-1, dtype=torch.float32),
F.softmax(input_logits, dim=-1, dtype=torch.float32),
None,
None,
"sum",
)
+ F.kl_div(
F.log_softmax(input_logits, dim=-1, dtype=torch.float32),
F.softmax(noised_logits, dim=-1, dtype=torch.float32),
None,
None,
"sum",
)
) / noised_logits.size(0)
def forward(self, model, sample, reduce=True):
"""Compute the loss for the given sample.
Returns a tuple with three elements:
1) the loss
2) the sample size, which is used as the denominator for the gradient
3) logging outputs to display while training
"""
assert (
hasattr(model, "classification_heads")
and self.classification_head_name in model.classification_heads
), "model must provide sentence classification head for --criterion=sentence_prediction"
token_embeddings = model.encoder.sentence_encoder.embed_tokens(
sample["net_input"]["src_tokens"]
)
input_logits, _ = model(
**sample["net_input"],
features_only=True,
classification_head_name=self.classification_head_name,
token_embeddings=token_embeddings,
)
if model.training and self.noise_sampler:
noise = self.noise_sampler.sample(sample_shape=token_embeddings.shape).to(
token_embeddings
)
noised_embeddings = token_embeddings.detach().clone() + noise
noised_logits, _ = model(
**sample["net_input"],
features_only=True,
classification_head_name=self.classification_head_name,
token_embeddings=noised_embeddings,
)
symm_kl = self._get_symm_kl(noised_logits, input_logits)
else:
symm_kl = 0
targets = model.get_targets(sample, [input_logits]).view(-1)
sample_size = targets.numel()
if not self.regression_target:
loss = F.nll_loss(
F.log_softmax(input_logits, dim=-1, dtype=torch.float32),
targets,
reduction="sum",
)
if model.training:
symm_kl = symm_kl * sample_size
loss = loss + self.r3f_lambda * symm_kl
else:
logits = input_logits.squeeze().float()
targets = targets.float()
loss = F.mse_loss(logits, targets, reduction="sum")
logging_output = {
"loss": utils.item(loss.data) if reduce else loss.data,
"ntokens": sample["ntokens"],
"nsentences": sample_size,
"sample_size": sample_size,
}
if not self.regression_target:
preds = input_logits.max(dim=1)[1]
logging_output.update(ncorrect=(preds == targets).sum().item())
if model.training and self.noise_sampler:
logging_output.update(
symm_kl=utils.item(symm_kl.data) if reduce else symm_kl.data
)
return loss, sample_size, logging_output
@staticmethod
def aggregate_logging_outputs(logging_outputs):
"""Aggregate logging outputs from data parallel training."""
loss_sum = sum(log.get("loss", 0) for log in logging_outputs)
symm_kl_sum = sum(log.get("symm_kl", 0) for log in logging_outputs)
ntokens = sum(log.get("ntokens", 0) for log in logging_outputs)
nsentences = sum(log.get("nsentences", 0) for log in logging_outputs)
sample_size = sum(log.get("sample_size", 0) for log in logging_outputs)
agg_output = {
"loss": loss_sum / sample_size / math.log(2),
"symm_kl": symm_kl_sum / sample_size,
"ntokens": ntokens,
"nsentences": nsentences,
"sample_size": sample_size,
}
if len(logging_outputs) > 0 and "ncorrect" in logging_outputs[0]:
ncorrect = sum(log.get("ncorrect", 0) for log in logging_outputs)
agg_output.update(accuracy=ncorrect / nsentences)
if sample_size != ntokens:
agg_output["nll_loss"] = loss_sum / ntokens / math.log(2)
return agg_output
| EXA-1-master | exa/models/unilm-master/edgelm/examples/rxf/rxf_src/sentence_prediction_r3f.py |
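A toy numerical check (random tensors) of the symmetric KL term computed by `_get_symm_kl` above: it is non-negative and unchanged when the two sets of logits are swapped.

import torch
import torch.nn.functional as F

def symm_kl(a, b):
    return (
        F.kl_div(F.log_softmax(a, dim=-1), F.softmax(b, dim=-1), reduction="sum")
        + F.kl_div(F.log_softmax(b, dim=-1), F.softmax(a, dim=-1), reduction="sum")
    ) / a.size(0)

x, y = torch.randn(4, 3), torch.randn(4, 3)
assert symm_kl(x, y) >= 0
assert torch.isclose(symm_kl(x, y), symm_kl(y, x))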
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import math
import torch
import torch.nn.functional as F
from fairseq import metrics, utils
from fairseq.criterions import FairseqCriterion, register_criterion
from fairseq.criterions.label_smoothed_cross_entropy import label_smoothed_nll_loss
@register_criterion("label_smoothed_cross_entropy_r3f")
class LabelSmoothedCrossEntropyR3FCriterion(FairseqCriterion):
def __init__(
self, task, sentence_avg, label_smoothing, eps, r3f_lambda, noise_type
):
super().__init__(task)
self.sentence_avg = sentence_avg
self.label_smoothing = label_smoothing
self.eps = eps
self.r3f_lambda = r3f_lambda
self.noise_type = noise_type
if self.noise_type in {"normal"}:
self.noise_sampler = torch.distributions.normal.Normal(
loc=0.0, scale=self.eps
)
elif self.noise_type == "uniform":
self.noise_sampler = torch.distributions.uniform.Uniform(
low=-self.eps, high=self.eps
)
else:
raise Exception(f"unrecognized noise type {self.noise_type}")
@staticmethod
def add_args(parser):
"""Add criterion-specific arguments to the parser."""
# fmt: off
parser.add_argument('--label-smoothing', default=0., type=float, metavar='D',
help='epsilon for label smoothing, 0 means no label smoothing')
parser.add_argument('--eps', type=float, default=1e-5,
help='noise eps')
parser.add_argument('--r3f-lambda', type=float, default=1.0,
help='lambda for combining logistic loss and noisy KL loss')
parser.add_argument('--noise-type', type=str, default='normal',
choices=['normal', 'uniform'],
help='type of noises')
# fmt: on
def _get_symm_kl(self, noised_logits, input_logits):
return (
F.kl_div(
F.log_softmax(noised_logits, dim=-1, dtype=torch.float32),
F.softmax(input_logits, dim=-1, dtype=torch.float32),
None,
None,
"sum",
)
+ F.kl_div(
F.log_softmax(input_logits, dim=-1, dtype=torch.float32),
F.softmax(noised_logits, dim=-1, dtype=torch.float32),
None,
None,
"sum",
)
) / noised_logits.size(0)
def forward(self, model, sample, reduce=True):
"""Compute the loss for the given sample.
Returns a tuple with three elements:
1) the loss
2) the sample size, which is used as the denominator for the gradient
3) logging outputs to display while training
"""
token_embeddings = model.encoder.embed_tokens(sample["net_input"]["src_tokens"])
input_logits, extra = model(**sample["net_input"])
loss, nll_loss = self.compute_loss(
model, (input_logits, extra), sample, reduce=reduce
)
sample_size = (
sample["target"].size(0) if self.sentence_avg else sample["ntokens"]
)
if model.training:
noise = self.noise_sampler.sample(sample_shape=token_embeddings.shape).to(
token_embeddings
)
noised_embeddings = token_embeddings.clone() + noise
noised_logits, _ = model(
**sample["net_input"], token_embeddings=noised_embeddings
)
symm_kl = self._get_symm_kl(noised_logits, input_logits)
if model.training:
symm_kl = symm_kl * sample_size
loss = loss + self.r3f_lambda * symm_kl
logging_output = {
"loss": loss.data,
"nll_loss": nll_loss.data,
"ntokens": sample["ntokens"],
"nsentences": sample["target"].size(0),
"sample_size": sample_size,
}
if model.training:
logging_output.update(
symm_kl=utils.item(symm_kl.data) if reduce else symm_kl.data
)
return loss, sample_size, logging_output
def compute_loss(self, model, net_output, sample, reduce=True):
lprobs = model.get_normalized_probs(net_output, log_probs=True)
lprobs = lprobs.view(-1, lprobs.size(-1))
target = model.get_targets(sample, net_output).view(-1, 1)
loss, nll_loss = label_smoothed_nll_loss(
lprobs,
target,
self.label_smoothing,
ignore_index=self.padding_idx,
reduce=reduce,
)
return loss, nll_loss
@staticmethod
def reduce_metrics(logging_outputs) -> None:
"""Aggregate logging outputs from data parallel training."""
loss_sum = sum(log.get("loss", 0) for log in logging_outputs)
nll_loss_sum = sum(log.get("nll_loss", 0) for log in logging_outputs)
ntokens = sum(log.get("ntokens", 0) for log in logging_outputs)
sample_size = sum(log.get("sample_size", 0) for log in logging_outputs)
symm_kl_sum = sum(log.get("symm_kl", 0) for log in logging_outputs)
metrics.log_scalar("symm_kl", symm_kl_sum / sample_size, sample_size, round=3)
metrics.log_scalar(
"loss", loss_sum / sample_size / math.log(2), sample_size, round=3
)
metrics.log_scalar(
"nll_loss", nll_loss_sum / ntokens / math.log(2), ntokens, round=3
)
metrics.log_derived(
"ppl", lambda meters: utils.get_perplexity(meters["nll_loss"].avg)
)
@staticmethod
def logging_outputs_can_be_summed() -> bool:
"""
Whether the logging outputs returned by `forward` can be summed
across workers prior to calling `reduce_metrics`. Setting this
to True will improve distributed training speed.
"""
return True
| EXA-1-master | exa/models/unilm-master/edgelm/examples/rxf/rxf_src/label_smoothed_cross_entropy_r3f.py |
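A minimal sketch (toy shapes, hypothetical eps) of the R3F perturbation applied in `forward()`: uniform noise of magnitude `eps` is added to a copy of the token embeddings before the second, noised forward pass.

import torch

eps = 1e-5
token_embeddings = torch.randn(2, 7, 16)   # B x T x C, illustrative only
noise_sampler = torch.distributions.uniform.Uniform(low=-eps, high=eps)
noise = noise_sampler.sample(sample_shape=token_embeddings.shape).to(token_embeddings)
noised_embeddings = token_embeddings.clone() + noise
assert noised_embeddings.shape == token_embeddings.shape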
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from . import transformer_xl_model, truncated_bptt_lm_task # noqa
| EXA-1-master | exa/models/unilm-master/edgelm/examples/truncated_bptt/__init__.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
import os
from dataclasses import dataclass, field
from typing import List, Optional, Tuple
import torch
from fairseq import utils
from fairseq.data import (
Dictionary,
TokenBlockDataset,
data_utils,
iterators,
)
from fairseq.dataclass import FairseqDataclass
from fairseq.distributed import utils as dist_utils
from fairseq.tasks import FairseqTask, register_task
from omegaconf import II
logger = logging.getLogger(__name__)
@dataclass
class TruncatedBPTTLMConfig(FairseqDataclass):
data: str = field(default="???", metadata={"help": "path to data directory"})
tokens_per_sample: int = field(
default=1024,
metadata={"help": "max number of tokens per sequence"},
)
batch_size: int = II("dataset.batch_size")
# Some models use *max_target_positions* to know how many positional
# embeddings to learn. We use II(...) to make it default to
# *tokens_per_sample*, but in principle there could be more positional
# embeddings than tokens in a single batch. This may also be irrelevant for
# custom model implementations.
max_target_positions: int = II("task.tokens_per_sample")
# these will be populated automatically if not provided
data_parallel_rank: Optional[int] = None
data_parallel_size: Optional[int] = None
@register_task("truncated_bptt_lm", dataclass=TruncatedBPTTLMConfig)
class TruncatedBPTTLMTask(FairseqTask):
def __init__(self, cfg: TruncatedBPTTLMConfig):
super().__init__(cfg)
if cfg.data_parallel_rank is None or cfg.data_parallel_size is None:
if torch.distributed.is_initialized():
cfg.data_parallel_rank = dist_utils.get_data_parallel_rank()
cfg.data_parallel_size = dist_utils.get_data_parallel_world_size()
else:
cfg.data_parallel_rank = 0
cfg.data_parallel_size = 1
# load the dictionary
paths = utils.split_paths(cfg.data)
assert len(paths) > 0
self.dictionary = Dictionary.load(os.path.join(paths[0], "dict.txt"))
logger.info("dictionary: {} types".format(len(self.dictionary)))
def load_dataset(self, split, epoch=1, combine=False, **kwargs):
"""Load a given dataset split (e.g., train, valid, test)"""
# support sharded datasets
paths = utils.split_paths(self.cfg.data)
assert len(paths) > 0
data_path = paths[(epoch - 1) % len(paths)]
split_path = os.path.join(data_path, split)
# each element of *data* will be a tensorized line from the original
# text dataset, similar to ``open(split_path).readlines()``
data = data_utils.load_indexed_dataset(
split_path, self.dictionary, combine=combine
)
if data is None:
raise FileNotFoundError(
"Dataset not found: {} ({})".format(split, split_path)
)
# this is similar to ``data.view(-1).split(tokens_per_sample)``
data = TokenBlockDataset(
data,
data.sizes,
block_size=self.cfg.tokens_per_sample,
pad=None, # unused
eos=None, # unused
break_mode="none",
)
self.datasets[split] = TruncatedBPTTDataset(
data=data,
bsz_per_shard=self.cfg.batch_size,
shard_id=self.cfg.data_parallel_rank,
num_shards=self.cfg.data_parallel_size,
)
def dataset(self, split):
return self.datasets[split]
def get_batch_iterator(
self, dataset, num_workers=0, epoch=1, data_buffer_size=0, **kwargs
):
return iterators.EpochBatchIterator(
dataset=dataset,
collate_fn=self._collate_fn,
num_workers=num_workers,
epoch=epoch,
buffer_size=data_buffer_size,
# we don't use the batching functionality from EpochBatchIterator;
# instead every item in *dataset* is a whole batch
batch_sampler=[[i] for i in range(len(dataset))],
disable_shuffling=True,
)
def _collate_fn(self, items: List[List[torch.Tensor]]):
# we don't use fairseq's batching functionality, so we expect a single
# Tensor of type List[torch.Tensor]
assert len(items) == 1
# item will have shape B x T (the last batch may have length < T)
id, item = items[0]
item = data_utils.collate_tokens(item, pad_idx=self.source_dictionary.pad())
B, T = item.size()
# shift item one position over and append a padding token for the target
target = torch.nn.functional.pad(
item[:, 1:], (0, 1, 0, 0), value=self.target_dictionary.pad()
)
# fairseq expects batches to have the following structure
return {
"id": torch.tensor([id]*item.size(0)),
"net_input": {
"src_tokens": item,
},
"target": target,
"nsentences": item.size(0),
"ntokens": item.numel(),
}
def build_dataset_for_inference(
self, src_tokens: List[torch.Tensor], src_lengths: List[int], **kwargs
) -> torch.utils.data.Dataset:
eos = self.source_dictionary.eos()
dataset = TokenBlockDataset(
src_tokens,
src_lengths,
block_size=None, # ignored for "eos" break mode
pad=self.source_dictionary.pad(),
eos=eos,
break_mode="eos",
)
class Dataset(torch.utils.data.Dataset):
def __getitem__(self, i):
item = dataset[i]
if item[-1] == eos:
# remove eos to support generating with a prefix
item = item[:-1]
return (i, [item])
def __len__(self):
return len(dataset)
return Dataset()
def inference_step(
self, generator, models, sample, prefix_tokens=None, constraints=None
):
with torch.no_grad():
if constraints is not None:
raise NotImplementedError
# SequenceGenerator doesn't use *src_tokens* directly, we need to
# pass the *prefix_tokens* argument instead.
if prefix_tokens is None and sample["net_input"]["src_tokens"].nelement():
prefix_tokens = sample["net_input"]["src_tokens"]
# begin generation with the end-of-sentence token
bos_token = self.source_dictionary.eos()
return generator.generate(
models, sample, prefix_tokens=prefix_tokens, bos_token=bos_token
)
def eval_lm_dataloader(
self,
dataset,
max_tokens: Optional[int] = 36000,
batch_size: Optional[int] = None,
max_positions: Optional[int] = None,
num_shards: int = 1,
shard_id: int = 0,
num_workers: int = 1,
data_buffer_size: int = 10,
context_window: int = 0,
):
if context_window > 0:
raise NotImplementedError(
"Transformer-XL doesn't need --context-window, try "
"--model-overrides '{\"mem_len\":42}' instead "
)
return self.get_batch_iterator(
dataset=dataset,
max_tokens=max_tokens,
max_sentences=batch_size,
max_positions=max_positions,
ignore_invalid_inputs=True,
num_shards=num_shards,
shard_id=shard_id,
num_workers=num_workers,
data_buffer_size=data_buffer_size,
).next_epoch_itr(shuffle=False)
@property
def source_dictionary(self):
return self.dictionary
@property
def target_dictionary(self):
return self.dictionary
class TruncatedBPTTDataset(torch.utils.data.Dataset):
def __init__(
self,
data: List[torch.Tensor], # ordered list of items
bsz_per_shard, # number of items processed per GPUs per forward
shard_id, # current GPU ID
num_shards, # number of GPUs
):
super().__init__()
self.data = data
def batchify(data, bsz):
# Work out how cleanly we can divide the dataset into bsz parts.
nbatch = data.size(0) // bsz
# Trim off any extra elements that wouldn't cleanly fit (remainders).
data = data.narrow(0, 0, nbatch * bsz)
# Evenly divide the data across the bsz batches.
data = data.view(bsz, -1).contiguous()
return data
# total number of sequences processed by all GPUs in each forward pass
global_batch_size = bsz_per_shard * num_shards
"""
With a 16 item dataset, bsz_per_shard=2 and num_shards=3,
*indices* might look like:
indices = [[0, 1],
[2, 3],
[4, 5],
[6, 7],
[8, 9],
[10, 11]]
The size of the TruncatedBPTTDataset instance will be 2,
and shard 1 will see items:
[(0, [data[4], data[6]]),
(1, [data[5], data[7]])]
"""
indices = batchify(torch.arange(len(data)), global_batch_size)
assert indices.size(0) == global_batch_size
self.my_indices = indices[
shard_id * bsz_per_shard : (shard_id + 1) * bsz_per_shard
]
assert self.my_indices.size(0) == bsz_per_shard
def __len__(self):
return self.my_indices.size(1)
def __getitem__(self, i) -> Tuple[int, List[torch.Tensor]]:
return (i, [self.data[idx] for idx in self.my_indices[:, i]])
| EXA-1-master | exa/models/unilm-master/edgelm/examples/truncated_bptt/truncated_bptt_lm_task.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
from dataclasses import dataclass, field
from typing import Dict, List, Optional
import torch
from fairseq.dataclass import FairseqDataclass
from fairseq.models import (
FairseqIncrementalDecoder,
FairseqLanguageModel,
register_model,
)
from fairseq.modules.checkpoint_activations import checkpoint_wrapper
from omegaconf import II
logger = logging.getLogger(__name__)
@dataclass
class TransformerXLConfig(FairseqDataclass):
# defaults come from the original Transformer-XL code
cutoffs: List[int] = field(default_factory=lambda: [20000, 40000, 200000])
d_model: int = 500
n_head: int = 10
d_head: int = 50
d_inner: int = 1000
div_val: int = 1
n_layer: int = 12
mem_len: int = 0
clamp_len: int = -1
same_length: bool = False
dropout: float = 0.0
dropatt: float = 0.0
checkpoint_activations: bool = False
offload_activations: bool = False
max_target_positions: int = II("task.max_target_positions")
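# Editor's note (hypothetical usage, not from the original repo docs): at
# evaluation time these defaults can be overridden on the command line via
# fairseq's --model-overrides flag, e.g.
#   fairseq-eval-lm ... --model-overrides '{"mem_len": 640, "clamp_len": 400}'
# which matches the hint printed by the task when --context-window is passed.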
@register_model("transformer_xl", dataclass=TransformerXLConfig)
class TransformerXLLanguageModel(FairseqLanguageModel):
@classmethod
def build_model(cls, cfg: TransformerXLConfig, task):
return cls(TransformerXLDecoder(cfg, task))
class TransformerXLDecoder(FairseqIncrementalDecoder):
def __init__(self, cfg, task):
try:
from transformers.models.transfo_xl import (
TransfoXLConfig,
TransfoXLLMHeadModel,
)
except ImportError:
from transformers.configuration_transfo_xl import TransfoXLConfig
from transformers.modeling_transfo_xl import TransfoXLLMHeadModel
super().__init__(task.target_dictionary)
self.cfg = cfg
# remove any cutoffs larger than the vocab size
cutoffs = [
cutoff for cutoff in cfg.cutoffs if cutoff < len(task.target_dictionary)
]
config = TransfoXLConfig(
vocab_size=len(task.target_dictionary),
cutoffs=cutoffs,
d_model=cfg.d_model,
d_embed=cfg.d_model,
n_head=cfg.n_head,
d_head=cfg.d_head,
d_inner=cfg.d_inner,
div_val=cfg.div_val,
n_layer=cfg.n_layer,
mem_len=cfg.mem_len,
clamp_len=cfg.clamp_len,
same_length=cfg.same_length,
dropout=cfg.dropout,
dropatt=cfg.dropatt,
)
logger.info(config)
self.model = TransfoXLLMHeadModel(config)
# Workaround a bug in huggingface's ``ProjectedAdaptiveLogSoftmax``
# which adds ``None`` values to an ``nn.ParameterList``, which is not
# supported in PyTorch. Instead we can replace this with an
# ``nn.ModuleList``, which does support ``None`` values.
try:
if all(p is None for p in self.model.crit.out_projs._parameters.values()):
self.model.crit.out_projs = torch.nn.ModuleList(
[None] * len(self.model.crit.out_projs._parameters)
)
except Exception:
pass
if cfg.checkpoint_activations or cfg.offload_activations:
for i in range(len(self.model.transformer.layers)):
self.model.transformer.layers[i] = checkpoint_wrapper(
self.model.transformer.layers[i],
offload_to_cpu=cfg.offload_activations,
)
# TODO: may save mem to wrap(layer.pos_ff.CoreNet[3])
self._mems = None
def forward(
self,
src_tokens,
src_lengths=None, # unused
incremental_state: Optional[Dict[str, List[torch.Tensor]]] = None,
encoder_out=None,
):
if incremental_state is not None: # used during inference
mems = self.get_incremental_state(incremental_state, "mems")
src_tokens = src_tokens[:, -1:] # only keep the most recent token
else:
mems = self._mems
output = self.model(
input_ids=src_tokens,
mems=mems,
return_dict=False,
)
if len(output) >= 2:
if incremental_state is not None:
self.set_incremental_state(incremental_state, "mems", output[1])
else:
self._mems = output[1]
return (output[0],)
def max_positions(self):
return self.cfg.max_target_positions
def reorder_incremental_state(
self,
incremental_state: Dict[str, Dict[str, Optional[torch.Tensor]]],
new_order: torch.Tensor,
):
"""Reorder incremental state.
This will be called when the order of the input has changed from the
previous time step. A typical use case is beam search, where the input
order changes between time steps based on the selection of beams.
"""
mems = self.get_incremental_state(incremental_state, "mems")
if mems is not None:
new_mems = [mems_i.index_select(1, new_order) for mems_i in mems]
self.set_incremental_state(incremental_state, "mems", new_mems)
| EXA-1-master | exa/models/unilm-master/edgelm/examples/truncated_bptt/transformer_xl_model.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from . import tasks, criterions, models # noqa
| EXA-1-master | exa/models/unilm-master/edgelm/examples/speech_text_joint_to_text/__init__.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
import os
from argparse import Namespace
from pathlib import Path
import torch
from fairseq.data import (
encoders,
Dictionary,
ResamplingDataset,
TransformEosLangPairDataset,
ConcatDataset,
)
from fairseq.data.iterators import GroupedEpochBatchIterator
from fairseq.data.audio.multi_modality_dataset import (
MultiModalityDataset,
LangPairMaskDataset,
ModalityDatasetItem,
)
from fairseq.data.audio.speech_to_text_dataset import SpeechToTextDataset, SpeechToTextDatasetCreator
from fairseq.data.audio.speech_to_text_joint_dataset import (
S2TJointDataConfig,
SpeechToTextJointDatasetCreator,
)
from fairseq.tasks import register_task
from fairseq.tasks.speech_to_text import SpeechToTextTask
from fairseq.tasks.translation import load_langpair_dataset
logger = logging.getLogger(__name__)
LANG_TAG_TEMPLATE = "<lang:{}>"
@register_task("speech_text_joint_to_text")
class SpeechTextJointToTextTask(SpeechToTextTask):
"""
Task for joint speech-to-text and text-to-text training.
"""
@classmethod
def add_args(cls, parser):
"""Add task-specific arguments to the parser."""
super(SpeechTextJointToTextTask, cls).add_args(parser)
###
parser.add_argument(
"--parallel-text-data",
default="",
help="path to parallel text data directory",
)
parser.add_argument(
"--max-tokens-text",
type=int,
metavar="N",
help="maximum tokens for encoder text input ",
)
parser.add_argument(
"--max-positions-text",
type=int,
metavar="N",
default=400,
help="maximum tokens for per encoder text input ",
)
parser.add_argument(
"--langpairs",
default=None,
metavar="S",
help='language pairs for text training, separated with ","',
)
parser.add_argument(
"--speech-sample-ratio",
default=1,
type=float,
metavar="N",
help="Multiple Ratio for speech dataset with transcripts ",
)
parser.add_argument(
"--text-sample-ratio",
default=1,
type=float,
metavar="N",
help="Multiple Ratio for text set ",
)
parser.add_argument(
"--update-mix-data",
action="store_true",
help="use mixed data in one update when update-freq > 1",
)
parser.add_argument(
"--load-speech-only",
action="store_true",
help="load speech data only",
)
parser.add_argument(
"--mask-text-ratio",
type=float,
metavar="V",
default=0.0,
help="mask V source tokens for text only mode",
)
parser.add_argument(
"--mask-text-type",
default="random",
choices=["random", "tail"],
help="mask text typed",
)
parser.add_argument(
"--noise-token",
default="",
help="noise token for masking src text tokens if mask-text-ratio > 0",
)
parser.add_argument(
"--infer-target-lang",
default="",
metavar="S",
help="target language for inference",
)
def __init__(self, args, src_dict, tgt_dict, infer_tgt_lang_id=None):
super().__init__(args, tgt_dict)
self.src_dict = src_dict
self.data_cfg = S2TJointDataConfig(Path(args.data) / args.config_yaml)
assert self.tgt_dict.pad() == self.src_dict.pad()
assert self.tgt_dict.eos() == self.src_dict.eos()
self.speech_only = args.load_speech_only
self._infer_tgt_lang_id = infer_tgt_lang_id
@classmethod
def setup_task(cls, args, **kwargs):
"""Setup the task (e.g., load dictionaries)."""
data_cfg = S2TJointDataConfig(Path(args.data) / args.config_yaml)
tgt_dict_path = Path(args.data) / data_cfg.vocab_filename
src_dict_path = Path(args.data) / data_cfg.src_vocab_filename
if (not os.path.isfile(src_dict_path)) or (not os.path.isfile(tgt_dict_path)):
raise FileNotFoundError("Dict not found: {}".format(args.data))
src_dict = Dictionary.load(src_dict_path.as_posix())
tgt_dict = Dictionary.load(tgt_dict_path.as_posix())
print("| src dictionary: {} types".format(len(src_dict)))
print("| tgt dictionary: {} types".format(len(tgt_dict)))
if args.parallel_text_data != "":
if not os.path.isabs(args.parallel_text_data):
args.parallel_text_data = os.path.join(
args.data, args.parallel_text_data
)
if args.langpairs is None:
raise Exception(
"Could not infer language pair, please provide it explicitly"
)
infer_tgt_lang_id = None
if args.infer_target_lang != "" and data_cfg.prepend_tgt_lang_tag_no_change:
tgt_lang_tag = SpeechToTextDataset.LANG_TAG_TEMPLATE.format(
args.infer_target_lang
)
infer_tgt_lang_id = tgt_dict.index(tgt_lang_tag)
assert infer_tgt_lang_id != tgt_dict.unk()
return cls(args, src_dict, tgt_dict, infer_tgt_lang_id=infer_tgt_lang_id)
def load_langpair_dataset(self, prepend_tgt_lang_tag=False, sampling_alpha=1.0, epoch=0):
lang_pairs = []
text_dataset = None
split = "train"
for lp in self.args.langpairs.split(","):
src, tgt = lp.split("-")
text_dataset = load_langpair_dataset(
self.args.parallel_text_data,
split,
src,
self.src_dict,
tgt,
self.tgt_dict,
combine=True,
dataset_impl=None,
upsample_primary=1,
left_pad_source=False,
left_pad_target=False,
max_source_positions=self.args.max_positions_text,
max_target_positions=self.args.max_target_positions,
load_alignments=False,
truncate_source=False,
)
if prepend_tgt_lang_tag:
# TODO
text_dataset = TransformEosLangPairDataset(
text_dataset,
src_eos=self.src_dict.eos(),
tgt_bos=self.tgt_dict.eos(), # 'prev_output_tokens' starts with eos
new_tgt_bos=self.tgt_dict.index(LANG_TAG_TEMPLATE.format(tgt)),
)
lang_pairs.append(text_dataset)
if len(lang_pairs) > 1:
if sampling_alpha != 1.0:
size_ratios = SpeechToTextDatasetCreator.get_size_ratios(
self.args.langpairs.split(","),
[len(s) for s in lang_pairs],
alpha=sampling_alpha,
)
lang_pairs = [
ResamplingDataset(
d, size_ratio=r, epoch=epoch, replace=(r >= 1.0)
)
for d, r in zip(lang_pairs, size_ratios)
]
return ConcatDataset(lang_pairs)
return text_dataset
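# Editor's sketch of the temperature-based resampling used above; the actual
# helper is SpeechToTextDatasetCreator.get_size_ratios, and the formula below
# is an assumption written out for illustration:
#   sizes = [1_000_000, 10_000]                  # per language-pair sizes
#   probs = [s / sum(sizes) for s in sizes]
#   smoothed = [p ** sampling_alpha for p in probs]
#   smoothed = [p / sum(smoothed) for p in smoothed]
#   size_ratios = [p * sum(sizes) / s for p, s in zip(smoothed, sizes)]
# With sampling_alpha < 1.0 the low-resource pair gets a ratio > 1.0 and is
# upsampled by ResamplingDataset.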
def inference_step(
self, generator, models, sample, prefix_tokens=None, constraints=None
):
with torch.no_grad():
return generator.generate(
models,
sample,
prefix_tokens=prefix_tokens,
constraints=constraints,
bos_token=self._infer_tgt_lang_id,
)
def build_src_tokenizer(self, args):
logger.info(f"src-pre-tokenizer: {self.data_cfg.src_pre_tokenizer}")
return encoders.build_tokenizer(Namespace(**self.data_cfg.src_pre_tokenizer))
def build_src_bpe(self, args):
logger.info(f"tokenizer: {self.data_cfg.src_bpe_tokenizer}")
return encoders.build_bpe(Namespace(**self.data_cfg.src_bpe_tokenizer))
def load_dataset(self, split, epoch=1, combine=False, **kwargs):
"""Load a given dataset split.
Args:
split (str): name of the split (e.g., train, valid, test)
"""
is_train_split = split.startswith("train")
pre_tokenizer = self.build_tokenizer(self.args)
bpe_tokenizer = self.build_bpe(self.args)
src_pre_tokenizer = self.build_src_tokenizer(self.args)
src_bpe_tokenizer = self.build_src_bpe(self.args)
ast_dataset = SpeechToTextJointDatasetCreator.from_tsv(
self.args.data,
self.data_cfg,
split,
self.tgt_dict,
src_dict=None if self.speech_only else self.src_dict,
pre_tokenizer=pre_tokenizer,
bpe_tokenizer=bpe_tokenizer,
src_pre_tokenizer=src_pre_tokenizer,
src_bpe_tokenizer=src_bpe_tokenizer,
is_train_split=is_train_split,
epoch=epoch,
seed=self.args.seed,
)
noise_token_id = -1
text_dataset = None
if self.args.parallel_text_data != "" and is_train_split:
text_dataset = self.load_langpair_dataset(
self.data_cfg.prepend_tgt_lang_tag_no_change,
1.0,
epoch=epoch,
)
if self.args.mask_text_ratio > 0:
# add mask
noise_token_id = (
self.src_dict.unk()
if self.args.noise_token == ""
else self.src_dict.index(self.args.noise_token)
)
text_dataset = LangPairMaskDataset(
text_dataset,
src_bos=self.src_dict.bos(),
src_eos=self.src_dict.eos(),
noise_id=noise_token_id,
mask_ratio=self.args.mask_text_ratio,
mask_type=self.args.mask_text_type,
)
if text_dataset is not None:
mdsets = [
ModalityDatasetItem(
"sup_speech",
ast_dataset,
(self.args.max_source_positions, self.args.max_target_positions),
self.args.max_tokens,
self.args.batch_size,
),
ModalityDatasetItem(
"text",
text_dataset,
(self.args.max_positions_text, self.args.max_target_positions),
self.args.max_tokens_text
if self.args.max_tokens_text is not None
else self.args.max_tokens,
self.args.batch_size,
),
]
ast_dataset = MultiModalityDataset(mdsets)
self.datasets[split] = ast_dataset
@property
def target_dictionary(self):
"""Return the :class:`~fairseq.data.Dictionary` for the language
model."""
return self.tgt_dict
@property
def source_dictionary(self):
"""Return the source :class:`~fairseq.data.Dictionary` (if applicable
for this task)."""
return None if self.speech_only else self.src_dict
def get_batch_iterator(
self,
dataset,
max_tokens=None,
max_sentences=None,
max_positions=None,
ignore_invalid_inputs=False,
required_batch_size_multiple=1,
seed=1,
num_shards=1,
shard_id=0,
num_workers=0,
epoch=0,
data_buffer_size=0,
disable_iterator_cache=False,
):
if not isinstance(dataset, MultiModalityDataset):
return super(SpeechTextJointToTextTask, self).get_batch_iterator(
dataset,
max_tokens,
max_sentences,
max_positions,
ignore_invalid_inputs,
required_batch_size_multiple,
seed,
num_shards,
shard_id,
num_workers,
epoch,
data_buffer_size,
disable_iterator_cache,
)
mult_ratio = [self.args.speech_sample_ratio, self.args.text_sample_ratio]
assert len(dataset.datasets) == 2
# initialize the dataset with the correct starting epoch
dataset.set_epoch(epoch)
batch_samplers = dataset.get_batch_samplers(
mult_ratio, required_batch_size_multiple, seed
)
# return a reusable, sharded iterator
epoch_iter = GroupedEpochBatchIterator(
dataset=dataset,
collate_fn=dataset.collater,
batch_samplers=batch_samplers,
seed=seed,
num_shards=num_shards,
shard_id=shard_id,
num_workers=num_workers,
epoch=epoch,
mult_rate=1 if self.args.update_mix_data else max(self.args.update_freq),
buffer_size=data_buffer_size,
)
self.dataset_to_epoch_iter[dataset] = {} # refresh it every epoch
return epoch_iter
| EXA-1-master | exa/models/unilm-master/edgelm/examples/speech_text_joint_to_text/tasks/speech_text_joint.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import importlib
import os
| EXA-1-master | exa/models/unilm-master/edgelm/examples/speech_text_joint_to_text/tasks/__init__.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
from collections import namedtuple
import torch
import torch.nn as nn
from fairseq import checkpoint_utils
from fairseq import utils
from fairseq.models import (
FairseqEncoder,
FairseqDecoder,
FairseqEncoderDecoderModel,
register_model,
register_model_architecture,
)
from fairseq.models.fairseq_encoder import EncoderOut
from fairseq.models.speech_to_text import (
TransformerDecoder,
S2TTransformerEncoder,
)
from fairseq.models.transformer import TransformerEncoder
from fairseq.modules import (
TransformerEncoderLayer,
GradMultiply,
LayerNorm,
)
logger = logging.getLogger(__name__)
class SpeechEoSEncoder(FairseqEncoder):
def __init__(self, encoder, eos_num, feat_dim, adapter_type="None", adapter_dim=0):
super().__init__(None)
self.encoder = encoder
self.eos_num = eos_num # number of EoS frames to append (equals the speech encoder's downsampling rate)
self.eos_emb = (
nn.Parameter(torch.zeros(1, feat_dim), requires_grad=True)
if eos_num > 0
else None
)
self.adapter = self.add_adapter(adapter_type, adapter_dim)
def add_adapter(self, adapter_type, adapter_dim):
def _make_identity(linear, eps=1e-5):
assert isinstance(linear, nn.Linear)
linear.weight.data.mul_(eps)
linear.weight.data.fill_diagonal_(1.0)
if linear.bias is not None:
linear.bias.data.mul_(eps)
adapter = None
if adapter_type == "Linear":
assert adapter_dim > 0
adapter = nn.Sequential(
nn.Linear(adapter_dim, adapter_dim), LayerNorm(adapter_dim)
)
# initialize the adapter as identity matrix first
_make_identity(adapter[0])
elif adapter_type == "MLP":
assert adapter_dim > 0
# assume the model is pre-norm model
adapter = nn.Sequential(
nn.Linear(adapter_dim, 2 * adapter_dim),
nn.ReLU(),
nn.Linear(2 * adapter_dim, adapter_dim),
LayerNorm(adapter_dim),
)
_make_identity(adapter[0])
_make_identity(adapter[2])
return adapter
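# Editor's sketch (not part of the original code): _make_identity shrinks the
# random weights, then writes 1.0 on the diagonal, so a freshly built "Linear"
# adapter starts out as an (approximate) identity map:
#   lin = nn.Linear(4, 4)
#   lin.weight.data.mul_(1e-5)
#   lin.weight.data.fill_diagonal_(1.0)
#   # lin(x) is approximately x for any input of shape (*, 4)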
def add_eos(self, src_tokens, src_lengths):
bsz, max_seq_len, fdim = src_tokens.size()
if self.eos_num > 0:
src_token_eos = torch.zeros(
[bsz, max_seq_len + self.eos_num, fdim],
dtype=src_tokens.dtype,
device=src_tokens.device,
)
src_token_eos[:, :max_seq_len] = src_tokens
for bi in range(bsz):
src_token_eos[bi][
src_lengths[bi] : src_lengths[bi] + self.eos_num
] = self.eos_emb.expand(self.eos_num, fdim)
src_lengths = src_lengths + self.eos_num
src_tokens = src_token_eos
return src_tokens, src_lengths
def apply_adapter(self, enc_out):
if self.adapter is None:
return enc_out
rst = self.adapter(enc_out.encoder_out)
if enc_out.encoder_padding_mask is not None:
rst.masked_fill_(
enc_out.encoder_padding_mask.transpose(0, 1).unsqueeze(-1), 0
)
return EncoderOut(
encoder_out=rst,
encoder_padding_mask=enc_out.encoder_padding_mask,
encoder_embedding=enc_out.encoder_embedding,
encoder_states=enc_out.encoder_states,
src_tokens=enc_out.src_tokens,
src_lengths=enc_out.src_lengths,
)
def forward(self, src_tokens, src_lengths=None, return_all_hiddens=False, **kwargs):
"""
src_tokens: padded tensor (B, T, C * feat)
src_lengths: tensor of original lengths of input utterances (B,)
"""
src_tokens, src_lengths = self.add_eos(src_tokens, src_lengths)
enc_out = self.encoder(src_tokens, src_lengths, return_all_hiddens)
enc_out = self.apply_adapter(enc_out)
return enc_out
def reorder_encoder_out(self, encoder_out, new_order):
return self.encoder.reorder_encoder_out(encoder_out, new_order)
class DualInputEncoder(FairseqEncoder):
def __init__(
self,
args,
spch_encoder,
text_encoder,
dictionary,
cross_attentive_loss_before_last_layer=-1,
):
super().__init__(dictionary)
self.spch_encoder = spch_encoder
self.text_encoder = text_encoder
self.enc_grad_mult = args.enc_grad_mult
self.cross_attentive_loss_before_last_layer = (
cross_attentive_loss_before_last_layer
)
self.use_cross_attentive_loss = cross_attentive_loss_before_last_layer > -1
self.enc2_along_grad_mult = args.enc2_along_grad_mult
@classmethod
def set_shared_layer(cls, share_level, src_layer, tgt_layer):
"""
share parameters from tgt_layer to src_layer
share_level:
    0: share the whole layer (tgt_layer is returned as-is)
    1: share all sub-modules but keep separate layer objects
    2: share weights only; biases and layer norms stay separate
"""
if share_level == 0:
return tgt_layer
if isinstance(src_layer, nn.Linear):
return tgt_layer
if isinstance(src_layer, TransformerEncoderLayer):
assert src_layer.embed_dim == tgt_layer.embed_dim
assert src_layer.normalize_before == tgt_layer.normalize_before
if share_level == 1:
src_layer.fc1 = tgt_layer.fc1
src_layer.fc2 = tgt_layer.fc2
src_layer.self_attn = tgt_layer.self_attn
src_layer.final_layer_norm = tgt_layer.final_layer_norm
src_layer.self_attn_layer_norm = tgt_layer.self_attn_layer_norm
src_layer.layernorm_embedding = tgt_layer.layernorm_embedding
else:
src_layer.fc1.weight = tgt_layer.fc1.weight
src_layer.fc2.weight = tgt_layer.fc2.weight
src_layer.self_attn.k_proj.weight = tgt_layer.self_attn.k_proj.weight
src_layer.self_attn.v_proj.weight = tgt_layer.self_attn.v_proj.weight
src_layer.self_attn.q_proj.weight = tgt_layer.self_attn.q_proj.weight
src_layer.self_attn.out_proj.weight = (
tgt_layer.self_attn.out_proj.weight
)
else:
if share_level == 1:
return tgt_layer
return src_layer
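# Editor's illustration of the levels above (hypothetical usage):
#   layer = DualInputEncoder.set_shared_layer(0, src_layer, tgt_layer)
#   assert layer is tgt_layer                                  # fully shared
#   layer = DualInputEncoder.set_shared_layer(2, src_layer, tgt_layer)
#   assert layer.fc1.weight is tgt_layer.fc1.weight            # weights tied
#   assert layer.fc1.bias is not tgt_layer.fc1.bias            # biases separate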
@classmethod
def build_spch_encoder(cls, args):
cfg = {
"input_feat_per_channel": args.input_feat_per_channel,
"input_channels": args.input_channels,
"conv_kernel_sizes": args.conv_kernel_sizes,
"conv_channels": args.conv_channels,
"encoder_embed_dim": args.encoder_embed_dim,
"encoder_ffn_embed_dim": args.encoder_ffn_embed_dim,
"encoder_layers": args.speech_encoder_layers,
"encoder_layerdrop": args.encoder_layerdrop,
"encoder_attention_heads": args.encoder_attention_heads,
"max_source_positions": args.max_source_positions,
"dropout": args.dropout,
"encoder_normalize_before": args.encoder_normalize_before,
"activation_dropout": args.activation_dropout,
"attention_dropout": args.attention_dropout,
"activation_fn": args.activation_fn,
"layernorm_embedding": args.layernorm_embedding,
"no_token_positional_embeddings": args.no_token_positional_embeddings,
"no_scale_embedding": args.no_scale_embedding,
"quant_noise_pq": args.quant_noise_pq,
"encoder_freezing_updates": 0,
}
model_args = namedtuple("args", cfg.keys())(*cfg.values())
spch_encoder = S2TTransformerEncoder(model_args)
if args.add_speech_eos:
spch_encoder = SpeechEoSEncoder(
spch_encoder,
2 * len(args.conv_kernel_sizes.split(",")),
args.input_feat_per_channel,
adapter_type=getattr(args, "speech_encoder_adapter_type", "None"),
adapter_dim=args.encoder_embed_dim,
)
return spch_encoder
@classmethod
def build_text_encoder(cls, args, src_dictionary, spch_encoder):
if args.encoder_shared_layers > 0:
mx_shared_layers = (
args.speech_encoder_layers
if args.speech_encoder_layers < args.text_encoder_layers
else args.text_encoder_layers
)
args.encoder_shared_layers = (
args.encoder_shared_layers
if args.encoder_shared_layers <= mx_shared_layers
else mx_shared_layers
)
cfg = {
"encoder_embed_dim": args.encoder_text_embed_dim,
"encoder_ffn_embed_dim": args.encoder_ffn_embed_dim,
"encoder_layers": args.text_encoder_layers,
"encoder_layerdrop": args.encoder_layerdrop,
"encoder_attention_heads": args.encoder_attention_heads,
"encoder_learned_pos": args.encoder_learned_pos,
"max_source_positions": args.max_source_positions,
"dropout": args.dropout,
"encoder_normalize_before": args.encoder_normalize_before,
"activation_dropout": args.activation_dropout,
"attention_dropout": args.attention_dropout,
"activation_fn": args.activation_fn,
"adaptive_input": args.adaptive_input,
"no_token_positional_embeddings": args.no_token_positional_embeddings,
"no_scale_embedding": args.no_scale_embedding,
"quant_noise_pq": args.quant_noise_pq,
}
model_args = namedtuple("args", cfg.keys())(*cfg.values())
enc_emb = nn.Embedding(
len(src_dictionary), model_args.encoder_embed_dim, src_dictionary.pad()
)
text_encoder = TransformerEncoder(model_args, src_dictionary, enc_emb)
if args.add_speech_eos:
spch_encoder = spch_encoder.encoder
if args.encoder_shared_layers > 0:
text_encoder.layer_norm = cls.set_shared_layer(
args.encoder_shared_layer_level,
text_encoder.layer_norm,
spch_encoder.layer_norm,
)
for i, ly in enumerate(
spch_encoder.transformer_layers[-args.encoder_shared_layers :]
):
ly_id = i + args.text_encoder_layers - args.encoder_shared_layers
if not isinstance(text_encoder.layers[ly_id], type(ly)):
if text_encoder.layers[ly_id]._get_name() not in ('TransformerEncoderLayerBase', 'TransformerEncoderLayer'):
raise ValueError("The shared layers are expected from the same class")
text_encoder.layers[ly_id] = cls.set_shared_layer(
args.encoder_shared_layer_level,
text_encoder.layers[ly_id],
ly,
)
return text_encoder
def mult_rst_grad(self, rst, ratio):
assert isinstance(rst, dict) # instead of EncoderOut
assert len(rst["encoder_out"]) == 1
rst["encoder_out"][0] = GradMultiply.apply(rst["encoder_out"][0], ratio)
return rst
def process_attentive_loss_states(self, rst, interstates):
assert isinstance(rst, dict) # instead of EncoderOut
rst["encoder_states"] = interstates
return rst
def forward(
self,
src_tokens,
src_lengths=None,
src_txt_tokens=None,
src_txt_lengths=None,
**kwargs
):
"""
Args:
src_tokens: padded tensor (B, T, C * feat)
src_lengths: tensor of original lengths of input utterances (speech) (B,)
src_txt_tokens: padded tensor (B, T)
src_txt_lengths: tensor of original lengths of input utterances (text) (B,)
"""
# src_tokens only: inference
# src_tokens, src_lengths: speech only training
# src_txt_tokens, src_txt_lengths: text only training
# all valid: speech + text training
if src_tokens is None and src_txt_tokens is None:
raise ValueError(
"src_tokens and src_txt_tokens cannot be None at the same time"
)
ret1 = None
ret2 = None
return_all_hiddens = False
if src_tokens is not None:
if (
self.use_cross_attentive_loss and src_txt_tokens is not None
): # remove self.training so we can get attn score during validation step
return_all_hiddens = True
ret1 = self.spch_encoder(
src_tokens, src_lengths, return_all_hiddens=return_all_hiddens
)
if self.use_cross_attentive_loss and src_txt_tokens is not None:
assert self.cross_attentive_loss_before_last_layer < len(
ret1["encoder_states"]
)
ret1 = self.process_attentive_loss_states(
ret1,
ret1["encoder_states"][
-self.cross_attentive_loss_before_last_layer - 1
],
)
if src_txt_tokens is not None:
ret2 = self.text_encoder(
src_txt_tokens, src_txt_lengths, return_all_hiddens=return_all_hiddens
)
if return_all_hiddens:
if self.cross_attentive_loss_before_last_layer == len(
self.text_encoder.layers
):
text_embedding, _ = self.text_encoder.forward_embedding(
src_txt_tokens
)
text_embedding = text_embedding.transpose(0, 1)
ret2 = self.process_attentive_loss_states(ret2, text_embedding)
else:
assert self.cross_attentive_loss_before_last_layer < len(
self.text_encoder.layers
)
ret2 = self.process_attentive_loss_states(
ret2,
ret2["encoder_states"][
-self.cross_attentive_loss_before_last_layer - 1
],
)
def merge_output(rst1, rst2):
if rst1 is None:
if not (self.enc2_along_grad_mult == 1.0 or self.training):
rst2 = self.mult_rst_grad(rst2, self.enc2_along_grad_mult)
return rst2
if rst2 is None:
return rst1
if self.enc_grad_mult != 1.0 and self.training:
rst1 = self.mult_rst_grad(rst1, self.enc_grad_mult)
rst2 = self.mult_rst_grad(rst2, self.enc_grad_mult)
rst = (rst1, rst2)
return rst
return merge_output(ret1, ret2)
def reorder_encoder_out(self, encoder_out, new_order):
assert self.training is False # used for inference only
return self.spch_encoder.reorder_encoder_out(encoder_out, new_order)
# TransformerMultiInputDecoder: take one or two encoder inputs
class TransformerMultiInputDecoder(FairseqDecoder):
def __init__(
self,
dictionary,
spch_decoder,
text_decoder,
compute_cross_attentive_loss=False,
cross_attentive_loss_with_norm=True,
cross_attentive_loss_reverse=False,
):
super().__init__(dictionary)
self.spch_decoder = spch_decoder
self.text_decoder = text_decoder
self.compute_cross_attentive_loss = compute_cross_attentive_loss
self.cross_attentive_loss_with_norm = cross_attentive_loss_with_norm
self.cross_attentive_loss_reverse = cross_attentive_loss_reverse
@classmethod
def share_spchdecoder(cls, task_args, text_decoder, spch_decoder):
if task_args.decoder_shared_layer_level == 0:
return text_decoder
assert text_decoder.embed_tokens == spch_decoder.embed_tokens
spch_decoder.project_in_dim = text_decoder.project_in_dim
spch_decoder.embed_positions = text_decoder.embed_positions
spch_decoder.layernorm_embedding = text_decoder.layernorm_embedding
spch_decoder.project_out_dim = text_decoder.project_out_dim
spch_decoder.adaptive_softmax = text_decoder.adaptive_softmax
if task_args.decoder_shared_layer_level == 1:
spch_decoder.output_projection = text_decoder.output_projection
spch_decoder.layer_norm = text_decoder.layer_norm
else: # 2
spch_decoder.output_projection.weight = (
text_decoder.output_projection.weight
)
for i, ly in enumerate(text_decoder.layers):
sly = spch_decoder.layers[i]
sly.self_attn = ly.self_attn
sly.self_attn_layer_norm = ly.self_attn_layer_norm
# sly.encoder_attn = ly.encoder_attn
if (
task_args.decoder_shared_layer_level == 1
): # share everything, but under different models
sly.encoder_attn = ly.encoder_attn
sly.encoder_attn_layer_norm = ly.encoder_attn_layer_norm
sly.fc1 = ly.fc1
sly.fc2 = ly.fc2
sly.final_layer_norm = ly.final_layer_norm
else: # task_args.decoder_shared_layer_level == 2: #separated encoder_attn_layer_norm and bias
sly.encoder_attn.k_proj.weight = ly.encoder_attn.k_proj.weight
sly.encoder_attn.v_proj.weight = ly.encoder_attn.v_proj.weight
sly.encoder_attn.q_proj.weight = ly.encoder_attn.q_proj.weight
sly.encoder_attn.out_proj.weight = ly.encoder_attn.out_proj.weight
sly.fc1.weight = ly.fc1.weight
sly.fc2.weight = ly.fc2.weight
return spch_decoder
def cross_attentive_loss(
self, teacher_states, student_states, teacher_masking, student_masking, eps=1e-6
):
x = teacher_states.transpose(0, 1) # from T X B X D to B X T X D
y = student_states.transpose(0, 1)
if self.cross_attentive_loss_with_norm:
x = x / (x.norm(dim=2, keepdim=True) + eps)
y = y / (y.norm(dim=2, keepdim=True) + eps)
dim = x.size(-1)
# lengths: batch X seqLen
sim_scores_xy = torch.bmm(x, y.transpose(1, 2)) # batch X lenx X leny
if y.dtype == torch.float16:
sim_scores_xy = sim_scores_xy.float()
y = y.float()
x = x.float()
if teacher_masking != []:
assert len(teacher_masking) == 1
sim_scores_xy = sim_scores_xy.masked_fill(
teacher_masking[0].unsqueeze(-1), float("-inf")
)
if student_masking != []:
sim_scores_xy = sim_scores_xy.masked_fill(
student_masking[0].unsqueeze(1), float("-inf")
)
# do masking
y_weights = utils.softmax(sim_scores_xy, dim=-1)
if teacher_masking != []:
y_weights = y_weights.masked_fill(teacher_masking[0].unsqueeze(-1), 0)
x_reconstruct_from_y = torch.bmm(y_weights, y)
sim_scores_xx = torch.bmm(x, x.transpose(1, 2)) # batch X lenx X lenx
x_weights = utils.softmax(sim_scores_xx, dim=-1)
if teacher_masking != []:
x_weights = x_weights.masked_fill(teacher_masking[0].unsqueeze(-1), 0)
# no gradient for teacher state
x_reconstruct_from_x = torch.bmm(x_weights, x).detach()
cost = (x_reconstruct_from_x - x_reconstruct_from_y).norm(dim=2)
if teacher_masking != []:
cost = cost.masked_fill(teacher_masking[0], 0)
if not self.cross_attentive_loss_with_norm:
cost = cost / dim
return cost
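# Editor's summary of the loss above (not in the original source): with
# length-normalized teacher states x and student states y,
#   x_from_y[b, i] = sum_j softmax_j(x_i . y_j) * y_j
#   x_from_x[b, i] = sum_k softmax_k(x_i . x_k) * x_k        (detached)
#   cost[b, i]     = || x_from_x[b, i] - x_from_y[b, i] ||_2
# masked at padded teacher positions, i.e. the student states are pushed to
# reconstruct the teacher states as well as the teacher states themselves do.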
def forward(
self,
prev_output_tokens,
encoder_out,
incremental_state=None,
has_txt_input=False,
**kwargs
):
"""
Args:
prev_output_tokens (LongTensor): previous decoder outputs of shape
`(batch, tgt_len)`, for input feeding/teacher forcing. If there are
two or more inputs during training, they share the same prev_output_tokens
encoder_out (tuple[Tensor]): output from the encoder, used for
encoder-side attention. It is a tuple if there are multiple inputs,
but a single tensor if there is only one input
incremental_state ([dict]): dictionary used for storing state during
:ref:`Incremental decoding`. It is only valid for inference, only from single
input
Returns:
tuple:
- the last decoder layer's output of shape `(batch, tgt_len,
vocab)`. If there are N inputs, the batch will be N times bigger than for a single input
- the last decoder layer's attention weights of shape `(batch,
tgt_len, src_len)`
"""
assert not isinstance(encoder_out, EncoderOut)
if isinstance(encoder_out, tuple): # training with multiple inputs
rst = []
assert len(encoder_out) == 2
for i, eo in enumerate(encoder_out):
assert incremental_state is None
if i == 0:
rst.append(
self.spch_decoder(prev_output_tokens, eo, incremental_state)
)
else:
rst.append(
self.text_decoder(prev_output_tokens, eo, incremental_state)
)
dec_out = torch.cat([r[0] for r in rst], dim=0)
attn_cost = None
if self.compute_cross_attentive_loss:
assert isinstance(encoder_out[0], dict)
if self.cross_attentive_loss_reverse:
attn_cost = self.cross_attentive_loss(
teacher_states=encoder_out[1]["encoder_states"], # text_states
student_states=encoder_out[0]["encoder_states"], # spch_states
teacher_masking=encoder_out[1]["encoder_padding_mask"],
student_masking=encoder_out[0]["encoder_padding_mask"],
)
else:
attn_cost = self.cross_attentive_loss(
teacher_states=encoder_out[0]["encoder_states"], # spch_states
student_states=encoder_out[1]["encoder_states"], # text_states
teacher_masking=encoder_out[0]["encoder_padding_mask"],
student_masking=encoder_out[1]["encoder_padding_mask"],
)
return (dec_out, {"attn_cost": attn_cost})
else: # inference or training with one input
if has_txt_input:
return self.text_decoder(
prev_output_tokens, encoder_out, incremental_state
)
return self.spch_decoder(prev_output_tokens, encoder_out, incremental_state)
# Note:
# dual input transformer:
# encoder: S2TTransformerEncoder for speech + TransformerEncoder for text
# decoder: TransformerDecoder for text
@register_model("dual_input_s2t_transformer")
class DualInputS2TTransformerModel(FairseqEncoderDecoderModel):
def __init__(self, encoder, decoder):
super().__init__(encoder, decoder)
self.num_updates = 0
def max_positions(self):
return None # it is provided in task
@staticmethod
def add_args(parser):
"""Add model-specific arguments to the parser."""
# encoder 1: S2TTransformerEncoder for speech
parser.add_argument(
"--conv-kernel-sizes",
type=str,
metavar="N",
help="kernel sizes of Conv1d subsampling layers",
)
parser.add_argument(
"--conv-channels",
type=int,
metavar="N",
help="# of channels in Conv1d subsampling layers",
)
parser.add_argument(
"--enc-output-dim",
type=int,
metavar="N",
help="""
encoder output dimension; can be None. If specified, the transformer
output is projected to the specified dimension""",
)
# standard Transformer
parser.add_argument(
"--activation-fn",
type=str,
default="relu",
choices=utils.get_available_activation_fns(),
help="activation function to use",
)
parser.add_argument(
"--dropout", type=float, metavar="D", help="dropout probability"
)
parser.add_argument(
"--attention-dropout",
type=float,
metavar="D",
help="dropout probability for attention weights",
)
parser.add_argument(
"--activation-dropout",
"--relu-dropout",
type=float,
metavar="D",
help="dropout probability after activation in FFN.",
)
parser.add_argument(
"--encoder-embed-dim",
type=int,
metavar="N",
help="encoder embedding dimension",
)
parser.add_argument(
"--encoder-text-embed-dim",
type=int,
metavar="N",
help="encoder text embedding dimension",
)
parser.add_argument(
"--encoder-ffn-embed-dim",
type=int,
metavar="N",
help="encoder embedding dimension for FFN",
)
parser.add_argument(
"--encoder-attention-heads",
type=int,
metavar="N",
help="num encoder attention heads",
)
parser.add_argument(
"--decoder-embed-dim",
type=int,
metavar="N",
help="decoder embedding dimension",
)
parser.add_argument(
"--decoder-ffn-embed-dim",
type=int,
metavar="N",
help="decoder embedding dimension for FFN",
)
parser.add_argument(
"--decoder-layers", type=int, metavar="N", help="num decoder layers"
)
parser.add_argument(
"--decoder-attention-heads",
type=int,
metavar="N",
help="num decoder attention heads",
)
parser.add_argument(
"--layernorm-embedding",
action="store_true",
help="add layernorm to embedding",
)
parser.add_argument(
"--no-scale-embedding",
action="store_true",
help="if True, dont scale embeddings",
)
# non-standard transformer parameters
parser.add_argument(
"--speech-encoder-layers",
type=int,
metavar="N",
help="num speech encoder layers",
)
parser.add_argument(
"--text-encoder-layers",
type=int,
metavar="N",
help="num text encoder layers",
)
parser.add_argument(
"--encoder-shared-layers",
type=int,
metavar="N",
help="num shared encoder layers",
)
parser.add_argument(
"--encoder-shared-layer-level",
type=int,
metavar="N",
default=0,
choices=[0, 1, 2],
help="share layer level 0: all share 1: all share with separate model 2: share weight but not bias and layernorm",
)
parser.add_argument(
"--decoder-shared-layer-level",
default=0,
choices=[0, 1, 2],
type=int,
metavar="N",
help="0: share everything; 1: share everything with different model 2: no share layer_norm and bias",
)
###
parser.add_argument(
"--text-input-cost-ratio",
type=float,
default=1.0,
metavar="V",
help="text input cost ratio relative to speech input cost",
)
parser.add_argument(
"--init-scale",
type=float,
default=1.0,
metavar="V",
help="scale the initial weight by given factor",
)
parser.add_argument(
"--enc-grad-mult",
type=float,
metavar="V",
default=1.0,
help="multiply enc1 and enc2 gradient by V",
)
parser.add_argument(
"--enc2-along-grad-mult",
type=float,
metavar="V",
default=1.0,
help="multiply enc2 gradient by V if only enc2 is used",
)
parser.add_argument(
"--load-pretrain-encoder",
type=str,
default="",
metavar="EXPR",
help=""" path to the pretrained encoder """,
)
parser.add_argument(
"--load-pretrain-speech-encoder",
type=str,
default="",
metavar="EXPR",
help=""" path to the pretrained speech encoder """,
)
parser.add_argument(
"--load-pretrain-text-encoder",
type=str,
default="",
metavar="EXPR",
help=""" path to the pretrained text encoder """,
)
parser.add_argument(
"--load-pretrain-text-encoder-last",
type=str,
default="",
metavar="EXPR",
help=""" path to the pretrained text encoder """,
)
parser.add_argument(
"--load-pretrain-decoder",
type=str,
metavar="EXPR",
default="",
help=""" path to the pretrained encoder """,
)
parser.add_argument(
"--add-speech-eos",
action="store_true",
help="add eos token at the end of input feature",
)
parser.add_argument(
"--speech-encoder-adapter-type",
type=str,
metavar="EXPR",
default="None",
choices=["None", "Linear", "MLP"],
help="add speech encoder adapter",
)
@classmethod
def build_encoder(cls, args, task):
spch_encoder = DualInputEncoder.build_spch_encoder(args)
text_encoder = DualInputEncoder.build_text_encoder(
args, task.src_dict, spch_encoder
)
cross_attentive_loss_before_last_layer = (
0 if getattr(args, "attentive_cost_regularization", 0.0) > 0.0 else -1
)
encoder = DualInputEncoder(
args,
spch_encoder,
text_encoder,
task.src_dict,
cross_attentive_loss_before_last_layer,
)
if args.init_scale != 1.0:
with torch.no_grad():
for param in encoder.parameters():
param.data.mul_(args.init_scale)
if args.load_pretrain_text_encoder != "":
checkpoint_utils.load_pretrained_component_from_model(
text_encoder, args.load_pretrain_text_encoder
)
if args.load_pretrain_speech_encoder != "":
if hasattr(spch_encoder, "encoder"):
checkpoint_utils.load_pretrained_component_from_model(
spch_encoder.encoder, args.load_pretrain_speech_encoder
)
else:
checkpoint_utils.load_pretrained_component_from_model(
spch_encoder, args.load_pretrain_speech_encoder
)
if (
args.load_pretrain_text_encoder_last != ""
): # if the encoder layers are shared, the speech encoder parameters take precedence;
# loading this checkpoint last lets a pre-trained MT encoder be used instead
checkpoint_utils.load_pretrained_component_from_model(
text_encoder, args.load_pretrain_text_encoder_last
)
if args.load_pretrain_encoder != "":
checkpoint_utils.load_pretrained_component_from_model(
encoder, args.load_pretrain_encoder
)
return encoder
@classmethod
def build_decoder(cls, args, task):
dec_cfg = {
"decoder_layerdrop": args.decoder_layerdrop,
"share_decoder_input_output_embed": args.share_decoder_input_output_embed,
"decoder_embed_dim": args.decoder_embed_dim,
"max_target_positions": args.max_target_positions,
"dropout": args.dropout,
"encoder_learned_pos": args.encoder_learned_pos,
"decoder_learned_pos": args.decoder_learned_pos,
"layernorm_embedding": args.layernorm_embedding,
"decoder_normalize_before": args.decoder_normalize_before,
"activation_dropout": args.activation_dropout,
"attention_dropout": args.attention_dropout,
"decoder_ffn_embed_dim": args.decoder_ffn_embed_dim,
"decoder_layers": args.decoder_layers,
"decoder_attention_heads": args.decoder_attention_heads,
"decoder_output_dim": args.decoder_embed_dim,
"no_scale_embedding": args.no_scale_embedding,
"adaptive_input": args.adaptive_input,
"quant_noise_pq": args.quant_noise_pq,
"adaptive_softmax_cutoff": args.adaptive_softmax_cutoff,
"tie_adaptive_weights": args.tie_adaptive_weights,
"no_token_positional_embeddings": args.no_token_positional_embeddings,
}
dec_cfg = namedtuple("args", dec_cfg.keys())(*dec_cfg.values())
dec_emb = nn.Embedding(
len(task.target_dictionary),
args.decoder_embed_dim,
task.target_dictionary.pad(),
)
compute_cross_attentive_loss = (
    getattr(args, "attentive_cost_regularization", 0.0) > 0.0
)
cross_attentive_loss_without_norm = getattr(
args, "attentive_cost_without_normalize", False
)
cross_attentive_loss_reverse = (
False # getattr(args, "attentive_cost_reverse", False)
)
text_decoder = TransformerDecoder(dec_cfg, task.target_dictionary, dec_emb)
spch_decoder = TransformerDecoder(dec_cfg, task.target_dictionary, dec_emb)
spch_decoder = TransformerMultiInputDecoder.share_spchdecoder(
args, text_decoder, spch_decoder
)
decoder = TransformerMultiInputDecoder(
dictionary=task.target_dictionary,
spch_decoder=spch_decoder,
text_decoder=text_decoder,
compute_cross_attentive_loss=compute_cross_attentive_loss,
cross_attentive_loss_with_norm=not cross_attentive_loss_without_norm,
cross_attentive_loss_reverse=cross_attentive_loss_reverse,
)
if args.init_scale != 1.0:
with torch.no_grad():
for param in decoder.parameters():
param.data.mul_(args.init_scale)
if args.load_pretrain_decoder != "":
try:
checkpoint_utils.load_pretrained_component_from_model(
decoder, args.load_pretrain_decoder
)
except RuntimeError:
checkpoint_utils.load_pretrained_component_from_model(
decoder.text_decoder, args.load_pretrain_decoder
)
if args.decoder_shared_layer_level > 0:
checkpoint_utils.load_pretrained_component_from_model(
decoder.spch_decoder, args.load_pretrain_decoder
)
return decoder
@classmethod
def build_model(cls, args, task):
"""Build a new model instance."""
# make sure that all args are properly defaulted
# (in case there are any new ones)
dualinputs2ttransformer_base(args)
encoder = cls.build_encoder(args, task)
decoder = cls.build_decoder(args, task)
return cls(encoder, decoder)
def get_normalized_probs(self, net_output, log_probs, sample=None):
# net_output['encoder_out'] is a (B, T, D) tensor
lprobs = super().get_normalized_probs(net_output, log_probs, sample)
lprobs.batch_first = True
return lprobs
def set_num_updates(self, num_updates):
"""Set the number of parameters updates."""
super().set_num_updates(num_updates)
self.num_updates = num_updates
def forward(
self,
src_tokens,
src_lengths,
prev_output_tokens,
use_encoder_outputs=False,
src_txt_tokens=None,
src_txt_lengths=None,
mode="sup_speech",
**kwargs
):
"""
Run the forward pass for an encoder-decoder model.
First feed a batch of source tokens through the encoder. Then, feed the
encoder output and previous decoder outputs (i.e., teacher forcing) to
the decoder to produce the next outputs::
encoder_out = self.encoder(src_tokens, src_lengths)
return self.decoder(prev_output_tokens, encoder_out)
Args:
src_tokens (LongTensor): tokens in the source language of shape
`(batch, src_len)`
src_lengths (LongTensor): source sentence lengths of shape `(batch)`
prev_output_tokens (LongTensor): previous decoder outputs of shape
`(batch, tgt_len)`, for teacher forcing
mode = 'sup_speech' or 'text'
Returns:
tuple:
- the decoder's output of shape `(batch, tgt_len, vocab)`
- a dictionary with any model-specific outputs
"""
if mode == "text":
assert src_txt_tokens is None
src_txt_tokens = src_tokens
src_txt_lengths = src_lengths
src_tokens = None
src_lengths = None
encoder_out = self.encoder(
src_tokens,
src_lengths=src_lengths,
src_txt_tokens=src_txt_tokens,
src_txt_lengths=src_txt_lengths,
**kwargs
)
has_txt_input = src_txt_tokens is not None
decoder_out = self.decoder(
prev_output_tokens,
encoder_out=encoder_out,
has_txt_input=has_txt_input,
**kwargs
)
if use_encoder_outputs:
return decoder_out, encoder_out
return decoder_out
@register_model_architecture(
"dual_input_s2t_transformer", "dualinputs2ttransformer_base"
)
def dualinputs2ttransformer_base(args):
args.encoder_freezing_updates = getattr(args, "encoder_freezing_updates", 0)
# Convolutional subsampler
args.input_feat_per_channel = getattr(args, "input_feat_per_channel", 80)
args.conv_kernel_sizes = getattr(args, "conv_kernel_sizes", "5,5")
args.conv_channels = getattr(args, "conv_channels", 1024)
# Transformer
args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 512)
args.encoder_text_embed_dim = getattr(
args, "encoder_text_embed_dim", args.encoder_embed_dim
)
args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 2048)
args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 8)
args.encoder_normalize_before = getattr(args, "encoder_normalize_before", True)
args.encoder_layerdrop = getattr(args, "encoder_layerdrop", 0)
args.encoder_learned_pos = getattr(args, "encoder_learned_pos", False)
args.decoder_embed_dim = getattr(args, "decoder_embed_dim", args.encoder_embed_dim)
args.decoder_ffn_embed_dim = getattr(
args, "decoder_ffn_embed_dim", args.encoder_ffn_embed_dim
)
args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 8)
args.decoder_normalize_before = getattr(args, "decoder_normalize_before", True)
args.decoder_learned_pos = getattr(args, "decoder_learned_pos", False)
args.dropout = getattr(args, "dropout", 0.1)
args.attention_dropout = getattr(args, "attention_dropout", args.dropout)
args.activation_dropout = getattr(args, "activation_dropout", args.dropout)
args.activation_fn = getattr(args, "activation_fn", "relu")
args.adaptive_softmax_cutoff = getattr(args, "adaptive_softmax_cutoff", None)
args.adaptive_softmax_dropout = getattr(args, "adaptive_softmax_dropout", 0)
args.tie_adaptive_weights = getattr(args, "tie_adaptive_weights", False)
args.share_decoder_input_output_embed = getattr(
args, "share_decoder_input_output_embed", False
)
args.no_token_positional_embeddings = getattr(
args, "no_token_positional_embeddings", False
)
args.adaptive_input = getattr(args, "adaptive_input", False)
args.decoder_layerdrop = getattr(args, "decoder_layerdrop", 0.0)
args.decoder_output_dim = getattr(
args, "decoder_output_dim", args.decoder_embed_dim
)
args.layernorm_embedding = getattr(args, "layernorm_embedding", False)
args.no_scale_embedding = getattr(args, "no_scale_embedding", False)
args.quant_noise_pq = getattr(args, "quant_noise_pq", 0)
args.speech_encoder_layers = getattr(args, "speech_encoder_layers", 10)
args.text_encoder_layers = getattr(args, "text_encoder_layers", 6)
args.encoder_shared_layers = getattr(args, "encoder_shared_layers", 0)
args.decoder_layers = getattr(args, "decoder_layers", 6)
args.add_speech_eos = getattr(args, "add_speech_eos", False)
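# Editor's note (hypothetical usage): the architectures registered in this file
# are selected with fairseq's --arch flag, e.g.
#   fairseq-train ... --arch dualinputs2ttransformer_s \
#     --user-dir examples/speech_text_joint_to_text
# the presets below only override a handful of the defaults set here.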
@register_model_architecture("dual_input_s2t_transformer", "dualinputs2ttransformer_s")
def dualinputs2ttransformer_s(args):
args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 256)
args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 256 * 4)
args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 4)
args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 4)
args.dropout = getattr(args, "dropout", 0.1)
args.speech_encoder_layers = getattr(args, "speech_encoder_layers", 7)
args.text_encoder_layers = getattr(args, "text_encoder_layers", 7)
args.decoder_layers = getattr(args, "decoder_layers", 7)
dualinputs2ttransformer_base(args)
@register_model_architecture("dual_input_s2t_transformer", "dualinputs2ttransformer_m")
def dualinputs2ttransformer_m(args):
args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 512)
args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 512 * 4)
args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 8)
args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 8)
args.dropout = getattr(args, "dropout", 0.15)
args.speech_encoder_layers = getattr(args, "speech_encoder_layers", 10)
args.text_encoder_layers = getattr(args, "text_encoder_layers", 6)
args.decoder_layers = getattr(args, "decoder_layers", 6)
dualinputs2ttransformer_base(args)
@register_model_architecture("dual_input_s2t_transformer", "dualinputs2ttransformer_b")
def dualinputs2ttransformer_b(args):
args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 768)
args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 768 * 4)
args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 12)
args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 12)
args.dropout = getattr(args, "dropout", 0.15)
args.speech_encoder_layers = getattr(args, "speech_encoder_layers", 12)
args.text_encoder_layers = getattr(args, "text_encoder_layers", 6)
args.decoder_layers = getattr(args, "decoder_layers", 6)
dualinputs2ttransformer_base(args)
@register_model_architecture("dual_input_s2t_transformer", "dualinputs2ttransformer_l")
def dualinputs2ttransformer_l(args):
args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 1024)
args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 1024 * 4)
args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 16)
args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 16)
args.dropout = getattr(args, "dropout", 0.2)
args.speech_encoder_layers = getattr(args, "speech_encoder_layers", 12)
args.text_encoder_layers = getattr(args, "text_encoder_layers", 6)
args.decoder_layers = getattr(args, "decoder_layers", 6)
dualinputs2ttransformer_base(args)
| EXA-1-master | exa/models/unilm-master/edgelm/examples/speech_text_joint_to_text/models/s2t_dualinputtransformer.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import importlib
import os
| EXA-1-master | exa/models/unilm-master/edgelm/examples/speech_text_joint_to_text/models/__init__.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import copy
import torch.nn as nn
from fairseq import checkpoint_utils
from fairseq import utils
from fairseq.data.data_utils import lengths_to_padding_mask
from fairseq.models import (
register_model,
register_model_architecture,
FairseqEncoder,
)
from fairseq.models.speech_to_text import XMTransformerModel, Wav2VecEncoderWithAdaptor
from fairseq.models.speech_to_text.xm_transformer import (
set_default_adaptor_args,
set_default_w2v_encoder_args,
)
from fairseq.models.transformer import TransformerEncoder, TransformerDecoder
from fairseq.models.wav2vec import TransformerSentenceEncoderLayer
from fairseq.utils import safe_hasattr
from .s2t_dualinputtransformer import (
DualInputS2TTransformerModel,
TransformerMultiInputDecoder,
DualInputEncoder,
)
class TransformerSentenceEncoderLayerStd(TransformerSentenceEncoderLayer):
def __init__(self, sent_enc_layer):
super(TransformerSentenceEncoderLayer, self).__init__()
self.embedding_dim = sent_enc_layer.embedding_dim
self.dropout = sent_enc_layer.dropout
self.activation_dropout = sent_enc_layer.activation_dropout
# Initialize blocks
self.activation_fn = sent_enc_layer.activation_fn
self.self_attn = sent_enc_layer.self_attn
self.dropout1 = sent_enc_layer.dropout1
self.dropout2 = sent_enc_layer.dropout2
self.dropout3 = sent_enc_layer.dropout3
self.layer_norm_first = sent_enc_layer.layer_norm_first
# layer norm associated with the self attention layer
self.self_attn_layer_norm = sent_enc_layer.self_attn_layer_norm
self.fc1 = sent_enc_layer.fc1
self.fc2 = sent_enc_layer.fc2
# layer norm associated with the position wise feed-forward NN
self.final_layer_norm = sent_enc_layer.final_layer_norm
def forward(
self,
x,
self_attn_mask=None,
self_attn_padding_mask=None,
need_weights=None,
att_args=None,
):
x, attn = super().forward(
x, self_attn_mask, self_attn_padding_mask, need_weights, att_args
)
return x
# TODO retire SharedEncoder
class SharedEncoder(FairseqEncoder):
def __init__(self, wav2vec_enc, mbart_enc, adaptor, shared_layers):
super().__init__(None)
self.w2v_encoder = wav2vec_enc
self.shared_layers = self.w2v_encoder.w2v_model.encoder.layers[-shared_layers:]
self.w2v_encoder.w2v_model.encoder.layers = (
self.w2v_encoder.w2v_model.encoder.layers[:-shared_layers]
)
self.adaptor = adaptor
if self.shared_layers[-1].layer_norm_first:
self.final_layer_norm = mbart_enc.layer_norm
else:
mbart_enc.layer_norm = None
self.final_layer_norm = None
shared_layer_from = len(mbart_enc.layers) - shared_layers
if shared_layer_from < 0:
shared_layer_from = 0
for layer_id, layer in enumerate(self.shared_layers):
mbart_enc.layers[
shared_layer_from + layer_id
] = TransformerSentenceEncoderLayerStd(layer)
def forward(self, src_tokens, src_lengths=None, **kwargs):
padding_mask = lengths_to_padding_mask(src_lengths)
if not padding_mask.any():
padding_mask = None
out = self.w2v_encoder.forward(src_tokens, padding_mask, tbc=True)
x = out["encoder_out"]
enc_padding_mask = None
if out["encoder_padding_mask"] is not None:
enc_padding_mask = out["encoder_padding_mask"].transpose(
0, 1
) # T X B --> B X T
x, enc_padding_mask = self.adaptor(x, enc_padding_mask)
for layer in self.shared_layers:
x, _ = layer(x, enc_padding_mask)
if self.final_layer_norm is not None:
x = self.final_layer_norm(x)
return {
"encoder_out": [x], # T x B x C
"encoder_padding_mask": [enc_padding_mask]
if enc_padding_mask is not None
else [], # B x T
"encoder_embedding": [], # B x T x C
"encoder_states": [], # List[T x B x C]
"src_tokens": [],
"src_lengths": [],
}
class StackedWav2VecEncoderWithAdaptor(FairseqEncoder):
def __init__(
self,
wav2vec_enc,
mbart_enc_layers,
mbart_layer_norm,
adaptor,
drop_w2v_layers=0,
):
super().__init__(None)
self.w2v_encoder = wav2vec_enc
self.adaptor = adaptor
self.mbart_encoder_layers = mbart_enc_layers
self.final_layer_norm = mbart_layer_norm
if drop_w2v_layers > 0:
self.w2v_encoder.w2v_model.encoder.layers = (
self.w2v_encoder.w2v_model.encoder.layers[:-drop_w2v_layers]
)
def forward(self, src_tokens, src_lengths=None, return_all_hiddens=False, **kwargs):
padding_mask = lengths_to_padding_mask(src_lengths)
if not padding_mask.any():
padding_mask = None
out = self.w2v_encoder.forward(src_tokens, padding_mask, tbc=True)
x = out["encoder_out"]
enc_padding_mask = None
if out["padding_mask"] is not None:
enc_padding_mask = out["padding_mask"] # B X T
x, enc_padding_mask = self.adaptor(x, enc_padding_mask)
encoder_states = []
for layer in self.mbart_encoder_layers:
x = layer(x, enc_padding_mask)
if return_all_hiddens:
encoder_states.append(x)
if self.final_layer_norm is not None:
x = self.final_layer_norm(x)
return {
"encoder_out": [x], # T x B x C
"encoder_padding_mask": [enc_padding_mask]
if enc_padding_mask is not None
else [], # B x T
"encoder_embedding": [], # B x T x C
"encoder_states": encoder_states, # List[T x B x C]
"src_tokens": [],
"src_lengths": [],
}
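# Editor's sketch of the data flow above (shapes follow the usual fairseq
# time-first convention; exact sizes depend on the wav2vec checkpoint and the
# adaptor configuration, both assumptions here):
#   waveform (B, T_audio) -> wav2vec encoder        -> x: (T, B, C)
#   x, padding mask       -> adaptor (downsampling) -> x: (T', B, C), T' < T
#   x                     -> mbart encoder layers   -> optional final layer norm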
def reorder_encoder_out(self, encoder_out, new_order):
new_encoder_out = (
[]
if len(encoder_out["encoder_out"]) == 0
else [x.index_select(1, new_order) for x in encoder_out["encoder_out"]]
)
new_encoder_padding_mask = (
[]
if len(encoder_out["encoder_padding_mask"]) == 0
else [
x.index_select(0, new_order)
for x in encoder_out["encoder_padding_mask"]
]
)
new_encoder_embedding = (
[]
if len(encoder_out["encoder_embedding"]) == 0
else [
x.index_select(0, new_order) for x in encoder_out["encoder_embedding"]
]
)
encoder_states = encoder_out["encoder_states"]
if len(encoder_states) > 0:
for idx, state in enumerate(encoder_states):
encoder_states[idx] = state.index_select(1, new_order)
return {
"encoder_out": new_encoder_out, # T x B x C
"encoder_padding_mask": new_encoder_padding_mask, # B x T
"encoder_embedding": new_encoder_embedding, # B x T x C
"encoder_states": encoder_states, # List[T x B x C]
"src_tokens": [], # B x T
"src_lengths": [], # B x 1
}
# Note:
# dual input transformer:
# encoder: wav2vec for speech + mbart encoder for text
# decoder: mbart decoder for text
@register_model("dual_input_xm_transformer")
class DualInputXMTransformerModel(DualInputS2TTransformerModel):
def __init__(self, encoder, decoder):
super().__init__(encoder, decoder)
@staticmethod
def add_args(parser):
"""Add model-specific arguments to the parser."""
# wav2vec encoder
Wav2VecEncoderWithAdaptor.add_args(parser)
# add_decoder_args(parser)
# mbart Transformer
parser.add_argument(
"--activation-fn",
type=str,
default="relu",
choices=utils.get_available_activation_fns(),
help="activation function to use",
)
parser.add_argument(
"--mbart-dropout", type=float, metavar="D", help="dropout probability"
)
parser.add_argument(
"--mbart-attention-dropout",
type=float,
metavar="D",
help="dropout probability for attention weights",
)
parser.add_argument(
"--mbart-activation-dropout",
type=float,
metavar="D",
help="dropout probability after activation in FFN.",
)
parser.add_argument(
"--encoder-embed-dim",
type=int,
metavar="N",
help="encoder embedding dimension",
)
parser.add_argument(
"--encoder-ffn-embed-dim",
type=int,
metavar="N",
help="encoder embedding dimension for FFN",
)
parser.add_argument(
"--encoder-layers", type=int, metavar="N", help="num encoder layers"
)
parser.add_argument(
"--encoder-attention-heads",
type=int,
metavar="N",
help="num encoder attention heads",
)
parser.add_argument(
"--encoder-normalize-before",
action="store_true",
help="apply layernorm before each encoder block",
)
parser.add_argument(
"--decoder-embed-dim",
type=int,
metavar="N",
help="decoder embedding dimension",
)
parser.add_argument(
"--decoder-ffn-embed-dim",
type=int,
metavar="N",
help="decoder embedding dimension for FFN",
)
parser.add_argument(
"--decoder-layers", type=int, metavar="N", help="num decoder layers"
)
parser.add_argument(
"--decoder-attention-heads",
type=int,
metavar="N",
help="num decoder attention heads",
)
parser.add_argument(
"--decoder-normalize-before",
action="store_true",
help="apply layernorm before each decoder block",
)
parser.add_argument(
"--layernorm-embedding",
action="store_true",
help="add layernorm to embedding",
)
parser.add_argument(
"--no-scale-embedding",
action="store_true",
help="if True, dont scale embeddings",
)
parser.add_argument(
"--load-pretrained-mbart-from",
type=str,
metavar="STR",
help="model to take text encoder decoder weights from (for initialization)",
)
# parser.add_argument("--finetune-w2v-params", type=str, metavar="STR",
# help="comma-separated param strings to finetune.")
parser.add_argument(
"--finetune-mbart-decoder-params",
type=str,
metavar="STR",
help="comma-separated param strings to finetune.",
)
parser.add_argument(
"--finetune-mbart-encoder-params",
type=str,
metavar="STR",
help="comma-separated param strings to finetune.",
)
parser.add_argument(
"--skip-encoder-projection",
action="store_true",
help="skip the projection layer in encoder",
)
parser.add_argument(
"--enc-grad-mult",
type=float,
metavar="V",
default=1.0,
help="multiply enc1 and enc2 gradient by V",
)
parser.add_argument(
"--enc2-along-grad-mult",
type=float,
metavar="V",
default=1.0,
help="multiply enc2 gradient by V if only enc2 is used",
)
parser.add_argument(
"--text-input-cost-ratio",
type=float,
default=1.0,
metavar="V",
help="text input cost ratio relative to speech input cost",
)
parser.add_argument(
"--stack-w2v-mbart-encoder",
action="store_true",
help="stack w2v and mbart encoder",
)
parser.add_argument(
"--stack-w2v-mbart-nonorm-encoder",
action="store_true",
help="stack w2v and mbart encoder",
)
parser.add_argument(
"--no-final-norm-decoder", action="store_true", help="no layer norm"
)
parser.add_argument(
"--drop-w2v-layers",
type=int,
default=0,
metavar="N",
help="drop w2v encoder layers",
)
parser.add_argument(
"--share-w2v-text-encoder",
action="store_true",
help="share w2v encoder layers with text encoder",
)
parser.add_argument(
"--shared-w2v-layers",
type=int,
default=0,
metavar="N",
help="shared encoder layers from w2v encoder",
)
@classmethod
def build_encoder(cls, args, task):
_args = copy.deepcopy(args)
_args.dropout = args.mbart_dropout
_args.attention_dropout = args.mbart_attention_dropout
_args.activation_dropout = args.mbart_activation_dropout
_args.max_source_positions = 1024
enc_emb = nn.Embedding(
len(task.src_dict), _args.encoder_embed_dim, task.src_dict.pad()
)
text_encoder = TransformerEncoder(_args, task.src_dict, enc_emb)
spch_encoder = Wav2VecEncoderWithAdaptor(args)
if getattr(args, "load_pretrained_mbart_from", None):
text_encoder = checkpoint_utils.load_pretrained_component_from_model(
component=text_encoder, checkpoint=args.load_pretrained_mbart_from
)
if getattr(args, "stack_w2v_mbart_encoder", False):
assert getattr(args, "share_w2v_text_encoder", False) is False
spch_encoder = StackedWav2VecEncoderWithAdaptor(
spch_encoder.w2v_encoder,
text_encoder.layers,
text_encoder.layer_norm,
spch_encoder.adaptor,
args.drop_w2v_layers,
)
elif getattr(args, "stack_w2v_mbart_nonorm_encoder", False):
text_encoder.layer_norm = None
spch_encoder = StackedWav2VecEncoderWithAdaptor(
spch_encoder.w2v_encoder,
text_encoder.layers,
text_encoder.layer_norm,
spch_encoder.adaptor,
args.drop_w2v_layers,
)
elif getattr(args, "share_w2v_text_encoder", False):
spch_encoder = SharedEncoder(
spch_encoder.w2v_encoder,
text_encoder,
spch_encoder.adaptor,
args.shared_w2v_layers,
)
for k, p in spch_encoder.named_parameters():
# Freeze pretrained models by default
if safe_hasattr(
args, "finetune_w2v_params"
) and XMTransformerModel.finetune_params(args.finetune_w2v_params, k):
p.requires_grad = True
else:
p.requires_grad = False
for k, p in text_encoder.named_parameters():
# Freeze pretrained models by default
if safe_hasattr(
args, "finetune_mbart_encoder_params"
) and XMTransformerModel.finetune_params(
args.finetune_mbart_encoder_params, k
):
p.requires_grad = True
else:
p.requires_grad = False
cross_attentive_loss_before_last_layer = (
0 if getattr(args, "attentive_cost_regularization", 0.0) > 0.0 else -1
)
encoder = DualInputEncoder(
args,
spch_encoder,
text_encoder,
task.src_dict,
cross_attentive_loss_before_last_layer,
)
return encoder
@classmethod
def build_decoder(cls, args, task):
_args = copy.deepcopy(args)
_args.dropout = args.mbart_dropout
_args.attention_dropout = args.mbart_attention_dropout
_args.activation_dropout = args.mbart_activation_dropout
_args.max_target_positions = 1024
dec_emb = nn.Embedding(
len(task.tgt_dict), _args.encoder_embed_dim, task.tgt_dict.pad()
)
decoder = TransformerDecoder(_args, task.tgt_dict, dec_emb)
if getattr(args, "load_pretrained_mbart_from", None):
decoder = checkpoint_utils.load_pretrained_component_from_model(
component=decoder, checkpoint=args.load_pretrained_mbart_from
)
if getattr(args, "no_final_norm_decoder", False):
decoder.layer_norm = None
for k, p in decoder.named_parameters():
# Freeze pretrained models by default
if safe_hasattr(
args, "finetune_mbart_decoder_params"
) and XMTransformerModel.finetune_params(
args.finetune_mbart_decoder_params, k
):
p.requires_grad = True
else:
p.requires_grad = False
compute_cross_attentive_loss = getattr(args, "attentive_cost_regularization", 0.0) > 0.0
cross_attentive_loss_without_norm = getattr(
args, "attentive_cost_without_normalize", False
)
cross_attentive_loss_reverse = (
False # getattr(args, "attentive_cost_reverse", False)
)
decoder = TransformerMultiInputDecoder(
dictionary=task.target_dictionary,
spch_decoder=decoder,
text_decoder=decoder,
compute_cross_attentive_loss=compute_cross_attentive_loss,
cross_attentive_loss_with_norm=not cross_attentive_loss_without_norm,
cross_attentive_loss_reverse=cross_attentive_loss_reverse,
)
return decoder
@classmethod
def build_model(cls, args, task):
"""Build a new model instance."""
# make sure that all args are properly defaulted
# (in case there are any new ones)
dualinputxmtransformer_base(args)
encoder = cls.build_encoder(args, task)
decoder = cls.build_decoder(args, task)
return cls(encoder, decoder)
@register_model_architecture("dual_input_xm_transformer", "dualinputxmtransformer_base")
def dualinputxmtransformer_base(args):
# wav2vec encoder
set_default_w2v_encoder_args(args)
set_default_adaptor_args(args)
# mbart model
args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 1024)
args.encoder_ffn_embed_dim = getattr(
args, "encoder_ffn_embed_dim", 4 * args.encoder_embed_dim
)
args.encoder_layers = getattr(args, "encoder_layers", 12)
args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 16)
args.encoder_normalize_before = getattr(args, "encoder_normalize_before", True)
args.encoder_layerdrop = getattr(args, "encoder_layerdrop", 0)
args.encoder_learned_pos = getattr(args, "encoder_learned_pos", True)
args.decoder_embed_path = getattr(args, "decoder_embed_path", None)
args.decoder_embed_dim = getattr(args, "decoder_embed_dim", 1024)
args.decoder_ffn_embed_dim = getattr(args, "decoder_ffn_embed_dim", 4 * 1024)
args.decoder_layers = getattr(args, "decoder_layers", 12)
args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 16)
args.decoder_normalize_before = getattr(args, "decoder_normalize_before", True)
args.decoder_learned_pos = getattr(args, "decoder_learned_pos", True)
args.decoder_layerdrop = getattr(args, "decoder_layerdrop", 0.0)
args.adaptive_input = getattr(args, "adaptive_input", False)
args.mbart_attention_dropout = getattr(args, "mbart_attention_dropout", 0.0)
args.mbart_activation_dropout = getattr(args, "mbart_activation_dropout", 0.0)
args.mbart_dropout = getattr(args, "mbart_dropout", 0.1)
args.adaptive_softmax_cutoff = getattr(args, "adaptive_softmax_cutoff", None)
args.adaptive_softmax_dropout = getattr(args, "adaptive_softmax_dropout", 0)
args.share_decoder_input_output_embed = getattr(
args, "share_decoder_input_output_embed", True
)
args.no_token_positional_embeddings = getattr(
args, "no_token_positional_embeddings", False
)
args.decoder_output_dim = getattr(
args, "decoder_output_dim", args.decoder_embed_dim
)
args.decoder_input_dim = getattr(args, "decoder_input_dim", args.decoder_embed_dim)
args.no_scale_embedding = getattr(args, "no_scale_embedding", False)
args.quant_noise_pq = getattr(args, "quant_noise_pq", 0)
args.layernorm_embedding = getattr(args, "layernorm_embedding", True)
args.activation_fn = getattr(args, "activation_fn", "gelu")
args.pooler_activation_fn = getattr(args, "pooler_activation_fn", "tanh")
args.pooler_dropout = getattr(args, "pooler_dropout", 0.0)
| EXA-1-master | exa/models/unilm-master/edgelm/examples/speech_text_joint_to_text/models/s2t_dualinputxmtransformer.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import itertools
import logging
import re
import time
from g2p_en import G2p
logger = logging.getLogger(__name__)
FAIL_SENT = "FAILED_SENTENCE"
def parse():
parser = argparse.ArgumentParser()
parser.add_argument("--data-path", type=str, required=True)
parser.add_argument("--out-path", type=str, required=True)
parser.add_argument("--lower-case", action="store_true")
parser.add_argument("--do-filter", action="store_true")
parser.add_argument("--use-word-start", action="store_true")
parser.add_argument("--dup-vowel", default=1, type=int)
parser.add_argument("--dup-consonant", default=1, type=int)
parser.add_argument("--no-punc", action="store_true")
parser.add_argument("--reserve-word", type=str, default="")
parser.add_argument(
"--reserve-first-column",
action="store_true",
help="first column is sentence id",
)
###
parser.add_argument("--parallel-process-num", default=1, type=int)
parser.add_argument("--logdir", default="")
args = parser.parse_args()
return args
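# Example invocation (illustrative file names; all flags are defined above):
#   python g2p_encode.py --data-path train.txt --out-path train.phn --lower-case --use-word-start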
def process_sent(sent, g2p, res_wrds, args):
sents = pre_process_sent(sent, args.do_filter, args.lower_case, res_wrds)
pho_seqs = [do_g2p(g2p, s, res_wrds, i == 0) for i, s in enumerate(sents)]
pho_seq = (
[FAIL_SENT]
if [FAIL_SENT] in pho_seqs
else list(itertools.chain.from_iterable(pho_seqs))
)
if args.no_punc:
pho_seq = remove_punc(pho_seq)
if args.dup_vowel > 1 or args.dup_consonant > 1:
pho_seq = dup_pho(pho_seq, args.dup_vowel, args.dup_consonant)
if args.use_word_start:
pho_seq = add_word_start(pho_seq)
return " ".join(pho_seq)
def remove_punc(sent):
ns = []
regex = re.compile("[^a-zA-Z0-9 ]")
for p in sent:
if (not regex.search(p)) or p == FAIL_SENT:
if p == " " and (len(ns) == 0 or ns[-1] == " "):
continue
ns.append(p)
return ns
def do_g2p(g2p, sent, res_wrds, is_first_sent):
if sent in res_wrds:
pho_seq = [res_wrds[sent]]
else:
pho_seq = g2p(sent)
if not is_first_sent:
pho_seq = [" "] + pho_seq # add space to separate
return pho_seq
def pre_process_sent(sent, do_filter, lower_case, res_wrds):
if do_filter:
sent = re.sub("-", " ", sent)
sent = re.sub("—", " ", sent)
if len(res_wrds) > 0:
wrds = sent.split()
wrds = ["SPLIT_ME " + w + " SPLIT_ME" if w in res_wrds else w for w in wrds]
sents = [x.strip() for x in " ".join(wrds).split("SPLIT_ME") if x.strip() != ""]
else:
sents = [sent]
if lower_case:
sents = [s.lower() if s not in res_wrds else s for s in sents]
return sents
def dup_pho(sent, dup_v_num, dup_c_num):
"""
duplicate phonemes as defined in cmudict
http://www.speech.cs.cmu.edu/cgi-bin/cmudict
"""
if dup_v_num == 1 and dup_c_num == 1:
return sent
ns = []
for p in sent:
ns.append(p)
if re.search(r"\d$", p):
for i in range(1, dup_v_num):
ns.append(f"{p}-{i}P")
elif re.search(r"\w", p):
for i in range(1, dup_c_num):
ns.append(f"{p}-{i}P")
return ns
def add_word_start(sent):
ns = []
do_add = True
ws = "▁"
for p in sent:
if do_add:
p = ws + p
do_add = False
if p == " ":
do_add = True
else:
ns.append(p)
return ns
def load_reserve_word(reserve_word):
if reserve_word == "":
return []
with open(reserve_word, "r") as fp:
res_wrds = [x.strip().split() for x in fp.readlines() if x.strip() != ""]
assert sum([0 if len(x) == 2 else 1 for x in res_wrds]) == 0
res_wrds = dict(res_wrds)
return res_wrds
def process_sents(sents, args):
g2p = G2p()
out_sents = []
res_wrds = load_reserve_word(args.reserve_word)
for sent in sents:
col1 = ""
if args.reserve_first_column:
col1, sent = sent.split(None, 1)
sent = process_sent(sent, g2p, res_wrds, args)
if args.reserve_first_column and col1 != "":
sent = f"{col1} {sent}"
out_sents.append(sent)
return out_sents
def main():
args = parse()
out_sents = []
with open(args.data_path, "r") as fp:
sent_list = [x.strip() for x in fp.readlines()]
if args.parallel_process_num > 1:
try:
import submitit
except ImportError:
logger.warning(
"submitit is not found and only one job is used to process the data"
)
submitit = None
if args.parallel_process_num == 1 or submitit is None:
out_sents = process_sents(sent_list, args)
else:
# process sentences with parallel computation
lsize = len(sent_list) // args.parallel_process_num + 1
executor = submitit.AutoExecutor(folder=args.logdir)
executor.update_parameters(timeout_min=1000, cpus_per_task=4)
jobs = []
for i in range(args.parallel_process_num):
job = executor.submit(
process_sents, sent_list[lsize * i : lsize * (i + 1)], args
)
jobs.append(job)
is_running = True
while is_running:
time.sleep(5)
is_running = sum([job.done() for job in jobs]) < len(jobs)
out_sents = list(itertools.chain.from_iterable([job.result() for job in jobs]))
with open(args.out_path, "w") as fp:
fp.write("\n".join(out_sents) + "\n")
if __name__ == "__main__":
main()
| EXA-1-master | exa/models/unilm-master/edgelm/examples/speech_text_joint_to_text/scripts/g2p_encode.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import math
import torch
import torch.nn.functional as F
from fairseq.criterions import FairseqCriterion, register_criterion
from fairseq.criterions.label_smoothed_cross_entropy import label_smoothed_nll_loss
from fairseq import metrics, utils
@register_criterion("guided_label_smoothed_cross_entropy_with_accuracy")
class GuidedCrossEntAccCriterion(FairseqCriterion):
def __init__(
self,
task,
sentence_avg,
guide_alpha,
text_input_cost_ratio,
label_smoothing,
disable_text_guide_update_num=0,
attentive_cost_regularization=0,
):
"""
guide_alpha: alpha to interpolate nll and kd loss
text_input_cost_ratio: loss ratio for text only input data
label_smoothing: label smoothing ratio
disable_text_guide_update_num: only use nll loss for the first N updates
attentive_cost_regularization: ratio of attentive cost
"""
super().__init__(task)
self.alpha = guide_alpha
self.attn_beta = attentive_cost_regularization
self.sentence_avg = sentence_avg
self.eps = label_smoothing
self.text_input_cost_ratio = text_input_cost_ratio
self.disable_update_num = disable_text_guide_update_num
assert self.alpha >= 0 and self.alpha <= 1.0
@staticmethod
def add_args(parser):
"""Add criterion-specific arguments to the parser."""
# fmt: off
parser.add_argument('--label-smoothing', default=0., type=float, metavar='D',
help='epsilon for label smoothing, 0 means no label smoothing')
# fmt: off
parser.add_argument('--guide-alpha', default=0., type=float, metavar='D',
help='alpha to merge kd cost from text to speech input with ce loss')
# fmt: off
parser.add_argument('--disable-text-guide-update-num', default=0, type=int, metavar='D',
help='disable guided target from text for the first N updates.')
parser.add_argument("--attentive-cost-regularization", default=0.0, type=float, metavar='D',
help="use encoder attentive loss regularization with cost ratio D")
parser.add_argument("--attentive-cost-without-normalize", action='store_true',
help="Don't do normalization during attentive cost computation")
def forward(self, model, sample, reduce=True):
reduction = 'sum' if reduce else 'none'
net_input = sample["net_input"]
net_output = model(**net_input)
attn_cost = None
lprobs = model.get_normalized_probs(net_output, log_probs=True)
is_dual_input = net_input['src_tokens'] is not None and net_input.get('src_txt_tokens') is not None
target = model.get_targets(sample, net_output)
src_token_num = 0
if is_dual_input:
# lprobs_spch from speech encoder and lprobs_text from text encoder
lprobs_spch, lprobs_text = torch.chunk(lprobs, 2)
lprobs_spch.batch_first = lprobs.batch_first
lprobs_text.batch_first = lprobs.batch_first
speech_loss, speech_nll_loss, speech_correct, speech_total = \
self.guide_loss_and_acc(model, lprobs_spch, lprobs_text, target, reduce=(reduction == 'sum'))
text_loss, text_nll_loss, text_correct, text_total = self.compute_loss_and_acc(model, lprobs_text, target, reduction=reduction)
loss = (speech_loss + text_loss)
nll_loss = (speech_nll_loss + text_nll_loss)
correct = speech_correct + text_correct
total = speech_total + text_total
attn_cost = net_output[1].get('attn_cost')
if attn_cost is not None:
# attn_cost is batch_first and padding tokens have been masked already
src_token_num = attn_cost.ne(0).sum()
attn_cost = attn_cost.sum()
loss = loss + attn_cost * self.attn_beta
else:
attn_cost = 0
else:
loss, nll_loss, correct, total = self.compute_loss_and_acc(model, lprobs, target, reduction=reduction)
if sample["net_input"]['src_tokens'] is None: # text input only
loss = loss * self.text_input_cost_ratio
speech_loss = None
speech_nll_loss = None
sample_size, logging_output = self.get_logging_output(
sample, loss, nll_loss, correct, total, src_token_num, speech_loss, speech_nll_loss, attn_cost, is_dual_input
)
return loss, sample_size, logging_output
def compute_loss_and_acc(self, model, lprobs, target, reduction='sum'):
if not lprobs.batch_first:
lprobs = lprobs.transpose(0, 1)
lprobs = lprobs.view(-1, lprobs.size(-1)) # -> (B x T) x C
target = target.view(-1)
loss, nll_loss = label_smoothed_nll_loss(
lprobs, target, self.eps, ignore_index=self.padding_idx, reduce=(reduction == 'sum'),
)
mask = target.ne(self.padding_idx)
correct = torch.sum(lprobs.argmax(1).masked_select(mask).eq(target.masked_select(mask)))
total = torch.sum(mask)
return loss, nll_loss, correct, total
def guide_loss_and_acc(self, model, lprobs, lprobs_teacher, target, reduce=True):
""" lprobs_teacher is used as guide for lprobs """
if self.alpha == 0.0 or model.num_updates < self.disable_update_num:
return self.compute_loss_and_acc(model, lprobs, target, reduction=('sum' if reduce else 'none'))
if not lprobs.batch_first:
lprobs = lprobs.transpose(0, 1)
lprobs_teacher = lprobs_teacher.transpose(0, 1)
lprobs = lprobs.view(-1, lprobs.size(-1)).float() # -> (B x T) x C
lprobs_teacher = lprobs_teacher.view(-1, lprobs_teacher.size(-1)).float() # -> (B x T) x C
target = target.view(-1)
loss = F.nll_loss(lprobs, target, ignore_index=self.padding_idx, reduction='sum' if reduce else 'none')
nll_loss = loss
probs_teacher = lprobs_teacher.exp().masked_fill_(target.unsqueeze(-1).eq(self.padding_idx), 0)
probs_teacher = probs_teacher.detach()
guide_loss = -(probs_teacher*lprobs).sum() if reduce else -(probs_teacher*lprobs).sum(-1, keepdim=True)
loss = self.alpha*guide_loss + (1.0 - self.alpha)*loss
mask = target.ne(self.padding_idx)
correct = torch.sum(lprobs.argmax(1).masked_select(mask).eq(target.masked_select(mask)))
total = torch.sum(mask)
return loss, nll_loss, correct, total
def get_logging_output(
self,
sample,
loss,
nll_loss,
correct,
total,
src_token_num=0,
speech_loss=None,
speech_nll_loss=None,
attn_cost=None,
is_dual_input=False,
):
sample_size = (
sample["target"].size(0) if self.sentence_avg else sample["ntokens"]
)
mul_size = 2 if is_dual_input else 1
logging_output = {
"loss": utils.item(loss.data), # * sample['ntokens'],
"nll_loss": utils.item(nll_loss.data), # * sample['ntokens'],
"ntokens": sample["ntokens"]*mul_size,
"nsentences": sample["target"].size(0)*mul_size,
"sample_size": sample_size*mul_size,
"correct": utils.item(correct.data),
"total": utils.item(total.data),
"src_token_num": utils.item(src_token_num.data) if src_token_num > 0 else 0,
"nframes": torch.sum(sample["net_input"]["src_lengths"]).item(),
}
if speech_loss is not None:
logging_output["speech_loss"] = utils.item(speech_loss.data)
logging_output["speech_nll_loss"] = utils.item(speech_nll_loss.data)
logging_output["sample_size_speech_cost"] = sample_size
logging_output["speech_attn_loss"] = attn_cost
return sample_size*mul_size, logging_output
@staticmethod
def aggregate_logging_outputs(logging_outputs):
"""Aggregate logging outputs from data parallel training."""
correct_sum = sum(log.get("correct", 0) for log in logging_outputs)
total_sum = sum(log.get("total", 0) for log in logging_outputs)
src_token_sum = sum(log.get("src_token_num", 0) for log in logging_outputs)
loss_sum = sum(log.get("loss", 0) for log in logging_outputs)
nll_loss_sum = sum(log.get("nll_loss", 0) for log in logging_outputs)
ntokens = sum(log.get("ntokens", 0) for log in logging_outputs)
nsentences = sum(log.get("nsentences", 0) for log in logging_outputs)
sample_size = sum(log.get("sample_size", 0) for log in logging_outputs)
nframes = sum(log.get("nframes", 0) for log in logging_outputs)
speech_loss_sum = sum(log.get("speech_loss", 0) for log in logging_outputs)
speech_nll_loss_sum = sum(log.get("speech_nll_loss", 0) for log in logging_outputs)
speech_attn_loss_sum = sum(log.get("speech_attn_loss", 0) for log in logging_outputs)
sample_size_speech = sum(log.get("sample_size_speech_cost", 0) for log in logging_outputs)
agg_output = {
"loss": loss_sum / sample_size / math.log(2) if sample_size > 0 else 0.0,
"nll_loss": nll_loss_sum / sample_size / math.log(2) if sample_size > 0 else 0.0,
# if args.sentence_avg, then sample_size is nsentences, and loss
# is per-sentence loss; else sample_size is ntokens, and the loss
# becomes per-output token loss
"speech_loss": speech_loss_sum / sample_size_speech / math.log(2) if sample_size_speech > 0 else 0.0,
"speech_nll_loss": speech_nll_loss_sum / sample_size_speech / math.log(2) if sample_size_speech > 0 else 0.0,
"speech_attn_loss": speech_attn_loss_sum / src_token_sum / math.log(2) if src_token_sum > 0 else 0.0,
"ntokens": ntokens,
"nsentences": nsentences,
"nframes": nframes,
"sample_size": sample_size,
"acc": correct_sum * 100.0 / total_sum if total_sum > 0 else 0.0,
"correct": correct_sum,
"total": total_sum,
"src_token_num": src_token_sum,
# total is the number of validate tokens
}
return agg_output
@classmethod
def reduce_metrics(cls, logging_outputs):
"""Aggregate logging outputs from data parallel training."""
agg_logging_outputs = cls.aggregate_logging_outputs(logging_outputs)
for k, v in agg_logging_outputs.items():
if k in {'nsentences', 'ntokens', 'sample_size'}:
continue
metrics.log_scalar(k, v, round=3)
| EXA-1-master | exa/models/unilm-master/edgelm/examples/speech_text_joint_to_text/criterions/text_guide_cross_entropy_acc.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import importlib
import os
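# automatically import every criterion module in this directory so that the
# criteria they register become visible to fairseq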
for file in os.listdir(os.path.dirname(__file__)):
if file.endswith(".py") and not file.startswith("_"):
criterion_name = file[: file.find(".py")]
importlib.import_module(
"examples.speech_text_joint_to_text.criterions." + criterion_name
)
| EXA-1-master | exa/models/unilm-master/edgelm/examples/speech_text_joint_to_text/criterions/__init__.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
Helper script to pre-compute embeddings for a flashlight (previously called wav2letter++) dataset
"""
import argparse
import glob
import os
from shutil import copy
import h5py
import numpy as np
import soundfile as sf
import torch
import tqdm
import fairseq
from torch import nn
def read_audio(fname):
""" Load an audio file and return PCM along with the sample rate """
wav, sr = sf.read(fname)
assert sr == 16e3
return wav, 16e3
class PretrainedWav2VecModel(nn.Module):
def __init__(self, fname):
super().__init__()
model, cfg, task = fairseq.checkpoint_utils.load_model_ensemble_and_task([fname])
model = model[0]
model.eval()
self.model = model
def forward(self, x):
with torch.no_grad():
z = self.model.feature_extractor(x)
if isinstance(z, tuple):
z = z[0]
c = self.model.feature_aggregator(z)
return z, c
class EmbeddingWriterConfig(argparse.ArgumentParser):
def __init__(self):
super().__init__("Pre-compute embeddings for flashlight datasets")
kwargs = {"action": "store", "type": str, "required": True}
self.add_argument("--input", "-i", help="Input Directory", **kwargs)
self.add_argument("--output", "-o", help="Output Directory", **kwargs)
self.add_argument("--model", help="Path to model checkpoint", **kwargs)
self.add_argument("--split", help="Dataset Splits", nargs="+", **kwargs)
self.add_argument(
"--ext", default="wav", required=False, help="Audio file extension"
)
self.add_argument(
"--no-copy-labels",
action="store_true",
help="Do not copy label files. Useful for large datasets, use --targetdir in flashlight then.",
)
self.add_argument(
"--use-feat",
action="store_true",
help="Use the feature vector ('z') instead of context vector ('c') for features",
)
self.add_argument("--gpu", help="GPU to use", default=0, type=int)
class Prediction:
""" Lightweight wrapper around a fairspeech embedding model """
def __init__(self, fname, gpu=0):
self.gpu = gpu
self.model = PretrainedWav2VecModel(fname).cuda(gpu)
def __call__(self, x):
x = torch.from_numpy(x).float().cuda(self.gpu)
with torch.no_grad():
z, c = self.model(x.unsqueeze(0))
return z.squeeze(0).cpu().numpy(), c.squeeze(0).cpu().numpy()
class H5Writer:
""" Write features as hdf5 file in flashlight compatible format """
def __init__(self, fname):
self.fname = fname
os.makedirs(os.path.dirname(self.fname), exist_ok=True)
def write(self, data):
channel, T = data.shape
with h5py.File(self.fname, "w") as out_ds:
data = data.T.flatten()
out_ds["features"] = data
out_ds["info"] = np.array([16e3 // 160, T, channel])
class EmbeddingDatasetWriter(object):
"""Given a model and a flashlight dataset, pre-compute and store embeddings
Args:
input_root, str :
Path to the flashlight dataset
output_root, str :
Desired output directory. Will be created if non-existent
split, str :
Dataset split
"""
def __init__(
self,
input_root,
output_root,
split,
model_fname,
extension="wav",
gpu=0,
verbose=False,
use_feat=False,
):
assert os.path.exists(model_fname)
self.model_fname = model_fname
self.model = Prediction(self.model_fname, gpu)
self.input_root = input_root
self.output_root = output_root
self.split = split
self.verbose = verbose
self.extension = extension
self.use_feat = use_feat
assert os.path.exists(self.input_path), "Input path '{}' does not exist".format(
self.input_path
)
def _progress(self, iterable, **kwargs):
if self.verbose:
return tqdm.tqdm(iterable, **kwargs)
return iterable
def require_output_path(self, fname=None):
path = self.get_output_path(fname)
os.makedirs(path, exist_ok=True)
@property
def input_path(self):
return self.get_input_path()
@property
def output_path(self):
return self.get_output_path()
def get_input_path(self, fname=None):
if fname is None:
return os.path.join(self.input_root, self.split)
return os.path.join(self.get_input_path(), fname)
def get_output_path(self, fname=None):
if fname is None:
return os.path.join(self.output_root, self.split)
return os.path.join(self.get_output_path(), fname)
def copy_labels(self):
self.require_output_path()
labels = list(
filter(
lambda x: self.extension not in x, glob.glob(self.get_input_path("*"))
)
)
for fname in tqdm.tqdm(labels):
copy(fname, self.output_path)
@property
def input_fnames(self):
return sorted(glob.glob(self.get_input_path("*.{}".format(self.extension))))
def __len__(self):
return len(self.input_fnames)
def write_features(self):
paths = self.input_fnames
fnames_context = map(
lambda x: os.path.join(
self.output_path, x.replace("." + self.extension, ".h5context")
),
map(os.path.basename, paths),
)
for name, target_fname in self._progress(
zip(paths, fnames_context), total=len(self)
):
wav, sr = read_audio(name)
z, c = self.model(wav)
feat = z if self.use_feat else c
writer = H5Writer(target_fname)
writer.write(feat)
def __repr__(self):
return "EmbeddingDatasetWriter ({n_files} files)\n\tinput:\t{input_root}\n\toutput:\t{output_root}\n\tsplit:\t{split})".format(
n_files=len(self), **self.__dict__
)
if __name__ == "__main__":
args = EmbeddingWriterConfig().parse_args()
for split in args.split:
writer = EmbeddingDatasetWriter(
input_root=args.input,
output_root=args.output,
split=split,
model_fname=args.model,
gpu=args.gpu,
extension=args.ext,
use_feat=args.use_feat,
)
print(writer)
writer.require_output_path()
print("Writing Features...")
writer.write_features()
print("Done.")
if not args.no_copy_labels:
print("Copying label data...")
writer.copy_labels()
print("Done.")
| EXA-1-master | exa/models/unilm-master/edgelm/examples/wav2vec/wav2vec_featurize.py |
EXA-1-master | exa/models/unilm-master/edgelm/examples/wav2vec/__init__.py |
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
Data pre-processing: create train/valid manifest (.tsv) files for a directory of audio files.
"""
import argparse
import glob
import os
import random
import soundfile
def get_parser():
parser = argparse.ArgumentParser()
parser.add_argument(
"root", metavar="DIR", help="root directory containing flac files to index"
)
parser.add_argument(
"--valid-percent",
default=0.01,
type=float,
metavar="D",
help="percentage of data to use as validation set (between 0 and 1)",
)
parser.add_argument(
"--dest", default=".", type=str, metavar="DIR", help="output directory"
)
parser.add_argument(
"--ext", default="flac", type=str, metavar="EXT", help="extension to look for"
)
parser.add_argument("--seed", default=42, type=int, metavar="N", help="random seed")
parser.add_argument(
"--path-must-contain",
default=None,
type=str,
metavar="FRAG",
help="if set, path must contain this substring for a file to be included in the manifest",
)
return parser
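# Example invocation (illustrative paths; all flags are defined above):
#   python wav2vec_manifest.py /data/LibriSpeech --dest /data/manifests --ext flac --valid-percent 0.01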
def main(args):
assert args.valid_percent >= 0 and args.valid_percent <= 1.0
if not os.path.exists(args.dest):
os.makedirs(args.dest)
dir_path = os.path.realpath(args.root)
search_path = os.path.join(dir_path, "**/*." + args.ext)
rand = random.Random(args.seed)
valid_f = (
open(os.path.join(args.dest, "valid.tsv"), "w")
if args.valid_percent > 0
else None
)
with open(os.path.join(args.dest, "train.tsv"), "w") as train_f:
print(dir_path, file=train_f)
if valid_f is not None:
print(dir_path, file=valid_f)
for fname in glob.iglob(search_path, recursive=True):
file_path = os.path.realpath(fname)
if args.path_must_contain and args.path_must_contain not in file_path:
continue
frames = soundfile.info(fname).frames
dest = train_f if rand.random() > args.valid_percent else valid_f
print(
"{}\t{}".format(os.path.relpath(file_path, dir_path), frames), file=dest
)
if valid_f is not None:
valid_f.close()
if __name__ == "__main__":
parser = get_parser()
args = parser.parse_args()
main(args)
| EXA-1-master | exa/models/unilm-master/edgelm/examples/wav2vec/wav2vec_manifest.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
Helper script to create letter (.ltr) and word (.wrd) transcript files for a LibriSpeech wav2vec manifest
"""
import argparse
import os
def main():
parser = argparse.ArgumentParser()
parser.add_argument("tsv")
parser.add_argument("--output-dir", required=True)
parser.add_argument("--output-name", required=True)
args = parser.parse_args()
os.makedirs(args.output_dir, exist_ok=True)
transcriptions = {}
with open(args.tsv, "r") as tsv, open(
os.path.join(args.output_dir, args.output_name + ".ltr"), "w"
) as ltr_out, open(
os.path.join(args.output_dir, args.output_name + ".wrd"), "w"
) as wrd_out:
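# a wav2vec manifest starts with the root directory on its first line; every
# following line is "<relative audio path>\t<num frames>"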
root = next(tsv).strip()
for line in tsv:
line = line.strip()
dir = os.path.dirname(line)
if dir not in transcriptions:
parts = dir.split(os.path.sep)
trans_path = f"{parts[-2]}-{parts[-1]}.trans.txt"
path = os.path.join(root, dir, trans_path)
assert os.path.exists(path)
texts = {}
with open(path, "r") as trans_f:
for tline in trans_f:
items = tline.strip().split()
texts[items[0]] = " ".join(items[1:])
transcriptions[dir] = texts
part = os.path.basename(line).split(".")[0]
assert part in transcriptions[dir]
print(transcriptions[dir][part], file=wrd_out)
print(
" ".join(list(transcriptions[dir][part].replace(" ", "|"))) + " |",
file=ltr_out,
)
if __name__ == "__main__":
main()
| EXA-1-master | exa/models/unilm-master/edgelm/examples/wav2vec/libri_labels.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
Helper script to pre-compute vq-wav2vec token indices (quantized codebook ids) for a dataset
"""
import argparse
import glob
import os
import os.path as osp
import pprint
import soundfile as sf
import torch
import fairseq
from torch import nn
from torch.utils.data import DataLoader
try:
import tqdm
except ImportError:
print("Install tqdm to use --log-format=tqdm")
class FilesDataset:
def __init__(self, files, labels):
self.files = files
if labels and osp.exists(labels):
with open(labels, "r") as lbl_f:
self.labels = [line.rstrip() for line in lbl_f]
else:
self.labels = labels
def __len__(self):
return len(self.files)
def __getitem__(self, index):
fname = self.files[index]
wav, sr = sf.read(fname)
assert sr == 16000
wav = torch.from_numpy(wav).float()
lbls = None
if self.labels:
if isinstance(self.labels, str):
lbl_file = osp.splitext(fname)[0] + "." + self.labels
with open(lbl_file, "r") as lblf:
lbls = lblf.readline()
assert lbls is not None
else:
lbls = self.labels[index]
return wav, lbls
def collate(self, batch):
return batch
class ArgTypes:
@staticmethod
def existing_path(arg):
arg = str(arg)
assert osp.exists(arg), f"File {arg} does not exist"
return arg
@staticmethod
def mkdir(arg):
arg = str(arg)
os.makedirs(arg, exist_ok=True)
return arg
class DatasetWriter:
def __init__(self):
self.args = self.load_config()
pprint.pprint(self.args.__dict__)
self.model = self.load_model()
def __getattr__(self, attr):
return getattr(self.args, attr)
def read_manifest(self, fname):
with open(fname, "r") as fp:
lines = fp.read().split("\n")
root = lines.pop(0).strip()
fnames = [
osp.join(root, line.split("\t")[0]) for line in lines if len(line) > 0
]
return fnames
def process_splits(self):
if self.args.shard is not None or self.args.num_shards is not None:
assert self.args.shard is not None and self.args.num_shards is not None
for split in self.splits:
print(split)
if self.extension == "tsv":
datadir = osp.join(self.data_dir, f"{split}.{self.extension}")
print("Reading manifest file: ", datadir)
files = self.read_manifest(datadir)
else:
datadir = osp.join(self.data_dir, split, f"**/*.{self.extension}")
files = glob.glob(datadir, recursive=True)
assert len(files) > 0
if self.args.shard is not None:
files = files[self.args.shard :: self.args.num_shards]
lbls = []
with open(self.data_file(split), "w") as srcf:
for line, lbl in self.iterate(files):
print(line, file=srcf)
if self.args.labels:
lbls.append(lbl + "\n")
if self.args.labels:
assert all(a is not None for a in lbls)
with open(self.lbl_file(split), "w") as lblf:
lblf.writelines(lbls)
def iterate(self, files):
data = self.load_data(files)
for samples in tqdm.tqdm(data, total=len(files) // 32):
for wav, lbl in samples:
x = wav.unsqueeze(0).float().cuda()
div = 1
while x.size(-1) // div > self.args.max_size:
div += 1
xs = x.chunk(div, dim=-1)
result = []
for x in xs:
torch.cuda.empty_cache()
x = self.model.feature_extractor(x)
if self.quantize_location == "encoder":
with torch.no_grad():
_, idx = self.model.vector_quantizer.forward_idx(x)
idx = idx.squeeze(0).cpu()
else:
with torch.no_grad():
z = self.model.feature_aggregator(x)
_, idx = self.model.vector_quantizer.forward_idx(z)
idx = idx.squeeze(0).cpu()
result.append(idx)
idx = torch.cat(result, dim=0)
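# each line of the .src output is one utterance: time steps are separated by spaces,
# and the indices of the individual quantizer groups within a step are joined by '-'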
yield " ".join("-".join(map(str, a.tolist())) for a in idx), lbl
def lbl_file(self, name):
shard_part = "" if self.args.shard is None else f".{self.args.shard}"
return osp.join(self.output_dir, f"{name}.lbl{shard_part}")
def data_file(self, name):
shard_part = "" if self.args.shard is None else f".{self.args.shard}"
return osp.join(self.output_dir, f"{name}.src{shard_part}")
def var_file(self):
return osp.join(self.output_dir, f"vars.pt")
def load_config(self):
parser = argparse.ArgumentParser("Vector Quantized wav2vec features")
# Model Arguments
parser.add_argument("--checkpoint", type=ArgTypes.existing_path, required=True)
parser.add_argument("--data-parallel", action="store_true")
# Output Arguments
parser.add_argument("--output-dir", type=ArgTypes.mkdir, required=True)
# Data Arguments
parser.add_argument("--data-dir", type=ArgTypes.existing_path, required=True)
parser.add_argument("--splits", type=str, nargs="+", required=True)
parser.add_argument("--extension", type=str, required=True)
parser.add_argument("--labels", type=str, required=False)
parser.add_argument("--shard", type=int, default=None)
parser.add_argument("--num-shards", type=int, default=None)
parser.add_argument("--max-size", type=int, default=1300000)
# Logger Arguments
parser.add_argument(
"--log-format", type=str, choices=["none", "simple", "tqdm"]
)
return parser.parse_args()
def load_data(self, fnames):
dataset = FilesDataset(fnames, self.args.labels)
loader = DataLoader(
dataset, batch_size=32, collate_fn=dataset.collate, num_workers=8
)
return loader
def load_model(self):
model, cfg, task = fairseq.checkpoint_utils.load_model_ensemble_and_task([self.checkpoint])
model = model[0]
self.quantize_location = getattr(cfg.model, "vq", "encoder")
model.eval().float()
model.cuda()
if self.data_parallel:
model = nn.DataParallel(model)
return model
def __call__(self):
self.process_splits()
if hasattr(self.model.feature_extractor, "vars") and (
self.args.shard is None or self.args.shard == 0
):
vars = (
self.model.feature_extractor.vars.view(
self.model.feature_extractor.banks,
self.model.feature_extractor.num_vars,
-1,
)
.cpu()
.detach()
)
print("writing learned latent variable embeddings: ", vars.shape)
torch.save(vars, self.var_file())
if __name__ == "__main__":
write_data = DatasetWriter()
write_data()
print("Done.")
| EXA-1-master | exa/models/unilm-master/edgelm/examples/wav2vec/vq-wav2vec_featurize.py |
EXA-1-master | exa/models/unilm-master/edgelm/examples/wav2vec/unsupervised/__init__.py |
|
#!/usr/bin/env python3 -u
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
Run inference for pre-processed data with a trained model.
"""
import ast
from collections import namedtuple
from dataclasses import dataclass, field
from enum import Enum, auto
import hydra
from hydra.core.config_store import ConfigStore
import logging
import math
import os
from omegaconf import OmegaConf
from typing import Optional
import sys
import editdistance
import torch
from hydra.core.hydra_config import HydraConfig
from fairseq import checkpoint_utils, progress_bar, tasks, utils
from fairseq.data.data_utils import post_process
from fairseq.dataclass.configs import FairseqDataclass, FairseqConfig
from fairseq.logging.meters import StopwatchMeter
from omegaconf import open_dict
from examples.speech_recognition.kaldi.kaldi_decoder import KaldiDecoderConfig
logging.root.setLevel(logging.INFO)
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
logger = logging.getLogger(__name__)
class DecoderType(Enum):
VITERBI = auto()
KENLM = auto()
FAIRSEQ = auto()
KALDI = auto()
@dataclass
class UnsupGenerateConfig(FairseqDataclass):
fairseq: FairseqConfig = FairseqConfig()
lm_weight: float = field(
default=2.0,
metadata={"help": "language model weight"},
)
w2l_decoder: DecoderType = field(
default=DecoderType.VITERBI,
metadata={"help": "type of decoder to use"},
)
kaldi_decoder_config: Optional[KaldiDecoderConfig] = None
lexicon: Optional[str] = field(
default=None,
metadata={
"help": "path to lexicon. This is also used to 'phonemize' for unsupvised param tuning"
},
)
lm_model: Optional[str] = field(
default=None,
metadata={"help": "path to language model (kenlm or fairseq)"},
)
unit_lm: bool = field(
default=False,
metadata={"help": "whether to use unit lm"},
)
beam_threshold: float = field(
default=50.0,
metadata={"help": "beam score threshold"},
)
beam_size_token: float = field(
default=100.0,
metadata={"help": "max tokens per beam"},
)
beam: int = field(
default=5,
metadata={"help": "decoder beam size"},
)
nbest: int = field(
default=1,
metadata={"help": "number of results to return"},
)
word_score: float = field(
default=1.0,
metadata={"help": "word score to add at end of word"},
)
unk_weight: float = field(
default=-math.inf,
metadata={"help": "unknown token weight"},
)
sil_weight: float = field(
default=0.0,
metadata={"help": "silence token weight"},
)
targets: Optional[str] = field(
default=None,
metadata={"help": "extension of ground truth labels to compute UER"},
)
results_path: Optional[str] = field(
default=None,
metadata={"help": "where to store results"},
)
post_process: Optional[str] = field(
default=None,
metadata={"help": "how to post process results"},
)
vocab_usage_power: float = field(
default=2,
metadata={"help": "for unsupervised param tuning"},
)
viterbi_transcript: Optional[str] = field(
default=None,
metadata={"help": "for unsupervised param tuning"},
)
min_lm_ppl: float = field(
default=0,
metadata={"help": "for unsupervised param tuning"},
)
min_vt_uer: float = field(
default=0,
metadata={"help": "for unsupervised param tuning"},
)
blank_weight: float = field(
default=0,
metadata={"help": "value to add or set for blank emission"},
)
blank_mode: str = field(
default="set",
metadata={
"help": "can be add or set, how to modify blank emission with blank weight"
},
)
sil_is_blank: bool = field(
default=False,
metadata={"help": "if true, <SIL> token is same as blank token"},
)
unsupervised_tuning: bool = field(
default=False,
metadata={
"help": "if true, returns a score based on unsupervised param selection metric instead of UER"
},
)
is_ax: bool = field(
default=False,
metadata={
"help": "if true, assumes we are using ax for tuning and returns a tuple for ax to consume"
},
)
def get_dataset_itr(cfg, task):
return task.get_batch_iterator(
dataset=task.dataset(cfg.fairseq.dataset.gen_subset),
max_tokens=cfg.fairseq.dataset.max_tokens,
max_sentences=cfg.fairseq.dataset.batch_size,
max_positions=(sys.maxsize, sys.maxsize),
ignore_invalid_inputs=cfg.fairseq.dataset.skip_invalid_size_inputs_valid_test,
required_batch_size_multiple=cfg.fairseq.dataset.required_batch_size_multiple,
num_shards=cfg.fairseq.dataset.num_shards,
shard_id=cfg.fairseq.dataset.shard_id,
num_workers=cfg.fairseq.dataset.num_workers,
data_buffer_size=cfg.fairseq.dataset.data_buffer_size,
).next_epoch_itr(shuffle=False)
def process_predictions(
cfg: UnsupGenerateConfig,
hypos,
tgt_dict,
target_tokens,
res_files,
):
retval = []
word_preds = []
transcriptions = []
dec_scores = []
for i, hypo in enumerate(hypos[: min(len(hypos), cfg.nbest)]):
if torch.is_tensor(hypo["tokens"]):
tokens = hypo["tokens"].int().cpu()
tokens = tokens[tokens >= tgt_dict.nspecial]
hyp_pieces = tgt_dict.string(tokens)
else:
hyp_pieces = " ".join(hypo["tokens"])
if "words" in hypo and len(hypo["words"]) > 0:
hyp_words = " ".join(hypo["words"])
else:
hyp_words = post_process(hyp_pieces, cfg.post_process)
to_write = {}
if res_files is not None:
to_write[res_files["hypo.units"]] = hyp_pieces
to_write[res_files["hypo.words"]] = hyp_words
tgt_words = ""
if target_tokens is not None:
if isinstance(target_tokens, str):
tgt_pieces = tgt_words = target_tokens
else:
tgt_pieces = tgt_dict.string(target_tokens)
tgt_words = post_process(tgt_pieces, cfg.post_process)
if res_files is not None:
to_write[res_files["ref.units"]] = tgt_pieces
to_write[res_files["ref.words"]] = tgt_words
if not cfg.fairseq.common_eval.quiet:
logger.info(f"HYPO {i}:" + hyp_words)
if tgt_words:
logger.info("TARGET:" + tgt_words)
if "am_score" in hypo and "lm_score" in hypo:
logger.info(
f"DECODER AM SCORE: {hypo['am_score']}, DECODER LM SCORE: {hypo['lm_score']}, DECODER SCORE: {hypo['score']}"
)
elif "score" in hypo:
logger.info(f"DECODER SCORE: {hypo['score']}")
logger.info("___________________")
hyp_words_arr = hyp_words.split()
tgt_words_arr = tgt_words.split()
retval.append(
(
editdistance.eval(hyp_words_arr, tgt_words_arr),
len(hyp_words_arr),
len(tgt_words_arr),
hyp_pieces,
hyp_words,
)
)
word_preds.append(hyp_words_arr)
transcriptions.append(to_write)
dec_scores.append(-hypo.get("score", 0))  # negate because kaldi returns NLL
if len(retval) > 1:
best = None
for r, t in zip(retval, transcriptions):
if best is None or r[0] < best[0][0]:
best = r, t
for dest, tran in best[1].items():
print(tran, file=dest)
dest.flush()
return best[0]
assert len(transcriptions) == 1
for dest, tran in transcriptions[0].items():
print(tran, file=dest)
return retval[0]
def prepare_result_files(cfg: UnsupGenerateConfig):
def get_res_file(file_prefix):
if cfg.fairseq.dataset.num_shards > 1:
file_prefix = f"{cfg.fairseq.dataset.shard_id}_{file_prefix}"
path = os.path.join(
cfg.results_path,
"{}{}.txt".format(
cfg.fairseq.dataset.gen_subset,
file_prefix,
),
)
return open(path, "w", buffering=1)
if not cfg.results_path:
return None
return {
"hypo.words": get_res_file(""),
"hypo.units": get_res_file("_units"),
"ref.words": get_res_file("_ref"),
"ref.units": get_res_file("_ref_units"),
"hypo.nbest.words": get_res_file("_nbest_words"),
}
def optimize_models(cfg: UnsupGenerateConfig, use_cuda, models):
"""Optimize ensemble for generation"""
for model in models:
model.eval()
if cfg.fairseq.common.fp16:
model.half()
if use_cuda:
model.cuda()
GenResult = namedtuple(
"GenResult",
[
"count",
"errs_t",
"gen_timer",
"lengths_hyp_unit_t",
"lengths_hyp_t",
"lengths_t",
"lm_score_t",
"num_feats",
"num_sentences",
"num_symbols",
"vt_err_t",
"vt_length_t",
],
)
def generate(cfg: UnsupGenerateConfig, models, saved_cfg, use_cuda):
task = tasks.setup_task(cfg.fairseq.task)
saved_cfg.task.labels = cfg.fairseq.task.labels
task.load_dataset(cfg.fairseq.dataset.gen_subset, task_cfg=saved_cfg.task)
# Set dictionary
tgt_dict = task.target_dictionary
logger.info(
"| {} {} {} examples".format(
cfg.fairseq.task.data,
cfg.fairseq.dataset.gen_subset,
len(task.dataset(cfg.fairseq.dataset.gen_subset)),
)
)
# Load dataset (possibly sharded)
itr = get_dataset_itr(cfg, task)
# Initialize generator
gen_timer = StopwatchMeter()
def build_generator(cfg: UnsupGenerateConfig):
w2l_decoder = cfg.w2l_decoder
if w2l_decoder == DecoderType.VITERBI:
from examples.speech_recognition.w2l_decoder import W2lViterbiDecoder
return W2lViterbiDecoder(cfg, task.target_dictionary)
elif w2l_decoder == DecoderType.KENLM:
from examples.speech_recognition.w2l_decoder import W2lKenLMDecoder
return W2lKenLMDecoder(cfg, task.target_dictionary)
elif w2l_decoder == DecoderType.FAIRSEQ:
from examples.speech_recognition.w2l_decoder import W2lFairseqLMDecoder
return W2lFairseqLMDecoder(cfg, task.target_dictionary)
elif w2l_decoder == DecoderType.KALDI:
from examples.speech_recognition.kaldi.kaldi_decoder import KaldiDecoder
assert cfg.kaldi_decoder_config is not None
return KaldiDecoder(
cfg.kaldi_decoder_config,
cfg.beam,
)
else:
raise NotImplementedError(
"only wav2letter decoders with (viterbi, kenlm, fairseqlm) options are supported at the moment but found "
+ str(w2l_decoder)
)
generator = build_generator(cfg)
kenlm = None
fairseq_lm = None
if cfg.lm_model is not None:
import kenlm
kenlm = kenlm.Model(cfg.lm_model)
num_sentences = 0
if cfg.results_path is not None and not os.path.exists(cfg.results_path):
os.makedirs(cfg.results_path)
res_files = prepare_result_files(cfg)
errs_t = 0
lengths_hyp_t = 0
lengths_hyp_unit_t = 0
lengths_t = 0
count = 0
num_feats = 0
all_hyp_pieces = []
all_hyp_words = []
num_symbols = (
len([s for s in tgt_dict.symbols if not s.startswith("madeup")])
- tgt_dict.nspecial
)
targets = None
if cfg.targets is not None:
tgt_path = os.path.join(
cfg.fairseq.task.data, cfg.fairseq.dataset.gen_subset + "." + cfg.targets
)
if os.path.exists(tgt_path):
with open(tgt_path, "r") as f:
targets = f.read().splitlines()
viterbi_transcript = None
if cfg.viterbi_transcript is not None and len(cfg.viterbi_transcript) > 0:
logger.info(f"loading viterbi transcript from {cfg.viterbi_transcript}")
with open(cfg.viterbi_transcript, "r") as vf:
viterbi_transcript = vf.readlines()
viterbi_transcript = [v.rstrip().split() for v in viterbi_transcript]
gen_timer.start()
start = 0
end = len(itr)
hypo_futures = None
if cfg.w2l_decoder == DecoderType.KALDI:
logger.info("Extracting features")
hypo_futures = []
samples = []
with progress_bar.build_progress_bar(cfg.fairseq.common, itr) as t:
for i, sample in enumerate(t):
if "net_input" not in sample or i < start or i >= end:
continue
if "padding_mask" not in sample["net_input"]:
sample["net_input"]["padding_mask"] = None
hypos, num_feats = gen_hypos(
generator, models, num_feats, sample, task, use_cuda
)
hypo_futures.append(hypos)
samples.append(sample)
itr = list(zip(hypo_futures, samples))
start = 0
end = len(itr)
logger.info("Finished extracting features")
with progress_bar.build_progress_bar(cfg.fairseq.common, itr) as t:
for i, sample in enumerate(t):
if i < start or i >= end:
continue
if hypo_futures is not None:
hypos, sample = sample
hypos = [h.result() for h in hypos]
else:
if "net_input" not in sample:
continue
hypos, num_feats = gen_hypos(
generator, models, num_feats, sample, task, use_cuda
)
for i, sample_id in enumerate(sample["id"].tolist()):
if targets is not None:
target_tokens = targets[sample_id]
elif "target" in sample or "target_label" in sample:
toks = (
sample["target"][i, :]
if "target_label" not in sample
else sample["target_label"][i, :]
)
target_tokens = utils.strip_pad(toks, tgt_dict.pad()).int().cpu()
else:
target_tokens = None
# Process top predictions
(
errs,
length_hyp,
length,
hyp_pieces,
hyp_words,
) = process_predictions(
cfg,
hypos[i],
tgt_dict,
target_tokens,
res_files,
)
errs_t += errs
lengths_hyp_t += length_hyp
lengths_hyp_unit_t += (
len(hyp_pieces) if len(hyp_pieces) > 0 else len(hyp_words)
)
lengths_t += length
count += 1
all_hyp_pieces.append(hyp_pieces)
all_hyp_words.append(hyp_words)
num_sentences += (
sample["nsentences"] if "nsentences" in sample else sample["id"].numel()
)
lm_score_sum = 0
if kenlm is not None:
if cfg.unit_lm:
lm_score_sum = sum(kenlm.score(w) for w in all_hyp_pieces)
else:
lm_score_sum = sum(kenlm.score(w) for w in all_hyp_words)
elif fairseq_lm is not None:
lm_score_sum = sum(fairseq_lm.score([h.split() for h in all_hyp_words])[0])
vt_err_t = 0
vt_length_t = 0
if viterbi_transcript is not None:
unit_hyps = []
if cfg.targets is not None and cfg.lexicon is not None:
lex = {}
with open(cfg.lexicon, "r") as lf:
for line in lf:
items = line.rstrip().split()
lex[items[0]] = items[1:]
for h in all_hyp_pieces:
hyp_ws = []
for w in h.split():
assert w in lex, w
hyp_ws.extend(lex[w])
unit_hyps.append(hyp_ws)
else:
unit_hyps.extend([h.split() for h in all_hyp_words])
vt_err_t = sum(
editdistance.eval(vt, h) for vt, h in zip(viterbi_transcript, unit_hyps)
)
vt_length_t = sum(len(h) for h in viterbi_transcript)
if res_files is not None:
for r in res_files.values():
r.close()
gen_timer.stop(lengths_hyp_t)
return GenResult(
count,
errs_t,
gen_timer,
lengths_hyp_unit_t,
lengths_hyp_t,
lengths_t,
lm_score_sum,
num_feats,
num_sentences,
num_symbols,
vt_err_t,
vt_length_t,
)
def gen_hypos(generator, models, num_feats, sample, task, use_cuda):
sample = utils.move_to_cuda(sample) if use_cuda else sample
if "features" in sample["net_input"]:
sample["net_input"]["dense_x_only"] = True
num_feats += (
sample["net_input"]["features"].shape[0]
* sample["net_input"]["features"].shape[1]
)
hypos = task.inference_step(generator, models, sample, None)
return hypos, num_feats
def main(cfg: UnsupGenerateConfig, model=None):
if (
cfg.fairseq.dataset.max_tokens is None
and cfg.fairseq.dataset.batch_size is None
):
cfg.fairseq.dataset.max_tokens = 1024000
use_cuda = torch.cuda.is_available() and not cfg.fairseq.common.cpu
task = tasks.setup_task(cfg.fairseq.task)
overrides = ast.literal_eval(cfg.fairseq.common_eval.model_overrides)
if cfg.fairseq.task._name == "unpaired_audio_text":
overrides["model"] = {
"blank_weight": cfg.blank_weight,
"blank_mode": cfg.blank_mode,
"blank_is_sil": cfg.sil_is_blank,
"no_softmax": True,
"segmentation": {
"type": "NONE",
},
}
else:
overrides["model"] = {
"blank_weight": cfg.blank_weight,
"blank_mode": cfg.blank_mode,
}
if model is None:
# Load ensemble
logger.info("| loading model(s) from {}".format(cfg.fairseq.common_eval.path))
models, saved_cfg = checkpoint_utils.load_model_ensemble(
cfg.fairseq.common_eval.path.split("\\"),
arg_overrides=overrides,
task=task,
suffix=cfg.fairseq.checkpoint.checkpoint_suffix,
strict=(cfg.fairseq.checkpoint.checkpoint_shard_count == 1),
num_shards=cfg.fairseq.checkpoint.checkpoint_shard_count,
)
optimize_models(cfg, use_cuda, models)
else:
models = [model]
saved_cfg = cfg.fairseq
with open_dict(saved_cfg.task):
saved_cfg.task.shuffle = False
saved_cfg.task.sort_by_length = False
gen_result = generate(cfg, models, saved_cfg, use_cuda)
wer = None
if gen_result.lengths_t > 0:
wer = gen_result.errs_t * 100.0 / gen_result.lengths_t
logger.info(f"WER: {wer}")
lm_ppl = float("inf")
if gen_result.lm_score_t != 0 and gen_result.lengths_hyp_t > 0:
hyp_len = gen_result.lengths_hyp_t
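# kenlm.score returns log10 probabilities, so perplexity is 10 ** (-logprob / length);
# num_sentences is added to the length, presumably to count the end-of-sentence token
# that kenlm scores for each hypothesis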
lm_ppl = math.pow(
10, -gen_result.lm_score_t / (hyp_len + gen_result.num_sentences)
)
logger.info(f"LM PPL: {lm_ppl}")
logger.info(
"| Processed {} sentences ({} tokens) in {:.1f}s ({:.2f}"
" sentences/s, {:.2f} tokens/s)".format(
gen_result.num_sentences,
gen_result.gen_timer.n,
gen_result.gen_timer.sum,
gen_result.num_sentences / gen_result.gen_timer.sum,
1.0 / gen_result.gen_timer.avg,
)
)
vt_diff = None
if gen_result.vt_length_t > 0:
vt_diff = gen_result.vt_err_t / gen_result.vt_length_t
vt_diff = max(cfg.min_vt_uer, vt_diff)
lm_ppl = max(cfg.min_lm_ppl, lm_ppl)
# return WER when not doing unsupervised tuning; otherwise return the unsupervised
# model-selection metric computed below, as described in the config help text
if not cfg.unsupervised_tuning:
weighted_score = wer
else:
weighted_score = math.log(lm_ppl) * (vt_diff or 1.0)
res = (
f"| Generate {cfg.fairseq.dataset.gen_subset} with beam={cfg.beam}, "
f"lm_weight={cfg.kaldi_decoder_config.acoustic_scale if cfg.kaldi_decoder_config else cfg.lm_weight}, "
f"word_score={cfg.word_score}, sil_weight={cfg.sil_weight}, blank_weight={cfg.blank_weight}, "
f"WER: {wer}, LM_PPL: {lm_ppl}, num feats: {gen_result.num_feats}, "
f"length: {gen_result.lengths_hyp_t}, UER to viterbi: {(vt_diff or 0) * 100}, score: {weighted_score}"
)
logger.info(res)
# print(res)
return task, weighted_score
@hydra.main(
config_path=os.path.join("../../..", "fairseq", "config"), config_name="config"
)
def hydra_main(cfg):
with open_dict(cfg):
        # make hydra logging work with ddp (see https://github.com/facebookresearch/hydra/issues/1126)
cfg.job_logging_cfg = OmegaConf.to_container(
HydraConfig.get().job_logging, resolve=True
)
cfg = OmegaConf.create(
OmegaConf.to_container(cfg, resolve=False, enum_to_str=False)
)
OmegaConf.set_struct(cfg, True)
logger.info(cfg)
utils.import_user_module(cfg.fairseq.common)
_, score = main(cfg)
if cfg.is_ax:
return score, None
return score
def cli_main():
try:
from hydra._internal.utils import get_args
cfg_name = get_args().config_name or "config"
except:
logger.warning("Failed to get config name from hydra args")
cfg_name = "config"
cs = ConfigStore.instance()
cs.store(name=cfg_name, node=UnsupGenerateConfig)
hydra_main()
if __name__ == "__main__":
cli_main()
| EXA-1-master | exa/models/unilm-master/edgelm/examples/wav2vec/unsupervised/w2vu_generate.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from .unpaired_audio_text import UnpairedAudioText
__all__ = [
"UnpairedAudioText",
]
| EXA-1-master | exa/models/unilm-master/edgelm/examples/wav2vec/unsupervised/tasks/__init__.py |
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the LICENSE file in
# the root directory of this source tree. An additional grant of patent rights
# can be found in the PATENTS file in the same directory.
from dataclasses import dataclass, field
import logging
import math
import os
from typing import Optional
import torch
from fairseq.logging import metrics
from fairseq.tasks import FairseqTask, register_task
from ..data import ExtractedFeaturesDataset, RandomInputDataset
from fairseq.data import (
Dictionary,
data_utils,
StripTokenDataset,
)
from fairseq.dataclass import FairseqDataclass
from fairseq.distributed.utils import get_data_parallel_world_size
from omegaconf import MISSING
from examples.speech_recognition.kaldi.kaldi_decoder import (
KaldiDecoder,
KaldiDecoderConfig,
)
logger = logging.getLogger(__name__)
@dataclass
class DecodingConfig(FairseqDataclass):
kenlm_path: Optional[str] = None
lm_weight: float = 0
blank_weight: float = 0
@dataclass
class UnpairedAudioTextConfig(FairseqDataclass):
data: str = field(
default=MISSING, metadata={"help": "path to data directory containing audio"}
)
text_data: str = field(
default=MISSING, metadata={"help": "path to data directory containing text"}
)
max_length: Optional[int] = None
labels: Optional[str] = field(
default=None,
metadata={"help": "extension of the label file to load, used for fine-tuning"},
)
unfiltered: bool = field(
default=False, metadata={"help": "load data with _unfiltered suffix"}
)
ctc_eval: bool = field(
default=False, metadata={"help": "eval UER as if computed by CTC"}
)
sort_by_length: bool = field(
default=True, metadata={"help": "sort examples by length of audio timesteps"}
)
shuffle: bool = field(default=True, metadata={"help": "shuffle examples"})
append_eos: bool = field(default=False, metadata={"help": "append eos"})
uppercase: Optional[bool] = field(
default=False, metadata={"help": "uppercase for LM score computation"}
)
skipwords: Optional[str] = field(
default="",
metadata={
"help": "comma-separated words to be removed for LM score computation"
},
)
kenlm_path: Optional[str] = None
vocab_usage_power: float = 2
word_decoder_config: Optional[KaldiDecoderConfig] = None
word_kenlm_path: Optional[str] = None
decoding_config: DecodingConfig = DecodingConfig()
@register_task("unpaired_audio_text", dataclass=UnpairedAudioTextConfig)
class UnpairedAudioText(FairseqTask):
""" """
cfg: UnpairedAudioTextConfig
def __init__(
self,
cfg: UnpairedAudioTextConfig,
source_dictionary=None,
target_dictionary=None,
):
super().__init__(cfg)
self._target_dictionary = target_dictionary
self._source_dictionary = source_dictionary
self.num_symbols = (
len([s for s in target_dictionary.symbols if not s.startswith("madeup")])
- target_dictionary.nspecial
)
self.sil_id = (
target_dictionary.index("<SIL>") if "<SIL>" in target_dictionary else -1
)
self.kenlm = None
if cfg.kenlm_path is not None:
import kenlm
self.kenlm = kenlm.Model(cfg.kenlm_path)
self.word_kenlm = None
if cfg.word_kenlm_path is not None:
import kenlm
self.word_kenlm = kenlm.Model(cfg.word_kenlm_path)
self.uppercase = cfg.uppercase
self.skipwords = set(cfg.skipwords.split(","))
def str_postprocess(s):
s = " ".join(w for w in s.split() if w not in self.skipwords)
s = s.upper() if self.uppercase else s
return s
self.str_postprocess = str_postprocess
self.compute_lm_score = lambda s: self.kenlm.score(self.str_postprocess(s))
self.compute_word_score = None
if cfg.word_decoder_config is not None:
self.kaldi_decoder = KaldiDecoder(cfg.word_decoder_config, beam=10)
def compute_word_score(logits, padding):
res = self.kaldi_decoder.decode(logits, padding)
for r in res:
r = r.result()
assert len(r) == 1
r = r[0]
yield r["score"], r["words"]
self.compute_word_score = compute_word_score
@classmethod
def setup_task(cls, cfg: UnpairedAudioTextConfig, **kwargs):
"""Setup the task (e.g., load dictionaries).
Args:
cfg (AudioPretrainingConfig): configuration of this task
"""
dict_path = os.path.join(cfg.text_data, "dict.txt")
if os.path.exists(dict_path):
target_dictionary = Dictionary.load(dict_path)
else:
dict_path = os.path.join(cfg.data, f"dict.{cfg.labels}.txt")
target_dictionary = Dictionary.load(dict_path)
return cls(cfg, target_dictionary=target_dictionary)
def optimizer_step(self, optimizer, model, update_num):
if hasattr(model, "get_groups_for_update"):
groups = model.get_groups_for_update(update_num)
optimizer.step(groups={groups})
else:
optimizer.step()
def valid_step(self, sample, model, criterion):
res = model(
**sample["net_input"],
dense_x_only=True,
)
dense_x = res["logits"]
padding_mask = res["padding_mask"]
word_scores = None
if self.compute_word_score is not None:
word_scores = self.compute_word_score(dense_x.cpu(), padding_mask.cpu())
z = dense_x.argmax(-1)
z[padding_mask] = self.target_dictionary.pad()
vocab_seen = torch.zeros(self.num_symbols, dtype=torch.bool)
import editdistance
c_err = 0
c_len = 0
pred_c_len = 0
lm_score_sum = 0
for i, (x, t, id) in enumerate(
zip(
z,
sample["target"] if "target" in sample else [None] * len(z),
sample["id"],
)
):
if t is not None:
t = t[(t >= self.target_dictionary.nspecial)]
x = x[
(x >= self.target_dictionary.nspecial)
& (x < (self.num_symbols + self.target_dictionary.nspecial))
]
if self.sil_id >= 0:
x = x[x != self.sil_id]
vocab_seen[x - self.target_dictionary.nspecial] = True
pred_units_arr = x
if self.cfg.ctc_eval:
pred_units_arr = pred_units_arr.unique_consecutive()
pred_units_arr = pred_units_arr[pred_units_arr != 0]
if id == 0:
if t is not None:
logger.info(f"REF: {self.target_dictionary.string(t)}")
logger.info(f"HYP: {self.target_dictionary.string(pred_units_arr)}")
if self.kenlm is not None:
if t is not None:
ref_lm_s = self.compute_lm_score(
self.target_dictionary.string(t)
)
logger.info(
f"LM [REF]: {ref_lm_s}, {math.pow(10, -ref_lm_s / (len(t) + 1))}"
)
hyp_lm_s = self.compute_lm_score(
self.target_dictionary.string(pred_units_arr)
)
logger.info(
f"LM [HYP]: {hyp_lm_s}, {math.pow(10, -hyp_lm_s / (len(pred_units_arr) + 1))}"
)
pred_units_arr = pred_units_arr.tolist()
pred_c_len += len(pred_units_arr)
if t is not None:
t = t.tolist()
c_err += editdistance.eval(pred_units_arr, t)
c_len += len(t)
else:
c_len = pred_c_len
if self.kenlm is not None:
pred_str = self.target_dictionary.string(pred_units_arr)
lm_score = self.compute_lm_score(pred_str)
lm_score_sum += lm_score
kaldi_score_sum = 0
word_lm_sum = 0
num_words = 0
if word_scores is not None:
for score, words in word_scores:
kaldi_score_sum += score
num_words += len(words)
                if self.word_kenlm is not None:
                    # score the decoded word sequence with the word-level LM
                    word_lm_sum += self.word_kenlm.score(" ".join(words))
try:
world_size = get_data_parallel_world_size()
except:
world_size = 1
logging_output = {
"loss": c_err,
"_num_char_errors": c_err,
"_num_chars": c_len,
"_num_pred_chars": pred_c_len,
"ntokens": c_len,
"nsentences": z.size(0),
"sample_size": c_len,
"_world_size": world_size,
"_lm_score_sum": lm_score_sum,
"_kaldi_score_sum": kaldi_score_sum,
"_word_lm_sum": word_lm_sum,
"_num_words": num_words,
"_vocab_seen": vocab_seen,
}
return c_err, c_len, logging_output
def load_dataset(self, split: str, task_cfg: FairseqDataclass = None, **kwargs):
data_path = self.cfg.data
task_cfg = task_cfg or self.cfg
has_unpaired_text = os.path.exists(
os.path.join(self.cfg.text_data, f"{split}.idx")
)
self.datasets[split] = ExtractedFeaturesDataset(
path=data_path,
split=split,
min_length=3,
max_length=task_cfg.max_length,
labels=None if has_unpaired_text else task_cfg.labels,
label_dict=self.target_dictionary,
shuffle=getattr(task_cfg, "shuffle", True),
sort_by_length=task_cfg.sort_by_length,
)
logger.info(f"split {split} has unpaired text? {has_unpaired_text}")
if has_unpaired_text:
text_dataset = data_utils.load_indexed_dataset(
os.path.join(self.cfg.text_data, split), self.target_dictionary
)
text_dataset = StripTokenDataset(text_dataset, self.target_dictionary.eos())
self.datasets[split] = RandomInputDataset(
self.datasets[split],
text_dataset,
["random_label"],
add_to_input=True,
pad_idx=self.target_dictionary.pad(),
)
@property
def source_dictionary(self):
return self._source_dictionary
@property
def target_dictionary(self):
"""Return the :class:`~fairseq.data.Dictionary` for the language
model."""
return self._target_dictionary
def max_positions(self):
"""Maximum input length supported by the encoder."""
return None
def reduce_metrics(self, logging_outputs, criterion):
super().reduce_metrics(logging_outputs, criterion)
zero = torch.scalar_tensor(0.0)
num_char_errors = sum(
log.get("_num_char_errors", zero) for log in logging_outputs
)
num_chars = sum(log.get("_num_chars", zero) for log in logging_outputs)
num_word_errors = sum(
log.get("_num_word_errors", zero) for log in logging_outputs
)
num_words = sum(log.get("_num_words", zero) for log in logging_outputs)
num_pred_chars = sum(
log.get("_num_pred_chars", zero) for log in logging_outputs
)
lm_score_sum = sum(log.get("_lm_score_sum", zero) for log in logging_outputs)
vocab_seen = (
sum(log.get("_vocab_seen", zero) for log in logging_outputs)
.bool()
.sum()
.item()
)
kaldi_score_sum = sum(
log.get("_kaldi_score_sum", zero) for log in logging_outputs
)
word_lm_sum = sum(log.get("_word_lm_sum", zero) for log in logging_outputs)
metrics.log_scalar_sum("_num_char_errors", num_char_errors)
metrics.log_scalar_sum("_num_chars", num_chars)
metrics.log_scalar_sum("_num_word_errors", num_word_errors)
metrics.log_scalar_sum("_num_words", num_words)
metrics.log_scalar_sum("lm_score_sum", lm_score_sum)
metrics.log_scalar_sum("num_pred_chars", num_pred_chars)
if self.cfg.word_kenlm_path is not None:
metrics.log_scalar_sum("kaldi_score_sum", kaldi_score_sum)
metrics.log_scalar_sum("word_lm_sum", word_lm_sum)
if num_chars > 0:
metrics.log_derived(
"uer",
lambda meters: meters["_num_char_errors"].sum
* 100.0
/ meters["_num_chars"].sum
if meters["_num_chars"].sum > 0
else float("nan"),
)
if lm_score_sum < 0 and vocab_seen > 0:
metrics.log_scalar("vocab_seen_pct", vocab_seen / self.num_symbols)
metrics.log_derived(
"weighted_lm_ppl",
lambda meters: math.pow(
10,
-meters["lm_score_sum"].sum
/ (
meters["num_pred_chars"].sum + meters["nsentences"].sum
), # account for </s>
)
/ meters["vocab_seen_pct"].avg ** self.cfg.vocab_usage_power,
)
metrics.log_derived(
"lm_ppl",
lambda meters: math.pow(
10,
-meters["lm_score_sum"].sum
/ (
meters["num_pred_chars"].sum + meters["nsentences"].sum
), # account for </s>
),
)
else:
metrics.log_derived("weighted_lm_ppl", lambda meters: float("inf"))
if num_words > 0:
if word_lm_sum != 0:
metrics.log_derived(
"word_lm_ppl",
lambda meters: math.pow(
10,
-meters["word_lm_sum"].sum
/ (
meters["_num_words"].sum + meters["nsentences"].sum
), # account for </s>
),
)
metrics.log_derived(
"weighted_word_lm_ppl",
lambda meters: math.pow(
10,
-meters["word_lm_sum"].sum
/ (
meters["_num_words"].sum + meters["nsentences"].sum
), # account for </s>
)
/ meters["vocab_seen_pct"].avg ** self.cfg.vocab_usage_power,
)
if self.cfg.word_kenlm_path is not None:
metrics.log_derived(
"kaldi_score",
lambda meters: meters["kaldi_score_sum"].sum
/ meters["nsentences"].sum,
)
def build_model(self, cfg: FairseqDataclass):
model = super().build_model(cfg)
return model
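# Summary of the validation metrics logged in reduce_metrics above (worked form, for reference):
#   uer             = 100 * num_char_errors / num_chars
#   lm_ppl          = 10 ** (-lm_score_sum / (num_pred_chars + nsentences))   # +nsentences accounts for </s>
#   weighted_lm_ppl = lm_ppl / vocab_seen_pct ** vocab_usage_power
# Illustrative numbers (made up, not from any experiment): lm_ppl = 20 and vocab_seen_pct = 0.8
# with the default vocab_usage_power = 2 give weighted_lm_ppl = 20 / 0.8**2 = 31.25.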
| EXA-1-master | exa/models/unilm-master/edgelm/examples/wav2vec/unsupervised/tasks/unpaired_audio_text.py |
import kaldi_io
import numpy as np
import os
def get_parser():
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("w2v_dir", help="wav2vec feature and text directory")
parser.add_argument("tar_root", help="output data directory in kaldi's format")
parser.add_argument("split", help="name of the subset")
parser.add_argument("--label", default="", help="if specified, copy labels too")
return parser
def main():
parser = get_parser()
args = parser.parse_args()
tar_dir = os.path.join(args.tar_root, args.split)
os.makedirs(tar_dir, exist_ok=True)
lengths_path = os.path.join(args.w2v_dir, f"{args.split}.lengths")
with open(lengths_path) as f:
lengths = [int(line.rstrip()) for line in f]
offsets = [0] + np.cumsum(lengths[:-1]).tolist()
feats = np.load(
os.path.join(args.w2v_dir, f"{args.split}.npy"),
mmap_mode="r"
)
assert feats.shape[0] == sum(lengths), \
f"lengths mismatch {feats.shape[0]} != {sum(lengths)}"
ark_path = os.path.join(tar_dir, "feats.ark")
scp_path = os.path.join(tar_dir, "feats.scp")
wspec = f"ark:| copy-feats --compress=true ark:- ark,scp:{ark_path},{scp_path}"
with kaldi_io.open_or_fd(wspec, "wb") as f:
for idx, (offset, length) in enumerate(zip(offsets, lengths)):
feat = feats[offset:offset+length]
kaldi_io.write_mat(f, feat, key=f"utt{idx:010d}")
u2s_path = os.path.join(tar_dir, "utt2spk")
s2u_path = os.path.join(tar_dir, "spk2utt")
with open(u2s_path, "w") as f_u2s, open(s2u_path, "w") as f_s2u:
for idx in range(len(lengths)):
f_u2s.write(f"utt{idx:010d} utt{idx:010d}\n")
f_s2u.write(f"utt{idx:010d} utt{idx:010d}\n")
if bool(args.label):
lab_path = os.path.join(args.w2v_dir, f"{args.split}.{args.label}")
txt_path = os.path.join(tar_dir, "text")
with open(lab_path) as f_lab, open(txt_path, "w") as f_txt:
for idx, line in enumerate(f_lab):
f_txt.write(f"utt{idx:010d} {line}")
if __name__ == "__main__":
main()
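# Usage sketch (paths are placeholders): the script reads <w2v_dir>/<split>.npy and
# <w2v_dir>/<split>.lengths and writes a Kaldi-style data directory under <tar_root>/<split>:
#   feats.ark / feats.scp   features piped through `copy-feats --compress=true`
#   utt2spk / spk2utt       one pseudo speaker per utterance (utt0000000000, utt0000000001, ...)
#   text                    written only when --label is given, one "utt<idx> <label line>" per utterance
# e.g. python prepare_data_from_w2v.py /path/to/w2v_feats /path/to/kaldi_data train --label phn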
| EXA-1-master | exa/models/unilm-master/edgelm/examples/wav2vec/unsupervised/kaldi_self_train/st/local/prepare_data_from_w2v.py |
"""
Implement unsupervised metric for decoding hyperparameter selection:
$$ alpha * LM_PPL + ViterbiUER(%) * 100 $$
"""
import argparse
import logging
import math
import sys
import kenlm
import editdistance
from g2p_en import G2p
logging.root.setLevel(logging.INFO)
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
logger = logging.getLogger(__name__)
def get_parser():
parser = argparse.ArgumentParser()
parser.add_argument("ref_tra", help="reference pseudo labels")
parser.add_argument("hyp_tra", help="decoded pseudo labels to be assess")
parser.add_argument("--kenlm_path", default="/checkpoint/abaevski/data/speech/libri/librispeech_lm_novox.phnc_o5.bin", help="")
parser.add_argument("--uppercase", action="store_true", help="")
parser.add_argument("--skipwords", default="", help="")
parser.add_argument("--gt_tra", default="", help="ground truth pseudo labels for computing oracle WER")
parser.add_argument("--min_vt_uer", default=0.0, type=float)
parser.add_argument("--phonemize", action="store_true", help="phonemize word hypotheses, used when reference is phone transcript")
parser.add_argument("--phonemize_lexicon", default="", type=str, help="use a lexicon for phonemizing")
return parser
def load_tra(tra_path):
with open(tra_path, "r") as f:
uid_to_tra = {}
for line in f:
toks = line.rstrip().split()
uid, tra = toks[0], " ".join(toks[1:])
uid_to_tra[uid] = tra
logger.debug(f"loaded {len(uid_to_tra)} utterances from {tra_path}")
return uid_to_tra
def load_lex(lex_path):
with open(lex_path, "r") as f:
w2p = {}
for line in f:
w, p = line.rstrip().split(None, 1)
w2p[w] = p.split()
return w2p
def compute_wer(ref_uid_to_tra, hyp_uid_to_tra, g2p, g2p_dict):
d_cnt = 0
w_cnt = 0
w_cnt_h = 0
for uid in hyp_uid_to_tra:
ref = ref_uid_to_tra[uid].split()
if g2p_dict is not None:
hyp = []
for word in hyp_uid_to_tra[uid].split():
if word in g2p_dict:
hyp = hyp + g2p_dict[word]
else:
logger.warning(f"{word} not in g2p_dict")
elif g2p is not None:
hyp = g2p(hyp_uid_to_tra[uid])
hyp = [p for p in hyp if p != "'" and p != " "]
hyp = [p[:-1] if p[-1].isnumeric() else p for p in hyp]
else:
hyp = hyp_uid_to_tra[uid].split()
logger.debug((
f"======================\n"
f"HYP: {' '.join(hyp)}\n"
f"REF: {' '.join(ref)}"
))
d_cnt += editdistance.eval(ref, hyp)
w_cnt += len(ref)
w_cnt_h += len(hyp)
wer = float(d_cnt) / w_cnt
logger.debug((
f"wer = {wer*100:.2f}%; num. of ref words = {w_cnt}; "
f"num. of hyp words = {w_cnt_h}; num. of sentences = {len(ref_uid_to_tra)}"
))
return wer
def compute_lm_ppl(hyp_uid_to_tra, score_fn):
lm_score = 0.
w_cnt = 0
for hyp in hyp_uid_to_tra.values():
cur_score = score_fn(hyp)
cur_cnt = len(hyp.split()) + 1 # plus one for </s>
lm_score += cur_score
w_cnt += cur_cnt
logger.debug((
f"======================\n"
f"score sum/avg = {cur_score:.2f}/{cur_score/cur_cnt:.2f}\n"
f"hyp = {hyp}"
))
lm_ppl = math.pow(10, -lm_score / w_cnt)
logger.debug(f"lm ppl = {lm_ppl:.2f}; num. of words = {w_cnt}")
return lm_ppl
def main():
args = get_parser().parse_args()
logger.debug(f"Args: {args}")
ref_uid_to_tra = load_tra(args.ref_tra)
hyp_uid_to_tra = load_tra(args.hyp_tra)
assert not bool(set(hyp_uid_to_tra.keys()) - set(ref_uid_to_tra.keys()))
lm = kenlm.Model(args.kenlm_path)
skipwords = set(args.skipwords.split(","))
def compute_lm_score(s):
s = " ".join(w for w in s.split() if w not in skipwords)
s = s.upper() if args.uppercase else s
return lm.score(s)
g2p, g2p_dict = None, None
if args.phonemize:
if args.phonemize_lexicon:
g2p_dict = load_lex(args.phonemize_lexicon)
else:
g2p = G2p()
wer = compute_wer(ref_uid_to_tra, hyp_uid_to_tra, g2p, g2p_dict)
lm_ppl = compute_lm_ppl(hyp_uid_to_tra, compute_lm_score)
gt_wer = -math.inf
if args.gt_tra:
gt_uid_to_tra = load_tra(args.gt_tra)
gt_wer = compute_wer(gt_uid_to_tra, hyp_uid_to_tra, None, None)
score = math.log(lm_ppl) * max(wer, args.min_vt_uer)
logging.info(f"{args.hyp_tra}: score={score:.4f}; wer={wer*100:.2f}%; lm_ppl={lm_ppl:.4f}; gt_wer={gt_wer*100:.2f}%")
if __name__ == "__main__":
main()
| EXA-1-master | exa/models/unilm-master/edgelm/examples/wav2vec/unsupervised/kaldi_self_train/st/local/unsup_select.py |
import sys
for idx, line in enumerate(sys.stdin):
print(f"utt{idx:010d} {line}", end='') | EXA-1-master | exa/models/unilm-master/edgelm/examples/wav2vec/unsupervised/kaldi_self_train/st/local/copy_aligned_text.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from dataclasses import dataclass
from enum import Enum, auto
import math
import numpy as np
from typing import Tuple, List, Optional, Dict
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch import autograd
from fairseq import checkpoint_utils, utils
from fairseq.dataclass import FairseqDataclass
from fairseq.models import BaseFairseqModel, register_model
from fairseq.modules import (
SamePad,
TransposeLast,
)
class SegmentationType(Enum):
NONE = auto()
RANDOM = auto()
UNIFORM_RANDOM = auto()
UNIFORM_RANDOM_JOIN = auto()
JOIN = auto()
@dataclass
class SegmentationConfig(FairseqDataclass):
type: SegmentationType = SegmentationType.NONE
subsample_rate: float = 0.25
mean_pool: bool = True
mean_pool_join: bool = False
remove_zeros: bool = False
@dataclass
class Wav2vec_UConfig(FairseqDataclass):
discriminator_kernel: int = 3
discriminator_dilation: int = 1
discriminator_dim: int = 256
discriminator_causal: bool = True
discriminator_linear_emb: bool = False
discriminator_depth: int = 1
discriminator_max_pool: bool = False
discriminator_act_after_linear: bool = False
discriminator_dropout: float = 0.0
discriminator_spectral_norm: bool = False
discriminator_weight_norm: bool = False
generator_kernel: int = 4
generator_dilation: int = 1
generator_stride: int = 1
generator_bias: bool = False
generator_dropout: float = 0.0
blank_weight: float = 0
blank_mode: str = "add"
blank_is_sil: bool = False
no_softmax: bool = False
smoothness_weight: float = 0.0
smoothing: float = 0.0
smoothing_one_sided: bool = False
gradient_penalty: float = 0.0
probabilistic_grad_penalty_slicing: bool = False
code_penalty: float = 0.0
gumbel: bool = False
hard_gumbel: bool = True
temp: Tuple[float, float, float] = (2, 0.1, 0.99995)
input_dim: int = 128
segmentation: SegmentationConfig = SegmentationConfig()
class Segmenter(nn.Module):
cfg: SegmentationConfig
def __init__(self, cfg: SegmentationConfig):
super().__init__()
self.cfg = cfg
self.subsample_rate = cfg.subsample_rate
def pre_segment(self, dense_x, dense_padding_mask):
return dense_x, dense_padding_mask
def logit_segment(self, logits, padding_mask):
return logits, padding_mask
class RandomSegmenter(Segmenter):
def pre_segment(self, dense_x, dense_padding_mask):
target_num = math.ceil(dense_x.size(1) * self.subsample_rate)
ones = torch.ones(dense_x.shape[:-1], device=dense_x.device)
indices, _ = ones.multinomial(target_num).sort(dim=-1)
indices_ld = indices.unsqueeze(-1).expand(-1, -1, dense_x.size(-1))
dense_x = dense_x.gather(1, indices_ld)
dense_padding_mask = dense_padding_mask.gather(1, index=indices)
return dense_x, dense_padding_mask
class UniformRandomSegmenter(Segmenter):
def pre_segment(self, dense_x, dense_padding_mask):
bsz, tsz, fsz = dense_x.shape
target_num = math.ceil(tsz * self.subsample_rate)
rem = tsz % target_num
if rem > 0:
dense_x = F.pad(dense_x, [0, 0, 0, target_num - rem])
dense_padding_mask = F.pad(
dense_padding_mask, [0, target_num - rem], value=True
)
dense_x = dense_x.view(bsz, target_num, -1, fsz)
dense_padding_mask = dense_padding_mask.view(bsz, target_num, -1)
if self.cfg.mean_pool:
dense_x = dense_x.mean(dim=-2)
dense_padding_mask = dense_padding_mask.all(dim=-1)
else:
ones = torch.ones((bsz, dense_x.size(2)), device=dense_x.device)
indices = ones.multinomial(1)
indices = indices.unsqueeze(-1).expand(-1, target_num, -1)
indices_ld = indices.unsqueeze(-1).expand(-1, -1, -1, fsz)
dense_x = dense_x.gather(2, indices_ld).reshape(bsz, -1, fsz)
dense_padding_mask = dense_padding_mask.gather(2, index=indices).reshape(
bsz, -1
)
return dense_x, dense_padding_mask
class JoinSegmenter(Segmenter):
def logit_segment(self, logits, padding_mask):
preds = logits.argmax(dim=-1)
if padding_mask.any():
preds[padding_mask] = -1 # mark pad
uniques = []
bsz, tsz, csz = logits.shape
for p in preds:
uniques.append(
p.cpu().unique_consecutive(return_inverse=True, return_counts=True)
)
new_tsz = max(u[0].numel() for u in uniques)
new_logits = logits.new_zeros(bsz, new_tsz, csz)
new_pad = padding_mask.new_zeros(bsz, new_tsz)
for b in range(bsz):
u, idx, c = uniques[b]
keep = u != -1
if self.cfg.remove_zeros:
keep.logical_and_(u != 0)
if self.training and not self.cfg.mean_pool_join:
u[0] = 0
u[1:] = c.cumsum(0)[:-1]
m = c > 1
r = torch.rand(m.sum())
o = (c[m] * r).long()
u[m] += o
new_logits[b, : u.numel()] = logits[b, u]
else:
new_logits[b].index_add_(
dim=0, index=idx.to(new_logits.device), source=logits[b]
)
new_logits[b, : c.numel()] /= c.unsqueeze(-1).to(new_logits.device)
new_sz = keep.sum()
if not keep.all():
kept_logits = new_logits[b, : c.numel()][keep]
new_logits[b, :new_sz] = kept_logits
if new_sz < new_tsz:
pad = new_tsz - new_sz
new_logits[b, -pad:] = 0
new_pad[b, -pad:] = True
return new_logits, new_pad
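# Illustration of JoinSegmenter.logit_segment (values are made up): if the framewise argmax
# predictions for one example are [3, 3, 3, 0, 0, 5, 5], consecutive duplicates are collapsed
# into three segments [3, 0, 5]; each segment's logits are either averaged over its frames
# (eval, or mean_pool_join) or taken from one randomly chosen frame inside the segment
# (training), and shorter rows are right-padded in the new padding mask.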
class UniformRandomJoinSegmenter(UniformRandomSegmenter, JoinSegmenter):
pass
SEGMENT_FACTORY = {
SegmentationType.NONE: Segmenter,
SegmentationType.RANDOM: RandomSegmenter,
SegmentationType.UNIFORM_RANDOM: UniformRandomSegmenter,
SegmentationType.UNIFORM_RANDOM_JOIN: UniformRandomJoinSegmenter,
SegmentationType.JOIN: JoinSegmenter,
}
class Discriminator(nn.Module):
def __init__(self, dim, cfg: Wav2vec_UConfig):
super().__init__()
inner_dim = cfg.discriminator_dim
kernel = cfg.discriminator_kernel
dilation = cfg.discriminator_dilation
self.max_pool = cfg.discriminator_max_pool
if cfg.discriminator_causal:
padding = kernel - 1
else:
padding = kernel // 2
def make_conv(in_d, out_d, k, p=0, has_dilation=True):
conv = nn.Conv1d(
in_d,
out_d,
kernel_size=k,
padding=p,
dilation=dilation if has_dilation else 1,
)
if cfg.discriminator_spectral_norm:
conv = nn.utils.spectral_norm(conv)
elif cfg.discriminator_weight_norm:
conv = nn.utils.weight_norm(conv)
return conv
inner_net = [
nn.Sequential(
make_conv(inner_dim, inner_dim, kernel, padding),
SamePad(kernel_size=kernel, causal=cfg.discriminator_causal),
nn.Dropout(cfg.discriminator_dropout),
nn.GELU(),
)
for _ in range(cfg.discriminator_depth - 1)
] + [
make_conv(inner_dim, 1, kernel, padding, has_dilation=False),
SamePad(kernel_size=kernel, causal=cfg.discriminator_causal),
]
if cfg.discriminator_linear_emb:
emb_net = [make_conv(dim, inner_dim, 1)]
else:
emb_net = [
make_conv(dim, inner_dim, kernel, padding),
SamePad(kernel_size=kernel, causal=cfg.discriminator_causal),
]
if cfg.discriminator_act_after_linear:
emb_net.append(nn.GELU())
self.net = nn.Sequential(
*emb_net,
nn.Dropout(cfg.discriminator_dropout),
*inner_net,
)
def forward(self, x, padding_mask):
x = x.transpose(1, 2) # BTC -> BCT
x = self.net(x)
x = x.transpose(1, 2)
x_sz = x.size(1)
if padding_mask is not None and padding_mask.any() and padding_mask.dim() > 1:
padding_mask = padding_mask[:, : x.size(1)]
x[padding_mask] = float("-inf") if self.max_pool else 0
x_sz = x_sz - padding_mask.sum(dim=-1)
x = x.squeeze(-1)
if self.max_pool:
x, _ = x.max(dim=-1)
else:
x = x.sum(dim=-1)
x = x / x_sz
return x
class Generator(nn.Module):
def __init__(self, input_dim, output_dim, cfg: Wav2vec_UConfig):
super().__init__()
self.cfg = cfg
self.output_dim = output_dim
self.stride = cfg.generator_stride
self.dropout = nn.Dropout(cfg.generator_dropout)
padding = cfg.generator_kernel // 2
self.proj = nn.Sequential(
TransposeLast(),
nn.Conv1d(
input_dim,
output_dim,
kernel_size=cfg.generator_kernel,
stride=cfg.generator_stride,
dilation=cfg.generator_dilation,
padding=padding,
bias=cfg.generator_bias,
),
TransposeLast(),
)
def forward(self, dense_x, tokens, dense_padding_mask):
dense_x = self.dropout(dense_x)
dense_x = self.proj(dense_x)
if self.stride > 1:
dense_padding_mask = dense_padding_mask[:, :: self.stride]
if dense_padding_mask.size(1) != dense_x.size(1):
new_padding = dense_padding_mask.new_zeros(dense_x.shape[:-1])
diff = new_padding.size(1) - dense_padding_mask.size(1)
assert (
diff > 0
), f"{new_padding.shape}, {dense_padding_mask.shape}, {dense_x.shape}, {diff}"
if diff > 0:
new_padding[:, diff:] = dense_padding_mask
else:
assert diff < 0
new_padding = dense_padding_mask[:, :diff]
dense_padding_mask = new_padding
result = {}
token_x = None
if tokens is not None:
token_x = dense_x.new_zeros(tokens.numel(), self.output_dim)
token_x.scatter_(1, tokens.view(-1, 1).long(), 1)
token_x = token_x.view(tokens.shape + (self.output_dim,))
result["dense_x"] = dense_x
result["token_x"] = token_x
result["dense_padding_mask"] = dense_padding_mask
return result
@register_model("wav2vec_u", dataclass=Wav2vec_UConfig)
class Wav2vec_U(BaseFairseqModel):
def calc_gradient_penalty(self, real_data, fake_data):
b_size = min(real_data.size(0), fake_data.size(0))
t_size = min(real_data.size(1), fake_data.size(1))
if self.cfg.probabilistic_grad_penalty_slicing:
def get_slice(data, dim, target_size):
size = data.size(dim)
diff = size - target_size
if diff <= 0:
return data
start = np.random.randint(0, diff + 1)
return data.narrow(dim=dim, start=start, length=target_size)
real_data = get_slice(real_data, 0, b_size)
real_data = get_slice(real_data, 1, t_size)
fake_data = get_slice(fake_data, 0, b_size)
fake_data = get_slice(fake_data, 1, t_size)
else:
real_data = real_data[:b_size, :t_size]
fake_data = fake_data[:b_size, :t_size]
alpha = torch.rand(real_data.size(0), 1, 1)
alpha = alpha.expand(real_data.size())
alpha = alpha.to(real_data.device)
interpolates = alpha * real_data + ((1 - alpha) * fake_data)
disc_interpolates = self.discriminator(interpolates, None)
gradients = autograd.grad(
outputs=disc_interpolates,
inputs=interpolates,
grad_outputs=torch.ones(disc_interpolates.size(), device=real_data.device),
create_graph=True,
retain_graph=True,
only_inputs=True,
)[0]
gradient_penalty = (gradients.norm(2, dim=1) - 1) ** 2
return gradient_penalty
def set_num_updates(self, num_updates):
super().set_num_updates(num_updates)
self.update_num = num_updates
self.curr_temp = max(
self.max_temp * self.temp_decay ** num_updates, self.min_temp
)
    def discrim_step(self, num_updates):
        # alternate GAN updates: odd steps train the discriminator, even steps the generator
        return num_updates % 2 == 1
def get_groups_for_update(self, num_updates):
return "discriminator" if self.discrim_step(num_updates) else "generator"
def __init__(self, cfg: Wav2vec_UConfig, target_dict):
super().__init__()
self.cfg = cfg
self.zero_index = target_dict.index("<SIL>") if "<SIL>" in target_dict else 0
self.smoothness_weight = cfg.smoothness_weight
output_size = len(target_dict)
self.pad = target_dict.pad()
self.eos = target_dict.eos()
self.smoothing = cfg.smoothing
self.smoothing_one_sided = cfg.smoothing_one_sided
self.no_softmax = cfg.no_softmax
self.gumbel = cfg.gumbel
self.hard_gumbel = cfg.hard_gumbel
self.last_acc = None
self.gradient_penalty = cfg.gradient_penalty
self.code_penalty = cfg.code_penalty
self.blank_weight = cfg.blank_weight
self.blank_mode = cfg.blank_mode
self.blank_index = target_dict.index("<SIL>") if cfg.blank_is_sil else 0
assert self.blank_index != target_dict.unk()
self.discriminator = Discriminator(output_size, cfg)
for p in self.discriminator.parameters():
p.param_group = "discriminator"
self.pca_A = self.pca_b = None
d = cfg.input_dim
self.segmenter = SEGMENT_FACTORY[cfg.segmentation.type](cfg.segmentation)
self.generator = Generator(d, output_size, cfg)
for p in self.generator.parameters():
p.param_group = "generator"
for p in self.segmenter.parameters():
p.param_group = "generator"
self.max_temp, self.min_temp, self.temp_decay = cfg.temp
self.curr_temp = self.max_temp
self.update_num = 0
@classmethod
def build_model(cls, cfg, task):
return cls(cfg, task.target_dictionary)
def get_logits(
self,
net_output: Optional[Dict[str, List[Optional[torch.Tensor]]]],
normalize: bool = False,
):
logits = net_output["logits"]
if self.blank_weight != 0:
if self.blank_mode == "add":
logits[..., self.blank_index] += self.blank_weight
elif self.blank_mode == "set":
logits[..., self.blank_index] = self.blank_weight
else:
raise Exception(f"invalid blank mode {self.blank_mode}")
padding = net_output["padding_mask"]
        if padding.any():
            # at padded positions keep only the blank index active; index logits in a single
            # advanced-indexing step so the write is not lost on a temporary copy
            logits[padding] = float("-inf")
            logits[padding, self.blank_index] = float("inf")
if normalize:
logits = utils.log_softmax(logits.float(), dim=-1)
return logits.transpose(0, 1)
def get_normalized_probs(
self,
net_output: Tuple[
torch.Tensor, Optional[Dict[str, List[Optional[torch.Tensor]]]]
],
log_probs: bool,
sample: Optional[Dict[str, torch.Tensor]] = None,
):
logits = self.get_logits(net_output)
probs = super().get_normalized_probs(logits, log_probs, sample)
# BTC -> TBC for ctc
probs = probs.transpose(0, 1)
return probs
def normalize(self, dense_x):
bsz, tsz, csz = dense_x.shape
if dense_x.numel() == 0:
raise Exception(dense_x.shape)
_, k = dense_x.max(-1)
hard_x = (
dense_x.new_zeros(bsz * tsz, csz)
.scatter_(-1, k.view(-1, 1), 1.0)
.view(-1, csz)
)
hard_probs = torch.mean(hard_x.float(), dim=0)
code_perplexity = torch.exp(
-torch.sum(hard_probs * torch.log(hard_probs + 1e-7), dim=-1)
)
avg_probs = torch.softmax(dense_x.reshape(-1, csz).float(), dim=-1).mean(dim=0)
prob_perplexity = torch.exp(
-torch.sum(avg_probs * torch.log(avg_probs + 1e-7), dim=-1)
)
if not self.no_softmax:
if self.training and self.gumbel:
dense_x = F.gumbel_softmax(
dense_x.float(), tau=self.curr_temp, hard=self.hard_gumbel
).type_as(dense_x)
else:
dense_x = dense_x.softmax(-1)
return dense_x, code_perplexity, prob_perplexity
def forward(
self,
features,
padding_mask,
random_label=None,
dense_x_only=False,
segment=True,
):
if segment:
features, padding_mask = self.segmenter.pre_segment(features, padding_mask)
orig_size = features.size(0) * features.size(1) - padding_mask.sum()
gen_result = self.generator(features, random_label, padding_mask)
orig_dense_x, token_x = gen_result["dense_x"], gen_result["token_x"]
orig_dense_padding_mask = gen_result["dense_padding_mask"]
if segment:
dense_x, dense_padding_mask = self.segmenter.logit_segment(
orig_dense_x, orig_dense_padding_mask
)
else:
dense_x = orig_dense_x
dense_padding_mask = orig_dense_padding_mask
dense_logits = dense_x
prob_perplexity = None
code_perplexity = None
if not (self.no_softmax and dense_x_only):
dense_x, code_perplexity, prob_perplexity = self.normalize(dense_logits)
if dense_x_only or self.discriminator is None:
return {
"logits": dense_x,
"padding_mask": dense_padding_mask,
}
token_padding_mask = random_label == self.pad
dense_y = self.discriminator(dense_x, dense_padding_mask)
token_y = self.discriminator(token_x, token_padding_mask)
sample_size = features.size(0)
d_step = self.discrim_step(self.update_num)
fake_smooth = self.smoothing
real_smooth = self.smoothing
if self.smoothing_one_sided:
fake_smooth = 0
zero_loss = None
smoothness_loss = None
code_pen = None
if d_step:
loss_dense = F.binary_cross_entropy_with_logits(
dense_y,
dense_y.new_ones(dense_y.shape) - fake_smooth,
reduction="sum",
)
loss_token = F.binary_cross_entropy_with_logits(
token_y,
token_y.new_zeros(token_y.shape) + real_smooth,
reduction="sum",
)
if self.training and self.gradient_penalty > 0:
grad_pen = self.calc_gradient_penalty(token_x, dense_x)
grad_pen = grad_pen.sum() * self.gradient_penalty
else:
grad_pen = None
else:
grad_pen = None
loss_token = None
loss_dense = F.binary_cross_entropy_with_logits(
dense_y,
dense_y.new_zeros(dense_y.shape) + fake_smooth,
reduction="sum",
)
num_vars = dense_x.size(-1)
if prob_perplexity is not None:
code_pen = (num_vars - prob_perplexity) / num_vars
code_pen = code_pen * sample_size * self.code_penalty
if self.smoothness_weight > 0:
smoothness_loss = F.mse_loss(
dense_logits[:, :-1], dense_logits[:, 1:], reduction="none"
)
smoothness_loss[dense_padding_mask[:, 1:]] = 0
smoothness_loss = (
smoothness_loss.mean() * sample_size * self.smoothness_weight
)
result = {
"losses": {
"grad_pen": grad_pen,
"code_pen": code_pen,
"smoothness": smoothness_loss,
},
"temp": self.curr_temp,
"code_ppl": code_perplexity,
"prob_ppl": prob_perplexity,
"d_steps": int(d_step),
"sample_size": sample_size,
}
suff = "_d" if d_step else "_g"
result["losses"]["dense" + suff] = loss_dense
result["losses"]["token" + suff] = loss_token
return result
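# Training alternates between the two adversarial parameter groups (see discrim_step /
# get_groups_for_update above): on odd updates the losses suffixed "_d" (dense_d, token_d,
# plus the optional gradient penalty) train the discriminator, and on even updates the
# losses suffixed "_g" (dense_g, code_pen, smoothness) train the generator.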
| EXA-1-master | exa/models/unilm-master/edgelm/examples/wav2vec/unsupervised/models/wav2vec_u.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from .wav2vec_u import Wav2vec_U
__all__ = [
"Wav2vec_U",
]
| EXA-1-master | exa/models/unilm-master/edgelm/examples/wav2vec/unsupervised/models/__init__.py |
#!/usr/bin/env python3 -u
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import sys
from copy import deepcopy
from scipy.signal import lfilter
import numpy as np
from tqdm import tqdm
import soundfile as sf
import os.path as osp
def get_parser():
parser = argparse.ArgumentParser(description="compute vad segments")
parser.add_argument(
"--rvad-home",
"-r",
help="path to rvad home (see https://github.com/zhenghuatan/rVADfast)",
required=True,
)
return parser
def rvad(speechproc, path):
winlen, ovrlen, pre_coef, nfilter, nftt = 0.025, 0.01, 0.97, 20, 512
ftThres = 0.5
vadThres = 0.4
opts = 1
data, fs = sf.read(path)
assert fs == 16_000, "sample rate must be 16khz"
ft, flen, fsh10, nfr10 = speechproc.sflux(data, fs, winlen, ovrlen, nftt)
# --spectral flatness --
pv01 = np.zeros(ft.shape[0])
pv01[np.less_equal(ft, ftThres)] = 1
pitch = deepcopy(ft)
pvblk = speechproc.pitchblockdetect(pv01, pitch, nfr10, opts)
# --filtering--
ENERGYFLOOR = np.exp(-50)
b = np.array([0.9770, -0.9770])
a = np.array([1.0000, -0.9540])
fdata = lfilter(b, a, data, axis=0)
# --pass 1--
noise_samp, noise_seg, n_noise_samp = speechproc.snre_highenergy(
fdata, nfr10, flen, fsh10, ENERGYFLOOR, pv01, pvblk
)
# sets noisy segments to zero
for j in range(n_noise_samp):
fdata[range(int(noise_samp[j, 0]), int(noise_samp[j, 1]) + 1)] = 0
vad_seg = speechproc.snre_vad(
fdata, nfr10, flen, fsh10, ENERGYFLOOR, pv01, pvblk, vadThres
)
return vad_seg, data
def main():
parser = get_parser()
args = parser.parse_args()
sys.path.append(args.rvad_home)
import speechproc
stride = 160
lines = sys.stdin.readlines()
root = lines[0].rstrip()
for fpath in tqdm(lines[1:]):
path = osp.join(root, fpath.split()[0])
vads, wav = rvad(speechproc, path)
start = None
vad_segs = []
for i, v in enumerate(vads):
if start is None and v == 1:
start = i * stride
elif start is not None and v == 0:
vad_segs.append((start, i * stride))
start = None
if start is not None:
vad_segs.append((start, len(wav)))
print(" ".join(f"{v[0]}:{v[1]}" for v in vad_segs))
if __name__ == "__main__":
main()
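# Usage sketch (paths are placeholders): stdin is a fairseq-style tsv manifest whose first
# line is the audio root and whose remaining lines are relative wav paths; one line of
# "start:end" sample ranges (stride 160 samples at 16 kHz) is printed per file, e.g.
#   python vads.py -r /path/to/rVADfast < train.tsv > train.vads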
| EXA-1-master | exa/models/unilm-master/edgelm/examples/wav2vec/unsupervised/scripts/vads.py |
#!/usr/bin/env python3 -u
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import os
import os.path as osp
import numpy as np
import tqdm
import torch
import sys
import faiss
import torch.nn.functional as F
from wav2vec_cluster_faiss import parse_faiss_specs, Wav2VecFeatureReader
def get_parser():
parser = argparse.ArgumentParser(description="apply clusters")
# fmt: off
parser.add_argument('data', help='location of tsv files')
parser.add_argument('--split', help='split to process', required=True)
    parser.add_argument('--labels', help='label file extension to read alongside the tsv (e.g. phn)', default="phn")
parser.add_argument('--path', help='path to pca and centroids', required=True)
parser.add_argument('--checkpoint', type=str, help='checkpoint for wav2vec model (if using wav2vec features)', required=True)
parser.add_argument('--layer', '-l', type=int, help='which layer to read', default=14)
parser.add_argument('--max-tsz', type=int, help='batch kmeans up to this much', default=14)
# fmt: on
return parser
def get_iterator(args):
label_path = osp.join(args.data, f"{args.split}.{args.labels}")
if osp.exists(label_path):
lp = open(label_path, "r")
else:
lp = None
with open(osp.join(args.data, f"{args.split}.tsv"), "r") as fp:
lines = fp.read().split("\n")
root = lines.pop(0).strip()
files = [line.rstrip() for line in lines if len(line) > 0]
if lp is not None:
lbls = [line.rstrip() for line in lp]
else:
lbls = [None] * len(files)
num = len(files)
reader = Wav2VecFeatureReader(args.checkpoint, args.layer)
def iterate():
for fname, lbl in zip(files, lbls):
file = osp.join(root, fname.split("\t")[0])
feats = reader.get_feats(file)
yield feats.data, fname, lbl
return iterate, num, root
def main():
parser = get_parser()
args = parser.parse_args()
spec = osp.basename(args.path)
try:
faiss_spec = parse_faiss_specs(spec.rstrip("/"))[0]
except:
print(spec)
raise
print("Faiss Spec:", faiss_spec, file=sys.stderr)
if faiss_spec.pca:
A = torch.from_numpy(np.load(osp.join(args.path, "pca_A.npy"))).cuda()
b = torch.from_numpy(np.load(osp.join(args.path, "pca_b.npy"))).cuda()
print("Loaded PCA", file=sys.stderr)
centroids = np.load(osp.join(args.path, "centroids.npy"))
print("Loaded centroids", centroids.shape, file=sys.stderr)
res = faiss.StandardGpuResources()
index_flat = (
faiss.IndexFlatL2(centroids.shape[1])
if not faiss_spec.sphere
else faiss.IndexFlatIP(centroids.shape[1])
)
faiss_index = faiss.index_cpu_to_gpu(res, 0, index_flat)
faiss_index.add(centroids)
generator, num, root = get_iterator(args)
iterator = generator()
had_labels = False
label_path = osp.join(args.path, f"{args.split}.{args.labels}")
with torch.no_grad():
with open(osp.join(args.path, f"{args.split}.src"), "w") as fp, open(
osp.join(args.path, f"{args.split}.tsv"), "w"
) as pp, open(label_path, "w") as lp:
print(root, file=pp)
for f, fname, lbl in tqdm.tqdm(iterator, total=num):
if faiss_spec.pca:
f = torch.mm(f, A) + b
if faiss_spec.norm:
f = F.normalize(f, p=2, dim=-1)
f = f.cpu().numpy()
_, z = faiss_index.search(f, 1)
print(" ".join(str(x.item()) for x in z), file=fp)
print(fname, file=pp)
if lbl is not None:
print(lbl, file=lp)
had_labels = True
if not had_labels:
os.remove(label_path)
if __name__ == "__main__":
main()
| EXA-1-master | exa/models/unilm-master/edgelm/examples/wav2vec/unsupervised/scripts/wav2vec_apply_cluster_faiss.py |
#!/usr/bin/env python3 -u
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import gc
import os
import os.path as osp
import random
import numpy as np
import tqdm
import torch
from collections import namedtuple
import faiss
import fairseq
import soundfile as sf
def get_parser():
parser = argparse.ArgumentParser(
description="compute kmeans codebook from kaldi-computed feats"
)
# fmt: off
parser.add_argument('data', help='location of tsv files')
parser.add_argument('--save-dir', help='where to save the output', required=True)
parser.add_argument('--checkpoint', type=str, help='checkpoint for wav2vec model (if using wav2vec features)', required=True)
parser.add_argument('--sample-pct', '-r', type=float, help='percentage of timesteps to sample', default=0)
parser.add_argument('--layer', '-l', type=int, help='which layer to read', default=14)
parser.add_argument('--faiss-specs', '-f', type=str,
help='faiss index specs; separated by space '
'format is: PCAx_NORM_CLUSx_SPHERICAL -> '
'PCAx if exists first apply PCA '
'NORM if exists, normalize the vector by L2 norm '
'CLUSx must exist, cluster to x clusters '
                            'SPHERICAL if exists, apply spherical kmeans',
default='l2')
# fmt: on
return parser
faiss_spec = namedtuple("faiss_spec", ["pca", "norm", "n_clus", "sphere", "spec_str"])
def parse_faiss_specs(specs_str):
specs = []
for ss in specs_str.split():
comps = ss.split("_")
pca = 0
norm = False
n_clus = 0
sphere = False
for c in comps:
if c.startswith("PCA"):
pca = int(c[3:])
elif c == "NORM":
norm = True
elif c.startswith("CLUS"):
n_clus = int(c[4:])
elif c == "SPHERICAL":
sphere = True
assert n_clus > 0
specs.append(
faiss_spec(pca=pca, norm=norm, n_clus=n_clus, sphere=sphere, spec_str=ss)
)
return specs
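# Example (illustrative): parse_faiss_specs("PCA512_NORM_CLUS128_SPHERICAL") returns
#   [faiss_spec(pca=512, norm=True, n_clus=128, sphere=True,
#               spec_str="PCA512_NORM_CLUS128_SPHERICAL")]
# i.e. reduce to 512 dims with PCA, L2-normalize, then run spherical k-means with 128 clusters.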
class Wav2VecFeatureReader(object):
def __init__(self, cp_file, layer):
state = fairseq.checkpoint_utils.load_checkpoint_to_cpu(cp_file)
self.layer = layer
if "cfg" in state:
w2v_args = state["cfg"]
task = fairseq.tasks.setup_task(w2v_args.task)
model = task.build_model(w2v_args.model)
else:
w2v_args = state["args"]
task = fairseq.tasks.setup_task(w2v_args)
model = task.build_model(w2v_args)
model.load_state_dict(state["model"], strict=True)
model.eval()
model.cuda()
self.model = model
def read_audio(self, fname):
"""Load an audio file and return PCM along with the sample rate"""
wav, sr = sf.read(fname)
assert sr == 16e3
return wav
def get_feats(self, loc):
x = self.read_audio(loc)
with torch.no_grad():
source = torch.from_numpy(x).view(1, -1).float().cuda()
res = self.model(
source=source, mask=False, features_only=True, layer=self.layer
)
return res["layer_results"][self.layer][0].squeeze(1)
def get_iterator(args):
with open(args.data, "r") as fp:
lines = fp.read().split("\n")
root = lines.pop(0).strip()
files = [osp.join(root, line.split("\t")[0]) for line in lines if len(line) > 0]
if getattr(args, "sample_pct", 0) > 0:
files = random.sample(files, int(args.sample_pct * len(files)))
num = len(files)
reader = Wav2VecFeatureReader(args.checkpoint, args.layer)
def iterate():
for fname in files:
feats = reader.get_feats(fname)
yield feats.cpu().numpy()
return iterate, num
def main():
parser = get_parser()
args = parser.parse_args()
faiss_specs = parse_faiss_specs(args.faiss_specs)
print("Faiss Specs:", faiss_specs)
feat_path = osp.join(args.save_dir, "features")
if osp.exists(feat_path + ".npy"):
feats = np.load(feat_path + ".npy")
else:
generator, num = get_iterator(args)
iterator = generator()
feats = []
for f in tqdm.tqdm(iterator, total=num):
feats.append(f)
del iterator
del generator
feats = np.concatenate(feats)
print(feats.shape)
os.makedirs(args.save_dir, exist_ok=True)
# np.save(feat_path, feats)
gc.collect()
torch.cuda.empty_cache()
reload = False
for spec in faiss_specs:
print("Processing spec", spec)
if reload:
print("Reloading...")
del feats
gc.collect()
feats = np.load(feat_path + ".npy")
save_path = osp.join(args.save_dir, spec.spec_str)
os.makedirs(save_path, exist_ok=True)
d = feats.shape[-1]
x = feats
if spec.pca > 0:
print("Computing PCA")
pca = faiss.PCAMatrix(d, spec.pca)
pca.train(x)
d = spec.pca
b = faiss.vector_to_array(pca.b)
A = faiss.vector_to_array(pca.A).reshape(pca.d_out, pca.d_in)
np.save(osp.join(save_path, "pca_A"), A.T)
np.save(osp.join(save_path, "pca_b"), b)
print("Applying PCA")
x = pca.apply_py(x)
if spec.norm:
reload = spec.pca <= 0
print("Normalizing")
faiss.normalize_L2(x)
print("Computing kmeans")
kmeans = faiss.Kmeans(
d,
spec.n_clus,
niter=50,
verbose=True,
spherical=spec.sphere,
max_points_per_centroid=feats.shape[0],
gpu=True,
nredo=3,
)
kmeans.train(x)
np.save(osp.join(save_path, "centroids"), kmeans.centroids)
del kmeans
del x
gc.collect()
if __name__ == "__main__":
main()
| EXA-1-master | exa/models/unilm-master/edgelm/examples/wav2vec/unsupervised/scripts/wav2vec_cluster_faiss.py |
#!/usr/bin/env python3 -u
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os
import argparse
import sys
parser = argparse.ArgumentParser()
parser.add_argument("--tsv", required=True, type=str)
parser.add_argument("--no-skip", action="store_true")
parser.add_argument("--keep", action="store_true")
params = parser.parse_args()
def get_fname(line):
p = os.path.basename(line.split("\t")[0])
p = os.path.splitext(p)[0]
return p
# filenames to exclude
seen = set()
with open(params.tsv) as f:
if not params.no_skip:
root = next(f).rstrip()
for line in f:
seen.add(get_fname(line))
for i, line in enumerate(sys.stdin):
exists = get_fname(line) in seen
keep = (exists and params.keep) or (not exists and not params.keep)
if i == 0 or keep:
print(line, end="")
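# Usage sketch (file names are placeholders): filters a tsv manifest read from stdin,
# dropping lines whose file name (without extension) already appears in --tsv, or keeping
# only those lines when --keep is set; the first stdin line (the root) is always passed through.
#   python filter_tsv.py --tsv valid.tsv < all.tsv > train.tsv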
| EXA-1-master | exa/models/unilm-master/edgelm/examples/wav2vec/unsupervised/scripts/filter_tsv.py |
#!/usr/bin/env python3 -u
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import os
import os.path as osp
import numpy as np
import tqdm
import torch
import random
from shutil import copyfile
from npy_append_array import NpyAppendArray
def get_parser():
parser = argparse.ArgumentParser(
description="transforms features via a given pca and stored them in target dir"
)
# fmt: off
parser.add_argument('source', help='directory with features')
parser.add_argument('--split', help='which split to read', required=True)
parser.add_argument('--save-dir', help='where to save the output', required=True)
parser.add_argument('--cluster-dir', help='where the clusters are')
parser.add_argument('--pooling', type=str, default='mean', choices=['mean', 'sample'], help='how to pool')
# fmt: on
return parser
def main():
parser = get_parser()
args = parser.parse_args()
source_path = osp.join(args.source, args.split)
cluster_path = osp.join(args.cluster_dir, args.split + ".src")
print(f"data path: {source_path}")
features = np.load(source_path + ".npy", mmap_mode="r")
sizes = []
offsets = []
offset = 0
with open(source_path + ".lengths", "r") as len_f:
for line in len_f:
length = int(line.rstrip())
sizes.append(length)
offsets.append(offset)
offset += length
clusters = []
with open(cluster_path, "r") as cf:
for line in cf:
line = line.rstrip()
items = line.split()
items = list(map(int, items))
clusters.append(items)
os.makedirs(args.save_dir, exist_ok=True)
save_path = osp.join(args.save_dir, args.split)
copyfile(source_path + ".tsv", save_path + ".tsv")
if os.path.exists(source_path + ".phn"):
copyfile(source_path + ".phn", save_path + ".phn")
if os.path.exists(osp.join(args.source, "dict.phn.txt")):
copyfile(
osp.join(args.source, "dict.phn.txt"),
osp.join(args.save_dir, "dict.phn.txt"),
)
if os.path.exists(source_path + ".wrd"):
copyfile(source_path + ".wrd", save_path + ".wrd")
if osp.exists(save_path + ".npy"):
os.remove(save_path + ".npy")
npaa = NpyAppendArray(save_path + ".npy")
def merge(feats, clust):
feats = torch.from_numpy(feats.copy())
clust = torch.LongTensor(clust)
_, counts = clust.unique_consecutive(return_counts=True)
curr = 0
merged = []
for c in counts:
c = c.item()
start = curr
end = curr + c
curr += c
if args.pooling == "mean":
new_x = feats[start:end].mean(dim=0)
elif args.pooling == "sample":
new_x = feats[start + int(random.random() * c)]
else:
raise NotImplementedError()
merged.append(new_x)
return torch.stack(merged, dim=0).numpy()
with open(save_path + ".lengths", "w") as l_f:
for size, offset, clust in tqdm.tqdm(
zip(sizes, offsets, clusters), total=len(sizes)
):
end = size + offset
feats = features[offset:end]
feats = merge(feats, clust)
print(len(feats), file=l_f)
npaa.append(feats)
if __name__ == "__main__":
main()
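# Illustration of merge() above (made-up values): with cluster ids [7, 7, 7, 2, 2, 9] the six
# feature frames are reduced to three output frames, one per consecutive run of equal ids;
# with --pooling mean each output is the mean of its run, with --pooling sample it is one
# randomly chosen frame from the run.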
| EXA-1-master | exa/models/unilm-master/edgelm/examples/wav2vec/unsupervised/scripts/merge_clusters.py |
#!/usr/bin/env python3 -u
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import sys
def main():
for line in sys.stdin:
print(line.replace(" ", "").replace("|", " ").strip())
if __name__ == "__main__":
main()
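# Example: the letter-format line "H E L L O | W O R L D |" becomes "HELLO WORLD"
# (spaces removed, "|" word separators turned back into spaces); the inverse of wrd_to_ltr.py.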
| EXA-1-master | exa/models/unilm-master/edgelm/examples/wav2vec/unsupervised/scripts/ltr_to_wrd.py |
#!/usr/bin/env python3 -u
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import numpy as np
import sys
def get_parser():
parser = argparse.ArgumentParser(
description="converts words to phones adding optional silences around in between words"
)
parser.add_argument(
"--sil-prob",
"-s",
type=float,
default=0,
help="probability of inserting silence between each word",
)
parser.add_argument(
"--surround",
action="store_true",
help="if set, surrounds each example with silence",
)
parser.add_argument(
"--lexicon",
help="lexicon to convert to phones",
required=True,
)
return parser
def main():
parser = get_parser()
args = parser.parse_args()
sil_prob = args.sil_prob
surround = args.surround
sil = "<SIL>"
wrd_to_phn = {}
with open(args.lexicon, "r") as lf:
for line in lf:
items = line.rstrip().split()
assert len(items) > 1, line
assert items[0] not in wrd_to_phn, items
wrd_to_phn[items[0]] = items[1:]
for line in sys.stdin:
words = line.strip().split()
if not all(w in wrd_to_phn for w in words):
continue
phones = []
if surround:
phones.append(sil)
sample_sil_probs = None
if sil_prob > 0 and len(words) > 1:
sample_sil_probs = np.random.random(len(words) - 1)
for i, w in enumerate(words):
phones.extend(wrd_to_phn[w])
if (
sample_sil_probs is not None
and i < len(sample_sil_probs)
and sample_sil_probs[i] < sil_prob
):
phones.append(sil)
if surround:
phones.append(sil)
print(" ".join(phones))
if __name__ == "__main__":
main()
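# Usage sketch (file names are placeholders): words are read from stdin and looked up in
# --lexicon, whose lines are "WORD PHONE1 PHONE2 ..."; lines containing any out-of-lexicon
# word are skipped. With e.g. --sil-prob 0.25 a <SIL> token is inserted between words with
# probability 0.25, and --surround adds <SIL> at both ends:
#   python phonemize_with_sil.py -s 0.25 --surround --lexicon lexicon.lst < train.wrd > train.phn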
| EXA-1-master | exa/models/unilm-master/edgelm/examples/wav2vec/unsupervised/scripts/phonemize_with_sil.py |
#!/usr/bin/env python3 -u
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import os
import os.path as osp
import math
import numpy as np
import tqdm
import torch
import torch.nn.functional as F
from shutil import copyfile
from npy_append_array import NpyAppendArray
def get_parser():
parser = argparse.ArgumentParser(
description="mean pools representations by compressing uniform splits of the data"
)
# fmt: off
parser.add_argument('source', help='directory with features')
parser.add_argument('--split', help='which split to read', required=True)
parser.add_argument('--save-dir', help='where to save the output', required=True)
parser.add_argument('--subsample-rate', type=float, default=0.5, help='size to subsample data to')
parser.add_argument('--remove-extra', action='store_true', help='if true, removes extra states that cant be pooled, otherwise pads with 0s')
# fmt: on
return parser
def main():
parser = get_parser()
args = parser.parse_args()
source_path = osp.join(args.source, args.split)
print(f"data path: {source_path}")
features = np.load(source_path + ".npy", mmap_mode="r")
os.makedirs(args.save_dir, exist_ok=True)
save_path = osp.join(args.save_dir, args.split)
copyfile(source_path + ".tsv", save_path + ".tsv")
if os.path.exists(source_path + ".phn"):
copyfile(source_path + ".phn", save_path + ".phn")
if os.path.exists(source_path + ".wrd"):
copyfile(source_path + ".wrd", save_path + ".wrd")
if os.path.exists(osp.join(args.source, "dict.phn.txt")):
copyfile(
osp.join(args.source, "dict.phn.txt"),
osp.join(args.save_dir, "dict.phn.txt"),
)
if osp.exists(save_path + ".npy"):
os.remove(save_path + ".npy")
npaa = NpyAppendArray(save_path + ".npy")
with open(source_path + ".lengths", "r") as lf:
lengths = lf.readlines()
fsz = features.shape[-1]
start = 0
with torch.no_grad():
with open(save_path + ".lengths", "w") as lengths_out:
for length in tqdm.tqdm(lengths):
length = int(length)
end = start + length
feats = features[start:end]
start += length
x = torch.from_numpy(feats).cuda()
target_num = math.ceil(length * args.subsample_rate)
rem = length % target_num
if rem > 0:
if args.remove_extra:
to_rem = target_num - rem
target_num -= 1
x = x[:-to_rem]
else:
to_add = target_num - rem
x = F.pad(x, [0, 0, 0, to_add])
x[-to_add:] = x[-to_add - 1]
x = x.view(target_num, -1, fsz)
x = x.mean(dim=-2)
print(target_num, file=lengths_out)
npaa.append(x.cpu().numpy())
if __name__ == "__main__":
main()
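# Illustration (made-up sizes): an utterance of 10 frames with the default --subsample-rate 0.5
# gives target_num = ceil(10 * 0.5) = 5 output frames, each the mean of 2 consecutive input
# frames; when the length is not evenly divisible, the tail is either padded by repeating the
# last real frame (default) or trimmed with --remove-extra.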
| EXA-1-master | exa/models/unilm-master/edgelm/examples/wav2vec/unsupervised/scripts/mean_pool.py |
#!/usr/bin/env python3 -u
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import sys
def main():
for line in sys.stdin:
print(" ".join(list(line.strip().replace(" ", "|"))) + " |")
if __name__ == "__main__":
main()
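# Example: the word line "HELLO WORLD" becomes "H E L L O | W O R L D |"
# (characters space-separated, with "|" marking word boundaries); the inverse of ltr_to_wrd.py.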
| EXA-1-master | exa/models/unilm-master/edgelm/examples/wav2vec/unsupervised/scripts/wrd_to_ltr.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import regex
import sys
def main():
filter_r = regex.compile(r"[^\p{L}\p{N}\p{M}\' \-]")
for line in sys.stdin:
line = line.strip()
line = filter_r.sub(" ", line)
line = " ".join(line.split())
print(line)
if __name__ == "__main__":
main()
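# Example: "Hello, world! (it's a test)" is normalized to "Hello world it's a test":
# everything except letters, digits, combining marks, apostrophes, spaces and hyphens is
# replaced by a space and whitespace is collapsed.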
| EXA-1-master | exa/models/unilm-master/edgelm/examples/wav2vec/unsupervised/scripts/normalize_text.py |
#!/usr/bin/env python3 -u
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
Implement unsupervised metric for decoding hyperparameter selection:
$$ alpha * LM_PPL + ViterbitUER(%) * 100 $$
"""
import argparse
import logging
import sys
import editdistance
logging.root.setLevel(logging.INFO)
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
logger = logging.getLogger(__name__)
def get_parser():
parser = argparse.ArgumentParser()
parser.add_argument("-s", "--hypo", help="hypo transcription", required=True)
parser.add_argument(
"-r", "--reference", help="reference transcription", required=True
)
return parser
def compute_wer(ref_uid_to_tra, hyp_uid_to_tra, g2p):
d_cnt = 0
w_cnt = 0
w_cnt_h = 0
for uid in hyp_uid_to_tra:
ref = ref_uid_to_tra[uid].split()
if g2p is not None:
hyp = g2p(hyp_uid_to_tra[uid])
hyp = [p for p in hyp if p != "'" and p != " "]
hyp = [p[:-1] if p[-1].isnumeric() else p for p in hyp]
else:
hyp = hyp_uid_to_tra[uid].split()
d_cnt += editdistance.eval(ref, hyp)
w_cnt += len(ref)
w_cnt_h += len(hyp)
wer = float(d_cnt) / w_cnt
logger.debug(
(
f"wer = {wer * 100:.2f}%; num. of ref words = {w_cnt}; "
f"num. of hyp words = {w_cnt_h}; num. of sentences = {len(ref_uid_to_tra)}"
)
)
return wer
def main():
args = get_parser().parse_args()
errs = 0
count = 0
with open(args.hypo, "r") as hf, open(args.reference, "r") as rf:
for h, r in zip(hf, rf):
h = h.rstrip().split()
r = r.rstrip().split()
errs += editdistance.eval(r, h)
count += len(r)
logger.info(f"UER: {errs / count * 100:.2f}%")
if __name__ == "__main__":
main()
def load_tra(tra_path):
with open(tra_path, "r") as f:
uid_to_tra = {}
for line in f:
uid, tra = line.split(None, 1)
uid_to_tra[uid] = tra
logger.debug(f"loaded {len(uid_to_tra)} utterances from {tra_path}")
return uid_to_tra
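# Example invocation (added for illustration; file names are hypothetical):
#   python wer.py -s viterbi.hypo -r valid.phn
# main() prints the unit error rate (UER) over the two line-aligned files;
# compute_wer/load_tra are helpers for uid-keyed transcriptions.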
| EXA-1-master | exa/models/unilm-master/edgelm/examples/wav2vec/unsupervised/scripts/wer.py |
#!/usr/bin/env python3 -u
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import sys
for idx, line in enumerate(sys.stdin):
print(f"utt{idx:010d} {line}", end="")
| EXA-1-master | exa/models/unilm-master/edgelm/examples/wav2vec/unsupervised/scripts/copy_labels.py |
#!/usr/bin/env python3 -u
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import os
import os.path as osp
import numpy as np
import faiss
def get_parser():
parser = argparse.ArgumentParser(
description="compute a pca matrix given an array of numpy features"
)
# fmt: off
parser.add_argument('data', help='numpy file containing features')
parser.add_argument('--output', help='where to save the pca matrix', required=True)
parser.add_argument('--dim', type=int, help='dim for pca reduction', required=True)
parser.add_argument('--eigen-power', type=float, default=0, help='eigen power, -0.5 for whitening')
return parser
def main():
parser = get_parser()
args = parser.parse_args()
print("Reading features")
x = np.load(args.data, mmap_mode="r")
print("Computing PCA")
pca = faiss.PCAMatrix(x.shape[-1], args.dim, args.eigen_power)
pca.train(x)
b = faiss.vector_to_array(pca.b)
A = faiss.vector_to_array(pca.A).reshape(pca.d_out, pca.d_in)
os.makedirs(args.output, exist_ok=True)
prefix = str(args.dim)
if args.eigen_power != 0:
prefix += f"_{args.eigen_power}"
np.save(osp.join(args.output, f"{prefix}_pca_A"), A.T)
np.save(osp.join(args.output, f"{prefix}_pca_b"), b)
if __name__ == "__main__":
main()
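# Example invocation (added for illustration; paths are hypothetical):
#   python pca.py /feats/train.npy --output /feats/pca --dim 512 --eigen-power -0.5
# Writes 512_-0.5_pca_A.npy and 512_-0.5_pca_b.npy to --output; with the default
# --eigen-power 0 the files are named 512_pca_A.npy and 512_pca_b.npy.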
| EXA-1-master | exa/models/unilm-master/edgelm/examples/wav2vec/unsupervised/scripts/pca.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import fasttext as ft
import os
import regex
import sys
def get_parser():
parser = argparse.ArgumentParser(
description="reads text from stdin and outputs normalized, lid-filtered version to stdout"
)
parser.add_argument(
"--fasttext-model",
help="path to fasttext model",
default="lid.187.bin",
)
parser.add_argument("--lang", help="language id", required=True)
parser.add_argument(
"--lid-threshold",
type=float,
help="threshold for this lang id probability",
default=0.4,
)
return parser
def main():
parser = get_parser()
args = parser.parse_args()
filter_r = regex.compile(r"[^\p{L}\p{N}\p{M}\' \-]")
lg = args.lang.lower()
lg_label = f"__label__{lg}"
thresh = args.lid_threshold
if os.path.exists(args.fasttext_model):
model = ft.load_model(args.fasttext_model)
else:
print(
f"fasttext language id model {args.fasttext_model} not found. Proceeding without language filtering. "
f"To enable language filtering, please download the latest language id model "
f"from https://fasttext.cc/docs/en/language-identification.html",
file=sys.stderr,
)
model = None
for line in sys.stdin:
line = line.strip()
line = filter_r.sub(" ", line)
line = " ".join(line.split())
if model is not None:
lid, prob = model.predict(line, k=100)
try:
target_idx = lid.index(lg_label)
except ValueError:
continue
if target_idx == 0 or prob[target_idx] >= thresh:
print(line)
else:
print(line)
if __name__ == "__main__":
main()
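# Example invocation (added for illustration; file names are hypothetical):
#   cat corpus.txt | python normalize_and_filter_text.py --lang en \
#       --fasttext-model lid.187.bin > corpus.filtered.txt
# Lines the fastText model does not identify as --lang with enough confidence are
# dropped; if the model file is missing, lines are only normalized.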
| EXA-1-master | exa/models/unilm-master/edgelm/examples/wav2vec/unsupervised/scripts/normalize_and_filter_text.py |
#!/usr/bin/env python3 -u
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import sys
from fairseq.data import Dictionary
def get_parser():
parser = argparse.ArgumentParser(
description="filters a lexicon given a unit dictionary"
)
parser.add_argument("-d", "--unit-dict", help="unit dictionary", required=True)
return parser
def main():
parser = get_parser()
args = parser.parse_args()
d = Dictionary.load(args.unit_dict)
symbols = set(d.symbols)
for line in sys.stdin:
items = line.rstrip().split()
skip = len(items) < 2
for x in items[1:]:
if x not in symbols:
skip = True
break
if not skip:
print(line, end="")
if __name__ == "__main__":
main()
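# Example invocation (added for illustration; file names are hypothetical):
#   python filter_lexicon.py -d dict.phn.txt < lexicon.txt > lexicon.filtered.txt
# Keeps only lexicon entries whose pronunciation units all exist in the unit dictionary.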
| EXA-1-master | exa/models/unilm-master/edgelm/examples/wav2vec/unsupervised/scripts/filter_lexicon.py |
#!/usr/bin/env python3 -u
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import os
import os.path as osp
import math
import numpy as np
import tqdm
import torch
from shutil import copyfile
from npy_append_array import NpyAppendArray
def get_parser():
parser = argparse.ArgumentParser(
description="transforms features via a given pca and stored them in target dir"
)
# fmt: off
parser.add_argument('source', help='directory with features')
parser.add_argument('--split', help='which split to read', required=True)
parser.add_argument('--save-dir', help='where to save the output', required=True)
parser.add_argument('--pca-path', type=str, help='pca location. will append _A.npy and _b.npy', required=True)
parser.add_argument('--batch-size', type=int, default=2048000, help='batch size')
parser.add_argument('--unfiltered', action='store_true', help='process the unfiltered version')
# fmt: on
return parser
def main():
parser = get_parser()
args = parser.parse_args()
source_path = osp.join(args.source, args.split)
    data_path = source_path + "_unfiltered" if args.unfiltered else source_path
    print(f"data path: {data_path}")
    features = np.load(data_path + ".npy", mmap_mode="r")
pca_A = torch.from_numpy(np.load(args.pca_path + "_A.npy")).cuda()
pca_b = torch.from_numpy(np.load(args.pca_path + "_b.npy")).cuda()
os.makedirs(args.save_dir, exist_ok=True)
save_path = osp.join(args.save_dir, args.split)
copyfile(source_path + ".tsv", save_path + ".tsv")
    copyfile(data_path + ".lengths", save_path + ".lengths")
if osp.exists(source_path + ".phn"):
copyfile(source_path + ".phn", save_path + ".phn")
if osp.exists(source_path + ".wrd"):
copyfile(source_path + ".wrd", save_path + ".wrd")
if osp.exists(save_path + ".npy"):
os.remove(save_path + ".npy")
npaa = NpyAppendArray(save_path + ".npy")
batches = math.ceil(features.shape[0] / args.batch_size)
with torch.no_grad():
for b in tqdm.trange(batches):
start = b * args.batch_size
end = start + args.batch_size
x = torch.from_numpy(features[start:end]).cuda()
x = torch.matmul(x, pca_A) + pca_b
npaa.append(x.cpu().numpy())
if __name__ == "__main__":
main()
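# Example invocation (added for illustration; paths are hypothetical):
#   python apply_pca.py /feats/precompute --split train \
#       --save-dir /feats/precompute_pca512 --pca-path /feats/pca/512_pca \
#       --batch-size 1048000
# Loads <pca-path>_A.npy and <pca-path>_b.npy, applies x @ A + b in batches on the
# GPU, and appends the result to <save-dir>/<split>.npy.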
| EXA-1-master | exa/models/unilm-master/edgelm/examples/wav2vec/unsupervised/scripts/apply_pca.py |
#!/usr/bin/env python3 -u
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import sys
from g2p_en import G2p
def main():
parser = argparse.ArgumentParser()
parser.add_argument(
"--compact",
action="store_true",
help="if set, compacts phones",
)
args = parser.parse_args()
compact = args.compact
wrd_to_phn = {}
g2p = G2p()
for line in sys.stdin:
words = line.strip().split()
phones = []
for w in words:
if w not in wrd_to_phn:
wrd_to_phn[w] = g2p(w)
if compact:
wrd_to_phn[w] = [
p[:-1] if p[-1].isnumeric() else p for p in wrd_to_phn[w]
]
phones.extend(wrd_to_phn[w])
try:
print(" ".join(phones))
except:
print(wrd_to_phn, words, phones, file=sys.stderr)
raise
if __name__ == "__main__":
main()
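# Example (added for illustration; exact phones depend on the g2p_en model):
#   echo "hello world" | python g2p_wrd_to_phn.py --compact
#   -> HH AH L OW W ER L D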
| EXA-1-master | exa/models/unilm-master/edgelm/examples/wav2vec/unsupervised/scripts/g2p_wrd_to_phn.py |
#!/usr/bin/env python3 -u
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
reads silence intervals from the .vads file, removes them from each audio file
listed in the .tsv manifest, and saves the trimmed audio under the --out folder
paths=shards/train.tsv
vads=shards/train.vads
python remove_silence.py --tsv $paths --vads $vads --out /path/to/output_dir
"""
import os
import argparse
import torch
import torchaudio
import tqdm
parser = argparse.ArgumentParser()
parser.add_argument("--tsv", default="", type=str)
parser.add_argument("--vads", default="", type=str)
parser.add_argument("--out", type=str)
params = parser.parse_args()
# load paths
paths = []
with open(params.tsv) as f:
root = next(f).rstrip()
for line in f:
paths.append(os.path.join(root, line.rstrip().split("\t")[0]))
# load vads
list_intervals = []
with open(params.vads) as f:
for line in f:
interval = [
[int(w.split(":")[0]), int(w.split(":")[1])] for w in line.rstrip().split()
]
list_intervals.append(interval)
# load audio and keep only intervals (i.e. remove silences)
for i in tqdm.trange(len(paths)):
data, _ = torchaudio.load(paths[i])
if len(list_intervals[i]) > 0:
data_filtered = torch.cat(
[data[0][int(it[0]) : int(it[1])] for it in list_intervals[i]]
).unsqueeze(0)
else:
data_filtered = data
# YOU MAY NEED TO MODIFY THIS TO GET THE RIGHT SUBPATH
# outpath = params.out + '/'.join(paths[i].split('/')[-1])
outpath = params.out + "/" + "/".join(paths[i].split("/")[-2:])
if not os.path.isdir("/".join(outpath.split("/")[:-1])):
os.makedirs("/".join(outpath.split("/")[:-1]))
if not os.path.exists(outpath):
torchaudio.save(outpath, data_filtered, sample_rate=16000)
else:
print(outpath, "exists!")
| EXA-1-master | exa/models/unilm-master/edgelm/examples/wav2vec/unsupervised/scripts/remove_silence.py |
#!/usr/bin/env python3 -u
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import os
import os.path as osp
import tqdm
import torch
import torch.nn.functional as F
from shutil import copyfile
from npy_append_array import NpyAppendArray
import fairseq
import soundfile as sf
def get_parser():
parser = argparse.ArgumentParser(
description="compute kmeans codebook from kaldi-computed feats"
)
# fmt: off
parser.add_argument('data', help='location of tsv files')
parser.add_argument('--split', help='which split to read', required=True)
parser.add_argument('--save-dir', help='where to save the output', required=True)
parser.add_argument('--checkpoint', type=str, help='checkpoint for wav2vec ctc model', required=True)
parser.add_argument('--layer', type=int, default=14, help='which layer to use')
# fmt: on
return parser
class Wav2VecFeatureReader(object):
def __init__(self, cp_file, layer):
model, cfg, task = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[cp_file]
)
model = model[0]
model.eval()
model.cuda()
self.model = model
self.task = task
self.layer = layer
def read_audio(self, fname):
"""Load an audio file and return PCM along with the sample rate"""
wav, sr = sf.read(fname)
assert sr == 16e3
return wav
def get_feats(self, loc):
x = self.read_audio(loc)
with torch.no_grad():
source = torch.from_numpy(x).float().cuda()
if self.task.cfg.normalize:
assert source.dim() == 1, source.dim()
with torch.no_grad():
source = F.layer_norm(source, source.shape)
source = source.view(1, -1)
m_res = self.model(source=source, mask=False, features_only=True, layer=self.layer)
return m_res["x"].squeeze(0).cpu()
def get_iterator(args):
with open(osp.join(args.data, args.split) + ".tsv", "r") as fp:
lines = fp.read().split("\n")
root = lines.pop(0).strip()
files = [osp.join(root, line.split("\t")[0]) for line in lines if len(line) > 0]
num = len(files)
reader = Wav2VecFeatureReader(args.checkpoint, args.layer)
def iterate():
for fname in files:
w2v_feats = reader.get_feats(fname)
yield w2v_feats
return iterate, num
def main():
parser = get_parser()
args = parser.parse_args()
os.makedirs(args.save_dir, exist_ok=True)
def create_files(dest):
copyfile(osp.join(args.data, args.split) + ".tsv", dest + ".tsv")
if osp.exists(osp.join(args.data, args.split) + ".wrd"):
copyfile(osp.join(args.data, args.split) + ".wrd", dest + ".wrd")
if osp.exists(osp.join(args.data, args.split) + ".phn"):
copyfile(osp.join(args.data, args.split) + ".phn", dest + ".phn")
if osp.exists(dest + ".npy"):
os.remove(dest + ".npy")
npaa = NpyAppendArray(dest + ".npy")
return npaa
save_path = osp.join(args.save_dir, args.split)
npaa = create_files(save_path)
generator, num = get_iterator(args)
iterator = generator()
with open(save_path + ".lengths", "w") as l_f:
for w2v_feats in tqdm.tqdm(iterator, total=num):
print(len(w2v_feats), file=l_f)
if len(w2v_feats) > 0:
npaa.append(w2v_feats.numpy())
if __name__ == "__main__":
main()
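# Example invocation (added for illustration; paths and checkpoint are hypothetical):
#   python wav2vec_extract_features.py /manifests --split train \
#       --save-dir /feats --checkpoint /models/wav2vec_vox_new.pt --layer 14
# Writes <save-dir>/<split>.npy plus a .lengths file with one frame count per
# utterance, and copies the .tsv/.wrd/.phn metadata when available.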
| EXA-1-master | exa/models/unilm-master/edgelm/examples/wav2vec/unsupervised/scripts/wav2vec_extract_features.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from .extracted_features_dataset import ExtractedFeaturesDataset
from .random_input_dataset import RandomInputDataset
__all__ = [
"ExtractedFeaturesDataset",
"RandomInputDataset",
]
| EXA-1-master | exa/models/unilm-master/edgelm/examples/wav2vec/unsupervised/data/__init__.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
import os
import contextlib
import numpy as np
import torch
from fairseq.data import FairseqDataset, data_utils
logger = logging.getLogger(__name__)
class ExtractedFeaturesDataset(FairseqDataset):
def __init__(
self,
path,
split,
min_length=3,
max_length=None,
labels=None,
label_dict=None,
shuffle=True,
sort_by_length=True,
):
super().__init__()
self.min_length = min_length
self.max_length = max_length
self.shuffle = shuffle
self.sort_by_length = sort_by_length
self.label_dict = label_dict
if labels is not None:
assert label_dict is not None
self.sizes = []
self.offsets = []
self.labels = []
path = os.path.join(path, split)
data_path = path
self.data = np.load(data_path + ".npy", mmap_mode="r")
offset = 0
skipped = 0
if not os.path.exists(path + f".{labels}"):
labels = None
with open(data_path + ".lengths", "r") as len_f, open(
path + f".{labels}", "r"
) if labels is not None else contextlib.ExitStack() as lbl_f:
for line in len_f:
length = int(line.rstrip())
lbl = None if labels is None else next(lbl_f).rstrip().split()
if length >= min_length and (
max_length is None or length <= max_length
):
self.sizes.append(length)
self.offsets.append(offset)
if lbl is not None:
                        self.labels.append(lbl)
                else:
                    skipped += 1
                offset += length
self.sizes = np.asarray(self.sizes)
self.offsets = np.asarray(self.offsets)
logger.info(f"loaded {len(self.offsets)}, skipped {skipped} samples")
def __getitem__(self, index):
offset = self.offsets[index]
end = self.sizes[index] + offset
feats = torch.from_numpy(self.data[offset:end].copy()).float()
res = {"id": index, "features": feats}
if len(self.labels) > 0:
res["target"] = self.label_dict.encode_line(
self.labels[index],
line_tokenizer=lambda x: x,
append_eos=False,
)
return res
def __len__(self):
return len(self.sizes)
def collater(self, samples):
if len(samples) == 0:
return {}
features = [s["features"] for s in samples]
sizes = [len(s) for s in features]
target_size = max(sizes)
collated_features = features[0].new_zeros(
len(features), target_size, features[0].size(-1)
)
padding_mask = torch.BoolTensor(collated_features.shape[:-1]).fill_(False)
for i, (f, size) in enumerate(zip(features, sizes)):
collated_features[i, :size] = f
padding_mask[i, size:] = True
res = {
"id": torch.LongTensor([s["id"] for s in samples]),
"net_input": {"features": collated_features, "padding_mask": padding_mask},
}
if len(self.labels) > 0:
target = data_utils.collate_tokens(
[s["target"] for s in samples],
pad_idx=self.label_dict.pad(),
left_pad=False,
)
res["target"] = target
return res
def num_tokens(self, index):
return self.size(index)
def size(self, index):
return self.sizes[index]
def ordered_indices(self):
"""Return an ordered list of indices. Batches will be constructed based
on this order."""
if self.shuffle:
order = [np.random.permutation(len(self))]
else:
order = [np.arange(len(self))]
if self.sort_by_length:
order.append(self.sizes)
return np.lexsort(order)[::-1]
else:
return order[0]
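# Illustrative usage (added for illustration; the path and label dictionary below
# are hypothetical):
#   dataset = ExtractedFeaturesDataset(
#       path="/feats/precompute_pca512", split="train",
#       min_length=3, labels="phn", label_dict=phn_dict,
#   )
#   batch = dataset.collater([dataset[i] for i in range(2)])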
| EXA-1-master | exa/models/unilm-master/edgelm/examples/wav2vec/unsupervised/data/extracted_features_dataset.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import random
from typing import List
from fairseq.data import BaseWrapperDataset, data_utils
class RandomInputDataset(BaseWrapperDataset):
def __init__(
self,
dataset,
random_input_dataset,
input_key_path: List[str],
add_to_input,
pad_idx,
):
super().__init__(dataset)
self.random_input_dataset = random_input_dataset
if isinstance(input_key_path, str):
input_key_path = [input_key_path]
assert len(input_key_path) > 0
self.input_key_path = input_key_path
self.add_to_input = add_to_input
self.pad_idx = pad_idx
def get_target(self, item):
target_loc = item
for p in self.input_key_path[:-1]:
target_loc = target_loc[p]
return self.input_key_path[-1], target_loc
def get_target_value(self, item):
k, target_loc = self.get_target(item)
return target_loc[k]
def __getitem__(self, index):
item = self.dataset[index]
k, target_loc = self.get_target(item)
target_loc[k] = random.choice(self.random_input_dataset)
return item
def collater(self, samples):
collated = self.dataset.collater(samples)
if len(collated) == 0:
return collated
indices = set(collated["id"].tolist())
random_inputs = data_utils.collate_tokens(
[self.get_target_value(s) for s in samples if s["id"] in indices],
pad_idx=self.pad_idx,
left_pad=False,
)
k, target_loc = self.get_target(
collated if not self.add_to_input else collated["net_input"]
)
target_loc[k] = random_inputs
return collated
| EXA-1-master | exa/models/unilm-master/edgelm/examples/wav2vec/unsupervised/data/random_input_dataset.py |
#!/usr/bin/env python
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import contextlib
import sys
from collections import Counter
from multiprocessing import Pool
from fairseq.data.encoders.gpt2_bpe import get_encoder
def main():
"""
Helper script to encode raw text with the GPT-2 BPE using multiple processes.
The encoder.json and vocab.bpe files can be obtained here:
- https://dl.fbaipublicfiles.com/fairseq/gpt2_bpe/encoder.json
- https://dl.fbaipublicfiles.com/fairseq/gpt2_bpe/vocab.bpe
"""
parser = argparse.ArgumentParser()
parser.add_argument(
"--encoder-json",
help="path to encoder.json",
)
parser.add_argument(
"--vocab-bpe",
type=str,
help="path to vocab.bpe",
)
parser.add_argument(
"--inputs",
nargs="+",
default=["-"],
help="input files to filter/encode",
)
parser.add_argument(
"--outputs",
nargs="+",
default=["-"],
help="path to save encoded outputs",
)
parser.add_argument(
"--keep-empty",
action="store_true",
help="keep empty lines",
)
parser.add_argument("--workers", type=int, default=20)
args = parser.parse_args()
assert len(args.inputs) == len(
args.outputs
), "number of input and output paths should match"
with contextlib.ExitStack() as stack:
inputs = [
stack.enter_context(open(input, "r", encoding="utf-8"))
if input != "-"
else sys.stdin
for input in args.inputs
]
outputs = [
stack.enter_context(open(output, "w", encoding="utf-8"))
if output != "-"
else sys.stdout
for output in args.outputs
]
encoder = MultiprocessingEncoder(args)
pool = Pool(args.workers, initializer=encoder.initializer)
encoded_lines = pool.imap(encoder.encode_lines, zip(*inputs), 100)
stats = Counter()
for i, (filt, enc_lines) in enumerate(encoded_lines, start=1):
if filt == "PASS":
for enc_line, output_h in zip(enc_lines, outputs):
print(enc_line, file=output_h)
else:
stats["num_filtered_" + filt] += 1
if i % 10000 == 0:
print("processed {} lines".format(i), file=sys.stderr)
for k, v in stats.most_common():
print("[{}] filtered {} lines".format(k, v), file=sys.stderr)
class MultiprocessingEncoder(object):
def __init__(self, args):
self.args = args
def initializer(self):
global bpe
bpe = get_encoder(self.args.encoder_json, self.args.vocab_bpe)
def encode(self, line):
global bpe
ids = bpe.encode(line)
return list(map(str, ids))
def decode(self, tokens):
global bpe
return bpe.decode(tokens)
def encode_lines(self, lines):
"""
Encode a set of lines. All lines will be encoded together.
"""
enc_lines = []
for line in lines:
line = line.strip()
if len(line) == 0 and not self.args.keep_empty:
return ["EMPTY", None]
tokens = self.encode(line)
enc_lines.append(" ".join(tokens))
return ["PASS", enc_lines]
def decode_lines(self, lines):
dec_lines = []
for line in lines:
tokens = map(int, line.strip().split())
dec_lines.append(self.decode(tokens))
return ["PASS", dec_lines]
if __name__ == "__main__":
main()
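# Example invocation (added for illustration; file names are hypothetical):
#   python multiprocessing_bpe_encoder.py \
#       --encoder-json encoder.json --vocab-bpe vocab.bpe \
#       --inputs wiki.train.raw --outputs wiki.train.bpe \
#       --keep-empty --workers 60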
| EXA-1-master | exa/models/unilm-master/edgelm/examples/roberta/multiprocessing_bpe_encoder.py |
#!/usr/bin/env python
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import json
import os
import re
class InputExample:
def __init__(self, paragraph, qa_list, label):
self.paragraph = paragraph
self.qa_list = qa_list
self.label = label
def get_examples(data_dir, set_type):
"""
Extract paragraph and question-answer list from each json file
"""
examples = []
levels = ["middle", "high"]
set_type_c = set_type.split("-")
if len(set_type_c) == 2:
levels = [set_type_c[1]]
set_type = set_type_c[0]
for level in levels:
cur_dir = os.path.join(data_dir, set_type, level)
for filename in os.listdir(cur_dir):
cur_path = os.path.join(cur_dir, filename)
with open(cur_path, "r") as f:
cur_data = json.load(f)
answers = cur_data["answers"]
options = cur_data["options"]
questions = cur_data["questions"]
context = cur_data["article"].replace("\n", " ")
context = re.sub(r"\s+", " ", context)
for i in range(len(answers)):
label = ord(answers[i]) - ord("A")
qa_list = []
question = questions[i]
for j in range(4):
option = options[i][j]
if "_" in question:
qa_cat = question.replace("_", option)
else:
qa_cat = " ".join([question, option])
qa_cat = re.sub(r"\s+", " ", qa_cat)
qa_list.append(qa_cat)
examples.append(InputExample(context, qa_list, label))
return examples
def main():
"""
Helper script to extract paragraphs questions and answers from RACE datasets.
"""
parser = argparse.ArgumentParser()
parser.add_argument(
"--input-dir",
help="input directory for downloaded RACE dataset",
)
parser.add_argument(
"--output-dir",
help="output directory for extracted data",
)
args = parser.parse_args()
if not os.path.exists(args.output_dir):
os.makedirs(args.output_dir, exist_ok=True)
for set_type in ["train", "dev", "test-middle", "test-high"]:
examples = get_examples(args.input_dir, set_type)
qa_file_paths = [
os.path.join(args.output_dir, set_type + ".input" + str(i + 1))
for i in range(4)
]
qa_files = [open(qa_file_path, "w") for qa_file_path in qa_file_paths]
outf_context_path = os.path.join(args.output_dir, set_type + ".input0")
outf_label_path = os.path.join(args.output_dir, set_type + ".label")
outf_context = open(outf_context_path, "w")
outf_label = open(outf_label_path, "w")
for example in examples:
outf_context.write(example.paragraph + "\n")
for i in range(4):
qa_files[i].write(example.qa_list[i] + "\n")
outf_label.write(str(example.label) + "\n")
for f in qa_files:
f.close()
outf_label.close()
outf_context.close()
if __name__ == "__main__":
main()
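# Example invocation (added for illustration; directory names are hypothetical):
#   python preprocess_RACE.py --input-dir RACE --output-dir race-processed
# For each of train/dev/test-middle/test-high, writes <set>.input0 (paragraphs),
# <set>.input1 .. <set>.input4 (question + option pairs) and <set>.label files.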
| EXA-1-master | exa/models/unilm-master/edgelm/examples/roberta/preprocess_RACE.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import json
from functools import lru_cache
def convert_sentence_to_json(sentence):
if "_" in sentence:
prefix, rest = sentence.split("_", 1)
query, rest = rest.split("_", 1)
query_index = len(prefix.rstrip().split(" "))
else:
query, query_index = None, None
prefix, rest = sentence.split("[", 1)
pronoun, rest = rest.split("]", 1)
pronoun_index = len(prefix.rstrip().split(" "))
sentence = sentence.replace("_", "").replace("[", "").replace("]", "")
return {
"idx": 0,
"text": sentence,
"target": {
"span1_index": query_index,
"span1_text": query,
"span2_index": pronoun_index,
"span2_text": pronoun,
},
}
def extended_noun_chunks(sentence):
noun_chunks = {(np.start, np.end) for np in sentence.noun_chunks}
np_start, cur_np = 0, "NONE"
for i, token in enumerate(sentence):
np_type = token.pos_ if token.pos_ in {"NOUN", "PROPN"} else "NONE"
if np_type != cur_np:
if cur_np != "NONE":
noun_chunks.add((np_start, i))
if np_type != "NONE":
np_start = i
cur_np = np_type
if cur_np != "NONE":
noun_chunks.add((np_start, len(sentence)))
return [sentence[s:e] for (s, e) in sorted(noun_chunks)]
def find_token(sentence, start_pos):
found_tok = None
for tok in sentence:
if tok.idx == start_pos:
found_tok = tok
break
return found_tok
def find_span(sentence, search_text, start=0):
search_text = search_text.lower()
for tok in sentence[start:]:
remainder = sentence[tok.i :].text.lower()
if remainder.startswith(search_text):
len_to_consume = len(search_text)
start_idx = tok.idx
for next_tok in sentence[tok.i :]:
end_idx = next_tok.idx + len(next_tok.text)
if end_idx - start_idx == len_to_consume:
span = sentence[tok.i : next_tok.i + 1]
return span
return None
@lru_cache(maxsize=1)
def get_detokenizer():
from sacremoses import MosesDetokenizer
detok = MosesDetokenizer(lang="en")
return detok
@lru_cache(maxsize=1)
def get_spacy_nlp():
import en_core_web_lg
nlp = en_core_web_lg.load()
return nlp
def jsonl_iterator(input_fname, positive_only=False, ngram_order=3, eval=False):
detok = get_detokenizer()
nlp = get_spacy_nlp()
with open(input_fname) as fin:
for line in fin:
sample = json.loads(line.strip())
if positive_only and "label" in sample and not sample["label"]:
# only consider examples where the query is correct
continue
target = sample["target"]
# clean up the query
query = target["span1_text"]
if query is not None:
if "\n" in query:
continue
if query.endswith(".") or query.endswith(","):
query = query[:-1]
# split tokens
tokens = sample["text"].split(" ")
def strip_pronoun(x):
return x.rstrip('.,"')
# find the pronoun
pronoun_idx = target["span2_index"]
pronoun = strip_pronoun(target["span2_text"])
if strip_pronoun(tokens[pronoun_idx]) != pronoun:
# hack: sometimes the index is misaligned
if strip_pronoun(tokens[pronoun_idx + 1]) == pronoun:
pronoun_idx += 1
else:
raise Exception("Misaligned pronoun!")
assert strip_pronoun(tokens[pronoun_idx]) == pronoun
# split tokens before and after the pronoun
before = tokens[:pronoun_idx]
after = tokens[pronoun_idx + 1 :]
# the GPT BPE attaches leading spaces to tokens, so we keep track
# of whether we need spaces before or after the pronoun
leading_space = " " if pronoun_idx > 0 else ""
trailing_space = " " if len(after) > 0 else ""
# detokenize
before = detok.detokenize(before, return_str=True)
pronoun = detok.detokenize([pronoun], return_str=True)
after = detok.detokenize(after, return_str=True)
# hack: when the pronoun ends in a period (or comma), move the
# punctuation to the "after" part
if pronoun.endswith(".") or pronoun.endswith(","):
after = pronoun[-1] + trailing_space + after
pronoun = pronoun[:-1]
# hack: when the "after" part begins with a comma or period, remove
# the trailing space
if after.startswith(".") or after.startswith(","):
trailing_space = ""
# parse sentence with spacy
sentence = nlp(before + leading_space + pronoun + trailing_space + after)
# find pronoun span
start = len(before + leading_space)
first_pronoun_tok = find_token(sentence, start_pos=start)
pronoun_span = find_span(sentence, pronoun, start=first_pronoun_tok.i)
assert pronoun_span.text == pronoun
if eval:
# convert to format where pronoun is surrounded by "[]" and
# query is surrounded by "_"
query_span = find_span(sentence, query)
query_with_ws = "_{}_{}".format(
query_span.text,
(" " if query_span.text_with_ws.endswith(" ") else ""),
)
pronoun_with_ws = "[{}]{}".format(
pronoun_span.text,
(" " if pronoun_span.text_with_ws.endswith(" ") else ""),
)
if query_span.start < pronoun_span.start:
first = (query_span, query_with_ws)
second = (pronoun_span, pronoun_with_ws)
else:
first = (pronoun_span, pronoun_with_ws)
second = (query_span, query_with_ws)
sentence = (
sentence[: first[0].start].text_with_ws
+ first[1]
+ sentence[first[0].end : second[0].start].text_with_ws
+ second[1]
+ sentence[second[0].end :].text
)
yield sentence, sample.get("label", None)
else:
yield sentence, pronoun_span, query, sample.get("label", None)
def winogrande_jsonl_iterator(input_fname, eval=False):
with open(input_fname) as fin:
for line in fin:
sample = json.loads(line.strip())
sentence, option1, option2 = (
sample["sentence"],
sample["option1"],
sample["option2"],
)
pronoun_span = (sentence.index("_"), sentence.index("_") + 1)
if eval:
query, cand = option1, option2
else:
query = option1 if sample["answer"] == "1" else option2
cand = option2 if sample["answer"] == "1" else option1
yield sentence, pronoun_span, query, cand
def filter_noun_chunks(
chunks, exclude_pronouns=False, exclude_query=None, exact_match=False
):
if exclude_pronouns:
chunks = [
np
for np in chunks
if (np.lemma_ != "-PRON-" and not all(tok.pos_ == "PRON" for tok in np))
]
if exclude_query is not None:
excl_txt = [exclude_query.lower()]
filtered_chunks = []
for chunk in chunks:
lower_chunk = chunk.text.lower()
found = False
for excl in excl_txt:
if (
not exact_match and (lower_chunk in excl or excl in lower_chunk)
) or lower_chunk == excl:
found = True
break
if not found:
filtered_chunks.append(chunk)
chunks = filtered_chunks
return chunks
| EXA-1-master | exa/models/unilm-master/edgelm/examples/roberta/wsc/wsc_utils.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import math
import torch
import torch.nn.functional as F
from fairseq import utils
from fairseq.criterions import LegacyFairseqCriterion, register_criterion
from fairseq.data import encoders
@register_criterion("wsc")
class WSCCriterion(LegacyFairseqCriterion):
def __init__(self, args, task):
super().__init__(args, task)
if self.args.save_predictions is not None:
self.prediction_h = open(self.args.save_predictions, "w")
else:
self.prediction_h = None
self.bpe = encoders.build_bpe(args.bpe)
self.tokenizer = encoders.build_tokenizer(args.tokenizer)
def __del__(self):
if self.prediction_h is not None:
self.prediction_h.close()
@staticmethod
def add_args(parser):
"""Add criterion-specific arguments to the parser."""
parser.add_argument("--wsc-margin-alpha", type=float, metavar="A", default=1.0)
parser.add_argument("--wsc-margin-beta", type=float, metavar="B", default=0.0)
parser.add_argument(
"--wsc-cross-entropy",
action="store_true",
help="use cross entropy formulation instead of margin loss",
)
parser.add_argument(
"--save-predictions", metavar="FILE", help="file to save predictions to"
)
def get_masked_input(self, tokens, mask):
masked_tokens = tokens.clone()
masked_tokens[mask] = self.task.mask
return masked_tokens
def get_lprobs(self, model, tokens, mask):
logits, _ = model(src_tokens=self.get_masked_input(tokens, mask))
lprobs = F.log_softmax(logits, dim=-1, dtype=torch.float)
scores = lprobs.gather(2, tokens.unsqueeze(-1)).squeeze(-1)
mask = mask.type_as(scores)
scores = (scores * mask).sum(dim=-1) / mask.sum(dim=-1)
return scores
def get_loss(self, query_lprobs, cand_lprobs):
if self.args.wsc_cross_entropy:
return F.cross_entropy(
torch.cat([query_lprobs, cand_lprobs]).unsqueeze(0),
query_lprobs.new([0]).long(),
)
else:
return (
-query_lprobs
+ self.args.wsc_margin_alpha
* (cand_lprobs - query_lprobs + self.args.wsc_margin_beta).clamp(min=0)
).sum()
def forward(self, model, sample, reduce=True):
# compute loss and accuracy
loss, nloss = 0.0, 0
ncorrect, nqueries = 0, 0
for i, label in enumerate(sample["labels"]):
query_lprobs = self.get_lprobs(
model,
sample["query_tokens"][i].unsqueeze(0),
sample["query_masks"][i].unsqueeze(0),
)
cand_lprobs = self.get_lprobs(
model,
sample["candidate_tokens"][i],
sample["candidate_masks"][i],
)
pred = (query_lprobs >= cand_lprobs).all().item()
if label is not None:
label = 1 if label else 0
ncorrect += 1 if pred == label else 0
nqueries += 1
if label:
# only compute a loss for positive instances
nloss += 1
loss += self.get_loss(query_lprobs, cand_lprobs)
id = sample["id"][i].item()
if self.prediction_h is not None:
print("{}\t{}\t{}".format(id, pred, label), file=self.prediction_h)
if nloss == 0:
loss = torch.tensor(0.0, requires_grad=True)
sample_size = nqueries if nqueries > 0 else 1
logging_output = {
"loss": utils.item(loss.data) if reduce else loss.data,
"ntokens": sample["ntokens"],
"nsentences": sample["nsentences"],
"sample_size": sample_size,
"ncorrect": ncorrect,
"nqueries": nqueries,
}
return loss, sample_size, logging_output
@staticmethod
def aggregate_logging_outputs(logging_outputs):
"""Aggregate logging outputs from data parallel training."""
loss_sum = sum(log.get("loss", 0) for log in logging_outputs)
ntokens = sum(log.get("ntokens", 0) for log in logging_outputs)
nsentences = sum(log.get("nsentences", 0) for log in logging_outputs)
sample_size = sum(log.get("sample_size", 0) for log in logging_outputs)
agg_output = {
"loss": loss_sum / sample_size / math.log(2),
"ntokens": ntokens,
"nsentences": nsentences,
"sample_size": sample_size,
}
ncorrect = sum(log.get("ncorrect", 0) for log in logging_outputs)
nqueries = sum(log.get("nqueries", 0) for log in logging_outputs)
if nqueries > 0:
agg_output["accuracy"] = ncorrect / float(nqueries)
return agg_output
@register_criterion("winogrande")
class WinograndeCriterion(WSCCriterion):
def forward(self, model, sample, reduce=True):
# compute loss and accuracy
query_lprobs = self.get_lprobs(
model,
sample["query_tokens"],
sample["query_masks"],
)
cand_lprobs = self.get_lprobs(
model,
sample["candidate_tokens"],
sample["candidate_masks"],
)
pred = query_lprobs >= cand_lprobs
loss = self.get_loss(query_lprobs, cand_lprobs)
sample_size = sample["query_tokens"].size(0)
ncorrect = pred.sum().item()
logging_output = {
"loss": utils.item(loss.data) if reduce else loss.data,
"ntokens": sample["ntokens"],
"nsentences": sample["nsentences"],
"sample_size": sample_size,
"ncorrect": ncorrect,
"nqueries": sample_size,
}
return loss, sample_size, logging_output
| EXA-1-master | exa/models/unilm-master/edgelm/examples/roberta/wsc/wsc_criterion.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from . import wsc_criterion # noqa
from . import wsc_task # noqa
| EXA-1-master | exa/models/unilm-master/edgelm/examples/roberta/wsc/__init__.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import json
import os
import tempfile
import numpy as np
import torch
import torch.nn.functional as F
from fairseq import utils
from fairseq.data import (
Dictionary,
IdDataset,
ListDataset,
NestedDictionaryDataset,
NumelDataset,
NumSamplesDataset,
PadDataset,
SortDataset,
data_utils,
encoders,
)
from fairseq.tasks import LegacyFairseqTask, register_task
from . import wsc_utils
@register_task("wsc")
class WSCTask(LegacyFairseqTask):
"""Task to finetune RoBERTa for Winograd Schemas."""
@staticmethod
def add_args(parser):
"""Add task-specific arguments to the parser."""
parser.add_argument(
"data", metavar="DIR", help="path to data directory; we load <split>.jsonl"
)
parser.add_argument(
"--init-token",
type=int,
default=None,
help="add token at the beginning of each batch item",
)
def __init__(self, args, vocab):
super().__init__(args)
self.vocab = vocab
self.mask = vocab.add_symbol("<mask>")
self.bpe = encoders.build_bpe(args)
self.tokenizer = encoders.build_tokenizer(args)
# hack to handle GPT-2 BPE, which includes leading spaces
if args.bpe == "gpt2":
self.leading_space = True
self.trailing_space = False
else:
self.leading_space = False
self.trailing_space = True
@classmethod
def load_dictionary(cls, filename):
"""Load the dictionary from the filename
Args:
filename (str): the filename
"""
dictionary = Dictionary.load(filename)
dictionary.add_symbol("<mask>")
return dictionary
@classmethod
def setup_task(cls, args, **kwargs):
assert args.criterion == "wsc", "Must set --criterion=wsc"
# load data and label dictionaries
vocab = cls.load_dictionary(os.path.join(args.data, "dict.txt"))
print("| dictionary: {} types".format(len(vocab)))
return cls(args, vocab)
def binarize(self, s: str, append_eos: bool = False):
if self.tokenizer is not None:
s = self.tokenizer.encode(s)
if self.bpe is not None:
s = self.bpe.encode(s)
tokens = self.vocab.encode_line(
s,
append_eos=append_eos,
add_if_not_exist=False,
).long()
if self.args.init_token is not None:
tokens = torch.cat([tokens.new([self.args.init_token]), tokens])
return tokens
def binarize_with_mask(self, txt, prefix, suffix, leading_space, trailing_space):
toks = self.binarize(
prefix + leading_space + txt + trailing_space + suffix,
append_eos=True,
)
mask = torch.zeros_like(toks, dtype=torch.bool)
mask_start = len(self.binarize(prefix))
mask_size = len(self.binarize(leading_space + txt))
mask[mask_start : mask_start + mask_size] = 1
return toks, mask
def load_dataset(
self, split, epoch=1, combine=False, data_path=None, return_only=False, **kwargs
):
"""Load a given dataset split.
Args:
split (str): name of the split (e.g., train, valid, test)
"""
if data_path is None:
data_path = os.path.join(self.args.data, split + ".jsonl")
if not os.path.exists(data_path):
raise FileNotFoundError("Cannot find data: {}".format(data_path))
query_tokens = []
query_masks = []
query_lengths = []
candidate_tokens = []
candidate_masks = []
candidate_lengths = []
labels = []
for sentence, pronoun_span, query, label in wsc_utils.jsonl_iterator(data_path):
prefix = sentence[: pronoun_span.start].text
suffix = sentence[pronoun_span.end :].text_with_ws
# spaCy spans include trailing spaces, but we need to know about
# leading spaces for the GPT-2 BPE
leading_space = (
" " if sentence[: pronoun_span.start].text_with_ws.endswith(" ") else ""
)
trailing_space = " " if pronoun_span.text_with_ws.endswith(" ") else ""
# get noun phrases, excluding pronouns and anything overlapping with the query
cand_spans = wsc_utils.filter_noun_chunks(
wsc_utils.extended_noun_chunks(sentence),
exclude_pronouns=True,
exclude_query=query,
exact_match=False,
)
if query is not None:
query_toks, query_mask = self.binarize_with_mask(
query, prefix, suffix, leading_space, trailing_space
)
query_len = len(query_toks)
else:
query_toks, query_mask, query_len = None, None, 0
query_tokens.append(query_toks)
query_masks.append(query_mask)
query_lengths.append(query_len)
cand_toks, cand_masks = [], []
for cand_span in cand_spans:
toks, mask = self.binarize_with_mask(
cand_span.text,
prefix,
suffix,
leading_space,
trailing_space,
)
cand_toks.append(toks)
cand_masks.append(mask)
# collate candidates
cand_toks = data_utils.collate_tokens(cand_toks, pad_idx=self.vocab.pad())
cand_masks = data_utils.collate_tokens(cand_masks, pad_idx=0)
assert cand_toks.size() == cand_masks.size()
candidate_tokens.append(cand_toks)
candidate_masks.append(cand_masks)
candidate_lengths.append(cand_toks.size(1))
labels.append(label)
query_lengths = np.array(query_lengths)
query_tokens = ListDataset(query_tokens, query_lengths)
query_masks = ListDataset(query_masks, query_lengths)
candidate_lengths = np.array(candidate_lengths)
candidate_tokens = ListDataset(candidate_tokens, candidate_lengths)
candidate_masks = ListDataset(candidate_masks, candidate_lengths)
labels = ListDataset(labels, [1] * len(labels))
dataset = {
"id": IdDataset(),
"query_tokens": query_tokens,
"query_masks": query_masks,
"candidate_tokens": candidate_tokens,
"candidate_masks": candidate_masks,
"labels": labels,
"nsentences": NumSamplesDataset(),
"ntokens": NumelDataset(query_tokens, reduce=True),
}
nested_dataset = NestedDictionaryDataset(
dataset,
sizes=[query_lengths],
)
with data_utils.numpy_seed(self.args.seed):
shuffle = np.random.permutation(len(query_tokens))
dataset = SortDataset(
nested_dataset,
# shuffle
sort_order=[shuffle],
)
if return_only:
return dataset
self.datasets[split] = dataset
return self.datasets[split]
def build_dataset_for_inference(self, sample_json):
with tempfile.NamedTemporaryFile(buffering=0) as h:
h.write((json.dumps(sample_json) + "\n").encode("utf-8"))
dataset = self.load_dataset(
"disambiguate_pronoun",
data_path=h.name,
return_only=True,
)
return dataset
def disambiguate_pronoun(self, model, sentence, use_cuda=False):
sample_json = wsc_utils.convert_sentence_to_json(sentence)
dataset = self.build_dataset_for_inference(sample_json)
sample = dataset.collater([dataset[0]])
if use_cuda:
sample = utils.move_to_cuda(sample)
def get_masked_input(tokens, mask):
masked_tokens = tokens.clone()
masked_tokens[mask.bool()] = self.mask
return masked_tokens
def get_lprobs(tokens, mask):
logits, _ = model(src_tokens=get_masked_input(tokens, mask))
lprobs = F.log_softmax(logits, dim=-1, dtype=torch.float)
scores = lprobs.gather(2, tokens.unsqueeze(-1)).squeeze(-1)
mask = mask.type_as(scores)
scores = (scores * mask).sum(dim=-1) / mask.sum(dim=-1)
return scores
cand_lprobs = get_lprobs(
sample["candidate_tokens"][0],
sample["candidate_masks"][0],
)
if sample["query_tokens"][0] is not None:
query_lprobs = get_lprobs(
sample["query_tokens"][0].unsqueeze(0),
sample["query_masks"][0].unsqueeze(0),
)
return (query_lprobs >= cand_lprobs).all().item() == 1
else:
best_idx = cand_lprobs.argmax().item()
full_cand = sample["candidate_tokens"][0][best_idx]
mask = sample["candidate_masks"][0][best_idx]
toks = full_cand[mask.bool()]
return self.bpe.decode(self.source_dictionary.string(toks)).strip()
@property
def source_dictionary(self):
return self.vocab
@property
def target_dictionary(self):
return self.vocab
@register_task("winogrande")
class WinograndeTask(WSCTask):
"""
Task for WinoGrande dataset. Efficient implementation for Winograd schema
tasks with exactly two candidates, one of which is correct.
"""
@classmethod
def setup_task(cls, args, **kwargs):
assert args.criterion == "winogrande", "Must set --criterion=winogrande"
# load data and label dictionaries
vocab = cls.load_dictionary(os.path.join(args.data, "dict.txt"))
print("| dictionary: {} types".format(len(vocab)))
return cls(args, vocab)
def load_dataset(
self, split, epoch=1, combine=False, data_path=None, return_only=False, **kwargs
):
"""Load a given dataset split.
Args:
split (str): name of the split (e.g., train, valid, test)
"""
if data_path is None:
data_path = os.path.join(self.args.data, split + ".jsonl")
if not os.path.exists(data_path):
raise FileNotFoundError("Cannot find data: {}".format(data_path))
query_tokens = []
query_masks = []
query_lengths = []
candidate_tokens = []
candidate_masks = []
candidate_lengths = []
itr = wsc_utils.winogrande_jsonl_iterator(data_path, eval=(split == "test"))
for sample in itr:
sentence, pronoun_span, query, cand_text = sample
prefix = sentence[: pronoun_span[0]].rstrip()
suffix = sentence[pronoun_span[1] :]
leading_space = " " if sentence[: pronoun_span[0]].endswith(" ") else ""
trailing_space = ""
if query is not None:
query_toks, query_mask = self.binarize_with_mask(
query,
prefix,
suffix,
leading_space,
trailing_space,
)
query_len = len(query_toks)
else:
query_toks, query_mask, query_len = None, None, 0
query_tokens.append(query_toks)
query_masks.append(query_mask)
query_lengths.append(query_len)
cand_toks, cand_mask = self.binarize_with_mask(
cand_text,
prefix,
suffix,
leading_space,
trailing_space,
)
candidate_tokens.append(cand_toks)
candidate_masks.append(cand_mask)
candidate_lengths.append(cand_toks.size(0))
query_lengths = np.array(query_lengths)
def get_pad_dataset_fn(tokens, length, pad_idx):
return PadDataset(
ListDataset(tokens, length),
pad_idx=pad_idx,
left_pad=False,
)
query_tokens = get_pad_dataset_fn(query_tokens, query_lengths, self.vocab.pad())
query_masks = get_pad_dataset_fn(query_masks, query_lengths, 0)
candidate_lengths = np.array(candidate_lengths)
candidate_tokens = get_pad_dataset_fn(
candidate_tokens, candidate_lengths, self.vocab.pad()
)
candidate_masks = get_pad_dataset_fn(candidate_masks, candidate_lengths, 0)
dataset = {
"id": IdDataset(),
"query_tokens": query_tokens,
"query_masks": query_masks,
"candidate_tokens": candidate_tokens,
"candidate_masks": candidate_masks,
"nsentences": NumSamplesDataset(),
"ntokens": NumelDataset(query_tokens, reduce=True),
}
nested_dataset = NestedDictionaryDataset(
dataset,
sizes=[query_lengths],
)
with data_utils.numpy_seed(self.args.seed):
shuffle = np.random.permutation(len(query_tokens))
dataset = SortDataset(
nested_dataset,
# shuffle
sort_order=[shuffle],
)
if return_only:
return dataset
self.datasets[split] = dataset
return self.datasets[split]
| EXA-1-master | exa/models/unilm-master/edgelm/examples/roberta/wsc/wsc_task.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from . import commonsense_qa_task # noqa
| EXA-1-master | exa/models/unilm-master/edgelm/examples/roberta/commonsense_qa/__init__.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import json
import os
import numpy as np
import torch
from fairseq.data import (
Dictionary,
IdDataset,
ListDataset,
NestedDictionaryDataset,
NumelDataset,
NumSamplesDataset,
RawLabelDataset,
RightPadDataset,
SortDataset,
data_utils,
encoders,
)
from fairseq.tasks import LegacyFairseqTask, register_task
@register_task("commonsense_qa")
class CommonsenseQATask(LegacyFairseqTask):
"""Task to finetune RoBERTa for Commonsense QA."""
@staticmethod
def add_args(parser):
"""Add task-specific arguments to the parser."""
parser.add_argument(
"data", metavar="DIR", help="path to data directory; we load <split>.jsonl"
)
parser.add_argument(
"--init-token",
type=int,
default=None,
help="add token at the beginning of each batch item",
)
parser.add_argument("--num-classes", type=int, default=5)
def __init__(self, args, vocab):
super().__init__(args)
self.vocab = vocab
self.mask = vocab.add_symbol("<mask>")
self.bpe = encoders.build_bpe(args)
@classmethod
def load_dictionary(cls, filename):
"""Load the dictionary from the filename
Args:
filename (str): the filename
"""
dictionary = Dictionary.load(filename)
dictionary.add_symbol("<mask>")
return dictionary
@classmethod
def setup_task(cls, args, **kwargs):
assert (
args.criterion == "sentence_ranking"
), "Must set --criterion=sentence_ranking"
# load data and label dictionaries
vocab = cls.load_dictionary(os.path.join(args.data, "dict.txt"))
print("| dictionary: {} types".format(len(vocab)))
return cls(args, vocab)
def load_dataset(
self, split, epoch=1, combine=False, data_path=None, return_only=False, **kwargs
):
"""Load a given dataset split.
Args:
split (str): name of the split (e.g., train, valid, test)
"""
def binarize(s, append_bos=False):
if self.bpe is not None:
s = self.bpe.encode(s)
tokens = self.vocab.encode_line(
s,
append_eos=True,
add_if_not_exist=False,
).long()
if append_bos and self.args.init_token is not None:
tokens = torch.cat([tokens.new([self.args.init_token]), tokens])
return tokens
if data_path is None:
data_path = os.path.join(self.args.data, split + ".jsonl")
if not os.path.exists(data_path):
raise FileNotFoundError("Cannot find data: {}".format(data_path))
src_tokens = [[] for i in range(self.args.num_classes)]
src_lengths = [[] for i in range(self.args.num_classes)]
labels = []
with open(data_path) as h:
for line in h:
example = json.loads(line.strip())
if "answerKey" in example:
label = ord(example["answerKey"]) - ord("A")
labels.append(label)
question = example["question"]["stem"]
assert len(example["question"]["choices"]) == self.args.num_classes
# format: `<s> Q: Where would I not want a fox? </s> A: hen house </s>`
question = "Q: " + question
question_toks = binarize(question, append_bos=True)
for i, choice in enumerate(example["question"]["choices"]):
src = "A: " + choice["text"]
src_bin = torch.cat([question_toks, binarize(src)])
src_tokens[i].append(src_bin)
src_lengths[i].append(len(src_bin))
assert all(
len(src_tokens[0]) == len(src_tokens[i])
for i in range(self.args.num_classes)
)
assert len(src_tokens[0]) == len(src_lengths[0])
assert len(labels) == 0 or len(labels) == len(src_tokens[0])
for i in range(self.args.num_classes):
src_lengths[i] = np.array(src_lengths[i])
src_tokens[i] = ListDataset(src_tokens[i], src_lengths[i])
src_lengths[i] = ListDataset(src_lengths[i])
dataset = {
"id": IdDataset(),
"nsentences": NumSamplesDataset(),
"ntokens": NumelDataset(src_tokens[0], reduce=True),
}
for i in range(self.args.num_classes):
dataset.update(
{
"net_input{}".format(i + 1): {
"src_tokens": RightPadDataset(
src_tokens[i],
pad_idx=self.source_dictionary.pad(),
),
"src_lengths": src_lengths[i],
}
}
)
if len(labels) > 0:
dataset.update({"target": RawLabelDataset(labels)})
dataset = NestedDictionaryDataset(
dataset,
sizes=[np.maximum.reduce([src_token.sizes for src_token in src_tokens])],
)
with data_utils.numpy_seed(self.args.seed):
dataset = SortDataset(
dataset,
# shuffle
sort_order=[np.random.permutation(len(dataset))],
)
print("| Loaded {} with {} samples".format(split, len(dataset)))
self.datasets[split] = dataset
return self.datasets[split]
def build_model(self, args):
from fairseq import models
model = models.build_model(args, self)
model.register_classification_head(
"sentence_classification_head",
num_classes=1,
)
return model
@property
def source_dictionary(self):
return self.vocab
@property
def target_dictionary(self):
return self.vocab
| EXA-1-master | exa/models/unilm-master/edgelm/examples/roberta/commonsense_qa/commonsense_qa_task.py |
#!/usr/bin/env python3 -u
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import fileinput
import sacremoses
def main():
    parser = argparse.ArgumentParser(description="detokenize wikitext-style tokenized text")
parser.add_argument("files", nargs="*", help="input files")
args = parser.parse_args()
detok = sacremoses.MosesDetokenizer()
for line in fileinput.input(args.files, openhook=fileinput.hook_compressed):
print(
detok.detokenize(line.strip().split(" "))
.replace(" @", "")
.replace("@ ", "")
.replace(" =", "=")
.replace("= ", "=")
.replace(" – ", "–")
)
if __name__ == "__main__":
main()
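# Example invocation (added for illustration; file names are hypothetical):
#   python detok.py wiki.test.tokens > wiki.test.detok
# Applies Moses detokenization and strips the wikitext-style "@" joiners and
# spaces around "=" heading markers.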
| EXA-1-master | exa/models/unilm-master/edgelm/examples/megatron_11b/detok.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
import matplotlib.pyplot as plt
import numpy as np
from pathlib import Path
import soundfile as sf
import sys
import torch
import torchaudio
from fairseq import checkpoint_utils, options, tasks, utils
from fairseq.logging import progress_bar
from fairseq.tasks.text_to_speech import plot_tts_output
from fairseq.data.audio.text_to_speech_dataset import TextToSpeechDataset
logging.basicConfig()
logging.root.setLevel(logging.INFO)
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
def make_parser():
parser = options.get_speech_generation_parser()
parser.add_argument("--dump-features", action="store_true")
parser.add_argument("--dump-waveforms", action="store_true")
parser.add_argument("--dump-attentions", action="store_true")
parser.add_argument("--dump-eos-probs", action="store_true")
parser.add_argument("--dump-plots", action="store_true")
parser.add_argument("--dump-target", action="store_true")
parser.add_argument("--output-sample-rate", default=22050, type=int)
parser.add_argument("--teacher-forcing", action="store_true")
parser.add_argument(
"--audio-format", type=str, default="wav", choices=["wav", "flac"]
)
return parser
def postprocess_results(
dataset: TextToSpeechDataset, sample, hypos, resample_fn, dump_target
):
def to_np(x):
return None if x is None else x.detach().cpu().numpy()
sample_ids = [dataset.ids[i] for i in sample["id"].tolist()]
texts = sample["src_texts"]
attns = [to_np(hypo["attn"]) for hypo in hypos]
eos_probs = [to_np(hypo.get("eos_prob", None)) for hypo in hypos]
feat_preds = [to_np(hypo["feature"]) for hypo in hypos]
wave_preds = [to_np(resample_fn(h["waveform"])) for h in hypos]
if dump_target:
feat_targs = [to_np(hypo["targ_feature"]) for hypo in hypos]
wave_targs = [to_np(resample_fn(h["targ_waveform"])) for h in hypos]
else:
feat_targs = [None for _ in hypos]
wave_targs = [None for _ in hypos]
return zip(sample_ids, texts, attns, eos_probs, feat_preds, wave_preds,
feat_targs, wave_targs)
def dump_result(
is_na_model,
args,
vocoder,
sample_id,
text,
attn,
eos_prob,
feat_pred,
wave_pred,
feat_targ,
wave_targ,
):
sample_rate = args.output_sample_rate
out_root = Path(args.results_path)
if args.dump_features:
feat_dir = out_root / "feat"
feat_dir.mkdir(exist_ok=True, parents=True)
np.save(feat_dir / f"{sample_id}.npy", feat_pred)
if args.dump_target:
feat_tgt_dir = out_root / "feat_tgt"
feat_tgt_dir.mkdir(exist_ok=True, parents=True)
np.save(feat_tgt_dir / f"{sample_id}.npy", feat_targ)
if args.dump_attentions:
attn_dir = out_root / "attn"
attn_dir.mkdir(exist_ok=True, parents=True)
np.save(attn_dir / f"{sample_id}.npy", attn.numpy())
if args.dump_eos_probs and not is_na_model:
eos_dir = out_root / "eos"
eos_dir.mkdir(exist_ok=True, parents=True)
np.save(eos_dir / f"{sample_id}.npy", eos_prob)
if args.dump_plots:
images = [feat_pred.T] if is_na_model else [feat_pred.T, attn]
names = ["output"] if is_na_model else ["output", "alignment"]
if feat_targ is not None:
images = [feat_targ.T] + images
names = [f"target (idx={sample_id})"] + names
if is_na_model:
plot_tts_output(images, names, attn, "alignment", suptitle=text)
else:
plot_tts_output(images, names, eos_prob, "eos prob", suptitle=text)
plot_dir = out_root / "plot"
plot_dir.mkdir(exist_ok=True, parents=True)
plt.savefig(plot_dir / f"{sample_id}.png")
plt.close()
if args.dump_waveforms:
ext = args.audio_format
if wave_pred is not None:
wav_dir = out_root / f"{ext}_{sample_rate}hz_{vocoder}"
wav_dir.mkdir(exist_ok=True, parents=True)
sf.write(wav_dir / f"{sample_id}.{ext}", wave_pred, sample_rate)
if args.dump_target and wave_targ is not None:
wav_tgt_dir = out_root / f"{ext}_{sample_rate}hz_{vocoder}_tgt"
wav_tgt_dir.mkdir(exist_ok=True, parents=True)
sf.write(wav_tgt_dir / f"{sample_id}.{ext}", wave_targ, sample_rate)
def main(args):
assert(args.dump_features or args.dump_waveforms or args.dump_attentions
or args.dump_eos_probs or args.dump_plots)
if args.max_tokens is None and args.batch_size is None:
args.max_tokens = 8000
logger.info(args)
use_cuda = torch.cuda.is_available() and not args.cpu
task = tasks.setup_task(args)
models, saved_cfg, task = checkpoint_utils.load_model_ensemble_and_task(
[args.path],
task=task,
)
model = models[0].cuda() if use_cuda else models[0]
# use the original n_frames_per_step
task.args.n_frames_per_step = saved_cfg.task.n_frames_per_step
task.load_dataset(args.gen_subset, task_cfg=saved_cfg.task)
data_cfg = task.data_cfg
sample_rate = data_cfg.config.get("features", {}).get("sample_rate", 22050)
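    # Select an identity function when the dataset sample rate already matches
    # the requested output rate; otherwise resample each waveform with the sox
    # "rate" effect before dumping.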
resample_fn = {
False: lambda x: x,
True: lambda x: torchaudio.sox_effects.apply_effects_tensor(
x.detach().cpu().unsqueeze(0), sample_rate,
[['rate', str(args.output_sample_rate)]]
)[0].squeeze(0)
}.get(args.output_sample_rate != sample_rate)
if args.output_sample_rate != sample_rate:
logger.info(f"resampling to {args.output_sample_rate}Hz")
generator = task.build_generator([model], args)
itr = task.get_batch_iterator(
dataset=task.dataset(args.gen_subset),
max_tokens=args.max_tokens,
max_sentences=args.batch_size,
max_positions=(sys.maxsize, sys.maxsize),
ignore_invalid_inputs=args.skip_invalid_size_inputs_valid_test,
required_batch_size_multiple=args.required_batch_size_multiple,
num_shards=args.num_shards,
shard_id=args.shard_id,
num_workers=args.num_workers,
data_buffer_size=args.data_buffer_size,
).next_epoch_itr(shuffle=False)
Path(args.results_path).mkdir(exist_ok=True, parents=True)
is_na_model = getattr(model, "NON_AUTOREGRESSIVE", False)
dataset = task.dataset(args.gen_subset)
vocoder = task.args.vocoder
with progress_bar.build_progress_bar(args, itr) as t:
for sample in t:
sample = utils.move_to_cuda(sample) if use_cuda else sample
hypos = generator.generate(model, sample, has_targ=args.dump_target)
for result in postprocess_results(
dataset, sample, hypos, resample_fn, args.dump_target
):
dump_result(is_na_model, args, vocoder, *result)
def cli_main():
parser = make_parser()
args = options.parse_args_and_arch(parser)
main(args)
if __name__ == "__main__":
cli_main()
| EXA-1-master | exa/models/unilm-master/edgelm/examples/speech_synthesis/generate_waveform.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
| EXA-1-master | exa/models/unilm-master/edgelm/examples/speech_synthesis/__init__.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os
from pathlib import Path
from typing import Optional, List, Dict
import zipfile
import tempfile
from dataclasses import dataclass
from itertools import groupby
import torch
import torch.nn.functional as F
import numpy as np
from tqdm import tqdm
from examples.speech_to_text.data_utils import load_tsv_to_dicts
from fairseq.data.audio.audio_utils import TTSSpectrogram, TTSMelScale
def trim_or_pad_to_target_length(
data_1d_or_2d: np.ndarray, target_length: int
) -> np.ndarray:
assert len(data_1d_or_2d.shape) in {1, 2}
delta = data_1d_or_2d.shape[0] - target_length
if delta >= 0: # trim if being longer
data_1d_or_2d = data_1d_or_2d[: target_length]
else: # pad if being shorter
if len(data_1d_or_2d.shape) == 1:
data_1d_or_2d = np.concatenate(
[data_1d_or_2d, np.zeros(-delta)], axis=0
)
else:
data_1d_or_2d = np.concatenate(
[data_1d_or_2d, np.zeros((-delta, data_1d_or_2d.shape[1]))],
axis=0
)
return data_1d_or_2d
def extract_logmel_spectrogram(
waveform: torch.Tensor, sample_rate: int,
output_path: Optional[Path] = None, win_length: int = 1024,
hop_length: int = 256, n_fft: int = 1024,
win_fn: callable = torch.hann_window, n_mels: int = 80,
f_min: float = 0., f_max: float = 8000, eps: float = 1e-5,
overwrite: bool = False, target_length: Optional[int] = None
):
if output_path is not None and output_path.is_file() and not overwrite:
return
spectrogram_transform = TTSSpectrogram(
n_fft=n_fft, win_length=win_length, hop_length=hop_length,
window_fn=win_fn
)
mel_scale_transform = TTSMelScale(
n_mels=n_mels, sample_rate=sample_rate, f_min=f_min, f_max=f_max,
n_stft=n_fft // 2 + 1
)
spectrogram = spectrogram_transform(waveform)
mel_spec = mel_scale_transform(spectrogram)
logmel_spec = torch.clamp(mel_spec, min=eps).log()
assert len(logmel_spec.shape) == 3 and logmel_spec.shape[0] == 1
logmel_spec = logmel_spec.squeeze().t() # D x T -> T x D
if target_length is not None:
        logmel_spec = trim_or_pad_to_target_length(logmel_spec, target_length)
if output_path is not None:
np.save(output_path.as_posix(), logmel_spec)
else:
return logmel_spec
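# Illustrative usage sketch (not part of the original module): extracting a
# log-Mel spectrogram for a single mono audio file. The path argument and the
# local torchaudio import are assumptions for the example only.
def _example_extract_logmel(wav_path: str):
    import torchaudio  # assumed available, as elsewhere in these examples
    waveform, sample_rate = torchaudio.load(wav_path)  # (1, samples), mono
    # With output_path=None the T x n_mels log-Mel matrix is returned directly.
    return extract_logmel_spectrogram(waveform, sample_rate, output_path=None)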
def extract_pitch(
waveform: torch.Tensor, sample_rate: int,
output_path: Optional[Path] = None, hop_length: int = 256,
log_scale: bool = True, phoneme_durations: Optional[List[int]] = None
):
if output_path is not None and output_path.is_file():
return
try:
import pyworld
except ImportError:
raise ImportError("Please install PyWORLD: pip install pyworld")
_waveform = waveform.squeeze(0).double().numpy()
pitch, t = pyworld.dio(
_waveform, sample_rate, frame_period=hop_length / sample_rate * 1000
)
pitch = pyworld.stonemask(_waveform, pitch, t, sample_rate)
if phoneme_durations is not None:
pitch = trim_or_pad_to_target_length(pitch, sum(phoneme_durations))
try:
from scipy.interpolate import interp1d
except ImportError:
raise ImportError("Please install SciPy: pip install scipy")
nonzero_ids = np.where(pitch != 0)[0]
interp_fn = interp1d(
nonzero_ids,
pitch[nonzero_ids],
fill_value=(pitch[nonzero_ids[0]], pitch[nonzero_ids[-1]]),
bounds_error=False,
)
pitch = interp_fn(np.arange(0, len(pitch)))
d_cumsum = np.cumsum(np.concatenate([np.array([0]), phoneme_durations]))
pitch = np.array(
[
np.mean(pitch[d_cumsum[i-1]: d_cumsum[i]])
for i in range(1, len(d_cumsum))
]
)
assert len(pitch) == len(phoneme_durations)
if log_scale:
pitch = np.log(pitch + 1)
if output_path is not None:
np.save(output_path.as_posix(), pitch)
else:
return pitch
def extract_energy(
waveform: torch.Tensor, output_path: Optional[Path] = None,
hop_length: int = 256, n_fft: int = 1024, log_scale: bool = True,
phoneme_durations: Optional[List[int]] = None
):
if output_path is not None and output_path.is_file():
return
assert len(waveform.shape) == 2 and waveform.shape[0] == 1
waveform = waveform.view(1, 1, waveform.shape[1])
waveform = F.pad(
waveform.unsqueeze(1), [n_fft // 2, n_fft // 2, 0, 0],
mode="reflect"
)
waveform = waveform.squeeze(1)
fourier_basis = np.fft.fft(np.eye(n_fft))
cutoff = int((n_fft / 2 + 1))
fourier_basis = np.vstack(
[np.real(fourier_basis[:cutoff, :]),
np.imag(fourier_basis[:cutoff, :])]
)
forward_basis = torch.FloatTensor(fourier_basis[:, None, :])
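    # The conv1d against this fixed real/imaginary Fourier basis computes an
    # STFT; the frame energy below is the L2 norm of each frame's magnitude
    # spectrum.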
forward_transform = F.conv1d(
waveform, forward_basis, stride=hop_length, padding=0
)
real_part = forward_transform[:, :cutoff, :]
imag_part = forward_transform[:, cutoff:, :]
magnitude = torch.sqrt(real_part ** 2 + imag_part ** 2)
energy = torch.norm(magnitude, dim=1).squeeze(0).numpy()
if phoneme_durations is not None:
energy = trim_or_pad_to_target_length(energy, sum(phoneme_durations))
d_cumsum = np.cumsum(np.concatenate([np.array([0]), phoneme_durations]))
energy = np.array(
[
np.mean(energy[d_cumsum[i - 1]: d_cumsum[i]])
for i in range(1, len(d_cumsum))
]
)
assert len(energy) == len(phoneme_durations)
if log_scale:
energy = np.log(energy + 1)
if output_path is not None:
np.save(output_path.as_posix(), energy)
else:
return energy
def get_global_cmvn(feature_root: Path, output_path: Optional[Path] = None):
mean_x, mean_x2, n_frames = None, None, 0
feature_paths = feature_root.glob("*.npy")
for p in tqdm(feature_paths):
with open(p, 'rb') as f:
frames = np.load(f).squeeze()
n_frames += frames.shape[0]
cur_mean_x = frames.sum(axis=0)
if mean_x is None:
mean_x = cur_mean_x
else:
mean_x += cur_mean_x
cur_mean_x2 = (frames ** 2).sum(axis=0)
if mean_x2 is None:
mean_x2 = cur_mean_x2
else:
mean_x2 += cur_mean_x2
mean_x /= n_frames
mean_x2 /= n_frames
var_x = mean_x2 - mean_x ** 2
std_x = np.sqrt(np.maximum(var_x, 1e-10))
if output_path is not None:
with open(output_path, 'wb') as f:
np.savez(f, mean=mean_x, std=std_x)
else:
return {"mean": mean_x, "std": std_x}
def ipa_phonemize(text, lang="en-us", use_g2p=False):
if use_g2p:
assert lang == "en-us", "g2pE phonemizer only works for en-us"
try:
from g2p_en import G2p
g2p = G2p()
return " ".join("|" if p == " " else p for p in g2p(text))
except ImportError:
            raise ImportError(
                "Please install g2p_en: pip install g2p_en"
            )
else:
try:
from phonemizer import phonemize
from phonemizer.separator import Separator
return phonemize(
text, backend='espeak', language=lang,
separator=Separator(word="| ", phone=" ")
)
except ImportError:
raise ImportError(
"Please install phonemizer: pip install phonemizer"
)
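# Illustrative sketch (not part of the original module): the two phonemization
# backends above, called on a toy sentence. Both require their optional
# dependencies (g2p_en, or phonemizer plus espeak) to be installed.
def _example_ipa_phonemize():
    with_g2p = ipa_phonemize("hello world", lang="en-us", use_g2p=True)
    with_espeak = ipa_phonemize("hello world", lang="en-us", use_g2p=False)
    return with_g2p, with_espeak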
@dataclass
class ForceAlignmentInfo(object):
tokens: List[str]
frame_durations: List[int]
start_sec: Optional[float]
end_sec: Optional[float]
def get_mfa_alignment_by_sample_id(
textgrid_zip_path: str, sample_id: str, sample_rate: int,
hop_length: int, silence_phones: List[str] = ("sil", "sp", "spn")
) -> ForceAlignmentInfo:
try:
import tgt
except ImportError:
raise ImportError("Please install TextGridTools: pip install tgt")
filename = f"{sample_id}.TextGrid"
out_root = Path(tempfile.gettempdir())
tgt_path = out_root / filename
with zipfile.ZipFile(textgrid_zip_path) as f_zip:
f_zip.extract(filename, path=out_root)
textgrid = tgt.io.read_textgrid(tgt_path.as_posix())
os.remove(tgt_path)
phones, frame_durations = [], []
start_sec, end_sec, end_idx = 0, 0, 0
for t in textgrid.get_tier_by_name("phones")._objects:
s, e, p = t.start_time, t.end_time, t.text
# Trim leading silences
if len(phones) == 0:
if p in silence_phones:
continue
else:
start_sec = s
phones.append(p)
if p not in silence_phones:
end_sec = e
end_idx = len(phones)
r = sample_rate / hop_length
frame_durations.append(int(np.round(e * r) - np.round(s * r)))
# Trim tailing silences
phones = phones[:end_idx]
frame_durations = frame_durations[:end_idx]
return ForceAlignmentInfo(
tokens=phones, frame_durations=frame_durations, start_sec=start_sec,
end_sec=end_sec
)
def get_mfa_alignment(
textgrid_zip_path: str, sample_ids: List[str], sample_rate: int,
hop_length: int
) -> Dict[str, ForceAlignmentInfo]:
return {
i: get_mfa_alignment_by_sample_id(
textgrid_zip_path, i, sample_rate, hop_length
) for i in tqdm(sample_ids)
}
def get_unit_alignment(
id_to_unit_tsv_path: str, sample_ids: List[str]
) -> Dict[str, ForceAlignmentInfo]:
id_to_units = {
e["id"]: e["units"] for e in load_tsv_to_dicts(id_to_unit_tsv_path)
}
id_to_units = {i: id_to_units[i].split() for i in sample_ids}
id_to_units_collapsed = {
i: [uu for uu, _ in groupby(u)] for i, u in id_to_units.items()
}
id_to_durations = {
i: [len(list(g)) for _, g in groupby(u)] for i, u in id_to_units.items()
}
return {
i: ForceAlignmentInfo(
tokens=id_to_units_collapsed[i], frame_durations=id_to_durations[i],
start_sec=None, end_sec=None
)
for i in sample_ids
}
| EXA-1-master | exa/models/unilm-master/edgelm/examples/speech_synthesis/data_utils.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import numpy as np
import torch
from scipy.interpolate import interp1d
import torchaudio
from fairseq.tasks.text_to_speech import (
batch_compute_distortion, compute_rms_dist
)
def batch_mel_spectral_distortion(
y1, y2, sr, normalize_type="path", mel_fn=None
):
"""
https://arxiv.org/pdf/2011.03568.pdf
Same as Mel Cepstral Distortion, but computed on log-mel spectrograms.
"""
if mel_fn is None or mel_fn.sample_rate != sr:
mel_fn = torchaudio.transforms.MelSpectrogram(
sr, n_fft=int(0.05 * sr), win_length=int(0.05 * sr),
hop_length=int(0.0125 * sr), f_min=20, n_mels=80,
window_fn=torch.hann_window
).to(y1[0].device)
offset = 1e-6
return batch_compute_distortion(
y1, y2, sr, lambda y: torch.log(mel_fn(y) + offset).transpose(-1, -2),
compute_rms_dist, normalize_type
)
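# Illustrative sketch (not part of the original module): comparing a reference
# and a synthesized waveform with the helper above. The file paths are
# placeholders; the call mirrors the usage in evaluation/eval_sp.py.
def _example_mel_spectral_distortion(ref_path: str, syn_path: str):
    yref, sr = torchaudio.load(ref_path)
    ysyn, sr2 = torchaudio.load(syn_path)
    assert sr == sr2
    # One (distortion, extra) tuple per utterance pair.
    return batch_mel_spectral_distortion([yref[0]], [ysyn[0]], sr, None)[0]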
# This code is based on
# "https://github.com/bastibe/MAPS-Scripts/blob/master/helper.py"
def _same_t_in_true_and_est(func):
def new_func(true_t, true_f, est_t, est_f):
assert type(true_t) is np.ndarray
assert type(true_f) is np.ndarray
assert type(est_t) is np.ndarray
assert type(est_f) is np.ndarray
interpolated_f = interp1d(
est_t, est_f, bounds_error=False, kind='nearest', fill_value=0
)(true_t)
return func(true_t, true_f, true_t, interpolated_f)
return new_func
@_same_t_in_true_and_est
def gross_pitch_error(true_t, true_f, est_t, est_f):
"""The relative frequency in percent of pitch estimates that are
outside a threshold around the true pitch. Only frames that are
considered pitched by both the ground truth and the estimator (if
applicable) are considered.
"""
correct_frames = _true_voiced_frames(true_t, true_f, est_t, est_f)
gross_pitch_error_frames = _gross_pitch_error_frames(
true_t, true_f, est_t, est_f
)
return np.sum(gross_pitch_error_frames) / np.sum(correct_frames)
def _gross_pitch_error_frames(true_t, true_f, est_t, est_f, eps=1e-8):
voiced_frames = _true_voiced_frames(true_t, true_f, est_t, est_f)
true_f_p_eps = [x + eps for x in true_f]
pitch_error_frames = np.abs(est_f / true_f_p_eps - 1) > 0.2
return voiced_frames & pitch_error_frames
def _true_voiced_frames(true_t, true_f, est_t, est_f):
return (est_f != 0) & (true_f != 0)
def _voicing_decision_error_frames(true_t, true_f, est_t, est_f):
return (est_f != 0) != (true_f != 0)
@_same_t_in_true_and_est
def f0_frame_error(true_t, true_f, est_t, est_f):
gross_pitch_error_frames = _gross_pitch_error_frames(
true_t, true_f, est_t, est_f
)
voicing_decision_error_frames = _voicing_decision_error_frames(
true_t, true_f, est_t, est_f
)
return (np.sum(gross_pitch_error_frames) +
np.sum(voicing_decision_error_frames)) / (len(true_t))
@_same_t_in_true_and_est
def voicing_decision_error(true_t, true_f, est_t, est_f):
voicing_decision_error_frames = _voicing_decision_error_frames(
true_t, true_f, est_t, est_f
)
return np.sum(voicing_decision_error_frames) / (len(true_t))
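# Illustrative sketch (not part of the original module): the three F0 metrics
# above on tiny synthetic tracks. Frames with f0 == 0 are treated as unvoiced.
def _example_f0_metrics():
    t = np.arange(4, dtype=float)
    true_f = np.array([100.0, 100.0, 0.0, 100.0])
    est_f = np.array([100.0, 150.0, 0.0, 0.0])  # one gross error, one voicing error
    return (
        gross_pitch_error(t, true_f, t, est_f),       # 1 bad frame / 2 voiced-in-both
        voicing_decision_error(t, true_f, t, est_f),  # 1 mismatch / 4 frames
        f0_frame_error(t, true_f, t, est_f),          # (1 + 1) / 4 frames
    )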
| EXA-1-master | exa/models/unilm-master/edgelm/examples/speech_synthesis/utils.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
Signal processing-based evaluation using waveforms
"""
import csv
import numpy as np
import os.path as op
import torch
import tqdm
from tabulate import tabulate
import torchaudio
from examples.speech_synthesis.utils import batch_mel_spectral_distortion
from fairseq.tasks.text_to_speech import batch_mel_cepstral_distortion
def load_eval_spec(path):
with open(path) as f:
reader = csv.DictReader(f, delimiter='\t')
samples = list(reader)
return samples
def eval_distortion(samples, distortion_fn, device="cuda"):
nmiss = 0
results = []
for sample in tqdm.tqdm(samples):
if not op.isfile(sample["ref"]) or not op.isfile(sample["syn"]):
nmiss += 1
results.append(None)
continue
# assume single channel
yref, sr = torchaudio.load(sample["ref"])
ysyn, _sr = torchaudio.load(sample["syn"])
yref, ysyn = yref[0].to(device), ysyn[0].to(device)
assert sr == _sr, f"{sr} != {_sr}"
distortion, extra = distortion_fn([yref], [ysyn], sr, None)[0]
_, _, _, _, _, pathmap = extra
nins = torch.sum(pathmap.sum(dim=1) - 1) # extra frames in syn
ndel = torch.sum(pathmap.sum(dim=0) - 1) # missing frames from syn
results.append(
(distortion.item(), # path distortion
pathmap.size(0), # yref num frames
pathmap.size(1), # ysyn num frames
pathmap.sum().item(), # path length
nins.item(), # insertion
ndel.item(), # deletion
)
)
return results
def eval_mel_cepstral_distortion(samples, device="cuda"):
return eval_distortion(samples, batch_mel_cepstral_distortion, device)
def eval_mel_spectral_distortion(samples, device="cuda"):
return eval_distortion(samples, batch_mel_spectral_distortion, device)
def print_results(results, show_bin):
results = np.array(list(filter(lambda x: x is not None, results)))
np.set_printoptions(precision=3)
def _print_result(results):
dist, dur_ref, dur_syn, dur_ali, nins, ndel = results.sum(axis=0)
res = {
"nutt": len(results),
"dist": dist,
"dur_ref": int(dur_ref),
"dur_syn": int(dur_syn),
"dur_ali": int(dur_ali),
"dist_per_ref_frm": dist/dur_ref,
"dist_per_syn_frm": dist/dur_syn,
"dist_per_ali_frm": dist/dur_ali,
"ins": nins/dur_ref,
"del": ndel/dur_ref,
}
print(tabulate(
[res.values()],
res.keys(),
floatfmt=".4f"
))
print(">>>> ALL")
_print_result(results)
if show_bin:
edges = [0, 200, 400, 600, 800, 1000, 2000, 4000]
for i in range(1, len(edges)):
mask = np.logical_and(results[:, 1] >= edges[i-1],
results[:, 1] < edges[i])
if not mask.any():
continue
bin_results = results[mask]
print(f">>>> ({edges[i-1]}, {edges[i]})")
_print_result(bin_results)
def main(eval_spec, mcd, msd, show_bin):
samples = load_eval_spec(eval_spec)
device = "cpu"
if mcd:
print("===== Evaluate Mean Cepstral Distortion =====")
results = eval_mel_cepstral_distortion(samples, device)
print_results(results, show_bin)
if msd:
print("===== Evaluate Mean Spectral Distortion =====")
results = eval_mel_spectral_distortion(samples, device)
print_results(results, show_bin)
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("eval_spec")
parser.add_argument("--mcd", action="store_true")
parser.add_argument("--msd", action="store_true")
parser.add_argument("--show-bin", action="store_true")
args = parser.parse_args()
main(args.eval_spec, args.mcd, args.msd, args.show_bin)
| EXA-1-master | exa/models/unilm-master/edgelm/examples/speech_synthesis/evaluation/eval_sp.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import editdistance
import re
import shutil
import soundfile as sf
import subprocess
from pathlib import Path
from examples.speech_to_text.data_utils import load_tsv_to_dicts
def preprocess_text(text):
text = "|".join(re.sub(r"[^A-Z' ]", " ", text.upper()).split())
text = " ".join(text)
return text
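# For example, preprocess_text("Don't stop!") yields "D O N ' T | S T O P":
# non-letter characters are dropped, words are joined with "|", and the result
# is spaced out letter by letter for the wav2vec letter vocabulary.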
def prepare_w2v_data(
dict_dir, sample_rate, label, audio_paths, texts, split, data_dir
):
data_dir.mkdir(parents=True, exist_ok=True)
shutil.copyfile(
dict_dir / f"dict.{label}.txt",
data_dir / f"dict.{label}.txt"
)
with open(data_dir / f"{split}.tsv", "w") as f:
f.write("/\n")
for audio_path in audio_paths:
wav, sr = sf.read(audio_path)
            assert sr == sample_rate, f"{sr} != {sample_rate}"
nsample = len(wav)
f.write(f"{audio_path}\t{nsample}\n")
with open(data_dir / f"{split}.{label}", "w") as f:
for text in texts:
text = preprocess_text(text)
f.write(f"{text}\n")
def run_asr(asr_dir, split, w2v_ckpt, w2v_label, res_dir):
"""
results will be saved at
{res_dir}/{ref,hypo}.word-{w2v_ckpt.filename}-{split}.txt
"""
cmd = ["python", "-m", "examples.speech_recognition.infer"]
cmd += [str(asr_dir.resolve())]
cmd += ["--task", "audio_finetuning", "--nbest", "1", "--quiet"]
cmd += ["--w2l-decoder", "viterbi", "--criterion", "ctc"]
cmd += ["--post-process", "letter", "--max-tokens", "4000000"]
cmd += ["--path", str(w2v_ckpt.resolve()), "--labels", w2v_label]
cmd += ["--gen-subset", split, "--results-path", str(res_dir.resolve())]
print(f"running cmd:\n{' '.join(cmd)}")
subprocess.run(cmd, check=True)
def compute_error_rate(hyp_wrd_path, ref_wrd_path, unit="word"):
"""each line is "<text> (None-<index>)" """
tokenize_line = {
"word": lambda x: re.sub(r" \(.*\)$", "", x.rstrip()).split(),
"char": lambda x: list(re.sub(r" \(.*\)$", "", x.rstrip()))
}.get(unit)
if tokenize_line is None:
raise ValueError(f"{unit} not supported")
inds = [int(re.sub(r"\D*(\d*)\D*", r"\1", line))
for line in open(hyp_wrd_path)]
hyps = [tokenize_line(line) for line in open(hyp_wrd_path)]
refs = [tokenize_line(line) for line in open(ref_wrd_path)]
assert(len(hyps) == len(refs))
err_rates = [
editdistance.eval(hyp, ref) / len(ref) for hyp, ref in zip(hyps, refs)
]
ind_to_err_rates = {i: e for i, e in zip(inds, err_rates)}
return ind_to_err_rates
def main(args):
samples = load_tsv_to_dicts(args.raw_manifest)
ids = [
sample[args.id_header] if args.id_header else "" for sample in samples
]
audio_paths = [sample[args.audio_header] for sample in samples]
texts = [sample[args.text_header] for sample in samples]
prepare_w2v_data(
args.w2v_dict_dir,
args.w2v_sample_rate,
args.w2v_label,
audio_paths,
texts,
args.split,
args.asr_dir
)
run_asr(args.asr_dir, args.split, args.w2v_ckpt, args.w2v_label, args.asr_dir)
ind_to_err_rates = compute_error_rate(
args.asr_dir / f"hypo.word-{args.w2v_ckpt.name}-{args.split}.txt",
args.asr_dir / f"ref.word-{args.w2v_ckpt.name}-{args.split}.txt",
args.err_unit,
)
uer_path = args.asr_dir / f"uer_{args.err_unit}.{args.split}.tsv"
with open(uer_path, "w") as f:
f.write("id\taudio\tuer\n")
for ind, (id_, audio_path) in enumerate(zip(ids, audio_paths)):
f.write(f"{id_}\t{audio_path}\t{ind_to_err_rates[ind]:.4f}\n")
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--raw-manifest", required=True, type=Path)
parser.add_argument("--asr-dir", required=True, type=Path)
parser.add_argument("--id-header", default="id", type=str)
parser.add_argument("--audio-header", default="audio", type=str)
parser.add_argument("--text-header", default="src_text", type=str)
parser.add_argument("--split", default="raw", type=str)
parser.add_argument("--w2v-ckpt", required=True, type=Path)
parser.add_argument("--w2v-dict-dir", required=True, type=Path)
parser.add_argument("--w2v-sample-rate", default=16000, type=int)
parser.add_argument("--w2v-label", default="ltr", type=str)
parser.add_argument("--err-unit", default="word", type=str)
args = parser.parse_args()
main(args)
| EXA-1-master | exa/models/unilm-master/edgelm/examples/speech_synthesis/evaluation/eval_asr.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
Signal processing-based evaluation using waveforms
"""
import numpy as np
import os.path as op
import torchaudio
import tqdm
from tabulate import tabulate
from examples.speech_synthesis.utils import (
gross_pitch_error, voicing_decision_error, f0_frame_error
)
from examples.speech_synthesis.evaluation.eval_sp import load_eval_spec
def difference_function(x, n, tau_max):
"""
Compute difference function of data x. This solution is implemented directly
with Numpy fft.
:param x: audio data
:param n: length of data
:param tau_max: integration window size
:return: difference function
:rtype: list
"""
x = np.array(x, np.float64)
w = x.size
tau_max = min(tau_max, w)
x_cumsum = np.concatenate((np.array([0.]), (x * x).cumsum()))
size = w + tau_max
p2 = (size // 32).bit_length()
nice_numbers = (16, 18, 20, 24, 25, 27, 30, 32)
size_pad = min(x * 2 ** p2 for x in nice_numbers if x * 2 ** p2 >= size)
fc = np.fft.rfft(x, size_pad)
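    # Wiener-Khinchin: the inverse FFT of the power spectrum |FFT(x)|^2 gives
    # the autocorrelation of the zero-padded signal.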
conv = np.fft.irfft(fc * fc.conjugate())[:tau_max]
return x_cumsum[w:w - tau_max:-1] + x_cumsum[w] - x_cumsum[:tau_max] - \
2 * conv
def cumulative_mean_normalized_difference_function(df, n):
"""
Compute cumulative mean normalized difference function (CMND).
:param df: Difference function
:param n: length of data
:return: cumulative mean normalized difference function
:rtype: list
"""
# scipy method
cmn_df = df[1:] * range(1, n) / np.cumsum(df[1:]).astype(float)
return np.insert(cmn_df, 0, 1)
def get_pitch(cmdf, tau_min, tau_max, harmo_th=0.1):
"""
Return fundamental period of a frame based on CMND function.
:param cmdf: Cumulative Mean Normalized Difference function
:param tau_min: minimum period for speech
:param tau_max: maximum period for speech
:param harmo_th: harmonicity threshold to determine if it is necessary to
compute pitch frequency
:return: fundamental period if there is values under threshold, 0 otherwise
:rtype: float
"""
tau = tau_min
while tau < tau_max:
if cmdf[tau] < harmo_th:
while tau + 1 < tau_max and cmdf[tau + 1] < cmdf[tau]:
tau += 1
return tau
tau += 1
return 0 # if unvoiced
def compute_yin(sig, sr, w_len=512, w_step=256, f0_min=100, f0_max=500,
harmo_thresh=0.1):
"""
Compute the Yin Algorithm. Return fundamental frequency and harmonic rate.
https://github.com/NVIDIA/mellotron adaption of
https://github.com/patriceguyot/Yin
:param sig: Audio signal (list of float)
:param sr: sampling rate (int)
:param w_len: size of the analysis window (samples)
:param w_step: size of the lag between two consecutives windows (samples)
:param f0_min: Minimum fundamental frequency that can be detected (hertz)
:param f0_max: Maximum fundamental frequency that can be detected (hertz)
    :param harmo_thresh: Threshold of detection. The algorithm returns the
        first minimum of the CMND function below this threshold.
:returns:
* pitches: list of fundamental frequencies,
* harmonic_rates: list of harmonic rate values for each fundamental
frequency value (= confidence value)
* argmins: minimums of the Cumulative Mean Normalized DifferenceFunction
* times: list of time of each estimation
:rtype: tuple
"""
tau_min = int(sr / f0_max)
tau_max = int(sr / f0_min)
# time values for each analysis window
time_scale = range(0, len(sig) - w_len, w_step)
times = [t/float(sr) for t in time_scale]
frames = [sig[t:t + w_len] for t in time_scale]
pitches = [0.0] * len(time_scale)
harmonic_rates = [0.0] * len(time_scale)
argmins = [0.0] * len(time_scale)
for i, frame in enumerate(frames):
# Compute YIN
df = difference_function(frame, w_len, tau_max)
cm_df = cumulative_mean_normalized_difference_function(df, tau_max)
p = get_pitch(cm_df, tau_min, tau_max, harmo_thresh)
# Get results
if np.argmin(cm_df) > tau_min:
argmins[i] = float(sr / np.argmin(cm_df))
if p != 0: # A pitch was found
pitches[i] = float(sr / p)
harmonic_rates[i] = cm_df[p]
else: # No pitch, but we compute a value of the harmonic rate
harmonic_rates[i] = min(cm_df)
return pitches, harmonic_rates, argmins, times
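# Illustrative sketch (not part of the original module): running the YIN
# implementation above on a synthetic 220 Hz sine. The sample rate and tone
# frequency are arbitrary example values.
def _example_compute_yin():
    sr = 16000
    t = np.arange(sr) / sr  # one second of audio
    sine = 0.5 * np.sin(2 * np.pi * 220.0 * t)
    pitches, harmonic_rates, argmins, times = compute_yin(sine, sr)
    # Voiced frames should report a pitch close to 220 Hz.
    return pitches, times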
def extract_f0(samples):
f0_samples = []
for sample in tqdm.tqdm(samples):
if not op.isfile(sample["ref"]) or not op.isfile(sample["syn"]):
f0_samples.append(None)
continue
# assume single channel
yref, sr = torchaudio.load(sample["ref"])
ysyn, _sr = torchaudio.load(sample["syn"])
yref, ysyn = yref[0], ysyn[0]
assert sr == _sr, f"{sr} != {_sr}"
yref_f0 = compute_yin(yref, sr)
ysyn_f0 = compute_yin(ysyn, sr)
f0_samples += [
{
"ref": yref_f0,
"syn": ysyn_f0
}
]
return f0_samples
def eval_f0_error(samples, distortion_fn):
results = []
for sample in tqdm.tqdm(samples):
if sample is None:
results.append(None)
continue
# assume single channel
yref_f, _, _, yref_t = sample["ref"]
ysyn_f, _, _, ysyn_t = sample["syn"]
yref_f = np.array(yref_f)
yref_t = np.array(yref_t)
ysyn_f = np.array(ysyn_f)
ysyn_t = np.array(ysyn_t)
distortion = distortion_fn(yref_t, yref_f, ysyn_t, ysyn_f)
results.append((distortion.item(),
len(yref_f),
len(ysyn_f)
))
return results
def eval_gross_pitch_error(samples):
return eval_f0_error(samples, gross_pitch_error)
def eval_voicing_decision_error(samples):
return eval_f0_error(samples, voicing_decision_error)
def eval_f0_frame_error(samples):
return eval_f0_error(samples, f0_frame_error)
def print_results(results, show_bin):
results = np.array(list(filter(lambda x: x is not None, results)))
np.set_printoptions(precision=3)
def _print_result(results):
res = {
"nutt": len(results),
"error": results[:, 0].mean(),
"std": results[:, 0].std(),
"dur_ref": int(results[:, 1].sum()),
"dur_syn": int(results[:, 2].sum()),
}
print(tabulate([res.values()], res.keys(), floatfmt=".4f"))
print(">>>> ALL")
_print_result(results)
if show_bin:
edges = [0, 200, 400, 600, 800, 1000, 2000, 4000]
for i in range(1, len(edges)):
mask = np.logical_and(results[:, 1] >= edges[i-1],
results[:, 1] < edges[i])
if not mask.any():
continue
bin_results = results[mask]
print(f">>>> ({edges[i-1]}, {edges[i]})")
_print_result(bin_results)
def main(eval_f0, gpe, vde, ffe, show_bin):
samples = load_eval_spec(eval_f0)
if gpe or vde or ffe:
f0_samples = extract_f0(samples)
if gpe:
print("===== Evaluate Gross Pitch Error =====")
results = eval_gross_pitch_error(f0_samples)
print_results(results, show_bin)
if vde:
print("===== Evaluate Voicing Decision Error =====")
results = eval_voicing_decision_error(f0_samples)
print_results(results, show_bin)
if ffe:
print("===== Evaluate F0 Frame Error =====")
results = eval_f0_frame_error(f0_samples)
print_results(results, show_bin)
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("eval_f0")
parser.add_argument("--gpe", action="store_true")
parser.add_argument("--vde", action="store_true")
parser.add_argument("--ffe", action="store_true")
parser.add_argument("--show-bin", action="store_true")
args = parser.parse_args()
main(args.eval_f0, args.gpe, args.vde, args.ffe, args.show_bin)
| EXA-1-master | exa/models/unilm-master/edgelm/examples/speech_synthesis/evaluation/eval_f0.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import csv
from pathlib import Path
def main(args):
"""
`uid syn ref text`
"""
in_root = Path(args.generation_root).resolve()
ext = args.audio_format
with open(args.audio_manifest) as f, open(args.output_path, "w") as f_out:
reader = csv.DictReader(
f, delimiter="\t", quotechar=None, doublequote=False,
lineterminator="\n", quoting=csv.QUOTE_NONE
)
header = ["id", "syn", "ref", "text", "speaker"]
f_out.write("\t".join(header) + "\n")
for row in reader:
dir_name = f"{ext}_{args.sample_rate}hz_{args.vocoder}"
id_ = row["id"]
syn = (in_root / dir_name / f"{id_}.{ext}").as_posix()
ref = row["audio"]
if args.use_resynthesized_target:
ref = (in_root / f"{dir_name}_tgt" / f"{id_}.{ext}").as_posix()
sample = [id_, syn, ref, row["tgt_text"], row["speaker"]]
f_out.write("\t".join(sample) + "\n")
print(f"wrote evaluation file to {args.output_path}")
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser()
parser.add_argument(
"--generation-root", help="output directory for generate_waveform.py"
)
parser.add_argument(
"--audio-manifest",
help="used to determine the original utterance ID and text"
)
parser.add_argument(
"--output-path", help="path to output evaluation spec file"
)
parser.add_argument(
"--use-resynthesized-target", action="store_true",
help="use resynthesized reference instead of the original audio"
)
parser.add_argument("--vocoder", type=str, default="griffin_lim")
parser.add_argument("--sample-rate", type=int, default=22_050)
parser.add_argument("--audio-format", type=str, default="wav")
args = parser.parse_args()
main(args)
| EXA-1-master | exa/models/unilm-master/edgelm/examples/speech_synthesis/evaluation/get_eval_manifest.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
| EXA-1-master | exa/models/unilm-master/edgelm/examples/speech_synthesis/evaluation/__init__.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import logging
import numpy as np
import re
from pathlib import Path
from collections import defaultdict
import pandas as pd
from torchaudio.datasets import VCTK
from tqdm import tqdm
from examples.speech_to_text.data_utils import save_df_to_tsv
log = logging.getLogger(__name__)
SPLITS = ["train", "dev", "test"]
def normalize_text(text):
return re.sub(r"[^a-zA-Z.?!,'\- ]", '', text)
def process(args):
out_root = Path(args.output_data_root).absolute()
out_root.mkdir(parents=True, exist_ok=True)
# Generate TSV manifest
print("Generating manifest...")
dataset = VCTK(out_root.as_posix(), download=False)
ids = list(dataset._walker)
np.random.seed(args.seed)
np.random.shuffle(ids)
n_train = len(ids) - args.n_dev - args.n_test
_split = ["train"] * n_train + ["dev"] * args.n_dev + ["test"] * args.n_test
id_to_split = dict(zip(ids, _split))
manifest_by_split = {split: defaultdict(list) for split in SPLITS}
progress = tqdm(enumerate(dataset), total=len(dataset))
for i, (waveform, _, text, speaker_id, _) in progress:
sample_id = dataset._walker[i]
_split = id_to_split[sample_id]
audio_dir = Path(dataset._path) / dataset._folder_audio / speaker_id
audio_path = audio_dir / f"{sample_id}.wav"
text = normalize_text(text)
manifest_by_split[_split]["id"].append(sample_id)
manifest_by_split[_split]["audio"].append(audio_path.as_posix())
manifest_by_split[_split]["n_frames"].append(len(waveform[0]))
manifest_by_split[_split]["tgt_text"].append(text)
manifest_by_split[_split]["speaker"].append(speaker_id)
manifest_by_split[_split]["src_text"].append(text)
manifest_root = Path(args.output_manifest_root).absolute()
manifest_root.mkdir(parents=True, exist_ok=True)
for _split in SPLITS:
save_df_to_tsv(
pd.DataFrame.from_dict(manifest_by_split[_split]),
manifest_root / f"{_split}.audio.tsv"
)
def main():
parser = argparse.ArgumentParser()
parser.add_argument("--output-data-root", "-d", required=True, type=str)
parser.add_argument("--output-manifest-root", "-m", required=True, type=str)
parser.add_argument("--n-dev", default=50, type=int)
parser.add_argument("--n-test", default=100, type=int)
parser.add_argument("--seed", "-s", default=1234, type=int)
args = parser.parse_args()
process(args)
if __name__ == "__main__":
main()
| EXA-1-master | exa/models/unilm-master/edgelm/examples/speech_synthesis/preprocessing/get_vctk_audio_manifest.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import argparse
from collections import defaultdict
from itertools import chain
from pathlib import Path
import numpy as np
import torchaudio
import torchaudio.sox_effects as ta_sox
import yaml
from tqdm import tqdm
from examples.speech_to_text.data_utils import load_tsv_to_dicts
from examples.speech_synthesis.preprocessing.speaker_embedder import SpkrEmbedder
def extract_embedding(audio_path, embedder):
wav, sr = torchaudio.load(audio_path) # 2D
if sr != embedder.RATE:
wav, sr = ta_sox.apply_effects_tensor(
wav, sr, [["rate", str(embedder.RATE)]]
)
try:
emb = embedder([wav[0].cuda().float()]).cpu().numpy()
except RuntimeError:
emb = None
return emb
def process(args):
print("Fetching data...")
raw_manifest_root = Path(args.raw_manifest_root).absolute()
samples = [load_tsv_to_dicts(raw_manifest_root / (s + ".tsv"))
for s in args.splits]
samples = list(chain(*samples))
with open(args.config, "r") as f:
config = yaml.load(f, Loader=yaml.FullLoader)
with open(f"{config['audio_root']}/{config['speaker_set_filename']}") as f:
speaker_to_id = {r.strip(): i for i, r in enumerate(f)}
embedder = SpkrEmbedder(args.ckpt).cuda()
speaker_to_cnt = defaultdict(float)
speaker_to_emb = defaultdict(float)
for sample in tqdm(samples, desc="extract emb"):
emb = extract_embedding(sample["audio"], embedder)
if emb is not None:
speaker_to_cnt[sample["speaker"]] += 1
speaker_to_emb[sample["speaker"]] += emb
if len(speaker_to_emb) != len(speaker_to_id):
missed = set(speaker_to_id) - set(speaker_to_emb.keys())
print(
f"WARNING: missing embeddings for {len(missed)} speaker:\n{missed}"
)
speaker_emb_mat = np.zeros((len(speaker_to_id), len(emb)), float)
for speaker in speaker_to_emb:
idx = speaker_to_id[speaker]
emb = speaker_to_emb[speaker]
cnt = speaker_to_cnt[speaker]
speaker_emb_mat[idx, :] = emb / cnt
speaker_emb_name = "speaker_emb.npy"
speaker_emb_path = f"{config['audio_root']}/{speaker_emb_name}"
np.save(speaker_emb_path, speaker_emb_mat)
config["speaker_emb_filename"] = speaker_emb_name
with open(args.new_config, "w") as f:
yaml.dump(config, f)
def main():
parser = argparse.ArgumentParser()
parser.add_argument("--raw-manifest-root", "-m", required=True, type=str)
parser.add_argument("--splits", "-s", type=str, nargs="+",
default=["train"])
parser.add_argument("--config", "-c", required=True, type=str)
parser.add_argument("--new-config", "-n", required=True, type=str)
parser.add_argument("--ckpt", required=True, type=str,
help="speaker embedder checkpoint")
args = parser.parse_args()
process(args)
if __name__ == "__main__":
main()
| EXA-1-master | exa/models/unilm-master/edgelm/examples/speech_synthesis/preprocessing/get_speaker_embedding.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import logging
from pathlib import Path
from collections import defaultdict
import pandas as pd
from torchaudio.datasets import LJSPEECH
from tqdm import tqdm
from examples.speech_to_text.data_utils import save_df_to_tsv
log = logging.getLogger(__name__)
SPLITS = ["train", "dev", "test"]
def process(args):
out_root = Path(args.output_data_root).absolute()
out_root.mkdir(parents=True, exist_ok=True)
# Generate TSV manifest
print("Generating manifest...")
# following FastSpeech's splits
dataset = LJSPEECH(out_root.as_posix(), download=True)
id_to_split = {}
for x in dataset._flist:
id_ = x[0]
speaker = id_.split("-")[0]
id_to_split[id_] = {
"LJ001": "test", "LJ002": "test", "LJ003": "dev"
}.get(speaker, "train")
manifest_by_split = {split: defaultdict(list) for split in SPLITS}
progress = tqdm(enumerate(dataset), total=len(dataset))
for i, (waveform, _, utt, normalized_utt) in progress:
sample_id = dataset._flist[i][0]
split = id_to_split[sample_id]
manifest_by_split[split]["id"].append(sample_id)
audio_path = f"{dataset._path}/{sample_id}.wav"
manifest_by_split[split]["audio"].append(audio_path)
manifest_by_split[split]["n_frames"].append(len(waveform[0]))
manifest_by_split[split]["tgt_text"].append(normalized_utt)
manifest_by_split[split]["speaker"].append("ljspeech")
manifest_by_split[split]["src_text"].append(utt)
manifest_root = Path(args.output_manifest_root).absolute()
manifest_root.mkdir(parents=True, exist_ok=True)
for split in SPLITS:
save_df_to_tsv(
pd.DataFrame.from_dict(manifest_by_split[split]),
manifest_root / f"{split}.audio.tsv"
)
def main():
parser = argparse.ArgumentParser()
parser.add_argument("--output-data-root", "-d", required=True, type=str)
parser.add_argument("--output-manifest-root", "-m", required=True, type=str)
args = parser.parse_args()
process(args)
if __name__ == "__main__":
main()
| EXA-1-master | exa/models/unilm-master/edgelm/examples/speech_synthesis/preprocessing/get_ljspeech_audio_manifest.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import logging
from pathlib import Path
import shutil
from tempfile import NamedTemporaryFile
from collections import Counter, defaultdict
import pandas as pd
import torchaudio
from tqdm import tqdm
from fairseq.data.audio.audio_utils import convert_waveform
from examples.speech_to_text.data_utils import (
create_zip,
gen_config_yaml,
gen_vocab,
get_zip_manifest,
load_tsv_to_dicts,
save_df_to_tsv
)
from examples.speech_synthesis.data_utils import (
extract_logmel_spectrogram, extract_pitch, extract_energy, get_global_cmvn,
ipa_phonemize, get_mfa_alignment, get_unit_alignment
)
log = logging.getLogger(__name__)
def process(args):
assert "train" in args.splits
out_root = Path(args.output_root).absolute()
out_root.mkdir(exist_ok=True)
print("Fetching data...")
audio_manifest_root = Path(args.audio_manifest_root).absolute()
samples = []
for s in args.splits:
for e in load_tsv_to_dicts(audio_manifest_root / f"{s}.audio.tsv"):
e["split"] = s
samples.append(e)
sample_ids = [s["id"] for s in samples]
# Get alignment info
id_to_alignment = None
if args.textgrid_zip is not None:
assert args.id_to_units_tsv is None
id_to_alignment = get_mfa_alignment(
args.textgrid_zip, sample_ids, args.sample_rate, args.hop_length
)
elif args.id_to_units_tsv is not None:
# assume identical hop length on the unit sequence
id_to_alignment = get_unit_alignment(args.id_to_units_tsv, sample_ids)
# Extract features and pack features into ZIP
feature_name = "logmelspec80"
zip_path = out_root / f"{feature_name}.zip"
pitch_zip_path = out_root / "pitch.zip"
energy_zip_path = out_root / "energy.zip"
gcmvn_npz_path = out_root / "gcmvn_stats.npz"
if zip_path.exists() and gcmvn_npz_path.exists():
print(f"{zip_path} and {gcmvn_npz_path} exist.")
else:
feature_root = out_root / feature_name
feature_root.mkdir(exist_ok=True)
pitch_root = out_root / "pitch"
energy_root = out_root / "energy"
if args.add_fastspeech_targets:
pitch_root.mkdir(exist_ok=True)
energy_root.mkdir(exist_ok=True)
print("Extracting Mel spectrogram features...")
for sample in tqdm(samples):
waveform, sample_rate = torchaudio.load(sample["audio"])
waveform, sample_rate = convert_waveform(
waveform, sample_rate, normalize_volume=args.normalize_volume,
to_sample_rate=args.sample_rate
)
sample_id = sample["id"]
target_length = None
if id_to_alignment is not None:
a = id_to_alignment[sample_id]
target_length = sum(a.frame_durations)
if a.start_sec is not None and a.end_sec is not None:
start_frame = int(a.start_sec * sample_rate)
end_frame = int(a.end_sec * sample_rate)
waveform = waveform[:, start_frame: end_frame]
extract_logmel_spectrogram(
waveform, sample_rate, feature_root / f"{sample_id}.npy",
win_length=args.win_length, hop_length=args.hop_length,
n_fft=args.n_fft, n_mels=args.n_mels, f_min=args.f_min,
f_max=args.f_max, target_length=target_length
)
if args.add_fastspeech_targets:
assert id_to_alignment is not None
extract_pitch(
waveform, sample_rate, pitch_root / f"{sample_id}.npy",
hop_length=args.hop_length, log_scale=True,
phoneme_durations=id_to_alignment[sample_id].frame_durations
)
extract_energy(
waveform, energy_root / f"{sample_id}.npy",
hop_length=args.hop_length, n_fft=args.n_fft,
log_scale=True,
phoneme_durations=id_to_alignment[sample_id].frame_durations
)
print("ZIPing features...")
create_zip(feature_root, zip_path)
get_global_cmvn(feature_root, gcmvn_npz_path)
shutil.rmtree(feature_root)
if args.add_fastspeech_targets:
create_zip(pitch_root, pitch_zip_path)
shutil.rmtree(pitch_root)
create_zip(energy_root, energy_zip_path)
shutil.rmtree(energy_root)
print("Fetching ZIP manifest...")
audio_paths, audio_lengths = get_zip_manifest(zip_path)
pitch_paths, pitch_lengths, energy_paths, energy_lengths = [None] * 4
if args.add_fastspeech_targets:
pitch_paths, pitch_lengths = get_zip_manifest(pitch_zip_path)
energy_paths, energy_lengths = get_zip_manifest(energy_zip_path)
# Generate TSV manifest
print("Generating manifest...")
manifest_by_split = {split: defaultdict(list) for split in args.splits}
for sample in tqdm(samples):
sample_id, split = sample["id"], sample["split"]
normalized_utt = sample["tgt_text"]
if id_to_alignment is not None:
normalized_utt = " ".join(id_to_alignment[sample_id].tokens)
elif args.ipa_vocab:
normalized_utt = ipa_phonemize(
normalized_utt, lang=args.lang, use_g2p=args.use_g2p
)
manifest_by_split[split]["id"].append(sample_id)
manifest_by_split[split]["audio"].append(audio_paths[sample_id])
manifest_by_split[split]["n_frames"].append(audio_lengths[sample_id])
manifest_by_split[split]["tgt_text"].append(normalized_utt)
manifest_by_split[split]["speaker"].append(sample["speaker"])
manifest_by_split[split]["src_text"].append(sample["src_text"])
if args.add_fastspeech_targets:
assert id_to_alignment is not None
duration = " ".join(
str(d) for d in id_to_alignment[sample_id].frame_durations
)
manifest_by_split[split]["duration"].append(duration)
manifest_by_split[split]["pitch"].append(pitch_paths[sample_id])
manifest_by_split[split]["energy"].append(energy_paths[sample_id])
for split in args.splits:
save_df_to_tsv(
pd.DataFrame.from_dict(manifest_by_split[split]),
out_root / f"{split}.tsv"
)
# Generate vocab
vocab_name, spm_filename = None, None
if id_to_alignment is not None or args.ipa_vocab:
vocab = Counter()
for t in manifest_by_split["train"]["tgt_text"]:
vocab.update(t.split(" "))
vocab_name = "vocab.txt"
with open(out_root / vocab_name, "w") as f:
for s, c in vocab.most_common():
f.write(f"{s} {c}\n")
else:
spm_filename_prefix = "spm_char"
spm_filename = f"{spm_filename_prefix}.model"
with NamedTemporaryFile(mode="w") as f:
for t in manifest_by_split["train"]["tgt_text"]:
f.write(t + "\n")
f.flush() # needed to ensure gen_vocab sees dumped text
gen_vocab(Path(f.name), out_root / spm_filename_prefix, "char")
# Generate speaker list
speakers = sorted({sample["speaker"] for sample in samples})
speakers_path = out_root / "speakers.txt"
with open(speakers_path, "w") as f:
for speaker in speakers:
f.write(f"{speaker}\n")
# Generate config YAML
win_len_t = args.win_length / args.sample_rate
hop_len_t = args.hop_length / args.sample_rate
extra = {
"sample_rate": args.sample_rate,
"features": {
"type": "spectrogram+melscale+log",
"eps": 1e-2, "n_mels": args.n_mels, "n_fft": args.n_fft,
"window_fn": "hann", "win_length": args.win_length,
"hop_length": args.hop_length, "sample_rate": args.sample_rate,
"win_len_t": win_len_t, "hop_len_t": hop_len_t,
"f_min": args.f_min, "f_max": args.f_max,
"n_stft": args.n_fft // 2 + 1
}
}
if len(speakers) > 1:
extra["speaker_set_filename"] = "speakers.txt"
gen_config_yaml(
out_root, spm_filename=spm_filename, vocab_name=vocab_name,
audio_root=out_root.as_posix(), input_channels=None,
input_feat_per_channel=None, specaugment_policy=None,
cmvn_type="global", gcmvn_path=gcmvn_npz_path, extra=extra
)
def main():
parser = argparse.ArgumentParser()
parser.add_argument("--audio-manifest-root", "-m", required=True, type=str)
parser.add_argument("--output-root", "-o", required=True, type=str)
parser.add_argument("--splits", "-s", type=str, nargs="+",
default=["train", "dev", "test"])
parser.add_argument("--ipa-vocab", action="store_true")
parser.add_argument("--use-g2p", action="store_true")
parser.add_argument("--lang", type=str, default="en-us")
parser.add_argument("--win-length", type=int, default=1024)
parser.add_argument("--hop-length", type=int, default=256)
parser.add_argument("--n-fft", type=int, default=1024)
parser.add_argument("--n-mels", type=int, default=80)
parser.add_argument("--f-min", type=int, default=20)
parser.add_argument("--f-max", type=int, default=8000)
parser.add_argument("--sample-rate", type=int, default=22050)
parser.add_argument("--normalize-volume", "-n", action="store_true")
parser.add_argument("--textgrid-zip", type=str, default=None)
parser.add_argument("--id-to-units-tsv", type=str, default=None)
parser.add_argument("--add-fastspeech-targets", action="store_true")
args = parser.parse_args()
process(args)
if __name__ == "__main__":
main()
| EXA-1-master | exa/models/unilm-master/edgelm/examples/speech_synthesis/preprocessing/get_feature_manifest.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
| EXA-1-master | exa/models/unilm-master/edgelm/examples/speech_synthesis/preprocessing/__init__.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import logging
from pathlib import Path
from collections import defaultdict
from typing import List, Dict, Tuple
import pandas as pd
import numpy as np
import torchaudio
from tqdm import tqdm
from examples.speech_to_text.data_utils import load_df_from_tsv, save_df_to_tsv
log = logging.getLogger(__name__)
SPLITS = ["train", "dev", "test"]
def get_top_n(
root: Path, n_speakers: int = 10, min_n_tokens: int = 5
) -> pd.DataFrame:
df = load_df_from_tsv(root / "validated.tsv")
df["n_tokens"] = [len(s.split()) for s in df["sentence"]]
df = df[df["n_tokens"] >= min_n_tokens]
df["n_frames"] = [
torchaudio.info((root / "clips" / p).as_posix()).num_frames
for p in tqdm(df["path"])
]
df["id"] = [Path(p).stem for p in df["path"]]
total_duration_ms = df.groupby("client_id")["n_frames"].agg(["sum"])
total_duration_ms = total_duration_ms.sort_values("sum", ascending=False)
top_n_total_duration_ms = total_duration_ms.head(n_speakers)
top_n_client_ids = set(top_n_total_duration_ms.index.tolist())
df_top_n = df[df["client_id"].isin(top_n_client_ids)]
return df_top_n
def get_splits(
df, train_split_ratio=0.99, speaker_in_all_splits=False, rand_seed=0
) -> Tuple[Dict[str, str], List[str]]:
np.random.seed(rand_seed)
dev_split_ratio = (1. - train_split_ratio) / 3
grouped = list(df.groupby("client_id"))
id_to_split = {}
for _, cur_df in tqdm(grouped):
cur_n_examples = len(cur_df)
if speaker_in_all_splits and cur_n_examples < 3:
continue
cur_n_train = int(cur_n_examples * train_split_ratio)
cur_n_dev = int(cur_n_examples * dev_split_ratio)
cur_n_test = cur_n_examples - cur_n_dev - cur_n_train
if speaker_in_all_splits and cur_n_dev * cur_n_test == 0:
cur_n_dev, cur_n_test = 1, 1
cur_n_train = cur_n_examples - cur_n_dev - cur_n_test
cur_indices = cur_df.index.tolist()
cur_shuffled_indices = np.random.permutation(cur_n_examples)
cur_shuffled_indices = [cur_indices[i] for i in cur_shuffled_indices]
cur_indices_by_split = {
"train": cur_shuffled_indices[:cur_n_train],
"dev": cur_shuffled_indices[cur_n_train: cur_n_train + cur_n_dev],
"test": cur_shuffled_indices[cur_n_train + cur_n_dev:]
}
for split in SPLITS:
for i in cur_indices_by_split[split]:
id_ = df["id"].loc[i]
id_to_split[id_] = split
return id_to_split, sorted(df["client_id"].unique())
def convert_to_wav(root: Path, filenames: List[str], target_sr=16_000):
out_root = root / "wav"
out_root.mkdir(exist_ok=True, parents=True)
print("Converting to WAV...")
for n in tqdm(filenames):
in_path = (root / "clips" / n).as_posix()
waveform, sr = torchaudio.load(in_path)
converted, converted_sr = torchaudio.sox_effects.apply_effects_tensor(
waveform, sr, [["rate", str(target_sr)], ["channels", "1"]]
)
out_path = (out_root / Path(n).with_suffix(".wav").name).as_posix()
torchaudio.save(out_path, converted, converted_sr, encoding="PCM_S",
bits_per_sample=16)
def process(args):
data_root = Path(args.data_root).absolute() / args.lang
# Generate TSV manifest
print("Generating manifest...")
df_top_n = get_top_n(data_root)
id_to_split, speakers = get_splits(df_top_n)
if args.convert_to_wav:
convert_to_wav(data_root, df_top_n["path"].tolist())
manifest_by_split = {split: defaultdict(list) for split in SPLITS}
for sample in tqdm(df_top_n.to_dict(orient="index").values()):
sample_id = sample["id"]
split = id_to_split[sample_id]
manifest_by_split[split]["id"].append(sample_id)
if args.convert_to_wav:
audio_path = data_root / "wav" / f"{sample_id}.wav"
else:
audio_path = data_root / "clips" / f"{sample_id}.mp3"
manifest_by_split[split]["audio"].append(audio_path.as_posix())
manifest_by_split[split]["n_frames"].append(sample["n_frames"])
manifest_by_split[split]["tgt_text"].append(sample["sentence"])
manifest_by_split[split]["speaker"].append(sample["client_id"])
manifest_by_split[split]["src_text"].append(sample["sentence"])
output_root = Path(args.output_manifest_root).absolute()
output_root.mkdir(parents=True, exist_ok=True)
for split in SPLITS:
save_df_to_tsv(
pd.DataFrame.from_dict(manifest_by_split[split]),
output_root / f"{split}.audio.tsv"
)
def main():
parser = argparse.ArgumentParser()
parser.add_argument("--data-root", "-d", required=True, type=str)
parser.add_argument("--output-manifest-root", "-m", required=True, type=str)
parser.add_argument("--lang", "-l", required=True, type=str)
parser.add_argument("--convert-to-wav", action="store_true")
args = parser.parse_args()
process(args)
if __name__ == "__main__":
main()
| EXA-1-master | exa/models/unilm-master/edgelm/examples/speech_synthesis/preprocessing/get_common_voice_audio_manifest.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import logging
import os
import csv
import tempfile
from collections import defaultdict
from pathlib import Path
import torchaudio
try:
import webrtcvad
except ImportError:
raise ImportError("Please install py-webrtcvad: pip install webrtcvad")
import pandas as pd
from tqdm import tqdm
from examples.speech_synthesis.preprocessing.denoiser.pretrained import master64
import examples.speech_synthesis.preprocessing.denoiser.utils as utils
from examples.speech_synthesis.preprocessing.vad import (
frame_generator, vad_collector, read_wave, write_wave, FS_MS, THRESHOLD,
SCALE
)
from examples.speech_to_text.data_utils import save_df_to_tsv
log = logging.getLogger(__name__)
PATHS = ["after_denoise", "after_vad"]
MIN_T = 0.05
def generate_tmp_filename(extension="txt"):
return tempfile._get_default_tempdir() + "/" + \
next(tempfile._get_candidate_names()) + "." + extension
def convert_sr(inpath, sr, output_path=None):
if not output_path:
output_path = generate_tmp_filename("wav")
cmd = f"sox {inpath} -r {sr} {output_path}"
os.system(cmd)
return output_path
def apply_vad(vad, inpath):
audio, sample_rate = read_wave(inpath)
frames = frame_generator(FS_MS, audio, sample_rate)
frames = list(frames)
segments = vad_collector(sample_rate, FS_MS, 300, vad, frames)
merge_segments = list()
timestamp_start = 0.0
timestamp_end = 0.0
# removing start, end, and long sequences of sils
for i, segment in enumerate(segments):
merge_segments.append(segment[0])
if i and timestamp_start:
sil_duration = segment[1] - timestamp_end
if sil_duration > THRESHOLD:
merge_segments.append(int(THRESHOLD / SCALE) * (b'\x00'))
else:
merge_segments.append(int((sil_duration / SCALE)) * (b'\x00'))
timestamp_start = segment[1]
timestamp_end = segment[2]
segment = b''.join(merge_segments)
return segment, sample_rate
def write(wav, filename, sr=16_000):
    # Normalize audio only if it would otherwise clip (peak above 1.0)
wav = wav / max(wav.abs().max().item(), 1)
torchaudio.save(filename, wav.cpu(), sr, encoding="PCM_S",
bits_per_sample=16)
def process(args):
# making sure we are requested either denoise or vad
if not args.denoise and not args.vad:
log.error("No denoise or vad is requested.")
return
log.info("Creating out directories...")
if args.denoise:
out_denoise = Path(args.output_dir).absolute().joinpath(PATHS[0])
out_denoise.mkdir(parents=True, exist_ok=True)
if args.vad:
out_vad = Path(args.output_dir).absolute().joinpath(PATHS[1])
out_vad.mkdir(parents=True, exist_ok=True)
log.info("Loading pre-trained speech enhancement model...")
model = master64().to(args.device)
log.info("Building the VAD model...")
vad = webrtcvad.Vad(int(args.vad_agg_level))
# preparing the output dict
output_dict = defaultdict(list)
log.info(f"Parsing input manifest: {args.audio_manifest}")
with open(args.audio_manifest, "r") as f:
manifest_dict = csv.DictReader(f, delimiter="\t")
for row in tqdm(manifest_dict):
filename = str(row["audio"])
final_output = filename
keep_sample = True
n_frames = row["n_frames"]
snr = -1
if args.denoise:
output_path_denoise = out_denoise.joinpath(Path(filename).name)
                # convert to 16 kHz in case the input uses a different sample rate
tmp_path = convert_sr(final_output, 16000)
# loading audio file and generating the enhanced version
out, sr = torchaudio.load(tmp_path)
out = out.to(args.device)
estimate = model(out)
estimate = (1 - args.dry_wet) * estimate + args.dry_wet * out
write(estimate[0], str(output_path_denoise), sr)
snr = utils.cal_snr(out, estimate)
snr = snr.cpu().detach().numpy()[0][0]
final_output = str(output_path_denoise)
if args.vad:
output_path_vad = out_vad.joinpath(Path(filename).name)
sr = torchaudio.info(final_output).sample_rate
if sr in [16000, 32000, 48000]:
tmp_path = final_output
elif sr < 16000:
tmp_path = convert_sr(final_output, 16000)
elif sr < 32000:
tmp_path = convert_sr(final_output, 32000)
else:
tmp_path = convert_sr(final_output, 48000)
# apply VAD
segment, sample_rate = apply_vad(vad, tmp_path)
if len(segment) < sample_rate * MIN_T:
keep_sample = False
print((
f"WARNING: skip {filename} because it is too short "
f"after VAD ({len(segment) / sample_rate} < {MIN_T})"
))
else:
if sample_rate != sr:
tmp_path = generate_tmp_filename("wav")
write_wave(tmp_path, segment, sample_rate)
convert_sr(tmp_path, sr,
output_path=str(output_path_vad))
else:
write_wave(str(output_path_vad), segment, sample_rate)
final_output = str(output_path_vad)
segment, _ = torchaudio.load(final_output)
n_frames = segment.size(1)
if keep_sample:
output_dict["id"].append(row["id"])
output_dict["audio"].append(final_output)
output_dict["n_frames"].append(n_frames)
output_dict["tgt_text"].append(row["tgt_text"])
output_dict["speaker"].append(row["speaker"])
output_dict["src_text"].append(row["src_text"])
output_dict["snr"].append(snr)
out_tsv_path = Path(args.output_dir) / Path(args.audio_manifest).name
log.info(f"Saving manifest to {out_tsv_path.as_posix()}")
save_df_to_tsv(pd.DataFrame.from_dict(output_dict), out_tsv_path)
def main():
parser = argparse.ArgumentParser()
parser.add_argument("--audio-manifest", "-i", required=True,
type=str, help="path to the input manifest.")
parser.add_argument(
"--output-dir", "-o", required=True, type=str,
help="path to the output dir. it will contain files after denoising and"
" vad"
)
parser.add_argument("--vad-agg-level", "-a", type=int, default=2,
help="the aggresive level of the vad [0-3].")
parser.add_argument(
"--dry-wet", "-dw", type=float, default=0.01,
help="the level of linear interpolation between noisy and enhanced "
"files."
)
parser.add_argument(
"--device", "-d", type=str, default="cpu",
help="the device to be used for the speech enhancement model: "
"cpu | cuda."
)
parser.add_argument("--denoise", action="store_true",
help="apply a denoising")
parser.add_argument("--vad", action="store_true", help="apply a VAD")
args = parser.parse_args()
process(args)
if __name__ == "__main__":
main()
| EXA-1-master | exa/models/unilm-master/edgelm/examples/speech_synthesis/preprocessing/denoise_and_vad_audio.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import librosa
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.data
import torchaudio
EMBEDDER_PARAMS = {
'num_mels': 40,
'n_fft': 512,
'emb_dim': 256,
'lstm_hidden': 768,
'lstm_layers': 3,
'window': 80,
'stride': 40,
}
def set_requires_grad(nets, requires_grad=False):
"""Set requies_grad=Fasle for all the networks to avoid unnecessary
computations
Parameters:
nets (network list) -- a list of networks
requires_grad (bool) -- whether the networks require gradients or not
"""
if not isinstance(nets, list):
nets = [nets]
for net in nets:
if net is not None:
for param in net.parameters():
param.requires_grad = requires_grad
class LinearNorm(nn.Module):
def __init__(self, hp):
super(LinearNorm, self).__init__()
self.linear_layer = nn.Linear(hp["lstm_hidden"], hp["emb_dim"])
def forward(self, x):
return self.linear_layer(x)
class SpeechEmbedder(nn.Module):
def __init__(self, hp):
super(SpeechEmbedder, self).__init__()
self.lstm = nn.LSTM(hp["num_mels"],
hp["lstm_hidden"],
num_layers=hp["lstm_layers"],
batch_first=True)
self.proj = LinearNorm(hp)
self.hp = hp
def forward(self, mel):
# (num_mels, T) -> (num_mels, T', window)
mels = mel.unfold(1, self.hp["window"], self.hp["stride"])
mels = mels.permute(1, 2, 0) # (T', window, num_mels)
x, _ = self.lstm(mels) # (T', window, lstm_hidden)
x = x[:, -1, :] # (T', lstm_hidden), use last frame only
x = self.proj(x) # (T', emb_dim)
x = x / torch.norm(x, p=2, dim=1, keepdim=True) # (T', emb_dim)
x = x.mean(dim=0)
if x.norm(p=2) != 0:
x = x / x.norm(p=2)
return x
class SpkrEmbedder(nn.Module):
RATE = 16000
def __init__(
self,
embedder_path,
embedder_params=EMBEDDER_PARAMS,
rate=16000,
hop_length=160,
win_length=400,
pad=False,
):
super(SpkrEmbedder, self).__init__()
embedder_pt = torch.load(embedder_path, map_location="cpu")
self.embedder = SpeechEmbedder(embedder_params)
self.embedder.load_state_dict(embedder_pt)
self.embedder.eval()
set_requires_grad(self.embedder, requires_grad=False)
self.embedder_params = embedder_params
self.register_buffer('mel_basis', torch.from_numpy(
librosa.filters.mel(
sr=self.RATE,
n_fft=self.embedder_params["n_fft"],
n_mels=self.embedder_params["num_mels"])
)
)
self.resample = None
if rate != self.RATE:
self.resample = torchaudio.transforms.Resample(rate, self.RATE)
self.hop_length = hop_length
self.win_length = win_length
self.pad = pad
def get_mel(self, y):
if self.pad and y.shape[-1] < 14000:
y = F.pad(y, (0, 14000 - y.shape[-1]))
window = torch.hann_window(self.win_length).to(y)
y = torch.stft(y, n_fft=self.embedder_params["n_fft"],
hop_length=self.hop_length,
win_length=self.win_length,
window=window)
magnitudes = torch.norm(y, dim=-1, p=2) ** 2
mel = torch.log10(self.mel_basis @ magnitudes + 1e-6)
return mel
def forward(self, inputs):
dvecs = []
for wav in inputs:
mel = self.get_mel(wav)
if mel.dim() == 3:
mel = mel.squeeze(0)
dvecs += [self.embedder(mel)]
dvecs = torch.stack(dvecs)
dvec = torch.mean(dvecs, dim=0)
dvec = dvec / torch.norm(dvec)
return dvec
| EXA-1-master | exa/models/unilm-master/edgelm/examples/speech_synthesis/preprocessing/speaker_embedder/__init__.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
# author: adefossez
import logging
import torch.hub
from .demucs import Demucs
from .utils import deserialize_model
logger = logging.getLogger(__name__)
ROOT = "https://dl.fbaipublicfiles.com/adiyoss/denoiser/"
DNS_48_URL = ROOT + "dns48-11decc9d8e3f0998.th"
DNS_64_URL = ROOT + "dns64-a7761ff99a7d5bb6.th"
MASTER_64_URL = ROOT + "master64-8a5dfb4bb92753dd.th"
def _demucs(pretrained, url, **kwargs):
model = Demucs(**kwargs)
if pretrained:
state_dict = torch.hub.load_state_dict_from_url(url, map_location='cpu')
model.load_state_dict(state_dict)
return model
def dns48(pretrained=True):
return _demucs(pretrained, DNS_48_URL, hidden=48)
def dns64(pretrained=True):
return _demucs(pretrained, DNS_64_URL, hidden=64)
def master64(pretrained=True):
return _demucs(pretrained, MASTER_64_URL, hidden=64)
def add_model_flags(parser):
group = parser.add_mutually_exclusive_group(required=False)
group.add_argument(
"-m", "--model_path", help="Path to local trained model."
)
group.add_argument(
"--dns48", action="store_true",
help="Use pre-trained real time H=48 model trained on DNS."
)
group.add_argument(
"--dns64", action="store_true",
help="Use pre-trained real time H=64 model trained on DNS."
)
group.add_argument(
"--master64", action="store_true",
help="Use pre-trained real time H=64 model trained on DNS and Valentini."
)
def get_model(args):
"""
Load local model package or torchhub pre-trained model.
"""
if args.model_path:
logger.info("Loading model from %s", args.model_path)
pkg = torch.load(args.model_path)
model = deserialize_model(pkg)
elif args.dns64:
logger.info("Loading pre-trained real time H=64 model trained on DNS.")
model = dns64()
elif args.master64:
logger.info(
"Loading pre-trained real time H=64 model trained on DNS and Valentini."
)
model = master64()
else:
logger.info("Loading pre-trained real time H=48 model trained on DNS.")
model = dns48()
logger.debug(model)
return model
| EXA-1-master | exa/models/unilm-master/edgelm/examples/speech_synthesis/preprocessing/denoiser/pretrained.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
# author: adefossez
import math
import torch as th
from torch.nn import functional as F
def sinc(t):
"""sinc.
:param t: the input tensor
"""
return th.where(t == 0, th.tensor(1., device=t.device, dtype=t.dtype),
th.sin(t) / t)
def kernel_upsample2(zeros=56):
"""kernel_upsample2.
"""
win = th.hann_window(4 * zeros + 1, periodic=False)
winodd = win[1::2]
t = th.linspace(-zeros + 0.5, zeros - 0.5, 2 * zeros)
t *= math.pi
kernel = (sinc(t) * winodd).view(1, 1, -1)
return kernel
def upsample2(x, zeros=56):
"""
Upsampling the input by 2 using sinc interpolation.
Smith, Julius, and Phil Gossett. "A flexible sampling-rate conversion method."
ICASSP'84. IEEE International Conference on Acoustics, Speech, and Signal Processing.
Vol. 9. IEEE, 1984.
"""
*other, time = x.shape
kernel = kernel_upsample2(zeros).to(x)
out = F.conv1d(x.view(-1, 1, time), kernel, padding=zeros)[..., 1:].view(
*other, time
)
y = th.stack([x, out], dim=-1)
return y.view(*other, -1)
def kernel_downsample2(zeros=56):
"""kernel_downsample2.
"""
win = th.hann_window(4 * zeros + 1, periodic=False)
winodd = win[1::2]
t = th.linspace(-zeros + 0.5, zeros - 0.5, 2 * zeros)
t.mul_(math.pi)
kernel = (sinc(t) * winodd).view(1, 1, -1)
return kernel
def downsample2(x, zeros=56):
"""
Downsampling the input by 2 using sinc interpolation.
Smith, Julius, and Phil Gossett. "A flexible sampling-rate conversion method."
ICASSP'84. IEEE International Conference on Acoustics, Speech, and Signal Processing.
Vol. 9. IEEE, 1984.
"""
if x.shape[-1] % 2 != 0:
x = F.pad(x, (0, 1))
xeven = x[..., ::2]
xodd = x[..., 1::2]
*other, time = xodd.shape
kernel = kernel_downsample2(zeros).to(x)
out = xeven + F.conv1d(
xodd.view(-1, 1, time), kernel, padding=zeros
)[..., :-1].view(*other, time)
return out.view(*other, -1).mul(0.5)
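# A minimal shape check for the two resamplers above (an illustrative sketch,
# assuming a 1-channel tensor of 100 samples): upsample2 doubles the last
# dimension and downsample2 halves it, so for even-length inputs they roughly
# invert each other.
#   >>> x = th.randn(1, 100)
#   >>> upsample2(x).shape
#   torch.Size([1, 200])
#   >>> downsample2(upsample2(x)).shape
#   torch.Size([1, 100])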
| EXA-1-master | exa/models/unilm-master/edgelm/examples/speech_synthesis/preprocessing/denoiser/resample.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
| EXA-1-master | exa/models/unilm-master/edgelm/examples/speech_synthesis/preprocessing/denoiser/__init__.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
# author: adefossez
import functools
import logging
from contextlib import contextmanager
import inspect
import time
logger = logging.getLogger(__name__)
EPS = 1e-8
def capture_init(init):
"""capture_init.
Decorate `__init__` with this, and you can then
recover the *args and **kwargs passed to it in `self._init_args_kwargs`
"""
@functools.wraps(init)
def __init__(self, *args, **kwargs):
self._init_args_kwargs = (args, kwargs)
init(self, *args, **kwargs)
return __init__
def deserialize_model(package, strict=False):
"""deserialize_model.
"""
klass = package['class']
if strict:
model = klass(*package['args'], **package['kwargs'])
else:
sig = inspect.signature(klass)
kw = package['kwargs']
for key in list(kw):
if key not in sig.parameters:
logger.warning("Dropping inexistant parameter %s", key)
del kw[key]
model = klass(*package['args'], **kw)
model.load_state_dict(package['state'])
return model
def copy_state(state):
return {k: v.cpu().clone() for k, v in state.items()}
def serialize_model(model):
args, kwargs = model._init_args_kwargs
state = copy_state(model.state_dict())
return {"class": model.__class__, "args": args, "kwargs": kwargs, "state": state}
@contextmanager
def swap_state(model, state):
"""
Context manager that swaps the state of a model, e.g:
# model is in old state
with swap_state(model, new_state):
# model in new state
# model back to old state
"""
old_state = copy_state(model.state_dict())
model.load_state_dict(state)
try:
yield
finally:
model.load_state_dict(old_state)
def pull_metric(history, name):
out = []
for metrics in history:
if name in metrics:
out.append(metrics[name])
return out
class LogProgress:
"""
    Sort of like tqdm, but reporting through log lines rather than a live progress bar.
Args:
- logger: logger obtained from `logging.getLogger`,
- iterable: iterable object to wrap
- updates (int): number of lines that will be printed, e.g.
if `updates=5`, log every 1/5th of the total length.
- total (int): length of the iterable, in case it does not support
`len`.
- name (str): prefix to use in the log.
- level: logging level (like `logging.INFO`).
"""
def __init__(self,
logger,
iterable,
updates=5,
total=None,
name="LogProgress",
level=logging.INFO):
self.iterable = iterable
self.total = total or len(iterable)
self.updates = updates
self.name = name
self.logger = logger
self.level = level
def update(self, **infos):
self._infos = infos
def __iter__(self):
self._iterator = iter(self.iterable)
self._index = -1
self._infos = {}
self._begin = time.time()
return self
def __next__(self):
self._index += 1
try:
value = next(self._iterator)
except StopIteration:
raise
else:
return value
finally:
log_every = max(1, self.total // self.updates)
            # logging is delayed by 1 iteration so that metrics set via update are included
if self._index >= 1 and self._index % log_every == 0:
self._log()
def _log(self):
self._speed = (1 + self._index) / (time.time() - self._begin)
infos = " | ".join(f"{k.capitalize()} {v}" for k, v in self._infos.items())
if self._speed < 1e-4:
speed = "oo sec/it"
elif self._speed < 0.1:
speed = f"{1/self._speed:.1f} sec/it"
else:
speed = f"{self._speed:.1f} it/sec"
out = f"{self.name} | {self._index}/{self.total} | {speed}"
if infos:
out += " | " + infos
self.logger.log(self.level, out)
def colorize(text, color):
"""
Display text with some ANSI color in the terminal.
"""
code = f"\033[{color}m"
restore = "\033[0m"
return "".join([code, text, restore])
def bold(text):
"""
Display text in bold in the terminal.
"""
return colorize(text, "1")
def cal_snr(lbl, est):
import torch
y = 10.0 * torch.log10(
torch.sum(lbl**2, dim=-1) / (torch.sum((est-lbl)**2, dim=-1) + EPS) +
EPS
)
return y
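# Note on cal_snr above: it returns the signal-to-noise ratio of the estimate
# est against the reference lbl in dB, SNR = 10 * log10(sum(lbl^2) /
# sum((est - lbl)^2)), with EPS added for numerical stability; a near-perfect
# estimate yields a very large SNR, a pure-noise estimate a low or negative one.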
| EXA-1-master | exa/models/unilm-master/edgelm/examples/speech_synthesis/preprocessing/denoiser/utils.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
# author: adefossez
import math
import time
import torch as th
from torch import nn
from torch.nn import functional as F
from .resample import downsample2, upsample2
from .utils import capture_init
class BLSTM(nn.Module):
def __init__(self, dim, layers=2, bi=True):
super().__init__()
klass = nn.LSTM
self.lstm = klass(
bidirectional=bi, num_layers=layers, hidden_size=dim, input_size=dim
)
self.linear = None
if bi:
self.linear = nn.Linear(2 * dim, dim)
def forward(self, x, hidden=None):
x, hidden = self.lstm(x, hidden)
if self.linear:
x = self.linear(x)
return x, hidden
def rescale_conv(conv, reference):
std = conv.weight.std().detach()
scale = (std / reference)**0.5
conv.weight.data /= scale
if conv.bias is not None:
conv.bias.data /= scale
def rescale_module(module, reference):
for sub in module.modules():
if isinstance(sub, (nn.Conv1d, nn.ConvTranspose1d)):
rescale_conv(sub, reference)
class Demucs(nn.Module):
"""
Demucs speech enhancement model.
Args:
- chin (int): number of input channels.
- chout (int): number of output channels.
- hidden (int): number of initial hidden channels.
- depth (int): number of layers.
- kernel_size (int): kernel size for each layer.
- stride (int): stride for each layer.
- causal (bool): if false, uses BiLSTM instead of LSTM.
- resample (int): amount of resampling to apply to the input/output.
Can be one of 1, 2 or 4.
- growth (float): number of channels is multiplied by this for every layer.
- max_hidden (int): maximum number of channels. Can be useful to
control the size/speed of the model.
- normalize (bool): if true, normalize the input.
- glu (bool): if true uses GLU instead of ReLU in 1x1 convolutions.
- rescale (float): controls custom weight initialization.
See https://arxiv.org/abs/1911.13254.
- floor (float): stability flooring when normalizing.
"""
@capture_init
def __init__(self,
chin=1,
chout=1,
hidden=48,
depth=5,
kernel_size=8,
stride=4,
causal=True,
resample=4,
growth=2,
max_hidden=10_000,
normalize=True,
glu=True,
rescale=0.1,
floor=1e-3):
super().__init__()
if resample not in [1, 2, 4]:
raise ValueError("Resample should be 1, 2 or 4.")
self.chin = chin
self.chout = chout
self.hidden = hidden
self.depth = depth
self.kernel_size = kernel_size
self.stride = stride
self.causal = causal
self.floor = floor
self.resample = resample
self.normalize = normalize
self.encoder = nn.ModuleList()
self.decoder = nn.ModuleList()
activation = nn.GLU(1) if glu else nn.ReLU()
ch_scale = 2 if glu else 1
for index in range(depth):
encode = []
encode += [
nn.Conv1d(chin, hidden, kernel_size, stride),
nn.ReLU(),
nn.Conv1d(hidden, hidden * ch_scale, 1), activation,
]
self.encoder.append(nn.Sequential(*encode))
decode = []
decode += [
nn.Conv1d(hidden, ch_scale * hidden, 1), activation,
nn.ConvTranspose1d(hidden, chout, kernel_size, stride),
]
if index > 0:
decode.append(nn.ReLU())
self.decoder.insert(0, nn.Sequential(*decode))
chout = hidden
chin = hidden
hidden = min(int(growth * hidden), max_hidden)
self.lstm = BLSTM(chin, bi=not causal)
if rescale:
rescale_module(self, reference=rescale)
def valid_length(self, length):
"""
Return the nearest valid length to use with the model so that
        there are no time steps left over in the convolutions, i.e. for all
        layers, (size of the input - kernel_size) % stride = 0.
If the mixture has a valid length, the estimated sources
will have exactly the same length.
"""
length = math.ceil(length * self.resample)
for _ in range(self.depth):
length = math.ceil((length - self.kernel_size) / self.stride) + 1
length = max(length, 1)
for _ in range(self.depth):
length = (length - 1) * self.stride + self.kernel_size
length = int(math.ceil(length / self.resample))
return int(length)
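    # Worked example (assuming the default kernel_size=8, stride=4, depth=5,
    # resample=4): valid_length(1) first upsamples 1 -> 4, the encoder loop
    # clamps the length to 1 at every depth, the decoder loop expands it back
    # through 8, 36, 148, 596, 2388, and the final downsample gives
    # ceil(2388 / 4) = 597 samples, the minimum frame length used by
    # DemucsStreamer below.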
@property
def total_stride(self):
return self.stride ** self.depth // self.resample
def forward(self, mix):
if mix.dim() == 2:
mix = mix.unsqueeze(1)
if self.normalize:
mono = mix.mean(dim=1, keepdim=True)
std = mono.std(dim=-1, keepdim=True)
mix = mix / (self.floor + std)
else:
std = 1
length = mix.shape[-1]
x = mix
x = F.pad(x, (0, self.valid_length(length) - length))
if self.resample == 2:
x = upsample2(x)
elif self.resample == 4:
x = upsample2(x)
x = upsample2(x)
skips = []
for encode in self.encoder:
x = encode(x)
skips.append(x)
x = x.permute(2, 0, 1)
x, _ = self.lstm(x)
x = x.permute(1, 2, 0)
for decode in self.decoder:
skip = skips.pop(-1)
x = x + skip[..., :x.shape[-1]]
x = decode(x)
if self.resample == 2:
x = downsample2(x)
elif self.resample == 4:
x = downsample2(x)
x = downsample2(x)
x = x[..., :length]
return std * x
def fast_conv(conv, x):
"""
Faster convolution evaluation if either kernel size is 1
or length of sequence is 1.
"""
batch, chin, length = x.shape
chout, chin, kernel = conv.weight.shape
assert batch == 1
if kernel == 1:
x = x.view(chin, length)
out = th.addmm(conv.bias.view(-1, 1),
conv.weight.view(chout, chin), x)
elif length == kernel:
x = x.view(chin * kernel, 1)
out = th.addmm(conv.bias.view(-1, 1),
conv.weight.view(chout, chin * kernel), x)
else:
out = conv(x)
return out.view(batch, chout, -1)
class DemucsStreamer:
"""
Streaming implementation for Demucs. It supports being fed with any amount
of audio at a time. You will get back as much audio as possible at that
point.
Args:
- demucs (Demucs): Demucs model.
- dry (float): amount of dry (e.g. input) signal to keep. 0 is maximum
        noise removal, 1 just returns the input signal. Small values > 0
        help limit distortions.
- num_frames (int): number of frames to process at once. Higher values
will increase overall latency but improve the real time factor.
- resample_lookahead (int): extra lookahead used for the resampling.
- resample_buffer (int): size of the buffer of previous inputs/outputs
kept for resampling.
"""
def __init__(self, demucs,
dry=0,
num_frames=1,
resample_lookahead=64,
resample_buffer=256):
device = next(iter(demucs.parameters())).device
self.demucs = demucs
self.lstm_state = None
self.conv_state = None
self.dry = dry
self.resample_lookahead = resample_lookahead
resample_buffer = min(demucs.total_stride, resample_buffer)
self.resample_buffer = resample_buffer
self.frame_length = demucs.valid_length(1) + \
demucs.total_stride * (num_frames - 1)
self.total_length = self.frame_length + self.resample_lookahead
self.stride = demucs.total_stride * num_frames
self.resample_in = th.zeros(demucs.chin, resample_buffer, device=device)
self.resample_out = th.zeros(
demucs.chin, resample_buffer, device=device
)
self.frames = 0
self.total_time = 0
self.variance = 0
self.pending = th.zeros(demucs.chin, 0, device=device)
bias = demucs.decoder[0][2].bias
weight = demucs.decoder[0][2].weight
chin, chout, kernel = weight.shape
self._bias = bias.view(-1, 1).repeat(1, kernel).view(-1, 1)
self._weight = weight.permute(1, 2, 0).contiguous()
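    # Rough latency arithmetic (a sketch, assuming the default Demucs with
    # depth=5, stride=4, resample=4 and num_frames=1): total_stride is
    # 4 ** 5 // 4 = 256 samples, frame_length is valid_length(1) = 597, and
    # total_length is 597 + 64 = 661 samples, i.e. roughly 41 ms of initial
    # lag at 16 kHz before the first chunk of audio is produced.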
def reset_time_per_frame(self):
self.total_time = 0
self.frames = 0
@property
def time_per_frame(self):
return self.total_time / self.frames
def flush(self):
"""
Flush remaining audio by padding it with zero. Call this
when you have no more input and want to get back the last chunk of audio.
"""
pending_length = self.pending.shape[1]
padding = th.zeros(
self.demucs.chin, self.total_length, device=self.pending.device
)
out = self.feed(padding)
return out[:, :pending_length]
def feed(self, wav):
"""
Apply the model to mix using true real time evaluation.
Normalization is done online as is the resampling.
"""
begin = time.time()
demucs = self.demucs
resample_buffer = self.resample_buffer
stride = self.stride
resample = demucs.resample
if wav.dim() != 2:
raise ValueError("input wav should be two dimensional.")
chin, _ = wav.shape
if chin != demucs.chin:
raise ValueError(f"Expected {demucs.chin} channels, got {chin}")
self.pending = th.cat([self.pending, wav], dim=1)
outs = []
while self.pending.shape[1] >= self.total_length:
self.frames += 1
frame = self.pending[:, :self.total_length]
dry_signal = frame[:, :stride]
if demucs.normalize:
mono = frame.mean(0)
variance = (mono**2).mean()
self.variance = variance / self.frames + \
(1 - 1 / self.frames) * self.variance
frame = frame / (demucs.floor + math.sqrt(self.variance))
frame = th.cat([self.resample_in, frame], dim=-1)
self.resample_in[:] = frame[:, stride - resample_buffer:stride]
if resample == 4:
frame = upsample2(upsample2(frame))
elif resample == 2:
frame = upsample2(frame)
# remove pre sampling buffer
frame = frame[:, resample * resample_buffer:]
# remove extra samples after window
frame = frame[:, :resample * self.frame_length]
out, extra = self._separate_frame(frame)
padded_out = th.cat([self.resample_out, out, extra], 1)
self.resample_out[:] = out[:, -resample_buffer:]
if resample == 4:
out = downsample2(downsample2(padded_out))
elif resample == 2:
out = downsample2(padded_out)
else:
out = padded_out
out = out[:, resample_buffer // resample:]
out = out[:, :stride]
if demucs.normalize:
out *= math.sqrt(self.variance)
out = self.dry * dry_signal + (1 - self.dry) * out
outs.append(out)
self.pending = self.pending[:, stride:]
self.total_time += time.time() - begin
if outs:
out = th.cat(outs, 1)
else:
out = th.zeros(chin, 0, device=wav.device)
return out
def _separate_frame(self, frame):
demucs = self.demucs
skips = []
next_state = []
first = self.conv_state is None
stride = self.stride * demucs.resample
x = frame[None]
for idx, encode in enumerate(demucs.encoder):
stride //= demucs.stride
length = x.shape[2]
if idx == demucs.depth - 1:
                # This is slightly faster for the last conv
x = fast_conv(encode[0], x)
x = encode[1](x)
x = fast_conv(encode[2], x)
x = encode[3](x)
else:
if not first:
prev = self.conv_state.pop(0)
prev = prev[..., stride:]
tgt = (length - demucs.kernel_size) // demucs.stride + 1
missing = tgt - prev.shape[-1]
offset = length - demucs.kernel_size - \
demucs.stride * (missing - 1)
x = x[..., offset:]
x = encode[1](encode[0](x))
x = fast_conv(encode[2], x)
x = encode[3](x)
if not first:
x = th.cat([prev, x], -1)
next_state.append(x)
skips.append(x)
x = x.permute(2, 0, 1)
x, self.lstm_state = demucs.lstm(x, self.lstm_state)
x = x.permute(1, 2, 0)
        # In the following, x contains only correct samples, i.e. the ones
        # for which each time position is covered by two windows of the upper
# layer. extra contains extra samples to the right, and is used only as
# a better padding for the online resampling.
extra = None
for idx, decode in enumerate(demucs.decoder):
skip = skips.pop(-1)
x += skip[..., :x.shape[-1]]
x = fast_conv(decode[0], x)
x = decode[1](x)
if extra is not None:
skip = skip[..., x.shape[-1]:]
extra += skip[..., :extra.shape[-1]]
extra = decode[2](decode[1](decode[0](extra)))
x = decode[2](x)
next_state.append(
x[..., -demucs.stride:] - decode[2].bias.view(-1, 1)
)
if extra is None:
extra = x[..., -demucs.stride:]
else:
extra[..., :demucs.stride] += next_state[-1]
x = x[..., :-demucs.stride]
if not first:
prev = self.conv_state.pop(0)
x[..., :demucs.stride] += prev
if idx != demucs.depth - 1:
x = decode[3](x)
extra = decode[3](extra)
self.conv_state = next_state
return x[0], extra[0]
def test():
import argparse
parser = argparse.ArgumentParser(
"denoiser.demucs",
description="Benchmark the streaming Demucs implementation, as well as "
"checking the delta with the offline implementation.")
parser.add_argument("--depth", default=5, type=int)
parser.add_argument("--resample", default=4, type=int)
parser.add_argument("--hidden", default=48, type=int)
parser.add_argument("--sample_rate", default=16000, type=float)
parser.add_argument("--device", default="cpu")
parser.add_argument("-t", "--num_threads", type=int)
parser.add_argument("-f", "--num_frames", type=int, default=1)
args = parser.parse_args()
if args.num_threads:
th.set_num_threads(args.num_threads)
sr = args.sample_rate
sr_ms = sr / 1000
demucs = Demucs(
depth=args.depth, hidden=args.hidden, resample=args.resample
).to(args.device)
x = th.randn(1, int(sr * 4)).to(args.device)
out = demucs(x[None])[0]
streamer = DemucsStreamer(demucs, num_frames=args.num_frames)
out_rt = []
frame_size = streamer.total_length
with th.no_grad():
while x.shape[1] > 0:
out_rt.append(streamer.feed(x[:, :frame_size]))
x = x[:, frame_size:]
frame_size = streamer.demucs.total_stride
out_rt.append(streamer.flush())
out_rt = th.cat(out_rt, 1)
model_size = sum(p.numel() for p in demucs.parameters()) * 4 / 2**20
initial_lag = streamer.total_length / sr_ms
tpf = 1000 * streamer.time_per_frame
print(f"model size: {model_size:.1f}MB, ", end='')
print(f"delta batch/streaming: {th.norm(out - out_rt) / th.norm(out):.2%}")
print(f"initial lag: {initial_lag:.1f}ms, ", end='')
print(f"stride: {streamer.stride * args.num_frames / sr_ms:.1f}ms")
print(f"time per frame: {tpf:.1f}ms, ", end='')
rtf = (1000 * streamer.time_per_frame) / (streamer.stride / sr_ms)
print(f"RTF: {rtf:.2f}")
print(f"Total lag with computation: {initial_lag + tpf:.1f}ms")
if __name__ == "__main__":
test()
| EXA-1-master | exa/models/unilm-master/edgelm/examples/speech_synthesis/preprocessing/denoiser/demucs.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import collections
import contextlib
import wave
try:
import webrtcvad
except ImportError:
raise ImportError("Please install py-webrtcvad: pip install webrtcvad")
import argparse
import os
import logging
from tqdm import tqdm
AUDIO_SUFFIX = '.wav'
FS_MS = 30
SCALE = 6e-5
THRESHOLD = 0.3
def read_wave(path):
"""Reads a .wav file.
Takes the path, and returns (PCM audio data, sample rate).
"""
with contextlib.closing(wave.open(path, 'rb')) as wf:
num_channels = wf.getnchannels()
assert num_channels == 1
sample_width = wf.getsampwidth()
assert sample_width == 2
sample_rate = wf.getframerate()
assert sample_rate in (8000, 16000, 32000, 48000)
pcm_data = wf.readframes(wf.getnframes())
return pcm_data, sample_rate
def write_wave(path, audio, sample_rate):
"""Writes a .wav file.
Takes path, PCM audio data, and sample rate.
"""
with contextlib.closing(wave.open(path, 'wb')) as wf:
wf.setnchannels(1)
wf.setsampwidth(2)
wf.setframerate(sample_rate)
wf.writeframes(audio)
class Frame(object):
"""Represents a "frame" of audio data."""
def __init__(self, bytes, timestamp, duration):
self.bytes = bytes
self.timestamp = timestamp
self.duration = duration
def frame_generator(frame_duration_ms, audio, sample_rate):
"""Generates audio frames from PCM audio data.
Takes the desired frame duration in milliseconds, the PCM data, and
the sample rate.
Yields Frames of the requested duration.
"""
n = int(sample_rate * (frame_duration_ms / 1000.0) * 2)
offset = 0
timestamp = 0.0
duration = (float(n) / sample_rate) / 2.0
while offset + n < len(audio):
yield Frame(audio[offset:offset + n], timestamp, duration)
timestamp += duration
offset += n
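# Illustrative frame-size arithmetic: each Frame holds
# sample_rate * (frame_duration_ms / 1000) samples of 16-bit PCM, so for
# 16 kHz audio and FS_MS = 30 that is 480 samples, i.e. n = 960 bytes per frame.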
def vad_collector(sample_rate, frame_duration_ms,
padding_duration_ms, vad, frames):
"""Filters out non-voiced audio frames.
Given a webrtcvad.Vad and a source of audio frames, yields only
the voiced audio.
Uses a padded, sliding window algorithm over the audio frames.
When more than 90% of the frames in the window are voiced (as
reported by the VAD), the collector triggers and begins yielding
audio frames. Then the collector waits until 90% of the frames in
the window are unvoiced to detrigger.
The window is padded at the front and back to provide a small
amount of silence or the beginnings/endings of speech around the
voiced frames.
Arguments:
sample_rate - The audio sample rate, in Hz.
frame_duration_ms - The frame duration in milliseconds.
padding_duration_ms - The amount to pad the window, in milliseconds.
vad - An instance of webrtcvad.Vad.
frames - a source of audio frames (sequence or generator).
Returns: A generator that yields PCM audio data.
"""
num_padding_frames = int(padding_duration_ms / frame_duration_ms)
# We use a deque for our sliding window/ring buffer.
ring_buffer = collections.deque(maxlen=num_padding_frames)
# We have two states: TRIGGERED and NOTTRIGGERED. We start in the
# NOTTRIGGERED state.
triggered = False
voiced_frames = []
for frame in frames:
is_speech = vad.is_speech(frame.bytes, sample_rate)
# sys.stdout.write('1' if is_speech else '0')
if not triggered:
ring_buffer.append((frame, is_speech))
num_voiced = len([f for f, speech in ring_buffer if speech])
# If we're NOTTRIGGERED and more than 90% of the frames in
# the ring buffer are voiced frames, then enter the
# TRIGGERED state.
if num_voiced > 0.9 * ring_buffer.maxlen:
triggered = True
# We want to yield all the audio we see from now until
# we are NOTTRIGGERED, but we have to start with the
# audio that's already in the ring buffer.
for f, _ in ring_buffer:
voiced_frames.append(f)
ring_buffer.clear()
else:
# We're in the TRIGGERED state, so collect the audio data
# and add it to the ring buffer.
voiced_frames.append(frame)
ring_buffer.append((frame, is_speech))
num_unvoiced = len([f for f, speech in ring_buffer if not speech])
# If more than 90% of the frames in the ring buffer are
# unvoiced, then enter NOTTRIGGERED and yield whatever
# audio we've collected.
if num_unvoiced > 0.9 * ring_buffer.maxlen:
triggered = False
yield [b''.join([f.bytes for f in voiced_frames]),
voiced_frames[0].timestamp, voiced_frames[-1].timestamp]
ring_buffer.clear()
voiced_frames = []
# If we have any leftover voiced audio when we run out of input,
# yield it.
if voiced_frames:
yield [b''.join([f.bytes for f in voiced_frames]),
voiced_frames[0].timestamp, voiced_frames[-1].timestamp]
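# With the values used in this file (FS_MS = 30 ms frames, 300 ms of padding),
# vad_collector keeps a ring buffer of 300 / 30 = 10 frames: it triggers once
# more than 90% of them (here, all 10) are voiced, detriggers symmetrically
# once more than 90% are unvoiced, and yields each collected segment together
# with its start and end timestamps.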
def main(args):
# create output folder
    try:
        os.makedirs(args.out_path, exist_ok=True)
    except OSError:
        logging.error("Cannot create output folder")
        exit(-1)
# build vad object
vad = webrtcvad.Vad(int(args.agg))
# iterating over wavs in dir
for file in tqdm(os.listdir(args.in_path)):
if file.endswith(AUDIO_SUFFIX):
audio_inpath = os.path.join(args.in_path, file)
audio_outpath = os.path.join(args.out_path, file)
audio, sample_rate = read_wave(audio_inpath)
frames = frame_generator(FS_MS, audio, sample_rate)
frames = list(frames)
segments = vad_collector(sample_rate, FS_MS, 300, vad, frames)
merge_segments = list()
timestamp_start = 0.0
timestamp_end = 0.0
            # remove silence at the start/end and cap long stretches of silence
for i, segment in enumerate(segments):
merge_segments.append(segment[0])
if i and timestamp_start:
sil_duration = segment[1] - timestamp_end
if sil_duration > THRESHOLD:
merge_segments.append(int(THRESHOLD / SCALE)*(b'\x00'))
else:
merge_segments.append(int((sil_duration / SCALE))*(b'\x00'))
timestamp_start = segment[1]
timestamp_end = segment[2]
segment = b''.join(merge_segments)
write_wave(audio_outpath, segment, sample_rate)
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Apply VAD to a folder of audio files.')
parser.add_argument('in_path', type=str, help='Path to the input files')
parser.add_argument('out_path', type=str,
help='Path to save the processed files')
parser.add_argument('--agg', type=int, default=3,
help='The level of aggressiveness of the VAD: [0-3]')
args = parser.parse_args()
main(args)
| EXA-1-master | exa/models/unilm-master/edgelm/examples/speech_synthesis/preprocessing/vad/__init__.py |
from . import criterions, models, tasks # noqa
| EXA-1-master | exa/models/unilm-master/edgelm/examples/discriminative_reranking_nmt/__init__.py |
#!/usr/bin/env python3 -u
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
Score raw text with a trained model.
"""
from collections import namedtuple
import logging
from multiprocessing import Pool
import sys
import os
import random
import numpy as np
import sacrebleu
import torch
from fairseq import checkpoint_utils, options, utils
logger = logging.getLogger("fairseq_cli.drnmt_rerank")
logger.setLevel(logging.INFO)
Batch = namedtuple("Batch", "ids src_tokens src_lengths")
pool_init_variables = {}
def init_loaded_scores(mt_scores, model_scores, hyp, ref):
global pool_init_variables
pool_init_variables["mt_scores"] = mt_scores
pool_init_variables["model_scores"] = model_scores
pool_init_variables["hyp"] = hyp
pool_init_variables["ref"] = ref
def parse_fairseq_gen(filename, task):
source = {}
hypos = {}
scores = {}
with open(filename, "r", encoding="utf-8") as f:
for line in f:
line = line.strip()
if line.startswith("S-"): # source
uid, text = line.split("\t", 1)
uid = int(uid[2:])
source[uid] = text
elif line.startswith("D-"): # hypo
uid, score, text = line.split("\t", 2)
uid = int(uid[2:])
if uid not in hypos:
hypos[uid] = []
scores[uid] = []
hypos[uid].append(text)
scores[uid].append(float(score))
else:
continue
source_out = [source[i] for i in range(len(hypos))]
hypos_out = [h for i in range(len(hypos)) for h in hypos[i]]
scores_out = [s for i in range(len(scores)) for s in scores[i]]
return source_out, hypos_out, scores_out
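# parse_fairseq_gen expects the plain-text output of fairseq-generate /
# fairseq-interactive, where each line is tab-separated (hypothetical example):
#   S-0    we are going home
#   D-0    -0.41    first hypothesis for sentence 0
#   D-0    -0.77    second hypothesis for sentence 0
# It returns the sources in sentence order plus the beam hypotheses of each
# sentence and their forward-model scores, flattened into single lists.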
def read_target(filename):
with open(filename, "r", encoding="utf-8") as f:
output = [line.strip() for line in f]
return output
def make_batches(args, src, hyp, task, max_positions, encode_fn):
assert len(src) * args.beam == len(
hyp
), f"Expect {len(src) * args.beam} hypotheses for {len(src)} source sentences with beam size {args.beam}. Got {len(hyp)} hypotheses intead."
hyp_encode = [
task.source_dictionary.encode_line(encode_fn(h), add_if_not_exist=False).long()
for h in hyp
]
if task.cfg.include_src:
src_encode = [
task.source_dictionary.encode_line(
encode_fn(s), add_if_not_exist=False
).long()
for s in src
]
tokens = [(src_encode[i // args.beam], h) for i, h in enumerate(hyp_encode)]
lengths = [(t1.numel(), t2.numel()) for t1, t2 in tokens]
else:
tokens = [(h,) for h in hyp_encode]
lengths = [(h.numel(),) for h in hyp_encode]
itr = task.get_batch_iterator(
dataset=task.build_dataset_for_inference(tokens, lengths),
max_tokens=args.max_tokens,
max_sentences=args.batch_size,
max_positions=max_positions,
ignore_invalid_inputs=args.skip_invalid_size_inputs_valid_test,
).next_epoch_itr(shuffle=False)
for batch in itr:
yield Batch(
ids=batch["id"],
src_tokens=batch["net_input"]["src_tokens"],
src_lengths=batch["net_input"]["src_lengths"],
)
def decode_rerank_scores(args):
if args.max_tokens is None and args.batch_size is None:
args.batch_size = 1
logger.info(args)
use_cuda = torch.cuda.is_available() and not args.cpu
# Load ensemble
logger.info("loading model(s) from {}".format(args.path))
models, _model_args, task = checkpoint_utils.load_model_ensemble_and_task(
[args.path], arg_overrides=eval(args.model_overrides),
)
for model in models:
if args.fp16:
model.half()
if use_cuda:
model.cuda()
# Initialize generator
generator = task.build_generator(args)
# Handle tokenization and BPE
tokenizer = task.build_tokenizer(args)
bpe = task.build_bpe(args)
def encode_fn(x):
if tokenizer is not None:
x = tokenizer.encode(x)
if bpe is not None:
x = bpe.encode(x)
return x
max_positions = utils.resolve_max_positions(
task.max_positions(), *[model.max_positions() for model in models]
)
src, hyp, mt_scores = parse_fairseq_gen(args.in_text, task)
model_scores = {}
logger.info("decode reranker score")
for batch in make_batches(args, src, hyp, task, max_positions, encode_fn):
src_tokens = batch.src_tokens
src_lengths = batch.src_lengths
if use_cuda:
src_tokens = src_tokens.cuda()
src_lengths = src_lengths.cuda()
sample = {
"net_input": {"src_tokens": src_tokens, "src_lengths": src_lengths},
}
scores = task.inference_step(generator, models, sample)
for id, sc in zip(batch.ids.tolist(), scores.tolist()):
model_scores[id] = sc[0]
model_scores = [model_scores[i] for i in range(len(model_scores))]
return src, hyp, mt_scores, model_scores
def get_score(mt_s, md_s, w1, lp, tgt_len):
return mt_s / (tgt_len ** lp) * w1 + md_s
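# The reranking score is a length-normalized linear interpolation of the
# forward MT score and the discriminative reranker score. A small numeric
# sketch with made-up values: for a 10-token hypothesis with mt_s = -5.0,
# md_s = 0.8, w1 = 1.0 and lp = 1.0,
# get_score(-5.0, 0.8, 1.0, 1.0, 10) = -5.0 / 10 * 1.0 + 0.8 = 0.3.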
def get_best_hyps(mt_scores, md_scores, hypos, fw_weight, lenpen, beam):
assert len(mt_scores) == len(md_scores) and len(mt_scores) == len(hypos)
hypo_scores = []
best_hypos = []
best_scores = []
offset = 0
for i in range(len(hypos)):
tgt_len = len(hypos[i].split())
hypo_scores.append(
get_score(mt_scores[i], md_scores[i], fw_weight, lenpen, tgt_len)
)
if (i + 1) % beam == 0:
max_i = np.argmax(hypo_scores)
best_hypos.append(hypos[offset + max_i])
best_scores.append(hypo_scores[max_i])
hypo_scores = []
offset += beam
return best_hypos, best_scores
def eval_metric(args, hypos, ref):
if args.metric == "bleu":
score = sacrebleu.corpus_bleu(hypos, [ref]).score
else:
score = sacrebleu.corpus_ter(hypos, [ref]).score
return score
def score_target_hypo(args, fw_weight, lp):
mt_scores = pool_init_variables["mt_scores"]
model_scores = pool_init_variables["model_scores"]
hyp = pool_init_variables["hyp"]
ref = pool_init_variables["ref"]
best_hypos, _ = get_best_hyps(
mt_scores, model_scores, hyp, fw_weight, lp, args.beam
)
rerank_eval = None
if ref:
rerank_eval = eval_metric(args, best_hypos, ref)
print(f"fw_weight {fw_weight}, lenpen {lp}, eval {rerank_eval}")
return rerank_eval
def print_result(best_scores, best_hypos, output_file):
for i, (s, h) in enumerate(zip(best_scores, best_hypos)):
print(f"{i}\t{s}\t{h}", file=output_file)
def main(args):
utils.import_user_module(args)
src, hyp, mt_scores, model_scores = decode_rerank_scores(args)
assert (
not args.tune or args.target_text is not None
), "--target-text has to be set when tuning weights"
if args.target_text:
ref = read_target(args.target_text)
assert len(src) == len(
ref
), f"different numbers of source and target sentences ({len(src)} vs. {len(ref)})"
orig_best_hypos = [hyp[i] for i in range(0, len(hyp), args.beam)]
orig_eval = eval_metric(args, orig_best_hypos, ref)
if args.tune:
logger.info("tune weights for reranking")
random_params = np.array(
[
[
random.uniform(
args.lower_bound_fw_weight, args.upper_bound_fw_weight
),
random.uniform(args.lower_bound_lenpen, args.upper_bound_lenpen),
]
for k in range(args.num_trials)
]
)
logger.info("launching pool")
with Pool(
32,
initializer=init_loaded_scores,
initargs=(mt_scores, model_scores, hyp, ref),
) as p:
rerank_scores = p.starmap(
score_target_hypo,
[
(args, random_params[i][0], random_params[i][1],)
for i in range(args.num_trials)
],
)
if args.metric == "bleu":
best_index = np.argmax(rerank_scores)
else:
best_index = np.argmin(rerank_scores)
best_fw_weight = random_params[best_index][0]
best_lenpen = random_params[best_index][1]
else:
assert (
args.lenpen is not None and args.fw_weight is not None
), "--lenpen and --fw-weight should be set"
best_fw_weight, best_lenpen = args.fw_weight, args.lenpen
best_hypos, best_scores = get_best_hyps(
mt_scores, model_scores, hyp, best_fw_weight, best_lenpen, args.beam
)
if args.results_path is not None:
os.makedirs(args.results_path, exist_ok=True)
output_path = os.path.join(
args.results_path, "generate-{}.txt".format(args.gen_subset),
)
with open(output_path, "w", buffering=1, encoding="utf-8") as o:
print_result(best_scores, best_hypos, o)
else:
print_result(best_scores, best_hypos, sys.stdout)
if args.target_text:
rerank_eval = eval_metric(args, best_hypos, ref)
print(f"before reranking, {args.metric.upper()}:", orig_eval)
print(
f"after reranking with fw_weight={best_fw_weight}, lenpen={best_lenpen}, {args.metric.upper()}:",
rerank_eval,
)
def cli_main():
parser = options.get_generation_parser(interactive=True)
parser.add_argument(
"--in-text",
default=None,
required=True,
help="text from fairseq-interactive output, containing source sentences and hypotheses",
)
parser.add_argument("--target-text", default=None, help="reference text")
parser.add_argument("--metric", type=str, choices=["bleu", "ter"], default="bleu")
parser.add_argument(
"--tune",
action="store_true",
help="if set, tune weights on fw scores and lenpen instead of applying fixed weights for reranking",
)
parser.add_argument(
"--lower-bound-fw-weight",
default=0.0,
type=float,
help="lower bound of search space",
)
parser.add_argument(
"--upper-bound-fw-weight",
default=3,
type=float,
help="upper bound of search space",
)
parser.add_argument(
"--lower-bound-lenpen",
default=0.0,
type=float,
help="lower bound of search space",
)
parser.add_argument(
"--upper-bound-lenpen",
default=3,
type=float,
help="upper bound of search space",
)
parser.add_argument(
"--fw-weight", type=float, default=None, help="weight on the fw model score"
)
parser.add_argument(
"--num-trials",
default=1000,
type=int,
help="number of trials to do for random search",
)
args = options.parse_args_and_arch(parser)
main(args)
if __name__ == "__main__":
cli_main()
| EXA-1-master | exa/models/unilm-master/edgelm/examples/discriminative_reranking_nmt/drnmt_rerank.py |
from .discriminative_reranking_task import DiscriminativeRerankingNMTTask
__all__ = [
"DiscriminativeRerankingNMTTask",
]
| EXA-1-master | exa/models/unilm-master/edgelm/examples/discriminative_reranking_nmt/tasks/__init__.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from dataclasses import dataclass, field
import itertools
import logging
import os
import numpy as np
import torch
from fairseq import metrics
from fairseq.data import (
ConcatDataset,
ConcatSentencesDataset,
data_utils,
Dictionary,
IdDataset,
indexed_dataset,
NestedDictionaryDataset,
NumSamplesDataset,
NumelDataset,
PrependTokenDataset,
RawLabelDataset,
RightPadDataset,
SortDataset,
TruncateDataset,
TokenBlockDataset,
)
from fairseq.dataclass import ChoiceEnum, FairseqDataclass
from fairseq.tasks import FairseqTask, register_task
from omegaconf import II, MISSING
EVAL_BLEU_ORDER = 4
TARGET_METRIC_CHOICES = ChoiceEnum(["bleu", "ter"])
logger = logging.getLogger(__name__)
@dataclass
class DiscriminativeRerankingNMTConfig(FairseqDataclass):
data: str = field(default=MISSING, metadata={"help": "path to data directory"})
num_data_splits: int = field(
default=1, metadata={"help": "total number of data splits"}
)
no_shuffle: bool = field(
default=False, metadata={"help": "do not shuffle training data"}
)
max_positions: int = field(
default=512, metadata={"help": "number of positional embeddings to learn"}
)
include_src: bool = field(
default=False, metadata={"help": "include source sentence"}
)
mt_beam: int = field(default=50, metadata={"help": "beam size of input hypotheses"})
eval_target_metric: bool = field(
default=False,
metadata={"help": "evaluation with the target metric during validation"},
)
target_metric: TARGET_METRIC_CHOICES = field(
default="bleu", metadata={"help": "name of the target metric to optimize for"}
)
train_subset: str = field(
default=II("dataset.train_subset"),
metadata={"help": "data subset to use for training (e.g. train, valid, test)"},
)
seed: int = field(
default=II("common.seed"),
metadata={"help": "pseudo random number generator seed"},
)
class RerankerScorer(object):
"""Scores the target for a given (source (optional), target) input."""
def __init__(self, args, mt_beam):
self.mt_beam = mt_beam
@torch.no_grad()
def generate(self, models, sample, **kwargs):
"""Score a batch of translations."""
net_input = sample["net_input"]
assert len(models) == 1, "does not support model ensemble"
model = models[0]
bs = net_input["src_tokens"].shape[0]
assert (
model.joint_classification == "none" or bs % self.mt_beam == 0
), f"invalid batch size ({bs}) for joint classification with beam size ({self.mt_beam})"
model.eval()
logits = model(**net_input)
batch_out = model.sentence_forward(logits, net_input["src_tokens"])
if model.joint_classification == "sent":
batch_out = model.joint_forward(
batch_out.view(self.mt_beam, bs // self.mt_beam, -1)
)
scores = model.classification_forward(
batch_out.view(bs, 1, -1)
) # input: B x T x C
return scores
@register_task(
"discriminative_reranking_nmt", dataclass=DiscriminativeRerankingNMTConfig
)
class DiscriminativeRerankingNMTTask(FairseqTask):
"""
Translation rerank task.
The input can be either (src, tgt) sentence pairs or tgt sentence only.
"""
cfg: DiscriminativeRerankingNMTConfig
def __init__(self, cfg: DiscriminativeRerankingNMTConfig, data_dictionary=None):
super().__init__(cfg)
self.dictionary = data_dictionary
self._max_positions = cfg.max_positions
# args.tokens_per_sample = self._max_positions
# self.num_classes = 1 # for model
@classmethod
def load_dictionary(cls, cfg, filename):
"""Load the dictionary from the filename"""
dictionary = Dictionary.load(filename)
dictionary.add_symbol("<mask>") # for loading pretrained XLMR model
return dictionary
@classmethod
def setup_task(cls, cfg: DiscriminativeRerankingNMTConfig, **kwargs):
# load data dictionary (assume joint dictionary)
data_path = cfg.data
data_dict = cls.load_dictionary(
cfg, os.path.join(data_path, "input_src/dict.txt")
)
logger.info("[input] src dictionary: {} types".format(len(data_dict)))
return DiscriminativeRerankingNMTTask(cfg, data_dict)
def load_dataset(self, split, epoch=0, combine=False, **kwargs):
"""Load a given dataset split (e.g., train, valid, test)."""
if self.cfg.data.endswith("1"):
data_shard = (epoch - 1) % self.cfg.num_data_splits + 1
data_path = self.cfg.data[:-1] + str(data_shard)
else:
data_path = self.cfg.data
def get_path(type, data_split):
return os.path.join(data_path, str(type), data_split)
def make_dataset(type, dictionary, data_split, combine):
split_path = get_path(type, data_split)
dataset = data_utils.load_indexed_dataset(
split_path, dictionary, combine=combine,
)
return dataset
def load_split(data_split, metric):
input_src = None
if self.cfg.include_src:
input_src = make_dataset(
"input_src", self.dictionary, data_split, combine=False
)
assert input_src is not None, "could not find dataset: {}".format(
get_path("input_src", data_split)
)
input_tgt = make_dataset(
"input_tgt", self.dictionary, data_split, combine=False
)
assert input_tgt is not None, "could not find dataset: {}".format(
get_path("input_tgt", data_split)
)
label_path = f"{get_path(metric, data_split)}.{metric}"
assert os.path.exists(label_path), f"could not find dataset: {label_path}"
np_labels = np.loadtxt(label_path)
if self.cfg.target_metric == "ter":
np_labels = -np_labels
label = RawLabelDataset(np_labels)
return input_src, input_tgt, label
src_datasets = []
tgt_datasets = []
label_datasets = []
if split == self.cfg.train_subset:
for k in itertools.count():
split_k = "train" + (str(k) if k > 0 else "")
prefix = os.path.join(data_path, "input_tgt", split_k)
if not indexed_dataset.dataset_exists(prefix, impl=None):
if k > 0:
break
else:
raise FileNotFoundError(f"Dataset not found: {prefix}")
input_src, input_tgt, label = load_split(
split_k, self.cfg.target_metric
)
src_datasets.append(input_src)
tgt_datasets.append(input_tgt)
label_datasets.append(label)
else:
input_src, input_tgt, label = load_split(split, self.cfg.target_metric)
src_datasets.append(input_src)
tgt_datasets.append(input_tgt)
label_datasets.append(label)
if len(tgt_datasets) == 1:
input_tgt, label = tgt_datasets[0], label_datasets[0]
if self.cfg.include_src:
input_src = src_datasets[0]
else:
input_tgt = ConcatDataset(tgt_datasets)
label = ConcatDataset(label_datasets)
if self.cfg.include_src:
input_src = ConcatDataset(src_datasets)
input_tgt = TruncateDataset(input_tgt, self.cfg.max_positions)
if self.cfg.include_src:
input_src = PrependTokenDataset(input_src, self.dictionary.bos())
input_src = TruncateDataset(input_src, self.cfg.max_positions)
src_lengths = NumelDataset(input_src, reduce=False)
src_tokens = ConcatSentencesDataset(input_src, input_tgt)
else:
src_tokens = PrependTokenDataset(input_tgt, self.dictionary.bos())
src_lengths = NumelDataset(src_tokens, reduce=False)
dataset = {
"id": IdDataset(),
"net_input": {
"src_tokens": RightPadDataset(
src_tokens, pad_idx=self.source_dictionary.pad(),
),
"src_lengths": src_lengths,
},
"nsentences": NumSamplesDataset(),
"ntokens": NumelDataset(src_tokens, reduce=True),
"target": label,
}
dataset = NestedDictionaryDataset(dataset, sizes=[src_tokens.sizes],)
assert len(dataset) % self.cfg.mt_beam == 0, (
"dataset size (%d) is not a multiple of beam size (%d)"
% (len(dataset), self.cfg.mt_beam)
)
# no need to shuffle valid/test sets
if not self.cfg.no_shuffle and split == self.cfg.train_subset:
            # need to keep all hypotheses of a sentence together
start_idx = np.arange(0, len(dataset), self.cfg.mt_beam)
with data_utils.numpy_seed(self.cfg.seed + epoch):
np.random.shuffle(start_idx)
idx = np.arange(0, self.cfg.mt_beam)
shuffle = np.tile(idx, (len(start_idx), 1)).reshape(-1) + np.tile(
start_idx, (self.cfg.mt_beam, 1)
).transpose().reshape(-1)
dataset = SortDataset(dataset, sort_order=[shuffle],)
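            # Illustrative example of the shuffle above (hypothetical numbers):
            # with mt_beam = 2 and 6 examples, start_idx = [0, 2, 4]; if it is
            # shuffled to [4, 0, 2], the resulting order is [4, 5, 0, 1, 2, 3],
            # i.e. whole beams are permuted while the hypotheses of each source
            # sentence stay contiguous.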
logger.info(f"Loaded {split} with #samples: {len(dataset)}")
self.datasets[split] = dataset
return self.datasets[split]
def build_dataset_for_inference(self, src_tokens, src_lengths, **kwargs):
assert not self.cfg.include_src or len(src_tokens[0]) == 2
input_src = None
if self.cfg.include_src:
input_src = TokenBlockDataset(
[t[0] for t in src_tokens],
[l[0] for l in src_lengths],
block_size=None, # ignored for "eos" break mode
pad=self.source_dictionary.pad(),
eos=self.source_dictionary.eos(),
break_mode="eos",
)
input_src = PrependTokenDataset(input_src, self.dictionary.bos())
input_src = TruncateDataset(input_src, self.cfg.max_positions)
input_tgt = TokenBlockDataset(
[t[-1] for t in src_tokens],
[l[-1] for l in src_lengths],
block_size=None, # ignored for "eos" break mode
pad=self.source_dictionary.pad(),
eos=self.source_dictionary.eos(),
break_mode="eos",
)
input_tgt = TruncateDataset(input_tgt, self.cfg.max_positions)
if self.cfg.include_src:
src_tokens = ConcatSentencesDataset(input_src, input_tgt)
src_lengths = NumelDataset(input_src, reduce=False)
else:
input_tgt = PrependTokenDataset(input_tgt, self.dictionary.bos())
src_tokens = input_tgt
src_lengths = NumelDataset(src_tokens, reduce=False)
dataset = {
"id": IdDataset(),
"net_input": {
"src_tokens": RightPadDataset(
src_tokens, pad_idx=self.source_dictionary.pad(),
),
"src_lengths": src_lengths,
},
"nsentences": NumSamplesDataset(),
"ntokens": NumelDataset(src_tokens, reduce=True),
}
return NestedDictionaryDataset(dataset, sizes=[src_tokens.sizes],)
def build_model(self, cfg: FairseqDataclass):
return super().build_model(cfg)
def build_generator(self, args):
return RerankerScorer(args, mt_beam=self.cfg.mt_beam)
def max_positions(self):
return self._max_positions
@property
def source_dictionary(self):
return self.dictionary
@property
def target_dictionary(self):
return self.dictionary
def create_dummy_batch(self, device):
dummy_target = (
torch.zeros(self.cfg.mt_beam, EVAL_BLEU_ORDER * 2 + 3).long().to(device)
            if self.cfg.target_metric != "ter"
else torch.zeros(self.cfg.mt_beam, 3).long().to(device)
)
return {
"id": torch.zeros(self.cfg.mt_beam, 1).long().to(device),
"net_input": {
"src_tokens": torch.zeros(self.cfg.mt_beam, 4).long().to(device),
"src_lengths": torch.ones(self.cfg.mt_beam, 1).long().to(device),
},
"nsentences": 0,
"ntokens": 0,
"target": dummy_target,
}
def train_step(
self, sample, model, criterion, optimizer, update_num, ignore_grad=False
):
if ignore_grad and sample is None:
sample = self.create_dummy_batch(model.device)
return super().train_step(
sample, model, criterion, optimizer, update_num, ignore_grad
)
def valid_step(self, sample, model, criterion):
if sample is None:
sample = self.create_dummy_batch(model.device)
loss, sample_size, logging_output = super().valid_step(sample, model, criterion)
if not self.cfg.eval_target_metric:
return loss, sample_size, logging_output
scores = logging_output["scores"]
if self.cfg.target_metric == "bleu":
assert sample["target"].shape[1] == EVAL_BLEU_ORDER * 2 + 3, (
"target does not contain enough information ("
+ str(sample["target"].shape[1])
+ "for evaluating BLEU"
)
max_id = torch.argmax(scores, dim=1)
select_id = max_id + torch.arange(
0, sample_size * self.cfg.mt_beam, self.cfg.mt_beam
).to(max_id.device)
bleu_data = sample["target"][select_id, 1:].sum(0).data
logging_output["_bleu_sys_len"] = bleu_data[0]
logging_output["_bleu_ref_len"] = bleu_data[1]
for i in range(EVAL_BLEU_ORDER):
logging_output["_bleu_counts_" + str(i)] = bleu_data[2 + i]
logging_output["_bleu_totals_" + str(i)] = bleu_data[
2 + EVAL_BLEU_ORDER + i
]
elif self.cfg.target_metric == "ter":
assert sample["target"].shape[1] == 3, (
"target does not contain enough information ("
+ str(sample["target"].shape[1])
+ "for evaluating TER"
)
max_id = torch.argmax(scores, dim=1)
select_id = max_id + torch.arange(
0, sample_size * self.cfg.mt_beam, self.cfg.mt_beam
).to(max_id.device)
ter_data = sample["target"][select_id, 1:].sum(0).data
logging_output["_ter_num_edits"] = -ter_data[0]
logging_output["_ter_ref_len"] = -ter_data[1]
return loss, sample_size, logging_output
def reduce_metrics(self, logging_outputs, criterion):
super().reduce_metrics(logging_outputs, criterion)
if not self.cfg.eval_target_metric:
return
def sum_logs(key):
return sum(log.get(key, 0) for log in logging_outputs)
if self.cfg.target_metric == "bleu":
counts, totals = [], []
for i in range(EVAL_BLEU_ORDER):
counts.append(sum_logs("_bleu_counts_" + str(i)))
totals.append(sum_logs("_bleu_totals_" + str(i)))
if max(totals) > 0:
# log counts as numpy arrays -- log_scalar will sum them correctly
metrics.log_scalar("_bleu_counts", np.array(counts))
metrics.log_scalar("_bleu_totals", np.array(totals))
metrics.log_scalar("_bleu_sys_len", sum_logs("_bleu_sys_len"))
metrics.log_scalar("_bleu_ref_len", sum_logs("_bleu_ref_len"))
def compute_bleu(meters):
import inspect
import sacrebleu
fn_sig = inspect.getfullargspec(sacrebleu.compute_bleu)[0]
if "smooth_method" in fn_sig:
smooth = {"smooth_method": "exp"}
else:
smooth = {"smooth": "exp"}
bleu = sacrebleu.compute_bleu(
correct=meters["_bleu_counts"].sum,
total=meters["_bleu_totals"].sum,
sys_len=meters["_bleu_sys_len"].sum,
ref_len=meters["_bleu_ref_len"].sum,
**smooth,
)
return round(bleu.score, 2)
metrics.log_derived("bleu", compute_bleu)
elif self.cfg.target_metric == "ter":
num_edits = sum_logs("_ter_num_edits")
ref_len = sum_logs("_ter_ref_len")
if ref_len > 0:
metrics.log_scalar("_ter_num_edits", num_edits)
metrics.log_scalar("_ter_ref_len", ref_len)
def compute_ter(meters):
score = meters["_ter_num_edits"].sum / meters["_ter_ref_len"].sum
return round(score.item(), 2)
metrics.log_derived("ter", compute_ter)
| EXA-1-master | exa/models/unilm-master/edgelm/examples/discriminative_reranking_nmt/tasks/discriminative_reranking_task.py |