python_code | repo_name | file_path
---|---|---|
import glob
import os
from typing import Callable, List, Optional, Tuple
import logging
import numpy as np
import torchvision.transforms.functional as TF
import PIL
from PIL import Image
from torchvision.datasets import VisionDataset
logger = logging.getLogger(__name__)
class PathDataset(VisionDataset):
def __init__(
self,
root: List[str],
loader: Optional[Callable] = None,
transform: Optional[Callable] = None,
extra_transform: Optional[Callable] = None,
mean: Optional[List[float]] = None,
std: Optional[List[float]] = None,
):
super().__init__(root=root)
PIL.Image.MAX_IMAGE_PIXELS = 256000001
self.files = []
for folder in self.root:
self.files.extend(
sorted(glob.glob(os.path.join(folder, "**", "*.jpg"), recursive=True))
)
self.files.extend(
sorted(glob.glob(os.path.join(folder, "**", "*.png"), recursive=True))
)
self.transform = transform
self.extra_transform = extra_transform
self.mean = mean
self.std = std
self.loader = loader
logger.info(f"loaded {len(self.files)} samples from {root}")
assert (mean is None) == (std is None)
def __len__(self) -> int:
return len(self.files)
def __getitem__(self, idx) -> Tuple[np.ndarray, np.ndarray]:
path = self.files[idx]
if self.loader is not None:
return self.loader(path), None
img = Image.open(path).convert("RGB")
if self.transform is not None:
img = self.transform(img)
img = TF.to_tensor(img)
if self.mean is not None and self.std is not None:
img = TF.normalize(img, self.mean, self.std)
return img, None
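# Illustrative usage sketch (not part of the original file; the folder path and
# normalization constants below are hypothetical):
#   dataset = PathDataset(
#       root=["/data/images"],              # folders scanned recursively for .jpg/.png
#       mean=[0.485, 0.456, 0.406],
#       std=[0.229, 0.224, 0.225],
#   )
#   img, _ = dataset[0]                     # CHW float tensor in [0, 1], then normalized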
| EXA-1-master | exa/libraries/fairseq/examples/data2vec/data/path_dataset.py |
import logging
import argparse
import random
import sys
import os
import numpy as np
import torch
import soundfile as sf
import shutil
import librosa
import json
from pathlib import Path
from tqdm import tqdm
import amfm_decompy.basic_tools as basic
import amfm_decompy.pYAAPT as pYAAPT
dir_path = os.path.dirname(__file__)
resynth_path = os.path.dirname(os.path.abspath(__file__)) + "/speech-resynthesis"
sys.path.append(resynth_path)
from models import CodeGenerator
from inference import scan_checkpoint, load_checkpoint, generate
from emotion_models.pitch_predictor import load_ckpt as load_pitch_predictor
from emotion_models.duration_predictor import load_ckpt as load_duration_predictor
from dataset import load_audio, MAX_WAV_VALUE, parse_style, parse_speaker, EMOV_SPK2ID, EMOV_STYLE2ID
logging.basicConfig(
level=logging.INFO,
format='%(asctime)s [%(levelname)s] %(message)s',
handlers=[logging.FileHandler('debug.log'), logging.StreamHandler()]
)
logger = logging.getLogger(__name__)
class AttrDict(dict):
def __init__(self, *args, **kwargs):
super(AttrDict, self).__init__(*args, **kwargs)
self.__dict__ = self
def parse_generation_file(fname):
lines = open(fname).read()
lines = lines.split('\n')
results = {}
for l in lines:
if len(l) == 0:
continue
if l[0] == 'H':
parts = l[2:].split('\t')
if len(parts) == 2:
sid, utt = parts
else:
sid, _, utt = parts
sid = int(sid)
utt = [int(x) for x in utt.split()]
if sid in results:
results[sid]['H'] = utt
else:
results[sid] = {'H': utt}
elif l[0] == 'S':
sid, utt = l[2:].split('\t')
sid = int(sid)
utt = [x for x in utt.split()]
if sid in results:
results[sid]['S'] = utt
else:
results[sid] = {'S': utt}
elif l[0] == 'T':
sid, utt = l[2:].split('\t')
sid = int(sid)
utt = [int(x) for x in utt.split()]
if sid in results:
results[sid]['T'] = utt
else:
results[sid] = {'T': utt}
for d, result in results.items():
if 'H' not in result:
result['H'] = result['S']
return results
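# The parser above assumes standard fairseq-generate output, where each line is
# prefixed with S- (source), T- (target) or H- (hypothesis) plus the sample id,
# with tab-separated fields. An illustrative sketch (unit ids are made up):
#   S-17    31 31 5 72
#   T-17    31 5 5 72
#   H-17    -0.4231    31 5 72
# which parse_generation_file collects into
#   results[17] = {'S': [...], 'T': [...], 'H': [...]}.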
def get_code_to_fname(manifest, tokens):
if tokens is None:
code_to_fname = {}
with open(manifest) as f:
for line in f:
line = line.strip()
fname, code = line.split()
code = code.replace(',', ' ')
code_to_fname[code] = fname
return code_to_fname
with open(manifest) as f:
fnames = [l.strip() for l in f.readlines()]
root = Path(fnames[0])
fnames = fnames[1:]
if '\t' in fnames[0]:
fnames = [x.split()[0] for x in fnames]
with open(tokens) as f:
codes = [l.strip() for l in f.readlines()]
code_to_fname = {}
for fname, code in zip(fnames, codes):
code = code.replace(',', ' ')
code_to_fname[code] = str(root / fname)
return root, code_to_fname
def code_to_str(s):
k = ' '.join([str(x) for x in s])
return k
def get_praat_f0(audio, rate=16000, interp=False):
frame_length = 20.0
to_pad = int(frame_length / 1000 * rate) // 2
f0s = []
for y in audio.astype(np.float64):
y_pad = np.pad(y.squeeze(), (to_pad, to_pad), "constant", constant_values=0)
signal = basic.SignalObj(y_pad, rate)
pitch = pYAAPT.yaapt(signal, **{'frame_length': frame_length, 'frame_space': 5.0, 'nccf_thresh1': 0.25,
'tda_frame_length': 25.0})
if interp:
f0s += [pitch.samp_interp[None, None, :]]
else:
f0s += [pitch.samp_values[None, None, :]]
f0 = np.vstack(f0s)
return f0
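# Shape note (descriptive, based on the code above): `audio` is a batch of
# waveforms (np.float64) shaped (B, T) or (B, 1, T); each item is padded by half
# a frame on both sides and passed through YAAPT, and the stacked result has
# shape (B, 1, n_frames), with unvoiced frames set to 0 (or interpolated over
# when interp=True), following pYAAPT's convention.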
def generate_from_code(generator, h, code, spkr=None, f0=None, gst=None, device="cpu"):
batch = {
'code': torch.LongTensor(code).to(device).view(1, -1),
}
if spkr is not None:
batch['spkr'] = spkr.to(device).unsqueeze(0)
if f0 is not None:
batch['f0'] = f0.to(device)
if gst is not None:
batch['style'] = gst.to(device)
with torch.no_grad():
audio, rtf = generate(h, generator, batch)
audio = librosa.util.normalize(audio / 2 ** 15)
return audio
@torch.no_grad()
def synth(argv, interactive=False):
parser = argparse.ArgumentParser()
parser.add_argument('--result-path', type=Path, help='Translation Model Output', required=True)
parser.add_argument('--data', type=Path, help='a directory with the files: src.tsv, src.km, trg.tsv, trg.km, orig.tsv, orig.km')
parser.add_argument("--orig-tsv", default="/checkpoint/felixkreuk/datasets/emov/manifests/emov_16khz/data.tsv")
parser.add_argument("--orig-km", default="/checkpoint/felixkreuk/datasets/emov/manifests/emov_16khz/core_manifests/emov_16khz_km_100/data.km")
parser.add_argument('--checkpoint-file', type=Path, help='Generator Checkpoint', required=True)
parser.add_argument('--dur-model', type=Path, help='a token duration prediction model (if tokens were deduped)')
parser.add_argument('--f0-model', type=Path, help='a f0 prediction model')
parser.add_argument('-s', '--src-emotion', default=None)
parser.add_argument('-t', '--trg-emotion', default=None)
parser.add_argument('-N', type=int, default=10)
parser.add_argument('--split', default="test")
parser.add_argument('--outdir', type=Path, default=Path('results'))
parser.add_argument('--orig-filename', action='store_true')
parser.add_argument('--device', type=int, default=0)
a = parser.parse_args(argv)
seed = 52
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
if os.path.isdir(a.checkpoint_file):
config_file = os.path.join(a.checkpoint_file, 'config.json')
else:
config_file = os.path.join(os.path.split(a.checkpoint_file)[0], 'config.json')
with open(config_file) as f:
data = f.read()
json_config = json.loads(data)
h = AttrDict(json_config)
generator = CodeGenerator(h).to(a.device)
if os.path.isdir(a.checkpoint_file):
cp_g = scan_checkpoint(a.checkpoint_file, 'g_')
else:
cp_g = a.checkpoint_file
state_dict_g = load_checkpoint(cp_g)
generator.load_state_dict(state_dict_g['generator'])
generator.eval()
generator.remove_weight_norm()
dur_models = {
"neutral": load_duration_predictor(f"{a.dur_model}/neutral.ckpt"),
"amused": load_duration_predictor(f"{a.dur_model}/amused.ckpt"),
"disgusted": load_duration_predictor(f"{a.dur_model}/disgusted.ckpt"),
"angry": load_duration_predictor(f"{a.dur_model}/angry.ckpt"),
"sleepy": load_duration_predictor(f"{a.dur_model}/sleepy.ckpt"),
}
logger.info(f"loaded duration prediction model from {a.dur_model}")
f0_model = load_pitch_predictor(a.f0_model).to(a.device)
logger.info(f"loaded f0 prediction model from {a.f0_model}")
# we need to know how to map code back to the filename
# (if we want the original file names as output)
results = parse_generation_file(a.result_path)
_, src_code_to_fname = get_code_to_fname(f'{a.data}/files.{a.split}.{a.src_emotion}', f'{a.data}/{a.split}.{a.src_emotion}')
_, tgt_code_to_fname = get_code_to_fname(f'{a.data}/files.{a.split}.{a.trg_emotion}', f'{a.data}/{a.split}.{a.trg_emotion}')
# we need the originals (before dedup) to get the ground-truth durations
orig_tsv = open(a.orig_tsv, 'r').readlines()
orig_tsv_root, orig_tsv = orig_tsv[0].strip(), orig_tsv[1:]
orig_km = open(a.orig_km, 'r').readlines()
fname_to_idx = {orig_tsv_root + "/" + line.split("\t")[0]: i for i, line in enumerate(orig_tsv)}
outdir = a.outdir
outdir.mkdir(parents=True, exist_ok=True)
(outdir / '0-source').mkdir(exist_ok=True)
(outdir / '1-src-tokens-src-style-src-f0').mkdir(exist_ok=True)
(outdir / '2-src-tokens-trg-style-src-f0').mkdir(exist_ok=True)
(outdir / '2.5-src-tokens-trg-style-src-f0').mkdir(exist_ok=True)
(outdir / '3-src-tokens-trg-style-pred-f0').mkdir(exist_ok=True)
(outdir / '4-gen-tokens-trg-style-pred-f0').mkdir(exist_ok=True)
(outdir / '5-target').mkdir(exist_ok=True)
N = 0
results = list(results.items())
random.shuffle(results)
for i, (sid, result) in tqdm(enumerate(results)):
N += 1
if N > a.N and a.N != -1:
break
if '[' in result['S'][0]:
result['S'] = result['S'][1:]
if '_' in result['S'][-1]:
result['S'] = result['S'][:-1]
src_ref = src_code_to_fname[code_to_str(result['S'])]
trg_ref = tgt_code_to_fname[code_to_str(result['T'])]
src_style, trg_style = None, None
src_spkr, trg_spkr = None, None
src_f0 = None
src_audio = (load_audio(src_ref)[0] / MAX_WAV_VALUE) * 0.95
trg_audio = (load_audio(trg_ref)[0] / MAX_WAV_VALUE) * 0.95
src_audio = torch.FloatTensor(src_audio).unsqueeze(0).cuda()
trg_audio = torch.FloatTensor(trg_audio).unsqueeze(0).cuda()
src_spkr = parse_speaker(src_ref, h.multispkr)
src_spkr = src_spkr if src_spkr in EMOV_SPK2ID else random.choice(list(EMOV_SPK2ID.keys()))
src_spkr = EMOV_SPK2ID[src_spkr]
src_spkr = torch.LongTensor([src_spkr])
trg_spkr = parse_speaker(trg_ref, h.multispkr)
trg_spkr = trg_spkr if trg_spkr in EMOV_SPK2ID else random.choice(list(EMOV_SPK2ID.keys()))
trg_spkr = EMOV_SPK2ID[trg_spkr]
trg_spkr = torch.LongTensor([trg_spkr])
src_style = EMOV_STYLE2ID[a.src_emotion]
src_style = torch.LongTensor([src_style]).cuda()
trg_style_str = a.trg_emotion
trg_style = EMOV_STYLE2ID[a.trg_emotion]
trg_style = torch.LongTensor([trg_style]).cuda()
src_tokens = list(map(int, orig_km[fname_to_idx[src_ref]].strip().split(" ")))
src_tokens = torch.LongTensor(src_tokens).unsqueeze(0)
src_tokens_dur_pred = torch.LongTensor(list(map(int, result['S']))).unsqueeze(0)
src_tokens_dur_pred = dur_models[trg_style_str].inflate_input(src_tokens_dur_pred)
gen_tokens = torch.LongTensor(result['H']).unsqueeze(0)
gen_tokens = dur_models[trg_style_str].inflate_input(gen_tokens)
trg_tokens = torch.LongTensor(result['T']).unsqueeze(0)
trg_tokens = dur_models[trg_style_str].inflate_input(trg_tokens)
src_f0 = get_praat_f0(src_audio.unsqueeze(0).cpu().numpy())
src_f0 = torch.FloatTensor(src_f0).cuda()
pred_src_f0 = f0_model.inference(torch.LongTensor(src_tokens).to(a.device), src_spkr, trg_style).unsqueeze(0)
pred_src_dur_pred_f0 = f0_model.inference(torch.LongTensor(src_tokens_dur_pred).to(a.device), src_spkr, trg_style).unsqueeze(0)
pred_gen_f0 = f0_model.inference(torch.LongTensor(gen_tokens).to(a.device), src_spkr, trg_style).unsqueeze(0)
pred_trg_f0 = f0_model.inference(torch.LongTensor(trg_tokens).to(a.device), src_spkr, trg_style).unsqueeze(0)
if a.orig_filename:
path = src_code_to_fname[code_to_str(result['S'])]
sid = str(sid) + "__" + Path(path).stem
shutil.copy(src_code_to_fname[code_to_str(result['S'])], outdir / '0-source' / f'{sid}.wav')
audio = generate_from_code(generator, h, src_tokens, spkr=src_spkr, f0=src_f0, gst=src_style, device=a.device)
sf.write(outdir / '1-src-tokens-src-style-src-f0' / f'{sid}.wav', audio, samplerate=h.sampling_rate)
audio = generate_from_code(generator, h, src_tokens, spkr=src_spkr, f0=src_f0, gst=trg_style, device=a.device)
sf.write(outdir / '2-src-tokens-trg-style-src-f0' / f'{sid}.wav', audio, samplerate=h.sampling_rate)
audio = generate_from_code(generator, h, src_tokens_dur_pred, spkr=src_spkr, f0=src_f0, gst=trg_style, device=a.device)
sf.write(outdir / '2.5-src-tokens-trg-style-src-f0' / f'{sid}.wav', audio, samplerate=h.sampling_rate)
audio = generate_from_code(generator, h, src_tokens_dur_pred, spkr=src_spkr, f0=pred_src_dur_pred_f0, gst=trg_style, device=a.device)
sf.write(outdir / '3-src-tokens-trg-style-pred-f0' / f'{sid}.wav', audio, samplerate=h.sampling_rate)
audio = generate_from_code(generator, h, gen_tokens, spkr=src_spkr, f0=pred_gen_f0, gst=trg_style, device=a.device)
sf.write(outdir / '4-gen-tokens-trg-style-pred-f0' / f'{sid}.wav', audio, samplerate=h.sampling_rate)
shutil.copy(tgt_code_to_fname[code_to_str(result['T'])], outdir / '5-target' / f'{sid}.wav')
logger.info("Done.")
if __name__ == '__main__':
synth(sys.argv[1:])
| EXA-1-master | exa/libraries/fairseq/examples/emotion_conversion/synthesize.py |
from pathlib import Path
import os
import argparse
import random
import numpy as np
from sklearn.utils import shuffle
if __name__ == "__main__":
"""
this is a standalone script to split a .tsv/.km pair into
train/valid/test subsets (optionally shuffled)
"""
parser = argparse.ArgumentParser(description="")
parser.add_argument("tsv", type=str, help="path to tsv file")
parser.add_argument("km", type=str, help="path to km file")
parser.add_argument("--destdir", required=True, type=str)
parser.add_argument("--valid-percent", type=float, default=0.05, help="percent to allocate to validation set")
parser.add_argument("--test-percent", type=float, default=0.05, help="percent to allocate to test set")
parser.add_argument("-sh", "--shuffle", action="store_true", help="path to km file")
parser.add_argument("--seed", type=int, default=42, help="")
args = parser.parse_args()
np.random.seed(args.seed)
random.seed(args.seed)
os.makedirs(args.destdir, exist_ok=True)
km = open(args.km, "r").readlines()
tsv = open(args.tsv, "r").readlines()
root, tsv = tsv[0], tsv[1:]
assert args.tsv.endswith(".tsv") and args.km.endswith(".km")
assert len(tsv) == len(km)
if args.shuffle:
tsv, km = shuffle(tsv, km)
print(f"shuffled")
N = len(tsv)
N_tt = int(N * args.test_percent)
N_cv = int(N * args.valid_percent)
N_tr = N - N_tt - N_cv
train_tsv = tsv[:N_tr]
valid_tsv = tsv[N_tr:N_tr + N_cv]
test_tsv = tsv[N_tr + N_cv:]
train_km = km[:N_tr]
valid_km = km[N_tr:N_tr + N_cv]
test_km = km[N_tr + N_cv:]
assert len(train_tsv) + len(valid_tsv) + len(test_tsv) == len(tsv)
assert len(train_tsv) == len(train_km) and len(valid_tsv) == len(valid_km) and len(test_tsv) == len(test_km)
dir = Path(args.destdir)
open(dir / f"train.tsv", "w").writelines([root] + train_tsv)
open(dir / f"valid.tsv", "w").writelines([root] + valid_tsv)
open(dir / f"test.tsv", "w").writelines([root] + test_tsv)
open(dir / f"train.km", "w").writelines(train_km)
open(dir / f"valid.km", "w").writelines(valid_km)
open(dir / f"test.km", "w").writelines(test_km)
print(f"train: {len(train_km)}")
print(f"valid: {len(valid_km)}")
print(f"test: {len(test_km)}")
print("done")
| EXA-1-master | exa/libraries/fairseq/examples/emotion_conversion/preprocess/split_km_tsv.py |
| EXA-1-master | exa/libraries/fairseq/examples/emotion_conversion/preprocess/__init__.py |
from pathlib import Path
import os
import sys
import argparse
import random
import numpy as np
from tqdm import tqdm
from sklearn.model_selection import train_test_split
from build_translation_manifests import get_utt_id
def train_val_test_split(tsv_lines, km_lines, valid_percent, test_percent, seed=42):
utt_ids = list(sorted(set([get_utt_id(x) for x in tsv_lines])))
utt_ids, valid_utt_ids, _, _ = train_test_split(utt_ids, utt_ids, test_size=valid_percent, shuffle=True, random_state=seed)
train_utt_ids, test_utt_ids, _, _ = train_test_split(utt_ids, utt_ids, test_size=test_percent, shuffle=True, random_state=seed)
train_idx = [i for i, line in enumerate(tsv_lines) if get_utt_id(line) in train_utt_ids]
valid_idx = [i for i, line in enumerate(tsv_lines) if get_utt_id(line) in valid_utt_ids]
test_idx = [i for i, line in enumerate(tsv_lines) if get_utt_id(line) in test_utt_ids]
train_tsv, train_km = [tsv_lines[i] for i in train_idx], [km_lines[i] for i in train_idx]
valid_tsv, valid_km = [tsv_lines[i] for i in valid_idx], [km_lines[i] for i in valid_idx]
test_tsv, test_km = [tsv_lines[i] for i in test_idx], [km_lines[i] for i in test_idx]
print(f"train {len(train_km)}")
print(f"valid {len(valid_km)}")
print(f"test {len(test_km)}")
return train_tsv, train_km, valid_tsv, valid_km, test_tsv, test_km
if __name__ == "__main__":
"""
this is a standalone script to split a .tsv/.km pair into
train/valid/test subsets, keeping all utterances that share an
utterance id in the same subset
"""
parser = argparse.ArgumentParser(description="")
parser.add_argument("tsv", type=str, help="path to tsv file")
parser.add_argument("km", type=str, help="path to km file")
parser.add_argument("--destdir", required=True, type=str)
parser.add_argument("--valid-percent", type=float, default=0.05, help="percent to allocate to validation set")
parser.add_argument("--test-percent", type=float, default=0.05, help="percent to allocate to test set")
parser.add_argument("--seed", type=int, default=42, help="")
args = parser.parse_args()
np.random.seed(args.seed)
random.seed(args.seed)
os.makedirs(args.destdir, exist_ok=True)
km = open(args.km, "r").readlines()
tsv = open(args.tsv, "r").readlines()
root, tsv = tsv[0], tsv[1:]
assert args.tsv.endswith(".tsv") and args.km.endswith(".km")
assert len(tsv) == len(km)
train_tsv, train_km, valid_tsv, valid_km, test_tsv, test_km = train_val_test_split(tsv, km, args.valid_percent, args.test_percent, args.seed)
assert len(train_tsv) + len(valid_tsv) + len(test_tsv) == len(tsv)
assert len(train_tsv) == len(train_km) and len(valid_tsv) == len(valid_km) and len(test_tsv) == len(test_km)
dir = Path(args.destdir)
open(dir / f"train.tsv", "w").writelines([root] + train_tsv)
open(dir / f"valid.tsv", "w").writelines([root] + valid_tsv)
open(dir / f"test.tsv", "w").writelines([root] + test_tsv)
open(dir / f"train.km", "w").writelines(train_km)
open(dir / f"valid.km", "w").writelines(valid_km)
open(dir / f"test.km", "w").writelines(test_km)
print("done")
| EXA-1-master | exa/libraries/fairseq/examples/emotion_conversion/preprocess/split_emov_km_tsv_by_uttid.py |
from glob import glob
import argparse
from collections import defaultdict, Counter
from itertools import combinations, product, groupby
from pathlib import Path
import os
from sklearn.utils import shuffle
import numpy as np
import random
from shutil import copy
from subprocess import check_call
np.random.seed(42)
random.seed(42)
def get_fname(s):
return s.split("\t")[0]
def get_emotion(s):
return get_fname(s).split("_")[0].split("/")[1].lower()
def get_utt_id(s):
return get_fname(s).split(".")[0].split("_")[-1]
def dedup(seq):
""" >> remove_repetitions("1 2 2 3 100 2 2 1")
'1 2 3 100 2 1' """
seq = seq.strip().split(" ")
result = seq[:1]
reps = []
rep_counter = 1
for k in seq[1:]:
if k != result[-1]:
result += [k]
reps += [rep_counter]
rep_counter = 1
else:
rep_counter += 1
reps += [rep_counter]
assert len(reps) == len(result) and sum(reps) == len(seq)
return " ".join(result) + "\n" #, reps
def remove_under_k(seq, k):
""" remove tokens that repeat less then k times in a row
>> remove_under_k("a a a a b c c c", 1) ==> a a a a c c c """
seq = seq.strip().split(" ")
result = []
freqs = [(k,len(list(g))) for k, g in groupby(seq)]
for c, f in freqs:
if f > k:
result += [c for _ in range(f)]
return " ".join(result) + "\n" #, reps
def call(cmd):
print(cmd)
check_call(cmd, shell=True)
def denoising_preprocess(path, lang, dict):
bin = 'fairseq-preprocess'
cmd = [
bin,
f'--trainpref {path}/train.{lang} --validpref {path}/valid.{lang} --testpref {path}/test.{lang}',
f'--destdir {path}/tokenized/{lang}',
'--only-source',
'--task multilingual_denoising',
'--workers 40',
]
if dict != "":
cmd += [f'--srcdict {dict}']
cmd = " ".join(cmd)
call(cmd)
def translation_preprocess(path, src_lang, trg_lang, dict, only_train=False):
bin = 'fairseq-preprocess'
cmd = [
bin,
f'--source-lang {src_lang} --target-lang {trg_lang}',
f'--trainpref {path}/train',
f'--destdir {path}/tokenized',
'--workers 40',
]
if not only_train:
cmd += [f'--validpref {path}/valid --testpref {path}/test']
if dict != "":
cmd += [
f'--srcdict {dict}',
f'--tgtdict {dict}',
]
cmd = " ".join(cmd)
call(cmd)
def load_tsv_km(tsv_path, km_path):
assert tsv_path.exists() and km_path.exists()
tsv_lines = open(tsv_path, "r").readlines()
root, tsv_lines = tsv_lines[0], tsv_lines[1:]
km_lines = open(km_path, "r").readlines()
assert len(tsv_lines) == len(km_lines), ".tsv and .km should be the same length!"
return root, tsv_lines, km_lines
def main():
desc = """
this script takes as input .tsv and .km files for the EMOV dataset, and a pair of emotions.
it generates parallel .tsv and .km files for these emotions. for example:
❯ python build_translation_manifests.py \
/checkpoint/felixkreuk/datasets/emov/manifests/emov_16khz/train.tsv \
/checkpoint/felixkreuk/datasets/emov/manifests/emov_16khz/emov_16khz_km_100/train.km \
~/tmp/emov_pairs \
--src-emotion amused --trg-emotion neutral \
--dedup --shuffle --cross-speaker --dry-run
"""
parser = argparse.ArgumentParser(description=desc)
parser.add_argument("data", type=Path, help="path to a dir containing .tsv and .km files containing emov dataset")
parser.add_argument("output_path", type=Path, help="output directory with the manifests will be created")
parser.add_argument("-cs", "--cross-speaker", action='store_true', help="if set then translation will occur also between speakers, meaning the same sentence can be translated between different speakers (default: false)")
parser.add_argument("-dd", "--dedup", action='store_true', help="remove repeated tokens (example: 'aaabc=>abc')")
parser.add_argument("-sh", "--shuffle", action='store_true', help="shuffle the data")
parser.add_argument("-ae", "--autoencode", action='store_true', help="include training pairs from the same emotion (this includes examples of the same sentence uttered by different people and examples where the src and trg are the exact same seq)")
parser.add_argument("-dr", "--dry-run", action='store_true', help="don't write anything to disk")
parser.add_argument("-zs", "--zero-shot", action='store_true', help="if true, the denoising task will train on the same splits as the translation task (split by utterance id). if false, the denoising task will train on randomly sampled splits (not split by utterance id)")
parser.add_argument("--km-ext", default="km", help="")
parser.add_argument("--dict", default="/checkpoint/felixkreuk/datasets/emov/manifests/emov_16khz/fairseq.dict.txt", help="")
args = parser.parse_args()
SPEAKERS = ["bea", "jenie", "josh", "sam", "SAME"]
EMOTIONS = ['neutral', 'amused', 'angry', 'disgusted', 'sleepy']
suffix = ""
if args.cross_speaker: suffix += "_cross-speaker"
if args.dedup: suffix += "_dedup"
translation_suffix = ""
if args.autoencode: translation_suffix += "_autoencode"
denoising_suffix = ""
denoising_suffix += "_zeroshot" if args.zero_shot else "_nonzeroshot"
translation_dir = Path(args.output_path) / ("emov_multilingual_translation" + suffix + translation_suffix)
os.makedirs(translation_dir, exist_ok=True)
denoising_dir = Path(args.output_path) / ("emov_multilingual_denoising" + suffix + denoising_suffix)
os.makedirs(denoising_dir, exist_ok=True)
denoising_data = [p.name for p in (args.data / "denoising").glob("*") if "emov" not in p.name]
for split in ["train", "valid", "test"]:
root, tsv_lines, km_lines = load_tsv_km(
tsv_path = args.data / "denoising" / "emov" / f"{split}.tsv",
km_path = args.data / "denoising" / "emov" / f"{split}.{args.km_ext}"
)
# generate data for the multilingual denoising task
for EMOTION in EMOTIONS:
print("---")
print(split)
print(f"denoising: {EMOTION}")
emotion_tsv, emotion_km = [], []
for tsv_line, km_line in zip(tsv_lines, km_lines):
if EMOTION.lower() in tsv_line.lower():
km_line = km_line if not args.dedup else dedup(km_line)
emotion_tsv.append(tsv_line)
emotion_km.append(km_line)
print(f"{len(emotion_km)} samples")
open(denoising_dir / f"files.{split}.{EMOTION}", "w").writelines([root] + emotion_tsv)
open(denoising_dir / f"{split}.{EMOTION}", "w").writelines(emotion_km)
for data in denoising_data:
with open(args.data / "denoising" / data / f"{split}.{args.km_ext}", "r") as f1:
with open(denoising_dir / f"{split}.{data}", "w") as f2:
f2.writelines([l if not args.dedup else dedup(l) for l in f1.readlines()])
# start of translation preprocessing
root, tsv_lines, km_lines = load_tsv_km(
tsv_path = args.data / "translation" / f"{split}.tsv",
km_path = args.data / "translation" / f"{split}.{args.km_ext}"
)
# generate data for the multilingual translation task
for SRC_EMOTION in EMOTIONS:
TRG_EMOTIONS = EMOTIONS if args.autoencode else set(EMOTIONS) - set([SRC_EMOTION])
for TRG_EMOTION in TRG_EMOTIONS:
# when translating back to the same emotion - we don't want these emotion
# pairs to be part of the validation/test sets (because it's not really emotion conversion)
# if SRC_EMOTION == TRG_EMOTION and split in ["valid", "test"]: continue
print("---")
print(split)
print(f"src emotions: {SRC_EMOTION}\ntrg emotions: {TRG_EMOTION}")
# create a dictionary with the following structure:
# output[SPEAKER][UTT_ID] = list with indexes of line from the tsv file
# that match the speaker and utterance id. for example:
# output = {'sam': {'0493': [875, 1608, 1822], ...}, ...}
# meaning, for speaker 'sam', utterance id '0493', the indexes in tsv_lines
# are 875, 1608, 1822
spkr2utts = defaultdict(lambda: defaultdict(list))
for i, tsv_line in enumerate(tsv_lines):
speaker = tsv_line.split("/")[0]
if args.cross_speaker: speaker = "SAME"
assert speaker in SPEAKERS, "unknown speaker! make sure the .tsv contains EMOV data"
utt_id = get_utt_id(tsv_line)
spkr2utts[speaker][utt_id].append(i)
# create a tsv and km files with all the combinations for translation
src_tsv, trg_tsv, src_km, trg_km = [], [], [], []
for speaker, utt_ids in spkr2utts.items():
for utt_id, indices in utt_ids.items():
# generate all pairs
pairs = [(x,y) for x in indices for y in indices]
# self-translation
if SRC_EMOTION == TRG_EMOTION:
pairs = [(x,y) for (x,y) in pairs if x == y]
# filter according to src and trg emotions
pairs = [(x,y) for (x,y) in pairs
if get_emotion(tsv_lines[x]) == SRC_EMOTION and get_emotion(tsv_lines[y]) == TRG_EMOTION]
for idx1, idx2 in pairs:
assert get_utt_id(tsv_lines[idx1]) == get_utt_id(tsv_lines[idx2])
src_tsv.append(tsv_lines[idx1])
trg_tsv.append(tsv_lines[idx2])
km_line_idx1 = km_lines[idx1]
km_line_idx2 = km_lines[idx2]
km_line_idx1 = km_line_idx1 if not args.dedup else dedup(km_line_idx1)
km_line_idx2 = km_line_idx2 if not args.dedup else dedup(km_line_idx2)
src_km.append(km_line_idx1)
trg_km.append(km_line_idx2)
assert len(src_tsv) == len(trg_tsv) == len(src_km) == len(trg_km)
print(f"{len(src_tsv)} pairs")
if len(src_tsv) == 0:
raise Exception("ERROR: generated 0 pairs!")
if args.dry_run: continue
# create files
os.makedirs(translation_dir / f"{SRC_EMOTION}-{TRG_EMOTION}", exist_ok=True)
open(translation_dir / f"{SRC_EMOTION}-{TRG_EMOTION}" / f"files.{split}.{SRC_EMOTION}", "w").writelines([root] + src_tsv)
open(translation_dir / f"{SRC_EMOTION}-{TRG_EMOTION}" / f"files.{split}.{TRG_EMOTION}", "w").writelines([root] + trg_tsv)
open(translation_dir / f"{SRC_EMOTION}-{TRG_EMOTION}" / f"{split}.{SRC_EMOTION}", "w").writelines(src_km)
open(translation_dir / f"{SRC_EMOTION}-{TRG_EMOTION}" / f"{split}.{TRG_EMOTION}", "w").writelines(trg_km)
# fairseq-preprocess the denoising data
for EMOTION in EMOTIONS + denoising_data:
denoising_preprocess(denoising_dir, EMOTION, args.dict)
os.system(f"cp {args.dict} {denoising_dir}/tokenized/dict.txt")
# fairseq-preprocess the translation data
os.makedirs(translation_dir / "tokenized", exist_ok=True)
for SRC_EMOTION in EMOTIONS:
TRG_EMOTIONS = EMOTIONS if args.autoencode else set(EMOTIONS) - set([SRC_EMOTION])
for TRG_EMOTION in TRG_EMOTIONS:
translation_preprocess(translation_dir / f"{SRC_EMOTION}-{TRG_EMOTION}", SRC_EMOTION, TRG_EMOTION, args.dict)#, only_train=SRC_EMOTION==TRG_EMOTION)
os.system(f"cp -rf {translation_dir}/**/tokenized/* {translation_dir}/tokenized")
if __name__ == "__main__":
main()
| EXA-1-master | exa/libraries/fairseq/examples/emotion_conversion/preprocess/build_translation_manifests.py |
from pathlib import Path
import os
import sys
import subprocess
import argparse
from datetime import datetime
import logging
logging.basicConfig(
level=logging.INFO,
format='%(asctime)s [%(levelname)s] %(message)s',
handlers=[logging.FileHandler('debug.log'), logging.StreamHandler()]
)
logger = logging.getLogger(__name__)
def verify_dict_size(km, dict):
logger.info(f"verifying: {km}")
dict_size = len(open(dict, "r").readlines())
km_vocab = set(open(km, "r").read().replace("\n", " ").split(" "))
if "" in km_vocab: km_vocab.remove("")
km_vocab_size = len(km_vocab)
return dict_size == km_vocab_size
def verify_files_exist(l):
for f in l:
if not f.exists():
logging.error(f"{f} doesn't exist!")
return False
return True
def run_cmd(cmd, print_output=True):
try:
out = subprocess.check_output(cmd, stderr=subprocess.STDOUT, universal_newlines=True, shell=True)
if print_output:
logger.info(f"command output:\n{out}")
return out
except subprocess.CalledProcessError as grepexc:
logger.info(f"error executing command!:\n{cmd}")
logger.info(grepexc.output)
def main():
parser = argparse.ArgumentParser()
parser.add_argument("--tsv", default="/checkpoint/felixkreuk/datasets/emov/manifests/emov_16khz/data.tsv", type=Path)
parser.add_argument("--emov-km", required=True, type=Path)
parser.add_argument("--km", nargs='+', required=True, type=Path)
parser.add_argument("--seed", type=int, default=1)
parser.add_argument("--dict", default="/checkpoint/felixkreuk/datasets/emov/manifests/emov_16khz/fairseq.dict.txt")
parser.add_argument("--manifests-dir", type=Path, default="/checkpoint/felixkreuk/datasets/emov/manifests/emov_16khz")
args = parser.parse_args()
manifests_dir = args.manifests_dir
date = datetime.now().strftime('%d%m%y')
outdir = manifests_dir / f"{date}"
# verify input and create folders
all_kms = args.km + [args.emov_km]
assert verify_files_exist(all_kms), "make sure the km dir contains: train-clean-all.km, blizzard2013.km, data.km"
for codes in all_kms:
assert verify_dict_size(codes, args.dict), "dict argument doesn't match the vocabulary of the km file!"
assert not outdir.exists(), "data dir already exists!"
outdir.mkdir(parents=True, exist_ok=True)
logger.info("generating denoising split (emov)")
run_cmd(f"python preprocess/split_km_tsv.py {args.tsv} {args.emov_km} --destdir {outdir}/denoising/emov -sh --seed {args.seed}")
for codes in args.km:
codes_name = os.path.basename(codes)
run_cmd(f"python preprocess/split_km.py {codes} --destdir {outdir}/denoising/{codes_name} -sh --seed {args.seed}")
logger.info("generating translation split")
run_cmd(f"python preprocess/split_emov_km_tsv_by_uttid.py {args.tsv} {args.emov_km} --destdir {outdir}/translation --seed {args.seed}")
emov_code_name = os.path.basename(args.emov_km)
logger.info("generating hifigan split")
run_cmd(
f"mkdir -p {outdir}/hifigan &&"
f"python preprocess/build_hifigan_manifest.py --km_type hubert --tsv {outdir}/denoising/emov/train.tsv --km {outdir}/denoising/emov/train.km > {outdir}/hifigan/train.txt &&"
f"python preprocess/build_hifigan_manifest.py --km_type hubert --tsv {outdir}/denoising/emov/valid.tsv --km {outdir}/denoising/emov/valid.km > {outdir}/hifigan/valid.txt &&"
f"python preprocess/build_hifigan_manifest.py --km_type hubert --tsv {outdir}/denoising/emov/test.tsv --km {outdir}/denoising/emov/test.km > {outdir}/hifigan/test.txt"
)
logger.info("generating fairseq manifests")
run_cmd(f"python preprocess/build_translation_manifests.py {outdir} {outdir}/fairseq-data -dd -cs --dict {args.dict}")
logger.info(f"finished processing data at:\n{outdir}")
if __name__ == "__main__":
main()
| EXA-1-master | exa/libraries/fairseq/examples/emotion_conversion/preprocess/create_core_manifest.py |
import sys
import argparse
from tqdm import tqdm
from build_translation_manifests import dedup, remove_under_k
if __name__ == "__main__":
"""
this is a standalone script to process a km file
specifically, to dedup or remove tokens that repeat less
than k times in a row
"""
parser = argparse.ArgumentParser(description="")
parser.add_argument("km", type=str, help="path to km file")
parser.add_argument("--dedup", action='store_true')
parser.add_argument("--remove-under-k", type=int, default=0)
parser.add_argument("--output", default=None)
args = parser.parse_args()
if not args.dedup and args.remove_under_k == 0:
print("nothing to do! quitting...")
sys.exit(0)
km = open(args.km, "r").readlines()
out = []
for line in tqdm(km):
if args.remove_under_k > 0:
line = remove_under_k(line, args.remove_under_k)
if args.dedup:
line = dedup(line)
out.append(line)
path = args.km if args.output is None else args.output
if args.remove_under_k > 0:
path = path.replace(".km", f"-k{args.remove_under_k}.km")
if args.dedup:
path = path.replace(".km", f"-deduped.km")
open(path, "w").writelines(out)
print(f"written to {path}")
| EXA-1-master | exa/libraries/fairseq/examples/emotion_conversion/preprocess/process_km.py |
from pathlib import Path
import os
import argparse
import random
import numpy as np
from sklearn.utils import shuffle
if __name__ == "__main__":
"""
this is a standalone script to split a km file into
train/valid/test subsets (optionally shuffled)
"""
parser = argparse.ArgumentParser(description="")
parser.add_argument("km", type=str, help="path to km file")
parser.add_argument("--destdir", required=True, type=str)
parser.add_argument("--valid-percent", type=float, default=0.05, help="percent to allocate to validation set")
parser.add_argument("--test-percent", type=float, default=0.05, help="percent to allocate to test set")
parser.add_argument("-sh", "--shuffle", action="store_true", help="path to km file")
parser.add_argument("--seed", type=int, default=42, help="")
args = parser.parse_args()
np.random.seed(args.seed)
random.seed(args.seed)
os.makedirs(args.destdir, exist_ok=True)
km = open(args.km, "r").readlines()
if args.shuffle:
km = shuffle(km)
print(f"shuffled")
N = len(km)
N_tt = int(N * args.test_percent)
N_cv = int(N * args.valid_percent)
N_tr = N - N_tt - N_cv
train_km = km[:N_tr]
valid_km = km[N_tr:N_tr + N_cv]
test_km = km[N_tr + N_cv:]
dir = Path(args.destdir)
open(dir / f"train.km", "w").writelines(train_km)
open(dir / f"valid.km", "w").writelines(valid_km)
open(dir / f"test.km", "w").writelines(test_km)
print(f"train: {len(train_km)}")
print(f"valid: {len(valid_km)}")
print(f"test: {len(test_km)}")
print("done")
| EXA-1-master | exa/libraries/fairseq/examples/emotion_conversion/preprocess/split_km.py |
import argparse
from tqdm import tqdm
from multiprocessing import Manager, Pool
from scipy.io.wavfile import read
from librosa.util import normalize
import numpy as np
import amfm_decompy.pYAAPT as pYAAPT
import amfm_decompy.basic_tools as basic
MAX_WAV_VALUE = 32768.0
parser = argparse.ArgumentParser(description="")
parser.add_argument("tsv", help="")
parser.add_argument("--extractor", choices=["crepe", "pyaapt"], default="pyaapt", help="")
parser.add_argument("--interp", action="store_true", help="")
parser.add_argument("--n_workers", type=int, default=40, help="")
args = parser.parse_args()
tsv_lines = open(args.tsv, "r").readlines()
root, tsv_lines = tsv_lines[0].strip(), tsv_lines[1:]
def extract_f0(tsv_line):
wav_path, _ = tsv_line.split("\t")
wav_path = root.strip() + "/" + wav_path
sr, wav = read(wav_path)
wav = wav / MAX_WAV_VALUE
wav = normalize(wav) * 0.95
if args.extractor == "pyaapt":
frame_length = 20.0
pad = int(frame_length / 1000 * sr) // 2
wav = np.pad(wav.squeeze(), (pad, pad), "constant", constant_values=0)
signal = basic.SignalObj(wav, sr)
pitch = pYAAPT.yaapt(
signal,
**{
'frame_length': frame_length,
'frame_space': 5.0,
'nccf_thresh1': 0.25,
'tda_frame_length': 25.0
})
pitch = pitch.samp_interp[None, None, :] if args.interp else pitch.samp_values[None, None, :]
pitch = pitch[0, 0]
f0_path = wav_path.replace(".wav", ".yaapt")
f0_path += ".interp.f0" if args.interp else ".f0"
np.save(f0_path, pitch)
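# Note (descriptive): np.save appends ".npy", so the pitch track is written next
# to the wav as "<name>.yaapt.f0.npy" (or "<name>.yaapt.interp.f0.npy" with
# --interp), which matches the path PitchDataset._load_f0 reads in
# emotion_models/pitch_predictor.py.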
def main():
with Pool(args.n_workers) as p:
r = list(tqdm(p.imap(extract_f0, tsv_lines), total=len(tsv_lines)))
if __name__ == "__main__":
main()
| EXA-1-master | exa/libraries/fairseq/examples/emotion_conversion/preprocess/extract_f0.py |
import torchaudio
import argparse
import json
def main():
parser = argparse.ArgumentParser(description="example: python create_hifigan_manifest.py --tsv /checkpoint/felixkreuk/datasets/vctk/splits/vctk_16khz/train.tsv --km /checkpoint/felixkreuk/experiments/hubert/hubert_feats/vctk_16khz_km_100/train.km --km_type hubert_100km > ~/tmp/tmp_mani.txt")
parser.add_argument("--tsv", required=True, help="path to fairseq tsv file")
parser.add_argument("--km", required=True, help="path to a km file generated by HuBERT clustering")
parser.add_argument("--km_type", required=True, help="name of the codes in the output json (for example: 'cpc_100km')")
args = parser.parse_args()
km_lines = open(args.km, "r").readlines()
tsv_lines = open(args.tsv, "r").readlines()
assert len(km_lines) == len(tsv_lines) - 1, "tsv and km files are not of the same length!"
wav_root = tsv_lines[0].strip()
tsv_lines = tsv_lines[1:]
for tsv_line, km_line in zip(tsv_lines, km_lines):
tsv_line, km_line = tsv_line.strip(), km_line.strip()
wav_basename, wav_num_frames = tsv_line.split("\t")
wav_path = wav_root + "/" + wav_basename
wav_info = torchaudio.info(wav_path)
assert int(wav_num_frames) == wav_info.num_frames, "tsv duration and actual duration don't match!"
wav_duration = wav_info.num_frames / wav_info.sample_rate
manifest_line = {"audio": wav_path, "duration": wav_duration, args.km_type: km_line}
print(json.dumps(manifest_line))
if __name__ == "__main__":
"""
usage:
python build_hifigan_manifest.py \
--tsv /checkpoint/felixkreuk/datasets/vctk/manifests/vctk_16khz/valid.tsv \
--km /checkpoint/felixkreuk/datasets/vctk/manifests/vctk_16khz/hubert_km_100/valid.km \
--km_type hubert \
> /checkpoint/felixkreuk/datasets/vctk/manifests/vctk_16khz/hubert_km_100/hifigan_valid_manifest.txt
"""
main()
| EXA-1-master | exa/libraries/fairseq/examples/emotion_conversion/preprocess/build_hifigan_manifest.py |
import logging
import os
import hydra
import torch
import torch.nn as nn
import torch.nn.functional as F
from einops.layers.torch import Rearrange
from torch.utils.data import DataLoader, Dataset
from .utils import Accuracy
logger = logging.getLogger(__name__)
def save_ckpt(model, path, model_class):
ckpt = {
"state_dict": model.state_dict(),
"padding_token": model.padding_token,
"model_class": model_class,
}
torch.save(ckpt, path)
def load_ckpt(path):
ckpt = torch.load(path)
ckpt["model_class"]["_target_"] = "emotion_models.duration_predictor.CnnPredictor"
model = hydra.utils.instantiate(ckpt["model_class"])
model.load_state_dict(ckpt["state_dict"])
model.padding_token = ckpt["padding_token"]
model = model.cpu()
model.eval()
return model
class Collator:
def __init__(self, padding_idx):
self.padding_idx = padding_idx
def __call__(self, batch):
x = [item[0] for item in batch]
lengths = [len(item) for item in x]
x = torch.nn.utils.rnn.pad_sequence(x, batch_first=True, padding_value=self.padding_idx)
y = [item[1] for item in batch]
y = torch.nn.utils.rnn.pad_sequence(y, batch_first=True, padding_value=self.padding_idx)
mask = (x != self.padding_idx)
return x, y, mask, lengths
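# Illustrative sketch of the collator output (not part of the original file):
# for a batch of two (tokens, durations) pairs of lengths 3 and 2, x and y are
# padded to shape (2, 3) with the padding index, mask marks the real (non-pad)
# positions of x, and lengths == [3, 2].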
class Predictor(nn.Module):
def __init__(self, n_tokens, emb_dim):
super(Predictor, self).__init__()
self.n_tokens = n_tokens
self.emb_dim = emb_dim
self.padding_token = n_tokens
# add 1 extra embedding for padding token, set the padding index to be the last token
# (tokens from the clustering start at index 0)
self.emb = nn.Embedding(n_tokens + 1, emb_dim, padding_idx=self.padding_token)
def inflate_input(self, batch):
""" get a sequence of tokens, predict their durations
and inflate them accordingly """
batch_durs = self.forward(batch)
batch_durs = torch.exp(batch_durs) - 1
batch_durs = batch_durs.round()
output = []
for seq, durs in zip(batch, batch_durs):
inflated_seq = []
for token, n in zip(seq, durs):
if token == self.padding_token:
break
n = int(n.item())
token = int(token.item())
inflated_seq.extend([token for _ in range(n)])
output.append(inflated_seq)
output = torch.LongTensor(output)
return output
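# Illustrative sketch (values are made up): for a deduped batch [[5, 9, 3]] with
# predicted durations that round to [2, 1, 3], inflate_input returns
# tensor([[5, 5, 9, 3, 3, 3]]). Durations are modeled in log space, hence the
# exp(.) - 1 before rounding.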
class CnnPredictor(Predictor):
def __init__(self, n_tokens, emb_dim, channels, kernel, output_dim, dropout, n_layers):
super(CnnPredictor, self).__init__(n_tokens=n_tokens, emb_dim=emb_dim)
layers = [
Rearrange("b t c -> b c t"),
nn.Conv1d(emb_dim, channels, kernel_size=kernel, padding=(kernel - 1) // 2),
Rearrange("b c t -> b t c"),
nn.ReLU(),
nn.LayerNorm(channels),
nn.Dropout(dropout),
]
for _ in range(n_layers-1):
layers += [
Rearrange("b t c -> b c t"),
nn.Conv1d(channels, channels, kernel_size=kernel, padding=(kernel - 1) // 2),
Rearrange("b c t -> b t c"),
nn.ReLU(),
nn.LayerNorm(channels),
nn.Dropout(dropout),
]
self.conv_layer = nn.Sequential(*layers)
self.proj = nn.Linear(channels, output_dim)
def forward(self, x):
x = self.emb(x)
x = self.conv_layer(x)
x = self.proj(x)
x = x.squeeze(-1)
return x
def l2_log_loss(input, target):
return F.mse_loss(
input=input.float(),
target=torch.log(target.float() + 1),
reduce=False
)
class DurationDataset(Dataset):
def __init__(self, tsv_path, km_path, substring=""):
lines = open(tsv_path, "r").readlines()
self.root, self.tsv = lines[0], lines[1:]
self.km = open(km_path, "r").readlines()
logger.info(f"loaded {len(self.km)} files")
if substring != "":
tsv, km = [], []
for tsv_line, km_line in zip(self.tsv, self.km):
if substring.lower() in tsv_line.lower():
tsv.append(tsv_line)
km.append(km_line)
self.tsv, self.km = tsv, km
logger.info(f"after filtering: {len(self.km)} files")
def __len__(self):
return len(self.km)
def __getitem__(self, i):
x = self.km[i]
x = x.split(" ")
x = list(map(int, x))
y = []
xd = []
count = 1
for x1, x2 in zip(x[:-1], x[1:]):
if x1 == x2:
count += 1
continue
else:
y.append(count)
xd.append(x1)
count = 1
xd = torch.LongTensor(xd)
y = torch.LongTensor(y)
return xd, y
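# Illustrative sketch of the run-length encoding above (values are made up):
# a km line "5 5 5 9 3 3" yields xd = [5, 9] and y = [3, 1]; note that the final
# run ("3 3") is not emitted, because the loop only appends when the token changes.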
def train(cfg):
device = "cuda:0"
model = hydra.utils.instantiate(cfg[cfg.model]).to(device)
optimizer = hydra.utils.instantiate(cfg.optimizer, model.parameters())
# add 1 extra embedding for padding token, set the padding index to be the last token
# (tokens from the clustering start at index 0)
collate_fn = Collator(padding_idx=model.padding_token)
logger.info(f"data: {cfg.train_tsv}")
train_ds = DurationDataset(cfg.train_tsv, cfg.train_km, substring=cfg.substring)
valid_ds = DurationDataset(cfg.valid_tsv, cfg.valid_km, substring=cfg.substring)
train_dl = DataLoader(train_ds, batch_size=32, shuffle=True, collate_fn=collate_fn)
valid_dl = DataLoader(valid_ds, batch_size=32, shuffle=False, collate_fn=collate_fn)
best_loss = float("inf")
for epoch in range(cfg.epochs):
train_loss, train_loss_scaled = train_epoch(model, train_dl, l2_log_loss, optimizer, device)
valid_loss, valid_loss_scaled, *acc = valid_epoch(model, valid_dl, l2_log_loss, device)
acc0, acc1, acc2, acc3 = acc
if valid_loss_scaled < best_loss:
path = f"{os.getcwd()}/{cfg.substring}.ckpt"
save_ckpt(model, path, cfg[cfg.model])
best_loss = valid_loss_scaled
logger.info(f"saved checkpoint: {path}")
logger.info(f"[epoch {epoch}] train loss: {train_loss:.3f}, train scaled: {train_loss_scaled:.3f}")
logger.info(f"[epoch {epoch}] valid loss: {valid_loss:.3f}, valid scaled: {valid_loss_scaled:.3f}")
logger.info(f"acc: {acc0,acc1,acc2,acc3}")
def train_epoch(model, loader, criterion, optimizer, device):
model.train()
epoch_loss = 0
epoch_loss_scaled = 0
for x, y, mask, _ in loader:
x, y, mask = x.to(device), y.to(device), mask.to(device)
yhat = model(x)
loss = criterion(yhat, y) * mask
loss = torch.mean(loss)
optimizer.zero_grad()  # reset accumulated gradients before this batch's backward pass
loss.backward()
nn.utils.clip_grad_norm_(model.parameters(), 1.0)
optimizer.step()
epoch_loss += loss.item()
# get normal scale loss
yhat_scaled = torch.exp(yhat) - 1
yhat_scaled = torch.round(yhat_scaled)
scaled_loss = torch.mean(torch.abs(yhat_scaled - y) * mask)
epoch_loss_scaled += scaled_loss.item()
return epoch_loss / len(loader), epoch_loss_scaled / len(loader)
def valid_epoch(model, loader, criterion, device):
model.eval()
epoch_loss = 0
epoch_loss_scaled = 0
acc = Accuracy()
for x, y, mask, _ in loader:
x, y, mask = x.to(device), y.to(device), mask.to(device)
yhat = model(x)
loss = criterion(yhat, y) * mask
loss = torch.mean(loss)
epoch_loss += loss.item()
# get normal scale loss
yhat_scaled = torch.exp(yhat) - 1
yhat_scaled = torch.round(yhat_scaled)
scaled_loss = torch.sum(torch.abs(yhat_scaled - y) * mask) / mask.sum()
acc.update(yhat_scaled[mask].view(-1).float(), y[mask].view(-1).float())
epoch_loss_scaled += scaled_loss.item()
logger.info(f"example y: {y[0, :10].tolist()}")
logger.info(f"example yhat: {yhat_scaled[0, :10].tolist()}")
acc0 = acc.acc(tol=0)
acc1 = acc.acc(tol=1)
acc2 = acc.acc(tol=2)
acc3 = acc.acc(tol=3)
logger.info(f"accs: {acc0,acc1,acc2,acc3}")
return epoch_loss / len(loader), epoch_loss_scaled / len(loader), acc0, acc1, acc2, acc3
@hydra.main(config_path=".", config_name="duration_predictor.yaml")
def main(cfg):
logger.info(f"{cfg}")
train(cfg)
if __name__ == "__main__":
main()
| EXA-1-master | exa/libraries/fairseq/examples/emotion_conversion/emotion_models/duration_predictor.py |
| EXA-1-master | exa/libraries/fairseq/examples/emotion_conversion/emotion_models/__init__.py |
import torch
class Stat:
def __init__(self, keep_raw=False):
self.x = 0.0
self.x2 = 0.0
self.z = 0.0 # z = logx
self.z2 = 0.0
self.n = 0.0
self.u = 0.0
self.keep_raw = keep_raw
self.raw = []
def update(self, new_x):
new_z = new_x.log()
self.x += new_x.sum()
self.x2 += (new_x**2).sum()
self.z += new_z.sum()
self.z2 += (new_z**2).sum()
self.n += len(new_x)
self.u += 1
if self.keep_raw:
self.raw.append(new_x)
@property
def mean(self):
return self.x / self.n
@property
def std(self):
return (self.x2 / self.n - self.mean**2) ** 0.5
@property
def mean_log(self):
return self.z / self.n
@property
def std_log(self):
return (self.z2 / self.n - self.mean_log**2) ** 0.5
@property
def n_frms(self):
return self.n
@property
def n_utts(self):
return self.u
@property
def raw_data(self):
assert self.keep_raw, "does not support storing raw data!"
return torch.cat(self.raw)
class F0Stat(Stat):
def update(self, new_x):
# assume unvoiced frames are 0 and consider only voiced frames
if new_x is not None:
super().update(new_x[new_x != 0])
class Accuracy:
def __init__(self):
self.y, self.yhat = [], []
def update(self, yhat, y):
self.yhat.append(yhat)
self.y.append(y)
def acc(self, tol):
yhat = torch.cat(self.yhat)
y = torch.cat(self.y)
acc = torch.abs(yhat - y) <= tol
acc = acc.float().mean().item()
return acc
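# Illustrative sketch (not part of the original file): with predictions
# [2., 4., 7.] against targets [2., 5., 3.], acc(tol=0) == 1/3 and
# acc(tol=1) == 2/3, since |4 - 5| <= 1 while |7 - 3| > 1.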
| EXA-1-master | exa/libraries/fairseq/examples/emotion_conversion/emotion_models/utils.py |
import logging
import os
import random
import sys
from collections import defaultdict
import hydra
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from einops import rearrange
from einops.layers.torch import Rearrange
from scipy.io.wavfile import read
from scipy.ndimage import gaussian_filter1d
from torch.utils.data import DataLoader, Dataset
from tqdm import tqdm
dir_path = os.path.dirname(__file__)
resynth_path = os.path.dirname(dir_path) + "/speech-resynthesis"
sys.path.append(resynth_path)
from dataset import parse_speaker, parse_style
from .utils import F0Stat
MAX_WAV_VALUE = 32768.0
logger = logging.getLogger(__name__)
def quantize_f0(speaker_to_f0, nbins, normalize, log):
f0_all = []
for speaker, f0 in speaker_to_f0.items():
f0 = f0.raw_data
if log:
f0 = f0.log()
mean = speaker_to_f0[speaker].mean_log if log else speaker_to_f0[speaker].mean
std = speaker_to_f0[speaker].std_log if log else speaker_to_f0[speaker].std
if normalize == "mean":
f0 = f0 - mean
elif normalize == "meanstd":
f0 = (f0 - mean) / std
f0_all.extend(f0.tolist())
hist, bin_x = np.histogram(f0_all, 100000)
cum_hist = np.cumsum(hist) / len(f0_all) * 100
bin_offset = []
bin_size = 100 / nbins
threshold = bin_size
for i in range(nbins - 1):
index = (np.abs(cum_hist - threshold)).argmin()
bin_offset.append(bin_x[index])
threshold += bin_size
bins = np.array(bin_offset)
bins = torch.FloatTensor(bins)
return bins
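# Descriptive note: the loop above places the nbins - 1 inner bin edges at
# (approximately) equal percentiles of the pooled, per-speaker-normalized f0
# distribution, so each adaptive bin covers roughly the same fraction of the
# observed f0 values.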
def save_ckpt(model, path, model_class, f0_min, f0_max, f0_bins, speaker_stats):
ckpt = {
"state_dict": model.state_dict(),
"padding_token": model.padding_token,
"model_class": model_class,
"speaker_stats": speaker_stats,
"f0_min": f0_min,
"f0_max": f0_max,
"f0_bins": f0_bins,
}
torch.save(ckpt, path)
def load_ckpt(path):
ckpt = torch.load(path)
ckpt["model_class"]["_target_"] = "emotion_models.pitch_predictor.CnnPredictor"
model = hydra.utils.instantiate(ckpt["model_class"])
model.load_state_dict(ckpt["state_dict"])
model.setup_f0_stats(
ckpt["f0_min"],
ckpt["f0_max"],
ckpt["f0_bins"],
ckpt["speaker_stats"],
)
return model
def freq2bin(f0, f0_min, f0_max, bins):
f0 = f0.clone()
f0[f0 < f0_min] = f0_min
f0[f0 > f0_max] = f0_max
f0 = torch.bucketize(f0, bins)
return f0
def bin2freq(x, f0_min, f0_max, bins, mode):
n_bins = len(bins) + 1
assert x.shape[-1] == n_bins
bins = torch.cat([torch.tensor([f0_min]), bins]).to(x.device)
if mode == "mean":
f0 = (x * bins).sum(-1, keepdims=True) / x.sum(-1, keepdims=True)
elif mode == "argmax":
idx = F.one_hot(x.argmax(-1), num_classes=n_bins)
f0 = (idx * bins).sum(-1, keepdims=True)
else:
raise NotImplementedError()
return f0[..., 0]
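# Illustrative round-trip sketch (not part of the original file; numbers are made
# up): with bins = torch.tensor([100., 150., 200.]), f0_min=80. and f0_max=250.,
# freq2bin(torch.tensor([90., 160.]), 80., 250., bins) -> tensor([0, 2]); feeding
# one-hot vectors over the 4 classes back through bin2freq(..., mode="argmax")
# maps class 0 -> 80. (f0_min) and class 2 -> 150., i.e. the lower edge of each bin.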
def load_wav(full_path):
sampling_rate, data = read(full_path)
return data, sampling_rate
def l1_loss(input, target):
return F.l1_loss(input=input.float(), target=target.float(), reduce=False)
def l2_loss(input, target):
return F.mse_loss(input=input.float(), target=target.float(), reduce=False)
class Collator:
def __init__(self, padding_idx):
self.padding_idx = padding_idx
def __call__(self, batch):
tokens = [item[0] for item in batch]
lengths = [len(item) for item in tokens]
tokens = torch.nn.utils.rnn.pad_sequence(
tokens, batch_first=True, padding_value=self.padding_idx
)
f0 = [item[1] for item in batch]
f0 = torch.nn.utils.rnn.pad_sequence(
f0, batch_first=True, padding_value=self.padding_idx
)
f0_raw = [item[2] for item in batch]
f0_raw = torch.nn.utils.rnn.pad_sequence(
f0_raw, batch_first=True, padding_value=self.padding_idx
)
spk = [item[3] for item in batch]
spk = torch.LongTensor(spk)
gst = [item[4] for item in batch]
gst = torch.LongTensor(gst)
mask = tokens != self.padding_idx
return tokens, f0, f0_raw, spk, gst, mask, lengths
class CnnPredictor(nn.Module):
def __init__(
self,
n_tokens,
emb_dim,
channels,
kernel,
dropout,
n_layers,
spk_emb,
gst_emb,
n_bins,
f0_pred,
f0_log,
f0_norm,
):
super(CnnPredictor, self).__init__()
self.n_tokens = n_tokens
self.emb_dim = emb_dim
self.f0_log = f0_log
self.f0_pred = f0_pred
self.padding_token = n_tokens
self.f0_norm = f0_norm
# add 1 extra embedding for padding token, set the padding index to be the last token
# (tokens from the clustering start at index 0)
self.token_emb = nn.Embedding(
n_tokens + 1, emb_dim, padding_idx=self.padding_token
)
self.spk_emb = spk_emb
self.gst_emb = nn.Embedding(20, gst_emb)
self.setup = False
feats = emb_dim + gst_emb
# feats = emb_dim + gst_emb + (256 if spk_emb else 0)
layers = [
nn.Sequential(
Rearrange("b t c -> b c t"),
nn.Conv1d(
feats, channels, kernel_size=kernel, padding=(kernel - 1) // 2
),
Rearrange("b c t -> b t c"),
nn.ReLU(),
nn.LayerNorm(channels),
nn.Dropout(dropout),
)
]
for _ in range(n_layers - 1):
layers += [
nn.Sequential(
Rearrange("b t c -> b c t"),
nn.Conv1d(
channels,
channels,
kernel_size=kernel,
padding=(kernel - 1) // 2,
),
Rearrange("b c t -> b t c"),
nn.ReLU(),
nn.LayerNorm(channels),
nn.Dropout(dropout),
)
]
self.conv_layer = nn.ModuleList(layers)
self.proj = nn.Linear(channels, n_bins)
def forward(self, x, gst=None):
x = self.token_emb(x)
feats = [x]
if gst is not None:
gst = self.gst_emb(gst)
gst = rearrange(gst, "b c -> b c 1")
gst = F.interpolate(gst, x.shape[1])
gst = rearrange(gst, "b c t -> b t c")
feats.append(gst)
x = torch.cat(feats, dim=-1)
for i, conv in enumerate(self.conv_layer):
if i != 0:
x = conv(x) + x
else:
x = conv(x)
x = self.proj(x)
x = x.squeeze(-1)
if self.f0_pred == "mean":
x = torch.sigmoid(x)
elif self.f0_pred == "argmax":
x = torch.softmax(x, dim=-1)
else:
raise NotImplementedError
return x
def setup_f0_stats(self, f0_min, f0_max, f0_bins, speaker_stats):
self.f0_min = f0_min
self.f0_max = f0_max
self.f0_bins = f0_bins
self.speaker_stats = speaker_stats
self.setup = True
def inference(self, x, spk_id=None, gst=None):
assert (
self.setup == True
), "make sure that `setup_f0_stats` was called before inference!"
probs = self(x, gst)
f0 = bin2freq(probs, self.f0_min, self.f0_max, self.f0_bins, self.f0_pred)
for i in range(f0.shape[0]):
mean = (
self.speaker_stats[spk_id[i].item()].mean_log
if self.f0_log
else self.speaker_stats[spk_id[i].item()].mean
)
std = (
self.speaker_stats[spk_id[i].item()].std_log
if self.f0_log
else self.speaker_stats[spk_id[i].item()].std
)
if self.f0_norm == "mean":
f0[i] = f0[i] + mean
if self.f0_norm == "meanstd":
f0[i] = (f0[i] * std) + mean
if self.f0_log:
f0 = f0.exp()
return f0
class PitchDataset(Dataset):
def __init__(
self,
tsv_path,
km_path,
substring,
spk,
spk2id,
gst,
gst2id,
f0_bins,
f0_bin_type,
f0_smoothing,
f0_norm,
f0_log,
):
lines = open(tsv_path, "r").readlines()
self.root, self.tsv = lines[0], lines[1:]
self.root = self.root.strip()
self.km = open(km_path, "r").readlines()
print(f"loaded {len(self.km)} files")
self.spk = spk
self.spk2id = spk2id
self.gst = gst
self.gst2id = gst2id
self.f0_bins = f0_bins
self.f0_smoothing = f0_smoothing
self.f0_norm = f0_norm
self.f0_log = f0_log
if substring != "":
tsv, km = [], []
for tsv_line, km_line in zip(self.tsv, self.km):
if substring.lower() in tsv_line.lower():
tsv.append(tsv_line)
km.append(km_line)
self.tsv, self.km = tsv, km
print(f"after filtering: {len(self.km)} files")
self.speaker_stats = self._compute_f0_stats()
self.f0_min, self.f0_max = self._compute_f0_minmax()
if f0_bin_type == "adaptive":
self.f0_bins = quantize_f0(
self.speaker_stats, self.f0_bins, self.f0_norm, self.f0_log
)
elif f0_bin_type == "uniform":
self.f0_bins = torch.linspace(self.f0_min, self.f0_max, self.f0_bins + 1)[
1:-1
]
else:
raise NotImplementedError
print(f"f0 min: {self.f0_min}, f0 max: {self.f0_max}")
print(f"bins: {self.f0_bins} (shape: {self.f0_bins.shape})")
def __len__(self):
return len(self.km)
def _load_f0(self, tsv_line):
tsv_line = tsv_line.split("\t")[0]
f0 = self.root + "/" + tsv_line.replace(".wav", ".yaapt.f0.npy")
f0 = np.load(f0)
f0 = torch.FloatTensor(f0)
return f0
def _preprocess_f0(self, f0, spk):
mask = f0 != -999999 # process all frames
# mask = (f0 != 0) # only process voiced frames
mean = (
self.speaker_stats[spk].mean_log
if self.f0_log
else self.speaker_stats[spk].mean
)
std = (
self.speaker_stats[spk].std_log
if self.f0_log
else self.speaker_stats[spk].std
)
if self.f0_log:
f0[f0 == 0] = 1e-5
f0[mask] = f0[mask].log()
if self.f0_norm == "mean":
f0[mask] = f0[mask] - mean
if self.f0_norm == "meanstd":
f0[mask] = (f0[mask] - mean) / std
return f0
def _compute_f0_minmax(self):
f0_min, f0_max = float("inf"), -float("inf")
for tsv_line in tqdm(self.tsv, desc="computing f0 minmax"):
spk = self.spk2id[parse_speaker(tsv_line, self.spk)]
f0 = self._load_f0(tsv_line)
f0 = self._preprocess_f0(f0, spk)
f0_min = min(f0_min, f0.min().item())
f0_max = max(f0_max, f0.max().item())
return f0_min, f0_max
def _compute_f0_stats(self):
from functools import partial
speaker_stats = defaultdict(partial(F0Stat, True))
for tsv_line in tqdm(self.tsv, desc="computing speaker stats"):
spk = self.spk2id[parse_speaker(tsv_line, self.spk)]
f0 = self._load_f0(tsv_line)
mask = f0 != 0
f0 = f0[mask] # compute stats only on voiced parts
speaker_stats[spk].update(f0)
return speaker_stats
def __getitem__(self, i):
x = self.km[i]
x = x.split(" ")
x = list(map(int, x))
x = torch.LongTensor(x)
gst = parse_style(self.tsv[i], self.gst)
gst = self.gst2id[gst]
spk = parse_speaker(self.tsv[i], self.spk)
spk = self.spk2id[spk]
f0_raw = self._load_f0(self.tsv[i])
f0 = self._preprocess_f0(f0_raw.clone(), spk)
f0 = F.interpolate(f0.unsqueeze(0).unsqueeze(0), x.shape[0])[0, 0]
f0_raw = F.interpolate(f0_raw.unsqueeze(0).unsqueeze(0), x.shape[0])[0, 0]
f0 = freq2bin(f0, f0_min=self.f0_min, f0_max=self.f0_max, bins=self.f0_bins)
f0 = F.one_hot(f0.long(), num_classes=len(self.f0_bins) + 1).float()
if self.f0_smoothing > 0:
f0 = torch.tensor(
gaussian_filter1d(f0.float().numpy(), sigma=self.f0_smoothing)
)
return x, f0, f0_raw, spk, gst
def train(cfg):
device = "cuda:0"
# add 1 extra embedding for padding token, set the padding index to be the last token
# (tokens from the clustering start at index 0)
padding_token = cfg.n_tokens
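    # e.g. with cfg.n_tokens == 100 (illustrative value), cluster units occupy ids 0..99 and id 100 is the padding index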
collate_fn = Collator(padding_idx=padding_token)
train_ds = PitchDataset(
cfg.train_tsv,
cfg.train_km,
substring=cfg.substring,
spk=cfg.spk,
spk2id=cfg.spk2id,
gst=cfg.gst,
gst2id=cfg.gst2id,
f0_bins=cfg.f0_bins,
f0_bin_type=cfg.f0_bin_type,
f0_smoothing=cfg.f0_smoothing,
f0_norm=cfg.f0_norm,
f0_log=cfg.f0_log,
)
valid_ds = PitchDataset(
cfg.valid_tsv,
cfg.valid_km,
substring=cfg.substring,
spk=cfg.spk,
spk2id=cfg.spk2id,
gst=cfg.gst,
gst2id=cfg.gst2id,
f0_bins=cfg.f0_bins,
f0_bin_type=cfg.f0_bin_type,
f0_smoothing=cfg.f0_smoothing,
f0_norm=cfg.f0_norm,
f0_log=cfg.f0_log,
)
train_dl = DataLoader(
train_ds,
num_workers=0,
batch_size=cfg.batch_size,
shuffle=True,
collate_fn=collate_fn,
)
valid_dl = DataLoader(
valid_ds, num_workers=0, batch_size=16, shuffle=False, collate_fn=collate_fn
)
f0_min = train_ds.f0_min
f0_max = train_ds.f0_max
f0_bins = train_ds.f0_bins
speaker_stats = train_ds.speaker_stats
model = hydra.utils.instantiate(cfg["model"]).to(device)
model.setup_f0_stats(f0_min, f0_max, f0_bins, speaker_stats)
optimizer = hydra.utils.instantiate(cfg.optimizer, model.parameters())
best_loss = float("inf")
for epoch in range(cfg.epochs):
train_loss, train_l2_loss, train_l2_voiced_loss = run_epoch(
model, train_dl, optimizer, device, cfg, mode="train"
)
valid_loss, valid_l2_loss, valid_l2_voiced_loss = run_epoch(
model, valid_dl, None, device, cfg, mode="valid"
)
print(
f"[epoch {epoch}] train loss: {train_loss:.3f}, l2 loss: {train_l2_loss:.3f}, l2 voiced loss: {train_l2_voiced_loss:.3f}"
)
print(
f"[epoch {epoch}] valid loss: {valid_loss:.3f}, l2 loss: {valid_l2_loss:.3f}, l2 voiced loss: {valid_l2_voiced_loss:.3f}"
)
if valid_l2_voiced_loss < best_loss:
path = f"{os.getcwd()}/pitch_predictor.ckpt"
save_ckpt(model, path, cfg["model"], f0_min, f0_max, f0_bins, speaker_stats)
best_loss = valid_l2_voiced_loss
print(f"saved checkpoint: {path}")
print(f"[epoch {epoch}] best loss: {best_loss:.3f}")
def run_epoch(model, loader, optimizer, device, cfg, mode):
if mode == "train":
model.train()
else:
model.eval()
epoch_loss = 0
l1 = 0
l1_voiced = 0
for x, f0_bin, f0_raw, spk_id, gst, mask, _ in tqdm(loader):
x, f0_bin, f0_raw, spk_id, gst, mask = (
x.to(device),
f0_bin.to(device),
f0_raw.to(device),
spk_id.to(device),
gst.to(device),
mask.to(device),
)
b, t, n_bins = f0_bin.shape
yhat = model(x, gst)
nonzero_mask = (f0_raw != 0).logical_and(mask)
yhat_raw = model.inference(x, spk_id, gst)
expanded_mask = mask.unsqueeze(-1).expand(-1, -1, n_bins)
if cfg.f0_pred == "mean":
loss = F.binary_cross_entropy(
yhat[expanded_mask], f0_bin[expanded_mask]
).mean()
elif cfg.f0_pred == "argmax":
loss = F.cross_entropy(
rearrange(yhat, "b t d -> (b t) d"),
rearrange(f0_bin.argmax(-1), "b t -> (b t)"),
                reduction="none",
)
loss = rearrange(loss, "(b t) -> b t", b=b, t=t)
loss = (loss * mask).sum() / mask.float().sum()
else:
raise NotImplementedError
l1 += F.l1_loss(yhat_raw[mask], f0_raw[mask]).item()
l1_voiced += F.l1_loss(yhat_raw[nonzero_mask], f0_raw[nonzero_mask]).item()
epoch_loss += loss.item()
if mode == "train":
loss.backward()
nn.utils.clip_grad_norm_(model.parameters(), 1.0)
optimizer.step()
print(f"{mode} example y: {f0_bin.argmax(-1)[0, 50:60].tolist()}")
print(f"{mode} example yhat: {yhat.argmax(-1)[0, 50:60].tolist()}")
print(f"{mode} example y: {f0_raw[0, 50:60].round().tolist()}")
print(f"{mode} example yhat: {yhat_raw[0, 50:60].round().tolist()}")
return epoch_loss / len(loader), l1 / len(loader), l1_voiced / len(loader)
@hydra.main(config_path=dir_path, config_name="pitch_predictor.yaml")
def main(cfg):
np.random.seed(1)
random.seed(1)
torch.manual_seed(1)
from hydra.core.hydra_config import HydraConfig
overrides = {
x.split("=")[0]: x.split("=")[1]
for x in HydraConfig.get().overrides.task
if "/" not in x
}
print(f"{cfg}")
train(cfg)
if __name__ == "__main__":
main()
| EXA-1-master | exa/libraries/fairseq/examples/emotion_conversion/emotion_models/pitch_predictor.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from fairseq import utils
from fairseq.models import (
FairseqMultiModel,
register_model,
register_model_architecture,
)
from fairseq.models.transformer import (
Embedding,
base_architecture,
)
from fairseq.models.multilingual_transformer import (
MultilingualTransformerModel,
base_multilingual_architecture,
)
from fairseq.utils import safe_hasattr
from collections import OrderedDict
@register_model("multilingual_transformer_from_mbart")
class MultilingualTransformerModelFromMbart(MultilingualTransformerModel):
@classmethod
def build_model(cls, args, task):
"""Build a new model instance."""
from fairseq.tasks.multilingual_translation import MultilingualTranslationTask
assert isinstance(task, MultilingualTranslationTask)
# make sure all arguments are present in older models
base_multilingual_architecture(args)
if not safe_hasattr(args, "max_source_positions"):
args.max_source_positions = 1024
if not safe_hasattr(args, "max_target_positions"):
args.max_target_positions = 1024
src_langs = [lang_pair.split("-")[0] for lang_pair in task.model_lang_pairs]
tgt_langs = [lang_pair.split("-")[1] for lang_pair in task.model_lang_pairs]
if args.share_encoders:
args.share_encoder_embeddings = True
if args.share_decoders:
args.share_decoder_embeddings = True
def build_embedding(dictionary, embed_dim, path=None):
num_embeddings = len(dictionary)
padding_idx = dictionary.pad()
emb = Embedding(num_embeddings, embed_dim, padding_idx)
# if provided, load from preloaded dictionaries
if path:
embed_dict = utils.parse_embedding(path)
utils.load_embedding(embed_dict, dictionary, emb)
return emb
# build shared embeddings (if applicable)
shared_encoder_embed_tokens, shared_decoder_embed_tokens = None, None
if args.share_all_embeddings:
if args.encoder_embed_dim != args.decoder_embed_dim:
raise ValueError(
"--share-all-embeddings requires --encoder-embed-dim to match --decoder-embed-dim"
)
if args.decoder_embed_path and (
args.decoder_embed_path != args.encoder_embed_path
):
raise ValueError(
"--share-all-embeddings not compatible with --decoder-embed-path"
)
shared_encoder_embed_tokens = FairseqMultiModel.build_shared_embeddings(
dicts=task.dicts,
langs=task.langs,
embed_dim=args.encoder_embed_dim,
build_embedding=build_embedding,
pretrained_embed_path=args.encoder_embed_path,
)
shared_decoder_embed_tokens = shared_encoder_embed_tokens
args.share_decoder_input_output_embed = True
else:
if args.share_encoder_embeddings:
shared_encoder_embed_tokens = FairseqMultiModel.build_shared_embeddings(
dicts=task.dicts,
langs=src_langs,
embed_dim=args.encoder_embed_dim,
build_embedding=build_embedding,
pretrained_embed_path=args.encoder_embed_path,
)
if args.share_decoder_embeddings:
shared_decoder_embed_tokens = FairseqMultiModel.build_shared_embeddings(
dicts=task.dicts,
langs=tgt_langs,
embed_dim=args.decoder_embed_dim,
build_embedding=build_embedding,
pretrained_embed_path=args.decoder_embed_path,
)
# encoders/decoders for each language
lang_encoders, lang_decoders = {}, {}
def get_encoder(lang):
if lang not in lang_encoders:
if shared_encoder_embed_tokens is not None:
encoder_embed_tokens = shared_encoder_embed_tokens
else:
encoder_embed_tokens = build_embedding(
task.dicts[lang],
args.encoder_embed_dim,
args.encoder_embed_path,
)
lang_encoders[lang] = MultilingualTransformerModel._get_module_class(
True, args, task.dicts[lang], encoder_embed_tokens, src_langs
)
return lang_encoders[lang]
def get_decoder(lang):
if lang not in lang_decoders:
if shared_decoder_embed_tokens is not None:
decoder_embed_tokens = shared_decoder_embed_tokens
else:
decoder_embed_tokens = build_embedding(
task.dicts[lang],
args.decoder_embed_dim,
args.decoder_embed_path,
)
lang_decoders[lang] = MultilingualTransformerModel._get_module_class(
False, args, task.dicts[lang], decoder_embed_tokens, tgt_langs
)
return lang_decoders[lang]
# shared encoders/decoders (if applicable)
shared_encoder, shared_decoder = None, None
if args.share_encoders:
shared_encoder = get_encoder(src_langs[0])
if args.share_decoders:
shared_decoder = get_decoder(tgt_langs[0])
encoders, decoders = OrderedDict(), OrderedDict()
for lang_pair, src, tgt in zip(task.model_lang_pairs, src_langs, tgt_langs):
encoders[lang_pair] = (
shared_encoder if shared_encoder is not None else get_encoder(src)
)
decoders[lang_pair] = (
shared_decoder if shared_decoder is not None else get_decoder(tgt)
)
return MultilingualTransformerModelFromMbart(encoders, decoders)
def load_state_dict(self, state_dict, strict=True, model_cfg=None):
state_dict_subset = state_dict.copy()
lang_pairs = set([x.split(".")[1] for x in state_dict.keys()])
finetune_mode = not any("neutral" in lp for lp in lang_pairs)
if finetune_mode:
# load a pre-trained mBART/BART model
# we need this code because mBART/BART are not of type FairseqMultiModel but FairseqModel
# so we hackishly load the weights by replicating them for all lang pairs
print("loading pre-trained BART")
self_state_dict = self.state_dict()
for k, v in state_dict.items():
for lang_pair in self.models:
new_key = k if "models." in k else f"models.{lang_pair}.{k}"
# print(new_key)
if self_state_dict[new_key].shape == v.shape:
state_dict_subset[new_key] = v
elif any(
w in k
for w in [
"encoder.embed_tokens.weight",
"decoder.embed_tokens.weight",
"decoder.output_projection.weight",
]
):
                    # why vocab_size - 5? the denoising (mBART) checkpoint has `vocab_size`
                    # language tokens plus 5 specials (eos, bos, pad, unk, mask), while the
                    # translation task uses only `vocab_size` + 4 of them (no mask).
print(
f"{k}: {self_state_dict[new_key].shape} != {v.shape}",
end="",
flush=True,
)
vocab_size = v.shape[0] - 5
state_dict_subset[new_key] = self_state_dict[new_key]
state_dict_subset[new_key] = v[: vocab_size + 4]
print(f" => fixed by using first {vocab_size + 4} dims")
else:
raise ValueError("unable to load model due to mimatched dims!")
del state_dict_subset[k]
else:
print("loading pre-trained emotion translation model")
for k, _ in state_dict.items():
assert k.startswith("models.")
lang_pair = k.split(".")[1]
if lang_pair not in self.models:
del state_dict_subset[k]
super().load_state_dict(state_dict_subset, strict=strict, model_cfg=model_cfg)
@register_model_architecture("transformer", "transformer_small")
def transformer_small(args):
args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 512)
args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 512)
args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 4)
args.encoder_layers = getattr(args, "encoder_layers", 3)
args.decoder_embed_dim = getattr(args, "decoder_embed_dim", 512)
args.decoder_ffn_embed_dim = getattr(args, "decoder_ffn_embed_dim", 512)
args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 4)
args.decoder_layers = getattr(args, "decoder_layers", 3)
base_architecture(args)
@register_model_architecture(
"multilingual_transformer_from_mbart", "multilingual_small"
)
def multilingual_small(args):
args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 512)
args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 512)
args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 4)
args.encoder_layers = getattr(args, "encoder_layers", 3)
args.decoder_embed_dim = getattr(args, "decoder_embed_dim", 512)
args.decoder_ffn_embed_dim = getattr(args, "decoder_ffn_embed_dim", 512)
args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 4)
args.decoder_layers = getattr(args, "decoder_layers", 3)
base_multilingual_architecture(args)
| EXA-1-master | exa/libraries/fairseq/examples/emotion_conversion/fairseq_models/__init__.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from fairseq.tasks import register_task
from fairseq.tasks.multilingual_translation import MultilingualTranslationTask
from fairseq.utils import safe_hasattr
from .loss.latent_depth import LatentLayersKLLoss, LatentLayersSparsityLoss
@register_task("multilingual_translation_latent_depth")
class MultilingualTranslationTaskLatentDepth(MultilingualTranslationTask):
"""A task for multiple translation with latent depth.
See `"Deep Transformer with Latent Depth"
(Li et al., 2020) <https://arxiv.org/pdf/2009.13102.pdf>`_.
"""
@staticmethod
def add_args(parser):
"""Add task-specific arguments to the parser."""
# fmt: off
MultilingualTranslationTask.add_args(parser)
parser.add_argument('--encoder-latent-layer', action='store_true', help='latent layer selection in encoder')
parser.add_argument('--decoder-latent-layer', action='store_true', help='latent layer selection in decoder')
parser.add_argument('--target-layers', default=-1, type=int,
help='number of effective layers to learn; -1 means no constraint')
parser.add_argument('--sparsity-weight', default=0.0, type=float,
help='weight for sparsity loss')
parser.add_argument('--share-weight', default=0.0, type=float,
help='weight for sharing loss')
parser.add_argument('--soft-update', default=1, type=int,
help='number of updates with soft sampling')
parser.add_argument('--anneal-updates', default=1, type=int,
help='number of updates to anneal the KL loss weight')
parser.add_argument('--prior', default="uniform", type=str,
help='prior used for computing KL loss')
# fmt: on
def __init__(self, args, dicts, training):
super().__init__(args, dicts, training)
self.src_langs, self.tgt_langs = zip(
*[(lang.split("-")[0], lang.split("-")[1]) for lang in args.lang_pairs]
)
if self.training and self.encoder_latent_layer:
assert self.args.share_encoders
if self.training and self.decoder_latent_layer:
assert self.args.share_decoders
if training or self.encoder_latent_layer or self.decoder_latent_layer:
self.lang_pairs = args.lang_pairs
else:
self.lang_pairs = ["{}-{}".format(args.source_lang, args.target_lang)]
self.eval_lang_pairs = self.lang_pairs
self.model_lang_pairs = self.lang_pairs
if self.training and (self.encoder_latent_layer or self.decoder_latent_layer):
self.kl_loss = LatentLayersKLLoss(self.args)
self.sparsity_loss = LatentLayersSparsityLoss(self.args)
def _per_lang_pair_train_loss(
self, lang_pair, model, update_num, criterion, sample, optimizer, ignore_grad
):
src, tgt = lang_pair.split("-")
if self.encoder_latent_layer:
src_lang_idx = self.src_lang_idx_dict[src]
model.models[lang_pair].encoder.set_lang_idx(src_lang_idx)
model.models[lang_pair].encoder.layer_select.hard_select = (
update_num > self.args.soft_update
)
if self.decoder_latent_layer:
tgt_lang_idx = self.tgt_lang_idx_dict[tgt]
model.models[lang_pair].decoder.set_lang_idx(tgt_lang_idx)
model.models[lang_pair].decoder.layer_select.hard_select = (
update_num > self.args.soft_update
)
loss, sample_size, logging_output = criterion(
model.models[lang_pair], sample[lang_pair]
)
if self.encoder_latent_layer:
none_samples = sum(
1 if x is None else 0
for x in model.models[lang_pair].encoder.layer_select.layer_samples
)
if none_samples == 0 or self.args.prior != "agged_posterior":
loss += self.kl_loss(
model.models[lang_pair].encoder.layer_select.layer_samples,
src_lang_idx,
update_num,
sample_size,
)
if self.decoder_latent_layer:
none_samples = sum(
1 if x is None else 0
for x in model.models[lang_pair].decoder.layer_select.layer_samples
)
if none_samples == 0 or self.args.prior != "agged_posterior":
loss += self.kl_loss(
model.models[lang_pair].decoder.layer_select.layer_samples,
tgt_lang_idx,
update_num,
sample_size,
)
if ignore_grad:
loss *= 0
if hasattr(self, "sparsity_loss") and self.sparsity_loss.is_valid(update_num):
# need to retain the graph if sparsity loss needs to be added
loss.backward(retain_graph=True)
else:
optimizer.backward(loss)
return loss, sample_size, logging_output
def train_step(
self, sample, model, criterion, optimizer, update_num, ignore_grad=False
):
agg_loss, agg_sample_size, agg_logging_output = super().train_step(
sample, model, criterion, optimizer, update_num, ignore_grad
)
        # compute auxiliary loss from layer sparsity, based on all samples from all languages
if hasattr(self, "sparsity_loss") and self.sparsity_loss.is_valid(update_num):
sparsity_loss = 0
if self.encoder_latent_layer:
sparsity_loss += self.sparsity_loss(
next(
iter(model.models.values())
).encoder.layer_select.layer_samples,
update_num,
agg_sample_size,
)
if self.decoder_latent_layer:
sparsity_loss += self.sparsity_loss(
next(
iter(model.models.values())
).decoder.layer_select.layer_samples,
update_num,
agg_sample_size,
)
if sparsity_loss > 0:
optimizer.backward(sparsity_loss)
return agg_loss, agg_sample_size, agg_logging_output
def _per_lang_pair_valid_loss(self, lang_pair, model, criterion, sample):
src, tgt = lang_pair.split("-")
if self.encoder_latent_layer:
src_lang_idx = self.src_lang_idx_dict[src]
model.models[lang_pair].encoder.set_lang_idx(src_lang_idx)
if self.decoder_latent_layer:
tgt_lang_idx = self.tgt_lang_idx_dict[tgt]
model.models[lang_pair].decoder.set_lang_idx(tgt_lang_idx)
loss, sample_size, logging_output = criterion(
model.models[lang_pair], sample[lang_pair]
)
return loss, sample_size, logging_output
def inference_step(
self, generator, models, sample, prefix_tokens=None, constraints=None
):
if self.encoder_latent_layer or self.decoder_latent_layer:
for model in models:
if self.encoder_latent_layer:
assert model.encoder.layer_select is not None
src_lang_idx = self.src_lang_idx_dict[self.args.source_lang]
model.encoder.set_lang_idx(src_lang_idx)
if self.decoder_latent_layer:
assert model.decoder.layer_select is not None
tgt_lang_idx = self.tgt_lang_idx_dict[self.args.target_lang]
model.decoder.set_lang_idx(tgt_lang_idx)
return super().inference_step(
generator, models, sample, prefix_tokens, constraints
)
@property
def encoder_latent_layer(self):
return (
safe_hasattr(self.args, "encoder_latent_layer")
and self.args.encoder_latent_layer
)
@property
def decoder_latent_layer(self):
return (
safe_hasattr(self.args, "decoder_latent_layer")
and self.args.decoder_latent_layer
)
@property
def src_lang_idx_dict(self):
return {lang: lang_idx for lang_idx, lang in enumerate(self.src_langs)}
@property
def tgt_lang_idx_dict(self):
return {lang: lang_idx for lang_idx, lang in enumerate(self.tgt_langs)}
| EXA-1-master | exa/libraries/fairseq/examples/latent_depth/latent_depth_src/multilingual_translation_latent_depth.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from . import multilingual_translation_latent_depth # noqa
from .loss import latent_depth # noqa
from .models import latent_multilingual_transformer # noqa
from .modules import latent_layers # noqa
| EXA-1-master | exa/libraries/fairseq/examples/latent_depth/latent_depth_src/__init__.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import math
import torch
from torch.nn.modules.loss import _Loss
class LatentLayersKLLoss(_Loss):
def __init__(self, args):
super().__init__()
self.args = args
def forward(self, layer_samples, lang_idx, update_num, sample_size):
prior = self.args.prior
samples = layer_samples[lang_idx]
eps = 1e-7
if prior == "uniform":
# uniform prior
kl_loss = (samples * (torch.log(samples + eps) - math.log(0.5))).sum(-1)
elif prior == "agged_posterior":
# aggregated posterior
y_t = torch.stack([x.detach() for x in layer_samples], dim=0)
agged_q = torch.sum(y_t, dim=0)
row_norm = agged_q.sum(-1)
normed_agg_q = agged_q / row_norm
kl_loss = (
samples * (torch.log(samples + eps) - torch.log(normed_agg_q + eps))
).sum(-1)
else:
raise NotImplementedError("The specified prior is not implemented.")
# normalized by number of layers
kl_loss /= layer_samples[0].size()[0]
kl_weight = min(
self.args.sparsity_weight,
(update_num - self.args.soft_update)
* self.args.sparsity_weight
/ self.args.anneal_updates,
)
kl_loss *= kl_weight * sample_size
return kl_loss
class LatentLayersSparsityLoss(_Loss):
def __init__(self, args):
super().__init__()
self.args = args
def is_valid(self, update_num):
if self.args.target_layers <= 0:
return False
return update_num > (self.args.soft_update + self.args.anneal_updates)
def forward(self, layer_samples_list, update_num, sample_size):
batch_loss = 0
share_loss = 0
global_sparsity_loss = 0
layer_samples = torch.stack(layer_samples_list, dim=0)
if (
self.args.target_layers > 0 or self.args.share_weight > 0
) and update_num > (self.args.soft_update + self.args.anneal_updates):
# anneal sparsity weight
if update_num < (self.args.anneal_updates + self.args.soft_update):
weight_anneal = 0
elif update_num < (2 * self.args.anneal_updates + self.args.soft_update):
weight_anneal = (
(update_num - self.args.soft_update - self.args.anneal_updates)
* self.args.share_weight
/ self.args.anneal_updates
)
else:
weight_anneal = 1
# compute ratio among languages
layer_utilization = torch.sum(layer_samples, dim=0)
layer_utilization /= layer_samples.size()[0]
if self.args.share_weight > 0:
# encouraging sharing across languages
share_loss = sum(
-1.0 * v * math.log(v) for v in layer_utilization if v > 0
)
batch_loss += (
weight_anneal * self.args.share_weight * sample_size * share_loss
)
if self.args.target_layers > 0:
                # compute the expected number of layers selected
                expected_layers = sum(layer_utilization)
                # compute l2 loss wrt target number of layers
                global_sparsity_loss = (expected_layers - self.args.target_layers) ** 2
batch_loss += (
weight_anneal
* self.args.share_weight
* sample_size
* global_sparsity_loss
)
return batch_loss
| EXA-1-master | exa/libraries/fairseq/examples/latent_depth/latent_depth_src/loss/latent_depth.py |
EXA-1-master | exa/libraries/fairseq/examples/latent_depth/latent_depth_src/loss/__init__.py |
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from fairseq.models import register_model, register_model_architecture
from fairseq.models.multilingual_transformer import MultilingualTransformerModel
from fairseq.models.transformer import (
TransformerDecoder,
TransformerEncoder,
base_architecture,
)
from fairseq.utils import safe_hasattr
from .latent_transformer import LatentTransformerDecoder, LatentTransformerEncoder
@register_model("latent_multilingual_transformer")
class LatentMultilingualTransformerModel(MultilingualTransformerModel):
"""A variant of standard multilingual Transformer models which encoder and/or
decoders supports latent depth, as is in "Deep Transformer with Latent Depth"
(https://arxiv.org/abs/2009.13102).
"""
@staticmethod
def add_args(parser):
"""Add model-specific arguments to the parser."""
MultilingualTransformerModel.add_args(parser)
parser.add_argument(
'--soft-select',
action='store_true',
        help='use soft samples in training and inference',
)
parser.add_argument(
'--sampling-tau',
type=float,
default=5.,
help='sampling temperature',
)
@classmethod
def _get_module_class(cls, is_encoder, args, lang_dict, embed_tokens, langs):
if is_encoder:
if safe_hasattr(args, "encoder_latent_layer") and args.encoder_latent_layer:
return LatentTransformerEncoder(
args, lang_dict, embed_tokens, num_logits=len(langs)
)
else:
return TransformerEncoder(args, lang_dict, embed_tokens)
else:
if safe_hasattr(args, "decoder_latent_layer") and args.decoder_latent_layer:
return LatentTransformerDecoder(
args, lang_dict, embed_tokens, num_logits=len(langs)
)
else:
return TransformerDecoder(args, lang_dict, embed_tokens)
@register_model_architecture(
"latent_multilingual_transformer", "latent_multilingual_transformer"
)
def latent_multilingual_architecture(args):
args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 512)
args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 1024)
args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 4)
args.encoder_layers = getattr(args, "encoder_layers", 12)
args.decoder_embed_dim = getattr(args, "decoder_embed_dim", 512)
args.decoder_ffn_embed_dim = getattr(args, "decoder_ffn_embed_dim", 1024)
args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 4)
args.decoder_layers = getattr(args, "decoder_layers", 24)
args.share_encoders = getattr(args, "share_encoders", True)
args.share_decoders = getattr(args, "share_decoders", True)
args.share_encoder_embeddings = getattr(args, "share_encoder_embeddings", True)
args.share_decoder_embeddings = getattr(args, "share_decoder_embeddings", True)
base_architecture(args)
| EXA-1-master | exa/libraries/fairseq/examples/latent_depth/latent_depth_src/models/latent_multilingual_transformer.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from typing import Any, Dict, Optional
import torch.nn as nn
from fairseq.models.fairseq_encoder import EncoderOut
from fairseq.models.transformer import TransformerDecoder, TransformerEncoder
from fairseq.modules import TransformerDecoderLayer, TransformerEncoderLayer
from torch import Tensor
from ..modules.latent_layers import LayerSelect
class LatentTransformerEncoder(TransformerEncoder):
"""Latent depth (https://arxiv.org/abs/2009.13102) implemented in
TransformerEncoder.
"""
def __init__(self, args, dictionary, embed_tokens, num_logits=1):
self.num_logits = num_logits
self.num_layers = args.encoder_layers
super().__init__(args, dictionary, embed_tokens)
self.layer_select = LayerSelect(
num_layers=self.num_layers,
num_logits=self.num_logits,
soft_select=getattr(args, "soft_select", False),
sampling_tau=getattr(args, "sampling_tau", 5.),
)
self.lang_idx = None
self.layers = nn.ModuleList(
[self._build_encoder_layer(args, idx) for idx in range(args.encoder_layers)]
)
def set_lang_idx(self, lang_idx):
self.lang_idx = lang_idx
def _build_encoder_layer(self, args, idx=None):
return LatentTransformerEncoderLayer(args, idx, layer_select=self.layer_select)
def forward(self, src_tokens, src_lengths, return_all_hiddens: bool = False):
self.layer_select.sample(self.lang_idx)
return super().forward(src_tokens, src_lengths, return_all_hiddens)
class LatentTransformerEncoderLayer(TransformerEncoderLayer):
"""Encoder layer with each (non_residual) block weighted by samples of Bernouli
or Gumbel Signmoid samples.
Args:
args (argparse.Namespace): parsed command-line arguments from standard
TransformerEncoderLayer.
idx (int): layer index (used to retrieve samples).
layer_select (LayerSelect, optional): instance of LayerSelect module with logits
parameters and sampling method.
"""
def __init__(self, args, idx, layer_select=None):
super().__init__(args)
self.idx = idx
self.layer_select = layer_select
def residual_connection(self, x, residual):
return residual + x * self.layer_select(self.idx)
class LatentTransformerDecoder(TransformerDecoder):
"""Latent depth (https://arxiv.org/abs/2009.13102) implemented in
TransformerDecoder.
"""
def __init__(
self, args, dictionary, embed_tokens, no_encoder_attn=False, num_logits=1
):
self.num_logits = num_logits
self.num_layers = args.decoder_layers
super().__init__(
args, dictionary, embed_tokens, no_encoder_attn=no_encoder_attn
)
self.layer_select = LayerSelect(
num_layers=self.num_layers,
num_logits=self.num_logits,
soft_select=getattr(args, "soft_select", False),
sampling_tau=getattr(args, "sampling_tau", 5.),
)
self.lang_idx = None
self.layers = nn.ModuleList(
[
self._build_decoder_layer(args, no_encoder_attn, idx)
for idx in range(args.decoder_layers)
]
)
def set_lang_idx(self, lang_idx):
self.lang_idx = lang_idx
def _build_decoder_layer(self, args, no_encoder_attn=False, idx=None):
return LatentTransformerDecoderLayer(
args, idx, layer_select=self.layer_select, no_encoder_attn=no_encoder_attn
)
def forward(
self,
prev_output_tokens,
encoder_out: Optional[EncoderOut] = None,
incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]] = None,
features_only: bool = False,
alignment_layer: Optional[int] = None,
alignment_heads: Optional[int] = None,
src_lengths: Optional[Any] = None,
return_all_hiddens: bool = False,
):
self.layer_select.sample(self.lang_idx)
return super().forward(
prev_output_tokens=prev_output_tokens,
encoder_out=encoder_out,
incremental_state=incremental_state,
features_only=features_only,
alignment_layer=alignment_layer,
src_lengths=src_lengths,
return_all_hiddens=return_all_hiddens,
)
class LatentTransformerDecoderLayer(TransformerDecoderLayer):
"""Decoder layer with each (non_residual) block weighted by samples of Bernouli
or Gumbel Signmoid samples.
Args:
args (argparse.Namespace): parsed command-line arguments from standard
TransformerDecoderLayer.
idx (int): layer index (used to retrieve samples).
layer_select (LayerSelect, optional): instance of LayerSelect module with logits
parameters and sampling method.
no_encoder_attn (bool, optional): whether to attend to encoder outputs
(default: False).
"""
def __init__(
self,
args,
idx,
layer_select=None,
no_encoder_attn=False,
add_bias_kv=False,
add_zero_attn=False,
):
super().__init__(args, no_encoder_attn, add_bias_kv, add_zero_attn)
self.idx = idx
self.layer_select = layer_select
def residual_connection(self, x, residual):
return residual + x * self.layer_select(self.idx)
| EXA-1-master | exa/libraries/fairseq/examples/latent_depth/latent_depth_src/models/latent_transformer.py |
EXA-1-master | exa/libraries/fairseq/examples/latent_depth/latent_depth_src/models/__init__.py |
|
EXA-1-master | exa/libraries/fairseq/examples/latent_depth/latent_depth_src/modules/__init__.py |
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
import torch.nn as nn
class LayerSelect(nn.Module):
"""Compute samples (from a Gumbel-Sigmoid distribution) which is used as
either (soft) weighting or (hard) selection of residual connection.
https://arxiv.org/abs/2009.13102
"""
def __init__(self, num_layers, num_logits, soft_select=False, sampling_tau=5.):
super(LayerSelect, self).__init__()
self.layer_logits = torch.nn.Parameter(
torch.Tensor(num_logits, num_layers),
requires_grad=True,
)
self.hard_select = not soft_select
self.tau = sampling_tau
self.detach_grad = False
self.layer_samples = [None] * num_logits
def sample(self, logit_idx):
"""To leverage the efficiency of distributed training, samples for all
layers are computed at once for each logit_idx. Logits are parameters
        learnt independently of each other.
Args:
logit_idx: The index of logit parameters used for sampling.
"""
assert logit_idx is not None
self.samples = self._gumbel_sigmoid(
self.layer_logits[logit_idx, :].detach()
if self.detach_grad
else self.layer_logits[logit_idx, :],
dim=-1,
tau=self.tau,
hard=self.hard_select,
)
self.layer_samples[logit_idx] = self.samples
def forward(self, i):
sample = self.samples[i]
return sample
def _gumbel_sigmoid(
self, logits, tau=1, hard=False, eps=1e-10, dim=-1, threshold=0.5
):
# ~Gumbel(0,1)
gumbels1 = (
-torch.empty_like(logits, memory_format=torch.legacy_contiguous_format)
.exponential_()
.log()
)
gumbels2 = (
-torch.empty_like(logits, memory_format=torch.legacy_contiguous_format)
.exponential_()
.log()
)
        # the difference of two Gumbel variables is Logistic-distributed, so a
        # sigmoid of it yields a relaxed Bernoulli (Gumbel-Sigmoid) sample
gumbels1 = (logits + gumbels1 - gumbels2) / tau
y_soft = gumbels1.sigmoid()
if hard:
# Straight through.
y_hard = torch.zeros_like(
logits, memory_format=torch.legacy_contiguous_format
).masked_fill(y_soft > threshold, 1.0)
ret = y_hard - y_soft.detach() + y_soft
else:
# Reparametrization trick.
ret = y_soft
return ret
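
# Minimal usage sketch (illustrative only; num_layers/num_logits are made-up values):
# zero the logits, draw one set of hard Gumbel-Sigmoid gates for a given logit index,
# then read the per-layer gate used to weight each residual block.
if __name__ == "__main__":
    torch.manual_seed(0)
    selector = LayerSelect(num_layers=6, num_logits=2, soft_select=False)
    selector.layer_logits.data.zero_()  # the module leaves the logits uninitialized
    selector.sample(logit_idx=0)        # one sample shared by all 6 layers
    gates = [selector(i).item() for i in range(6)]  # each hard gate is 0.0 or 1.0
    print(gates)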
| EXA-1-master | exa/libraries/fairseq/examples/latent_depth/latent_depth_src/modules/latent_layers.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
Scoring script for computing pairwise BLEU and multi-ref BLEU over a set of
candidate hypotheses.
See `"Mixture Models for Diverse Machine Translation: Tricks of the Trade"
(Shen et al., 2019) <https://arxiv.org/abs/1902.07816>`_.
"""
import argparse
import random
import sys
from itertools import chain
import numpy as np
import sacrebleu
from sacrebleu import corpus_bleu as _corpus_bleu
def main():
parser = argparse.ArgumentParser(sys.argv[0])
parser.add_argument(
"--sys", nargs="*", default="", metavar="FILE", help="path to system output"
)
parser.add_argument("--ref", default="", metavar="FILE", help="path to references")
parser.add_argument(
"--output",
default="",
metavar="FILE",
help="print outputs into a pretty format",
)
args = parser.parse_args()
if args.sys:
src, tgt, hypos, log_probs = load_sys(args.sys)
print("pairwise BLEU: %.2f" % pairwise(hypos))
if args.output:
merge(src, tgt, hypos, log_probs, args.output)
if args.ref:
_, _, refs = load_ref(args.ref)
if args.sys:
multi_ref(refs, hypos)
else:
intra_ref(refs)
def dictolist(d):
a = sorted(d.items(), key=lambda i: i[0])
return [i[1] for i in a]
def load_sys(paths):
src, tgt, hypos, log_probs = {}, {}, {}, {}
for path in paths:
with open(path) as f:
for line in f:
line = line.rstrip()
# S: source
# T: target
# D: detokenized system output
if line.startswith(("S-", "T-", "D-")):
i = int(line[line.find("-") + 1 : line.find("\t")])
if line.startswith("S-"):
src[i] = line.split("\t")[1]
if line.startswith("T-"):
tgt[i] = line.split("\t")[1]
if line.startswith("D-"):
if i not in hypos:
hypos[i] = []
log_probs[i] = []
hypos[i].append(line.split("\t")[2])
log_probs[i].append(float(line.split("\t")[1]))
return dictolist(src), dictolist(tgt), dictolist(hypos), dictolist(log_probs)
def load_ref(path):
with open(path) as f:
lines = f.readlines()
src, tgt, refs = [], [], []
i = 0
while i < len(lines):
if lines[i].startswith("S-"):
src.append(lines[i].split("\t")[1].rstrip())
i += 1
elif lines[i].startswith("T-"):
tgt.append(lines[i].split("\t")[1].rstrip())
i += 1
else:
a = []
while i < len(lines) and lines[i].startswith("R"):
a.append(lines[i].split("\t")[1].rstrip())
i += 1
refs.append(a)
return src, tgt, refs
def merge(src, tgt, hypos, log_probs, path):
with open(path, "w") as f:
for s, t, hs, lps in zip(src, tgt, hypos, log_probs):
f.write(s + "\n")
f.write(t + "\n")
f.write("\n")
for h, lp in zip(hs, lps):
f.write("\t%f\t%s\n" % (lp, h.strip()))
f.write("------------------------------------------------------\n")
def corpus_bleu(sys_stream, ref_streams):
bleu = _corpus_bleu(sys_stream, ref_streams, tokenize="none")
return bleu.score
def sentence_bleu(hypothesis, reference):
bleu = _corpus_bleu(hypothesis, reference)
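    # add-one smoothing on the 2/3/4-gram counts (indices 1..3) so a single
    # sentence pair never gets a hard zero from a missing higher-order match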
for i in range(1, 4):
bleu.counts[i] += 1
bleu.totals[i] += 1
bleu = sacrebleu.BLEU.compute_bleu(
bleu.counts,
bleu.totals,
bleu.sys_len,
bleu.ref_len,
smooth_method="exp",
)
return bleu.score
def pairwise(sents):
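    """Corpus BLEU over every ordered pair of hypotheses produced for the same source.

    For example, sents = [["a b c", "a b d"]] scores "a b c" against "a b d" and
    vice versa; identical hypothesis sets give 100, fully diverse sets tend to 0.
    """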
_ref, _hypo = [], []
for s in sents:
for i in range(len(s)):
for j in range(len(s)):
if i != j:
_ref.append(s[i])
_hypo.append(s[j])
return corpus_bleu(_hypo, [_ref])
def multi_ref(refs, hypos):
_ref, _hypo = [], []
ref_cnt = 0
assert len(refs) == len(hypos)
# count number of refs covered
for rs, hs in zip(refs, hypos):
a = set()
for h in hs:
s = [sentence_bleu(h, r) for r in rs]
j = np.argmax(s)
_ref.append(rs[j])
_hypo.append(h)
best = [k for k in range(len(rs)) if s[k] == s[j]]
a.add(random.choice(best))
ref_cnt += len(a)
print("#refs covered: %.2f" % (ref_cnt / len(refs)))
# transpose refs and hypos
refs = list(zip(*refs))
hypos = list(zip(*hypos))
# compute multi-ref corpus BLEU (leave-one-out to be comparable to intra_ref)
k = len(hypos)
m = len(refs)
flat_hypos = [hypos[j][i] for i in range(len(hypos[0])) for j in range(k)]
duplicated_refs = [[ref for ref in refs_i for _ in range(k)] for refs_i in refs]
loo_bleus = []
for held_out_ref in range(m):
remaining_refs = (
duplicated_refs[:held_out_ref] + duplicated_refs[held_out_ref + 1 :]
)
assert len(remaining_refs) == m - 1
loo_bleus.append(corpus_bleu(flat_hypos, remaining_refs))
print("average multi-reference BLEU (leave-one-out): %.2f" % np.mean(loo_bleus))
def intra_ref(refs):
print("ref pairwise BLEU: %.2f" % pairwise(refs))
refs = list(zip(*refs))
m = len(refs)
concat_h = []
concat_rest = [[] for j in range(m - 1)]
for i, h in enumerate(refs):
rest = refs[:i] + refs[i + 1 :]
concat_h.append(h)
for j in range(m - 1):
concat_rest[j].extend(rest[j])
concat_h = list(chain.from_iterable(concat_h))
bleu = corpus_bleu(concat_h, concat_rest)
print("multi-reference BLEU (leave-one-out): %.2f" % bleu)
if __name__ == "__main__":
main()
| EXA-1-master | exa/libraries/fairseq/examples/translation_moe/score.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
import torch.nn.functional as F
class MeanPoolGatingNetwork(torch.nn.Module):
"""A simple mean-pooling gating network for selecting experts.
This module applies mean pooling over an encoder's output and returns
    responsibilities for each expert. The encoder format is expected to match
:class:`fairseq.models.transformer.TransformerEncoder`.
"""
def __init__(self, embed_dim, num_experts, dropout=None):
super().__init__()
self.embed_dim = embed_dim
self.num_experts = num_experts
self.fc1 = torch.nn.Linear(embed_dim, embed_dim)
self.dropout = torch.nn.Dropout(dropout) if dropout is not None else None
self.fc2 = torch.nn.Linear(embed_dim, num_experts)
def forward(self, encoder_out):
if not (
"encoder_out" in encoder_out
and "encoder_padding_mask" in encoder_out
and encoder_out["encoder_out"][0].size(2) == self.embed_dim
):
raise ValueError("Unexpected format for encoder_out")
# mean pooling over time
encoder_padding_mask = encoder_out["encoder_padding_mask"][0] # B x T
encoder_out = encoder_out["encoder_out"][0].transpose(0, 1) # B x T x C
if encoder_padding_mask is not None:
encoder_out = encoder_out.clone() # required because of transpose above
encoder_out[encoder_padding_mask] = 0
ntokens = torch.sum(~encoder_padding_mask, dim=1, keepdim=True)
x = torch.sum(encoder_out, dim=1) / ntokens.type_as(encoder_out)
else:
x = torch.mean(encoder_out, dim=1)
x = torch.tanh(self.fc1(x))
if self.dropout is not None:
x = self.dropout(x)
x = self.fc2(x)
return F.log_softmax(x, dim=-1, dtype=torch.float32).type_as(x)
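
# Minimal usage sketch (illustrative only; shapes and sizes are made up): a fake
# encoder output in the fairseq TransformerEncoder format (a T x B x C tensor plus
# a B x T boolean padding mask, each wrapped in a list) is mean-pooled into
# per-expert log-responsibilities.
if __name__ == "__main__":
    T, B, C, K = 7, 2, 16, 3
    gating = MeanPoolGatingNetwork(embed_dim=C, num_experts=K, dropout=0.1)
    fake_encoder_out = {
        "encoder_out": [torch.randn(T, B, C)],
        "encoder_padding_mask": [torch.zeros(B, T, dtype=torch.bool)],
    }
    log_probs = gating(fake_encoder_out)  # B x K log-probabilities over experts
    print(log_probs.exp().sum(dim=-1))    # each row sums to ~1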
| EXA-1-master | exa/libraries/fairseq/examples/translation_moe/translation_moe_src/mean_pool_gating_network.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from . import translation_moe # noqa
| EXA-1-master | exa/libraries/fairseq/examples/translation_moe/translation_moe_src/__init__.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
class LogSumExpMoE(torch.autograd.Function):
"""Standard LogSumExp forward pass, but use *posterior* for the backward.
See `"Mixture Models for Diverse Machine Translation: Tricks of the Trade"
(Shen et al., 2019) <https://arxiv.org/abs/1902.07816>`_.
"""
@staticmethod
def forward(ctx, logp, posterior, dim=-1):
ctx.save_for_backward(posterior)
ctx.dim = dim
return torch.logsumexp(logp, dim=dim)
@staticmethod
def backward(ctx, grad_output):
(posterior,) = ctx.saved_tensors
grad_logp = grad_output.unsqueeze(ctx.dim) * posterior
return grad_logp, None, None
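
# Minimal usage sketch (illustrative only; shapes are made up): the forward pass is
# an ordinary logsumexp over experts, while the backward distributes the incoming
# gradient according to the fixed `posterior` instead of the softmax of `logp`.
if __name__ == "__main__":
    logp = torch.randn(4, 3, requires_grad=True)     # B x K expert log-likelihoods
    posterior = torch.softmax(logp.detach(), dim=1)  # responsibilities (no grad)
    loss = -LogSumExpMoE.apply(logp, posterior, 1).sum()
    loss.backward()
    # with this loss, the custom backward yields grad(logp) == -posterior
    print(torch.allclose(logp.grad, -posterior))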
| EXA-1-master | exa/libraries/fairseq/examples/translation_moe/translation_moe_src/logsumexp_moe.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from dataclasses import dataclass, field
import torch
from omegaconf import II
from fairseq import utils
from fairseq.logging import metrics
from fairseq.dataclass import ChoiceEnum
from fairseq.tasks import register_task
from fairseq.tasks.translation import TranslationConfig, TranslationTask
from .logsumexp_moe import LogSumExpMoE
from .mean_pool_gating_network import MeanPoolGatingNetwork
METHOD_CHOICES = ChoiceEnum(["sMoElp", "sMoEup", "hMoElp", "hMoEup"])
@dataclass
class TranslationMoEConfig(TranslationConfig):
method: METHOD_CHOICES = field(
default="hMoEup",
metadata={"help": "MoE method"},
)
num_experts: int = field(
default=3,
metadata={"help": "number of experts"},
)
mean_pool_gating_network: bool = field(
default=False,
metadata={"help": "use a simple mean-pooling gating network"},
)
mean_pool_gating_network_dropout: float = field(
default=0,
metadata={"help": "dropout for mean-pooling gating network"},
)
mean_pool_gating_network_encoder_dim: int = field(
default=0,
metadata={"help": "encoder output dim for mean-pooling gating network"},
)
gen_expert: int = field(
default=0,
metadata={"help": "which expert to use for generation"},
)
sentence_avg: bool = II("optimization.sentence_avg")
@register_task("translation_moe", dataclass=TranslationMoEConfig)
class TranslationMoETask(TranslationTask):
"""
Translation task for Mixture of Experts (MoE) models.
See `"Mixture Models for Diverse Machine Translation: Tricks of the Trade"
(Shen et al., 2019) <https://arxiv.org/abs/1902.07816>`_.
Args:
src_dict (~fairseq.data.Dictionary): dictionary for the source language
tgt_dict (~fairseq.data.Dictionary): dictionary for the target language
.. note::
The translation task is compatible with :mod:`fairseq-train`,
:mod:`fairseq-generate` and :mod:`fairseq-interactive`.
The translation task provides the following additional command-line
arguments:
.. argparse::
:ref: fairseq.tasks.translation_parser
:prog:
"""
cfg: TranslationMoEConfig
def __init__(self, cfg: TranslationMoEConfig, src_dict, tgt_dict):
if cfg.method == "sMoElp":
# soft MoE with learned prior
self.uniform_prior = False
self.hard_selection = False
elif cfg.method == "sMoEup":
# soft MoE with uniform prior
self.uniform_prior = True
self.hard_selection = False
elif cfg.method == "hMoElp":
# hard MoE with learned prior
self.uniform_prior = False
self.hard_selection = True
elif cfg.method == "hMoEup":
# hard MoE with uniform prior
self.uniform_prior = True
self.hard_selection = True
# add indicator tokens for each expert
for i in range(cfg.num_experts):
# add to both dictionaries in case we're sharing embeddings
src_dict.add_symbol("<expert_{}>".format(i))
tgt_dict.add_symbol("<expert_{}>".format(i))
super().__init__(cfg, src_dict, tgt_dict)
def build_model(self, cfg, from_checkpoint=False):
from fairseq import models
model = models.build_model(cfg, self)
if not self.uniform_prior and not hasattr(model, "gating_network"):
if self.cfg.mean_pool_gating_network:
if self.cfg.mean_pool_gating_network_encoder_dim > 0:
encoder_dim = self.cfg.mean_pool_gating_network_encoder_dim
elif getattr(cfg, "encoder_embed_dim", None):
# assume that encoder_embed_dim is the encoder's output dimension
encoder_dim = cfg.encoder_embed_dim
else:
raise ValueError(
"Must specify --mean-pool-gating-network-encoder-dim"
)
if self.cfg.mean_pool_gating_network_dropout > 0:
dropout = self.cfg.mean_pool_gating_network_dropout
elif getattr(cfg, "dropout", None):
dropout = cfg.dropout
else:
raise ValueError("Must specify task.mean_pool_gating_network_dropout")
model.gating_network = MeanPoolGatingNetwork(
encoder_dim,
self.cfg.num_experts,
dropout,
)
else:
raise ValueError(
"translation_moe task with learned prior requires the model to "
"have a gating network; try using --mean-pool-gating-network"
)
return model
def expert_index(self, i):
return i + self.tgt_dict.index("<expert_0>")
def _get_loss(self, sample, model, criterion):
assert hasattr(
criterion, "compute_loss"
), "translation_moe task requires the criterion to implement the compute_loss() method"
k = self.cfg.num_experts
bsz = sample["target"].size(0)
def get_lprob_y(encoder_out, prev_output_tokens_k):
net_output = model.decoder(
prev_output_tokens=prev_output_tokens_k,
encoder_out=encoder_out,
)
loss, _ = criterion.compute_loss(model, net_output, sample, reduce=False)
loss = loss.view(bsz, -1)
return -loss.sum(dim=1, keepdim=True) # -> B x 1
def get_lprob_yz(winners=None):
encoder_out = model.encoder(
src_tokens=sample["net_input"]["src_tokens"],
src_lengths=sample["net_input"]["src_lengths"],
)
if winners is None:
lprob_y = []
for i in range(k):
prev_output_tokens_k = sample["net_input"][
"prev_output_tokens"
].clone()
assert not prev_output_tokens_k.requires_grad
prev_output_tokens_k[:, 0] = self.expert_index(i)
lprob_y.append(get_lprob_y(encoder_out, prev_output_tokens_k))
lprob_y = torch.cat(lprob_y, dim=1) # -> B x K
else:
prev_output_tokens_k = sample["net_input"]["prev_output_tokens"].clone()
prev_output_tokens_k[:, 0] = self.expert_index(winners)
lprob_y = get_lprob_y(encoder_out, prev_output_tokens_k) # -> B
if self.uniform_prior:
lprob_yz = lprob_y
else:
lprob_z = model.gating_network(encoder_out) # B x K
if winners is not None:
lprob_z = lprob_z.gather(dim=1, index=winners.unsqueeze(-1))
lprob_yz = lprob_y + lprob_z.type_as(lprob_y) # B x K
return lprob_yz
# compute responsibilities without dropout
with utils.model_eval(model): # disable dropout
with torch.no_grad(): # disable autograd
lprob_yz = get_lprob_yz() # B x K
prob_z_xy = torch.nn.functional.softmax(lprob_yz, dim=1)
assert not prob_z_xy.requires_grad
# compute loss with dropout
if self.hard_selection:
winners = prob_z_xy.max(dim=1)[1]
loss = -get_lprob_yz(winners)
else:
lprob_yz = get_lprob_yz() # B x K
loss = -LogSumExpMoE.apply(lprob_yz, prob_z_xy, 1)
loss = loss.sum()
sample_size = (
sample["target"].size(0) if self.cfg.sentence_avg else sample["ntokens"]
)
logging_output = {
"loss": utils.item(loss.data),
"ntokens": sample["ntokens"],
"nsentences": bsz,
"sample_size": sample_size,
"posterior": prob_z_xy.float().sum(dim=0).cpu(),
}
return loss, sample_size, logging_output
def train_step(
self, sample, model, criterion, optimizer, update_num, ignore_grad=False
):
model.train()
loss, sample_size, logging_output = self._get_loss(sample, model, criterion)
if ignore_grad:
loss *= 0
optimizer.backward(loss)
return loss, sample_size, logging_output
def valid_step(self, sample, model, criterion):
model.eval()
with torch.no_grad():
loss, sample_size, logging_output = self._get_loss(sample, model, criterion)
return loss, sample_size, logging_output
def inference_step(
self,
generator,
models,
sample,
prefix_tokens=None,
expert=None,
constraints=None,
):
expert = expert or self.cfg.gen_expert
with torch.no_grad():
return generator.generate(
models,
sample,
prefix_tokens=prefix_tokens,
constraints=constraints,
bos_token=self.expert_index(expert),
)
def reduce_metrics(self, logging_outputs, criterion):
super().reduce_metrics(logging_outputs, criterion)
metrics.log_scalar(
"posterior",
sum(log["posterior"] for log in logging_outputs if "posterior" in log),
)
| EXA-1-master | exa/libraries/fairseq/examples/translation_moe/translation_moe_src/translation_moe.py |
from . import criterions, models, tasks # noqa
| EXA-1-master | exa/libraries/fairseq/examples/speech_recognition/__init__.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
Flashlight decoders.
"""
import gc
import itertools as it
import os.path as osp
from typing import List
import warnings
from collections import deque, namedtuple
import numpy as np
import torch
from examples.speech_recognition.data.replabels import unpack_replabels
from fairseq import tasks
from fairseq.utils import apply_to_sample
from omegaconf import open_dict
from fairseq.dataclass.utils import convert_namespace_to_omegaconf
try:
from flashlight.lib.text.dictionary import create_word_dict, load_words
from flashlight.lib.sequence.criterion import CpuViterbiPath, get_data_ptr_as_bytes
from flashlight.lib.text.decoder import (
CriterionType,
LexiconDecoderOptions,
KenLM,
LM,
LMState,
SmearingMode,
Trie,
LexiconDecoder,
)
except:
warnings.warn(
"flashlight python bindings are required to use this functionality. Please install from https://github.com/facebookresearch/flashlight/tree/master/bindings/python"
)
LM = object
LMState = object
class W2lDecoder(object):
def __init__(self, args, tgt_dict):
self.tgt_dict = tgt_dict
self.vocab_size = len(tgt_dict)
self.nbest = args.nbest
# criterion-specific init
self.criterion_type = CriterionType.CTC
self.blank = (
tgt_dict.index("<ctc_blank>")
if "<ctc_blank>" in tgt_dict.indices
else tgt_dict.bos()
)
if "<sep>" in tgt_dict.indices:
self.silence = tgt_dict.index("<sep>")
elif "|" in tgt_dict.indices:
self.silence = tgt_dict.index("|")
else:
self.silence = tgt_dict.eos()
self.asg_transitions = None
def generate(self, models, sample, **unused):
"""Generate a batch of inferences."""
# model.forward normally channels prev_output_tokens into the decoder
# separately, but SequenceGenerator directly calls model.encoder
encoder_input = {
k: v for k, v in sample["net_input"].items() if k != "prev_output_tokens"
}
emissions = self.get_emissions(models, encoder_input)
return self.decode(emissions)
def get_emissions(self, models, encoder_input):
"""Run encoder and normalize emissions"""
model = models[0]
encoder_out = model(**encoder_input)
if hasattr(model, "get_logits"):
emissions = model.get_logits(encoder_out) # no need to normalize emissions
else:
emissions = model.get_normalized_probs(encoder_out, log_probs=True)
return emissions.transpose(0, 1).float().cpu().contiguous()
def get_tokens(self, idxs):
"""Normalize tokens by handling CTC blank, ASG replabels, etc."""
idxs = (g[0] for g in it.groupby(idxs))
idxs = filter(lambda x: x != self.blank, idxs)
return torch.LongTensor(list(idxs))
class W2lViterbiDecoder(W2lDecoder):
def __init__(self, args, tgt_dict):
super().__init__(args, tgt_dict)
def decode(self, emissions):
B, T, N = emissions.size()
hypos = []
if self.asg_transitions is None:
transitions = torch.FloatTensor(N, N).zero_()
else:
transitions = torch.FloatTensor(self.asg_transitions).view(N, N)
viterbi_path = torch.IntTensor(B, T)
workspace = torch.ByteTensor(CpuViterbiPath.get_workspace_size(B, T, N))
CpuViterbiPath.compute(
B,
T,
N,
get_data_ptr_as_bytes(emissions),
get_data_ptr_as_bytes(transitions),
get_data_ptr_as_bytes(viterbi_path),
get_data_ptr_as_bytes(workspace),
)
return [
[{"tokens": self.get_tokens(viterbi_path[b].tolist()), "score": 0}]
for b in range(B)
]
class W2lKenLMDecoder(W2lDecoder):
def __init__(self, args, tgt_dict):
super().__init__(args, tgt_dict)
self.unit_lm = getattr(args, "unit_lm", False)
if args.lexicon:
self.lexicon = load_words(args.lexicon)
self.word_dict = create_word_dict(self.lexicon)
self.unk_word = self.word_dict.get_index("<unk>")
self.lm = KenLM(args.kenlm_model, self.word_dict)
self.trie = Trie(self.vocab_size, self.silence)
start_state = self.lm.start(False)
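            # each lexicon entry maps a word to one or more spellings (token
            # sequences); every spelling is inserted into the trie together with
            # the LM score of its word so the beam search can expand word prefixes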
for i, (word, spellings) in enumerate(self.lexicon.items()):
word_idx = self.word_dict.get_index(word)
_, score = self.lm.score(start_state, word_idx)
for spelling in spellings:
spelling_idxs = [tgt_dict.index(token) for token in spelling]
assert (
tgt_dict.unk() not in spelling_idxs
), f"{spelling} {spelling_idxs}"
self.trie.insert(spelling_idxs, word_idx, score)
self.trie.smear(SmearingMode.MAX)
self.decoder_opts = LexiconDecoderOptions(
beam_size=args.beam,
beam_size_token=int(getattr(args, "beam_size_token", len(tgt_dict))),
beam_threshold=args.beam_threshold,
lm_weight=args.lm_weight,
word_score=args.word_score,
unk_score=args.unk_weight,
sil_score=args.sil_weight,
log_add=False,
criterion_type=self.criterion_type,
)
if self.asg_transitions is None:
N = 768
# self.asg_transitions = torch.FloatTensor(N, N).zero_()
self.asg_transitions = []
self.decoder = LexiconDecoder(
self.decoder_opts,
self.trie,
self.lm,
self.silence,
self.blank,
self.unk_word,
self.asg_transitions,
self.unit_lm,
)
else:
assert args.unit_lm, "lexicon free decoding can only be done with a unit language model"
from flashlight.lib.text.decoder import LexiconFreeDecoder, LexiconFreeDecoderOptions
d = {w: [[w]] for w in tgt_dict.symbols}
self.word_dict = create_word_dict(d)
self.lm = KenLM(args.kenlm_model, self.word_dict)
self.decoder_opts = LexiconFreeDecoderOptions(
beam_size=args.beam,
beam_size_token=int(getattr(args, "beam_size_token", len(tgt_dict))),
beam_threshold=args.beam_threshold,
lm_weight=args.lm_weight,
sil_score=args.sil_weight,
log_add=False,
criterion_type=self.criterion_type,
)
self.decoder = LexiconFreeDecoder(
self.decoder_opts, self.lm, self.silence, self.blank, []
)
def get_timesteps(self, token_idxs: List[int]) -> List[int]:
"""Returns frame numbers corresponding to every non-blank token.
Parameters
----------
token_idxs : List[int]
IDs of decoded tokens.
Returns
-------
List[int]
Frame numbers corresponding to every non-blank token.
"""
timesteps = []
for i, token_idx in enumerate(token_idxs):
if token_idx == self.blank:
continue
if i == 0 or token_idx != token_idxs[i-1]:
timesteps.append(i)
return timesteps
def decode(self, emissions):
B, T, N = emissions.size()
hypos = []
for b in range(B):
emissions_ptr = emissions.data_ptr() + 4 * b * emissions.stride(0)
results = self.decoder.decode(emissions_ptr, T, N)
nbest_results = results[: self.nbest]
hypos.append(
[
{
"tokens": self.get_tokens(result.tokens),
"score": result.score,
"timesteps": self.get_timesteps(result.tokens),
"words": [
self.word_dict.get_entry(x) for x in result.words if x >= 0
],
}
for result in nbest_results
]
)
return hypos
FairseqLMState = namedtuple("FairseqLMState", ["prefix", "incremental_state", "probs"])
class FairseqLM(LM):
def __init__(self, dictionary, model):
LM.__init__(self)
self.dictionary = dictionary
self.model = model
self.unk = self.dictionary.unk()
self.save_incremental = False # this currently does not work properly
self.max_cache = 20_000
model.cuda()
model.eval()
model.make_generation_fast_()
self.states = {}
self.stateq = deque()
def start(self, start_with_nothing):
state = LMState()
prefix = torch.LongTensor([[self.dictionary.eos()]])
incremental_state = {} if self.save_incremental else None
with torch.no_grad():
res = self.model(prefix.cuda(), incremental_state=incremental_state)
probs = self.model.get_normalized_probs(res, log_probs=True, sample=None)
if incremental_state is not None:
incremental_state = apply_to_sample(lambda x: x.cpu(), incremental_state)
self.states[state] = FairseqLMState(
prefix.numpy(), incremental_state, probs[0, -1].cpu().numpy()
)
self.stateq.append(state)
return state
def score(self, state: LMState, token_index: int, no_cache: bool = False):
"""
Evaluate language model based on the current lm state and new word
Parameters:
-----------
state: current lm state
token_index: index of the word
(can be lexicon index then you should store inside LM the
mapping between indices of lexicon and lm, or lm index of a word)
Returns:
--------
(LMState, float): pair of (new state, score for the current word)
"""
curr_state = self.states[state]
def trim_cache(targ_size):
while len(self.stateq) > targ_size:
rem_k = self.stateq.popleft()
rem_st = self.states[rem_k]
rem_st = FairseqLMState(rem_st.prefix, None, None)
self.states[rem_k] = rem_st
if curr_state.probs is None:
new_incremental_state = (
curr_state.incremental_state.copy()
if curr_state.incremental_state is not None
else None
)
with torch.no_grad():
if new_incremental_state is not None:
new_incremental_state = apply_to_sample(
lambda x: x.cuda(), new_incremental_state
)
elif self.save_incremental:
new_incremental_state = {}
res = self.model(
torch.from_numpy(curr_state.prefix).cuda(),
incremental_state=new_incremental_state,
)
probs = self.model.get_normalized_probs(
res, log_probs=True, sample=None
)
if new_incremental_state is not None:
new_incremental_state = apply_to_sample(
lambda x: x.cpu(), new_incremental_state
)
curr_state = FairseqLMState(
curr_state.prefix, new_incremental_state, probs[0, -1].cpu().numpy()
)
if not no_cache:
self.states[state] = curr_state
self.stateq.append(state)
score = curr_state.probs[token_index].item()
trim_cache(self.max_cache)
outstate = state.child(token_index)
if outstate not in self.states and not no_cache:
prefix = np.concatenate(
[curr_state.prefix, torch.LongTensor([[token_index]])], -1
)
incr_state = curr_state.incremental_state
self.states[outstate] = FairseqLMState(prefix, incr_state, None)
if token_index == self.unk:
score = float("-inf")
return outstate, score
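    # Rough usage sketch of the state chaining above (not executed here; token
    # indices are placeholders into self.dictionary):
    #   state = lm.start(False)
    #   state, s1 = lm.score(state, tok_a)
    #   state, s2 = lm.score(state, tok_b)
    #   state, s_eos = lm.finish(state)
    # Each call returns a child LMState plus the log-probability of the new token
    # given the prefix accumulated so far; unknown tokens are forced to -inf.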
def finish(self, state: LMState):
"""
Evaluate eos for language model based on the current lm state
Returns:
--------
(LMState, float): pair of (new state, score for the current word)
"""
return self.score(state, self.dictionary.eos())
def empty_cache(self):
self.states = {}
self.stateq = deque()
gc.collect()
class W2lFairseqLMDecoder(W2lDecoder):
def __init__(self, args, tgt_dict):
super().__init__(args, tgt_dict)
self.unit_lm = getattr(args, "unit_lm", False)
self.lexicon = load_words(args.lexicon) if args.lexicon else None
self.idx_to_wrd = {}
checkpoint = torch.load(args.kenlm_model, map_location="cpu")
if "cfg" in checkpoint and checkpoint["cfg"] is not None:
lm_args = checkpoint["cfg"]
else:
lm_args = convert_namespace_to_omegaconf(checkpoint["args"])
with open_dict(lm_args.task):
lm_args.task.data = osp.dirname(args.kenlm_model)
task = tasks.setup_task(lm_args.task)
model = task.build_model(lm_args.model)
model.load_state_dict(checkpoint["model"], strict=False)
self.trie = Trie(self.vocab_size, self.silence)
self.word_dict = task.dictionary
self.unk_word = self.word_dict.unk()
self.lm = FairseqLM(self.word_dict, model)
if self.lexicon:
start_state = self.lm.start(False)
for i, (word, spellings) in enumerate(self.lexicon.items()):
if self.unit_lm:
word_idx = i
self.idx_to_wrd[i] = word
score = 0
else:
word_idx = self.word_dict.index(word)
_, score = self.lm.score(start_state, word_idx, no_cache=True)
for spelling in spellings:
spelling_idxs = [tgt_dict.index(token) for token in spelling]
assert (
tgt_dict.unk() not in spelling_idxs
), f"{spelling} {spelling_idxs}"
self.trie.insert(spelling_idxs, word_idx, score)
self.trie.smear(SmearingMode.MAX)
self.decoder_opts = LexiconDecoderOptions(
beam_size=args.beam,
beam_size_token=int(getattr(args, "beam_size_token", len(tgt_dict))),
beam_threshold=args.beam_threshold,
lm_weight=args.lm_weight,
word_score=args.word_score,
unk_score=args.unk_weight,
sil_score=args.sil_weight,
log_add=False,
criterion_type=self.criterion_type,
)
self.decoder = LexiconDecoder(
self.decoder_opts,
self.trie,
self.lm,
self.silence,
self.blank,
self.unk_word,
[],
self.unit_lm,
)
else:
assert args.unit_lm, "lexicon free decoding can only be done with a unit language model"
from flashlight.lib.text.decoder import LexiconFreeDecoder, LexiconFreeDecoderOptions
d = {w: [[w]] for w in tgt_dict.symbols}
self.word_dict = create_word_dict(d)
self.lm = KenLM(args.kenlm_model, self.word_dict)
self.decoder_opts = LexiconFreeDecoderOptions(
beam_size=args.beam,
beam_size_token=int(getattr(args, "beam_size_token", len(tgt_dict))),
beam_threshold=args.beam_threshold,
lm_weight=args.lm_weight,
sil_score=args.sil_weight,
log_add=False,
criterion_type=self.criterion_type,
)
self.decoder = LexiconFreeDecoder(
self.decoder_opts, self.lm, self.silence, self.blank, []
)
def decode(self, emissions):
B, T, N = emissions.size()
hypos = []
def idx_to_word(idx):
if self.unit_lm:
return self.idx_to_wrd[idx]
else:
return self.word_dict[idx]
def make_hypo(result):
hypo = {"tokens": self.get_tokens(result.tokens), "score": result.score}
if self.lexicon:
hypo["words"] = [idx_to_word(x) for x in result.words if x >= 0]
return hypo
for b in range(B):
            # advance the raw data pointer by b utterances (4 bytes per float32 element)
            emissions_ptr = emissions.data_ptr() + 4 * b * emissions.stride(0)
results = self.decoder.decode(emissions_ptr, T, N)
nbest_results = results[: self.nbest]
hypos.append([make_hypo(result) for result in nbest_results])
self.lm.empty_cache()
return hypos
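# Minimal usage sketch (assumptions flagged inline, not an executable snippet):
# `args` is an argparse-style namespace carrying the decoder flags registered by
# infer.py (beam, lexicon, kenlm_model, lm_weight, word_score, ...), and
# `emissions` is a (B, T, N) float32 tensor of log-probabilities from an
# acoustic model.
#   decoder = W2lKenLMDecoder(args, tgt_dict)  # or W2lViterbiDecoder / W2lFairseqLMDecoder
#   hypos = decoder.decode(emissions)          # hypos[b][k] -> {"tokens", "score", "words", ...}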
| EXA-1-master | exa/libraries/fairseq/examples/speech_recognition/w2l_decoder.py |
#!/usr/bin/env python3 -u
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
Run inference for pre-processed data with a trained model.
"""
import argparse
import ast
import logging
import math
import os
import sys
import editdistance
import numpy as np
import torch
from fairseq import checkpoint_utils, options, progress_bar, tasks, utils
from fairseq.data.data_utils import post_process
from fairseq.logging.meters import StopwatchMeter, TimeMeter
logging.basicConfig()
logging.root.setLevel(logging.INFO)
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
def add_asr_eval_argument(parser):
parser.add_argument("--kspmodel", default=None, help="sentence piece model")
    parser.add_argument(
        "--wfstlm", default=None, help="wfstlm on dictionary output units"
    )
    parser.add_argument(
        "--rnnt_decoding_type",
        default="greedy",
        help="RNN-T decoding type (e.g. greedy)",
    )
try:
parser.add_argument(
"--lm-weight",
"--lm_weight",
type=float,
default=0.2,
help="weight for lm while interpolating with neural score",
)
    except argparse.ArgumentError:
        # --lm-weight may already be registered by the base generation parser
        pass
parser.add_argument(
"--rnnt_len_penalty", default=-0.5, help="rnnt length penalty on word level"
)
parser.add_argument(
"--w2l-decoder",
choices=["viterbi", "kenlm", "fairseqlm"],
help="use a w2l decoder",
)
parser.add_argument("--lexicon", help="lexicon for w2l decoder")
parser.add_argument("--unit-lm", action="store_true", help="if using a unit lm")
parser.add_argument("--kenlm-model", "--lm-model", help="lm model for w2l decoder")
parser.add_argument("--beam-threshold", type=float, default=25.0)
parser.add_argument("--beam-size-token", type=float, default=100)
parser.add_argument("--word-score", type=float, default=1.0)
parser.add_argument("--unk-weight", type=float, default=-math.inf)
parser.add_argument("--sil-weight", type=float, default=0.0)
parser.add_argument(
"--dump-emissions",
type=str,
default=None,
help="if present, dumps emissions into this file and exits",
)
parser.add_argument(
"--dump-features",
type=str,
default=None,
help="if present, dumps features into this file and exits",
)
parser.add_argument(
"--load-emissions",
type=str,
default=None,
help="if present, loads emissions from this file",
)
return parser
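# Hypothetical invocation using the flags registered above (paths and values are
# illustrative placeholders, not a tested command):
#   python examples/speech_recognition/infer.py /path/to/data --task audio_pretraining \
#       --path /path/to/model.pt --gen-subset dev_other --w2l-decoder kenlm \
#       --lexicon lexicon.lst --kenlm-model lm.bin --lm-weight 2.0 --word-score -1.0 \
#       --beam 500 --results-path /tmp/decode_out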
def check_args(args):
# assert args.path is not None, "--path required for generation!"
# assert args.results_path is not None, "--results_path required for generation!"
assert (
not args.sampling or args.nbest == args.beam
), "--sampling requires --nbest to be equal to --beam"
assert (
args.replace_unk is None or args.raw_text
), "--replace-unk requires a raw text dataset (--raw-text)"
def get_dataset_itr(args, task, models):
return task.get_batch_iterator(
dataset=task.dataset(args.gen_subset),
max_tokens=args.max_tokens,
max_sentences=args.batch_size,
max_positions=(sys.maxsize, sys.maxsize),
ignore_invalid_inputs=args.skip_invalid_size_inputs_valid_test,
required_batch_size_multiple=args.required_batch_size_multiple,
num_shards=args.num_shards,
shard_id=args.shard_id,
num_workers=args.num_workers,
data_buffer_size=args.data_buffer_size,
).next_epoch_itr(shuffle=False)
def process_predictions(
args, hypos, sp, tgt_dict, target_tokens, res_files, speaker, id
):
for hypo in hypos[: min(len(hypos), args.nbest)]:
hyp_pieces = tgt_dict.string(hypo["tokens"].int().cpu())
if "words" in hypo:
hyp_words = " ".join(hypo["words"])
else:
hyp_words = post_process(hyp_pieces, args.post_process)
if res_files is not None:
print(
"{} ({}-{})".format(hyp_pieces, speaker, id),
file=res_files["hypo.units"],
)
print(
"{} ({}-{})".format(hyp_words, speaker, id),
file=res_files["hypo.words"],
)
tgt_pieces = tgt_dict.string(target_tokens)
tgt_words = post_process(tgt_pieces, args.post_process)
if res_files is not None:
print(
"{} ({}-{})".format(tgt_pieces, speaker, id),
file=res_files["ref.units"],
)
print(
"{} ({}-{})".format(tgt_words, speaker, id), file=res_files["ref.words"]
)
if not args.quiet:
logger.info("HYPO:" + hyp_words)
logger.info("TARGET:" + tgt_words)
logger.info("___________________")
hyp_words = hyp_words.split()
tgt_words = tgt_words.split()
return editdistance.eval(hyp_words, tgt_words), len(tgt_words)
def prepare_result_files(args):
def get_res_file(file_prefix):
if args.num_shards > 1:
file_prefix = f"{args.shard_id}_{file_prefix}"
path = os.path.join(
args.results_path,
"{}-{}-{}.txt".format(
file_prefix, os.path.basename(args.path), args.gen_subset
),
)
return open(path, "w", buffering=1)
if not args.results_path:
return None
return {
"hypo.words": get_res_file("hypo.word"),
"hypo.units": get_res_file("hypo.units"),
"ref.words": get_res_file("ref.word"),
"ref.units": get_res_file("ref.units"),
}
def optimize_models(args, use_cuda, models):
"""Optimize ensemble for generation"""
for model in models:
model.make_generation_fast_(
beamable_mm_beam_size=None if args.no_beamable_mm else args.beam,
need_attn=args.print_alignment,
)
if args.fp16:
model.half()
if use_cuda:
model.cuda()
def apply_half(t):
if t.dtype is torch.float32:
return t.to(dtype=torch.half)
return t
class ExistingEmissionsDecoder(object):
def __init__(self, decoder, emissions):
self.decoder = decoder
self.emissions = emissions
def generate(self, models, sample, **unused):
ids = sample["id"].cpu().numpy()
        try:
            emissions = np.stack(self.emissions[ids])
        except ValueError:
            # ragged emission shapes across the sampled ids cannot be stacked
            print([x.shape for x in self.emissions[ids]])
            raise Exception("invalid sizes")
emissions = torch.from_numpy(emissions)
return self.decoder.decode(emissions)
def main(args, task=None, model_state=None):
check_args(args)
use_fp16 = args.fp16
if args.max_tokens is None and args.batch_size is None:
args.max_tokens = 4000000
logger.info(args)
use_cuda = torch.cuda.is_available() and not args.cpu
logger.info("| decoding with criterion {}".format(args.criterion))
task = tasks.setup_task(args)
# Load ensemble
if args.load_emissions:
models, criterions = [], []
task.load_dataset(args.gen_subset)
else:
logger.info("| loading model(s) from {}".format(args.path))
models, saved_cfg, task = checkpoint_utils.load_model_ensemble_and_task(
utils.split_paths(args.path, separator="\\"),
arg_overrides=ast.literal_eval(args.model_overrides),
task=task,
suffix=args.checkpoint_suffix,
strict=(args.checkpoint_shard_count == 1),
num_shards=args.checkpoint_shard_count,
state=model_state,
)
optimize_models(args, use_cuda, models)
task.load_dataset(args.gen_subset, task_cfg=saved_cfg.task)
# Set dictionary
tgt_dict = task.target_dictionary
logger.info(
"| {} {} {} examples".format(
args.data, args.gen_subset, len(task.dataset(args.gen_subset))
)
)
# hack to pass transitions to W2lDecoder
if args.criterion == "asg_loss":
raise NotImplementedError("asg_loss is currently not supported")
# trans = criterions[0].asg.trans.data
# args.asg_transitions = torch.flatten(trans).tolist()
# Load dataset (possibly sharded)
itr = get_dataset_itr(args, task, models)
# Initialize generator
gen_timer = StopwatchMeter()
def build_generator(args):
w2l_decoder = getattr(args, "w2l_decoder", None)
if w2l_decoder == "viterbi":
from examples.speech_recognition.w2l_decoder import W2lViterbiDecoder
return W2lViterbiDecoder(args, task.target_dictionary)
elif w2l_decoder == "kenlm":
from examples.speech_recognition.w2l_decoder import W2lKenLMDecoder
return W2lKenLMDecoder(args, task.target_dictionary)
elif w2l_decoder == "fairseqlm":
from examples.speech_recognition.w2l_decoder import W2lFairseqLMDecoder
return W2lFairseqLMDecoder(args, task.target_dictionary)
else:
print(
"only flashlight decoders with (viterbi, kenlm, fairseqlm) options are supported at the moment"
)
# please do not touch this unless you test both generate.py and infer.py with audio_pretraining task
generator = build_generator(args)
if args.load_emissions:
generator = ExistingEmissionsDecoder(
generator, np.load(args.load_emissions, allow_pickle=True)
)
logger.info("loaded emissions from " + args.load_emissions)
num_sentences = 0
if args.results_path is not None and not os.path.exists(args.results_path):
os.makedirs(args.results_path)
max_source_pos = (
utils.resolve_max_positions(
task.max_positions(), *[model.max_positions() for model in models]
),
)
if max_source_pos is not None:
max_source_pos = max_source_pos[0]
if max_source_pos is not None:
max_source_pos = max_source_pos[0] - 1
if args.dump_emissions:
emissions = {}
if args.dump_features:
features = {}
models[0].bert.proj = None
else:
res_files = prepare_result_files(args)
errs_t = 0
lengths_t = 0
with progress_bar.build_progress_bar(args, itr) as t:
wps_meter = TimeMeter()
for sample in t:
sample = utils.move_to_cuda(sample) if use_cuda else sample
if use_fp16:
sample = utils.apply_to_sample(apply_half, sample)
if "net_input" not in sample:
continue
prefix_tokens = None
if args.prefix_size > 0:
prefix_tokens = sample["target"][:, : args.prefix_size]
gen_timer.start()
if args.dump_emissions:
with torch.no_grad():
encoder_out = models[0](**sample["net_input"])
emm = models[0].get_normalized_probs(encoder_out, log_probs=True)
emm = emm.transpose(0, 1).cpu().numpy()
for i, id in enumerate(sample["id"]):
emissions[id.item()] = emm[i]
continue
elif args.dump_features:
with torch.no_grad():
encoder_out = models[0](**sample["net_input"])
feat = encoder_out["encoder_out"].transpose(0, 1).cpu().numpy()
for i, id in enumerate(sample["id"]):
padding = (
encoder_out["encoder_padding_mask"][i].cpu().numpy()
if encoder_out["encoder_padding_mask"] is not None
else None
)
features[id.item()] = (feat[i], padding)
continue
hypos = task.inference_step(generator, models, sample, prefix_tokens)
num_generated_tokens = sum(len(h[0]["tokens"]) for h in hypos)
gen_timer.stop(num_generated_tokens)
for i, sample_id in enumerate(sample["id"].tolist()):
speaker = None
# id = task.dataset(args.gen_subset).ids[int(sample_id)]
id = sample_id
toks = (
sample["target"][i, :]
if "target_label" not in sample
else sample["target_label"][i, :]
)
target_tokens = utils.strip_pad(toks, tgt_dict.pad()).int().cpu()
# Process top predictions
errs, length = process_predictions(
args,
hypos[i],
None,
tgt_dict,
target_tokens,
res_files,
speaker,
id,
)
errs_t += errs
lengths_t += length
wps_meter.update(num_generated_tokens)
t.log({"wps": round(wps_meter.avg)})
num_sentences += (
sample["nsentences"] if "nsentences" in sample else sample["id"].numel()
)
wer = None
if args.dump_emissions:
emm_arr = []
for i in range(len(emissions)):
emm_arr.append(emissions[i])
np.save(args.dump_emissions, emm_arr)
logger.info(f"saved {len(emissions)} emissions to {args.dump_emissions}")
elif args.dump_features:
feat_arr = []
for i in range(len(features)):
feat_arr.append(features[i])
np.save(args.dump_features, feat_arr)
logger.info(f"saved {len(features)} emissions to {args.dump_features}")
else:
if lengths_t > 0:
wer = errs_t * 100.0 / lengths_t
logger.info(f"WER: {wer}")
logger.info(
"| Processed {} sentences ({} tokens) in {:.1f}s ({:.2f}"
"sentences/s, {:.2f} tokens/s)".format(
num_sentences,
gen_timer.n,
gen_timer.sum,
num_sentences / gen_timer.sum,
1.0 / gen_timer.avg,
)
)
logger.info("| Generate {} with beam={}".format(args.gen_subset, args.beam))
return task, wer
def make_parser():
parser = options.get_generation_parser()
parser = add_asr_eval_argument(parser)
return parser
def cli_main():
parser = make_parser()
args = options.parse_args_and_arch(parser)
main(args)
if __name__ == "__main__":
cli_main()
| EXA-1-master | exa/libraries/fairseq/examples/speech_recognition/infer.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import json
import os
import re
import sys
import torch
from examples.speech_recognition.data import AsrDataset
from examples.speech_recognition.data.replabels import replabel_symbol
from fairseq.data import Dictionary
from fairseq.tasks import LegacyFairseqTask, register_task
def get_asr_dataset_from_json(data_json_path, tgt_dict):
"""
Parse data json and create dataset.
    See scripts/asr_prep_json.py, which packs the json from raw files.
Json example:
{
"utts": {
"4771-29403-0025": {
"input": {
"length_ms": 170,
"path": "/tmp/file1.flac"
},
"output": {
"text": "HELLO \n",
"token": "HE LLO",
"tokenid": "4815, 861"
}
},
"1564-142299-0096": {
...
}
}
"""
if not os.path.isfile(data_json_path):
raise FileNotFoundError("Dataset not found: {}".format(data_json_path))
with open(data_json_path, "rb") as f:
data_samples = json.load(f)["utts"]
assert len(data_samples) != 0
sorted_samples = sorted(
data_samples.items(),
key=lambda sample: int(sample[1]["input"]["length_ms"]),
reverse=True,
)
aud_paths = [s[1]["input"]["path"] for s in sorted_samples]
ids = [s[0] for s in sorted_samples]
speakers = []
for s in sorted_samples:
m = re.search("(.+?)-(.+?)-(.+?)", s[0])
speakers.append(m.group(1) + "_" + m.group(2))
frame_sizes = [s[1]["input"]["length_ms"] for s in sorted_samples]
tgt = [
[int(i) for i in s[1]["output"]["tokenid"].split(", ")]
for s in sorted_samples
]
# append eos
tgt = [[*t, tgt_dict.eos()] for t in tgt]
return AsrDataset(aud_paths, frame_sizes, tgt, tgt_dict, ids, speakers)
@register_task("speech_recognition")
class SpeechRecognitionTask(LegacyFairseqTask):
"""
Task for training speech recognition model.
"""
@staticmethod
def add_args(parser):
"""Add task-specific arguments to the parser."""
parser.add_argument("data", help="path to data directory")
parser.add_argument(
"--silence-token", default="\u2581", help="token for silence (used by w2l)"
)
parser.add_argument(
"--max-source-positions",
default=sys.maxsize,
type=int,
metavar="N",
help="max number of frames in the source sequence",
)
parser.add_argument(
"--max-target-positions",
default=1024,
type=int,
metavar="N",
help="max number of tokens in the target sequence",
)
def __init__(self, args, tgt_dict):
super().__init__(args)
self.tgt_dict = tgt_dict
@classmethod
def setup_task(cls, args, **kwargs):
"""Setup the task (e.g., load dictionaries)."""
dict_path = os.path.join(args.data, "dict.txt")
if not os.path.isfile(dict_path):
raise FileNotFoundError("Dict not found: {}".format(dict_path))
tgt_dict = Dictionary.load(dict_path)
if args.criterion == "ctc_loss":
tgt_dict.add_symbol("<ctc_blank>")
elif args.criterion == "asg_loss":
for i in range(1, args.max_replabel + 1):
tgt_dict.add_symbol(replabel_symbol(i))
print("| dictionary: {} types".format(len(tgt_dict)))
return cls(args, tgt_dict)
def load_dataset(self, split, combine=False, **kwargs):
"""Load a given dataset split.
Args:
split (str): name of the split (e.g., train, valid, test)
"""
data_json_path = os.path.join(self.args.data, "{}.json".format(split))
self.datasets[split] = get_asr_dataset_from_json(data_json_path, self.tgt_dict)
def build_generator(self, models, args, **unused):
w2l_decoder = getattr(args, "w2l_decoder", None)
if w2l_decoder == "viterbi":
from examples.speech_recognition.w2l_decoder import W2lViterbiDecoder
return W2lViterbiDecoder(args, self.target_dictionary)
elif w2l_decoder == "kenlm":
from examples.speech_recognition.w2l_decoder import W2lKenLMDecoder
return W2lKenLMDecoder(args, self.target_dictionary)
elif w2l_decoder == "fairseqlm":
from examples.speech_recognition.w2l_decoder import W2lFairseqLMDecoder
return W2lFairseqLMDecoder(args, self.target_dictionary)
else:
return super().build_generator(models, args)
@property
def target_dictionary(self):
"""Return the :class:`~fairseq.data.Dictionary` for the language
model."""
return self.tgt_dict
@property
def source_dictionary(self):
"""Return the source :class:`~fairseq.data.Dictionary` (if applicable
for this task)."""
return None
def max_positions(self):
"""Return the max speech and sentence length allowed by the task."""
return (self.args.max_source_positions, self.args.max_target_positions)
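# Illustrative (untested) invocation of this task; the data directory is assumed to
# hold {train,valid}.json produced by asr_prep_json.py plus dict.txt:
#   fairseq-train /path/to/data --task speech_recognition --arch asr_vggtransformer ...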
| EXA-1-master | exa/libraries/fairseq/examples/speech_recognition/tasks/speech_recognition.py |
import importlib
import os
for file in sorted(os.listdir(os.path.dirname(__file__))):
if file.endswith(".py") and not file.startswith("_"):
task_name = file[: file.find(".py")]
importlib.import_module("examples.speech_recognition.tasks." + task_name)
| EXA-1-master | exa/libraries/fairseq/examples/speech_recognition/tasks/__init__.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from dataclasses import dataclass
import hydra
from hydra.core.config_store import ConfigStore
import logging
from omegaconf import MISSING, OmegaConf
import os
import os.path as osp
from pathlib import Path
import subprocess
from typing import Optional
from fairseq.data.dictionary import Dictionary
from fairseq.dataclass import FairseqDataclass
script_dir = Path(__file__).resolve().parent
config_path = script_dir / "config"
logger = logging.getLogger(__name__)
@dataclass
class KaldiInitializerConfig(FairseqDataclass):
data_dir: str = MISSING
fst_dir: Optional[str] = None
in_labels: str = MISSING
out_labels: Optional[str] = None
wav2letter_lexicon: Optional[str] = None
lm_arpa: str = MISSING
kaldi_root: str = MISSING
blank_symbol: str = "<s>"
silence_symbol: Optional[str] = None
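# Hypothetical hydra-style invocation (all paths are placeholders); the override
# keys correspond to the KaldiInitializerConfig fields above:
#   python kaldi_initializer.py kaldi_root=/opt/kaldi data_dir=/data/ltr \
#       in_labels=ltr lm_arpa=/data/lm/4gram.arpa wav2letter_lexicon=/data/lexicon.lst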
def create_units(fst_dir: Path, in_labels: str, vocab: Dictionary) -> Path:
in_units_file = fst_dir / f"kaldi_dict.{in_labels}.txt"
if not in_units_file.exists():
logger.info(f"Creating {in_units_file}")
with open(in_units_file, "w") as f:
print("<eps> 0", file=f)
i = 1
for symb in vocab.symbols[vocab.nspecial :]:
if not symb.startswith("madeupword"):
print(f"{symb} {i}", file=f)
i += 1
return in_units_file
def create_lexicon(
cfg: KaldiInitializerConfig,
fst_dir: Path,
unique_label: str,
in_units_file: Path,
out_words_file: Path,
) -> (Path, Path):
disambig_in_units_file = fst_dir / f"kaldi_dict.{cfg.in_labels}_disambig.txt"
lexicon_file = fst_dir / f"kaldi_lexicon.{unique_label}.txt"
disambig_lexicon_file = fst_dir / f"kaldi_lexicon.{unique_label}_disambig.txt"
if (
not lexicon_file.exists()
or not disambig_lexicon_file.exists()
or not disambig_in_units_file.exists()
):
logger.info(f"Creating {lexicon_file} (in units file: {in_units_file})")
assert cfg.wav2letter_lexicon is not None or cfg.in_labels == cfg.out_labels
if cfg.wav2letter_lexicon is not None:
lm_words = set()
with open(out_words_file, "r") as lm_dict_f:
for line in lm_dict_f:
lm_words.add(line.split()[0])
num_skipped = 0
total = 0
with open(cfg.wav2letter_lexicon, "r") as w2l_lex_f, open(
lexicon_file, "w"
) as out_f:
for line in w2l_lex_f:
items = line.rstrip().split("\t")
assert len(items) == 2, items
if items[0] in lm_words:
print(items[0], items[1], file=out_f)
else:
num_skipped += 1
logger.debug(
f"Skipping word {items[0]} as it was not found in LM"
)
total += 1
if num_skipped > 0:
logger.warning(
f"Skipped {num_skipped} out of {total} words as they were not found in LM"
)
else:
with open(in_units_file, "r") as in_f, open(lexicon_file, "w") as out_f:
for line in in_f:
symb = line.split()[0]
if symb != "<eps>" and symb != "<ctc_blank>" and symb != "<SIL>":
print(symb, symb, file=out_f)
lex_disambig_path = (
Path(cfg.kaldi_root) / "egs/wsj/s5/utils/add_lex_disambig.pl"
)
res = subprocess.run(
[lex_disambig_path, lexicon_file, disambig_lexicon_file],
check=True,
capture_output=True,
)
ndisambig = int(res.stdout)
        disambig_path = Path(cfg.kaldi_root) / "egs/wsj/s5/utils/add_disambig.pl"
        res = subprocess.run(
            [disambig_path, "--include-zero", in_units_file, str(ndisambig)],
check=True,
capture_output=True,
)
with open(disambig_in_units_file, "wb") as f:
f.write(res.stdout)
return disambig_lexicon_file, disambig_in_units_file
def create_G(
kaldi_root: Path, fst_dir: Path, lm_arpa: Path, arpa_base: str
) -> (Path, Path):
out_words_file = fst_dir / f"kaldi_dict.{arpa_base}.txt"
grammar_graph = fst_dir / f"G_{arpa_base}.fst"
if not grammar_graph.exists() or not out_words_file.exists():
logger.info(f"Creating {grammar_graph}")
arpa2fst = kaldi_root / "src/lmbin/arpa2fst"
subprocess.run(
[
arpa2fst,
"--disambig-symbol=#0",
f"--write-symbol-table={out_words_file}",
lm_arpa,
grammar_graph,
],
check=True,
)
return grammar_graph, out_words_file
def create_L(
kaldi_root: Path,
fst_dir: Path,
unique_label: str,
lexicon_file: Path,
in_units_file: Path,
out_words_file: Path,
) -> Path:
lexicon_graph = fst_dir / f"L.{unique_label}.fst"
if not lexicon_graph.exists():
logger.info(f"Creating {lexicon_graph} (in units: {in_units_file})")
make_lex = kaldi_root / "egs/wsj/s5/utils/make_lexicon_fst.pl"
fstcompile = kaldi_root / "tools/openfst-1.6.7/bin/fstcompile"
fstaddselfloops = kaldi_root / "src/fstbin/fstaddselfloops"
fstarcsort = kaldi_root / "tools/openfst-1.6.7/bin/fstarcsort"
def write_disambig_symbol(file):
with open(file, "r") as f:
for line in f:
items = line.rstrip().split()
if items[0] == "#0":
out_path = str(file) + "_disamig"
with open(out_path, "w") as out_f:
print(items[1], file=out_f)
return out_path
return None
in_disambig_sym = write_disambig_symbol(in_units_file)
assert in_disambig_sym is not None
out_disambig_sym = write_disambig_symbol(out_words_file)
assert out_disambig_sym is not None
try:
with open(lexicon_graph, "wb") as out_f:
res = subprocess.run(
[make_lex, lexicon_file], capture_output=True, check=True
)
assert len(res.stderr) == 0, res.stderr.decode("utf-8")
res = subprocess.run(
[
fstcompile,
f"--isymbols={in_units_file}",
f"--osymbols={out_words_file}",
"--keep_isymbols=false",
"--keep_osymbols=false",
],
input=res.stdout,
capture_output=True,
)
assert len(res.stderr) == 0, res.stderr.decode("utf-8")
res = subprocess.run(
[fstaddselfloops, in_disambig_sym, out_disambig_sym],
input=res.stdout,
capture_output=True,
check=True,
)
res = subprocess.run(
[fstarcsort, "--sort_type=olabel"],
input=res.stdout,
capture_output=True,
check=True,
)
out_f.write(res.stdout)
except subprocess.CalledProcessError as e:
logger.error(f"cmd: {e.cmd}, err: {e.stderr.decode('utf-8')}")
os.remove(lexicon_graph)
raise
except AssertionError:
os.remove(lexicon_graph)
raise
return lexicon_graph
def create_LG(
kaldi_root: Path,
fst_dir: Path,
unique_label: str,
lexicon_graph: Path,
grammar_graph: Path,
) -> Path:
lg_graph = fst_dir / f"LG.{unique_label}.fst"
if not lg_graph.exists():
logger.info(f"Creating {lg_graph}")
fsttablecompose = kaldi_root / "src/fstbin/fsttablecompose"
fstdeterminizestar = kaldi_root / "src/fstbin/fstdeterminizestar"
fstminimizeencoded = kaldi_root / "src/fstbin/fstminimizeencoded"
fstpushspecial = kaldi_root / "src/fstbin/fstpushspecial"
fstarcsort = kaldi_root / "tools/openfst-1.6.7/bin/fstarcsort"
try:
with open(lg_graph, "wb") as out_f:
res = subprocess.run(
[fsttablecompose, lexicon_graph, grammar_graph],
capture_output=True,
check=True,
)
res = subprocess.run(
[
fstdeterminizestar,
"--use-log=true",
],
input=res.stdout,
capture_output=True,
)
res = subprocess.run(
[fstminimizeencoded],
input=res.stdout,
capture_output=True,
check=True,
)
res = subprocess.run(
[fstpushspecial],
input=res.stdout,
capture_output=True,
check=True,
)
res = subprocess.run(
[fstarcsort, "--sort_type=ilabel"],
input=res.stdout,
capture_output=True,
check=True,
)
out_f.write(res.stdout)
except subprocess.CalledProcessError as e:
logger.error(f"cmd: {e.cmd}, err: {e.stderr.decode('utf-8')}")
os.remove(lg_graph)
raise
return lg_graph
def create_H(
kaldi_root: Path,
fst_dir: Path,
disambig_out_units_file: Path,
in_labels: str,
vocab: Dictionary,
blk_sym: str,
silence_symbol: Optional[str],
) -> (Path, Path, Path):
h_graph = (
fst_dir / f"H.{in_labels}{'_' + silence_symbol if silence_symbol else ''}.fst"
)
h_out_units_file = fst_dir / f"kaldi_dict.h_out.{in_labels}.txt"
disambig_in_units_file_int = Path(str(h_graph) + "isym_disambig.int")
disambig_out_units_file_int = Path(str(disambig_out_units_file) + ".int")
if (
not h_graph.exists()
or not h_out_units_file.exists()
or not disambig_in_units_file_int.exists()
):
logger.info(f"Creating {h_graph}")
eps_sym = "<eps>"
num_disambig = 0
osymbols = []
with open(disambig_out_units_file, "r") as f, open(
disambig_out_units_file_int, "w"
) as out_f:
for line in f:
symb, id = line.rstrip().split()
if line.startswith("#"):
num_disambig += 1
print(id, file=out_f)
else:
if len(osymbols) == 0:
assert symb == eps_sym, symb
osymbols.append((symb, id))
i_idx = 0
isymbols = [(eps_sym, 0)]
imap = {}
for i, s in enumerate(vocab.symbols):
i_idx += 1
isymbols.append((s, i_idx))
imap[s] = i_idx
fst_str = []
node_idx = 0
root_node = node_idx
special_symbols = [blk_sym]
if silence_symbol is not None:
special_symbols.append(silence_symbol)
for ss in special_symbols:
fst_str.append("{} {} {} {}".format(root_node, root_node, ss, eps_sym))
for symbol, _ in osymbols:
if symbol == eps_sym or symbol.startswith("#"):
continue
node_idx += 1
# 1. from root to emitting state
fst_str.append("{} {} {} {}".format(root_node, node_idx, symbol, symbol))
# 2. from emitting state back to root
fst_str.append("{} {} {} {}".format(node_idx, root_node, eps_sym, eps_sym))
# 3. from emitting state to optional blank state
pre_node = node_idx
node_idx += 1
for ss in special_symbols:
fst_str.append("{} {} {} {}".format(pre_node, node_idx, ss, eps_sym))
# 4. from blank state back to root
fst_str.append("{} {} {} {}".format(node_idx, root_node, eps_sym, eps_sym))
fst_str.append("{}".format(root_node))
fst_str = "\n".join(fst_str)
h_str = str(h_graph)
isym_file = h_str + ".isym"
with open(isym_file, "w") as f:
for sym, id in isymbols:
f.write("{} {}\n".format(sym, id))
with open(h_out_units_file, "w") as f:
for sym, id in osymbols:
f.write("{} {}\n".format(sym, id))
with open(disambig_in_units_file_int, "w") as f:
disam_sym_id = len(isymbols)
for _ in range(num_disambig):
f.write("{}\n".format(disam_sym_id))
disam_sym_id += 1
fstcompile = kaldi_root / "tools/openfst-1.6.7/bin/fstcompile"
fstaddselfloops = kaldi_root / "src/fstbin/fstaddselfloops"
fstarcsort = kaldi_root / "tools/openfst-1.6.7/bin/fstarcsort"
try:
with open(h_graph, "wb") as out_f:
res = subprocess.run(
[
fstcompile,
f"--isymbols={isym_file}",
f"--osymbols={h_out_units_file}",
"--keep_isymbols=false",
"--keep_osymbols=false",
],
input=str.encode(fst_str),
capture_output=True,
check=True,
)
res = subprocess.run(
[
fstaddselfloops,
disambig_in_units_file_int,
disambig_out_units_file_int,
],
input=res.stdout,
capture_output=True,
check=True,
)
res = subprocess.run(
[fstarcsort, "--sort_type=olabel"],
input=res.stdout,
capture_output=True,
check=True,
)
out_f.write(res.stdout)
except subprocess.CalledProcessError as e:
logger.error(f"cmd: {e.cmd}, err: {e.stderr.decode('utf-8')}")
os.remove(h_graph)
raise
return h_graph, h_out_units_file, disambig_in_units_file_int
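# For reference, a toy hand-written text-format FST matching the topology built in
# create_H above, for a single output symbol "a", blank "<s>" and no silence symbol:
#   0 0 <s> <eps>    # blank self-loop at the root
#   0 1 a a          # root -> emitting state for "a"
#   1 0 <eps> <eps>  # emitting state back to root
#   1 2 <s> <eps>    # optional blank after the emission
#   2 0 <eps> <eps>  # blank state back to root
#   0                # root is the final state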
def create_HLGa(
kaldi_root: Path,
fst_dir: Path,
unique_label: str,
h_graph: Path,
lg_graph: Path,
disambig_in_words_file_int: Path,
) -> Path:
hlga_graph = fst_dir / f"HLGa.{unique_label}.fst"
if not hlga_graph.exists():
logger.info(f"Creating {hlga_graph}")
fsttablecompose = kaldi_root / "src/fstbin/fsttablecompose"
fstdeterminizestar = kaldi_root / "src/fstbin/fstdeterminizestar"
fstrmsymbols = kaldi_root / "src/fstbin/fstrmsymbols"
fstrmepslocal = kaldi_root / "src/fstbin/fstrmepslocal"
fstminimizeencoded = kaldi_root / "src/fstbin/fstminimizeencoded"
try:
with open(hlga_graph, "wb") as out_f:
res = subprocess.run(
[
fsttablecompose,
h_graph,
lg_graph,
],
capture_output=True,
check=True,
)
res = subprocess.run(
[fstdeterminizestar, "--use-log=true"],
input=res.stdout,
capture_output=True,
check=True,
)
res = subprocess.run(
[fstrmsymbols, disambig_in_words_file_int],
input=res.stdout,
capture_output=True,
check=True,
)
res = subprocess.run(
[fstrmepslocal],
input=res.stdout,
capture_output=True,
check=True,
)
res = subprocess.run(
[fstminimizeencoded],
input=res.stdout,
capture_output=True,
check=True,
)
out_f.write(res.stdout)
except subprocess.CalledProcessError as e:
logger.error(f"cmd: {e.cmd}, err: {e.stderr.decode('utf-8')}")
os.remove(hlga_graph)
raise
return hlga_graph
def create_HLa(
kaldi_root: Path,
fst_dir: Path,
unique_label: str,
h_graph: Path,
l_graph: Path,
disambig_in_words_file_int: Path,
) -> Path:
hla_graph = fst_dir / f"HLa.{unique_label}.fst"
if not hla_graph.exists():
logger.info(f"Creating {hla_graph}")
fsttablecompose = kaldi_root / "src/fstbin/fsttablecompose"
fstdeterminizestar = kaldi_root / "src/fstbin/fstdeterminizestar"
fstrmsymbols = kaldi_root / "src/fstbin/fstrmsymbols"
fstrmepslocal = kaldi_root / "src/fstbin/fstrmepslocal"
fstminimizeencoded = kaldi_root / "src/fstbin/fstminimizeencoded"
try:
with open(hla_graph, "wb") as out_f:
res = subprocess.run(
[
fsttablecompose,
h_graph,
l_graph,
],
capture_output=True,
check=True,
)
res = subprocess.run(
[fstdeterminizestar, "--use-log=true"],
input=res.stdout,
capture_output=True,
check=True,
)
res = subprocess.run(
[fstrmsymbols, disambig_in_words_file_int],
input=res.stdout,
capture_output=True,
check=True,
)
res = subprocess.run(
[fstrmepslocal],
input=res.stdout,
capture_output=True,
check=True,
)
res = subprocess.run(
[fstminimizeencoded],
input=res.stdout,
capture_output=True,
check=True,
)
out_f.write(res.stdout)
except subprocess.CalledProcessError as e:
logger.error(f"cmd: {e.cmd}, err: {e.stderr.decode('utf-8')}")
os.remove(hla_graph)
raise
return hla_graph
def create_HLG(
kaldi_root: Path,
fst_dir: Path,
unique_label: str,
hlga_graph: Path,
prefix: str = "HLG",
) -> Path:
hlg_graph = fst_dir / f"{prefix}.{unique_label}.fst"
if not hlg_graph.exists():
logger.info(f"Creating {hlg_graph}")
add_self_loop = script_dir / "add-self-loop-simple"
kaldi_src = kaldi_root / "src"
kaldi_lib = kaldi_src / "lib"
try:
if not add_self_loop.exists():
fst_include = kaldi_root / "tools/openfst-1.6.7/include"
add_self_loop_src = script_dir / "add-self-loop-simple.cc"
subprocess.run(
[
"c++",
f"-I{kaldi_src}",
f"-I{fst_include}",
f"-L{kaldi_lib}",
add_self_loop_src,
"-lkaldi-base",
"-lkaldi-fstext",
"-o",
add_self_loop,
],
check=True,
)
            my_env = os.environ.copy()
            my_env["LD_LIBRARY_PATH"] = f"{kaldi_lib}:{my_env.get('LD_LIBRARY_PATH', '')}"
subprocess.run(
[
add_self_loop,
hlga_graph,
hlg_graph,
],
check=True,
capture_output=True,
env=my_env,
)
except subprocess.CalledProcessError as e:
logger.error(f"cmd: {e.cmd}, err: {e.stderr.decode('utf-8')}")
raise
return hlg_graph
def initalize_kaldi(cfg: KaldiInitializerConfig) -> Path:
if cfg.fst_dir is None:
cfg.fst_dir = osp.join(cfg.data_dir, "kaldi")
if cfg.out_labels is None:
cfg.out_labels = cfg.in_labels
kaldi_root = Path(cfg.kaldi_root)
data_dir = Path(cfg.data_dir)
fst_dir = Path(cfg.fst_dir)
fst_dir.mkdir(parents=True, exist_ok=True)
arpa_base = osp.splitext(osp.basename(cfg.lm_arpa))[0]
unique_label = f"{cfg.in_labels}.{arpa_base}"
with open(data_dir / f"dict.{cfg.in_labels}.txt", "r") as f:
vocab = Dictionary.load(f)
in_units_file = create_units(fst_dir, cfg.in_labels, vocab)
grammar_graph, out_words_file = create_G(
kaldi_root, fst_dir, Path(cfg.lm_arpa), arpa_base
)
disambig_lexicon_file, disambig_L_in_units_file = create_lexicon(
cfg, fst_dir, unique_label, in_units_file, out_words_file
)
h_graph, h_out_units_file, disambig_in_units_file_int = create_H(
kaldi_root,
fst_dir,
disambig_L_in_units_file,
cfg.in_labels,
vocab,
cfg.blank_symbol,
cfg.silence_symbol,
)
lexicon_graph = create_L(
kaldi_root,
fst_dir,
unique_label,
disambig_lexicon_file,
disambig_L_in_units_file,
out_words_file,
)
lg_graph = create_LG(
kaldi_root, fst_dir, unique_label, lexicon_graph, grammar_graph
)
hlga_graph = create_HLGa(
kaldi_root, fst_dir, unique_label, h_graph, lg_graph, disambig_in_units_file_int
)
hlg_graph = create_HLG(kaldi_root, fst_dir, unique_label, hlga_graph)
# for debugging
# hla_graph = create_HLa(kaldi_root, fst_dir, unique_label, h_graph, lexicon_graph, disambig_in_units_file_int)
# hl_graph = create_HLG(kaldi_root, fst_dir, unique_label, hla_graph, prefix="HL_looped")
# create_HLG(kaldi_root, fst_dir, "phnc", h_graph, prefix="H_looped")
return hlg_graph
@hydra.main(config_path=config_path, config_name="kaldi_initializer")
def cli_main(cfg: KaldiInitializerConfig) -> None:
container = OmegaConf.to_container(cfg, resolve=True, enum_to_str=True)
cfg = OmegaConf.create(container)
OmegaConf.set_struct(cfg, True)
initalize_kaldi(cfg)
if __name__ == "__main__":
logging.root.setLevel(logging.INFO)
logging.basicConfig(level=logging.INFO)
try:
from hydra._internal.utils import (
get_args,
) # pylint: disable=import-outside-toplevel
cfg_name = get_args().config_name or "kaldi_initializer"
except ImportError:
logger.warning("Failed to get config name from hydra args")
cfg_name = "kaldi_initializer"
cs = ConfigStore.instance()
cs.store(name=cfg_name, node=KaldiInitializerConfig)
cli_main()
| EXA-1-master | exa/libraries/fairseq/examples/speech_recognition/kaldi/kaldi_initializer.py |
| EXA-1-master | exa/libraries/fairseq/examples/speech_recognition/kaldi/__init__.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from concurrent.futures import ThreadPoolExecutor
import logging
from omegaconf import MISSING
import os
import torch
from typing import Optional
import warnings
from dataclasses import dataclass
from fairseq.dataclass import FairseqDataclass
from .kaldi_initializer import KaldiInitializerConfig, initalize_kaldi
logger = logging.getLogger(__name__)
@dataclass
class KaldiDecoderConfig(FairseqDataclass):
hlg_graph_path: Optional[str] = None
output_dict: str = MISSING
kaldi_initializer_config: Optional[KaldiInitializerConfig] = None
acoustic_scale: float = 0.5
max_active: int = 10000
beam_delta: float = 0.5
hash_ratio: float = 2.0
is_lattice: bool = False
lattice_beam: float = 10.0
prune_interval: int = 25
determinize_lattice: bool = True
prune_scale: float = 0.1
max_mem: int = 0
phone_determinize: bool = True
word_determinize: bool = True
minimize: bool = True
num_threads: int = 1
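# Rough construction sketch (not executed here): `cfg` is a KaldiDecoderConfig with
# either hlg_graph_path or kaldi_initializer_config populated and output_dict
# pointing at a Kaldi words.txt-style symbol table:
#   decoder = KaldiDecoder(cfg, beam=15, nbest=1)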
class KaldiDecoder(object):
def __init__(
self,
cfg: KaldiDecoderConfig,
beam: int,
nbest: int = 1,
):
try:
from kaldi.asr import FasterRecognizer, LatticeFasterRecognizer
from kaldi.base import set_verbose_level
from kaldi.decoder import (
FasterDecoder,
FasterDecoderOptions,
LatticeFasterDecoder,
LatticeFasterDecoderOptions,
)
from kaldi.lat.functions import DeterminizeLatticePhonePrunedOptions
from kaldi.fstext import read_fst_kaldi, SymbolTable
        except ImportError:
            warnings.warn(
                "pykaldi is required for this functionality. Please install from https://github.com/pykaldi/pykaldi"
            )
# set_verbose_level(2)
self.acoustic_scale = cfg.acoustic_scale
self.nbest = nbest
if cfg.hlg_graph_path is None:
assert (
cfg.kaldi_initializer_config is not None
), "Must provide hlg graph path or kaldi initializer config"
cfg.hlg_graph_path = initalize_kaldi(cfg.kaldi_initializer_config)
assert os.path.exists(cfg.hlg_graph_path), cfg.hlg_graph_path
if cfg.is_lattice:
self.dec_cls = LatticeFasterDecoder
opt_cls = LatticeFasterDecoderOptions
self.rec_cls = LatticeFasterRecognizer
else:
assert self.nbest == 1, "nbest > 1 requires lattice decoder"
self.dec_cls = FasterDecoder
opt_cls = FasterDecoderOptions
self.rec_cls = FasterRecognizer
self.decoder_options = opt_cls()
self.decoder_options.beam = beam
self.decoder_options.max_active = cfg.max_active
self.decoder_options.beam_delta = cfg.beam_delta
self.decoder_options.hash_ratio = cfg.hash_ratio
if cfg.is_lattice:
self.decoder_options.lattice_beam = cfg.lattice_beam
self.decoder_options.prune_interval = cfg.prune_interval
self.decoder_options.determinize_lattice = cfg.determinize_lattice
self.decoder_options.prune_scale = cfg.prune_scale
det_opts = DeterminizeLatticePhonePrunedOptions()
det_opts.max_mem = cfg.max_mem
det_opts.phone_determinize = cfg.phone_determinize
det_opts.word_determinize = cfg.word_determinize
det_opts.minimize = cfg.minimize
self.decoder_options.det_opts = det_opts
self.output_symbols = {}
with open(cfg.output_dict, "r") as f:
for line in f:
items = line.rstrip().split()
assert len(items) == 2
self.output_symbols[int(items[1])] = items[0]
logger.info(f"Loading FST from {cfg.hlg_graph_path}")
self.fst = read_fst_kaldi(cfg.hlg_graph_path)
self.symbol_table = SymbolTable.read_text(cfg.output_dict)
self.executor = ThreadPoolExecutor(max_workers=cfg.num_threads)
def generate(self, models, sample, **unused):
"""Generate a batch of inferences."""
# model.forward normally channels prev_output_tokens into the decoder
# separately, but SequenceGenerator directly calls model.encoder
encoder_input = {
k: v for k, v in sample["net_input"].items() if k != "prev_output_tokens"
}
emissions, padding = self.get_emissions(models, encoder_input)
return self.decode(emissions, padding)
def get_emissions(self, models, encoder_input):
"""Run encoder and normalize emissions"""
model = models[0]
all_encoder_out = [m(**encoder_input) for m in models]
if len(all_encoder_out) > 1:
if "encoder_out" in all_encoder_out[0]:
encoder_out = {
"encoder_out": sum(e["encoder_out"] for e in all_encoder_out)
/ len(all_encoder_out),
"encoder_padding_mask": all_encoder_out[0]["encoder_padding_mask"],
}
padding = encoder_out["encoder_padding_mask"]
else:
encoder_out = {
"logits": sum(e["logits"] for e in all_encoder_out)
/ len(all_encoder_out),
"padding_mask": all_encoder_out[0]["padding_mask"],
}
padding = encoder_out["padding_mask"]
else:
encoder_out = all_encoder_out[0]
padding = (
encoder_out["padding_mask"]
if "padding_mask" in encoder_out
else encoder_out["encoder_padding_mask"]
)
if hasattr(model, "get_logits"):
emissions = model.get_logits(encoder_out, normalize=True)
else:
emissions = model.get_normalized_probs(encoder_out, log_probs=True)
return (
emissions.cpu().float().transpose(0, 1),
padding.cpu() if padding is not None and padding.any() else None,
)
def decode_one(self, logits, padding):
from kaldi.matrix import Matrix
decoder = self.dec_cls(self.fst, self.decoder_options)
asr = self.rec_cls(
decoder, self.symbol_table, acoustic_scale=self.acoustic_scale
)
if padding is not None:
logits = logits[~padding]
mat = Matrix(logits.numpy())
out = asr.decode(mat)
if self.nbest > 1:
from kaldi.fstext import shortestpath
from kaldi.fstext.utils import (
convert_compact_lattice_to_lattice,
convert_lattice_to_std,
convert_nbest_to_list,
get_linear_symbol_sequence,
)
lat = out["lattice"]
sp = shortestpath(lat, nshortest=self.nbest)
sp = convert_compact_lattice_to_lattice(sp)
sp = convert_lattice_to_std(sp)
seq = convert_nbest_to_list(sp)
results = []
for s in seq:
_, o, w = get_linear_symbol_sequence(s)
words = list(self.output_symbols[z] for z in o)
results.append(
{
"tokens": words,
"words": words,
"score": w.value,
"emissions": logits,
}
)
return results
else:
words = out["text"].split()
return [
{
"tokens": words,
"words": words,
"score": out["likelihood"],
"emissions": logits,
}
]
def decode(self, emissions, padding):
if padding is None:
padding = [None] * len(emissions)
ret = list(
map(
lambda e, p: self.executor.submit(self.decode_one, e, p),
emissions,
padding,
)
)
return ret
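    # Note: decode() returns a list of concurrent.futures.Future objects, one per
    # utterance; generate() forwards them as-is, so callers are expected to call
    # .result() on each entry to obtain the list of hypothesis dicts from decode_one().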
| EXA-1-master | exa/libraries/fairseq/examples/speech_recognition/kaldi/kaldi_decoder.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import absolute_import, division, print_function, unicode_literals
import argparse
import concurrent.futures
import json
import multiprocessing
import os
from collections import namedtuple
from itertools import chain
import sentencepiece as spm
from fairseq.data import Dictionary
MILLISECONDS_TO_SECONDS = 0.001
def process_sample(aud_path, label, utt_id, sp, tgt_dict):
    import torchaudio
    input = {}
    output = {}
    si, ei = torchaudio.info(aud_path)
    input["length_ms"] = int(
        si.length / si.channels / si.rate / MILLISECONDS_TO_SECONDS
    )
    input["path"] = aud_path
    token = " ".join(sp.EncodeAsPieces(label))
    ids = tgt_dict.encode_line(token, append_eos=False)
    output["text"] = label
output["token"] = token
output["tokenid"] = ", ".join(map(str, [t.tolist() for t in ids]))
return {utt_id: {"input": input, "output": output}}
def main():
parser = argparse.ArgumentParser()
parser.add_argument(
"--audio-dirs",
nargs="+",
default=["-"],
required=True,
help="input directories with audio files",
)
parser.add_argument(
"--labels",
required=True,
help="aggregated input labels with format <ID LABEL> per line",
type=argparse.FileType("r", encoding="UTF-8"),
)
parser.add_argument(
"--spm-model",
required=True,
help="sentencepiece model to use for encoding",
type=argparse.FileType("r", encoding="UTF-8"),
)
parser.add_argument(
"--dictionary",
required=True,
help="file to load fairseq dictionary from",
type=argparse.FileType("r", encoding="UTF-8"),
)
parser.add_argument("--audio-format", choices=["flac", "wav"], default="wav")
parser.add_argument(
"--output",
required=True,
type=argparse.FileType("w"),
help="path to save json output",
)
args = parser.parse_args()
sp = spm.SentencePieceProcessor()
sp.Load(args.spm_model.name)
tgt_dict = Dictionary.load(args.dictionary)
labels = {}
for line in args.labels:
(utt_id, label) = line.split(" ", 1)
labels[utt_id] = label
    if len(labels) == 0:
        raise Exception("No labels found in " + args.labels.name)
Sample = namedtuple("Sample", "aud_path utt_id")
samples = []
for path, _, files in chain.from_iterable(
os.walk(path) for path in args.audio_dirs
):
for f in files:
if f.endswith(args.audio_format):
if len(os.path.splitext(f)) != 2:
raise Exception("Expect <utt_id.extension> file name. Got: ", f)
utt_id = os.path.splitext(f)[0]
if utt_id not in labels:
continue
samples.append(Sample(os.path.join(path, f), utt_id))
utts = {}
num_cpu = multiprocessing.cpu_count()
with concurrent.futures.ThreadPoolExecutor(max_workers=num_cpu) as executor:
future_to_sample = {
executor.submit(
process_sample, s.aud_path, labels[s.utt_id], s.utt_id, sp, tgt_dict
): s
for s in samples
}
for future in concurrent.futures.as_completed(future_to_sample):
try:
data = future.result()
except Exception as exc:
print("generated an exception: ", exc)
else:
utts.update(data)
json.dump({"utts": utts}, args.output, indent=4)
if __name__ == "__main__":
main()
| EXA-1-master | exa/libraries/fairseq/examples/speech_recognition/datasets/asr_prep_json.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import absolute_import, division, print_function, unicode_literals
import re
from collections import deque
from enum import Enum
import numpy as np
"""
Utility modules for computation of Word Error Rate,
Alignments, as well as more granular metrics like
deletion, insertion and substitutions.
"""
class Code(Enum):
match = 1
substitution = 2
insertion = 3
deletion = 4
class Token(object):
def __init__(self, lbl="", st=np.nan, en=np.nan):
if np.isnan(st):
self.label, self.start, self.end = "", 0.0, 0.0
else:
self.label, self.start, self.end = lbl, st, en
class AlignmentResult(object):
def __init__(self, refs, hyps, codes, score):
self.refs = refs # std::deque<int>
self.hyps = hyps # std::deque<int>
self.codes = codes # std::deque<Code>
self.score = score # float
def coordinate_to_offset(row, col, ncols):
return int(row * ncols + col)
def offset_to_row(offset, ncols):
return int(offset / ncols)
def offset_to_col(offset, ncols):
return int(offset % ncols)
def trimWhitespace(str):
return re.sub(" +", " ", re.sub(" *$", "", re.sub("^ *", "", str)))
def str2toks(str):
pieces = trimWhitespace(str).split(" ")
toks = []
for p in pieces:
toks.append(Token(p, 0.0, 0.0))
return toks
class EditDistance(object):
def __init__(self, time_mediated):
self.time_mediated_ = time_mediated
self.scores_ = np.nan # Eigen::Matrix<float, Eigen::Dynamic, Eigen::Dynamic>
self.backtraces_ = (
np.nan
) # Eigen::Matrix<size_t, Eigen::Dynamic, Eigen::Dynamic> backtraces_;
self.confusion_pairs_ = {}
def cost(self, ref, hyp, code):
if self.time_mediated_:
if code == Code.match:
return abs(ref.start - hyp.start) + abs(ref.end - hyp.end)
elif code == Code.insertion:
return hyp.end - hyp.start
elif code == Code.deletion:
return ref.end - ref.start
else: # substitution
return abs(ref.start - hyp.start) + abs(ref.end - hyp.end) + 0.1
else:
if code == Code.match:
return 0
elif code == Code.insertion or code == Code.deletion:
return 3
else: # substitution
return 4
def get_result(self, refs, hyps):
res = AlignmentResult(refs=deque(), hyps=deque(), codes=deque(), score=np.nan)
num_rows, num_cols = self.scores_.shape
res.score = self.scores_[num_rows - 1, num_cols - 1]
curr_offset = coordinate_to_offset(num_rows - 1, num_cols - 1, num_cols)
while curr_offset != 0:
curr_row = offset_to_row(curr_offset, num_cols)
curr_col = offset_to_col(curr_offset, num_cols)
prev_offset = self.backtraces_[curr_row, curr_col]
prev_row = offset_to_row(prev_offset, num_cols)
prev_col = offset_to_col(prev_offset, num_cols)
res.refs.appendleft(curr_row - 1) # Note: this was .push_front() in C++
res.hyps.appendleft(curr_col - 1)
if curr_row - 1 == prev_row and curr_col == prev_col:
res.codes.appendleft(Code.deletion)
elif curr_row == prev_row and curr_col - 1 == prev_col:
res.codes.appendleft(Code.insertion)
else:
# assert(curr_row - 1 == prev_row and curr_col - 1 == prev_col)
ref_str = refs[res.refs[0]].label
hyp_str = hyps[res.hyps[0]].label
if ref_str == hyp_str:
res.codes.appendleft(Code.match)
else:
res.codes.appendleft(Code.substitution)
confusion_pair = "%s -> %s" % (ref_str, hyp_str)
if confusion_pair not in self.confusion_pairs_:
self.confusion_pairs_[confusion_pair] = 1
else:
self.confusion_pairs_[confusion_pair] += 1
curr_offset = prev_offset
return res
def align(self, refs, hyps):
if len(refs) == 0 and len(hyps) == 0:
return np.nan
# NOTE: we're not resetting the values in these matrices because every value
# will be overridden in the loop below. If this assumption doesn't hold,
# be sure to set all entries in self.scores_ and self.backtraces_ to 0.
self.scores_ = np.zeros((len(refs) + 1, len(hyps) + 1))
self.backtraces_ = np.zeros((len(refs) + 1, len(hyps) + 1))
num_rows, num_cols = self.scores_.shape
for i in range(num_rows):
for j in range(num_cols):
if i == 0 and j == 0:
self.scores_[i, j] = 0.0
self.backtraces_[i, j] = 0
continue
if i == 0:
self.scores_[i, j] = self.scores_[i, j - 1] + self.cost(
None, hyps[j - 1], Code.insertion
)
self.backtraces_[i, j] = coordinate_to_offset(i, j - 1, num_cols)
continue
if j == 0:
self.scores_[i, j] = self.scores_[i - 1, j] + self.cost(
refs[i - 1], None, Code.deletion
)
self.backtraces_[i, j] = coordinate_to_offset(i - 1, j, num_cols)
continue
# Below here both i and j are greater than 0
ref = refs[i - 1]
hyp = hyps[j - 1]
best_score = self.scores_[i - 1, j - 1] + (
self.cost(ref, hyp, Code.match)
if (ref.label == hyp.label)
else self.cost(ref, hyp, Code.substitution)
)
prev_row = i - 1
prev_col = j - 1
ins = self.scores_[i, j - 1] + self.cost(None, hyp, Code.insertion)
if ins < best_score:
best_score = ins
prev_row = i
prev_col = j - 1
delt = self.scores_[i - 1, j] + self.cost(ref, None, Code.deletion)
if delt < best_score:
best_score = delt
prev_row = i - 1
prev_col = j
self.scores_[i, j] = best_score
self.backtraces_[i, j] = coordinate_to_offset(
prev_row, prev_col, num_cols
)
return self.get_result(refs, hyps)
class WERTransformer(object):
def __init__(self, hyp_str, ref_str, verbose=True):
self.ed_ = EditDistance(False)
self.id2oracle_errs_ = {}
self.utts_ = 0
self.words_ = 0
self.insertions_ = 0
self.deletions_ = 0
self.substitutions_ = 0
self.process(["dummy_str", hyp_str, ref_str])
if verbose:
print("'%s' vs '%s'" % (hyp_str, ref_str))
self.report_result()
def process(self, input): # std::vector<std::string>&& input
if len(input) < 3:
print(
"Input must be of the form <id> ... <hypo> <ref> , got ",
len(input),
" inputs:",
)
return None
# Align
# std::vector<Token> hyps;
# std::vector<Token> refs;
hyps = str2toks(input[-2])
refs = str2toks(input[-1])
alignment = self.ed_.align(refs, hyps)
if alignment is None:
print("Alignment is null")
return np.nan
# Tally errors
ins = 0
dels = 0
subs = 0
for code in alignment.codes:
if code == Code.substitution:
subs += 1
elif code == Code.insertion:
ins += 1
elif code == Code.deletion:
dels += 1
# Output
row = input
row.append(str(len(refs)))
row.append(str(ins))
row.append(str(dels))
row.append(str(subs))
# print(row)
# Accumulate
kIdIndex = 0
kNBestSep = "/"
pieces = input[kIdIndex].split(kNBestSep)
if len(pieces) == 0:
print(
"Error splitting ",
input[kIdIndex],
" on '",
kNBestSep,
"', got empty list",
)
return np.nan
id = pieces[0]
if id not in self.id2oracle_errs_:
self.utts_ += 1
self.words_ += len(refs)
self.insertions_ += ins
self.deletions_ += dels
self.substitutions_ += subs
self.id2oracle_errs_[id] = [ins, dels, subs]
else:
curr_err = ins + dels + subs
prev_err = np.sum(self.id2oracle_errs_[id])
if curr_err < prev_err:
self.id2oracle_errs_[id] = [ins, dels, subs]
return 0
def report_result(self):
# print("---------- Summary ---------------")
if self.words_ == 0:
print("No words counted")
return
# 1-best
best_wer = (
100.0
* (self.insertions_ + self.deletions_ + self.substitutions_)
/ self.words_
)
print(
"\tWER = %0.2f%% (%i utts, %i words, %0.2f%% ins, "
"%0.2f%% dels, %0.2f%% subs)"
% (
best_wer,
self.utts_,
self.words_,
100.0 * self.insertions_ / self.words_,
100.0 * self.deletions_ / self.words_,
100.0 * self.substitutions_ / self.words_,
)
)
def wer(self):
if self.words_ == 0:
wer = np.nan
else:
wer = (
100.0
* (self.insertions_ + self.deletions_ + self.substitutions_)
/ self.words_
)
return wer
def stats(self):
if self.words_ == 0:
stats = {}
else:
wer = (
100.0
* (self.insertions_ + self.deletions_ + self.substitutions_)
/ self.words_
)
stats = dict(
{
"wer": wer,
"utts": self.utts_,
"numwords": self.words_,
"ins": self.insertions_,
"dels": self.deletions_,
"subs": self.substitutions_,
"confusion_pairs": self.ed_.confusion_pairs_,
}
)
return stats
def calc_wer(hyp_str, ref_str):
t = WERTransformer(hyp_str, ref_str, verbose=0)
return t.wer()
def calc_wer_stats(hyp_str, ref_str):
t = WERTransformer(hyp_str, ref_str, verbose=0)
return t.stats()
def get_wer_alignment_codes(hyp_str, ref_str):
"""
INPUT: hypothesis string, reference string
OUTPUT: List of alignment codes (intermediate results from WER computation)
"""
t = WERTransformer(hyp_str, ref_str, verbose=0)
return t.ed_.align(str2toks(ref_str), str2toks(hyp_str)).codes
def merge_counts(x, y):
# Merge two hashes which have 'counts' as their values
# This can be used for example to merge confusion pair counts
# conf_pairs = merge_counts(conf_pairs, stats['confusion_pairs'])
for k, v in y.items():
if k not in x:
x[k] = 0
x[k] += v
return x
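# Minimal self-check added for illustration (not part of the original API); the
# hypothesis/reference strings below are made-up examples.
if __name__ == "__main__":
    _ref = "the cat sat on the mat"
    _hyp = "the cat sat on mat"
    # one deletion against a six-word reference -> WER should be about 16.67
    print("WER: %.2f" % calc_wer(_hyp, _ref))
    print("stats:", calc_wer_stats(_hyp, _ref))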
| EXA-1-master | exa/libraries/fairseq/examples/speech_recognition/utils/wer_utils.py |
import importlib
import os
for file in sorted(os.listdir(os.path.dirname(__file__))):
if file.endswith(".py") and not file.startswith("_"):
model_name = file[: file.find(".py")]
importlib.import_module("examples.speech_recognition.models." + model_name)
| EXA-1-master | exa/libraries/fairseq/examples/speech_recognition/models/__init__.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import math
from collections.abc import Iterable
import torch
import torch.nn as nn
from examples.speech_recognition.data.data_utils import lengths_to_encoder_padding_mask
from fairseq import utils
from fairseq.models import (
FairseqEncoder,
FairseqEncoderDecoderModel,
FairseqEncoderModel,
FairseqIncrementalDecoder,
register_model,
register_model_architecture,
)
from fairseq.modules import (
LinearizedConvolution,
TransformerDecoderLayer,
TransformerEncoderLayer,
VGGBlock,
)
@register_model("asr_vggtransformer")
class VGGTransformerModel(FairseqEncoderDecoderModel):
"""
Transformers with convolutional context for ASR
https://arxiv.org/abs/1904.11660
"""
def __init__(self, encoder, decoder):
super().__init__(encoder, decoder)
@staticmethod
def add_args(parser):
"""Add model-specific arguments to the parser."""
parser.add_argument(
"--input-feat-per-channel",
type=int,
metavar="N",
help="encoder input dimension per input channel",
)
parser.add_argument(
"--vggblock-enc-config",
type=str,
metavar="EXPR",
help="""
an array of tuples each containing the configuration of one vggblock:
[(out_channels,
conv_kernel_size,
pooling_kernel_size,
num_conv_layers,
use_layer_norm), ...])
""",
)
parser.add_argument(
"--transformer-enc-config",
type=str,
metavar="EXPR",
help=""""
a tuple containing the configuration of the encoder transformer layers
configurations:
[(input_dim,
num_heads,
ffn_dim,
normalize_before,
dropout,
attention_dropout,
relu_dropout), ...]')
""",
)
parser.add_argument(
"--enc-output-dim",
type=int,
metavar="N",
help="""
encoder output dimension, can be None. If specified, projecting the
transformer output to the specified dimension""",
)
parser.add_argument(
"--in-channels",
type=int,
metavar="N",
help="number of encoder input channels",
)
parser.add_argument(
"--tgt-embed-dim",
type=int,
metavar="N",
help="embedding dimension of the decoder target tokens",
)
parser.add_argument(
"--transformer-dec-config",
type=str,
metavar="EXPR",
help="""
a tuple containing the configuration of the decoder transformer layers:
[(input_dim,
num_heads,
ffn_dim,
normalize_before,
dropout,
attention_dropout,
relu_dropout), ...]
""",
)
parser.add_argument(
"--conv-dec-config",
type=str,
metavar="EXPR",
help="""
an array of tuples for the decoder 1-D convolution config
[(out_channels, conv_kernel_size, use_layer_norm), ...]""",
)
@classmethod
def build_encoder(cls, args, task):
return VGGTransformerEncoder(
input_feat_per_channel=args.input_feat_per_channel,
vggblock_config=eval(args.vggblock_enc_config),
transformer_config=eval(args.transformer_enc_config),
encoder_output_dim=args.enc_output_dim,
in_channels=args.in_channels,
)
@classmethod
def build_decoder(cls, args, task):
return TransformerDecoder(
dictionary=task.target_dictionary,
embed_dim=args.tgt_embed_dim,
transformer_config=eval(args.transformer_dec_config),
conv_config=eval(args.conv_dec_config),
encoder_output_dim=args.enc_output_dim,
)
@classmethod
def build_model(cls, args, task):
"""Build a new model instance."""
# make sure that all args are properly defaulted
# (in case there are any new ones)
base_architecture(args)
encoder = cls.build_encoder(args, task)
decoder = cls.build_decoder(args, task)
return cls(encoder, decoder)
def get_normalized_probs(self, net_output, log_probs, sample=None):
# net_output['encoder_out'] is a (B, T, D) tensor
lprobs = super().get_normalized_probs(net_output, log_probs, sample)
lprobs.batch_first = True
return lprobs
DEFAULT_ENC_VGGBLOCK_CONFIG = ((32, 3, 2, 2, False),) * 2
DEFAULT_ENC_TRANSFORMER_CONFIG = ((256, 4, 1024, True, 0.2, 0.2, 0.2),) * 2
# 256: embedding dimension
# 4: number of heads
# 1024: FFN
# True: apply LayerNorm before (dropout + residual) instead of after
# 0.2 (dropout): dropout after MultiheadAttention and second FC
# 0.2 (attention_dropout): dropout in MultiheadAttention
# 0.2 (relu_dropout): dropout after ReLU
DEFAULT_DEC_TRANSFORMER_CONFIG = ((256, 2, 1024, True, 0.2, 0.2, 0.2),) * 2
DEFAULT_DEC_CONV_CONFIG = ((256, 3, True),) * 2
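# Illustrative sketch of how these configs are consumed: the CLI passes them as
# strings that build_encoder/build_decoder parse with eval(), e.g. (doctest-style)
# >>> cfg = eval("((256, 4, 1024, True, 0.2, 0.2, 0.2),) * 2")
# >>> cfg[0]
# (256, 4, 1024, True, 0.2, 0.2, 0.2)
# i.e. embed_dim=256, heads=4, ffn_dim=1024, normalize_before=True and the three
# dropout values above, one tuple per layer.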
# TODO: replace the transformer encoder config from a one-liner
# to explicit args to get rid of this transformation
def prepare_transformer_encoder_params(
input_dim,
num_heads,
ffn_dim,
normalize_before,
dropout,
attention_dropout,
relu_dropout,
):
args = argparse.Namespace()
args.encoder_embed_dim = input_dim
args.encoder_attention_heads = num_heads
args.attention_dropout = attention_dropout
args.dropout = dropout
args.activation_dropout = relu_dropout
args.encoder_normalize_before = normalize_before
args.encoder_ffn_embed_dim = ffn_dim
return args
def prepare_transformer_decoder_params(
input_dim,
num_heads,
ffn_dim,
normalize_before,
dropout,
attention_dropout,
relu_dropout,
):
args = argparse.Namespace()
args.encoder_embed_dim = None
args.decoder_embed_dim = input_dim
args.decoder_attention_heads = num_heads
args.attention_dropout = attention_dropout
args.dropout = dropout
args.activation_dropout = relu_dropout
args.decoder_normalize_before = normalize_before
args.decoder_ffn_embed_dim = ffn_dim
return args
class VGGTransformerEncoder(FairseqEncoder):
"""VGG + Transformer encoder"""
def __init__(
self,
input_feat_per_channel,
vggblock_config=DEFAULT_ENC_VGGBLOCK_CONFIG,
transformer_config=DEFAULT_ENC_TRANSFORMER_CONFIG,
encoder_output_dim=512,
in_channels=1,
transformer_context=None,
transformer_sampling=None,
):
"""constructor for VGGTransformerEncoder
Args:
- input_feat_per_channel: feature dim (not including stacked,
just base feature)
- in_channels: # input channels (e.g., if we stack 8 feature vectors
together, this is 8)
- vggblock_config: configuration of vggblock, see comments on
DEFAULT_ENC_VGGBLOCK_CONFIG
- transformer_config: configuration of transformer layer, see comments
on DEFAULT_ENC_TRANSFORMER_CONFIG
- encoder_output_dim: final transformer output embedding dimension
- transformer_context: (left, right) if set, self-attention will be focused
on (t-left, t+right)
- transformer_sampling: an iterable of int, must match with
len(transformer_config), transformer_sampling[i] indicates sampling
factor for the i-th transformer layer, after the multihead attention and feedforward
part
"""
super().__init__(None)
self.num_vggblocks = 0
if vggblock_config is not None:
if not isinstance(vggblock_config, Iterable):
raise ValueError("vggblock_config is not iterable")
self.num_vggblocks = len(vggblock_config)
self.conv_layers = nn.ModuleList()
self.in_channels = in_channels
self.input_dim = input_feat_per_channel
self.pooling_kernel_sizes = []
if vggblock_config is not None:
for _, config in enumerate(vggblock_config):
(
out_channels,
conv_kernel_size,
pooling_kernel_size,
num_conv_layers,
layer_norm,
) = config
self.conv_layers.append(
VGGBlock(
in_channels,
out_channels,
conv_kernel_size,
pooling_kernel_size,
num_conv_layers,
input_dim=input_feat_per_channel,
layer_norm=layer_norm,
)
)
self.pooling_kernel_sizes.append(pooling_kernel_size)
in_channels = out_channels
input_feat_per_channel = self.conv_layers[-1].output_dim
transformer_input_dim = self.infer_conv_output_dim(
self.in_channels, self.input_dim
)
# transformer_input_dim is the output dimension of VGG part
self.validate_transformer_config(transformer_config)
self.transformer_context = self.parse_transformer_context(transformer_context)
self.transformer_sampling = self.parse_transformer_sampling(
transformer_sampling, len(transformer_config)
)
self.transformer_layers = nn.ModuleList()
if transformer_input_dim != transformer_config[0][0]:
self.transformer_layers.append(
Linear(transformer_input_dim, transformer_config[0][0])
)
self.transformer_layers.append(
TransformerEncoderLayer(
prepare_transformer_encoder_params(*transformer_config[0])
)
)
for i in range(1, len(transformer_config)):
if transformer_config[i - 1][0] != transformer_config[i][0]:
self.transformer_layers.append(
Linear(transformer_config[i - 1][0], transformer_config[i][0])
)
self.transformer_layers.append(
TransformerEncoderLayer(
prepare_transformer_encoder_params(*transformer_config[i])
)
)
self.encoder_output_dim = encoder_output_dim
self.transformer_layers.extend(
[
Linear(transformer_config[-1][0], encoder_output_dim),
LayerNorm(encoder_output_dim),
]
)
def forward(self, src_tokens, src_lengths, **kwargs):
"""
src_tokens: padded tensor (B, T, C * feat)
src_lengths: tensor of original lengths of input utterances (B,)
"""
bsz, max_seq_len, _ = src_tokens.size()
x = src_tokens.view(bsz, max_seq_len, self.in_channels, self.input_dim)
x = x.transpose(1, 2).contiguous()
# (B, C, T, feat)
for layer_idx in range(len(self.conv_layers)):
x = self.conv_layers[layer_idx](x)
bsz, _, output_seq_len, _ = x.size()
# (B, C, T, feat) -> (B, T, C, feat) -> (T, B, C, feat) -> (T, B, C * feat)
x = x.transpose(1, 2).transpose(0, 1)
x = x.contiguous().view(output_seq_len, bsz, -1)
input_lengths = src_lengths.clone()
for s in self.pooling_kernel_sizes:
input_lengths = (input_lengths.float() / s).ceil().long()
encoder_padding_mask, _ = lengths_to_encoder_padding_mask(
input_lengths, batch_first=True
)
if not encoder_padding_mask.any():
encoder_padding_mask = None
subsampling_factor = int(max_seq_len * 1.0 / output_seq_len + 0.5)
attn_mask = self.lengths_to_attn_mask(input_lengths, subsampling_factor)
transformer_layer_idx = 0
for layer_idx in range(len(self.transformer_layers)):
if isinstance(self.transformer_layers[layer_idx], TransformerEncoderLayer):
x = self.transformer_layers[layer_idx](
x, encoder_padding_mask, attn_mask
)
if self.transformer_sampling[transformer_layer_idx] != 1:
sampling_factor = self.transformer_sampling[transformer_layer_idx]
x, encoder_padding_mask, attn_mask = self.slice(
x, encoder_padding_mask, attn_mask, sampling_factor
)
transformer_layer_idx += 1
else:
x = self.transformer_layers[layer_idx](x)
# encoder_padding_mask is a (T x B) tensor, its [t, b] elements indicate
# whether encoder_output[t, b] is valid or not (valid=0, invalid=1)
return {
"encoder_out": x, # (T, B, C)
"encoder_padding_mask": encoder_padding_mask.t()
if encoder_padding_mask is not None
else None,
# (B, T) --> (T, B)
}
def infer_conv_output_dim(self, in_channels, input_dim):
sample_seq_len = 200
sample_bsz = 10
x = torch.randn(sample_bsz, in_channels, sample_seq_len, input_dim)
for i, _ in enumerate(self.conv_layers):
x = self.conv_layers[i](x)
x = x.transpose(1, 2)
mb, seq = x.size()[:2]
return x.contiguous().view(mb, seq, -1).size(-1)
def validate_transformer_config(self, transformer_config):
for config in transformer_config:
input_dim, num_heads = config[:2]
if input_dim % num_heads != 0:
msg = (
"ERROR in transformer config {}: ".format(config)
+ "input dimension {} ".format(input_dim)
+ "not dividable by number of heads {}".format(num_heads)
)
raise ValueError(msg)
def parse_transformer_context(self, transformer_context):
"""
transformer_context can be the following:
- None; indicates no context is used, i.e.,
transformer can access full context
- a tuple/list of two int; indicates left and right context,
any number <0 indicates infinite context
* e.g., (5, 6) indicates that for query at x_t, transformer can
access [t-5, t+6] (inclusive)
* e.g., (-1, 6) indicates that for query at x_t, transformer can
access [0, t+6] (inclusive)
"""
if transformer_context is None:
return None
if not isinstance(transformer_context, Iterable):
raise ValueError("transformer context must be Iterable if it is not None")
if len(transformer_context) != 2:
raise ValueError("transformer context must have length 2")
left_context = transformer_context[0]
if left_context < 0:
left_context = None
right_context = transformer_context[1]
if right_context < 0:
right_context = None
if left_context is None and right_context is None:
return None
return (left_context, right_context)
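# Quick illustration of the parsing above (doctest-style; `enc` is assumed to be
# an instantiated VGGTransformerEncoder):
# >>> enc.parse_transformer_context((5, 6))    # attend to [t-5, t+6]
# (5, 6)
# >>> enc.parse_transformer_context((-1, 6))   # unbounded left context
# (None, 6)
# >>> enc.parse_transformer_context((-1, -1))  # no restriction -> full context
# None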
def parse_transformer_sampling(self, transformer_sampling, num_layers):
"""
parsing transformer sampling configuration
Args:
- transformer_sampling, accepted input:
* None, indicating no sampling
* an Iterable with int (>0) as element
- num_layers, expected number of transformer layers, must match with
the length of transformer_sampling if it is not None
Returns:
- A tuple with length num_layers
"""
if transformer_sampling is None:
return (1,) * num_layers
if not isinstance(transformer_sampling, Iterable):
raise ValueError(
"transformer_sampling must be an iterable if it is not None"
)
if len(transformer_sampling) != num_layers:
raise ValueError(
"transformer_sampling {} does not match with the number "
"of layers {}".format(transformer_sampling, num_layers)
)
for layer, value in enumerate(transformer_sampling):
if not isinstance(value, int):
raise ValueError("Invalid value in transformer_sampling: ")
if value < 1:
raise ValueError(
"{} layer's subsampling is {}.".format(layer, value)
+ " This is not allowed! "
)
return transformer_sampling
def slice(self, embedding, padding_mask, attn_mask, sampling_factor):
"""
embedding is a (T, B, D) tensor
padding_mask is a (B, T) tensor or None
attn_mask is a (T, T) tensor or None
"""
embedding = embedding[::sampling_factor, :, :]
if padding_mask is not None:
padding_mask = padding_mask[:, ::sampling_factor]
if attn_mask is not None:
attn_mask = attn_mask[::sampling_factor, ::sampling_factor]
return embedding, padding_mask, attn_mask
def lengths_to_attn_mask(self, input_lengths, subsampling_factor=1):
"""
create attention mask according to sequence lengths and transformer
context
Args:
- input_lengths: (B, )-shape Int/Long tensor; input_lengths[b] is
the length of b-th sequence
- subsampling_factor: int
* Note that left_context and right_context are specified at the
input frame level, while the input to the transformer may already
have gone through subsampling (e.g., striding in the vggblock);
we use subsampling_factor to scale the left/right context
Return:
- a (T, T) binary tensor or None, where T is max(input_lengths)
* if self.transformer_context is None, None
* if left_context is None,
* attn_mask[t, t + right_context + 1:] = 1
* others = 0
* if right_context is None,
* attn_mask[t, 0:t - left_context] = 1
* others = 0
* otherwise
* attn_mask[t, t - left_context: t + right_context + 1] = 0
* others = 1
"""
if self.transformer_context is None:
return None
maxT = torch.max(input_lengths).item()
attn_mask = torch.zeros(maxT, maxT)
left_context = self.transformer_context[0]
right_context = self.transformer_context[1]
if left_context is not None:
left_context = math.ceil(self.transformer_context[0] / subsampling_factor)
if right_context is not None:
right_context = math.ceil(self.transformer_context[1] / subsampling_factor)
for t in range(maxT):
if left_context is not None:
st = 0
en = max(st, t - left_context)
attn_mask[t, st:en] = 1
if right_context is not None:
st = t + right_context + 1
st = min(st, maxT - 1)
attn_mask[t, st:] = 1
return attn_mask.to(input_lengths.device)
def reorder_encoder_out(self, encoder_out, new_order):
encoder_out["encoder_out"] = encoder_out["encoder_out"].index_select(
1, new_order
)
if encoder_out["encoder_padding_mask"] is not None:
encoder_out["encoder_padding_mask"] = encoder_out[
"encoder_padding_mask"
].index_select(1, new_order)
return encoder_out
class TransformerDecoder(FairseqIncrementalDecoder):
"""
Transformer decoder consisting of *args.decoder_layers* layers. Each layer
is a :class:`TransformerDecoderLayer`.
Args:
dictionary (~fairseq.data.Dictionary): decoding dictionary
embed_dim (int): embedding dimension of the decoder target tokens
transformer_config: per-layer decoder transformer configuration, in the
same format as DEFAULT_DEC_TRANSFORMER_CONFIG
conv_config: decoder 1-D convolution configuration, in the same format
as DEFAULT_DEC_CONV_CONFIG
encoder_output_dim (int): dimension of the encoder outputs attended to
"""
def __init__(
self,
dictionary,
embed_dim=512,
transformer_config=DEFAULT_DEC_TRANSFORMER_CONFIG,
conv_config=DEFAULT_DEC_CONV_CONFIG,
encoder_output_dim=512,
):
super().__init__(dictionary)
vocab_size = len(dictionary)
self.padding_idx = dictionary.pad()
self.embed_tokens = Embedding(vocab_size, embed_dim, self.padding_idx)
self.conv_layers = nn.ModuleList()
for i in range(len(conv_config)):
out_channels, kernel_size, layer_norm = conv_config[i]
if i == 0:
conv_layer = LinearizedConv1d(
embed_dim, out_channels, kernel_size, padding=kernel_size - 1
)
else:
conv_layer = LinearizedConv1d(
conv_config[i - 1][0],
out_channels,
kernel_size,
padding=kernel_size - 1,
)
self.conv_layers.append(conv_layer)
if layer_norm:
self.conv_layers.append(nn.LayerNorm(out_channels))
self.conv_layers.append(nn.ReLU())
self.layers = nn.ModuleList()
if conv_config[-1][0] != transformer_config[0][0]:
self.layers.append(Linear(conv_config[-1][0], transformer_config[0][0]))
self.layers.append(
TransformerDecoderLayer(
prepare_transformer_decoder_params(*transformer_config[0])
)
)
for i in range(1, len(transformer_config)):
if transformer_config[i - 1][0] != transformer_config[i][0]:
self.layers.append(
Linear(transformer_config[i - 1][0], transformer_config[i][0])
)
self.layers.append(
TransformerDecoderLayer(
prepare_transformer_decoder_params(*transformer_config[i])
)
)
self.fc_out = Linear(transformer_config[-1][0], vocab_size)
def forward(self, prev_output_tokens, encoder_out=None, incremental_state=None):
"""
Args:
prev_output_tokens (LongTensor): previous decoder outputs of shape
`(batch, tgt_len)`, for input feeding/teacher forcing
encoder_out (Tensor, optional): output from the encoder, used for
encoder-side attention
incremental_state (dict): dictionary used for storing state during
:ref:`Incremental decoding`
Returns:
tuple:
- the last decoder layer's output of shape `(batch, tgt_len,
vocab)`
- the last decoder layer's attention weights of shape `(batch,
tgt_len, src_len)`
"""
target_padding_mask = (
(prev_output_tokens == self.padding_idx).to(prev_output_tokens.device)
if incremental_state is None
else None
)
if incremental_state is not None:
prev_output_tokens = prev_output_tokens[:, -1:]
# embed tokens
x = self.embed_tokens(prev_output_tokens)
# B x T x C -> T x B x C
x = self._transpose_if_training(x, incremental_state)
for layer in self.conv_layers:
if isinstance(layer, LinearizedConvolution):
x = layer(x, incremental_state)
else:
x = layer(x)
# B x T x C -> T x B x C
x = self._transpose_if_inference(x, incremental_state)
# decoder layers
for layer in self.layers:
if isinstance(layer, TransformerDecoderLayer):
x, *_ = layer(
x,
(encoder_out["encoder_out"] if encoder_out is not None else None),
(
encoder_out["encoder_padding_mask"].t()
if encoder_out["encoder_padding_mask"] is not None
else None
),
incremental_state,
self_attn_mask=(
self.buffered_future_mask(x)
if incremental_state is None
else None
),
self_attn_padding_mask=(
target_padding_mask if incremental_state is None else None
),
)
else:
x = layer(x)
# T x B x C -> B x T x C
x = x.transpose(0, 1)
x = self.fc_out(x)
return x, None
def buffered_future_mask(self, tensor):
dim = tensor.size(0)
if (
not hasattr(self, "_future_mask")
or self._future_mask is None
or self._future_mask.device != tensor.device
):
self._future_mask = torch.triu(
utils.fill_with_neg_inf(tensor.new(dim, dim)), 1
)
if self._future_mask.size(0) < dim:
self._future_mask = torch.triu(
utils.fill_with_neg_inf(self._future_mask.resize_(dim, dim)), 1
)
return self._future_mask[:dim, :dim]
def _transpose_if_training(self, x, incremental_state):
if incremental_state is None:
x = x.transpose(0, 1)
return x
def _transpose_if_inference(self, x, incremental_state):
if incremental_state:
x = x.transpose(0, 1)
return x
@register_model("asr_vggtransformer_encoder")
class VGGTransformerEncoderModel(FairseqEncoderModel):
def __init__(self, encoder):
super().__init__(encoder)
@staticmethod
def add_args(parser):
"""Add model-specific arguments to the parser."""
parser.add_argument(
"--input-feat-per-channel",
type=int,
metavar="N",
help="encoder input dimension per input channel",
)
parser.add_argument(
"--vggblock-enc-config",
type=str,
metavar="EXPR",
help="""
an array of tuples each containing the configuration of one vggblock
[(out_channels, conv_kernel_size, pooling_kernel_size,num_conv_layers), ...]
""",
)
parser.add_argument(
"--transformer-enc-config",
type=str,
metavar="EXPR",
help="""
a tuple containing the configuration of the Transformer layers:
[(input_dim,
num_heads,
ffn_dim,
normalize_before,
dropout,
attention_dropout,
relu_dropout), ]""",
)
parser.add_argument(
"--enc-output-dim",
type=int,
metavar="N",
help="encoder output dimension, projecting the LSTM output",
)
parser.add_argument(
"--in-channels",
type=int,
metavar="N",
help="number of encoder input channels",
)
parser.add_argument(
"--transformer-context",
type=str,
metavar="EXPR",
help="""
either None or a tuple of two ints, indicating left/right context a
transformer can have access to""",
)
parser.add_argument(
"--transformer-sampling",
type=str,
metavar="EXPR",
help="""
either None or a tuple of ints, indicating sampling factor in each layer""",
)
@classmethod
def build_model(cls, args, task):
"""Build a new model instance."""
base_architecture_enconly(args)
encoder = VGGTransformerEncoderOnly(
vocab_size=len(task.target_dictionary),
input_feat_per_channel=args.input_feat_per_channel,
vggblock_config=eval(args.vggblock_enc_config),
transformer_config=eval(args.transformer_enc_config),
encoder_output_dim=args.enc_output_dim,
in_channels=args.in_channels,
transformer_context=eval(args.transformer_context),
transformer_sampling=eval(args.transformer_sampling),
)
return cls(encoder)
def get_normalized_probs(self, net_output, log_probs, sample=None):
# net_output['encoder_out'] is a (T, B, D) tensor
lprobs = super().get_normalized_probs(net_output, log_probs, sample)
# lprobs is a (T, B, D) tensor
# we need to transpose to get a (B, T, D) tensor
lprobs = lprobs.transpose(0, 1).contiguous()
lprobs.batch_first = True
return lprobs
class VGGTransformerEncoderOnly(VGGTransformerEncoder):
def __init__(
self,
vocab_size,
input_feat_per_channel,
vggblock_config=DEFAULT_ENC_VGGBLOCK_CONFIG,
transformer_config=DEFAULT_ENC_TRANSFORMER_CONFIG,
encoder_output_dim=512,
in_channels=1,
transformer_context=None,
transformer_sampling=None,
):
super().__init__(
input_feat_per_channel=input_feat_per_channel,
vggblock_config=vggblock_config,
transformer_config=transformer_config,
encoder_output_dim=encoder_output_dim,
in_channels=in_channels,
transformer_context=transformer_context,
transformer_sampling=transformer_sampling,
)
self.fc_out = Linear(self.encoder_output_dim, vocab_size)
def forward(self, src_tokens, src_lengths, **kwargs):
"""
src_tokens: padded tensor (B, T, C * feat)
src_lengths: tensor of original lengths of input utterances (B,)
"""
enc_out = super().forward(src_tokens, src_lengths)
x = self.fc_out(enc_out["encoder_out"])
# x = F.log_softmax(x, dim=-1)
# Note: this line is not needed because model.get_normalized_probs will
# call log_softmax
return {
"encoder_out": x, # (T, B, C)
"encoder_padding_mask": enc_out["encoder_padding_mask"], # (T, B)
}
def max_positions(self):
"""Maximum input length supported by the encoder."""
return (1e6, 1e6) # an arbitrary large number
def Embedding(num_embeddings, embedding_dim, padding_idx):
m = nn.Embedding(num_embeddings, embedding_dim, padding_idx=padding_idx)
# nn.init.uniform_(m.weight, -0.1, 0.1)
# nn.init.constant_(m.weight[padding_idx], 0)
return m
def Linear(in_features, out_features, bias=True, dropout=0):
"""Linear layer (input: N x T x C)"""
m = nn.Linear(in_features, out_features, bias=bias)
# m.weight.data.uniform_(-0.1, 0.1)
# if bias:
# m.bias.data.uniform_(-0.1, 0.1)
return m
def LinearizedConv1d(in_channels, out_channels, kernel_size, dropout=0, **kwargs):
"""Weight-normalized Conv1d layer optimized for decoding"""
m = LinearizedConvolution(in_channels, out_channels, kernel_size, **kwargs)
std = math.sqrt((4 * (1.0 - dropout)) / (m.kernel_size[0] * in_channels))
nn.init.normal_(m.weight, mean=0, std=std)
nn.init.constant_(m.bias, 0)
return nn.utils.weight_norm(m, dim=2)
def LayerNorm(embedding_dim):
m = nn.LayerNorm(embedding_dim)
return m
# seq2seq models
def base_architecture(args):
args.input_feat_per_channel = getattr(args, "input_feat_per_channel", 40)
args.vggblock_enc_config = getattr(
args, "vggblock_enc_config", DEFAULT_ENC_VGGBLOCK_CONFIG
)
args.transformer_enc_config = getattr(
args, "transformer_enc_config", DEFAULT_ENC_TRANSFORMER_CONFIG
)
args.enc_output_dim = getattr(args, "enc_output_dim", 512)
args.in_channels = getattr(args, "in_channels", 1)
args.tgt_embed_dim = getattr(args, "tgt_embed_dim", 128)
args.transformer_dec_config = getattr(
args, "transformer_dec_config", DEFAULT_ENC_TRANSFORMER_CONFIG
)
args.conv_dec_config = getattr(args, "conv_dec_config", DEFAULT_DEC_CONV_CONFIG)
args.transformer_context = getattr(args, "transformer_context", "None")
@register_model_architecture("asr_vggtransformer", "vggtransformer_1")
def vggtransformer_1(args):
args.input_feat_per_channel = getattr(args, "input_feat_per_channel", 80)
args.vggblock_enc_config = getattr(
args, "vggblock_enc_config", "[(64, 3, 2, 2, True), (128, 3, 2, 2, True)]"
)
args.transformer_enc_config = getattr(
args,
"transformer_enc_config",
"((1024, 16, 4096, True, 0.15, 0.15, 0.15),) * 14",
)
args.enc_output_dim = getattr(args, "enc_output_dim", 1024)
args.tgt_embed_dim = getattr(args, "tgt_embed_dim", 128)
args.conv_dec_config = getattr(args, "conv_dec_config", "((256, 3, True),) * 4")
args.transformer_dec_config = getattr(
args,
"transformer_dec_config",
"((1024, 16, 4096, True, 0.15, 0.15, 0.15),) * 4",
)
@register_model_architecture("asr_vggtransformer", "vggtransformer_2")
def vggtransformer_2(args):
args.input_feat_per_channel = getattr(args, "input_feat_per_channel", 80)
args.vggblock_enc_config = getattr(
args, "vggblock_enc_config", "[(64, 3, 2, 2, True), (128, 3, 2, 2, True)]"
)
args.transformer_enc_config = getattr(
args,
"transformer_enc_config",
"((1024, 16, 4096, True, 0.15, 0.15, 0.15),) * 16",
)
args.enc_output_dim = getattr(args, "enc_output_dim", 1024)
args.tgt_embed_dim = getattr(args, "tgt_embed_dim", 512)
args.conv_dec_config = getattr(args, "conv_dec_config", "((256, 3, True),) * 4")
args.transformer_dec_config = getattr(
args,
"transformer_dec_config",
"((1024, 16, 4096, True, 0.15, 0.15, 0.15),) * 6",
)
@register_model_architecture("asr_vggtransformer", "vggtransformer_base")
def vggtransformer_base(args):
args.input_feat_per_channel = getattr(args, "input_feat_per_channel", 80)
args.vggblock_enc_config = getattr(
args, "vggblock_enc_config", "[(64, 3, 2, 2, True), (128, 3, 2, 2, True)]"
)
args.transformer_enc_config = getattr(
args, "transformer_enc_config", "((512, 8, 2048, True, 0.15, 0.15, 0.15),) * 12"
)
args.enc_output_dim = getattr(args, "enc_output_dim", 512)
args.tgt_embed_dim = getattr(args, "tgt_embed_dim", 512)
args.conv_dec_config = getattr(args, "conv_dec_config", "((256, 3, True),) * 4")
args.transformer_dec_config = getattr(
args, "transformer_dec_config", "((512, 8, 2048, True, 0.15, 0.15, 0.15),) * 6"
)
# Size estimations:
# Encoder:
# - vggblock param: 64*1*3*3 + 64*64*3*3 + 128*64*3*3 + 128*128*3*3 = 258K
# Transformer:
# - input dimension adapter: 2560 x 512 -> 1.31M
# - transformer_layers (x12) --> 37.74M
# * MultiheadAttention: 512*512*3 (in_proj) + 512*512 (out_proj) = 1.048M
# * FFN weight: 512*2048*2 = 2.097M
# - output dimension adapter: 512 x 512 -> 0.26 M
# Decoder:
# - LinearizedConv1d: 512 * 256 * 3 + 256 * 256 * 3 * 3
# - transformer_layer: (x6) --> 25.16M
# * MultiheadAttention (self-attention): 512*512*3 + 512*512 = 1.048M
# * MultiheadAttention (encoder-attention): 512*512*3 + 512*512 = 1.048M
# * FFN: 512*2048*2 = 2.097M
# Final FC:
# - FC: 512*5000 = 2.56M (assuming vocab size 5K)
# In total:
# ~68 M
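# Rough cross-check of the estimate above (illustrative sketch only; `args` and
# `task` are assumed to come from a fairseq ASR setup with the vggtransformer_base
# architecture and a ~5K-word target dictionary; they are not defined here):
# >>> model = VGGTransformerModel.build_model(args, task)
# >>> sum(p.numel() for p in model.parameters()) / 1e6
# roughly in line with the total estimated above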
# CTC models
def base_architecture_enconly(args):
args.input_feat_per_channel = getattr(args, "input_feat_per_channel", 40)
args.vggblock_enc_config = getattr(
args, "vggblock_enc_config", "[(32, 3, 2, 2, True)] * 2"
)
args.transformer_enc_config = getattr(
args, "transformer_enc_config", "((256, 4, 1024, True, 0.2, 0.2, 0.2),) * 2"
)
args.enc_output_dim = getattr(args, "enc_output_dim", 512)
args.in_channels = getattr(args, "in_channels", 1)
args.transformer_context = getattr(args, "transformer_context", "None")
args.transformer_sampling = getattr(args, "transformer_sampling", "None")
@register_model_architecture("asr_vggtransformer_encoder", "vggtransformer_enc_1")
def vggtransformer_enc_1(args):
# vggtransformer_enc_1 is the same as vggtransformer_enc_big, except the number
# of layers is increased to 16
# keep it here for backward compatibility purposes
args.input_feat_per_channel = getattr(args, "input_feat_per_channel", 80)
args.vggblock_enc_config = getattr(
args, "vggblock_enc_config", "[(64, 3, 2, 2, True), (128, 3, 2, 2, True)]"
)
args.transformer_enc_config = getattr(
args,
"transformer_enc_config",
"((1024, 16, 4096, True, 0.15, 0.15, 0.15),) * 16",
)
args.enc_output_dim = getattr(args, "enc_output_dim", 1024)
| EXA-1-master | exa/libraries/fairseq/examples/speech_recognition/models/vggtransformer.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from fairseq.models import (
FairseqEncoder,
FairseqEncoderModel,
register_model,
register_model_architecture,
)
from fairseq.modules.fairseq_dropout import FairseqDropout
default_conv_enc_config = """[
(400, 13, 170, 0.2),
(440, 14, 0, 0.214),
(484, 15, 0, 0.22898),
(532, 16, 0, 0.2450086),
(584, 17, 0, 0.262159202),
(642, 18, 0, 0.28051034614),
(706, 19, 0, 0.30014607037),
(776, 20, 0, 0.321156295296),
(852, 21, 0, 0.343637235966),
(936, 22, 0, 0.367691842484),
(1028, 23, 0, 0.393430271458),
(1130, 24, 0, 0.42097039046),
(1242, 25, 0, 0.450438317792),
(1366, 26, 0, 0.481969000038),
(1502, 27, 0, 0.51570683004),
(1652, 28, 0, 0.551806308143),
(1816, 29, 0, 0.590432749713),
]"""
@register_model("asr_w2l_conv_glu_encoder")
class W2lConvGluEncoderModel(FairseqEncoderModel):
def __init__(self, encoder):
super().__init__(encoder)
@staticmethod
def add_args(parser):
"""Add model-specific arguments to the parser."""
parser.add_argument(
"--input-feat-per-channel",
type=int,
metavar="N",
help="encoder input dimension per input channel",
)
parser.add_argument(
"--in-channels",
type=int,
metavar="N",
help="number of encoder input channels",
)
parser.add_argument(
"--conv-enc-config",
type=str,
metavar="EXPR",
help="""
an array of tuples each containing the configuration of one conv layer
[(out_channels, kernel_size, padding, dropout), ...]
""",
)
@classmethod
def build_model(cls, args, task):
"""Build a new model instance."""
conv_enc_config = getattr(args, "conv_enc_config", default_conv_enc_config)
encoder = W2lConvGluEncoder(
vocab_size=len(task.target_dictionary),
input_feat_per_channel=args.input_feat_per_channel,
in_channels=args.in_channels,
conv_enc_config=eval(conv_enc_config),
)
return cls(encoder)
def get_normalized_probs(self, net_output, log_probs, sample=None):
lprobs = super().get_normalized_probs(net_output, log_probs, sample)
lprobs.batch_first = False
return lprobs
class W2lConvGluEncoder(FairseqEncoder):
def __init__(
self, vocab_size, input_feat_per_channel, in_channels, conv_enc_config
):
super().__init__(None)
self.input_dim = input_feat_per_channel
if in_channels != 1:
raise ValueError("only 1 input channel is currently supported")
self.conv_layers = nn.ModuleList()
self.linear_layers = nn.ModuleList()
self.dropouts = []
cur_channels = input_feat_per_channel
for out_channels, kernel_size, padding, dropout in conv_enc_config:
layer = nn.Conv1d(cur_channels, out_channels, kernel_size, padding=padding)
layer.weight.data.mul_(math.sqrt(3)) # match wav2letter init
self.conv_layers.append(nn.utils.weight_norm(layer))
self.dropouts.append(
FairseqDropout(dropout, module_name=self.__class__.__name__)
)
if out_channels % 2 != 0:
raise ValueError("odd # of out_channels is incompatible with GLU")
cur_channels = out_channels // 2 # halved by GLU
for out_channels in [2 * cur_channels, vocab_size]:
layer = nn.Linear(cur_channels, out_channels)
layer.weight.data.mul_(math.sqrt(3))
self.linear_layers.append(nn.utils.weight_norm(layer))
cur_channels = out_channels // 2
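# Note on the even-channel requirement above: F.glu splits the channel dimension
# in half and gates one half with a sigmoid of the other, so the output has half
# as many channels. Small sketch (doctest-style):
# >>> x = torch.randn(4, 8, 100)     # (B, C, T)
# >>> F.glu(x, dim=1).shape
# torch.Size([4, 4, 100])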
def forward(self, src_tokens, src_lengths, **kwargs):
"""
src_tokens: padded tensor (B, T, C * feat)
src_lengths: tensor of original lengths of input utterances (B,)
"""
B, T, _ = src_tokens.size()
x = src_tokens.transpose(1, 2).contiguous() # (B, feat, T) assuming C == 1
for layer_idx in range(len(self.conv_layers)):
x = self.conv_layers[layer_idx](x)
x = F.glu(x, dim=1)
x = self.dropouts[layer_idx](x)
x = x.transpose(1, 2).contiguous() # (B, T, C_out // 2) -- 908 with the default conv config
x = self.linear_layers[0](x)
x = F.glu(x, dim=2)
x = self.dropouts[-1](x)
x = self.linear_layers[1](x)
assert x.size(0) == B
assert x.size(1) == T
encoder_out = x.transpose(0, 1) # (T, B, vocab_size)
# TODO: verify this and find a simpler/more elegant way using PyTorch APIs
encoder_padding_mask = (
torch.arange(T).view(1, T).expand(B, -1).to(x.device)
>= src_lengths.view(B, 1).expand(-1, T)
).t() # (B x T) -> (T x B)
return {
"encoder_out": encoder_out, # (T, B, vocab_size)
"encoder_padding_mask": encoder_padding_mask, # (T, B)
}
def reorder_encoder_out(self, encoder_out, new_order):
encoder_out["encoder_out"] = encoder_out["encoder_out"].index_select(
1, new_order
)
encoder_out["encoder_padding_mask"] = encoder_out[
"encoder_padding_mask"
].index_select(1, new_order)
return encoder_out
def max_positions(self):
"""Maximum input length supported by the encoder."""
return (1e6, 1e6) # an arbitrary large number
@register_model_architecture("asr_w2l_conv_glu_encoder", "w2l_conv_glu_enc")
def w2l_conv_glu_enc(args):
args.input_feat_per_channel = getattr(args, "input_feat_per_channel", 80)
args.in_channels = getattr(args, "in_channels", 1)
args.conv_enc_config = getattr(args, "conv_enc_config", default_conv_enc_config)
| EXA-1-master | exa/libraries/fairseq/examples/speech_recognition/models/w2l_conv_glu_enc.py |
EXA-1-master | exa/libraries/fairseq/examples/speech_recognition/new/__init__.py |
|
#!/usr/bin/env python -u
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import ast
import hashlib
import logging
import os
import shutil
import sys
from dataclasses import dataclass, field, is_dataclass
from pathlib import Path
from typing import Any, Dict, List, Optional, Tuple, Union
import editdistance
import torch
import torch.distributed as dist
from examples.speech_recognition.new.decoders.decoder_config import (
DecoderConfig,
FlashlightDecoderConfig,
)
from examples.speech_recognition.new.decoders.decoder import Decoder
from fairseq import checkpoint_utils, distributed_utils, progress_bar, tasks, utils
from fairseq.data.data_utils import post_process
from fairseq.dataclass.configs import (
CheckpointConfig,
CommonConfig,
CommonEvalConfig,
DatasetConfig,
DistributedTrainingConfig,
FairseqDataclass,
)
from fairseq.logging.meters import StopwatchMeter, TimeMeter
from fairseq.logging.progress_bar import BaseProgressBar
from fairseq.models.fairseq_model import FairseqModel
from omegaconf import OmegaConf
import hydra
from hydra.core.config_store import ConfigStore
logging.root.setLevel(logging.INFO)
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
config_path = Path(__file__).resolve().parent / "conf"
@dataclass
class DecodingConfig(DecoderConfig, FlashlightDecoderConfig):
unique_wer_file: bool = field(
default=False,
metadata={"help": "If set, use a unique file for storing WER"},
)
results_path: Optional[str] = field(
default=None,
metadata={
"help": "If set, write hypothesis and reference sentences into this directory"
},
)
@dataclass
class InferConfig(FairseqDataclass):
task: Any = None
decoding: DecodingConfig = DecodingConfig()
common: CommonConfig = CommonConfig()
common_eval: CommonEvalConfig = CommonEvalConfig()
checkpoint: CheckpointConfig = CheckpointConfig()
distributed_training: DistributedTrainingConfig = DistributedTrainingConfig()
dataset: DatasetConfig = DatasetConfig()
is_ax: bool = field(
default=False,
metadata={
"help": "if true, assumes we are using ax for tuning and returns a tuple for ax to consume"
},
)
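# Hedged usage sketch (the paths and override values below are placeholders for
# illustration, not taken from project documentation):
# python examples/speech_recognition/new/infer.py \
# task.data=/path/to/manifests common_eval.path=/path/to/model.pt \
# decoding.type=kenlm decoding.lexicon=/path/to/lexicon.txt \
# decoding.lmpath=/path/to/lm.bin dataset.gen_subset=test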
def reset_logging():
root = logging.getLogger()
for handler in root.handlers:
root.removeHandler(handler)
root.setLevel(os.environ.get("LOGLEVEL", "INFO").upper())
handler = logging.StreamHandler(sys.stdout)
handler.setFormatter(
logging.Formatter(
fmt="%(asctime)s | %(levelname)s | %(name)s | %(message)s",
datefmt="%Y-%m-%d %H:%M:%S",
)
)
root.addHandler(handler)
class InferenceProcessor:
cfg: InferConfig
def __init__(self, cfg: InferConfig) -> None:
self.cfg = cfg
self.task = tasks.setup_task(cfg.task)
models, saved_cfg = self.load_model_ensemble()
self.models = models
self.saved_cfg = saved_cfg
self.tgt_dict = self.task.target_dictionary
self.task.load_dataset(
self.cfg.dataset.gen_subset,
task_cfg=saved_cfg.task,
)
self.generator = Decoder(cfg.decoding, self.tgt_dict)
self.gen_timer = StopwatchMeter()
self.wps_meter = TimeMeter()
self.num_sentences = 0
self.total_errors = 0
self.total_length = 0
self.hypo_words_file = None
self.hypo_units_file = None
self.ref_words_file = None
self.ref_units_file = None
self.progress_bar = self.build_progress_bar()
def __enter__(self) -> "InferenceProcessor":
if self.cfg.decoding.results_path is not None:
self.hypo_words_file = self.get_res_file("hypo.word")
self.hypo_units_file = self.get_res_file("hypo.units")
self.ref_words_file = self.get_res_file("ref.word")
self.ref_units_file = self.get_res_file("ref.units")
return self
def __exit__(self, *exc) -> bool:
if self.cfg.decoding.results_path is not None:
self.hypo_words_file.close()
self.hypo_units_file.close()
self.ref_words_file.close()
self.ref_units_file.close()
return False
def __iter__(self) -> Any:
for sample in self.progress_bar:
if not self.cfg.common.cpu:
sample = utils.move_to_cuda(sample)
# Happens on the last batch.
if "net_input" not in sample:
continue
yield sample
def log(self, *args, **kwargs):
self.progress_bar.log(*args, **kwargs)
def print(self, *args, **kwargs):
self.progress_bar.print(*args, **kwargs)
def get_res_file(self, fname: str):
fname = os.path.join(self.cfg.decoding.results_path, fname)
if self.data_parallel_world_size > 1:
fname = f"{fname}.{self.data_parallel_rank}"
return open(fname, "w", buffering=1)
def merge_shards(self) -> None:
"""Merges all shard files into shard 0, then removes shard suffix."""
shard_id = self.data_parallel_rank
num_shards = self.data_parallel_world_size
if self.data_parallel_world_size > 1:
def merge_shards_with_root(fname: str) -> None:
fname = os.path.join(self.cfg.decoding.results_path, fname)
logger.info("Merging %s on shard %d", fname, shard_id)
base_fpath = Path(f"{fname}.0")
with open(base_fpath, "a") as out_file:
for s in range(1, num_shards):
shard_fpath = Path(f"{fname}.{s}")
with open(shard_fpath, "r") as in_file:
for line in in_file:
out_file.write(line)
shard_fpath.unlink()
shutil.move(f"{fname}.0", fname)
dist.barrier() # ensure all shards finished writing
if shard_id == (0 % num_shards):
merge_shards_with_root("hypo.word")
if shard_id == (1 % num_shards):
merge_shards_with_root("hypo.units")
if shard_id == (2 % num_shards):
merge_shards_with_root("ref.word")
if shard_id == (3 % num_shards):
merge_shards_with_root("ref.units")
dist.barrier()
def optimize_model(self, model: FairseqModel) -> None:
model.make_generation_fast_()
if self.cfg.common.fp16:
model.half()
if not self.cfg.common.cpu:
model.cuda()
def load_model_ensemble(self) -> Tuple[List[FairseqModel], FairseqDataclass]:
arg_overrides = ast.literal_eval(self.cfg.common_eval.model_overrides)
models, saved_cfg = checkpoint_utils.load_model_ensemble(
utils.split_paths(self.cfg.common_eval.path, separator="\\"),
arg_overrides=arg_overrides,
task=self.task,
suffix=self.cfg.checkpoint.checkpoint_suffix,
strict=(self.cfg.checkpoint.checkpoint_shard_count == 1),
num_shards=self.cfg.checkpoint.checkpoint_shard_count,
)
for model in models:
self.optimize_model(model)
return models, saved_cfg
def get_dataset_itr(self, disable_iterator_cache: bool = False):
return self.task.get_batch_iterator(
dataset=self.task.dataset(self.cfg.dataset.gen_subset),
max_tokens=self.cfg.dataset.max_tokens,
max_sentences=self.cfg.dataset.batch_size,
max_positions=(sys.maxsize, sys.maxsize),
ignore_invalid_inputs=self.cfg.dataset.skip_invalid_size_inputs_valid_test,
required_batch_size_multiple=self.cfg.dataset.required_batch_size_multiple,
seed=self.cfg.common.seed,
num_shards=self.data_parallel_world_size,
shard_id=self.data_parallel_rank,
num_workers=self.cfg.dataset.num_workers,
data_buffer_size=self.cfg.dataset.data_buffer_size,
disable_iterator_cache=disable_iterator_cache,
).next_epoch_itr(shuffle=False)
def build_progress_bar(
self,
epoch: Optional[int] = None,
prefix: Optional[str] = None,
default_log_format: str = "tqdm",
) -> BaseProgressBar:
return progress_bar.progress_bar(
iterator=self.get_dataset_itr(),
log_format=self.cfg.common.log_format,
log_interval=self.cfg.common.log_interval,
epoch=epoch,
prefix=prefix,
tensorboard_logdir=self.cfg.common.tensorboard_logdir,
default_log_format=default_log_format,
)
@property
def data_parallel_world_size(self):
if self.cfg.distributed_training.distributed_world_size == 1:
return 1
return distributed_utils.get_data_parallel_world_size()
@property
def data_parallel_rank(self):
if self.cfg.distributed_training.distributed_world_size == 1:
return 0
return distributed_utils.get_data_parallel_rank()
def process_sentence(
self,
sample: Dict[str, Any],
hypo: Dict[str, Any],
sid: int,
batch_id: int,
) -> Tuple[int, int]:
speaker = None # Speaker can't be parsed from dataset.
if "target_label" in sample:
toks = sample["target_label"]
else:
toks = sample["target"]
toks = toks[batch_id, :]
# Processes hypothesis.
hyp_pieces = self.tgt_dict.string(hypo["tokens"].int().cpu())
if "words" in hypo:
hyp_words = " ".join(hypo["words"])
else:
hyp_words = post_process(hyp_pieces, self.cfg.common_eval.post_process)
# Processes target.
target_tokens = utils.strip_pad(toks, self.tgt_dict.pad())
tgt_pieces = self.tgt_dict.string(target_tokens.int().cpu())
tgt_words = post_process(tgt_pieces, self.cfg.common_eval.post_process)
if self.cfg.decoding.results_path is not None:
print(f"{hyp_pieces} ({speaker}-{sid})", file=self.hypo_units_file)
print(f"{hyp_words} ({speaker}-{sid})", file=self.hypo_words_file)
print(f"{tgt_pieces} ({speaker}-{sid})", file=self.ref_units_file)
print(f"{tgt_words} ({speaker}-{sid})", file=self.ref_words_file)
if not self.cfg.common_eval.quiet:
logger.info(f"HYPO: {hyp_words}")
logger.info(f"REF: {tgt_words}")
logger.info("---------------------")
hyp_words, tgt_words = hyp_words.split(), tgt_words.split()
return editdistance.eval(hyp_words, tgt_words), len(tgt_words)
def process_sample(self, sample: Dict[str, Any]) -> None:
self.gen_timer.start()
hypos = self.task.inference_step(
generator=self.generator,
models=self.models,
sample=sample,
)
num_generated_tokens = sum(len(h[0]["tokens"]) for h in hypos)
self.gen_timer.stop(num_generated_tokens)
self.wps_meter.update(num_generated_tokens)
for batch_id, sample_id in enumerate(sample["id"].tolist()):
errs, length = self.process_sentence(
sample=sample,
sid=sample_id,
batch_id=batch_id,
hypo=hypos[batch_id][0],
)
self.total_errors += errs
self.total_length += length
self.log({"wps": round(self.wps_meter.avg)})
if "nsentences" in sample:
self.num_sentences += sample["nsentences"]
else:
self.num_sentences += sample["id"].numel()
def log_generation_time(self) -> None:
logger.info(
"Processed %d sentences (%d tokens) in %.1fs %.2f "
"sentences per second, %.2f tokens per second)",
self.num_sentences,
self.gen_timer.n,
self.gen_timer.sum,
self.num_sentences / (self.gen_timer.sum + 1e-6),
1.0 / (self.gen_timer.avg + 1e-6),
)
def parse_wer(wer_file: Path) -> float:
with open(wer_file, "r") as f:
return float(f.readline().strip().split(" ")[1])
def get_wer_file(cfg: InferConfig) -> Path:
"""Hashes the decoding parameters to a unique file ID."""
base_path = "wer"
if cfg.decoding.results_path is not None:
base_path = os.path.join(cfg.decoding.results_path, base_path)
if cfg.decoding.unique_wer_file:
yaml_str = OmegaConf.to_yaml(cfg.decoding)
fid = int(hashlib.md5(yaml_str.encode("utf-8")).hexdigest(), 16)
return Path(f"{base_path}.{fid % 1000000}")
else:
return Path(base_path)
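# Resulting paths, for illustration (hypothetical values):
# results_path=/exp/decode, unique_wer_file=False -> /exp/decode/wer
# results_path=/exp/decode, unique_wer_file=True  -> /exp/decode/wer.<md5(decoding cfg) % 1e6>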
def main(cfg: InferConfig) -> float:
"""Entry point for main processing logic.
Args:
cfg: The inferance configuration to use.
wer: Optional shared memory pointer for returning the WER. If not None,
the final WER value will be written here instead of being returned.
Returns:
The final WER if `wer` is None, otherwise None.
"""
yaml_str, wer_file = OmegaConf.to_yaml(cfg.decoding), get_wer_file(cfg)
# Validates the provided configuration.
if cfg.dataset.max_tokens is None and cfg.dataset.batch_size is None:
cfg.dataset.max_tokens = 4000000
if not cfg.common.cpu and not torch.cuda.is_available():
raise ValueError("CUDA not found; set `cpu=True` to run without CUDA")
logger.info(cfg.common_eval.path)
with InferenceProcessor(cfg) as processor:
for sample in processor:
processor.process_sample(sample)
processor.log_generation_time()
if cfg.decoding.results_path is not None:
processor.merge_shards()
errs_t, leng_t = processor.total_errors, processor.total_length
if cfg.common.cpu:
logger.warning("Merging WER requires CUDA.")
elif processor.data_parallel_world_size > 1:
stats = torch.LongTensor([errs_t, leng_t]).cuda()
dist.all_reduce(stats, op=dist.ReduceOp.SUM)
errs_t, leng_t = stats[0].item(), stats[1].item()
wer = errs_t * 100.0 / leng_t
if distributed_utils.is_master(cfg.distributed_training):
with open(wer_file, "w") as f:
f.write(
(
f"WER: {wer}\n"
f"err / num_ref_words = {errs_t} / {leng_t}\n\n"
f"{yaml_str}"
)
)
return wer
@hydra.main(config_path=config_path, config_name="infer")
def hydra_main(cfg: InferConfig) -> Union[float, Tuple[float, Optional[float]]]:
container = OmegaConf.to_container(cfg, resolve=True, enum_to_str=True)
cfg = OmegaConf.create(container)
OmegaConf.set_struct(cfg, True)
if cfg.common.reset_logging:
reset_logging()
utils.import_user_module(cfg.common)
# logger.info("Config:\n%s", OmegaConf.to_yaml(cfg))
wer = float("inf")
try:
if cfg.common.profile:
with torch.cuda.profiler.profile():
with torch.autograd.profiler.emit_nvtx():
distributed_utils.call_main(cfg, main)
else:
distributed_utils.call_main(cfg, main)
wer = parse_wer(get_wer_file(cfg))
except BaseException as e: # pylint: disable=broad-except
if not cfg.common.suppress_crashes:
raise
else:
logger.error("Crashed! %s", str(e))
logger.info("Word error rate: %.4f", wer)
if cfg.is_ax:
return wer, None
return wer
def cli_main() -> None:
try:
from hydra._internal.utils import (
get_args,
) # pylint: disable=import-outside-toplevel
cfg_name = get_args().config_name or "infer"
except ImportError:
logger.warning("Failed to get config name from hydra args")
cfg_name = "infer"
cs = ConfigStore.instance()
cs.store(name=cfg_name, node=InferConfig)
for k in InferConfig.__dataclass_fields__:
if is_dataclass(InferConfig.__dataclass_fields__[k].type):
v = InferConfig.__dataclass_fields__[k].default
cs.store(name=k, node=v)
hydra_main() # pylint: disable=no-value-for-parameter
if __name__ == "__main__":
cli_main()
| EXA-1-master | exa/libraries/fairseq/examples/speech_recognition/new/infer.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from typing import Union
from fairseq.data.dictionary import Dictionary
from .decoder_config import DecoderConfig, FlashlightDecoderConfig
from .base_decoder import BaseDecoder
def Decoder(
cfg: Union[DecoderConfig, FlashlightDecoderConfig], tgt_dict: Dictionary
) -> BaseDecoder:
if cfg.type == "viterbi":
from .viterbi_decoder import ViterbiDecoder
return ViterbiDecoder(tgt_dict)
if cfg.type == "kenlm":
from .flashlight_decoder import KenLMDecoder
return KenLMDecoder(cfg, tgt_dict)
if cfg.type == "fairseqlm":
from .flashlight_decoder import FairseqLMDecoder
return FairseqLMDecoder(cfg, tgt_dict)
raise NotImplementedError(f"Invalid decoder type: {cfg.type}")
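# Usage sketch (doctest-style; `tgt_dict` is assumed to be a fairseq Dictionary
# obtained from the task, and `models`/`sample` come from the inference loop):
# >>> decoder = Decoder(DecoderConfig(type="viterbi"), tgt_dict)
# >>> hypos = decoder.generate(models, sample)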
| EXA-1-master | exa/libraries/fairseq/examples/speech_recognition/new/decoders/decoder.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
from typing import List, Dict
from .base_decoder import BaseDecoder
class ViterbiDecoder(BaseDecoder):
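# Greedy CTC decoding: take the argmax label at every frame, collapse consecutive
# repeats, and drop the blank symbol. No language model or beam search is used.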
def decode(
self,
emissions: torch.FloatTensor,
) -> List[List[Dict[str, torch.LongTensor]]]:
def get_pred(e):
toks = e.argmax(dim=-1).unique_consecutive()
return toks[toks != self.blank]
return [[{"tokens": get_pred(x), "score": 0}] for x in emissions]
| EXA-1-master | exa/libraries/fairseq/examples/speech_recognition/new/decoders/viterbi_decoder.py |
EXA-1-master | exa/libraries/fairseq/examples/speech_recognition/new/decoders/__init__.py |
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import itertools as it
from typing import Any, Dict, List
import torch
from fairseq.data.dictionary import Dictionary
from fairseq.models.fairseq_model import FairseqModel
class BaseDecoder:
def __init__(self, tgt_dict: Dictionary) -> None:
self.tgt_dict = tgt_dict
self.vocab_size = len(tgt_dict)
self.blank = (
tgt_dict.index("<ctc_blank>")
if "<ctc_blank>" in tgt_dict.indices
else tgt_dict.bos()
)
if "<sep>" in tgt_dict.indices:
self.silence = tgt_dict.index("<sep>")
elif "|" in tgt_dict.indices:
self.silence = tgt_dict.index("|")
else:
self.silence = tgt_dict.eos()
def generate(
self, models: List[FairseqModel], sample: Dict[str, Any], **unused
) -> List[List[Dict[str, torch.LongTensor]]]:
encoder_input = {
k: v for k, v in sample["net_input"].items() if k != "prev_output_tokens"
}
emissions = self.get_emissions(models, encoder_input)
return self.decode(emissions)
def get_emissions(
self,
models: List[FairseqModel],
encoder_input: Dict[str, Any],
) -> torch.FloatTensor:
model = models[0]
encoder_out = model(**encoder_input)
if hasattr(model, "get_logits"):
emissions = model.get_logits(encoder_out)
else:
emissions = model.get_normalized_probs(encoder_out, log_probs=True)
return emissions.transpose(0, 1).float().cpu().contiguous()
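# get_tokens below applies the standard CTC collapse rule: merge consecutive
# repeats, then drop blanks. Illustration (assuming blank index 0):
# [0, 3, 3, 0, 4, 4] -> groupby -> [0, 3, 0, 4] -> drop blanks -> [3, 4]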
def get_tokens(self, idxs: torch.IntTensor) -> torch.LongTensor:
idxs = (g[0] for g in it.groupby(idxs))
idxs = filter(lambda x: x != self.blank, idxs)
return torch.LongTensor(list(idxs))
def decode(
self,
emissions: torch.FloatTensor,
) -> List[List[Dict[str, torch.LongTensor]]]:
raise NotImplementedError
| EXA-1-master | exa/libraries/fairseq/examples/speech_recognition/new/decoders/base_decoder.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import math
from dataclasses import dataclass, field
from typing import Optional
from fairseq.dataclass.configs import FairseqDataclass
from fairseq.dataclass.constants import ChoiceEnum
from omegaconf import MISSING
DECODER_CHOICES = ChoiceEnum(["viterbi", "kenlm", "fairseqlm"])
@dataclass
class DecoderConfig(FairseqDataclass):
type: DECODER_CHOICES = field(
default="viterbi",
metadata={"help": "The type of decoder to use"},
)
@dataclass
class FlashlightDecoderConfig(FairseqDataclass):
nbest: int = field(
default=1,
metadata={"help": "Number of decodings to return"},
)
unitlm: bool = field(
default=False,
metadata={"help": "If set, use unit language model"},
)
lmpath: str = field(
default=MISSING,
metadata={"help": "Language model for KenLM decoder"},
)
lexicon: Optional[str] = field(
default=None,
metadata={"help": "Lexicon for Flashlight decoder"},
)
beam: int = field(
default=50,
metadata={"help": "Number of beams to use for decoding"},
)
beamthreshold: float = field(
default=50.0,
metadata={"help": "Threshold for beam search decoding"},
)
beamsizetoken: Optional[int] = field(
default=None, metadata={"help": "Beam size to use"}
)
wordscore: float = field(
default=-1,
metadata={"help": "Word score for KenLM decoder"},
)
unkweight: float = field(
default=-math.inf,
metadata={"help": "Unknown weight for KenLM decoder"},
)
silweight: float = field(
default=0,
metadata={"help": "Silence weight for KenLM decoder"},
)
lmweight: float = field(
default=2,
metadata={"help": "Weight for LM while interpolating score"},
)
| EXA-1-master | exa/libraries/fairseq/examples/speech_recognition/new/decoders/decoder_config.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import gc
import os.path as osp
import warnings
from collections import deque, namedtuple
from typing import Any, Dict, Tuple
import numpy as np
import torch
from fairseq import tasks
from fairseq.data.dictionary import Dictionary
from fairseq.dataclass.utils import convert_namespace_to_omegaconf
from fairseq.models.fairseq_model import FairseqModel
from fairseq.utils import apply_to_sample
from omegaconf import open_dict, OmegaConf
from typing import List
from .decoder_config import FlashlightDecoderConfig
from .base_decoder import BaseDecoder
try:
from flashlight.lib.text.decoder import (
LM,
CriterionType,
DecodeResult,
KenLM,
LexiconDecoder,
LexiconDecoderOptions,
LexiconFreeDecoder,
LexiconFreeDecoderOptions,
LMState,
SmearingMode,
Trie,
)
from flashlight.lib.text.dictionary import create_word_dict, load_words
except ImportError:
warnings.warn(
"flashlight python bindings are required to use this functionality. "
"Please install from "
"https://github.com/facebookresearch/flashlight/tree/master/bindings/python"
)
LM = object
LMState = object
class KenLMDecoder(BaseDecoder):
def __init__(self, cfg: FlashlightDecoderConfig, tgt_dict: Dictionary) -> None:
super().__init__(tgt_dict)
self.nbest = cfg.nbest
self.unitlm = cfg.unitlm
if cfg.lexicon:
self.lexicon = load_words(cfg.lexicon)
self.word_dict = create_word_dict(self.lexicon)
self.unk_word = self.word_dict.get_index("<unk>")
self.lm = KenLM(cfg.lmpath, self.word_dict)
self.trie = Trie(self.vocab_size, self.silence)
start_state = self.lm.start(False)
for word, spellings in self.lexicon.items():
word_idx = self.word_dict.get_index(word)
_, score = self.lm.score(start_state, word_idx)
for spelling in spellings:
spelling_idxs = [tgt_dict.index(token) for token in spelling]
assert (
tgt_dict.unk() not in spelling_idxs
), f"{word} {spelling} {spelling_idxs}"
self.trie.insert(spelling_idxs, word_idx, score)
self.trie.smear(SmearingMode.MAX)
self.decoder_opts = LexiconDecoderOptions(
beam_size=cfg.beam,
beam_size_token=cfg.beamsizetoken or len(tgt_dict),
beam_threshold=cfg.beamthreshold,
lm_weight=cfg.lmweight,
word_score=cfg.wordscore,
unk_score=cfg.unkweight,
sil_score=cfg.silweight,
log_add=False,
criterion_type=CriterionType.CTC,
)
self.decoder = LexiconDecoder(
self.decoder_opts,
self.trie,
self.lm,
self.silence,
self.blank,
self.unk_word,
[],
self.unitlm,
)
else:
assert self.unitlm, "Lexicon-free decoding requires unit LM"
d = {w: [[w]] for w in tgt_dict.symbols}
self.word_dict = create_word_dict(d)
self.lm = KenLM(cfg.lmpath, self.word_dict)
self.decoder_opts = LexiconFreeDecoderOptions(
beam_size=cfg.beam,
beam_size_token=cfg.beamsizetoken or len(tgt_dict),
beam_threshold=cfg.beamthreshold,
lm_weight=cfg.lmweight,
sil_score=cfg.silweight,
log_add=False,
criterion_type=CriterionType.CTC,
)
self.decoder = LexiconFreeDecoder(
self.decoder_opts, self.lm, self.silence, self.blank, []
)
def get_timesteps(self, token_idxs: List[int]) -> List[int]:
"""Returns frame numbers corresponding to every non-blank token.
Parameters
----------
token_idxs : List[int]
IDs of decoded tokens.
Returns
-------
List[int]
Frame numbers corresponding to every non-blank token.
"""
timesteps = []
for i, token_idx in enumerate(token_idxs):
if token_idx == self.blank:
continue
if i == 0 or token_idx != token_idxs[i-1]:
timesteps.append(i)
return timesteps
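# Illustration of get_timesteps above (assuming self.blank == 0):
# token_idxs = [0, 5, 5, 0, 7] -> timesteps [1, 4]
# (frame 1 starts the run of 5s, frame 4 starts the 7; blank frames are skipped)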
def decode(
self,
emissions: torch.FloatTensor,
) -> List[List[Dict[str, torch.LongTensor]]]:
B, T, N = emissions.size()
hypos = []
for b in range(B):
emissions_ptr = emissions.data_ptr() + 4 * b * emissions.stride(0)
results = self.decoder.decode(emissions_ptr, T, N)
nbest_results = results[: self.nbest]
hypos.append(
[
{
"tokens": self.get_tokens(result.tokens),
"score": result.score,
"timesteps": self.get_timesteps(result.tokens),
"words": [
self.word_dict.get_entry(x) for x in result.words if x >= 0
],
}
for result in nbest_results
]
)
return hypos
FairseqLMState = namedtuple(
"FairseqLMState",
[
"prefix",
"incremental_state",
"probs",
],
)
class FairseqLM(LM):
def __init__(self, dictionary: Dictionary, model: FairseqModel) -> None:
super().__init__()
self.dictionary = dictionary
self.model = model
self.unk = self.dictionary.unk()
self.save_incremental = False # this currently does not work properly
self.max_cache = 20_000
if torch.cuda.is_available():
model.cuda()
model.eval()
model.make_generation_fast_()
self.states = {}
self.stateq = deque()
def start(self, start_with_nothing: bool) -> LMState:
state = LMState()
prefix = torch.LongTensor([[self.dictionary.eos()]])
incremental_state = {} if self.save_incremental else None
with torch.no_grad():
res = self.model(prefix.cuda(), incremental_state=incremental_state)
probs = self.model.get_normalized_probs(res, log_probs=True, sample=None)
if incremental_state is not None:
incremental_state = apply_to_sample(lambda x: x.cpu(), incremental_state)
self.states[state] = FairseqLMState(
prefix.numpy(), incremental_state, probs[0, -1].cpu().numpy()
)
self.stateq.append(state)
return state
def score(
self,
state: LMState,
token_index: int,
no_cache: bool = False,
) -> Tuple[LMState, float]:
"""
Evaluate language model based on the current lm state and new word
Parameters:
-----------
state: current lm state
token_index: index of the word
(can be lexicon index then you should store inside LM the
mapping between indices of lexicon and lm, or lm index of a word)
Returns:
--------
(LMState, float): pair of (new state, score for the current word)
"""
curr_state = self.states[state]
def trim_cache(targ_size: int) -> None:
while len(self.stateq) > targ_size:
rem_k = self.stateq.popleft()
rem_st = self.states[rem_k]
rem_st = FairseqLMState(rem_st.prefix, None, None)
self.states[rem_k] = rem_st
if curr_state.probs is None:
new_incremental_state = (
curr_state.incremental_state.copy()
if curr_state.incremental_state is not None
else None
)
with torch.no_grad():
if new_incremental_state is not None:
new_incremental_state = apply_to_sample(
lambda x: x.cuda(), new_incremental_state
)
elif self.save_incremental:
new_incremental_state = {}
res = self.model(
torch.from_numpy(curr_state.prefix).cuda(),
incremental_state=new_incremental_state,
)
probs = self.model.get_normalized_probs(
res, log_probs=True, sample=None
)
if new_incremental_state is not None:
new_incremental_state = apply_to_sample(
lambda x: x.cpu(), new_incremental_state
)
curr_state = FairseqLMState(
curr_state.prefix, new_incremental_state, probs[0, -1].cpu().numpy()
)
if not no_cache:
self.states[state] = curr_state
self.stateq.append(state)
score = curr_state.probs[token_index].item()
trim_cache(self.max_cache)
outstate = state.child(token_index)
if outstate not in self.states and not no_cache:
prefix = np.concatenate(
[curr_state.prefix, torch.LongTensor([[token_index]])], -1
)
incr_state = curr_state.incremental_state
self.states[outstate] = FairseqLMState(prefix, incr_state, None)
if token_index == self.unk:
score = float("-inf")
return outstate, score
    def finish(self, state: LMState) -> Tuple[LMState, float]:
"""
Evaluate eos for language model based on the current lm state
Returns:
--------
(LMState, float): pair of (new state, score for the current word)
"""
return self.score(state, self.dictionary.eos())
def empty_cache(self) -> None:
self.states = {}
self.stateq = deque()
gc.collect()
class FairseqLMDecoder(BaseDecoder):
def __init__(self, cfg: FlashlightDecoderConfig, tgt_dict: Dictionary) -> None:
super().__init__(tgt_dict)
self.nbest = cfg.nbest
self.unitlm = cfg.unitlm
self.lexicon = load_words(cfg.lexicon) if cfg.lexicon else None
self.idx_to_wrd = {}
checkpoint = torch.load(cfg.lmpath, map_location="cpu")
if "cfg" in checkpoint and checkpoint["cfg"] is not None:
lm_args = checkpoint["cfg"]
else:
lm_args = convert_namespace_to_omegaconf(checkpoint["args"])
if not OmegaConf.is_dict(lm_args):
lm_args = OmegaConf.create(lm_args)
with open_dict(lm_args.task):
lm_args.task.data = osp.dirname(cfg.lmpath)
task = tasks.setup_task(lm_args.task)
model = task.build_model(lm_args.model)
model.load_state_dict(checkpoint["model"], strict=False)
self.trie = Trie(self.vocab_size, self.silence)
self.word_dict = task.dictionary
self.unk_word = self.word_dict.unk()
self.lm = FairseqLM(self.word_dict, model)
if self.lexicon:
start_state = self.lm.start(False)
for i, (word, spellings) in enumerate(self.lexicon.items()):
if self.unitlm:
word_idx = i
self.idx_to_wrd[i] = word
score = 0
else:
word_idx = self.word_dict.index(word)
_, score = self.lm.score(start_state, word_idx, no_cache=True)
for spelling in spellings:
spelling_idxs = [tgt_dict.index(token) for token in spelling]
assert (
tgt_dict.unk() not in spelling_idxs
), f"{spelling} {spelling_idxs}"
self.trie.insert(spelling_idxs, word_idx, score)
self.trie.smear(SmearingMode.MAX)
self.decoder_opts = LexiconDecoderOptions(
beam_size=cfg.beam,
beam_size_token=cfg.beamsizetoken or len(tgt_dict),
beam_threshold=cfg.beamthreshold,
lm_weight=cfg.lmweight,
word_score=cfg.wordscore,
unk_score=cfg.unkweight,
sil_score=cfg.silweight,
log_add=False,
criterion_type=CriterionType.CTC,
)
self.decoder = LexiconDecoder(
self.decoder_opts,
self.trie,
self.lm,
self.silence,
self.blank,
self.unk_word,
[],
self.unitlm,
)
else:
assert self.unitlm, "Lexicon-free decoding requires unit LM"
d = {w: [[w]] for w in tgt_dict.symbols}
self.word_dict = create_word_dict(d)
self.lm = KenLM(cfg.lmpath, self.word_dict)
self.decoder_opts = LexiconFreeDecoderOptions(
beam_size=cfg.beam,
beam_size_token=cfg.beamsizetoken or len(tgt_dict),
beam_threshold=cfg.beamthreshold,
lm_weight=cfg.lmweight,
sil_score=cfg.silweight,
log_add=False,
criterion_type=CriterionType.CTC,
)
self.decoder = LexiconFreeDecoder(
self.decoder_opts, self.lm, self.silence, self.blank, []
)
def decode(
self,
emissions: torch.FloatTensor,
) -> List[List[Dict[str, torch.LongTensor]]]:
B, T, N = emissions.size()
hypos = []
def make_hypo(result: DecodeResult) -> Dict[str, Any]:
hypo = {
"tokens": self.get_tokens(result.tokens),
"score": result.score,
}
if self.lexicon:
hypo["words"] = [
self.idx_to_wrd[x] if self.unitlm else self.word_dict[x]
for x in result.words
if x >= 0
]
return hypo
for b in range(B):
emissions_ptr = emissions.data_ptr() + 4 * b * emissions.stride(0)
results = self.decoder.decode(emissions_ptr, T, N)
nbest_results = results[: self.nbest]
hypos.append([make_hypo(result) for result in nbest_results])
self.lm.empty_cache()
return hypos
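# Hedged usage sketch (added comment; not part of the original file). Both decoders
# above consume float32 CTC emissions of shape (batch, time, vocab) and return, per
# utterance, the n-best hypotheses as dicts with "tokens", "score" and, when a
# lexicon is used, "words". The names `cfg` and `log_probs` below are illustrative:
#
#   decoder = FairseqLMDecoder(cfg, tgt_dict)   # cfg: FlashlightDecoderConfig
#   hypos = decoder.decode(log_probs)           # List[List[Dict[str, ...]]]
#   best_tokens = hypos[0][0]["tokens"]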
| EXA-1-master | exa/libraries/fairseq/examples/speech_recognition/new/decoders/flashlight_decoder.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
Replabel transforms for use with flashlight's ASG criterion.
"""
def replabel_symbol(i):
"""
Replabel symbols used in flashlight, currently just "1", "2", ...
This prevents training with numeral tokens, so this might change in the future
"""
return str(i)
def pack_replabels(tokens, dictionary, max_reps):
"""
Pack a token sequence so that repeated symbols are replaced by replabels
"""
if len(tokens) == 0 or max_reps <= 0:
return tokens
replabel_value_to_idx = [0] * (max_reps + 1)
for i in range(1, max_reps + 1):
replabel_value_to_idx[i] = dictionary.index(replabel_symbol(i))
result = []
prev_token = -1
num_reps = 0
for token in tokens:
if token == prev_token and num_reps < max_reps:
num_reps += 1
else:
if num_reps > 0:
result.append(replabel_value_to_idx[num_reps])
num_reps = 0
result.append(token)
prev_token = token
if num_reps > 0:
result.append(replabel_value_to_idx[num_reps])
return result
def unpack_replabels(tokens, dictionary, max_reps):
"""
Unpack a token sequence so that replabels are replaced by repeated symbols
"""
if len(tokens) == 0 or max_reps <= 0:
return tokens
replabel_idx_to_value = {}
for i in range(1, max_reps + 1):
replabel_idx_to_value[dictionary.index(replabel_symbol(i))] = i
result = []
prev_token = -1
for token in tokens:
try:
for _ in range(replabel_idx_to_value[token]):
result.append(prev_token)
prev_token = -1
except KeyError:
result.append(token)
prev_token = token
return result
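# A minimal, hedged usage sketch (added; not part of the original module). It only
# assumes a dictionary object exposing .index(symbol), which is all the functions
# above require; the stand-in _ToyDict below is purely illustrative.
if __name__ == "__main__":
    class _ToyDict:
        def __init__(self, symbols):
            self._idx = {s: i for i, s in enumerate(symbols)}
        def index(self, symbol):
            return self._idx[symbol]
    # indices 0-3 are ordinary labels; replabel_symbol(1)/(2) occupy indices 4 and 5
    toy_dict = _ToyDict(["a", "b", "c", "d", replabel_symbol(1), replabel_symbol(2)])
    tokens = [0, 0, 0, 1, 2, 2]
    packed = pack_replabels(tokens, toy_dict, max_reps=2)
    # "a a a" packs to "a <rep2>" and "c c" packs to "c <rep1>": [0, 5, 1, 2, 4]
    assert packed == [0, 5, 1, 2, 4]
    assert unpack_replabels(packed, toy_dict, max_reps=2) == tokens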
| EXA-1-master | exa/libraries/fairseq/examples/speech_recognition/data/replabels.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from .asr_dataset import AsrDataset
__all__ = [
"AsrDataset",
]
| EXA-1-master | exa/libraries/fairseq/examples/speech_recognition/data/__init__.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
This module contains a collection of classes which implement
collate functionality for various tasks.
Collaters should know what data to expect for each sample
and should pack / collate them into batches.
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import numpy as np
import torch
from fairseq.data import data_utils as fairseq_data_utils
class Seq2SeqCollater(object):
"""
Implements collate function mainly for seq2seq tasks
This expects each sample to contain feature (src_tokens) and
targets.
This collator is also used for aligned training task.
"""
def __init__(
self,
feature_index=0,
label_index=1,
pad_index=1,
eos_index=2,
move_eos_to_beginning=True,
):
self.feature_index = feature_index
self.label_index = label_index
self.pad_index = pad_index
self.eos_index = eos_index
self.move_eos_to_beginning = move_eos_to_beginning
def _collate_frames(self, frames):
"""Convert a list of 2d frames into a padded 3d tensor
Args:
frames (list): list of 2d frames of size L[i]*f_dim. Where L[i] is
length of i-th frame and f_dim is static dimension of features
Returns:
3d tensor of size len(frames)*len_max*f_dim where len_max is max of L[i]
"""
len_max = max(frame.size(0) for frame in frames)
f_dim = frames[0].size(1)
res = frames[0].new(len(frames), len_max, f_dim).fill_(0.0)
for i, v in enumerate(frames):
res[i, : v.size(0)] = v
return res
def collate(self, samples):
"""
utility function to collate samples into batch for speech recognition.
"""
if len(samples) == 0:
return {}
# parse samples into torch tensors
parsed_samples = []
for s in samples:
# skip invalid samples
if s["data"][self.feature_index] is None:
continue
source = s["data"][self.feature_index]
if isinstance(source, (np.ndarray, np.generic)):
source = torch.from_numpy(source)
target = s["data"][self.label_index]
if isinstance(target, (np.ndarray, np.generic)):
target = torch.from_numpy(target).long()
elif isinstance(target, list):
target = torch.LongTensor(target)
parsed_sample = {"id": s["id"], "source": source, "target": target}
parsed_samples.append(parsed_sample)
samples = parsed_samples
id = torch.LongTensor([s["id"] for s in samples])
frames = self._collate_frames([s["source"] for s in samples])
# sort samples by descending number of frames
frames_lengths = torch.LongTensor([s["source"].size(0) for s in samples])
frames_lengths, sort_order = frames_lengths.sort(descending=True)
id = id.index_select(0, sort_order)
frames = frames.index_select(0, sort_order)
target = None
target_lengths = None
prev_output_tokens = None
if samples[0].get("target", None) is not None:
ntokens = sum(len(s["target"]) for s in samples)
target = fairseq_data_utils.collate_tokens(
[s["target"] for s in samples],
self.pad_index,
self.eos_index,
left_pad=False,
move_eos_to_beginning=False,
)
target = target.index_select(0, sort_order)
target_lengths = torch.LongTensor(
[s["target"].size(0) for s in samples]
).index_select(0, sort_order)
prev_output_tokens = fairseq_data_utils.collate_tokens(
[s["target"] for s in samples],
self.pad_index,
self.eos_index,
left_pad=False,
move_eos_to_beginning=self.move_eos_to_beginning,
)
prev_output_tokens = prev_output_tokens.index_select(0, sort_order)
else:
ntokens = sum(len(s["source"]) for s in samples)
batch = {
"id": id,
"ntokens": ntokens,
"net_input": {"src_tokens": frames, "src_lengths": frames_lengths},
"target": target,
"target_lengths": target_lengths,
"nsentences": len(samples),
}
if prev_output_tokens is not None:
batch["net_input"]["prev_output_tokens"] = prev_output_tokens
return batch
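# A minimal, hedged usage sketch (added; not part of the original module). The pad
# and eos indices below are illustrative; real callers take them from the target
# dictionary, as AsrDataset does.
if __name__ == "__main__":
    collater = Seq2SeqCollater(feature_index=0, label_index=1, pad_index=1, eos_index=2)
    samples = [
        {"id": 0, "data": [np.zeros((4, 3), dtype=np.float32), [5, 6, 2]]},
        {"id": 1, "data": [np.zeros((2, 3), dtype=np.float32), [7, 2]]},
    ]
    batch = collater.collate(samples)
    # features are zero-padded to the longest utterance and sorted by length
    assert batch["net_input"]["src_tokens"].shape == (2, 4, 3)
    assert batch["ntokens"] == 5 and batch["nsentences"] == 2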
| EXA-1-master | exa/libraries/fairseq/examples/speech_recognition/data/collaters.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
def calc_mean_invstddev(feature):
if len(feature.size()) != 2:
raise ValueError("We expect the input feature to be 2-D tensor")
mean = feature.mean(0)
var = feature.var(0)
# avoid division by ~zero
eps = 1e-8
if (var < eps).any():
return mean, 1.0 / (torch.sqrt(var) + eps)
return mean, 1.0 / torch.sqrt(var)
def apply_mv_norm(features):
    # If there are fewer than 2 frames, the variance cannot be computed (it is NaN)
    # and normalization is not possible, so return the item as it is
if features.size(0) < 2:
return features
mean, invstddev = calc_mean_invstddev(features)
res = (features - mean) * invstddev
return res
def lengths_to_encoder_padding_mask(lengths, batch_first=False):
"""
convert lengths (a 1-D Long/Int tensor) to 2-D binary tensor
Args:
lengths: a (B, )-shaped tensor
Return:
max_length: maximum length of B sequences
encoder_padding_mask: a (max_length, B) binary mask, where
[t, b] = 0 for t < lengths[b] and 1 otherwise
TODO:
kernelize this function if benchmarking shows this function is slow
"""
max_lengths = torch.max(lengths).item()
bsz = lengths.size(0)
encoder_padding_mask = torch.arange(
max_lengths
).to( # a (T, ) tensor with [0, ..., T-1]
lengths.device
).view( # move to the right device
1, max_lengths
).expand( # reshape to (1, T)-shaped tensor
bsz, -1
) >= lengths.view( # expand to (B, T)-shaped tensor
bsz, 1
).expand(
-1, max_lengths
)
if not batch_first:
return encoder_padding_mask.t(), max_lengths
else:
return encoder_padding_mask, max_lengths
def encoder_padding_mask_to_lengths(
encoder_padding_mask, max_lengths, batch_size, device
):
"""
convert encoder_padding_mask (2-D binary tensor) to a 1-D tensor
Conventionally, encoder output contains a encoder_padding_mask, which is
a 2-D mask in a shape (T, B), whose (t, b) element indicate whether
encoder_out[t, b] is a valid output (=0) or not (=1). Occasionally, we
need to convert this mask tensor to a 1-D tensor in shape (B, ), where
[b] denotes the valid length of b-th sequence
Args:
encoder_padding_mask: a (T, B)-shaped binary tensor or None; if None,
indicating all are valid
Return:
seq_lengths: a (B,)-shaped tensor, where its (b, )-th element is the
number of valid elements of b-th sequence
max_lengths: maximum length of all sequence, if encoder_padding_mask is
not None, max_lengths must equal to encoder_padding_mask.size(0)
batch_size: batch size; if encoder_padding_mask is
not None, max_lengths must equal to encoder_padding_mask.size(1)
device: which device to put the result on
"""
if encoder_padding_mask is None:
return torch.Tensor([max_lengths] * batch_size).to(torch.int32).to(device)
assert encoder_padding_mask.size(0) == max_lengths, "max_lengths does not match"
assert encoder_padding_mask.size(1) == batch_size, "batch_size does not match"
return max_lengths - torch.sum(encoder_padding_mask, dim=0)
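# A minimal, hedged sanity check (added; not part of the original module) showing
# the mask convention used above: positions at or beyond a sequence's length are 1.
if __name__ == "__main__":
    lengths = torch.LongTensor([3, 1, 2])
    mask, max_len = lengths_to_encoder_padding_mask(lengths, batch_first=True)
    assert max_len == 3 and mask.shape == (3, 3)
    # row 1 has length 1, so positions 1 and 2 are padding
    assert mask[1].tolist() == [False, True, True]
    recovered = encoder_padding_mask_to_lengths(mask.t(), max_len, lengths.size(0), lengths.device)
    assert recovered.tolist() == [3, 1, 2]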
| EXA-1-master | exa/libraries/fairseq/examples/speech_recognition/data/data_utils.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os
import numpy as np
from fairseq.data import FairseqDataset
from . import data_utils
from .collaters import Seq2SeqCollater
class AsrDataset(FairseqDataset):
"""
A dataset representing speech and corresponding transcription.
Args:
aud_paths: (List[str]): A list of str with paths to audio files.
aud_durations_ms (List[int]): A list of int containing the durations of
audio files.
tgt (List[torch.LongTensor]): A list of LongTensors containing the indices
of target transcriptions.
tgt_dict (~fairseq.data.Dictionary): target vocabulary.
ids (List[str]): A list of utterance IDs.
speakers (List[str]): A list of speakers corresponding to utterances.
num_mel_bins (int): Number of triangular mel-frequency bins (default: 80)
frame_length (float): Frame length in milliseconds (default: 25.0)
frame_shift (float): Frame shift in milliseconds (default: 10.0)
"""
def __init__(
self,
aud_paths,
aud_durations_ms,
tgt,
tgt_dict,
ids,
speakers,
num_mel_bins=80,
frame_length=25.0,
frame_shift=10.0,
):
assert frame_length > 0
assert frame_shift > 0
assert all(x > frame_length for x in aud_durations_ms)
self.frame_sizes = [
int(1 + (d - frame_length) / frame_shift) for d in aud_durations_ms
]
assert len(aud_paths) > 0
assert len(aud_paths) == len(aud_durations_ms)
assert len(aud_paths) == len(tgt)
assert len(aud_paths) == len(ids)
assert len(aud_paths) == len(speakers)
self.aud_paths = aud_paths
self.tgt_dict = tgt_dict
self.tgt = tgt
self.ids = ids
self.speakers = speakers
self.num_mel_bins = num_mel_bins
self.frame_length = frame_length
self.frame_shift = frame_shift
self.s2s_collater = Seq2SeqCollater(
0,
1,
pad_index=self.tgt_dict.pad(),
eos_index=self.tgt_dict.eos(),
move_eos_to_beginning=True,
)
def __getitem__(self, index):
import torchaudio
import torchaudio.compliance.kaldi as kaldi
tgt_item = self.tgt[index] if self.tgt is not None else None
path = self.aud_paths[index]
if not os.path.exists(path):
raise FileNotFoundError("Audio file not found: {}".format(path))
sound, sample_rate = torchaudio.load_wav(path)
output = kaldi.fbank(
sound,
num_mel_bins=self.num_mel_bins,
frame_length=self.frame_length,
frame_shift=self.frame_shift,
)
output_cmvn = data_utils.apply_mv_norm(output)
return {"id": index, "data": [output_cmvn.detach(), tgt_item]}
def __len__(self):
return len(self.aud_paths)
def collater(self, samples):
"""Merge a list of samples to form a mini-batch.
Args:
samples (List[int]): sample indices to collate
Returns:
dict: a mini-batch suitable for forwarding with a Model
"""
return self.s2s_collater.collate(samples)
def num_tokens(self, index):
return self.frame_sizes[index]
def size(self, index):
"""Return an example's size as a float or tuple. This value is used when
filtering a dataset with ``--max-positions``."""
return (
self.frame_sizes[index],
len(self.tgt[index]) if self.tgt is not None else 0,
)
def ordered_indices(self):
"""Return an ordered list of indices. Batches will be constructed based
on this order."""
return np.arange(len(self))
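# A minimal, hedged construction sketch (added; not part of the original module).
# The path, duration and target below are illustrative only; __getitem__ would need
# a real audio file and torchaudio, but construction and size queries do not.
if __name__ == "__main__":
    class _ToyDict:
        def pad(self):
            return 1
        def eos(self):
            return 2
    ds = AsrDataset(
        aud_paths=["/path/to/utt1.wav"],  # hypothetical path
        aud_durations_ms=[1000],
        tgt=[[4, 5, 2]],
        tgt_dict=_ToyDict(),
        ids=["utt1"],
        speakers=["spk1"],
    )
    # 1 + (1000 - 25) / 10 frames with the default 25 ms window and 10 ms shift
    assert len(ds) == 1 and ds.num_tokens(0) == 98 and ds.size(0) == (98, 3)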
| EXA-1-master | exa/libraries/fairseq/examples/speech_recognition/data/asr_dataset.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import absolute_import, division, print_function, unicode_literals
import logging
import math
import torch
import torch.nn.functional as F
from fairseq import utils
from fairseq.criterions import FairseqCriterion, register_criterion
@register_criterion("cross_entropy_acc")
class CrossEntropyWithAccCriterion(FairseqCriterion):
def __init__(self, task, sentence_avg):
super().__init__(task)
self.sentence_avg = sentence_avg
def compute_loss(self, model, net_output, target, reduction, log_probs):
# N, T -> N * T
target = target.view(-1)
lprobs = model.get_normalized_probs(net_output, log_probs=log_probs)
if not hasattr(lprobs, "batch_first"):
logging.warning(
"ERROR: we need to know whether "
"batch first for the net output; "
"you need to set batch_first attribute for the return value of "
"model.get_normalized_probs. Now, we assume this is true, but "
"in the future, we will raise exception instead. "
)
batch_first = getattr(lprobs, "batch_first", True)
if not batch_first:
lprobs = lprobs.transpose(0, 1)
# N, T, D -> N * T, D
lprobs = lprobs.view(-1, lprobs.size(-1))
loss = F.nll_loss(
lprobs, target, ignore_index=self.padding_idx, reduction=reduction
)
return lprobs, loss
def get_logging_output(self, sample, target, lprobs, loss):
target = target.view(-1)
mask = target != self.padding_idx
correct = torch.sum(
lprobs.argmax(1).masked_select(mask) == target.masked_select(mask)
)
total = torch.sum(mask)
sample_size = (
sample["target"].size(0) if self.sentence_avg else sample["ntokens"]
)
logging_output = {
"loss": utils.item(loss.data), # * sample['ntokens'],
"ntokens": sample["ntokens"],
"nsentences": sample["target"].size(0),
"sample_size": sample_size,
"correct": utils.item(correct.data),
"total": utils.item(total.data),
"nframes": torch.sum(sample["net_input"]["src_lengths"]).item(),
}
return sample_size, logging_output
def forward(self, model, sample, reduction="sum", log_probs=True):
"""Computes the cross entropy with accuracy metric for the given sample.
This is similar to CrossEntropyCriterion in fairseq, but also
computes accuracy metrics as part of logging
Args:
            logprobs (torch.Tensor): shape N, T, D, i.e.
                batch size, timesteps, dimensions
            targets (torch.Tensor): shape N, T, i.e. batch size, timesteps
Returns:
tuple: With three elements:
1) the loss
2) the sample size, which is used as the denominator for the gradient
3) logging outputs to display while training
TODO:
* Currently this Criterion will only work with LSTMEncoderModels or
FairseqModels which have decoder, or Models which return TorchTensor
as net_output.
We need to make a change to support all FairseqEncoder models.
"""
net_output = model(**sample["net_input"])
target = model.get_targets(sample, net_output)
lprobs, loss = self.compute_loss(
model, net_output, target, reduction, log_probs
)
sample_size, logging_output = self.get_logging_output(
sample, target, lprobs, loss
)
return loss, sample_size, logging_output
@staticmethod
def aggregate_logging_outputs(logging_outputs):
"""Aggregate logging outputs from data parallel training."""
correct_sum = sum(log.get("correct", 0) for log in logging_outputs)
total_sum = sum(log.get("total", 0) for log in logging_outputs)
loss_sum = sum(log.get("loss", 0) for log in logging_outputs)
ntokens = sum(log.get("ntokens", 0) for log in logging_outputs)
nsentences = sum(log.get("nsentences", 0) for log in logging_outputs)
sample_size = sum(log.get("sample_size", 0) for log in logging_outputs)
nframes = sum(log.get("nframes", 0) for log in logging_outputs)
agg_output = {
"loss": loss_sum / sample_size / math.log(2) if sample_size > 0 else 0.0,
# if args.sentence_avg, then sample_size is nsentences, then loss
# is per-sentence loss; else sample_size is ntokens, the loss
# becomes per-output token loss
"ntokens": ntokens,
"nsentences": nsentences,
"nframes": nframes,
"sample_size": sample_size,
"acc": correct_sum * 100.0 / total_sum if total_sum > 0 else 0.0,
"correct": correct_sum,
"total": total_sum,
# total is the number of validate tokens
}
if sample_size != ntokens:
agg_output["nll_loss"] = loss_sum / ntokens / math.log(2)
            # loss: per-sentence loss (sample_size is nsentences here)
            # nll_loss: per output token loss
return agg_output
| EXA-1-master | exa/libraries/fairseq/examples/speech_recognition/criterions/cross_entropy_acc.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
from examples.speech_recognition.data.replabels import pack_replabels
from fairseq import utils
from fairseq.criterions import FairseqCriterion, register_criterion
@register_criterion("asg_loss")
class ASGCriterion(FairseqCriterion):
@staticmethod
def add_args(parser):
group = parser.add_argument_group("ASG Loss")
group.add_argument(
"--asg-transitions-init",
help="initial diagonal value of transition matrix",
type=float,
default=0.0,
)
group.add_argument(
"--max-replabel", help="maximum # of replabels", type=int, default=2
)
group.add_argument(
"--linseg-updates",
help="# of training updates to use LinSeg initialization",
type=int,
default=0,
)
group.add_argument(
"--hide-linseg-messages",
help="hide messages about LinSeg initialization",
action="store_true",
)
def __init__(
self,
task,
silence_token,
asg_transitions_init,
max_replabel,
linseg_updates,
hide_linseg_messages,
):
from flashlight.lib.sequence.criterion import ASGLoss, CriterionScaleMode
super().__init__(task)
self.tgt_dict = task.target_dictionary
self.eos = self.tgt_dict.eos()
self.silence = (
self.tgt_dict.index(silence_token)
if silence_token in self.tgt_dict
else None
)
self.max_replabel = max_replabel
num_labels = len(self.tgt_dict)
self.asg = ASGLoss(num_labels, scale_mode=CriterionScaleMode.TARGET_SZ_SQRT)
self.asg.trans = torch.nn.Parameter(
asg_transitions_init * torch.eye(num_labels), requires_grad=True
)
self.linseg_progress = torch.nn.Parameter(
torch.tensor([0], dtype=torch.int), requires_grad=False
)
self.linseg_maximum = linseg_updates
self.linseg_message_state = "none" if hide_linseg_messages else "start"
@classmethod
def build_criterion(cls, args, task):
return cls(
task,
args.silence_token,
args.asg_transitions_init,
args.max_replabel,
args.linseg_updates,
args.hide_linseg_messages,
)
def linseg_step(self):
if not self.training:
return False
if self.linseg_progress.item() < self.linseg_maximum:
if self.linseg_message_state == "start":
print("| using LinSeg to initialize ASG")
self.linseg_message_state = "finish"
self.linseg_progress.add_(1)
return True
elif self.linseg_message_state == "finish":
print("| finished LinSeg initialization")
self.linseg_message_state = "none"
return False
def replace_eos_with_silence(self, tgt):
if tgt[-1] != self.eos:
return tgt
elif self.silence is None or (len(tgt) > 1 and tgt[-2] == self.silence):
return tgt[:-1]
else:
return tgt[:-1] + [self.silence]
def forward(self, model, sample, reduce=True):
"""Compute the loss for the given sample.
Returns a tuple with three elements:
1) the loss
2) the sample size, which is used as the denominator for the gradient
3) logging outputs to display while training
"""
net_output = model(**sample["net_input"])
emissions = net_output["encoder_out"].transpose(0, 1).contiguous()
B = emissions.size(0)
T = emissions.size(1)
device = emissions.device
target = torch.IntTensor(B, T)
target_size = torch.IntTensor(B)
using_linseg = self.linseg_step()
for b in range(B):
initial_target_size = sample["target_lengths"][b].item()
if initial_target_size == 0:
raise ValueError("target size cannot be zero")
tgt = sample["target"][b, :initial_target_size].tolist()
tgt = self.replace_eos_with_silence(tgt)
tgt = pack_replabels(tgt, self.tgt_dict, self.max_replabel)
tgt = tgt[:T]
if using_linseg:
tgt = [tgt[t * len(tgt) // T] for t in range(T)]
target[b][: len(tgt)] = torch.IntTensor(tgt)
target_size[b] = len(tgt)
loss = self.asg.forward(emissions, target.to(device), target_size.to(device))
if reduce:
loss = torch.sum(loss)
sample_size = (
sample["target"].size(0) if self.args.sentence_avg else sample["ntokens"]
)
logging_output = {
"loss": utils.item(loss.data) if reduce else loss.data,
"ntokens": sample["ntokens"],
"nsentences": sample["target"].size(0),
"sample_size": sample_size,
}
return loss, sample_size, logging_output
@staticmethod
def aggregate_logging_outputs(logging_outputs):
"""Aggregate logging outputs from data parallel training."""
loss_sum = sum(log.get("loss", 0) for log in logging_outputs)
ntokens = sum(log.get("ntokens", 0) for log in logging_outputs)
nsentences = sum(log.get("nsentences", 0) for log in logging_outputs)
sample_size = sum(log.get("sample_size", 0) for log in logging_outputs)
agg_output = {
"loss": loss_sum / nsentences,
"ntokens": ntokens,
"nsentences": nsentences,
"sample_size": sample_size,
}
return agg_output
| EXA-1-master | exa/libraries/fairseq/examples/speech_recognition/criterions/ASG_loss.py |
import importlib
import os
# ASG loss requires flashlight bindings
files_to_skip = set()
try:
import flashlight.lib.sequence.criterion
except ImportError:
files_to_skip.add("ASG_loss.py")
for file in sorted(os.listdir(os.path.dirname(__file__))):
if file.endswith(".py") and not file.startswith("_") and file not in files_to_skip:
criterion_name = file[: file.find(".py")]
importlib.import_module(
"examples.speech_recognition.criterions." + criterion_name
)
| EXA-1-master | exa/libraries/fairseq/examples/speech_recognition/criterions/__init__.py |
#!/usr/bin/env python3
#
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import sys
from sacremoses.normalize import MosesPunctNormalizer
def main(args):
normalizer = MosesPunctNormalizer(lang=args.lang, penn=args.penn)
for line in sys.stdin:
print(normalizer.normalize(line.rstrip()), flush=True)
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("--lang", "-l", default="en")
parser.add_argument("--penn", "-p", action="store_true")
args = parser.parse_args()
main(args)
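# Hedged usage note (added comment): the script is a stdin/stdout filter, e.g.
#   cat raw.de | python normalize.py --lang de > normalized.de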
| EXA-1-master | exa/libraries/fairseq/examples/constrained_decoding/normalize.py |
#!/usr/bin/env python3
#
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import sys
import sacremoses
def main(args):
"""Tokenizes, preserving tabs"""
mt = sacremoses.MosesTokenizer(lang=args.lang)
def tok(s):
return mt.tokenize(s, return_str=True)
for line in sys.stdin:
parts = list(map(tok, line.split("\t")))
print(*parts, sep="\t", flush=True)
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("--lang", "-l", default="en")
parser.add_argument("--penn", "-p", action="store_true")
parser.add_argument("--fields", "-f", help="fields to tokenize")
args = parser.parse_args()
main(args)
| EXA-1-master | exa/libraries/fairseq/examples/constrained_decoding/tok.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
from dataclasses import dataclass
from typing import Dict, List, Optional
import torch
from fairseq.dataclass import FairseqDataclass
from fairseq.models import (
FairseqIncrementalDecoder,
FairseqLanguageModel,
register_model,
)
from .adaptive_span_model import TransformerSeq as AdaptiveSpanTransformerModel
logger = logging.getLogger(__name__)
@dataclass
class AdaptiveSpanSmallConfig(FairseqDataclass):
# defaults come from https://github.com/facebookresearch/adaptive-span/blob/master/experiments/enwik8_small.sh
vocab_size: int = 50
d_model: int = 256
n_head: int = 4
d_inner: int = 1024
n_layer: int = 8
attn_span: int = 1024
dropout: float = 0.0
emb_dropout: float = 0.0
adapt_span_ramp: int = 32
adapt_span_init: float = 0.0
aux_loss_scaler: float = 0.000002
adapt_span_layer: bool = False
@register_model("adaptive_span", dataclass=AdaptiveSpanSmallConfig)
class AdaptiveSpanTransformer(FairseqLanguageModel):
@classmethod
def build_model(cls, cfg: AdaptiveSpanSmallConfig, task):
return cls(AdaptiveSpanDecoder(cfg, task))
def get_aux_loss(self):
return self.decoder.get_aux_loss()
def get_current_max_span(self):
return self.decoder.get_current_max_span()
def get_current_avg_span(self):
return self.decoder.get_current_avg_span()
class AdaptiveSpanDecoder(FairseqIncrementalDecoder):
def __init__(self, cfg, task):
super().__init__(task.target_dictionary)
self.config = cfg
config = AdaptiveSpanSmallConfig(
vocab_size=len(task.target_dictionary),
d_model=cfg.d_model,
n_head=cfg.n_head,
d_inner=cfg.d_inner,
n_layer=cfg.n_layer,
attn_span=cfg.attn_span,
dropout=cfg.dropout,
emb_dropout=cfg.emb_dropout,
adapt_span_ramp=cfg.adapt_span_ramp,
adapt_span_init=cfg.adapt_span_init,
aux_loss_scaler=cfg.aux_loss_scaler,
adapt_span_layer=cfg.adapt_span_layer,
)
logger.info(config)
self.model = AdaptiveSpanTransformerModel(**config.__dict__)
self._mems = None
def forward(
self,
src_tokens,
incremental_state: Optional[Dict[str, List[torch.Tensor]]] = None,
encoder_out=None,
):
bsz = src_tokens.size(0)
if incremental_state is not None: # used during inference
mems = self.get_incremental_state("mems")
src_tokens = src_tokens[:, -1:] # only keep the most recent token
else:
mems = self._mems
if mems is None:
# first time init
mems = self.init_hid_cache(bsz)
output = self.model(x=src_tokens, h_cache=mems,)
if incremental_state is not None:
self.set_incremental_state(incremental_state, "mems", output[1])
else:
self._mems = output[1]
return (output[0],)
def max_positions(self):
return self.config.attn_span
def init_hid_cache(self, batch_sz):
hid = []
for layer in self.model.layers:
param = next(self.model.parameters())
h = torch.zeros(
batch_sz,
layer.get_cache_size(),
self.config.d_model,
dtype=param.dtype,
device=param.device,
)
hid.append(h)
return hid
def get_aux_loss(self):
return self.model.get_aux_loss()
def get_current_max_span(self):
return self.model.get_current_max_span()
def get_current_avg_span(self):
return self.model.get_current_avg_span()
def reorder_incremental_state(
self,
incremental_state: Dict[str, Dict[str, Optional[torch.Tensor]]],
new_order: torch.Tensor,
):
"""Reorder incremental state.
This will be called when the order of the input has changed from the
previous time step. A typical use case is beam search, where the input
order changes between time steps based on the selection of beams.
"""
raise NotImplementedError("This is required for generation/beam search")
# mems = self.get_incremental_state(incremental_state, "mems")
# if mems is not None:
# new_mems = [mems_i.index_select(1, new_order) for mems_i in mems]
# self.set_incremental_state(incremental_state, "mems", new_mems)
| EXA-1-master | exa/libraries/fairseq/examples/adaptive_span/adaptive_span_model_wrapper.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from fairseq.modules.layer_norm import LayerNorm
from .adaptive_span_attention import AdaptiveSpan
# Size notations:
# B = batch_size, H = d_model, M = block_size, L = attn_span
def _skew(X, pad_value):
"""shift every row 1 step to right"""
# X = B x M x L
B, M, L = X.size()
X = F.pad(X, (0, M + 1), value=pad_value) # B x M x (L+M+1)
X = X.view(B, -1) # B x ML+MM+M
X = X[:, :-M] # B x ML+MM
X = X.view(B, M, M + L) # B x M x L+M
return X
def _unskew(X):
"""reverse _skew operation"""
# X = B x M x L+M
B, M, L = X.size()
L -= M
X = X.view(B, -1) # B x ML+MM
X = F.pad(X, (0, M)) # B x ML+MM+M
X = X.view(B, M, M + L + 1) # B x M x L+M+1
X = X[:, :, :L] # B x M x L
return X
class SeqAttention(nn.Module):
"""Sequential self-attention layer.
Each token will attend to its previous fixed number of steps.
Note that attention doesn't include the current step itself.
"""
def __init__(self, d_model, n_head, attn_span, dropout, adapt_span_layer, **kargs):
nn.Module.__init__(self)
self.dropout = nn.Dropout(dropout)
self.d_model = d_model # size of a single head
self.attn_span = attn_span
self.adaptive_span = AdaptiveSpan(
attn_span=attn_span,
n_head=n_head,
adapt_span_layer=adapt_span_layer,
**kargs
)
def forward(self, query, key, value, key_pe):
# query size = B x M x H
# key, value sizes = B x (M+L) x H
key, value, key_pe = self.adaptive_span.trim_memory(query, key, value, key_pe)
# compute attention from context
# B x M (dest) x (M+L) (src)
attn_cont = torch.matmul(query, key.transpose(-1, -2))
attn_cont = _unskew(attn_cont) # B x M x L
# compute the effect of position embedding
attn_pos = torch.matmul(query, key_pe) # B x M x L_pos
attn = attn_cont + attn_pos
attn = attn / math.sqrt(self.d_model) # B x M X L_pos
attn = F.softmax(attn.float(), dim=-1).type_as(attn)
# trim attention lengths according to the learned span
attn = self.adaptive_span(attn)
attn = self.dropout(attn) # B x M X L_pos
attn_cont = _skew(attn, 0) # B x M X (L+M)
out = torch.matmul(attn_cont, value) # B x M x H
return out
def get_cache_size(self):
return self.adaptive_span.get_cache_size()
class MultiHeadSeqAttention(nn.Module):
def __init__(self, d_model, n_head, **kargs):
nn.Module.__init__(self)
assert d_model % n_head == 0
self.n_head = n_head
self.head_dim = d_model // n_head
self.attn = SeqAttention(d_model=self.head_dim, n_head=n_head, **kargs)
self.proj_query = nn.Linear(d_model, d_model, bias=False)
nn.init.xavier_normal_(self.proj_query.weight)
self.proj_out = nn.Linear(d_model, d_model, bias=False)
nn.init.xavier_normal_(self.proj_out.weight)
self.proj_val = nn.Linear(d_model, d_model, bias=False)
nn.init.xavier_normal_(self.proj_val.weight)
self.proj_key = nn.Linear(d_model, d_model, bias=False)
nn.init.xavier_normal_(self.proj_key.weight)
def head_reshape(self, x):
K = self.n_head
D = self.head_dim
x = x.view(x.size()[:-1] + (K, D)) # B x (M+L) x K x D
x = x.transpose(1, 2).contiguous() # B x K x (M+L) x D
x = x.view(-1, x.size(-2), x.size(-1)) # B_K x (M+L) x D
return x
def forward(self, query, key, value, key_pe):
B = query.size(0)
K = self.n_head
D = self.head_dim
M = query.size(1)
query = self.proj_query(query)
query = self.head_reshape(query)
value = self.proj_val(value)
value = self.head_reshape(value)
key = self.proj_key(key)
key = self.head_reshape(key)
out = self.attn(query, key, value, key_pe) # B_K x M x D
out = out.view(B, K, M, D) # B x K x M x D
out = out.transpose(1, 2).contiguous() # B x M x K x D
out = out.view(B, M, -1) # B x M x K_D
out = self.proj_out(out)
return out
class FeedForwardLayer(nn.Module):
def __init__(self, d_model, d_inner, dropout, **kargs):
nn.Module.__init__(self)
self.fc1 = nn.Linear(d_model, d_inner)
self.fc2 = nn.Linear(d_inner, d_model)
nn.init.xavier_uniform_(self.fc1.weight)
nn.init.xavier_uniform_(self.fc2.weight)
self.dropout = nn.Dropout(dropout)
def forward(self, h):
h1 = F.relu(self.fc1(h))
h1 = self.dropout(h1)
h2 = self.fc2(h1)
return h2
class TransformerSeqLayer(nn.Module):
def __init__(self, d_model, **kargs):
nn.Module.__init__(self)
self.attn = MultiHeadSeqAttention(d_model=d_model, **kargs)
self.norm1 = LayerNorm(d_model)
self.ff = FeedForwardLayer(d_model=d_model, **kargs)
self.norm2 = LayerNorm(d_model)
def forward(self, h, h_cache, key_pe):
# h = B x M x H
# h_cache = B x L x H
h_all = torch.cat([h_cache, h], dim=1) # B x (M+L) x H
attn_out = self.attn(h, h_all, h_all, key_pe)
h = self.norm1(h + attn_out) # B x M x H
if self.ff is not None:
ff_out = self.ff(h)
out = self.norm2(h + ff_out) # B x M x H
else:
out = h
return out
def get_cache_size(self):
return self.attn.attn.get_cache_size()
class TransformerSeq(nn.Module):
def __init__(
self,
vocab_size,
d_model,
n_head,
n_layer,
attn_span,
emb_dropout,
aux_loss_scaler,
adapt_span_layer,
**kargs
):
nn.Module.__init__(self)
# token embeddings
self.in_emb = nn.Embedding(vocab_size, d_model)
nn.init.normal_(self.in_emb.weight, mean=0, std=d_model ** -0.5)
self.out_emb = nn.Linear(d_model, vocab_size)
self.aux_loss_scaler = aux_loss_scaler
if emb_dropout > 0:
self.emb_dropout = nn.Dropout(emb_dropout)
else:
self.emb_dropout = None
# position embeddings
self.key_pe = nn.Parameter(torch.randn(1, d_model // n_head, attn_span))
self.layers = nn.ModuleList()
self.layers.extend(
TransformerSeqLayer(
d_model=d_model,
n_head=n_head,
attn_span=attn_span,
adapt_span_layer=adapt_span_layer,
**kargs
)
for _ in range(n_layer)
)
def forward(self, x, h_cache, target=None):
# x size = B x M
block_size = x.size(1)
h = self.in_emb(x) # B x M x H
if self.emb_dropout is not None:
h = self.emb_dropout(h)
h_cache_next = []
for l, layer in enumerate(self.layers):
cache_size = layer.attn.attn.get_cache_size()
if cache_size > block_size:
h_cache_next_l = torch.cat(
[h_cache[l][:, -cache_size + block_size :, :], h], dim=1
).detach()
else:
h_cache_next_l = h[:, -cache_size:, :].detach()
h_cache_next.append(h_cache_next_l)
h = layer(h, h_cache[l], self.key_pe) # B x M x H
if self.emb_dropout is not None:
h = self.emb_dropout(h)
out = F.log_softmax(self.out_emb(h).float(), dim=-1).type_as(h)
dummy_loss = None
return out, h_cache_next, dummy_loss
def get_aux_loss(self):
loss = 0.0
for layer in self.layers:
loss += layer.attn.attn.adaptive_span.get_loss()
return self.aux_loss_scaler * loss
def get_current_max_span(self):
max_span = 0.0
for layer in self.layers:
max_span = max(
max_span, layer.attn.attn.adaptive_span.get_current_max_span()
)
return max_span
def get_current_avg_span(self):
avg_span = 0.0
for layer in self.layers:
avg_span += layer.attn.attn.adaptive_span.get_current_avg_span()
return avg_span / len(self.layers)
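# A minimal, hedged sanity check (added; not part of the original module): _unskew
# is the exact inverse of _skew on a toy B x M x L attention block.
if __name__ == "__main__":
    X = torch.arange(2 * 3 * 4, dtype=torch.float).view(2, 3, 4)  # B=2, M=3, L=4
    assert torch.equal(_unskew(_skew(X, pad_value=0)), X)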
| EXA-1-master | exa/libraries/fairseq/examples/adaptive_span/adaptive_span_model.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import importlib
import os
# automatically import any Python files in the current directory
cur_dir = os.path.dirname(__file__)
for file in os.listdir(cur_dir):
path = os.path.join(cur_dir, file)
if (
not file.startswith("_")
and not file.startswith(".")
and (file.endswith(".py") or os.path.isdir(path))
):
mod_name = file[: file.find(".py")] if file.endswith(".py") else file
module = importlib.import_module(__name__ + "." + mod_name)
| EXA-1-master | exa/libraries/fairseq/examples/adaptive_span/__init__.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import math
from dataclasses import dataclass
import torch.nn.functional as F
from fairseq import utils
from fairseq.logging import metrics
from fairseq.criterions import register_criterion
from fairseq.criterions.cross_entropy import CrossEntropyCriterion
from fairseq.dataclass import FairseqDataclass
from omegaconf import II
@dataclass
class AdaptiveSpanCriterionConfig(FairseqDataclass):
sentence_avg: bool = II("optimization.sentence_avg")
@register_criterion("adaptive_span_loss", dataclass=AdaptiveSpanCriterionConfig)
class AdaptiveSpanCriterion(CrossEntropyCriterion):
def __init__(self, task, sentence_avg):
super().__init__(task, sentence_avg)
def forward(self, model, sample, reduce=True):
"""Compute the loss for the given sample.
Returns a tuple with three elements:
1) the loss here is summed, different from the adaptive span code
2) the sample size, which is used as the denominator for the gradient
3) logging outputs to display while training
"""
net_output = model(**sample["net_input"])
loss, aux_loss, avg_span, max_span = self.compute_loss(
model, net_output, sample, reduce=reduce
)
sample_size = (
sample["target"].size(0) if self.sentence_avg else sample["ntokens"]
)
loss /= sample_size
total_loss = loss + aux_loss
sample_size = 1
logging_output = {
"loss": loss.data,
"ntokens": sample["ntokens"],
"nsentences": sample["target"].size(0),
"sample_size": sample_size,
"total_loss": total_loss.data,
"avg_span": avg_span * sample_size,
"max_span": max_span * sample_size,
}
return total_loss, sample_size, logging_output
def compute_loss(self, model, net_output, sample, reduce=True):
loss, _ = super().compute_loss(model, net_output, sample, reduce)
aux_loss = model.get_aux_loss()
avg_span = model.get_current_avg_span()
max_span = model.get_current_max_span()
return loss, aux_loss, avg_span, max_span
@staticmethod
def reduce_metrics(logging_outputs) -> None:
"""Aggregate logging outputs from data parallel training."""
loss_sum = sum(log.get("loss", 0) for log in logging_outputs)
ntokens = sum(log.get("ntokens", 0) for log in logging_outputs)
sample_size = sum(log.get("sample_size", 0) for log in logging_outputs)
total_loss_sum = sum(log.get("total_loss", 0) for log in logging_outputs)
avg_span_sum = sum(log.get("avg_span", 0) for log in logging_outputs)
max_span_sum = sum(log.get("max_span", 0) for log in logging_outputs)
# we divide by log(2) to convert the loss from base e to base 2
metrics.log_scalar(
"loss", loss_sum / sample_size / math.log(2), sample_size, round=3
)
metrics.log_scalar("avg_span", avg_span_sum / sample_size, sample_size, round=3)
metrics.log_scalar("max_span", max_span_sum / sample_size, sample_size, round=3)
# total loss contains the L1 norm on adaptive-span
metrics.log_scalar(
"total_loss",
total_loss_sum / sample_size / math.log(2),
sample_size,
round=3,
)
if sample_size != ntokens:
metrics.log_scalar(
"nll_loss", loss_sum / ntokens / math.log(2), ntokens, round=3
)
metrics.log_derived(
"ppl", lambda meters: utils.get_perplexity(meters["nll_loss"].avg)
)
else:
metrics.log_derived(
"ppl", lambda meters: utils.get_perplexity(meters["loss"].avg)
)
@staticmethod
def logging_outputs_can_be_summed() -> bool:
"""
Whether the logging outputs returned by `forward` can be summed
across workers prior to calling `reduce_metrics`. Setting this
        to True will improve distributed training speed.
"""
return True
| EXA-1-master | exa/libraries/fairseq/examples/adaptive_span/adaptive_span_loss.py |
../truncated_bptt/truncated_bptt_lm_task.py | EXA-1-master | exa/libraries/fairseq/examples/adaptive_span/truncated_bptt_lm_task.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
class AdaptiveMask(nn.Module):
"""Soft masking function for adaptive size.
It masks out the last K values of an input. The masking value
goes from 1 to 0 gradually, so K can be learned with
back-propagation.
Args:
max_size: maximum size (i.e. input dimension)
ramp_size: size of the ramp going from 0 to 1
init_val: initial size proportion not to be masked out
shape: learn multiple sizes independent of each other
"""
def __init__(self, max_size, ramp_size, init_val=0, shape=(1,)):
nn.Module.__init__(self)
self._max_size = max_size
self._ramp_size = ramp_size
self.current_val = nn.Parameter(torch.zeros(*shape) + init_val)
mask_template = torch.linspace(1 - max_size, 0, steps=max_size)
self.register_buffer("mask_template", mask_template)
def forward(self, x):
mask = self.mask_template.float() + self.current_val.float() * self._max_size
mask = mask / self._ramp_size + 1
mask = mask.clamp(0, 1)
if x.size(-1) < self._max_size:
# the input could have been trimmed beforehand to save computation
mask = mask.narrow(-1, self._max_size - x.size(-1), x.size(-1))
x = (x * mask).type_as(x)
return x
def get_current_max_size(self, include_ramp=True):
current_size = math.ceil(self.current_val.max().item() * self._max_size)
if include_ramp:
current_size += self._ramp_size
current_size = max(0, min(self._max_size, current_size))
return current_size
def get_current_avg_size(self, include_ramp=True):
current_size = math.ceil(
self.current_val.float().mean().item() * self._max_size
)
if include_ramp:
current_size += self._ramp_size
current_size = max(0, min(self._max_size, current_size))
return current_size
def clamp_param(self):
"""this need to be called after each update"""
self.current_val.data.clamp_(0, 1)
class AdaptiveSpan(nn.Module):
"""Adaptive attention span for Transformerself.
This module learns an attention span length from data for each
self-attention head.
Args:
        attn_span: maximum attention span
        adapt_span_ramp: length of the masking ramp
        adapt_span_init: initial size ratio
        n_head: number of attention heads
        adapt_span_layer: learn a single span per layer instead of one per head
"""
def __init__(
self,
attn_span,
adapt_span_ramp,
adapt_span_init,
n_head,
adapt_span_layer,
**kargs
):
nn.Module.__init__(self)
self._max_span = attn_span
self._n_head = n_head
self._adapt_span_layer = adapt_span_layer
if self._adapt_span_layer:
self._mask = AdaptiveMask(
max_size=self._max_span,
ramp_size=adapt_span_ramp,
init_val=adapt_span_init,
)
else:
self._mask = AdaptiveMask(
max_size=self._max_span,
ramp_size=adapt_span_ramp,
init_val=adapt_span_init,
shape=(n_head, 1, 1),
)
def forward(self, attn, normalize=True):
"""mask attention with the right span"""
# batch and head dimensions are merged together, so separate them first
self.clamp_param()
if self._adapt_span_layer:
attn = self._mask(attn)
else:
B = attn.size(0) # batch size
M = attn.size(1) # block size
attn = attn.reshape(B // self._n_head, self._n_head, M, -1)
attn = self._mask(attn)
attn = attn.view(B, M, -1)
return attn
def get_trim_len(self):
"""how much of memory can be trimmed to reduce computation"""
L = self._max_span
trim_len = min(L - 1, L - self._mask.get_current_max_size())
# too fine granularity might be bad for the memory management
trim_len = math.floor(trim_len / 64) * 64
return trim_len
def trim_memory(self, query, key, value, key_pe):
"""trim out unnecessary memory beforehand to reduce computation"""
trim_len = self.get_trim_len()
cache_size = key.size(1) - query.size(1)
trim_len_cache = trim_len - (self._max_span - cache_size)
if trim_len_cache > 0:
key = key[:, trim_len_cache:, :]
value = value[:, trim_len_cache:, :]
elif trim_len_cache < 0:
# cache is too short! this happens when validation resumes
# after a lot of updates.
key = F.pad(key, [0, 0, -trim_len_cache, 0])
value = F.pad(value, [0, 0, -trim_len_cache, 0])
if trim_len > 0:
if key_pe is not None:
key_pe = key_pe[:, :, trim_len:]
return key, value, key_pe
def get_cache_size(self):
"""determine how long the cache should be"""
trim_len = self.get_trim_len()
# give a buffer of 64 steps since a span might increase
# in future updates
return min(self._max_span, self._max_span - trim_len + 64)
def get_loss(self):
"""a loss term for regularizing the span length"""
return self._max_span * self._mask.current_val.float().mean()
def get_current_max_span(self):
return self._mask.get_current_max_size()
def get_current_avg_span(self):
return self._mask.get_current_avg_size()
def clamp_param(self):
self._mask.clamp_param()
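# A minimal, hedged sanity check (added; not part of the original module). With
# current_val = 0.5 and max_size = 8, only the most recent positions keep weight 1
# while the oldest positions are ramped down to 0.
if __name__ == "__main__":
    mask = AdaptiveMask(max_size=8, ramp_size=2, init_val=0.5)
    y = mask(torch.ones(1, 8))
    assert y.shape == (1, 8)
    # the two oldest positions are fully masked, the most recent positions are kept
    assert y[0, 0].item() == 0.0 and y[0, -1].item() == 1.0
    assert mask.get_current_max_size() == 6  # ceil(0.5 * 8) + ramp_size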
| EXA-1-master | exa/libraries/fairseq/examples/adaptive_span/adaptive_span_attention.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from torch.optim import Adagrad
from fairseq.optim import LegacyFairseqOptimizer, register_optimizer
@register_optimizer("adagrad_with_grad_clip")
class FairseqAdagradWithGradClip(LegacyFairseqOptimizer):
def __init__(self, args, params):
super().__init__(args)
self._optimizer = AdagradWithGradClip(params, **self.optimizer_config)
@staticmethod
def add_args(parser):
"""Add optimizer-specific arguments to the parser."""
# fmt: off
parser.add_argument('--weight-decay', '--wd', default=0.0, type=float, metavar='WD',
help='weight decay')
parser.add_argument('--adagrad-clip', default=0.0, type=float, metavar='D',
help='internal grad clip')
# fmt: on
@property
def optimizer_config(self):
"""
Return a kwarg dictionary that will be used to override optimizer
args stored in checkpoints. This allows us to load a checkpoint and
resume training using a different set of optimizer args, e.g., with a
different learning rate.
"""
return {
"lr": self.args.lr[0],
"weight_decay": self.args.weight_decay,
"grad_clip": self.args.adagrad_clip,
}
@property
def supports_flat_params(self):
return False
def _clip_grad(clr, grad, group_grad_clip):
if group_grad_clip > 0:
norm = grad.norm(2).item()
if norm > group_grad_clip:
clr *= group_grad_clip / (norm + 1e-10)
return clr
class AdagradWithGradClip(Adagrad):
"""Adagrad algorithm with custom gradient clipping"""
def __init__(
self,
params,
lr=1e-2,
lr_decay=0,
weight_decay=0,
initial_accumulator_value=0,
grad_clip=0,
):
Adagrad.__init__(
self,
params,
lr=lr,
lr_decay=lr_decay,
weight_decay=weight_decay,
initial_accumulator_value=initial_accumulator_value,
)
self.defaults["grad_clip"] = grad_clip
self.param_groups[0].setdefault("grad_clip", grad_clip)
def step(self, closure=None):
loss = None
if closure is not None:
loss = closure()
for group in self.param_groups:
for p in group["params"]:
if p.grad is None:
continue
grad = p.grad.data
state = self.state[p]
state["step"] += 1
if group["weight_decay"] != 0:
if p.grad.data.is_sparse:
raise RuntimeError(
"weight_decay option is "
"not compatible with sparse "
"gradients"
)
grad = grad.add(group["weight_decay"], p.data)
clr = group["lr"] / (1 + (state["step"] - 1) * group["lr_decay"])
# clip
clr = _clip_grad(clr=clr, grad=grad, group_grad_clip=group["grad_clip"])
if grad.is_sparse:
# the update is non-linear so indices must be unique
grad = grad.coalesce()
grad_indices = grad._indices()
grad_values = grad._values()
size = grad.size()
def make_sparse(values):
constructor = grad.new
if grad_indices.dim() == 0 or values.dim() == 0:
return constructor().resize_as_(grad)
return constructor(grad_indices, values, size)
state["sum"].add_(make_sparse(grad_values.pow(2)))
std = state["sum"]._sparse_mask(grad)
std_values = std._values().sqrt_().add_(1e-10)
p.data.add_(-clr, make_sparse(grad_values / std_values))
else:
state["sum"].addcmul_(1, grad, grad)
std = state["sum"].sqrt().add_(1e-10)
p.data.addcdiv_(-clr, grad, std)
return loss
| EXA-1-master | exa/libraries/fairseq/examples/adaptive_span/adagrad_with_grad_clip.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
from fairseq.models.bart import BARTModel
import argparse
XSUM_KWARGS = dict(beam=6, lenpen=1.0, max_len_b=60, min_len=10, no_repeat_ngram_size=3)
CNN_KWARGS = dict(beam=4, lenpen=2.0, max_len_b=140, min_len=55, no_repeat_ngram_size=3)
@torch.no_grad()
def generate(bart, infile, outfile="bart_hypo.txt", bsz=32, n_obs=None, **eval_kwargs):
count = 1
# if n_obs is not None: bsz = min(bsz, n_obs)
with open(infile) as source, open(outfile, "w") as fout:
sline = source.readline().strip()
slines = [sline]
for sline in source:
if n_obs is not None and count > n_obs:
break
if count % bsz == 0:
hypotheses_batch = bart.sample(slines, **eval_kwargs)
for hypothesis in hypotheses_batch:
fout.write(hypothesis + "\n")
fout.flush()
slines = []
slines.append(sline.strip())
count += 1
if slines != []:
hypotheses_batch = bart.sample(slines, **eval_kwargs)
for hypothesis in hypotheses_batch:
fout.write(hypothesis + "\n")
fout.flush()
def main():
"""
Usage::
python examples/bart/summarize.py \
--model-dir $HOME/bart.large.cnn \
--model-file model.pt \
--src $HOME/data-bin/cnn_dm/test.source
"""
parser = argparse.ArgumentParser()
parser.add_argument(
"--model-dir",
required=True,
type=str,
default="bart.large.cnn/",
help="path containing model file and src_dict.txt",
)
parser.add_argument(
"--model-file",
default="checkpoint_best.pt",
help="where in model_dir are weights saved",
)
parser.add_argument(
"--src", default="test.source", help="text to summarize", type=str
)
parser.add_argument(
"--out", default="test.hypo", help="where to save summaries", type=str
)
parser.add_argument("--bsz", default=32, help="where to save summaries", type=int)
parser.add_argument(
"--n", default=None, help="how many examples to summarize", type=int
)
parser.add_argument(
"--xsum-kwargs",
action="store_true",
default=False,
help="if true use XSUM_KWARGS else CNN_KWARGS",
)
args = parser.parse_args()
eval_kwargs = XSUM_KWARGS if args.xsum_kwargs else CNN_KWARGS
if args.model_dir == "pytorch/fairseq":
bart = torch.hub.load("pytorch/fairseq", args.model_file)
else:
bart = BARTModel.from_pretrained(
args.model_dir,
checkpoint_file=args.model_file,
data_name_or_path=args.model_dir,
)
bart = bart.eval()
if torch.cuda.is_available():
bart = bart.cuda().half()
generate(
bart, args.src, bsz=args.bsz, n_obs=args.n, outfile=args.out, **eval_kwargs
)
if __name__ == "__main__":
main()
| EXA-1-master | exa/libraries/fairseq/examples/bart/summarize.py |
import argparse
from pathlib import Path
import soundfile
def get_insl_frame(parse):
    def is_ont_token(tok):
        return tok[0] in ["[", "]"]
res = []
x = []
for tok in parse.split():
if is_ont_token(tok):
res.extend('_'.join(x))
x = []
res.append(tok.upper())
else:
x.append(tok.upper())
return " ".join(res) + ' | '
def sequencify_utterance(utterance):
utterance = utterance.upper()
utterance = utterance.replace(' ', '|') + '|'
utterance = list(utterance)
utterance = ' '.join(utterance)
return utterance
def generate_fairseq_manifests(manifest, output_path, audio_root=None):
with open(manifest, 'r') as i:
parses = []
utterances = []
filepaths = []
keys = None
for (idx, line) in enumerate(i):
if idx == 0: keys = line.strip().split('\t')
else:
data = { k: v for (k, v) in zip(keys, line.split('\t'))}
parses.append(get_insl_frame(data['decoupled_normalized_seqlogical']))
utterances.append(sequencify_utterance(data['normalized_utterance']))
filepaths.append(data['file_id'])
parses_fp = output_path.with_suffix('.parse')
with open(str(parses_fp), 'w') as o:
for p in parses:
o.write(p + '\n')
utterances_fp = output_path.with_suffix('.ltr')
with open(str(utterances_fp), 'w') as o:
for u in utterances:
o.write(u + '\n')
filepaths_fp = output_path.with_suffix('.tsv')
with open(str(filepaths_fp), 'w') as o:
o.write(str(audio_root) + '\n')
for f in filepaths:
fullpath = audio_root / f
assert fullpath.exists(), f'{fullpath}'
frames = soundfile.info(fullpath).frames
o.write(f'{f}\t{frames}\n')
def main(args):
splits = ['train', 'eval', 'test']
root = Path(args.stop_root)
output_root = Path(args.output)
for split in splits:
stop_manifest_path = root / 'manifests' / (split + '.tsv')
output_path = output_root / (split)
generate_fairseq_manifests(stop_manifest_path, output_path, root)
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Generate fairseq manifests from the STOP dataset.')
parser.add_argument('--stop_root', type=str,
help='path to stop root directory')
parser.add_argument('--output', type=str,
help='output directory')
args = parser.parse_args()
main(args)
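# Hedged worked example (added comment; inputs are illustrative):
#   get_insl_frame("[in:get_weather what 's the weather ]")
#     -> "[IN:GET_WEATHER W H A T _ ' S _ T H E _ W E A T H E R ] | "
#   i.e. ontology tokens are kept whole while slot text is upper-cased,
#   character-split and word-joined with '_'.
#   sequencify_utterance("what's the weather")
#     -> "W H A T ' S | T H E | W E A T H E R |"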
| EXA-1-master | exa/libraries/fairseq/examples/audio_nlp/nlu/generate_manifests.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os
import glob
import argparse
from utils.dedup import deup
import sys
WORKDIR_ROOT = os.environ.get('WORKDIR_ROOT', None)
if WORKDIR_ROOT is None or not WORKDIR_ROOT.strip():
    print('Please specify your working directory root in the OS environment variable WORKDIR_ROOT. Exiting...')
sys.exit(-1)
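# Example invocation (illustrative; the folder names are placeholders):
#   python dedup_all.py --from-folder $WORKDIR_ROOT/ML50/raw \
#       --to-folder $WORKDIR_ROOT/ML50/dedup --directions en_XX-fr_XX,en_XX-de_DE
# If --directions is omitted, main() below infers the language pairs from the
# train.* files found in --from-folder.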
def main():
parser = argparse.ArgumentParser()
parser.add_argument("--from-folder", type=str, required=True,
help="the data folder to be dedup")
parser.add_argument("--to-folder", type=str, required=True,
help="the data folder to save deduped data")
parser.add_argument('--directions', type=str, default=None, required=False)
args = parser.parse_args()
if args.directions is None:
raw_files = glob.glob(f'{args.from_folder}/train*')
directions = [os.path.split(file_path)[-1].split('.')[1] for file_path in raw_files]
else:
directions = args.directions.split(',')
directions = sorted(set(directions))
for direction in directions:
src, tgt = direction.split('-')
src_file = f'{args.from_folder}/train.{src}-{tgt}.{src}'
tgt_file = f'{args.from_folder}/train.{src}-{tgt}.{tgt}'
src_file_out = f'{args.to_folder}/train.{src}-{tgt}.{src}'
tgt_file_out = f'{args.to_folder}/train.{src}-{tgt}.{tgt}'
assert src_file != src_file_out
assert tgt_file != tgt_file_out
print(f'deduping {src_file}, {tgt_file}')
deup(src_file, tgt_file, src_file_out, tgt_file_out)
if __name__ == "__main__":
main()
| EXA-1-master | exa/libraries/fairseq/examples/multilingual/data_scripts/dedup_all.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os
import argparse
import pandas as pd
import sys
WORKDIR_ROOT = os.environ.get('WORKDIR_ROOT', None)
if WORKDIR_ROOT is None or not WORKDIR_ROOT.strip():
    print('Please specify your working directory root in the OS environment variable WORKDIR_ROOT. Exiting...')
sys.exit(-1)
def load_langs(path):
with open(path) as fr:
langs = [l.strip() for l in fr]
return langs
def load_sentences(raw_data, split, direction):
src, tgt = direction.split('-')
src_path = f"{raw_data}/{split}.{direction}.{src}"
tgt_path = f"{raw_data}/{split}.{direction}.{tgt}"
if os.path.exists(src_path) and os.path.exists(tgt_path):
return [(src, open(src_path).read().splitlines()), (tgt, open(tgt_path).read().splitlines())]
else:
return []
def swap_direction(d):
src, tgt = d.split('-')
return f'{tgt}-{src}'
def get_all_test_data(raw_data, directions, split='test'):
test_data = [
x
for dd in directions
for d in [dd, swap_direction(dd)]
for x in load_sentences(raw_data, split, d)
]
# all_test_data = {s for _, d in test_data for s in d}
all_test_data = {}
for lang, d in test_data:
for s in d:
s = s.strip()
lgs = all_test_data.get(s, set())
lgs.add(lang)
all_test_data[s] = lgs
return all_test_data, test_data
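# Shape of the return value (worked out from the code above, shown for reference):
#   all_test_data: {sentence -> set of language codes it appears in}, e.g.
#                  {"Hello world.": {"en_XX", "fr_XX"}}  (the sentence is made up)
#   test_data:     list of (lang, [sentences]) pairs, one per loaded file
# check_train_sentences()/check_train_all() below look training lines up in the
# first mapping to count train/test overlap.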
def check_train_sentences(src_path, tgt_path, direction, all_test_data, mess_up_train={}):
# src, tgt = direction.split('-')
print(f'check training data for {direction} in {src_path} and {tgt_path}')
size = 0
overlapped_size_counted_dup = 0
if not os.path.exists(tgt_path) or not os.path.exists(src_path):
return mess_up_train, size, overlapped_size_counted_dup
with open(src_path) as f, open(tgt_path) as g:
for src_line, tgt_line in zip(f, g):
s = src_line.strip()
t = tgt_line.strip()
size += 1
if s in all_test_data:
langs = mess_up_train.get(s, set())
langs.add(direction)
mess_up_train[s] = langs
overlapped_size_counted_dup += 1
if t in all_test_data:
langs = mess_up_train.get(t, set())
langs.add(direction)
mess_up_train[t] = langs
overlapped_size_counted_dup += 1
print(f'{direction}: size={size}, overlapped={overlapped_size_counted_dup}')
return mess_up_train, size, overlapped_size_counted_dup
def check_train_all(raw_data, directions, all_test_data):
mess_up_train = {}
data_sizes = {}
# raw_data = '~chau/data-bin/MineBART/multilingual_mined_100M/en_XX/et_EE-en_XX/all.{en_XX, et_EE}'
    print(f'checking training data against {len(all_test_data)} test sentences')
print(f'example test data: ', [s for i, s in enumerate(all_test_data.keys()) if i < 10])
for direction in directions:
src, tgt = direction.split('-')
path = f'{raw_data}/en_XX/{direction}/all'
src_path = f'{path}.{src}'
tgt_path = f'{path}.{tgt}'
print(f'checking {src_path} {tgt_path}')
_, size, overlapped_size_counted_dup = check_train_sentences(src_path, tgt_path, direction, all_test_data, mess_up_train)
data_sizes[direction] = (size, overlapped_size_counted_dup)
return mess_up_train, data_sizes
def main():
parser = argparse.ArgumentParser()
parser.add_argument("--folder", type=str, required=True,
help="the data folder ")
parser.add_argument("--test-data", type=str, required=True,
help="the test data folder ")
parser.add_argument('--directions', type=str, default=None, required=False)
args = parser.parse_args()
directions = args.directions.split(',')
directions = sorted(set(directions))
results = []
# print(f'checking where {args.split} split data are in training')
# print(f'direction\tcommon_count\tsrc common\ttgt common\tfrom_size\tto_size')
raw_data = args.folder
all_test_data, test_data = get_all_test_data(args.test_data, directions, split='test')
mess_up_train, data_sizes = check_train_all(raw_data, directions, all_test_data)
print(data_sizes)
if __name__ == "__main__":
main()
| EXA-1-master | exa/libraries/fairseq/examples/multilingual/data_scripts/check_valid_test_overlaps.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os, sys
import subprocess
import re
from subprocess import check_call, check_output
WORKDIR_ROOT = os.environ.get('WORKDIR_ROOT', None)
if WORKDIR_ROOT is None or not WORKDIR_ROOT.strip():
    print('Please specify your working directory root in the OS environment variable WORKDIR_ROOT. Exiting...')
sys.exit(-1)
BLEU_REGEX = re.compile("^BLEU\\S* = (\\S+) ")
def run_eval_bleu(cmd):
output = check_output(cmd, shell=True, stderr=subprocess.STDOUT).decode("utf-8").strip()
print(output)
bleu = -1.0
for line in output.strip().split('\n'):
m = BLEU_REGEX.search(line)
if m is not None:
bleu = m.groups()[0]
bleu = float(bleu)
break
return bleu
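# run_eval_bleu() scans the sacrebleu output for its summary line (something like
# "BLEU+case.mixed+... = 100.0 ..."; the exact signature varies by sacrebleu
# version), returns the first number after "=" as a float, and falls back to -1.0
# when no BLEU line is found.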
def check_data_test_bleu(raw_folder, data_lang_pairs):
not_matchings = []
for sacrebleu_set, src_tgts in data_lang_pairs:
for src_tgt in src_tgts:
print(f'checking test bleus for: {src_tgt} at {sacrebleu_set}')
src, tgt = src_tgt.split('-')
ssrc, stgt = src[:2], tgt[:2]
if os.path.exists(f'{raw_folder}/test.{tgt}-{src}.{src}'):
# reversed direction may have different test set
test_src = f'{raw_folder}/test.{tgt}-{src}.{src}'
else:
test_src = f'{raw_folder}/test.{src}-{tgt}.{src}'
cmd1 = f'cat {test_src} | sacrebleu -t "{sacrebleu_set}" -l {stgt}-{ssrc}; [ $? -eq 0 ] || echo ""'
test_tgt = f'{raw_folder}/test.{src}-{tgt}.{tgt}'
cmd2 = f'cat {test_tgt} | sacrebleu -t "{sacrebleu_set}" -l {ssrc}-{stgt}; [ $? -eq 0 ] || echo ""'
bleu1 = run_eval_bleu(cmd1)
if bleu1 != 100.0:
not_matchings.append(f'{sacrebleu_set}:{src_tgt} source side not matching: {test_src}')
bleu2 = run_eval_bleu(cmd2)
if bleu2 != 100.0:
not_matchings.append(f'{sacrebleu_set}:{src_tgt} target side not matching: {test_tgt}')
return not_matchings
if __name__ == "__main__":
to_data_path = f'{WORKDIR_ROOT}/iwsltv2'
not_matching = check_data_test_bleu(
f'{to_data_path}/raw',
[
('iwslt17', ['en_XX-ar_AR', 'en_XX-ko_KR', 'ar_AR-en_XX', 'ko_KR-en_XX']),
('iwslt17', ['en_XX-it_IT', 'en_XX-nl_XX', 'it_IT-en_XX', 'nl_XX-en_XX']),
('iwslt17/tst2015', ['en_XX-vi_VN', "vi_VN-en_XX"]),
]
)
if len(not_matching) > 0:
print('the following datasets do not have matching test datasets:\n\t', '\n\t'.join(not_matching))
| EXA-1-master | exa/libraries/fairseq/examples/multilingual/data_scripts/check_iswlt_test_data.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import itertools
import os
import csv
from collections import defaultdict
from six.moves import zip
import io
import wget
import sys
from subprocess import check_call, check_output
# scripts and data locations
CWD = os.getcwd()
UTILS = f"{CWD}/utils"
MOSES = f"{UTILS}/mosesdecoder"
WORKDIR_ROOT = os.environ.get('WORKDIR_ROOT', None)
if WORKDIR_ROOT is None or not WORKDIR_ROOT.strip():
    print('Please specify your working directory root in the OS environment variable WORKDIR_ROOT. Exiting...')
sys.exit(-1)
# please download mosesdecoder (https://github.com/moses-smt/mosesdecoder) into the utils/ folder first:
detok_cmd = f'{MOSES}/scripts/tokenizer/detokenizer.perl'
def call(cmd):
print(f"Executing: {cmd}")
check_call(cmd, shell=True)
class MultiLingualAlignedCorpusReader(object):
"""A class to read TED talk dataset
"""
def __init__(self, corpus_path, delimiter='\t',
target_token=True, bilingual=True, corpus_type='file',
lang_dict={'source': ['fr'], 'target': ['en']},
eval_lang_dict=None, zero_shot=False,
detok=True,
):
self.empty_line_flag = 'NULL'
self.corpus_path = corpus_path
self.delimiter = delimiter
self.bilingual = bilingual
self.lang_dict = lang_dict
self.lang_set = set()
self.target_token = target_token
self.zero_shot = zero_shot
self.eval_lang_dict = eval_lang_dict
self.corpus_type = corpus_type
self.detok = detok
for list_ in self.lang_dict.values():
for lang in list_:
self.lang_set.add(lang)
self.data = dict()
self.data['train'] = self.read_aligned_corpus(split_type='train')
self.data['test'] = self.read_aligned_corpus(split_type='test')
self.data['dev'] = self.read_aligned_corpus(split_type='dev')
def read_data(self, file_loc_):
data_list = list()
with io.open(file_loc_, 'r', encoding='utf8') as fp:
for line in fp:
try:
text = line.strip()
except IndexError:
text = self.empty_line_flag
data_list.append(text)
return data_list
def filter_text(self, dict_):
if self.target_token:
field_index = 1
else:
field_index = 0
data_dict = defaultdict(list)
list1 = dict_['source']
list2 = dict_['target']
for sent1, sent2 in zip(list1, list2):
try:
src_sent = ' '.join(sent1.split()[field_index: ])
except IndexError:
src_sent = 'NULL'
if src_sent.find(self.empty_line_flag) != -1 or len(src_sent) == 0:
continue
elif sent2.find(self.empty_line_flag) != -1 or len(sent2) == 0:
continue
else:
data_dict['source'].append(sent1)
data_dict['target'].append(sent2)
return data_dict
def read_file(self, split_type, data_type):
return self.data[split_type][data_type]
def save_file(self, path_, split_type, data_type, lang):
tok_file = tok_file_name(path_, lang)
with io.open(tok_file, 'w', encoding='utf8') as fp:
for line in self.data[split_type][data_type]:
fp.write(line + '\n')
if self.detok:
de_tok(tok_file, lang)
def add_target_token(self, list_, lang_id):
new_list = list()
token = '__' + lang_id + '__'
for sent in list_:
new_list.append(token + ' ' + sent)
return new_list
def read_from_single_file(self, path_, s_lang, t_lang):
data_dict = defaultdict(list)
with io.open(path_, 'r', encoding='utf8') as fp:
reader = csv.DictReader(fp, delimiter='\t', quoting=csv.QUOTE_NONE)
for row in reader:
data_dict['source'].append(row[s_lang])
data_dict['target'].append(row[t_lang])
if self.target_token:
text = self.add_target_token(data_dict['source'], t_lang)
data_dict['source'] = text
return data_dict['source'], data_dict['target']
def read_aligned_corpus(self, split_type='train'):
data_dict = defaultdict(list)
iterable = []
s_list = []
t_list = []
if self.zero_shot:
if split_type == "train":
iterable = zip(self.lang_dict['source'], self.lang_dict['target'])
else:
iterable = zip(self.eval_lang_dict['source'], self.eval_lang_dict['target'])
elif self.bilingual:
iterable = itertools.product(self.lang_dict['source'], self.lang_dict['target'])
for s_lang, t_lang in iterable:
if s_lang == t_lang:
continue
if self.corpus_type == 'file':
split_type_file_path = os.path.join(self.corpus_path,
"all_talks_{}.tsv".format(split_type))
s_list, t_list = self.read_from_single_file(split_type_file_path,
s_lang=s_lang,
t_lang=t_lang)
data_dict['source'] += s_list
data_dict['target'] += t_list
new_data_dict = self.filter_text(data_dict)
return new_data_dict
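# After construction, self.data[split] is a dict of parallel lists:
#   {'source': [...], 'target': [...]}
# When target_token=True, add_target_token() prefixes every source sentence with a
# '__<target-lang>__' tag, e.g. (made-up sentence) "__en__ bonjour le monde".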
def read_langs(corpus_path):
split_type_file_path = os.path.join(corpus_path, 'extracted',
"all_talks_dev.tsv")
with io.open(split_type_file_path, 'r', encoding='utf8') as fp:
reader = csv.DictReader(fp, delimiter='\t', quoting=csv.QUOTE_NONE)
header = next(reader)
return [k for k in header.keys() if k != 'talk_name']
def extra_english(corpus_path, split):
split_type_file_path = os.path.join(corpus_path,
f"all_talks_{split}.tsv")
output_split_type_file_path = os.path.join(corpus_path,
f"all_talks_{split}.en")
with io.open(split_type_file_path, 'r', encoding='utf8') as fp, io.open(output_split_type_file_path, 'w', encoding='utf8') as fw:
reader = csv.DictReader(fp, delimiter='\t', quoting=csv.QUOTE_NONE)
for row in reader:
line = row['en']
fw.write(line + '\n')
de_tok(output_split_type_file_path, 'en')
def tok_file_name(filename, lang):
seps = filename.split('.')
seps.insert(-1, 'tok')
tok_file = '.'.join(seps)
return tok_file
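# Naming convention used by tok_file_name() and de_tok() below (worked example):
#   tok_file_name("train.en_XX-fr_XX.en", "en") -> "train.en_XX-fr_XX.tok.en"
# de_tok() runs the Moses detokenizer over the ".tok." file and writes the result
# to the same name with ".tok." removed.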
def de_tok(tok_file, lang):
# seps = tok_file.split('.')
# seps.insert(-1, 'detok')
# de_tok_file = '.'.join(seps)
de_tok_file = tok_file.replace('.tok.', '.')
cmd = 'perl {detok_cmd} -l {lang} < {tok_file} > {de_tok_file}'.format(
detok_cmd=detok_cmd, tok_file=tok_file,
de_tok_file=de_tok_file, lang=lang[:2])
call(cmd)
def extra_bitex(
ted_data_path,
lsrc_lang,
ltrg_lang,
target_token,
output_data_path,
):
def get_ted_lang(lang):
long_langs = ['pt-br', 'zh-cn', 'zh-tw', 'fr-ca']
if lang[:5] in long_langs:
return lang[:5]
elif lang[:4] =='calv':
return lang[:5]
elif lang in ['pt_BR', 'zh_CN', 'zh_TW', 'fr_CA']:
return lang.lower().replace('_', '-')
return lang[:2]
src_lang = get_ted_lang(lsrc_lang)
trg_lang = get_ted_lang(ltrg_lang)
train_lang_dict={'source': [src_lang], 'target': [trg_lang]}
eval_lang_dict = {'source': [src_lang], 'target': [trg_lang]}
obj = MultiLingualAlignedCorpusReader(corpus_path=ted_data_path,
lang_dict=train_lang_dict,
target_token=target_token,
corpus_type='file',
eval_lang_dict=eval_lang_dict,
zero_shot=False,
bilingual=True)
os.makedirs(output_data_path, exist_ok=True)
lsrc_lang = lsrc_lang.replace('-', '_')
ltrg_lang = ltrg_lang.replace('-', '_')
obj.save_file(output_data_path + f"/train.{lsrc_lang}-{ltrg_lang}.{lsrc_lang}",
split_type='train', data_type='source', lang=src_lang)
obj.save_file(output_data_path + f"/train.{lsrc_lang}-{ltrg_lang}.{ltrg_lang}",
split_type='train', data_type='target', lang=trg_lang)
obj.save_file(output_data_path + f"/test.{lsrc_lang}-{ltrg_lang}.{lsrc_lang}",
split_type='test', data_type='source', lang=src_lang)
obj.save_file(output_data_path + f"/test.{lsrc_lang}-{ltrg_lang}.{ltrg_lang}",
split_type='test', data_type='target', lang=trg_lang)
obj.save_file(output_data_path + f"/valid.{lsrc_lang}-{ltrg_lang}.{lsrc_lang}",
split_type='dev', data_type='source', lang=src_lang)
obj.save_file(output_data_path + f"/valid.{lsrc_lang}-{ltrg_lang}.{ltrg_lang}",
split_type='dev', data_type='target', lang=trg_lang)
def bar_custom(current, total, width=80):
print("Downloading: %d%% [%d / %d] Ks" % (current / total * 100, current / 1000, total / 1000), end='\r')
def download_and_extract(download_to, extract_to):
url = 'http://phontron.com/data/ted_talks.tar.gz'
filename = f"{download_to}/ted_talks.tar.gz"
if os.path.exists(filename):
print(f'{filename} has already been downloaded so skip')
else:
filename = wget.download(url, filename, bar=bar_custom)
if os.path.exists(f'{extract_to}/all_talks_train.tsv'):
print(f'Already extracted so skip')
else:
extract_cmd = f'tar xzfv "{filename}" -C "{extract_to}"'
call(extract_cmd)
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--ted_data_path', type=str, default=WORKDIR_ROOT, required=False)
parser.add_argument(
'--direction-list',
type=str,
# default=None,
#for ML50
default=(
"bn_IN-en_XX,he_IL-en_XX,fa_IR-en_XX,id_ID-en_XX,sv_SE-en_XX,pt_XX-en_XX,ka_GE-en_XX,ka_GE-en_XX,th_TH-en_XX,"
"mr_IN-en_XX,hr_HR-en_XX,uk_UA-en_XX,az_AZ-en_XX,mk_MK-en_XX,gl_ES-en_XX,sl_SI-en_XX,mn_MN-en_XX,"
#non-english directions
# "fr_XX-de_DE," # replaced with wmt20
# "ja_XX-ko_KR,es_XX-pt_XX,ru_RU-sv_SE,hi_IN-bn_IN,id_ID-ar_AR,cs_CZ-pl_PL,ar_AR-tr_TR"
),
required=False)
parser.add_argument('--target-token', action='store_true', default=False)
parser.add_argument('--extract-all-english', action='store_true', default=False)
args = parser.parse_args()
import sys
import json
# TED Talks data directory
ted_data_path = args.ted_data_path
download_to = f'{ted_data_path}/downloads'
extract_to = f'{ted_data_path}/extracted'
#DESTDIR=${WORKDIR_ROOT}/ML50/raw/
output_path = f'{ted_data_path}/ML50/raw'
os.makedirs(download_to, exist_ok=True)
os.makedirs(extract_to, exist_ok=True)
os.makedirs(output_path, exist_ok=True)
download_and_extract(download_to, extract_to)
if args.extract_all_english:
for split in ['train', 'dev', 'test']:
extra_english(ted_data_path, split)
exit(0)
if args.direction_list is not None:
directions = args.direction_list.strip().split(',')
directions = [tuple(d.strip().split('-', 1)) for d in directions if d]
else:
langs = read_langs(ted_data_path)
# directions = [
# '{}.{}'.format(src, tgt)
# for src in langs
# for tgt in langs
# if src < tgt
# ]
directions = [('en', tgt) for tgt in langs if tgt != 'en']
print(f'num directions={len(directions)}: {directions}')
for src_lang, trg_lang in directions:
print('--working on {}-{}'.format(src_lang, trg_lang))
extra_bitex(
extract_to,
src_lang,
trg_lang,
target_token=args.target_token,
output_data_path=output_path
)
| EXA-1-master | exa/libraries/fairseq/examples/multilingual/data_scripts/download_ted_and_extract.py |
from typing import NamedTuple, List
from urllib.parse import urlparse
import os, sys
import subprocess
from subprocess import check_call, check_output
import glob
import wget
import re
import multiprocessing as mp
from functools import partial
import pathlib
from collections import OrderedDict
WORKDIR_ROOT = os.environ.get('WORKDIR_ROOT', None)
if WORKDIR_ROOT is None or not WORKDIR_ROOT.strip():
    print('Please specify your working directory root in the OS environment variable WORKDIR_ROOT. Exiting...')
sys.exit(-1)
# scripts and data locations
CWD = os.getcwd()
UTILS = f"{CWD}/utils"
MOSES = f"{UTILS}/mosesdecoder"
SGM_TOOL = f'{MOSES}/scripts/ems/support/input-from-sgm.perl'
TMX2CORPUS = f"{UTILS}/tmx2corpus"
TMX_TOOL = f'python {TMX2CORPUS}/tmx2corpus.py'
to_data_path = f'{WORKDIR_ROOT}/wmt'
download_to = f'{to_data_path}/downloads'
manually_downloads = f'{to_data_path}/downloads'
extract_to = f'{to_data_path}/extracted'
#DESTDIR=${WORKDIR_ROOT}/ML50/raw/
raw_data = f'{WORKDIR_ROOT}/ML50/raw'
####
class DLDataset(NamedTuple):
name: str
train_urls: List[str]
valid_urls: List[str]
test_urls: List[str]
train_files_patterns: List[str] = []
valid_files_patterns: List[str] = []
test_files_patterns: List[str] = []
def bar_custom(current, total, width=80):
print("Downloading: %d%% [%d / %d] Ks" % (current / total * 100, current / 1000, total / 1000), end='\r')
def get_downloaded_file(dl_folder, url):
if isinstance(url, tuple):
url, f = url
else:
url_f = urlparse(url)
# f = os.path.split(url_f.path)[-1]
f = '_'.join(url_f.path.split('/')[1:])
return url, f"{dl_folder}/{f}"
def download_parts_and_combine(dl_folder, urls, filename):
parts = []
for url_record in urls:
url, part_file = get_downloaded_file(dl_folder, url_record)
if os.path.exists(part_file):
print(f'{part_file} has already been downloaded so skip')
else:
part_file = wget.download(url, part_file, bar=bar_custom)
parts.append(part_file)
def get_combine_cmd(parts):
#default as tar.gz.??
return f'cat {" ".join(parts)} > {filename}'
combine_cmd = get_combine_cmd(parts)
call(combine_cmd, debug=True)
return filename
def download_a_url(dl_folder, url):
url, filename = get_downloaded_file(dl_folder, url)
if os.path.exists(filename):
print(f'{filename} has already been downloaded so skip')
return filename
print(f'downloading {url} to {filename}')
if isinstance(url, list) or isinstance(url, tuple):
download_parts_and_combine(dl_folder, url, filename)
else:
wget.download(url, filename, bar=bar_custom)
    print(f'downloaded: {filename}')
return filename
def download_files(dl_folder, urls, completed_urls={}):
for url_record in urls:
url, _ = get_downloaded_file(dl_folder, url_record)
filename = download_a_url(dl_folder, url_record)
completed_urls[str(url)] = filename
return completed_urls
def check_need_manual_downalod(dl_folder, to_manually_download_urls):
to_be_manually_dowloaded = []
manually_completed_urls = {}
for url_record, instruction in to_manually_download_urls:
url, filename = get_downloaded_file(dl_folder, url_record)
if not os.path.exists(filename):
            print(f'{url} needs to be downloaded manually: please follow the instructions ({instruction}) and copy it to {filename}')
to_be_manually_dowloaded.append((url, filename))
else:
manually_completed_urls[url] = filename
# if len(to_be_manually_dowloaded) > 0:
# raise ValueError('Missing files that need to be downloaded manually; stop the process now.')
return to_be_manually_dowloaded
def download_dataset(to_folder, dl_dataset, completed_urls={}):
download_files(to_folder, dl_dataset.train_urls, completed_urls)
download_files(to_folder, dl_dataset.valid_urls, completed_urls)
download_files(to_folder, dl_dataset.test_urls, completed_urls)
print('completed downloading')
return completed_urls
def call(cmd, debug=False):
if debug:
print(cmd)
check_call(cmd, shell=True)
def get_extract_name(file_path):
path = os.path.split(file_path)
return path[-1] + '_extract' #.split('.')[0]
def extract_file(downloaded_file, extract_folder, get_extract_name=get_extract_name, debug=False):
extract_name = get_extract_name(downloaded_file)
extract_to = f'{extract_folder}/{extract_name}'
os.makedirs(extract_to, exist_ok=True)
if os.path.exists(f'{extract_to}/DONE'):
print(f'{downloaded_file} has already been extracted to {extract_to} so skip')
return extract_to
def get_extract_cmd(filename):
if filename.endswith('.tgz') or filename.endswith('tar.gz'):
return f'tar xzfv {filename} -C {extract_to}'
elif filename.endswith('.gz.tar'):
return f'tar xfv {filename} -C {extract_to}; (cd {extract_to}; gzip -d *.gz; [ $? -eq 0 ] || gzip -d */*.gz)'
elif filename.endswith('.tar'):
return f'tar xfv {filename} -C {extract_to}'
elif filename.endswith('.gz'):
return f'cp {filename} {extract_to}; (cd {extract_to}; gzip -d *.gz)'
elif filename.endswith('.zip'):
return f'unzip {filename} -d {extract_to}'
extract_cmd = get_extract_cmd(downloaded_file)
print(f'extracting {downloaded_file}')
if isinstance(extract_cmd, list):
for c in extract_cmd:
call(c, debug=debug)
else:
call(extract_cmd, debug=debug)
call(f'echo DONE > {extract_to}/DONE')
return extract_to
def extract_all_files(
completed_urls, extract_folder,
get_extract_name=get_extract_name,
completed_extraction={},
debug=False):
extracted_folders = OrderedDict()
for url, downloaded_file in set(completed_urls.items()):
if downloaded_file in completed_extraction:
print(f'{downloaded_file} is already extracted; so skip')
continue
folder = extract_file(downloaded_file, extract_folder, get_extract_name, debug)
extracted_folders[url] = folder
return extracted_folders
def my_glob(folder):
for p in [f'{folder}/*', f'{folder}/*/*', f'{folder}/*/*/*']:
for f in glob.glob(p):
yield f
def sgm2raw(sgm, debug):
to_file = sgm[0:len(sgm) - len('.sgm')]
if os.path.exists(to_file):
debug and print(f'{sgm} already converted to {to_file}; so skip')
return to_file
cmd = f'{SGM_TOOL} < {sgm} > {to_file}'
call(cmd, debug)
return to_file
def tmx2raw(tmx, debug):
to_file = tmx[0:len(tmx) - len('.tmx')]
to_folder = os.path.join(*os.path.split(tmx)[:-1])
if os.path.exists(f'{to_folder}/bitext.en'):
debug and print(f'{tmx} already extracted to {to_file}; so skip')
return to_file
cmd = f'(cd {to_folder}; {TMX_TOOL} {tmx})'
call(cmd, debug)
return to_file
CZENG16_REGEX = re.compile(r'.*?data.plaintext-format/0[0-9]train$')
WMT19_WIKITITLES_REGEX = re.compile(r'.*?wikititles-v1.(\w\w)-en.tsv.gz')
TSV_REGEX = re.compile(r'.*?(\w\w)-(\w\w).tsv$')
def cut_wikitles(wiki_file, debug):
# different languages have different file names:
if wiki_file.endswith('wiki/fi-en/titles.fi-en'):
to_file1 = f'{wiki_file}.fi'
to_file2 = f'{wiki_file}.en'
BACKSLASH = '\\'
cmd1 = f"cat {wiki_file} | sed 's/|||/{BACKSLASH}t/g' |cut -f1 |awk '{{$1=$1}};1' > {to_file1}"
cmd2 = f"cat {wiki_file} | sed 's/|||/{BACKSLASH}t/g' |cut -f2 |awk '{{$1=$1}};1' > {to_file2}"
# elif WMT19_WIKITITLES_REGEX.match(wiki_file):
# src = WMT19_WIKITITLES_REGEX.match(wiki_file).groups()[0]
# to_file1 = f'{wiki_file}.{src}'
# to_file2 = f'{wiki_file}.en'
# cmd1 = f"cat {wiki_file} | cut -f1 |awk '{{$1=$1}};1' > {to_file1}"
# cmd2 = f"cat {wiki_file} | cut -f2 |awk '{{$1=$1}};1' > {to_file2}"
else:
return None
if os.path.exists(to_file1) and os.path.exists(to_file2):
debug and print(f'{wiki_file} already processed to {to_file1} and {to_file2}; so skip')
return wiki_file
call(cmd1, debug=debug)
call(cmd2, debug=debug)
return wiki_file
def cut_tsv(file, debug):
m = TSV_REGEX.match(file)
if m is None:
raise ValueError(f'{file} is not matching tsv pattern')
src = m.groups()[0]
tgt = m.groups()[1]
to_file1 = f'{file}.{src}'
to_file2 = f'{file}.{tgt}'
cmd1 = f"cat {file} | cut -f1 |awk '{{$1=$1}};1' > {to_file1}"
cmd2 = f"cat {file} | cut -f2 |awk '{{$1=$1}};1' > {to_file2}"
if os.path.exists(to_file1) and os.path.exists(to_file2):
debug and print(f'{file} already processed to {to_file1} and {to_file2}; so skip')
return file
call(cmd1, debug=debug)
call(cmd2, debug=debug)
return file
def convert_file_if_needed(file, debug):
if file.endswith('.sgm'):
return sgm2raw(file, debug)
elif file.endswith('.tmx'):
return tmx2raw(file, debug)
elif file.endswith('wiki/fi-en/titles.fi-en'):
return cut_wikitles(file, debug)
# elif WMT19_WIKITITLES_REGEX.match(file):
# return cut_wikitles(file, debug)
elif file.endswith('.tsv'):
return cut_tsv(file, debug)
elif CZENG16_REGEX.match(file):
return convert2czeng17(file, debug)
else:
return file
def convert_files_if_needed(extracted_foldrs, my_glob=my_glob, debug=False):
return {
        url: list(sorted(set(convert_file_if_needed(f, debug) for f in sorted(set(my_glob(folder))))))
for url, folder in extracted_foldrs.items()
}
def match_patt(file_path, file_pattern, src, tgt, lang):
return file_pattern.format(src=src, tgt=tgt, lang=lang) in file_path
def match_patts(file_path, file_patterns, src, tgt, lang):
    for file_pattern in file_patterns:
        if isinstance(file_pattern, tuple):
            file_pattern, directions = file_pattern
            if f'{src}-{tgt}' not in directions:
                continue
        params = {k: v for k, v in [('src', src), ('tgt', tgt), ('lang', lang)] if k in file_pattern}
        matching = file_pattern.format(**params)
        if matching in file_path:
            return True
    return False
def extracted_glob(extracted_folder, file_patterns, src, tgt, lang):
def get_matching_pattern(file_pattern):
params = {
k: v
for k, v in [('src', src), ('tgt', tgt), ('lang', lang)]
if '{' + k + '}' in file_pattern
}
file_pattern = re.sub(r'{src:(.*?)}', r'\1' if lang == src else '', file_pattern)
file_pattern = re.sub(r'{tgt:(.*?)}', r'\1' if lang == tgt else '', file_pattern)
file_pattern = file_pattern.format(**params)
return file_pattern
for file_pattern in file_patterns:
if isinstance(file_pattern, tuple):
file_pattern, lang_pairs = file_pattern
if f'{src}-{tgt}' not in lang_pairs:
continue
# print('working on pattern: ', file_pattern, lang_pairs )
matching_pattern = get_matching_pattern(file_pattern)
if matching_pattern is None:
continue
glob_patterns = f'{extracted_folder}/{matching_pattern}'
# print('glob_patterns: ', glob_patterns)
for f in glob.glob(glob_patterns):
yield f
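# How the pattern templates above are expanded (worked example, not in the original):
# for 'test-full/newstest*{src}{tgt}-{src:src}{tgt:ref}.{lang}' with src='de', tgt='en':
#   lang='de' -> 'test-full/newstest*deen-src.de'   ({src:...} kept, {tgt:...} dropped)
#   lang='en' -> 'test-full/newstest*deen-ref.en'   ({tgt:...} kept, {src:...} dropped)
# i.e. a {src:X}/{tgt:X} segment inserts X only when the current language is the
# source/target side of the pair.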
# for debug usage
def all_extracted_files(split, src, tgt, extracted_folders, split_urls):
def get_url(url):
if isinstance(url, tuple):
url, downloaded_file = url
return url
return [
f
for url in split_urls
for f in my_glob(extracted_folders[str(get_url(url))])
]
def concat_files(split, src, tgt, extracted_folders, split_urls, path_patterns, to_folder, debug=False):
# if debug:
# print('extracted files to be filtered by patterns: ',
# '\n\t'.join(sorted(all_extracted_files(split, src, tgt, extracted_folders, split_urls))))
for lang in [src, tgt]:
to_file = f'{to_folder}/{split}.{src}-{tgt}.{lang}'
s_src, s_tgt, s_lang = src.split('_')[0], tgt.split('_')[0], lang.split('_')[0]
files = []
for url in split_urls:
if isinstance(url, tuple):
url, downloaded_file = url
if str(url) not in extracted_folders:
print(f'warning: {url} not in extracted files')
for extracted_file in set(
extracted_glob(
extracted_folders[str(url)], path_patterns,
s_src, s_tgt, s_lang)):
files.append(extracted_file)
if len(files) == 0:
print('warning: ', f'No files found for split {to_file}')
continue
files = sorted(set(files))
print(f'concating {len(files)} files into {to_file}')
cmd = ['cat'] + [f'"{f}"' for f in files] + [f'>{to_file}']
cmd = " ".join(cmd)
call(cmd, debug=debug)
UTILS = os.path.join(pathlib.Path(__file__).parent, 'utils')
LID_MODEL = f'{download_to}/lid.176.bin'
LID_MULTI = f'{UTILS}/fasttext_multi_filter.py'
def lid_filter(split, src, tgt, from_folder, to_folder, debug=False):
if not os.path.exists(LID_MODEL):
call(f'wget -nc https://dl.fbaipublicfiles.com/fasttext/supervised-models/lid.176.bin -O {LID_MODEL}')
from_prefix = f'{from_folder}/{split}.{src}-{tgt}'
to_prefix = f'{to_folder}/{split}.{src}-{tgt}'
if os.path.exists(f'{from_prefix}.{src}') and os.path.exists(f'{from_prefix}.{tgt}'):
s_src, s_tgt = src.split('_')[0], tgt.split('_')[0]
cmd = (
f'python {LID_MULTI} --model {LID_MODEL} --inputs {from_prefix}.{src} {from_prefix}.{tgt} '
f'--langs {s_src} {s_tgt} --outputs {to_prefix}.{src} {to_prefix}.{tgt}'
)
print(f'filtering {from_prefix}')
call(cmd, debug=debug)
def concat_into_splits(dl_dataset, src, tgt, extracted_folders, to_folder, debug):
to_folder_tmp = f"{to_folder}_tmp"
os.makedirs(to_folder_tmp, exist_ok=True)
concat_files('train', src, tgt,
extracted_folders,
split_urls=dl_dataset.train_urls,
path_patterns=dl_dataset.train_files_patterns,
to_folder=to_folder_tmp, debug=debug)
lid_filter('train', src, tgt, to_folder_tmp, to_folder, debug)
concat_files('valid', src, tgt,
extracted_folders,
split_urls=dl_dataset.valid_urls,
path_patterns=dl_dataset.valid_files_patterns,
to_folder=to_folder, debug=debug)
concat_files('test', src, tgt,
extracted_folders,
split_urls=dl_dataset.test_urls,
path_patterns=dl_dataset.test_files_patterns,
to_folder=to_folder, debug=debug)
def download_multi(dl_folder, extract_folder, urls, num_processes=8, debug=False):
pool = mp.Pool(processes=num_processes)
download_f = partial(download_a_url, dl_folder)
downloaded_files = pool.imap_unordered(download_f, urls)
pool.close()
pool.join()
BLEU_REGEX = re.compile("^BLEU\\S* = (\\S+) ")
def run_eval_bleu(cmd):
output = check_output(cmd, shell=True, stderr=subprocess.STDOUT).decode("utf-8").strip()
print(output)
bleu = -1.0
for line in output.strip().split('\n'):
m = BLEU_REGEX.search(line)
if m is not None:
bleu = m.groups()[0]
bleu = float(bleu)
break
return bleu
def check_wmt_test_bleu(raw_folder, wmt_lang_pairs):
not_matchings = []
for wmt, src_tgts in wmt_lang_pairs:
for src_tgt in src_tgts:
print(f'checking test bleus for: {src_tgt} at {wmt}')
src, tgt = src_tgt.split('-')
ssrc, stgt = src[:2], tgt[:2]
if os.path.exists(f'{raw_folder}/test.{tgt}-{src}.{src}'):
# reversed direction may have different test set
test_src = f'{raw_folder}/test.{tgt}-{src}.{src}'
else:
test_src = f'{raw_folder}/test.{src}-{tgt}.{src}'
cmd1 = f'cat {test_src} | sacrebleu -t "{wmt}" -l {stgt}-{ssrc}; [ $? -eq 0 ] || echo ""'
test_tgt = f'{raw_folder}/test.{src}-{tgt}.{tgt}'
cmd2 = f'cat {test_tgt} | sacrebleu -t "{wmt}" -l {ssrc}-{stgt}; [ $? -eq 0 ] || echo ""'
bleu1 = run_eval_bleu(cmd1)
if bleu1 != 100.0:
not_matchings.append(f'{wmt}:{src_tgt} source side not matching: {test_src}')
bleu2 = run_eval_bleu(cmd2)
if bleu2 != 100.0:
not_matchings.append(f'{wmt}:{src_tgt} target side not matching: {test_tgt}')
return not_matchings
def download_and_extract(
to_folder, lang_pairs, dl_dataset,
to_manually_download_urls,
completed_urls={}, completed_extraction={},
debug=False):
dl_folder = f'{to_folder}/downloads'
extract_folder = f'{to_folder}/extracted'
raw_folder = f'{to_folder}/raw'
lid_filtered = f'{to_folder}/lid_filtered'
os.makedirs(extract_folder, exist_ok=True)
os.makedirs(raw_folder, exist_ok=True)
os.makedirs(lid_filtered, exist_ok=True)
to_be_manually_dowloaded = check_need_manual_downalod(dl_folder, to_manually_download_urls)
completed_urls = download_dataset(
dl_folder, dl_dataset, completed_urls)
if debug:
print('completed urls: ', completed_urls)
extracted_folders = extract_all_files(
completed_urls,
extract_folder=extract_folder,
completed_extraction=completed_extraction,
debug=debug)
if debug:
print('download files have been extracted to folders: ', extracted_folders)
converted_files = convert_files_if_needed(extracted_folders, debug=False)
for src_tgt in lang_pairs:
print(f'working on {dl_dataset.name}: {src_tgt}')
src, tgt = src_tgt.split('-')
concat_into_splits(dl_dataset,
src=src, tgt=tgt,
extracted_folders=extracted_folders,
to_folder=raw_folder, debug=debug)
print('completed data into: ', raw_folder)
def download_czang16(download_to, username=None):
wgets = [
f'wget --user={username} --password=czeng -P {download_to} http://ufallab.ms.mff.cuni.cz/~bojar/czeng16-data/data-plaintext-format.{i}.tar'
for i in range(10)]
cmds = []
for i, cmd in enumerate(wgets):
filename = f'{download_to}/data-plaintext-format.{i}.tar'
if os.path.exists(filename):
print(f'{filename} has already been downloaded; so skip')
continue
cmds.append(cmd)
if cmds and username is None:
raise ValueError('No czeng username is given; please register at http://ufal.mff.cuni.cz/czeng/czeng16 to obtain username to download')
for cmd in cmds:
call(cmd)
print('done with downloading czeng1.6')
def download_czeng17_script(download_to, extract_folder, debug=False):
url = 'http://ufal.mff.cuni.cz/czeng/download.php?f=convert_czeng16_to_17.pl.zip'
filename = f'{download_to}/convert_czeng16_to_17.pl.zip'
extract_to = f'{extract_folder}/{get_extract_name(filename)}'
script_path = f'{extract_to}/convert_czeng16_to_17.pl'
if not os.path.exists(script_path):
wget.download(url, filename, bar=bar_custom)
extract_to = extract_file(f'{download_to}/convert_czeng16_to_17.pl.zip', extract_folder, get_extract_name=get_extract_name, debug=debug)
return script_path
czeng17_script_path = ""
def convert2czeng17(file, debug):
en_file = f'{file}.en'
cs_file = f'{file}.cs'
if not os.path.exists(en_file) or not os.path.exists(cs_file):
cs_cmd = f'cat {file} | perl {czeng17_script_path} | cut -f3 > {cs_file}'
en_cmd = f'cat {file} | perl {czeng17_script_path} | cut -f4 > {en_file}'
call(cs_cmd, debug)
call(en_cmd, debug)
else:
print(f'already extracted: {en_file} and {cs_file}')
return file
def extract_czeng17(extract_folder, debug=False):
url = 'http://ufal.mff.cuni.cz/czeng/download.php?f=convert_czeng16_to_17.pl.zip'
filename = f'{download_to}/convert_czeng16_to_17.pl.zip'
extract_to = f'{extract_folder}/{get_extract_name(filename)}'
script_path = f'{extract_to}/convert_czeng16_to_17.pl'
if not os.path.exists(script_path):
wget.download(url, filename, bar=bar_custom)
extract_to = extract_file(f'{download_to}/convert_czeng16_to_17.pl.zip', extract_folder, get_extract_name=get_extract_name, debug=debug)
return script_path
#########
# definitions of wmt data sources
# for es-en
# Punctuation in the official test sets will be encoded with ASCII characters (not complex Unicode characters) as much as possible. You may want to normalize your system's output before submission. You are able to use a rawer version of the test sets that does not have this normalization.
# script to normalize punctuation: http://www.statmt.org/wmt11/normalize-punctuation.perl
wmt13_es_en = DLDataset(
name='wmt13_es-en',
train_urls=[
'http://www.statmt.org/wmt13/training-parallel-europarl-v7.tgz',
'http://www.statmt.org/wmt13/training-parallel-commoncrawl.tgz',
'http://www.statmt.org/wmt13/training-parallel-un.tgz',
'http://www.statmt.org/wmt13/training-parallel-nc-v8.tgz',
],
valid_urls=[
('http://www.statmt.org/wmt13/dev.tgz', 'wmt13_dev.tgz')
],
test_urls=[
('http://www.statmt.org/wmt13/test.tgz', 'wmt13_test.tgz')
],
train_files_patterns=[
('*/europarl-v7.{src}-{tgt}.{lang}', ['es-en']),
('*commoncrawl.{src}-{tgt}.{lang}', ['es-en']),
('*/news-commentary-v8.{src}-{tgt}.{lang}', ['es-en']),
('un/*undoc.2000.{src}-{tgt}.{lang}', ['es-en']),
] ,
valid_files_patterns=[
('dev/newstest2012.{lang}', ['es-en'])
],
test_files_patterns=[
('test/newstest*.{lang}', ['es-en'])
],
)
wmt14_de_fr_en = DLDataset(
name='wmt14_de_fr_en',
train_urls=[
'http://www.statmt.org/wmt13/training-parallel-europarl-v7.tgz',
'http://www.statmt.org/wmt13/training-parallel-commoncrawl.tgz',
'http://www.statmt.org/wmt13/training-parallel-un.tgz',
'http://www.statmt.org/wmt14/training-parallel-nc-v9.tgz',
        ('http://www.statmt.org/wmt10/training-giga-fren.tar', 'training-giga-fren.gz.tar'), # it is actually a gz.tar
],
valid_urls=[
('http://www.statmt.org/wmt14/dev.tgz', 'wmt14_dev.tgz'),
],
test_urls=[
('http://www.statmt.org/wmt14/test-full.tgz', 'wmt14_test_full.tgz'), # cleaned test sets
],
train_files_patterns=[
('*/europarl-v7.{src}-{tgt}.{lang}', ['fr-en', 'de-en']),
('*commoncrawl.{src}-{tgt}.{lang}', ['fr-en', 'de-en']),
('*/*news-commentary-v9.{src}-{tgt}.{lang}', ['fr-en', 'de-en']),
('un/undoc.2000.{src}-{tgt}.{lang}', ['fr-en']),
('*giga-{src}{tgt}*{lang}', ['fr-en'])
],
valid_files_patterns=[
('dev/newstest2013.{lang}', ['fr-en', 'de-en'])
],
test_files_patterns=[
('test-full/newstest*{src}{tgt}-{src:src}{tgt:ref}.{lang}', ['en-de', 'de-en', 'fr-en', 'en-fr']),
],
)
# pip install git+https://github.com/amake/tmx2corpus.git
wmt16_ro_en = DLDataset(
name='wmt16_ro-en',
train_urls=[
('http://data.statmt.org/wmt16/translation-task/training-parallel-ep-v8.tgz', 'wmt16_training-parallel-ep-v8.tgz'),
('http://opus.nlpl.eu/download.php?f=SETIMES/v2/tmx/en-ro.tmx.gz', 'en-ro.tmx.gz'),
],
valid_urls=[
('http://data.statmt.org/wmt16/translation-task/dev-romanian-updated.tgz', 'wmt16_dev.tgz')
],
test_urls=[
('http://data.statmt.org/wmt16/translation-task/test.tgz', 'wmt16_test.tgz')
],
train_files_patterns=[
('*/*europarl-v8.{src}-{tgt}.{lang}', ['ro-en']),
('bitext.{lang}', ['ro-en']) #setimes from tmux
] ,
valid_files_patterns=[
('dev/newsdev2016*{src}{tgt}*.{lang}', ['ro-en', 'ro-en'])
],
test_files_patterns=[
('test/newstest*{src}{tgt}*.{lang}', ['ro-en', 'en-ro'])
],
)
cwmt_wmt_instruction = 'cwmt download instruction at: http://nlp.nju.edu.cn/cwmt-wmt'
wmt17_fi_lv_tr_zh_en_manual_downloads = [
# fake urls to have unique keys for the data
( ('http://nlp.nju.edu.cn/cwmt-wmt/CASIA2015.zip', 'CASIA2015.zip'), cwmt_wmt_instruction),
( ('http://nlp.nju.edu.cn/cwmt-wmt/CASICT2011.zip', 'CASICT2011.zip'), cwmt_wmt_instruction),
( ('http://nlp.nju.edu.cn/cwmt-wmt/CASICT2015.zip', 'CASICT2015.zip'), cwmt_wmt_instruction),
( ('http://nlp.nju.edu.cn/cwmt-wmt/Datum2015.zip', 'Datum2015.zip'), cwmt_wmt_instruction),
( ('http://nlp.nju.edu.cn/cwmt-wmt/Datum2017.zip', 'Datum2017.zip'), cwmt_wmt_instruction),
( ('http://nlp.nju.edu.cn/cwmt-wmt/NEU2017.zip', 'NEU2017.zip'), cwmt_wmt_instruction),
]
wmt17_fi_lv_tr_zh_en = DLDataset(
name='wmt17_fi_lv_tr_zh_en',
train_urls=[
('http://data.statmt.org/wmt17/translation-task/training-parallel-ep-v8.tgz', 'wmt17_training-parallel-ep-v8.tgz'),
'http://data.statmt.org/wmt17/translation-task/training-parallel-nc-v12.tgz',
'http://www.statmt.org/wmt15/wiki-titles.tgz',
('http://opus.nlpl.eu/download.php?f=SETIMES/v2/tmx/en-tr.tmx.gz', 'en-tr.tmx.gz'),
('http://data.statmt.org/wmt17/translation-task/rapid2016.tgz', 'wmt17_rapid2016.tgz'),
'http://data.statmt.org/wmt17/translation-task/leta.v1.tgz',
'http://data.statmt.org/wmt17/translation-task/dcep.lv-en.v1.tgz',
'http://data.statmt.org/wmt17/translation-task/books.lv-en.v1.tgz',
(('https://stuncorpusprod.blob.core.windows.net/corpusfiles/UNv1.0.en-zh.tar.gz.00',
'https://stuncorpusprod.blob.core.windows.net/corpusfiles/UNv1.0.en-zh.tar.gz.01',), 'UNv1.0.en-zh.tar.gz'),
#manually download files:
('http://nlp.nju.edu.cn/cwmt-wmt/CASIA2015.zip', 'CASIA2015.zip'),
('http://nlp.nju.edu.cn/cwmt-wmt/CASICT2011.zip', 'CASICT2011.zip'),
('http://nlp.nju.edu.cn/cwmt-wmt/CASICT2015.zip', 'CASICT2015.zip'),
('http://nlp.nju.edu.cn/cwmt-wmt/Datum2015.zip', 'Datum2015.zip'),
('http://nlp.nju.edu.cn/cwmt-wmt/Datum2017.zip', 'Datum2017.zip'),
('http://nlp.nju.edu.cn/cwmt-wmt/NEU2017.zip', 'NEU2017.zip'),
],
valid_urls=[
('http://data.statmt.org/wmt17/translation-task/dev.tgz', 'wmt17_dev.tgz'),
],
test_urls=[
#NEW: Improved translations for zh test sets
('http://data.statmt.org/wmt17/translation-task/test-update-1.tgz', 'wmt17_test_zh_en.tgz'),
('http://data.statmt.org/wmt17/translation-task/test.tgz', 'wmt17_test_others.tgz')
],
train_files_patterns=[
('casict*/cas*{src:ch}{tgt:en}.txt', ['zh-en', 'zh-en'] ),
('casia*/cas*{src:ch}{tgt:en}.txt', ['zh-en', 'zh-en'] ),
('dataum*/Book*{src:cn}{tgt:en}.txt', ['zh-en', 'zh-en']),
('neu*/NEU*{src:cn}{tgt:en}.txt', ['zh-en', 'zh-en'] ),
('*/*UNv1.0.en-zh.{src:zh}{tgt:en}', ['zh-en']),
('training/*news-commentary-v12.{src}-{tgt}.{lang}', ['zh-en', ]),
('*/*europarl-v8.{src}-{tgt}.{lang}', ['fi-en', 'lv-en']),
('wiki/fi-en/titles.{src}-{tgt}.{lang}', ['fi-en', ]),
('rapid2016.{tgt}-{src}.{lang}', ['fi-en', 'lv-en']),
('*/leta.{lang}', ['lv-en']),
('*/dcep.{lang}', ['lv-en']),
('*/farewell.{lang}', ['lv-en']),
('bitext.{lang}', ['tr-en']),
] ,
valid_files_patterns=[
('dev/newsdev2017*{src}{tgt}-{src:src}{tgt:ref}.{lang}',
[
'fi-en', 'lv-en', 'tr-en', 'zh-en',
'en-fi', 'en-lv', 'en-tr', 'en-zh'
]),
('dev/newstest2016*{src}{tgt}-{src:src}{tgt:ref}.{lang}',
[
'fi-en', 'tr-en',
'en-fi', 'en-tr',
]),
],
test_files_patterns=[
('test/newstest2017-{src}{tgt}-{src:src}{tgt:ref}.{lang}',
[
'fi-en', 'lv-en', 'tr-en',
'en-fi', 'en-lv', 'en-tr',
]),
('newstest2017-{src}{tgt}-{src:src}{tgt:ref}.{lang}',
[
'zh-en',
'en-zh'
]),
],
)
czeng_instruction = 'download instruction at: http://ufal.mff.cuni.cz/czeng/czeng16'
#alternative: use the prepared data but detokenize it?
wmt18_cs_et_en_manual_downloads = [
#for cs, need to register and download; Register and download CzEng 1.6.
#Better results can be obtained by using a subset of sentences, released under a new version name CzEng 1.7.
# ((f'http://ufallab.ms.mff.cuni.cz/~bojar/czeng16-data/data-plaintext-format.{i}.tar',
# f'data-plaintext-format.{i}.tar'), czeng_instruction)
# for i in range(10)
]
wmt18_cs_et_en = DLDataset(
name='wmt18_cs_et_en',
train_urls=[
'http://www.statmt.org/wmt13/training-parallel-europarl-v7.tgz',
'http://data.statmt.org/wmt18/translation-task/training-parallel-ep-v8.tgz',
'https://s3.amazonaws.com/web-language-models/paracrawl/release1/paracrawl-release1.en-cs.zipporah0-dedup-clean.tgz',
'https://s3.amazonaws.com/web-language-models/paracrawl/release1/paracrawl-release1.en-et.zipporah0-dedup-clean.tgz',
'http://www.statmt.org/wmt13/training-parallel-commoncrawl.tgz',
'http://data.statmt.org/wmt18/translation-task/training-parallel-nc-v13.tgz',
('http://data.statmt.org/wmt18/translation-task/rapid2016.tgz', 'wmt18_rapid2016.tgz'),
# (tuple(
# (f'http://ufallab.ms.mff.cuni.cz/~bojar/czeng16-data/data-plaintext-format.{i}.tar',
# f'data-plaintext-format.{i}.tar')
# for i in range(10)
# ),
# 'czeng16_data_plaintext.gz.tar'),
],
valid_urls=[
('http://data.statmt.org/wmt18/translation-task/dev.tgz', 'wmt18_dev.tgz'),
],
test_urls=[
('http://data.statmt.org/wmt18/translation-task/test.tgz', 'wmt18_test.tgz'),
],
train_files_patterns=[
# ('*/*europarl-v7.{src}-{tgt}.{lang}', ['cs-en']),
('*/*europarl-v8.{src}-{tgt}.{lang}', ['et-en']),
# ('*paracrawl-release1.{tgt}-{src}.zipporah0-dedup-clean.{lang}', ['cs-en', 'et-en']),
('*paracrawl-release1.{tgt}-{src}.zipporah0-dedup-clean.{lang}', ['et-en']),
# ('*commoncrawl.{src}-{tgt}.{lang}', ['cs-en']),
# ('*/news-commentary-v13.{src}-{tgt}.{lang}', ['cs-en']),
# ('data.plaintext-format/*train.{lang}', ['cs-en']),
('rapid2016.{tgt}-{src}.{lang}', ['et-en']),
] ,
valid_files_patterns=[
('dev/newsdev2018*{src}{tgt}-{src:src}{tgt:ref}.{lang}', ['et-en']),
# ('dev/newstest2017*{src}{tgt}-{src:src}{tgt:ref}.{lang}', ['cs-en'])
],
test_files_patterns=[
('test/newstest2018-{src}{tgt}-{src:src}{tgt:ref}.{lang}',
# ['cs-en', 'et-en']),
['et-en']),
]
)
ru_en_yandex_instruction = 'Yandex Corpus download instruction at: https://translate.yandex.ru/corpus?lang=en'
wmt19_ru_gu_kk_lt_manual_downloads = [
(('https://translate.yandex.ru/corpus?lang=en', 'wmt19_1mcorpus.zip'), ru_en_yandex_instruction)
]
wmt19_ru_gu_kk_lt = DLDataset(
name='wmt19_ru_gu_kk_lt',
train_urls=[
'http://www.statmt.org/europarl/v9/training/europarl-v9.lt-en.tsv.gz',
'https://s3.amazonaws.com/web-language-models/paracrawl/release3/en-lt.bicleaner07.tmx.gz',
'https://s3.amazonaws.com/web-language-models/paracrawl/release1/paracrawl-release1.en-ru.zipporah0-dedup-clean.tgz',
'http://www.statmt.org/wmt13/training-parallel-commoncrawl.tgz',
'http://data.statmt.org/news-commentary/v14/training/news-commentary-v14-wmt19.en-kk.tsv.gz',
'http://data.statmt.org/news-commentary/v14/training/news-commentary-v14.en-ru.tsv.gz',
'http://data.statmt.org/wikititles/v1/wikititles-v1.kk-en.tsv.gz',
'http://data.statmt.org/wikititles/v1/wikititles-v1.ru-en.tsv.gz',
'http://data.statmt.org/wikititles/v1/wikititles-v1.kk-en.tsv.gz',
'http://data.statmt.org/wikititles/v1/wikititles-v1.lt-en.tsv.gz',
'http://data.statmt.org/wikititles/v1/wikititles-v1.gu-en.tsv.gz',
(('https://stuncorpusprod.blob.core.windows.net/corpusfiles/UNv1.0.en-ru.tar.gz.00',
'https://stuncorpusprod.blob.core.windows.net/corpusfiles/UNv1.0.en-ru.tar.gz.01',
'https://stuncorpusprod.blob.core.windows.net/corpusfiles/UNv1.0.en-ru.tar.gz.02',),
'wmt19_UNv1.0.en-ru.tar.gz'),
'https://tilde-model.s3-eu-west-1.amazonaws.com/rapid2016.en-lt.tmx.zip',
('https://translate.yandex.ru/corpus?lang=en', 'wmt19_1mcorpus.zip'),
],
valid_urls=[
('http://data.statmt.org/wmt19/translation-task/dev.tgz', 'wmt19_dev.tgz'),
],
test_urls=[
('http://data.statmt.org/wmt19/translation-task/test.tgz', 'wmt19_test.tgz'),
],
train_files_patterns=[
('*europarl-v9.{src}-{tgt}.tsv.{lang}', ['lt-en']),
#paracrawl
('*paracrawl-release1.{tgt}-{src}.zipporah0-dedup-clean.{lang}', ['ru-en']),
('bitext.{lang}', ['lt-en',]),
('*commoncrawl.{src}-{tgt}.{lang}', ['ru-en',]),
('*news-commentary-v14-wmt19.{tgt}-{src}.tsv.{lang}', ['kk-en', ]),
('*news-commentary-v14.{tgt}-{src}.tsv.{lang}', ['ru-en']),
#yandex
('corpus.{tgt}_{src}.1m.{lang}', ['ru-en']),
('wikititles_v1_wikititles-v1.{src}-{tgt}.tsv.{lang}', ['ru-en', 'kk-en', 'lt-en', 'gu-en']),
('*/UNv1.0.{tgt}-{src}.{lang}', ['ru-en']),
#rapid
('bitext.{lang}', ['lt-en'])
],
valid_files_patterns=[
('dev/newsdev2019*{src}{tgt}-{src:src}{tgt:ref}.{lang}', ['gu-en', 'kk-en', 'lt-en']),
('dev/newstest2018*{src}{tgt}-{src:src}{tgt:ref}.{lang}', ['ru-en']),
],
test_files_patterns=[
('sgm/newstest2019-{src}{tgt}-{src:src}{tgt:ref}.{lang}',
['ru-en', 'gu-en', 'kk-en', 'lt-en', 'en-ru', 'en-gu', 'en-kk', 'en-lt']),
]
)
#########
if __name__ == "__main__":
# speed up the downloads with multiple processing
dl_folder = f'{to_data_path}/downloads'
extract_folder = f'{to_data_path}/extracted'
urls = [
url
for dataset in [wmt13_es_en, wmt14_de_fr_en, wmt16_ro_en, wmt18_cs_et_en, wmt19_ru_gu_kk_lt]
for urls in [dataset.train_urls, dataset.valid_urls, dataset.test_urls]
for url in urls
]
urls = set(urls)
download_multi(dl_folder, extract_folder, urls, num_processes=8, debug=True)
    # check manual downloads
to_manually_download_urls = (
wmt17_fi_lv_tr_zh_en_manual_downloads + wmt18_cs_et_en_manual_downloads + wmt19_ru_gu_kk_lt_manual_downloads
)
to_be_manually_dowloaded = check_need_manual_downalod(dl_folder, to_manually_download_urls)
if len(to_be_manually_dowloaded) > 0:
print('Missing files that need to be downloaded manually; stop the process now.')
exit(-1)
completed_urls = {}
completed_extraction = {}
def work_on_wmt(directions, wmt_data):
download_and_extract(
to_data_path,
directions,
wmt_data,
to_manually_download_urls=to_manually_download_urls,
completed_urls=completed_urls, completed_extraction=completed_extraction, debug=True)
work_on_wmt(
['es_XX-en_XX'],
wmt13_es_en,)
work_on_wmt(
[
'fr_XX-en_XX', 'en_XX-fr_XX',
# 'en_XX-de_DE', 'de_DE-en_XX',
],
wmt14_de_fr_en,)
work_on_wmt(
        ['ro_RO-en_XX', 'en_XX-ro_RO'],
wmt16_ro_en,)
work_on_wmt(
[
# 'zh_CN-en_XX',
'lv_LV-en_XX', 'fi_FI-en_XX', 'tr_TR-en_XX',
#in case the reversed directions have different train/valid/test data
# 'en_XX-zh_CN',
'en_XX-lv_LV', 'en_XX-fi_FI', 'en_XX-tr_TR',
],
wmt17_fi_lv_tr_zh_en, )
# czeng17_script_path = download_czeng17_script(download_to, extract_to, debug=False)
# cz_username = None
work_on_wmt(
[
# 'cs_CZ-en_XX',
'et_EE-en_XX'],
wmt18_cs_et_en,)
work_on_wmt(
[
# 'ru_RU-en_XX', 'en_XX-ru_RU',
'gu_IN-en_XX', 'kk_KZ-en_XX', 'lt_LT-en_XX',
#in case the reversed directions have different train/valid/test data
'en_XX-gu_IN', 'en_XX-kk_KZ', 'en_XX-lt_LT'
],
wmt19_ru_gu_kk_lt,)
not_matching = check_wmt_test_bleu(
f'{to_data_path}/raw',
[
('wmt13', ['es_XX-en_XX']),
('wmt14/full', ['fr_XX-en_XX',]),
('wmt16', ['ro_RO-en_XX',]),
# ('wmt17/improved', ['zh_CN-en_XX']),
('wmt17', [ 'lv_LV-en_XX', 'fi_FI-en_XX', 'tr_TR-en_XX']),
('wmt18', ['cs_CZ-en_XX', 'et_EE-en_XX']),
('wmt19', ['gu_IN-en_XX', 'kk_KZ-en_XX', 'lt_LT-en_XX']),
#'ru_RU-en_XX',
]
)
if len(not_matching) > 0:
print('the following datasets do not have matching test datasets:\n\t', '\n\t'.join(not_matching))
| EXA-1-master | exa/libraries/fairseq/examples/multilingual/data_scripts/download_wmt19_and_before.py |
import os, sys
import glob, itertools
import pandas as pd
WORKDIR_ROOT = os.environ.get('WORKDIR_ROOT', None)
if WORKDIR_ROOT is None or not WORKDIR_ROOT.strip():
    print('Please specify your working directory root in the OS environment variable WORKDIR_ROOT. Exiting...')
sys.exit(-1)
def load_langs(path):
with open(path) as fr:
langs = [l.strip() for l in fr]
return langs
def load_sentences(raw_data, split, direction):
src, tgt = direction.split('-')
src_path = f"{raw_data}/{split}.{direction}.{src}"
tgt_path = f"{raw_data}/{split}.{direction}.{tgt}"
if os.path.exists(src_path) and os.path.exists(tgt_path):
return [(src, open(src_path).read().splitlines()), (tgt, open(tgt_path).read().splitlines())]
else:
return []
def swap_direction(d):
src, tgt = d.split('-')
return f'{tgt}-{src}'
def get_all_test_data(raw_data, directions, split='test'):
test_data = [
x
for dd in directions
for d in [dd, swap_direction(dd)]
for x in load_sentences(raw_data, split, d)
]
# all_test_data = {s for _, d in test_data for s in d}
all_test_data = {}
for lang, d in test_data:
for s in d:
s = s.strip()
lgs = all_test_data.get(s, set())
lgs.add(lang)
all_test_data[s] = lgs
return all_test_data, test_data
def check_train_sentences(raw_data, direction, all_test_data, mess_up_train={}):
src, tgt = direction.split('-')
tgt_path = f"{raw_data}/train.{direction}.{tgt}"
src_path = f"{raw_data}/train.{direction}.{src}"
print(f'check training data in {raw_data}/train.{direction}')
size = 0
if not os.path.exists(tgt_path) or not os.path.exists(src_path):
return mess_up_train, size
with open(src_path) as f, open(tgt_path) as g:
for src_line, tgt_line in zip(f, g):
s = src_line.strip()
t = tgt_line.strip()
size += 1
if s in all_test_data:
langs = mess_up_train.get(s, set())
langs.add(direction)
mess_up_train[s] = langs
if t in all_test_data:
langs = mess_up_train.get(t, set())
langs.add(direction)
mess_up_train[t] = langs
return mess_up_train, size
def check_train_all(raw_data, directions, all_test_data):
mess_up_train = {}
data_sizes = {}
for direction in directions:
_, size = check_train_sentences(raw_data, direction, all_test_data, mess_up_train)
data_sizes[direction] = size
return mess_up_train, data_sizes
def count_train_in_other_set(mess_up_train):
train_in_others = [(direction, s) for s, directions in mess_up_train.items() for direction in directions]
counts = {}
for direction, s in train_in_others:
counts[direction] = counts.get(direction, 0) + 1
return counts
def train_size_if_remove_in_otherset(data_sizes, mess_up_train):
counts_in_other = count_train_in_other_set(mess_up_train)
remain_sizes = []
for direction, count in counts_in_other.items():
remain_sizes.append((direction, data_sizes[direction] - count, data_sizes[direction], count, 100 * count / data_sizes[direction] ))
return remain_sizes
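# Each tuple in remain_sizes is
#   (direction, size_after_removal, original_size, num_overlapping, percent_overlapping)
# which matches the columns of the summary DataFrame built in __main__ below.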
def remove_messed_up_sentences(raw_data, direction, mess_up_train, mess_up_train_pairs, corrected_langs):
split = 'train'
src_lang, tgt_lang = direction.split('-')
tgt = f"{raw_data}/{split}.{direction}.{tgt_lang}"
src = f"{raw_data}/{split}.{direction}.{src_lang}"
print(f'working on {direction}: ', src, tgt)
    if not os.path.exists(tgt) or not os.path.exists(src):
        return 0, 0  # keep the (total, kept) tuple shape expected by the caller
corrected_tgt = f"{to_folder}/{split}.{direction}.{tgt_lang}"
corrected_src = f"{to_folder}/{split}.{direction}.{src_lang}"
line_num = 0
keep_num = 0
with open(src, encoding='utf8',) as fsrc, \
open(tgt, encoding='utf8',) as ftgt, \
open(corrected_src, 'w', encoding='utf8') as fsrc_corrected, \
open(corrected_tgt, 'w', encoding='utf8') as ftgt_corrected:
for s, t in zip(fsrc, ftgt):
s = s.strip()
t = t.strip()
if t not in mess_up_train \
and s not in mess_up_train \
and (s, t) not in mess_up_train_pairs \
and (t, s) not in mess_up_train_pairs:
corrected_langs.add(direction)
print(s, file=fsrc_corrected)
print(t, file=ftgt_corrected)
keep_num += 1
line_num += 1
if line_num % 1000 == 0:
print(f'completed {line_num} lines', end='\r')
return line_num, keep_num
##########
def merge_valid_test_messup(mess_up_train_valid, mess_up_train_test):
merged_mess = []
for s in set(list(mess_up_train_valid.keys()) + list(mess_up_train_test.keys())):
if not s:
continue
valid = mess_up_train_valid.get(s, set())
test = mess_up_train_test.get(s, set())
merged_mess.append((s, valid | test))
return dict(merged_mess)
#########
def check_train_pairs(raw_data, direction, all_test_data, mess_up_train={}):
src, tgt = direction.split('-')
#a hack; TODO: check the reversed directions
path1 = f"{raw_data}/train.{src}-{tgt}.{src}"
path2 = f"{raw_data}/train.{src}-{tgt}.{tgt}"
if not os.path.exists(path1) or not os.path.exists(path2) :
return
with open(path1) as f1, open(path2) as f2:
for src_line, tgt_line in zip(f1, f2):
s = src_line.strip()
t = tgt_line.strip()
if (s, t) in all_test_data or (t, s) in all_test_data:
langs = mess_up_train.get( (s, t), set())
langs.add(src)
langs.add(tgt)
mess_up_train[(s, t)] = langs
def load_pairs(raw_data, split, direction):
src, tgt = direction.split('-')
src_f = f"{raw_data}/{split}.{direction}.{src}"
tgt_f = f"{raw_data}/{split}.{direction}.{tgt}"
if tgt != 'en_XX':
src_f, tgt_f = tgt_f, src_f
if os.path.exists(src_f) and os.path.exists(tgt_f):
return list(zip(open(src_f).read().splitlines(),
open(tgt_f).read().splitlines(),
))
else:
return []
# skip_langs = ['cs_CZ', 'en_XX', 'tl_XX', 'tr_TR']
def get_messed_up_test_pairs(split, directions):
test_pairs = [
(d, load_pairs(raw_data, split, d))
for d in directions
]
# all_test_data = {s for _, d in test_data for s in d}
all_test_pairs = {}
for direction, d in test_pairs:
src, tgt = direction.split('-')
for s in d:
langs = all_test_pairs.get(s, set())
langs.add(src)
langs.add(tgt)
all_test_pairs[s] = langs
mess_up_train_pairs = {}
for direction in directions:
check_train_pairs(raw_data, direction, all_test_pairs, mess_up_train_pairs)
return all_test_pairs, mess_up_train_pairs
if __name__ == "__main__":
#######
import argparse
parser = argparse.ArgumentParser()
parser.add_argument(
'--from-folder',
required=True,
type=str)
parser.add_argument(
'--to-folder',
required=True,
type=str)
parser.add_argument(
'--directions',
default=None,
type=str)
args = parser.parse_args()
raw_data = args.from_folder
to_folder = args.to_folder
os.makedirs(to_folder, exist_ok=True)
if args.directions:
directions = args.directions.split(',')
else:
raw_files = itertools.chain(
glob.glob(f'{raw_data}/train*'),
glob.glob(f'{raw_data}/valid*'),
glob.glob(f'{raw_data}/test*'),
)
directions = [os.path.split(file_path)[-1].split('.')[1] for file_path in raw_files]
print('working on directions: ', directions)
##########
all_test_data, test_data = get_all_test_data(raw_data, directions, 'test')
print('==loaded test data==')
all_valid_data, valid_data = get_all_test_data(raw_data, directions, 'valid')
print('==loaded valid data==')
all_valid_test_data = merge_valid_test_messup(all_test_data, all_valid_data)
mess_up_train, data_sizes = check_train_all(raw_data, directions, all_valid_test_data)
print('training messing up with valid, test data:', len(mess_up_train))
data_situation = train_size_if_remove_in_otherset(data_sizes, mess_up_train)
df = pd.DataFrame(data_situation, columns=['direction', 'train_size_after_remove', 'orig_size', 'num_to_remove', 'remove_percent'])
    df = df.sort_values('remove_percent', ascending=False)
df.to_csv(f'{raw_data}/clean_summary.tsv', sep='\t')
print(f'projected data clean summary in: {raw_data}/clean_summary.tsv')
# correct the dataset:
all_test_pairs, mess_up_test_train_pairs = get_messed_up_test_pairs('test', directions)
all_valid_pairs, mess_up_valid_train_pairs = get_messed_up_test_pairs('valid', directions)
all_messed_pairs = set(mess_up_test_train_pairs.keys()).union(set(mess_up_valid_train_pairs.keys()))
corrected_directions = set()
real_data_situation = []
for direction in directions:
org_size, new_size = remove_messed_up_sentences(raw_data, direction, mess_up_train, all_messed_pairs, corrected_directions)
if org_size == 0:
print(f"{direction} has size 0")
continue
real_data_situation.append(
(direction, new_size, org_size, org_size - new_size, (org_size - new_size) / org_size * 100)
)
print('corrected directions: ', corrected_directions)
df = pd.DataFrame(real_data_situation, columns=['direction', 'train_size_after_remove', 'orig_size', 'num_to_remove', 'remove_percent'])
    df = df.sort_values('remove_percent', ascending=False)
df.to_csv(f'{raw_data}/actual_clean_summary.tsv', sep='\t')
print(f'actual data clean summary (which can be different from the projected one because of duplications) in: {raw_data}/actual_clean_summary.tsv')
import shutil
for direction in directions:
src_lang, tgt_lang = direction.split('-')
for split in ['train', 'valid', 'test']:
# copying valid, test and uncorrected train
if direction in corrected_directions and split == 'train':
continue
tgt = f"{raw_data}/{split}.{direction}.{tgt_lang}"
src = f"{raw_data}/{split}.{direction}.{src_lang}"
if not (os.path.exists(src) and os.path.exists(tgt)):
continue
corrected_tgt = f"{to_folder}/{split}.{direction}.{tgt_lang}"
corrected_src = f"{to_folder}/{split}.{direction}.{src_lang}"
print(f'copying {src} to {corrected_src}')
shutil.copyfile(src, corrected_src)
print(f'copying {tgt} to {corrected_tgt}')
shutil.copyfile(tgt, corrected_tgt)
print('completed') | EXA-1-master | exa/libraries/fairseq/examples/multilingual/data_scripts/remove_valid_test_in_train.py |
import shutil
import os, sys
from subprocess import check_call, check_output
import glob
import argparse
import shutil
import pathlib
import itertools
def call_output(cmd):
print(f"Executing: {cmd}")
ret = check_output(cmd, shell=True)
print(ret)
return ret
def call(cmd):
print(cmd)
check_call(cmd, shell=True)
WORKDIR_ROOT = os.environ.get('WORKDIR_ROOT', None)
if WORKDIR_ROOT is None or not WORKDIR_ROOT.strip():
    print('Please specify your working directory root in the OS environment variable WORKDIR_ROOT. Exiting...')
sys.exit(-1)
SPM_PATH = os.environ.get('SPM_PATH', None)
if SPM_PATH is None or not SPM_PATH.strip():
print("Please install sentence piecence from https://github.com/google/sentencepiece and set SPM_PATH pointing to the installed spm_encode.py. Exitting...")
sys.exit(-1)
SPM_MODEL = f'{WORKDIR_ROOT}/sentence.bpe.model'
SPM_VOCAB = f'{WORKDIR_ROOT}/dict_250k.txt'
SPM_ENCODE = f'{SPM_PATH}'
if not os.path.exists(SPM_MODEL):
call(f"wget https://dl.fbaipublicfiles.com/fairseq/models/mbart50/sentence.bpe.model -O {SPM_MODEL}")
if not os.path.exists(SPM_VOCAB):
call(f"wget https://dl.fbaipublicfiles.com/fairseq/models/mbart50/dict_250k.txt -O {SPM_VOCAB}")
def get_data_size(raw):
cmd = f'wc -l {raw}'
ret = call_output(cmd)
return int(ret.split()[0])
def encode_spm(model, direction, prefix='', splits=['train', 'test', 'valid'], pairs_per_shard=None):
src, tgt = direction.split('-')
for split in splits:
src_raw, tgt_raw = f'{RAW_DIR}/{split}{prefix}.{direction}.{src}', f'{RAW_DIR}/{split}{prefix}.{direction}.{tgt}'
if os.path.exists(src_raw) and os.path.exists(tgt_raw):
cmd = f"""python {SPM_ENCODE} \
--model {model}\
--output_format=piece \
--inputs {src_raw} {tgt_raw} \
--outputs {BPE_DIR}/{direction}{prefix}/{split}.bpe.{src} {BPE_DIR}/{direction}{prefix}/{split}.bpe.{tgt} """
print(cmd)
call(cmd)
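# Binarize the SPM-encoded splits with fairseq-preprocess, writing .bin/.idx files and
# dictionaries into `databin_dir` (joined dictionary unless separate src/tgt vocabs are given).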
def binarize_(
bpe_dir,
databin_dir,
direction, spm_vocab=SPM_VOCAB,
splits=['train', 'test', 'valid'],
):
src, tgt = direction.split('-')
try:
shutil.rmtree(f'{databin_dir}', ignore_errors=True)
os.mkdir(f'{databin_dir}')
except OSError as error:
print(error)
cmds = [
"fairseq-preprocess",
f"--source-lang {src} --target-lang {tgt}",
f"--destdir {databin_dir}/",
f"--workers 8",
]
if isinstance(spm_vocab, tuple):
src_vocab, tgt_vocab = spm_vocab
cmds.extend(
[
f"--srcdict {src_vocab}",
f"--tgtdict {tgt_vocab}",
]
)
else:
cmds.extend(
[
f"--joined-dictionary",
f"--srcdict {spm_vocab}",
]
)
input_options = []
if 'train' in splits and glob.glob(f"{bpe_dir}/train.bpe*"):
input_options.append(
f"--trainpref {bpe_dir}/train.bpe",
)
if 'valid' in splits and glob.glob(f"{bpe_dir}/valid.bpe*"):
input_options.append(f"--validpref {bpe_dir}/valid.bpe")
if 'test' in splits and glob.glob(f"{bpe_dir}/test.bpe*"):
input_options.append(f"--testpref {bpe_dir}/test.bpe")
if len(input_options) > 0:
cmd = " ".join(cmds + input_options)
print(cmd)
call(cmd)
def binarize(
databin_dir,
direction, spm_vocab=SPM_VOCAB, prefix='',
splits=['train', 'test', 'valid'],
pairs_per_shard=None,
):
def move_databin_files(from_folder, to_folder):
for bin_file in glob.glob(f"{from_folder}/*.bin") \
+ glob.glob(f"{from_folder}/*.idx") \
+ glob.glob(f"{from_folder}/dict*"):
try:
shutil.move(bin_file, to_folder)
except OSError as error:
print(error)
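    # Without sharding, binarize everything in one pass and move the files into place; with
    # sharding, binarize valid/test once, binarize each train shard separately, and symlink
    # the valid/test files into every shard folder.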
bpe_databin_dir = f"{BPE_DIR}/{direction}{prefix}_databin"
bpe_dir = f"{BPE_DIR}/{direction}{prefix}"
if pairs_per_shard is None:
binarize_(bpe_dir, bpe_databin_dir, direction, spm_vocab=spm_vocab, splits=splits)
move_databin_files(bpe_databin_dir, databin_dir)
else:
# binarize valid and test which will not be sharded
binarize_(
bpe_dir, bpe_databin_dir, direction,
spm_vocab=spm_vocab, splits=[s for s in splits if s != "train"])
for shard_bpe_dir in glob.glob(f"{bpe_dir}/shard*"):
path_strs = os.path.split(shard_bpe_dir)
shard_str = path_strs[-1]
shard_folder = f"{bpe_databin_dir}/{shard_str}"
databin_shard_folder = f"{databin_dir}/{shard_str}"
print(f'working from {shard_folder} to {databin_shard_folder}')
os.makedirs(databin_shard_folder, exist_ok=True)
binarize_(
shard_bpe_dir, shard_folder, direction,
spm_vocab=spm_vocab, splits=["train"])
for test_data in glob.glob(f"{bpe_databin_dir}/valid.*") + glob.glob(f"{bpe_databin_dir}/test.*"):
filename = os.path.split(test_data)[-1]
try:
os.symlink(test_data, f"{databin_shard_folder}/{filename}")
except OSError as error:
print(error)
move_databin_files(shard_folder, databin_shard_folder)
def load_langs(path):
with open(path) as fr:
langs = [l.strip() for l in fr]
return langs
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("--data_root", default=f"{WORKDIR_ROOT}/ML50")
parser.add_argument("--raw-folder", default='raw')
parser.add_argument("--bpe-folder", default='bpe')
parser.add_argument("--databin-folder", default='databin')
args = parser.parse_args()
DATA_PATH = args.data_root #'/private/home/yuqtang/public_data/ML50'
RAW_DIR = f'{DATA_PATH}/{args.raw_folder}'
BPE_DIR = f'{DATA_PATH}/{args.bpe_folder}'
DATABIN_DIR = f'{DATA_PATH}/{args.databin_folder}'
os.makedirs(BPE_DIR, exist_ok=True)
raw_files = itertools.chain(
glob.glob(f'{RAW_DIR}/train*'),
glob.glob(f'{RAW_DIR}/valid*'),
glob.glob(f'{RAW_DIR}/test*'),
)
directions = [os.path.split(file_path)[-1].split('.')[1] for file_path in raw_files]
for direction in directions:
prefix = ""
splits = ['train', 'valid', 'test']
try:
shutil.rmtree(f'{BPE_DIR}/{direction}{prefix}', ignore_errors=True)
os.mkdir(f'{BPE_DIR}/{direction}{prefix}')
os.makedirs(DATABIN_DIR, exist_ok=True)
except OSError as error:
print(error)
spm_model, spm_vocab = SPM_MODEL, SPM_VOCAB
encode_spm(spm_model, direction=direction, splits=splits)
binarize(DATABIN_DIR, direction, spm_vocab=spm_vocab, splits=splits)
| EXA-1-master | exa/libraries/fairseq/examples/multilingual/data_scripts/binarize.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os
import glob
import argparse
from utils.dedup import deup
import sys
WORKDIR_ROOT = os.environ.get('WORKDIR_ROOT', None)
if WORKDIR_ROOT is None or not WORKDIR_ROOT.strip():
    print('Please specify your working directory root in the OS environment variable WORKDIR_ROOT. Exiting...')
sys.exit(-1)
def get_directions(folder):
raw_files = glob.glob(f'{folder}/train*')
directions = [os.path.split(file_path)[-1].split('.')[1] for file_path in raw_files]
return directions
def diff_list(lhs, rhs):
return set(lhs).difference(set(rhs))
def check_diff(
from_src_file, from_tgt_file,
to_src_file, to_tgt_file,
):
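    """Count how many unique (src, tgt) pairs (and individual src / tgt sentences) of the
    `to_*` files also appear in the `from_*` files; returns
    (common, common_src, common_tgt, from_count, to_count)."""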
seen_in_from = set()
seen_src_in_from = set()
seen_tgt_in_from = set()
from_count = 0
with open(from_src_file, encoding='utf-8') as fsrc, \
open(from_tgt_file, encoding='utf-8') as ftgt:
for s, t in zip(fsrc, ftgt):
seen_in_from.add((s, t))
seen_src_in_from.add(s)
seen_tgt_in_from.add(t)
from_count += 1
common = 0
common_src = 0
common_tgt = 0
to_count = 0
seen = set()
with open(to_src_file, encoding='utf-8') as fsrc, \
open(to_tgt_file, encoding='utf-8') as ftgt:
for s, t in zip(fsrc, ftgt):
to_count += 1
if (s, t) not in seen:
if (s, t) in seen_in_from:
common += 1
if s in seen_src_in_from:
common_src += 1
seen_src_in_from.remove(s)
if t in seen_tgt_in_from:
common_tgt += 1
seen_tgt_in_from.remove(t)
seen.add((s, t))
return common, common_src, common_tgt, from_count, to_count
def main():
parser = argparse.ArgumentParser()
parser.add_argument("--folder", type=str, required=True,
help="the data folder ")
parser.add_argument("--split", type=str, default='test',
help="split (valid, test) to check against training data")
parser.add_argument('--directions', type=str, default=None, required=False)
args = parser.parse_args()
if args.directions is None:
directions = set(get_directions(args.folder))
directions = sorted(directions)
else:
directions = args.directions.split(',')
directions = sorted(set(directions))
results = []
print(f'checking where {args.split} split data are in training')
print(f'direction\tcommon_count\tsrc common\ttgt common\tfrom_size\tto_size')
for direction in directions:
src, tgt = direction.split('-')
from_src_file = f'{args.folder}/{args.split}.{src}-{tgt}.{src}'
from_tgt_file = f'{args.folder}/{args.split}.{src}-{tgt}.{tgt}'
if not os.path.exists(from_src_file):
            # some test/valid data might be in reverse directions:
from_src_file = f'{args.folder}/{args.split}.{tgt}-{src}.{src}'
from_tgt_file = f'{args.folder}/{args.split}.{tgt}-{src}.{tgt}'
to_src_file = f'{args.folder}/train.{src}-{tgt}.{src}'
to_tgt_file = f'{args.folder}/train.{src}-{tgt}.{tgt}'
if not os.path.exists(to_src_file) or not os.path.exists(from_src_file):
continue
r = check_diff(from_src_file, from_tgt_file, to_src_file, to_tgt_file)
results.append(r)
print(f'{direction}\t', '\t'.join(map(str, r)))
if __name__ == "__main__":
main()
| EXA-1-master | exa/libraries/fairseq/examples/multilingual/data_scripts/check_self_overlaps.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#!/bin/python
import fasttext
from multiprocessing import Pool
import contextlib
import sys
import argparse
from functools import partial
import io
model = None
def init(model_path):
global model
model = fasttext.load_model(model_path)
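# fastText returns labels of the form "__label__<lang>"; slicing off the first 9
# characters leaves just the language id, which is compared against --langs below.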
def pred(lines):
return lines, [model.predict(line.strip())[0][0][9:] for line in lines]
def main():
parser = argparse.ArgumentParser()
parser.add_argument("--model", type=str, required=True,
help="model to load")
parser.add_argument("--inputs", nargs="+", default=['-'],
help="input files to filter")
parser.add_argument("--langs", nargs="+", required=True,
help="lang ids of each input file")
parser.add_argument("--outputs", nargs="+", default=['-'],
help="path to save lid filtered outputs")
parser.add_argument("--num-workers", type=int, metavar="N", default=10,
help="number of processes in parallel")
args = parser.parse_args()
assert len(args.inputs) == len(args.langs) and len(args.inputs) == len(args.outputs)
with contextlib.ExitStack() as stack:
inputs = [
stack.enter_context(open(input, "r", encoding="utf-8", newline="\n", errors="replace"))
if input != "-" else io.TextIOWrapper(sys.stdin.buffer, encoding='utf-8', errors="replace")
for input in args.inputs
]
outputs = [
stack.enter_context(open(output, "w", encoding="utf-8", newline="\n"))
if output != "-" else sys.stdout
for output in args.outputs
]
with Pool(args.num_workers, initializer=partial(init, args.model)) as p:
skip_cnt = 0
for lines, preds in p.imap(pred, list(zip(*inputs)), chunksize=500):
if not all(a == b for a, b in zip(preds, args.langs)):
skip_cnt += 1
continue
for line, output_h in zip(lines, outputs):
print(line.strip(), file=output_h)
print(f"Skipped {skip_cnt} lines.")
if __name__ == "__main__":
main()
| EXA-1-master | exa/libraries/fairseq/examples/multilingual/data_scripts/utils/fasttext_multi_filter.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import argparse
def deup(src_file, tgt_file, src_file_out, tgt_file_out):
seen = set()
dup_count = 0
with open(src_file, encoding='utf-8') as fsrc, \
open(tgt_file, encoding='utf-8') as ftgt, \
open(src_file_out, 'w', encoding='utf-8') as fsrc_out, \
open(tgt_file_out, 'w', encoding='utf-8') as ftgt_out:
for s, t in zip(fsrc, ftgt):
if (s, t) not in seen:
fsrc_out.write(s)
ftgt_out.write(t)
seen.add((s, t))
else:
dup_count += 1
    print(f'number of duplications: {dup_count}')
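# Example usage (hypothetical file names):
#   deup("train.en-fr.en", "train.en-fr.fr", "train.dedup.en", "train.dedup.fr")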
def main():
parser = argparse.ArgumentParser()
parser.add_argument("--src-file", type=str, required=True,
help="src file")
parser.add_argument("--tgt-file", type=str, required=True,
help="tgt file")
parser.add_argument("--src-file-out", type=str, required=True,
help="src ouptut file")
parser.add_argument("--tgt-file-out", type=str, required=True,
help="tgt ouput file")
args = parser.parse_args()
deup(args.src_file, args.tgt_file, args.src_file_out, args.tgt_file_out)
if __name__ == "__main__":
main()
| EXA-1-master | exa/libraries/fairseq/examples/multilingual/data_scripts/utils/dedup.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
import torch.nn as nn
import torch.nn.functional as F
from fairseq import options, utils
from fairseq.models import (
FairseqEncoder,
FairseqIncrementalDecoder,
FairseqEncoderDecoderModel,
register_model,
register_model_architecture,
)
@register_model("laser_lstm")
class LSTMModel(FairseqEncoderDecoderModel):
def __init__(self, encoder, decoder):
super().__init__(encoder, decoder)
def forward(
self,
src_tokens,
src_lengths,
prev_output_tokens=None,
tgt_tokens=None,
tgt_lengths=None,
target_language_id=None,
dataset_name="",
):
assert target_language_id is not None
src_encoder_out = self.encoder(src_tokens, src_lengths, dataset_name)
return self.decoder(
prev_output_tokens, src_encoder_out, lang_id=target_language_id
)
@staticmethod
def add_args(parser):
"""Add model-specific arguments to the parser."""
parser.add_argument(
"--dropout",
default=0.1,
type=float,
metavar="D",
help="dropout probability",
)
parser.add_argument(
"--encoder-embed-dim",
type=int,
metavar="N",
help="encoder embedding dimension",
)
parser.add_argument(
"--encoder-embed-path",
default=None,
type=str,
metavar="STR",
help="path to pre-trained encoder embedding",
)
parser.add_argument(
"--encoder-hidden-size", type=int, metavar="N", help="encoder hidden size"
)
parser.add_argument(
"--encoder-layers", type=int, metavar="N", help="number of encoder layers"
)
parser.add_argument(
"--encoder-bidirectional",
action="store_true",
help="make all layers of encoder bidirectional",
)
parser.add_argument(
"--decoder-embed-dim",
type=int,
metavar="N",
help="decoder embedding dimension",
)
parser.add_argument(
"--decoder-embed-path",
default=None,
type=str,
metavar="STR",
help="path to pre-trained decoder embedding",
)
parser.add_argument(
"--decoder-hidden-size", type=int, metavar="N", help="decoder hidden size"
)
parser.add_argument(
"--decoder-layers", type=int, metavar="N", help="number of decoder layers"
)
parser.add_argument(
"--decoder-out-embed-dim",
type=int,
metavar="N",
help="decoder output embedding dimension",
)
parser.add_argument(
"--decoder-zero-init",
type=str,
metavar="BOOL",
help="initialize the decoder hidden/cell state to zero",
)
parser.add_argument(
"--decoder-lang-embed-dim",
type=int,
metavar="N",
help="decoder language embedding dimension",
)
parser.add_argument(
"--fixed-embeddings",
action="store_true",
help="keep embeddings fixed (ENCODER ONLY)",
) # TODO Also apply to decoder embeddings?
# Granular dropout settings (if not specified these default to --dropout)
parser.add_argument(
"--encoder-dropout-in",
type=float,
metavar="D",
help="dropout probability for encoder input embedding",
)
parser.add_argument(
"--encoder-dropout-out",
type=float,
metavar="D",
help="dropout probability for encoder output",
)
parser.add_argument(
"--decoder-dropout-in",
type=float,
metavar="D",
help="dropout probability for decoder input embedding",
)
parser.add_argument(
"--decoder-dropout-out",
type=float,
metavar="D",
help="dropout probability for decoder output",
)
@classmethod
def build_model(cls, args, task):
"""Build a new model instance."""
# make sure that all args are properly defaulted (in case there are any new ones)
base_architecture(args)
def load_pretrained_embedding_from_file(embed_path, dictionary, embed_dim):
num_embeddings = len(dictionary)
padding_idx = dictionary.pad()
embed_tokens = Embedding(num_embeddings, embed_dim, padding_idx)
embed_dict = utils.parse_embedding(embed_path)
utils.print_embed_overlap(embed_dict, dictionary)
return utils.load_embedding(embed_dict, dictionary, embed_tokens)
pretrained_encoder_embed = None
if args.encoder_embed_path:
pretrained_encoder_embed = load_pretrained_embedding_from_file(
args.encoder_embed_path, task.source_dictionary, args.encoder_embed_dim
)
pretrained_decoder_embed = None
if args.decoder_embed_path:
pretrained_decoder_embed = load_pretrained_embedding_from_file(
args.decoder_embed_path, task.target_dictionary, args.decoder_embed_dim
)
num_langs = task.num_tasks if hasattr(task, "num_tasks") else 0
encoder = LSTMEncoder(
dictionary=task.source_dictionary,
embed_dim=args.encoder_embed_dim,
hidden_size=args.encoder_hidden_size,
num_layers=args.encoder_layers,
dropout_in=args.encoder_dropout_in,
dropout_out=args.encoder_dropout_out,
bidirectional=args.encoder_bidirectional,
pretrained_embed=pretrained_encoder_embed,
fixed_embeddings=args.fixed_embeddings,
)
decoder = LSTMDecoder(
dictionary=task.target_dictionary,
embed_dim=args.decoder_embed_dim,
hidden_size=args.decoder_hidden_size,
out_embed_dim=args.decoder_out_embed_dim,
num_layers=args.decoder_layers,
dropout_in=args.decoder_dropout_in,
dropout_out=args.decoder_dropout_out,
zero_init=options.eval_bool(args.decoder_zero_init),
encoder_embed_dim=args.encoder_embed_dim,
encoder_output_units=encoder.output_units,
pretrained_embed=pretrained_decoder_embed,
num_langs=num_langs,
lang_embed_dim=args.decoder_lang_embed_dim,
)
return cls(encoder, decoder)
class LSTMEncoder(FairseqEncoder):
"""LSTM encoder."""
def __init__(
self,
dictionary,
embed_dim=512,
hidden_size=512,
num_layers=1,
dropout_in=0.1,
dropout_out=0.1,
bidirectional=False,
left_pad=True,
pretrained_embed=None,
padding_value=0.0,
fixed_embeddings=False,
):
super().__init__(dictionary)
self.num_layers = num_layers
self.dropout_in = dropout_in
self.dropout_out = dropout_out
self.bidirectional = bidirectional
self.hidden_size = hidden_size
num_embeddings = len(dictionary)
self.padding_idx = dictionary.pad()
if pretrained_embed is None:
self.embed_tokens = Embedding(num_embeddings, embed_dim, self.padding_idx)
else:
self.embed_tokens = pretrained_embed
if fixed_embeddings:
self.embed_tokens.weight.requires_grad = False
self.lstm = LSTM(
input_size=embed_dim,
hidden_size=hidden_size,
num_layers=num_layers,
dropout=self.dropout_out if num_layers > 1 else 0.0,
bidirectional=bidirectional,
)
self.left_pad = left_pad
self.padding_value = padding_value
self.output_units = hidden_size
if bidirectional:
self.output_units *= 2
def forward(self, src_tokens, src_lengths, dataset_name):
if self.left_pad:
# convert left-padding to right-padding
src_tokens = utils.convert_padding_direction(
src_tokens,
self.padding_idx,
left_to_right=True,
)
bsz, seqlen = src_tokens.size()
# embed tokens
x = self.embed_tokens(src_tokens)
x = F.dropout(x, p=self.dropout_in, training=self.training)
# B x T x C -> T x B x C
x = x.transpose(0, 1)
# pack embedded source tokens into a PackedSequence
try:
packed_x = nn.utils.rnn.pack_padded_sequence(x, src_lengths.data.tolist())
except BaseException:
raise Exception(f"Packing failed in dataset {dataset_name}")
# apply LSTM
if self.bidirectional:
state_size = 2 * self.num_layers, bsz, self.hidden_size
else:
state_size = self.num_layers, bsz, self.hidden_size
h0 = x.data.new(*state_size).zero_()
c0 = x.data.new(*state_size).zero_()
packed_outs, (final_hiddens, final_cells) = self.lstm(packed_x, (h0, c0))
# unpack outputs and apply dropout
x, _ = nn.utils.rnn.pad_packed_sequence(
packed_outs, padding_value=self.padding_value
)
x = F.dropout(x, p=self.dropout_out, training=self.training)
assert list(x.size()) == [seqlen, bsz, self.output_units]
if self.bidirectional:
def combine_bidir(outs):
return torch.cat(
[
torch.cat([outs[2 * i], outs[2 * i + 1]], dim=0).view(
1, bsz, self.output_units
)
for i in range(self.num_layers)
],
dim=0,
)
final_hiddens = combine_bidir(final_hiddens)
final_cells = combine_bidir(final_cells)
encoder_padding_mask = src_tokens.eq(self.padding_idx).t()
# Set padded outputs to -inf so they are not selected by max-pooling
padding_mask = src_tokens.eq(self.padding_idx).t().unsqueeze(-1)
if padding_mask.any():
x = x.float().masked_fill_(padding_mask, float("-inf")).type_as(x)
# Build the sentence embedding by max-pooling over the encoder outputs
sentemb = x.max(dim=0)[0]
return {
"sentemb": sentemb,
"encoder_out": (x, final_hiddens, final_cells),
"encoder_padding_mask": encoder_padding_mask
if encoder_padding_mask.any()
else None,
}
def reorder_encoder_out(self, encoder_out_dict, new_order):
encoder_out_dict["sentemb"] = encoder_out_dict["sentemb"].index_select(
0, new_order
)
encoder_out_dict["encoder_out"] = tuple(
eo.index_select(1, new_order) for eo in encoder_out_dict["encoder_out"]
)
if encoder_out_dict["encoder_padding_mask"] is not None:
encoder_out_dict["encoder_padding_mask"] = encoder_out_dict[
"encoder_padding_mask"
].index_select(1, new_order)
return encoder_out_dict
def max_positions(self):
"""Maximum input length supported by the encoder."""
return int(1e5) # an arbitrary large number
class LSTMDecoder(FairseqIncrementalDecoder):
"""LSTM decoder."""
def __init__(
self,
dictionary,
embed_dim=512,
hidden_size=512,
out_embed_dim=512,
num_layers=1,
dropout_in=0.1,
dropout_out=0.1,
zero_init=False,
encoder_embed_dim=512,
encoder_output_units=512,
pretrained_embed=None,
num_langs=1,
lang_embed_dim=0,
):
super().__init__(dictionary)
self.dropout_in = dropout_in
self.dropout_out = dropout_out
self.hidden_size = hidden_size
num_embeddings = len(dictionary)
padding_idx = dictionary.pad()
if pretrained_embed is None:
self.embed_tokens = Embedding(num_embeddings, embed_dim, padding_idx)
else:
self.embed_tokens = pretrained_embed
self.layers = nn.ModuleList(
[
LSTMCell(
input_size=encoder_output_units + embed_dim + lang_embed_dim
if layer == 0
else hidden_size,
hidden_size=hidden_size,
)
for layer in range(num_layers)
]
)
if hidden_size != out_embed_dim:
self.additional_fc = Linear(hidden_size, out_embed_dim)
self.fc_out = Linear(out_embed_dim, num_embeddings, dropout=dropout_out)
if zero_init:
self.sentemb2init = None
else:
self.sentemb2init = Linear(
encoder_output_units, 2 * num_layers * hidden_size
)
if lang_embed_dim == 0:
self.embed_lang = None
else:
self.embed_lang = nn.Embedding(num_langs, lang_embed_dim)
nn.init.uniform_(self.embed_lang.weight, -0.1, 0.1)
def forward(
self, prev_output_tokens, encoder_out_dict, incremental_state=None, lang_id=0
):
sentemb = encoder_out_dict["sentemb"]
encoder_out = encoder_out_dict["encoder_out"]
if incremental_state is not None:
prev_output_tokens = prev_output_tokens[:, -1:]
bsz, seqlen = prev_output_tokens.size()
# get outputs from encoder
encoder_outs, _, _ = encoder_out[:3]
srclen = encoder_outs.size(0)
# embed tokens
x = self.embed_tokens(prev_output_tokens)
x = F.dropout(x, p=self.dropout_in, training=self.training)
# embed language identifier
if self.embed_lang is not None:
lang_ids = prev_output_tokens.data.new_full((bsz,), lang_id)
langemb = self.embed_lang(lang_ids)
# TODO Should we dropout here???
# B x T x C -> T x B x C
x = x.transpose(0, 1)
# initialize previous states (or get from cache during incremental generation)
cached_state = utils.get_incremental_state(
self, incremental_state, "cached_state"
)
if cached_state is not None:
prev_hiddens, prev_cells, input_feed = cached_state
else:
num_layers = len(self.layers)
if self.sentemb2init is None:
prev_hiddens = [
x.data.new(bsz, self.hidden_size).zero_() for i in range(num_layers)
]
prev_cells = [
x.data.new(bsz, self.hidden_size).zero_() for i in range(num_layers)
]
else:
init = self.sentemb2init(sentemb)
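                # sentemb2init packs [h_0, c_0, h_1, c_1, ...] along the feature dimension;
                # the slices below recover the per-layer initial hidden and cell states.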
prev_hiddens = [
init[:, (2 * i) * self.hidden_size : (2 * i + 1) * self.hidden_size]
for i in range(num_layers)
]
prev_cells = [
init[
:,
(2 * i + 1) * self.hidden_size : (2 * i + 2) * self.hidden_size,
]
for i in range(num_layers)
]
input_feed = x.data.new(bsz, self.hidden_size).zero_()
attn_scores = x.data.new(srclen, seqlen, bsz).zero_()
outs = []
for j in range(seqlen):
if self.embed_lang is None:
input = torch.cat((x[j, :, :], sentemb), dim=1)
else:
input = torch.cat((x[j, :, :], sentemb, langemb), dim=1)
for i, rnn in enumerate(self.layers):
# recurrent cell
hidden, cell = rnn(input, (prev_hiddens[i], prev_cells[i]))
# hidden state becomes the input to the next layer
input = F.dropout(hidden, p=self.dropout_out, training=self.training)
# save state for next time step
prev_hiddens[i] = hidden
prev_cells[i] = cell
out = hidden
out = F.dropout(out, p=self.dropout_out, training=self.training)
# input feeding
input_feed = out
# save final output
outs.append(out)
# cache previous states (no-op except during incremental generation)
utils.set_incremental_state(
self,
incremental_state,
"cached_state",
(prev_hiddens, prev_cells, input_feed),
)
# collect outputs across time steps
x = torch.cat(outs, dim=0).view(seqlen, bsz, self.hidden_size)
# T x B x C -> B x T x C
x = x.transpose(1, 0)
# srclen x tgtlen x bsz -> bsz x tgtlen x srclen
attn_scores = attn_scores.transpose(0, 2)
# project back to size of vocabulary
if hasattr(self, "additional_fc"):
x = self.additional_fc(x)
x = F.dropout(x, p=self.dropout_out, training=self.training)
x = self.fc_out(x)
return x, attn_scores
def reorder_incremental_state(self, incremental_state, new_order):
super().reorder_incremental_state(incremental_state, new_order)
cached_state = utils.get_incremental_state(
self, incremental_state, "cached_state"
)
if cached_state is None:
return
def reorder_state(state):
if isinstance(state, list):
return [reorder_state(state_i) for state_i in state]
return state.index_select(0, new_order)
new_state = tuple(map(reorder_state, cached_state))
utils.set_incremental_state(self, incremental_state, "cached_state", new_state)
def max_positions(self):
"""Maximum output length supported by the decoder."""
return int(1e5) # an arbitrary large number
def Embedding(num_embeddings, embedding_dim, padding_idx):
m = nn.Embedding(num_embeddings, embedding_dim, padding_idx=padding_idx)
nn.init.uniform_(m.weight, -0.1, 0.1)
nn.init.constant_(m.weight[padding_idx], 0)
return m
def LSTM(input_size, hidden_size, **kwargs):
m = nn.LSTM(input_size, hidden_size, **kwargs)
for name, param in m.named_parameters():
if "weight" in name or "bias" in name:
param.data.uniform_(-0.1, 0.1)
return m
def LSTMCell(input_size, hidden_size, **kwargs):
m = nn.LSTMCell(input_size, hidden_size, **kwargs)
for name, param in m.named_parameters():
if "weight" in name or "bias" in name:
param.data.uniform_(-0.1, 0.1)
return m
def Linear(in_features, out_features, bias=True, dropout=0):
"""Weight-normalized Linear layer (input: N x T x C)"""
m = nn.Linear(in_features, out_features, bias=bias)
m.weight.data.uniform_(-0.1, 0.1)
if bias:
m.bias.data.uniform_(-0.1, 0.1)
return m
@register_model_architecture("laser_lstm", "laser_lstm")
def base_architecture(args):
args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 512)
args.encoder_embed_path = getattr(args, "encoder_embed_path", None)
args.encoder_hidden_size = getattr(
args, "encoder_hidden_size", args.encoder_embed_dim
)
args.encoder_layers = getattr(args, "encoder_layers", 1)
args.encoder_bidirectional = getattr(args, "encoder_bidirectional", False)
args.encoder_dropout_in = getattr(args, "encoder_dropout_in", args.dropout)
args.encoder_dropout_out = getattr(args, "encoder_dropout_out", args.dropout)
args.decoder_embed_dim = getattr(args, "decoder_embed_dim", 512)
args.decoder_embed_path = getattr(args, "decoder_embed_path", None)
args.decoder_hidden_size = getattr(
args, "decoder_hidden_size", args.decoder_embed_dim
)
args.decoder_layers = getattr(args, "decoder_layers", 1)
args.decoder_out_embed_dim = getattr(args, "decoder_out_embed_dim", 512)
args.decoder_dropout_in = getattr(args, "decoder_dropout_in", args.dropout)
args.decoder_dropout_out = getattr(args, "decoder_dropout_out", args.dropout)
args.decoder_zero_init = getattr(args, "decoder_zero_init", "0")
args.decoder_lang_embed_dim = getattr(args, "decoder_lang_embed_dim", 0)
args.fixed_embeddings = getattr(args, "fixed_embeddings", False)
| EXA-1-master | exa/libraries/fairseq/examples/laser/laser_src/laser_lstm.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
from typing import Any, Dict, List, Optional
from torch import Tensor
import torch
import torch.nn as nn
from fairseq.models import (
FairseqEncoderDecoderModel,
register_model,
register_model_architecture,
)
from fairseq.models.transformer import (
base_architecture,
Embedding,
TransformerModel,
TransformerEncoder,
TransformerDecoder,
)
from fairseq.modules import (
TransformerDecoderLayer,
)
logger = logging.getLogger(__name__)
@register_model("laser_transformer")
class LaserTransformerModel(FairseqEncoderDecoderModel):
"""Train Transformer for LASER task
Requires --task laser
"""
def __init__(self, encoder, decoder):
super().__init__(encoder, decoder)
def forward(
self,
src_tokens,
src_lengths,
prev_output_tokens=None,
tgt_tokens=None,
tgt_lengths=None,
target_language_id=-1,
dataset_name="",
):
laser_encoder_out = self.encoder(src_tokens, src_lengths)
return self.decoder(
prev_output_tokens, laser_encoder_out, lang_id=target_language_id
)
@staticmethod
def add_args(parser):
"""Add model-specific arguments to the parser."""
TransformerModel.add_args(parser)
parser.add_argument(
"--decoder-lang-embed-dim",
type=int,
metavar="N",
help="decoder language embedding dimension",
)
@classmethod
def build_model(cls, args, task):
base_laser_transformer_architecture(args)
num_langs = task.num_tasks if hasattr(task, "num_tasks") else 0
def load_embed_tokens(dictionary, embed_dim):
num_embeddings = len(dictionary)
padding_idx = dictionary.pad()
return Embedding(num_embeddings, embed_dim, padding_idx)
encoder_embed_tokens = load_embed_tokens(
task.source_dictionary, args.encoder_embed_dim
)
decoder_embed_tokens = load_embed_tokens(
task.target_dictionary, args.decoder_embed_dim
)
num_langs = task.num_tasks if hasattr(task, "num_tasks") else 0
encoder = LaserTransformerEncoder(
args, task.source_dictionary, encoder_embed_tokens
)
decoder = LaserTransformerDecoder(
args,
task.target_dictionary,
decoder_embed_tokens,
num_langs=num_langs,
lang_embed_dim=args.decoder_lang_embed_dim,
)
return cls(encoder, decoder)
class LaserTransformerEncoder(TransformerEncoder):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def forward(self, src_tokens, *args, **kwargs):
encoder_out = super().forward(src_tokens, *args, **kwargs)
x = encoder_out["encoder_out"][0] # T x B x C
padding_mask = src_tokens.eq(self.padding_idx).t().unsqueeze(-1)
if padding_mask.any():
x = x.float().masked_fill_(padding_mask, float("-inf")).type_as(x)
# Build the sentence embedding by max-pooling over the encoder outputs
sentemb = x.max(dim=0)[0]
        # The PyTorch Mobile lite interpreter does not support returning a NamedTuple from
        # `forward`, so we use a dictionary instead.
        # TorchScript does not support mixed value types, so the values are all lists.
# The empty list is equivalent to None.
return {"sentemb": [sentemb]} # B x C
@torch.jit.export
def reorder_encoder_out(self, encoder_out: Dict[str, List[Tensor]], new_order):
"""
Same as the one in transformer.py, with new_sentemb
"""
if len(encoder_out["sentemb"]) == 0:
new_sentemb = []
else:
new_sentemb = [encoder_out["sentemb"][0].index_select(0, new_order)]
return {
"sentemb": new_sentemb, # B x C
}
class LaserTransformerDecoder(TransformerDecoder):
def __init__(self, args, dictionary, *kargs, **kwargs):
self.num_langs = kwargs.get("num_langs", 1)
self.lang_embed_dim = kwargs.get("lang_embed_dim", 0)
kwargs.pop("num_langs", None)
kwargs.pop("lang_embed_dim", None)
super().__init__(args, dictionary, *kargs, **kwargs, no_encoder_attn=True)
if self.lang_embed_dim == 0:
self.embed_lang = None
else:
self.embed_lang = nn.Embedding(self.num_langs, self.lang_embed_dim)
nn.init.uniform_(self.embed_lang.weight, -0.1, 0.1)
if self.output_projection is not None:
laser_output_embed_dim = (
self.output_embed_dim + self.lang_embed_dim + args.encoder_embed_dim
)
self.output_projection = nn.Linear(
laser_output_embed_dim, len(dictionary), bias=False
)
nn.init.normal_(
self.output_projection.weight,
mean=0,
std=laser_output_embed_dim ** -0.5,
)
def build_decoder_layer(self, args, no_encoder_attn=False):
decoder_embed_dim = args.decoder_embed_dim
args.decoder_embed_dim = (
decoder_embed_dim + self.lang_embed_dim + args.encoder_embed_dim
)
res = TransformerDecoderLayer(args, no_encoder_attn=True)
args.decoder_embed_dim = decoder_embed_dim
return res
def extract_features(
self,
prev_output_tokens,
encoder_out: Optional[Dict[str, List[Tensor]]],
incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]] = None,
full_context_alignment: bool = False,
alignment_layer: Optional[int] = None,
alignment_heads: Optional[int] = None,
lang_id: Optional[int] = None,
):
"""
Similar to *forward* but only return features.
Includes several features from "Jointly Learning to Align and
Translate with Transformer Models" (Garg et al., EMNLP 2019).
Args:
full_context_alignment (bool, optional): don't apply
auto-regressive mask to self-attention (default: False).
alignment_layer (int, optional): return mean alignment over
heads at this layer (default: last layer).
alignment_heads (int, optional): only average alignment over
this many heads (default: all heads).
Returns:
tuple:
- the decoder's features of shape `(batch, tgt_len, embed_dim)`
- a dictionary with any model-specific outputs
"""
if alignment_layer is None:
alignment_layer = self.num_layers - 1
# embed positions
positions = (
self.embed_positions(
prev_output_tokens, incremental_state=incremental_state
)
if self.embed_positions is not None
else None
)
if incremental_state is not None:
prev_output_tokens = prev_output_tokens[:, -1:]
if positions is not None:
positions = positions[:, -1:]
bsz, seqlen = prev_output_tokens.size()
# embed tokens and positions
x = self.embed_scale * self.embed_tokens(prev_output_tokens)
if self.quant_noise is not None:
x = self.quant_noise(x)
if self.project_in_dim is not None:
x = self.project_in_dim(x)
if positions is not None:
x += positions
if self.layernorm_embedding is not None:
x = self.layernorm_embedding(x)
x = self.dropout_module(x)
# B x T x C -> T x B x C
x = x.transpose(0, 1)
if self.embed_lang is not None:
lang_ids = prev_output_tokens.data.new_full((bsz,), lang_id)
langemb = self.embed_lang(lang_ids)
langemb = langemb.unsqueeze(0)
repeat_vals = [x.shape[0] // langemb.shape[0]] + [-1] * (
len(langemb.shape) - 1
)
x = torch.cat((x, langemb.expand(*repeat_vals)), dim=-1)
sentemb = encoder_out["sentemb"][0]
sentemb = sentemb.unsqueeze(0)
repeat_vals = [x.shape[0] // sentemb.shape[0]] + [-1] * (len(sentemb.shape) - 1)
x = torch.cat((x, sentemb.expand(*repeat_vals)), dim=-1)
self_attn_padding_mask: Optional[Tensor] = None
if self.cross_self_attention or prev_output_tokens.eq(self.padding_idx).any():
self_attn_padding_mask = prev_output_tokens.eq(self.padding_idx)
# decoder layers
attn: Optional[Tensor] = None
inner_states: List[Optional[Tensor]] = [x]
for idx, layer in enumerate(self.layers):
if incremental_state is None and not full_context_alignment:
self_attn_mask = self.buffered_future_mask(x)
else:
self_attn_mask = None
x, layer_attn, _ = layer(
x,
None,
None,
incremental_state,
self_attn_mask=self_attn_mask,
self_attn_padding_mask=self_attn_padding_mask,
need_attn=bool((idx == alignment_layer)),
need_head_weights=bool((idx == alignment_layer)),
)
inner_states.append(x)
if layer_attn is not None and idx == alignment_layer:
attn = layer_attn.float().to(x)
if attn is not None:
if alignment_heads is not None:
attn = attn[:alignment_heads]
# average probabilities over heads
attn = attn.mean(dim=0)
if self.layer_norm is not None:
x = self.layer_norm(x)
# T x B x C -> B x T x C
x = x.transpose(0, 1)
if self.project_out_dim is not None:
x = self.project_out_dim(x)
return x, {"attn": [attn], "inner_states": inner_states}
def forward(
self,
prev_output_tokens,
encoder_out: Optional[Dict[str, List[Tensor]]] = None,
incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]] = None,
features_only: bool = False,
alignment_layer: Optional[int] = None,
alignment_heads: Optional[int] = None,
src_lengths: Optional[Any] = None,
return_all_hiddens: bool = False,
lang_id: Optional[int] = None,
):
"""
Args:
prev_output_tokens (LongTensor): previous decoder outputs of shape
`(batch, tgt_len)`, for teacher forcing
encoder_out (optional): output from the encoder, used for
encoder-side attention
incremental_state (dict): dictionary used for storing state during
:ref:`Incremental decoding`
features_only (bool, optional): only return features without
applying output layer (default: False).
Returns:
tuple:
- the decoder's output of shape `(batch, tgt_len, vocab)`
- a dictionary with any model-specific outputs
"""
assert lang_id is not None
x, extra = self.extract_features(
prev_output_tokens,
encoder_out=encoder_out,
incremental_state=incremental_state,
alignment_layer=alignment_layer,
alignment_heads=alignment_heads,
lang_id=lang_id,
)
if not features_only:
x = self.output_layer(x)
return x, extra
@register_model_architecture("laser_transformer", "laser_transformer")
def base_laser_transformer_architecture(args):
base_architecture(args)
args.decoder_lang_embed_dim = getattr(args, "decoder_lang_embed_dim", 0)
| EXA-1-master | exa/libraries/fairseq/examples/laser/laser_src/laser_transformer.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from collections import OrderedDict
import numpy as np
from fairseq.data import BaseWrapperDataset, FairseqDataset, iterators
class MultiItr(object):
def __init__(self, itr):
self.itr = itr
self._counts = [0 for x in itr]
def __len__(self):
return sum(len(itr) for itr in self.itr)
def __iter__(self):
return self
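    # Draw the next batch from the iterator whose consumed fraction is smallest, so all
    # underlying iterators advance at rates proportional to their lengths.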
def __next__(self):
ratios = [count / len(itr) for count, itr in zip(self._counts, self.itr)]
idx = ratios.index(min(ratios))
self._counts[idx] += 1
return next(self.itr[idx])
class MultidatasetEpochBatchIterator(iterators.EpochBatchIterating):
"""A wrapper around multiple epoch batch iterators."""
def __init__(
self,
dataset,
batch_sampler,
seed=1,
num_shards=1,
shard_id=0,
num_workers=0,
epoch=1,
):
assert isinstance(dataset, OrderedDict)
assert len(dataset)
assert isinstance(dataset[next(iter(dataset))], FairseqDataset)
self.iterators = []
self.epoch = epoch
for key, dt in dataset.items():
epoch_iter = iterators.EpochBatchIterator(
dataset=dt,
collate_fn=dt.collater,
batch_sampler=batch_sampler[key],
seed=seed,
num_shards=num_shards,
shard_id=shard_id,
num_workers=0,
epoch=epoch,
)
self.iterators.append(epoch_iter)
def __len__(self):
return sum(len(itr) for itr in self.iterators)
def next_epoch_itr(self, shuffle=True, fix_batches_to_gpus=False):
# `self.epoch += 1` should be handled by underlying `EpochBatchIterator`s.
return MultiItr(
[
itr.next_epoch_itr(
shuffle=shuffle, fix_batches_to_gpus=fix_batches_to_gpus
)
for itr in self.iterators
]
)
def end_of_epoch(self):
return all(itr.end_of_epoch() for itr in self.iterators)
@property
def next_epoch_idx(self):
"""Return the epoch index after *next_epoch_itr* is called."""
epochs = [itr.next_epoch_idx for itr in self.iterators]
self.epoch = epochs[0]
assert all(epoch == self.epoch for epoch in epochs)
return self.epoch
@property
def iterations_in_epoch(self):
return sum(itr.iterations_in_epoch for itr in self.iterators)
def state_dict(self):
return {
"iterators": [it.state_dict() for it in self.iterators],
"epoch": self.epoch,
}
def load_state_dict(self, state_dict):
self.epoch = state_dict["epoch"]
for it, d in zip(self.iterators, state_dict["iterators"]):
it.load_state_dict(d)
class MultitaskDatasetWrapper(BaseWrapperDataset):
"""A wrapper for a multitask dataset."""
def __init__(self, dataset, target_language_id, sample=1.0, name=""):
super().__init__(dataset)
self.target_language_id = target_language_id
self.sample = sample
self.name = name
def collater(self, *args, **kwargs):
ans = self.dataset.collater(*args, **kwargs)
if "net_input" in ans:
ans["net_input"]["target_language_id"] = self.target_language_id
ans["net_input"]["dataset_name"] = self.name
return ans
def num_tokens(self, *args, **kwargs):
return self.dataset.num_tokens(*args, **kwargs)
def ordered_indices(self, *args, **kwargs):
indices = self.dataset.ordered_indices(*args, **kwargs)
# Hacky solution for sampling
size = int(self.sample * indices.shape[0])
return indices.take(np.sort(np.random.permutation(indices.shape[0])[:size]))
def size(self, index: int):
return self.dataset.size(index)
@property
def supports_prefetch(self):
"""Whether this dataset supports prefetching."""
return getattr(self.dataset, "supports_prefetch", False)
def prefetch(self, indices):
return self.dataset.prefetch(indices)
| EXA-1-master | exa/libraries/fairseq/examples/laser/laser_src/multitask_data_utils.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from .laser_task import * # noqa
from .laser_lstm import * # noqa
from .laser_transformer import * # noqa
| EXA-1-master | exa/libraries/fairseq/examples/laser/laser_src/__init__.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from collections import OrderedDict, defaultdict
import json
import os
import logging
from argparse import ArgumentError
from fairseq import options, models
from fairseq.data import (
data_utils,
Dictionary,
LanguagePairDataset,
IndexedDataset,
FairseqDataset,
)
from .multitask_data_utils import (
MultitaskDatasetWrapper,
MultidatasetEpochBatchIterator,
)
from fairseq.tasks import LegacyFairseqTask, register_task
logger = logging.getLogger(__name__)
@register_task("laser")
class LaserTask(LegacyFairseqTask):
@staticmethod
def add_args(parser):
"""Add task-specific arguments to the parser."""
parser.add_argument(
"configfile", metavar="PATH", help="dataset configuration file in json"
)
parser.add_argument(
"--weighting-alpha",
type=float,
default=None,
help="alpha for automatic weighting",
)
parser.add_argument(
"--raw-text", action="store_true", help="load raw text dataset"
)
parser.add_argument(
"--left-pad-source",
default="True",
type=str,
metavar="BOOL",
help="pad the source on the left (default: True)",
)
parser.add_argument(
"--left-pad-target",
default="False",
type=str,
metavar="BOOL",
help="pad the target on the left (default: False)",
)
try:
parser.add_argument(
"--max-source-positions",
default=1024,
type=int,
metavar="N",
help="max number of tokens in the source sequence",
)
parser.add_argument(
"--max-target-positions",
default=1024,
type=int,
metavar="N",
help="max number of tokens in the target sequence",
)
except ArgumentError:
# this might have already been defined. Once we transition this to hydra it should be fine to add it here.
pass
def __init__(self, args, config, src_dictionary, tgt_dictionary, num_tasks):
super().__init__(args)
self.config = config
self.src_dictionary = src_dictionary
self.tgt_dictionary = tgt_dictionary
self.num_tasks = num_tasks
@classmethod
def setup_task(cls, args, **kwargs):
with open(args.configfile, "r") as f:
config = json.load(f)
num_tasks = max(dataset["id"] for dataset in config["train"]) + 1
args.left_pad_source = options.eval_bool(args.left_pad_source)
args.left_pad_target = options.eval_bool(args.left_pad_target)
src_dictionary = Dictionary.load(config["src_vocab"])
tgt_dictionary = Dictionary.load(config["tgt_vocab"])
logger.info(
"| src Dictionary {} : {} types".format(
config["src_vocab"], len(src_dictionary)
)
)
logger.info(
"| tgt Dictionary {} : {} types".format(
config["tgt_vocab"], len(tgt_dictionary)
)
)
return cls(args, config, src_dictionary, tgt_dictionary, num_tasks)
# Experimental overriding for backtranslation
def build_model(self, args, from_checkpoint=False):
model = models.build_model(args, self)
return model
def dataset(self, split):
if split not in self.datasets:
raise KeyError("Dataset not loaded: " + split)
return self.datasets[split]
def load_dataset(self, split, epoch=1, **kwargs):
"""Load a dataset split."""
def indexed_dataset(path, dictionary):
if self.args.raw_text:
raise Exception("Unable to handle raw text.")
dataset = IndexedDataset(path, fix_lua_indexing=True)
return dataset
pair_datasets = OrderedDict()
if split == "valid":
self.datasets[split] = pair_datasets
return
if split not in self.config:
raise FileNotFoundError(
"Dataset not found in config file: {}".format(split)
)
size_by_corpus = defaultdict(int)
size_sum = 0
size_sum_with_subsampling = 0
init_pair_datasets = {}
for dataset_config in self.config[split]:
src_path = os.path.dirname(dataset_config["src"])
corpus_name = src_path.split("/")[-2]
language_pair_name = src_path.split("/")[-1]
pair_datasets_key = corpus_name + "-" + language_pair_name
logger.info(f"loading... {pair_datasets_key}")
if "src" in dataset_config:
src_dataset = indexed_dataset(
dataset_config["src"], self.src_dictionary
)
else:
src_dataset = None
if "tgt" in dataset_config:
tgt_dataset = indexed_dataset(
dataset_config["tgt"], self.tgt_dictionary
)
else:
tgt_dataset = None
dataset = LanguagePairDataset(
src_dataset,
src_dataset.sizes,
self.src_dictionary,
tgt_dataset,
tgt_dataset.sizes,
self.tgt_dictionary,
left_pad_source=self.args.left_pad_source,
left_pad_target=self.args.left_pad_target,
)
if pair_datasets_key in init_pair_datasets:
logger.warning(
f"Ignoring already added {pair_datasets_key}. "
f"Consider using `sample` key in order to upsample."
)
else:
init_pair_datasets[pair_datasets_key] = {
"dataset": dataset,
"sample": dataset_config.get("sample", None),
"id": dataset_config.get("id", None),
"len": len(dataset),
}
length_sum = 0
weighted_freqs_sum = 0
freq_per_dataset = {}
vmax = 0
vmin = 1
weighted_freq_per_dataset = {}
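        # Alpha-weighted upsampling: each dataset without an explicit `sample` gets an
        # upsampling factor of roughly (freq_max / freq_i) ** weighting_alpha, so smaller
        # datasets are replicated more, with alpha controlling how far toward uniform the mix is pushed.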
if self.args.weighting_alpha:
for key in init_pair_datasets:
if init_pair_datasets[key]["sample"] is None:
length_sum += len(init_pair_datasets[key]["dataset"])
for key in init_pair_datasets:
if init_pair_datasets[key]["sample"] is None:
val = float(init_pair_datasets[key]["len"]) / length_sum
freq_per_dataset[key] = val
weighted_freqs_sum += val ** self.args.weighting_alpha
for key in freq_per_dataset:
val = (
freq_per_dataset[key] ** self.args.weighting_alpha
/ weighted_freqs_sum
)
vmin = min(vmin, val)
vmax = max(vmax, val)
weighted_freq_per_dataset[key] = val
for pair_datasets_key in init_pair_datasets:
dataset_config = init_pair_datasets[pair_datasets_key]
dataset = dataset_config["dataset"]
sample = dataset_config["sample"]
if sample is None:
sample = 1.0
if pair_datasets_key in weighted_freq_per_dataset:
w = vmax / weighted_freq_per_dataset[pair_datasets_key]
sample = w
sample = round(sample)
initial_sample = sample
initial_pair_datasets_key = pair_datasets_key
while sample >= 1.0:
assert (
pair_datasets_key not in pair_datasets
), f"{pair_datasets_key} already in"
size_sum_with_subsampling += len(dataset)
pair_datasets[pair_datasets_key] = MultitaskDatasetWrapper(
dataset, dataset_config.get("id", 0), 1.0, name=pair_datasets_key
)
size_sum += len(dataset)
sample -= 1.0
pair_datasets_key += "-up"
assert sample < 1e-6, f"sample remains > 0 {pair_datasets_key}"
logger.info(
f"added pair {initial_pair_datasets_key} length {len(dataset)} new_length = {len(dataset)*initial_sample}"
)
size_by_corpus[corpus_name] += len(dataset)
self.datasets[split] = pair_datasets
logger.info(
f"Datasets number = {len(self.datasets[split])} size = {size_sum} size_sum_with_subsampling = {size_sum_with_subsampling}"
)
@property
def source_dictionary(self):
return self.src_dictionary
@property
def target_dictionary(self):
return self.tgt_dictionary
def get_batch_iterator(
self,
dataset,
max_tokens=None,
max_sentences=None,
max_positions=None,
ignore_invalid_inputs=False,
required_batch_size_multiple=1,
seed=1,
num_shards=1,
shard_id=0,
num_workers=0,
epoch=1,
data_buffer_size=0,
disable_iterator_cache=False,
grouped_shuffling=False,
update_epoch_batch_itr=False,
**kwargs,
):
assert isinstance(dataset, OrderedDict)
assert len(dataset)
assert isinstance(dataset[next(iter(dataset))], FairseqDataset)
# initialize the dataset with the correct starting epoch
for _, dt in dataset.items():
dt.set_epoch(epoch)
indices = OrderedDict()
batch_sampler = OrderedDict()
with data_utils.numpy_seed(seed + epoch):
for key, dt in dataset.items():
logger.info(f"\t ordered_indices {key}")
indices[key] = dt.ordered_indices()
# filter examples that are too large
if max_positions is not None:
for key, dt in dataset.items():
logger.info(f"\t filter_by_size {key}")
indices[key], ignored = dt.filter_indices_by_size(
indices[key], max_positions
)
for key, dt in dataset.items():
logger.info(f"\t batch_by_size {key}")
batch_sampler[key] = data_utils.batch_by_size(
indices[key],
dt.num_tokens,
max_tokens=max_tokens,
max_sentences=max_sentences,
required_batch_size_multiple=required_batch_size_multiple,
)
epoch_iter = MultidatasetEpochBatchIterator(
dataset=dataset,
batch_sampler=batch_sampler,
seed=seed,
num_shards=num_shards,
shard_id=shard_id,
num_workers=num_workers,
epoch=epoch,
)
return epoch_iter
| EXA-1-master | exa/libraries/fairseq/examples/laser/laser_src/laser_task.py |
import setuptools
with open("README.md", "r") as fh:
long_description = fh.read()
setuptools.setup(
name="mmpt",
version="0.0.1",
author="Hu Xu, Po-yao Huang",
author_email="[email protected]",
description="A package for multimodal pretraining.",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/pytorch/fairseq/examples/MMPT",
packages=setuptools.find_packages(),
install_requires=[
],
classifiers=[
"Programming Language :: Python :: 3",
"License :: CC-BY-NC",
"Operating System :: OS Independent",
],
python_requires='>=3.6',
)
| EXA-1-master | exa/libraries/fairseq/examples/MMPT/setup.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import os
from omegaconf import OmegaConf
from mmpt.utils import recursive_config, overwrite_dir
from mmpt_cli.localjob import LocalJob
class JobLauncher(object):
JOB_CONFIG = {
"local": LocalJob,
}
def __init__(self, yaml_file):
self.yaml_file = yaml_file
job_key = "local"
if yaml_file.endswith(".yaml"):
config = recursive_config(yaml_file)
if config.task_type is not None:
job_key = config.task_type.split("_")[0]
else:
raise ValueError("unknown extension of job file:", yaml_file)
self.job_key = job_key
def __call__(self, job_type=None, dryrun=False):
if job_type is not None:
self.job_key = job_type.split("_")[0]
print("[JobLauncher] job_key", self.job_key)
job = JobLauncher.JOB_CONFIG[self.job_key](
self.yaml_file, job_type=job_type, dryrun=dryrun)
return job.submit()
class Pipeline(object):
"""a job that loads yaml config."""
def __init__(self, fn):
"""
        Load a yaml config of a job and save the generated configs as yaml for each task.
        Sets `self.run_yamls` to the list of files to run, as specified by `run_task`.
"""
if fn.endswith(".py"):
# a python command.
self.backend = "python"
self.run_yamls = [fn]
return
job_config = recursive_config(fn)
if job_config.base_dir is None: # single file job config.
self.run_yamls = [fn]
return
self.project_dir = os.path.join("projects", job_config.project_dir)
self.run_dir = os.path.join("runs", job_config.project_dir)
if job_config.run_task is not None:
run_yamls = []
for stage in job_config.run_task:
# each stage can have multiple tasks running in parallel.
if OmegaConf.is_list(stage):
stage_yamls = []
for task_file in stage:
stage_yamls.append(
os.path.join(self.project_dir, task_file))
run_yamls.append(stage_yamls)
else:
run_yamls.append(os.path.join(self.project_dir, stage))
self.run_yamls = run_yamls
configs_to_save = self._overwrite_task(job_config)
self._save_configs(configs_to_save)
def __getitem__(self, idx):
yaml_files = self.run_yamls[idx]
if isinstance(yaml_files, list):
return [JobLauncher(yaml_file) for yaml_file in yaml_files]
return [JobLauncher(yaml_files)]
def __len__(self):
return len(self.run_yamls)
def _save_configs(self, configs_to_save: dict):
# save
os.makedirs(self.project_dir, exist_ok=True)
for config_file in configs_to_save:
config = configs_to_save[config_file]
print("saving", config_file)
OmegaConf.save(config=config, f=config_file)
def _overwrite_task(self, job_config):
configs_to_save = {}
self.base_project_dir = os.path.join("projects", job_config.base_dir)
self.base_run_dir = os.path.join("runs", job_config.base_dir)
for config_sets in job_config.task_group:
overwrite_config = job_config.task_group[config_sets]
if (
overwrite_config.task_list is None
or len(overwrite_config.task_list) == 0
):
                print(
                    "[warning]",
                    config_sets,
                    "has no task_list specified.")
# we don't want this added to a final config.
task_list = overwrite_config.pop("task_list", None)
for config_file in task_list:
config_file_path = os.path.join(
self.base_project_dir, config_file)
config = recursive_config(config_file_path)
# overwrite it.
if overwrite_config:
config = OmegaConf.merge(config, overwrite_config)
overwrite_dir(config, self.run_dir, basedir=self.base_run_dir)
save_file_path = os.path.join(self.project_dir, config_file)
configs_to_save[save_file_path] = config
return configs_to_save
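# Example invocation (hypothetical config path):
#   python locallaunch.py projects/your_project/your_task.yaml --dryrun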
def main(args):
job_type = args.jobtype if args.jobtype else None
# parse multiple pipelines.
pipelines = [Pipeline(fn) for fn in args.yamls.split(",")]
for pipe_id, pipeline in enumerate(pipelines):
if not hasattr(pipeline, "project_dir"):
for job in pipeline[0]:
job(job_type=job_type, dryrun=args.dryrun)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("yamls", type=str)
parser.add_argument(
"--dryrun",
action="store_true",
help="run config and prepare to submit without launch the job.",
)
parser.add_argument(
"--jobtype", type=str, default="",
help="force to run jobs as specified.")
args = parser.parse_args()
main(args)
| EXA-1-master | exa/libraries/fairseq/examples/MMPT/locallaunch.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
try:
# fairseq user dir
from .datasets import FairseqMMDataset
from .losses import FairseqCriterion
from .models import FairseqMMModel
from .tasks import FairseqMMTask
except ImportError:
pass
| EXA-1-master | exa/libraries/fairseq/examples/MMPT/mmpt/__init__.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from .loss import *
from .nce import *
try:
from .fairseqmmloss import *
except ImportError:
pass
try:
from .expnce import *
except ImportError:
pass
| EXA-1-master | exa/libraries/fairseq/examples/MMPT/mmpt/losses/__init__.py |
# Copyright (c) Facebook, Inc. All Rights Reserved
import torch
from torch import nn
class Loss(object):
def __call__(self, *args, **kwargs):
raise NotImplementedError
# Dummy Loss for testing.
class DummyLoss(Loss):
def __init__(self):
self.loss = nn.CrossEntropyLoss()
def __call__(self, logits, targets, **kwargs):
return self.loss(logits, targets)
class DummyK400Loss(Loss):
"""dummy k400 loss for MViT."""
def __init__(self):
self.loss = nn.CrossEntropyLoss()
def __call__(self, logits, targets, **kwargs):
return self.loss(
logits, torch.randint(0, 400, (logits.size(0),), device=logits.device))
class CrossEntropy(Loss):
def __init__(self):
self.loss = nn.CrossEntropyLoss()
def __call__(self, logits, targets, **kwargs):
return self.loss(logits.reshape(-1, logits.size(-1)), targets.reshape(-1))
class ArgmaxCrossEntropy(Loss):
def __init__(self):
self.loss = nn.CrossEntropyLoss()
def __call__(self, logits, targets, **kwargs):
return self.loss(logits, targets.argmax(dim=1))
class BCE(Loss):
def __init__(self):
self.loss = nn.BCEWithLogitsLoss()
def __call__(self, logits, targets, **kwargs):
targets = targets.squeeze(0)
return self.loss(logits, targets)
class NLGLoss(Loss):
def __init__(self):
self.loss = nn.CrossEntropyLoss()
def __call__(self, logits, text_label, **kwargs):
targets = text_label[text_label != -100]
return self.loss(logits, targets)
class MSE(Loss):
def __init__(self):
self.loss = nn.MSELoss()
def __call__(self, logits, targets, **kwargs):
return self.loss(logits, targets)
class L1(Loss):
def __init__(self):
self.loss = nn.L1Loss()
def __call__(self, logits, targets, **kwargs):
return self.loss(logits, targets)
class SmoothL1(Loss):
def __init__(self):
self.loss = nn.SmoothL1Loss()
def __call__(self, logits, targets, **kwargs):
return self.loss(logits, targets)
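

if __name__ == "__main__":
    # Hedged sketch (not part of the original file): the classes above are thin
    # callable wrappers around torch.nn losses; the shapes below are
    # illustrative assumptions only.
    torch.manual_seed(0)
    logits = torch.randn(4, 10)
    targets = torch.randint(0, 10, (4,))
    print("CrossEntropy:", CrossEntropy()(logits, targets).item())
    print("MSE         :", MSE()(logits, torch.zeros_like(logits)).item())
    print("SmoothL1    :", SmoothL1()(logits, torch.zeros_like(logits)).item())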
| EXA-1-master | exa/libraries/fairseq/examples/MMPT/mmpt/losses/loss.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
TODO (huxu): a general fairseq criterion for all your pre-defined losses.
"""
from fairseq.criterions import FairseqCriterion, register_criterion
from fairseq.logging import metrics
@register_criterion("mmloss")
class MMCriterion(FairseqCriterion):
def __init__(self, task):
super().__init__(task)
# TODO (huxu): wrap forward call of loss_fn and eval_fn into task.
self.mmtask = task.mmtask
def forward(self, model, sample):
"""Compute the loss for the given sample.
Returns a tuple with three elements:
1) the loss
2) the sample size, which is used as the denominator for the gradient
3) logging outputs to display while training
"""
outputs = self.mmtask(model, sample)
loss, loss_scalar, max_len, batch_size, sample_size = (
outputs["loss"],
outputs["loss_scalar"],
outputs["max_len"],
outputs["batch_size"],
outputs["sample_size"],
)
logging_output = {
"loss": loss_scalar,
"ntokens": max_len * batch_size, # dummy report.
"nsentences": batch_size, # dummy report.
"sample_size": sample_size,
}
return loss, 1, logging_output
@staticmethod
def reduce_metrics(logging_outputs) -> None:
"""Aggregate logging outputs from data parallel training."""
"""since we use NCE, our actual batch_size is 1 per GPU.
Then we take the mean of each worker."""
loss_sum = sum(log.get("loss", 0.0) for log in logging_outputs)
sample_size = sum(log.get("sample_size", 0) for log in logging_outputs)
metrics.log_scalar("loss", loss_sum / sample_size, round=3)
@staticmethod
def logging_outputs_can_be_summed() -> bool:
"""
Whether the logging outputs returned by `forward` can be summed
across workers prior to calling `reduce_metrics`. Setting this
        to True will improve distributed training speed.
"""
return True
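
# Hedged sketch (not part of the original file): the dict this criterion expects
# from `self.mmtask(model, sample)` mirrors what `Task.__call__` returns in
# mmpt/tasks/task.py, e.g.
#
#   {"loss": <scalar tensor>, "loss_scalar": <float>, "max_len": <int>,
#    "batch_size": <int>, "sample_size": <int>}
#
# from which `forward` builds the (loss, 1, logging_output) tuple fairseq needs.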
| EXA-1-master | exa/libraries/fairseq/examples/MMPT/mmpt/losses/fairseqmmloss.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
softmax-based NCE loss, used by this project.
"""
import torch
from torch import nn
from .loss import Loss
class NCE(Loss):
def __init__(self):
# TODO (huxu): define temperature.
self.loss = nn.CrossEntropyLoss()
def __call__(self, align_scores, **kargs):
# note: we reuse the same shape as cls head in BERT (batch_size, 2)
# but NCE only needs one logits.
# (so we drop all weights in the second neg logits.)
align_scores = align_scores[:, :1]
# duplicate negative examples
batch_size = align_scores.size(0) // 2
pos_scores = align_scores[:batch_size]
neg_scores = align_scores[batch_size:].view(1, batch_size).repeat(
batch_size, 1)
scores = torch.cat([pos_scores, neg_scores], dim=1)
return self.loss(
scores,
torch.zeros(
(batch_size,),
dtype=torch.long,
device=align_scores.device),
)
class T2VContraLoss(Loss):
"""NCE for MM joint space, on softmax text2video matrix.
"""
def __init__(self):
# TODO (huxu): define temperature.
self.loss = nn.CrossEntropyLoss()
def __call__(self, pooled_video, pooled_text, **kargs):
batch_size = pooled_video.size(0)
logits = torch.mm(pooled_text, pooled_video.transpose(1, 0))
targets = torch.arange(
batch_size,
dtype=torch.long,
device=pooled_video.device)
return self.loss(logits, targets)
class V2TContraLoss(Loss):
"""NCE for MM joint space, with softmax on video2text matrix."""
def __init__(self):
# TODO (huxu): define temperature.
self.loss = nn.CrossEntropyLoss()
def __call__(self, pooled_video, pooled_text, **kargs):
batch_size = pooled_video.size(0)
logits = torch.mm(pooled_video, pooled_text.transpose(1, 0))
targets = torch.arange(
batch_size,
dtype=torch.long,
device=pooled_video.device)
return self.loss(logits, targets)
class MMContraLoss(Loss):
def __init__(self):
self.loss = nn.CrossEntropyLoss()
def __call__(self, pooled_video, pooled_text, **kwargs):
logits_per_video = pooled_video @ pooled_text.t()
logits_per_text = pooled_text @ pooled_video.t()
targets = torch.arange(
pooled_video.size(0),
dtype=torch.long,
device=pooled_video.device)
loss_video = self.loss(logits_per_video, targets)
loss_text = self.loss(logits_per_text, targets)
return loss_video + loss_text
class MTM(Loss):
"""Combination of MFM and MLM."""
def __init__(self):
self.loss = nn.CrossEntropyLoss()
def __call__(
self,
video_logits,
text_logits,
video_label,
text_label,
**kwargs
):
text_logits = torch.cat([
text_logits,
torch.zeros(
(text_logits.size(0), 1), device=text_logits.device)
], dim=1)
vt_logits = torch.cat([video_logits, text_logits], dim=0)
# loss for video.
video_label = torch.zeros(
(video_logits.size(0),),
dtype=torch.long,
device=video_logits.device
)
# loss for text.
text_label = text_label.reshape(-1)
labels_mask = text_label != -100
selected_text_label = text_label[labels_mask]
vt_label = torch.cat([video_label, selected_text_label], dim=0)
return self.loss(vt_logits, vt_label)
class MFMMLM(Loss):
"""Combination of MFM and MLM."""
def __init__(self):
self.loss = nn.CrossEntropyLoss()
def __call__(
self,
video_logits,
text_logits,
video_label,
text_label,
**kwargs
):
# loss for video.
video_label = torch.zeros(
(video_logits.size(0),),
dtype=torch.long,
device=video_logits.device
)
masked_frame_loss = self.loss(video_logits, video_label)
# loss for text.
text_label = text_label.reshape(-1)
labels_mask = text_label != -100
selected_text_label = text_label[labels_mask]
masked_lm_loss = self.loss(text_logits, selected_text_label)
return masked_frame_loss + masked_lm_loss
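

if __name__ == "__main__":
    # Hedged sketch (not part of the original file): exercise the contrastive
    # losses above on random pooled embeddings; batch/embedding sizes are
    # assumptions. Run via `python -m mmpt.losses.nce` (assumed package layout)
    # so the relative import resolves.
    torch.manual_seed(0)
    pooled_video = torch.randn(4, 8)
    pooled_text = torch.randn(4, 8)
    t2v = T2VContraLoss()(pooled_video, pooled_text)
    v2t = V2TContraLoss()(pooled_video, pooled_text)
    mm = MMContraLoss()(pooled_video, pooled_text)
    print("T2V:", t2v.item(), "V2T:", v2t.item(), "MM:", mm.item())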
| EXA-1-master | exa/libraries/fairseq/examples/MMPT/mmpt/losses/nce.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
from .. import tasks
from .. import models
from .. import losses
from ..datasets import MMDataset
from .. import processors
class Task(object):
"""
A task refers to one generic training task (e.g., training one model).
"""
@classmethod
def config_task(cls, config):
"""
determine whether to load a hard-coded task or config from a generic one.
via if a task string is available in config.
"""
if config.task is not None:
# TODO (huxu): expand the search scope.
task_cls = getattr(tasks, config.task)
return task_cls(config)
else:
return Task(config)
def __init__(self, config):
self.config = config
self.train_data = None
self.val_data = None
self.test_data = None
self.model = None
self.loss_fn = None
self.eval_fn = None
def build_dataset(self):
"""TODO (huxu): move processor breakdown to MMDataset."""
"""fill-in `self.train_data`, `self.val_data` and `self.test_data`."""
meta_processor_cls = getattr(
processors, self.config.dataset.meta_processor)
video_processor_cls = getattr(
processors, self.config.dataset.video_processor)
text_processor_cls = getattr(
processors, self.config.dataset.text_processor)
aligner_cls = getattr(
processors, self.config.dataset.aligner)
if self.config.dataset.train_path is not None:
self.config.dataset.split = "train"
# may be used by meta processor.
# meta_processor controls different dataset.
meta_processor = meta_processor_cls(self.config.dataset)
video_processor = video_processor_cls(self.config.dataset)
text_processor = text_processor_cls(self.config.dataset)
aligner = aligner_cls(self.config.dataset)
self.train_data = MMDataset(
meta_processor, video_processor, text_processor, aligner
)
print("train_len", len(self.train_data))
output = self.train_data[0]
self.train_data.print_example(output)
if self.config.dataset.val_path is not None:
self.config.dataset.split = "valid"
# may be used by meta processor.
meta_processor = meta_processor_cls(self.config.dataset)
video_processor = video_processor_cls(self.config.dataset)
text_processor = text_processor_cls(self.config.dataset)
aligner = aligner_cls(self.config.dataset)
self.val_data = MMDataset(
meta_processor, video_processor, text_processor, aligner
)
print("val_len", len(self.val_data))
output = self.val_data[0]
self.val_data.print_example(output)
if self.config.dataset.split == "test":
            # the following is run via launching fairseq-validate.
            meta_processor = meta_processor_cls(self.config.dataset)
            video_processor = video_processor_cls(self.config.dataset)
            text_processor = text_processor_cls(self.config.dataset)
            # build the aligner in this branch too, so test-only runs do not
            # rely on the train/val branches having defined `aligner`.
            aligner = aligner_cls(self.config.dataset)
self.test_data = MMDataset(
meta_processor, video_processor, text_processor, aligner
)
print("test_len", len(self.test_data))
output = self.test_data[0]
self.test_data.print_example(output)
def build_model(self, checkpoint=None):
if self.model is None:
model_cls = getattr(models, self.config.model.model_cls)
self.model = model_cls(self.config)
if checkpoint is not None:
self.load_checkpoint(checkpoint)
return self.model
def load_checkpoint(self, checkpoint):
if self.model is None:
raise ValueError("model is not initialized.")
state_dict = torch.load(checkpoint)
state_dict = self._trim_state_dict(state_dict)
self.model.load_state_dict(state_dict, strict=False)
# if it's a fp16 model, turn it back.
if next(self.model.parameters()).dtype == torch.float16:
self.model = self.model.float()
return self.model
def _trim_state_dict(self, state_dict):
from collections import OrderedDict
if "state_dict" in state_dict:
state_dict = state_dict["state_dict"]
if "model" in state_dict: # fairseq checkpoint format.
state_dict = state_dict["model"]
ret_state_dict = OrderedDict()
for (
key,
value,
) in state_dict.items():
# remove fairseq wrapper since this is a task.
if key.startswith("mmmodel"):
key = key[len("mmmodel."):]
ret_state_dict[key] = value
return ret_state_dict
def build_loss(self):
if self.loss_fn is None and self.config.loss is not None:
loss_cls = getattr(losses, self.config.loss.loss_cls)
self.loss_fn = loss_cls()
return self.loss_fn
def flat_subsample(self, tensor):
size = tensor.size()
if len(size) >= 2:
batch_size = size[0] * size[1]
expanded_size = (
(batch_size,) + size[2:] if len(size) > 2
else (batch_size,)
)
tensor = tensor.view(expanded_size)
return tensor
def reshape_subsample(self, sample):
if (
hasattr(self.config.dataset, "subsampling")
and self.config.dataset.subsampling is not None
and self.config.dataset.subsampling > 1
):
for key in sample:
if torch.is_tensor(sample[key]):
sample[key] = self.flat_subsample(sample[key])
return sample
def __call__(self, model, sample):
loss = None
loss_scalar = float("inf")
sample = self.reshape_subsample(sample)
outputs = self.model(**sample)
sample.update(outputs)
if self.loss_fn is not None:
loss = self.loss_fn(**sample)
loss_scalar = loss.item()
batch_size = sample["caps"].size(0)
sample_size = 1
return {
"loss": loss,
"loss_scalar": loss_scalar,
"max_len": self.config.dataset.max_len,
"batch_size": batch_size,
"sample_size": sample_size,
}
def build_dataloader(self):
"""only used for trainer that lacks building loaders."""
raise NotImplementedError
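

if __name__ == "__main__":
    # Hedged sketch (not part of the original file): `flat_subsample` merges the
    # leading (batch, subsample) dims. `self` is unused there, so the unbound
    # call below is for illustration only; run via `python -m mmpt.tasks.task`
    # (assumed package layout) so the relative imports resolve.
    example = torch.randn(2, 3, 5)
    print(Task.flat_subsample(None, example).shape)  # torch.Size([6, 5])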
| EXA-1-master | exa/libraries/fairseq/examples/MMPT/mmpt/tasks/task.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
make a general fairseq task for MM pretraining.
"""
import random
from fairseq.tasks import LegacyFairseqTask, register_task
from .task import Task
from .retritask import RetriTask
from ..datasets import FairseqMMDataset
from .. import utils
@register_task("mmtask")
class FairseqMMTask(LegacyFairseqTask):
@staticmethod
def add_args(parser):
# Add some command-line arguments for specifying where the data is
# located and the maximum supported input length.
parser.add_argument(
"taskconfig",
metavar="FILE",
help=("taskconfig to load all configurations" "outside fairseq parser."),
)
@classmethod
def setup_task(cls, args, **kwargs):
return FairseqMMTask(args)
def __init__(self, args):
super().__init__(args)
config = utils.load_config(args)
self.mmtask = Task.config_task(config)
self.mmtask.build_dataset()
self.mmtask.build_model()
self.mmtask.build_loss()
def load_dataset(self, split, **kwargs):
split_map = {
"train": self.mmtask.train_data,
"valid": self.mmtask.val_data,
"test": self.mmtask.test_data,
}
if split not in split_map:
raise ValueError("unknown split type.")
if split_map[split] is not None:
self.datasets[split] = FairseqMMDataset(split_map[split])
def get_batch_iterator(
self,
dataset,
max_tokens=None,
max_sentences=None,
max_positions=None,
ignore_invalid_inputs=False,
required_batch_size_multiple=1,
seed=1,
num_shards=1,
shard_id=0,
num_workers=0,
epoch=1,
data_buffer_size=0,
disable_iterator_cache=False,
skip_remainder_batch=False,
grouped_shuffling=False,
update_epoch_batch_itr=False,
):
random.seed(epoch)
if dataset.mmdataset.split == "train" and isinstance(self.mmtask, RetriTask):
if epoch >= self.mmtask.config.retri_epoch:
if not hasattr(self.mmtask, "retri_dataloader"):
self.mmtask.build_dataloader()
self.mmtask.retrive_candidates(epoch)
return super().get_batch_iterator(
dataset,
max_tokens,
max_sentences,
max_positions,
ignore_invalid_inputs,
required_batch_size_multiple,
seed,
num_shards,
shard_id,
num_workers,
epoch,
data_buffer_size,
            disable_iterator_cache,
            # forward skip_remainder_batch as well so the positional arguments
            # stay aligned with the parent signature.
            skip_remainder_batch,
            grouped_shuffling,
            update_epoch_batch_itr,
)
@property
def source_dictionary(self):
return None
@property
def target_dictionary(self):
return None
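
# Hedged usage sketch (not part of the original file): this task is registered
# as "mmtask" and the criterion in mmpt/losses/fairseqmmloss.py as "mmloss", so
# a fairseq launch would look roughly like the following (paths and any extra
# flags are hypothetical placeholders):
#
#   fairseq-train <taskconfig.yaml> --user-dir mmpt --task mmtask --criterion mmloss ...
#
# where <taskconfig.yaml> is the positional `taskconfig` argument added by
# `add_args` above and parsed by `utils.load_config`.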
| EXA-1-master | exa/libraries/fairseq/examples/MMPT/mmpt/tasks/fairseqmmtask.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from .task import *
from .vlmtask import *
from .retritask import *
try:
from .fairseqmmtask import *
except ImportError:
pass
try:
from .milncetask import *
except ImportError:
pass
try:
from .expretritask import *
except ImportError:
pass
| EXA-1-master | exa/libraries/fairseq/examples/MMPT/mmpt/tasks/__init__.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os
import torch
import pickle
import random
from tqdm import tqdm
from torch.utils.data import DataLoader
from torch.utils.data.distributed import DistributedSampler
from ..processors import (
ShardedHow2MetaProcessor,
ShardedVideoProcessor,
ShardedTextProcessor,
VariedLenAligner,
)
from ..datasets import MMDataset
from .task import Task
from ..modules import vectorpool
from ..evaluators.predictor import Predictor
from ..utils import set_seed, get_local_rank, get_world_size
class RetriTask(Task):
"""abstract class for task with retrival."""
def reshape_subsample(self, sample):
for key in sample:
if torch.is_tensor(sample[key]):
sample[key] = self.flat_subsample(sample[key])
return sample
def flat_subsample(self, tensor):
if tensor.size(0) == 1:
tensor = tensor.squeeze(0)
return tensor
def build_dataloader(self):
"""called by `get_batch_iterator` in fairseqmmtask. """
# TODO: hard-code dataloader for retri for now and configurable in .yaml.
# reuse the `train.lst`.
self.config.dataset.split = "train"
meta_processor = ShardedHow2MetaProcessor(self.config.dataset)
video_processor = ShardedVideoProcessor(self.config.dataset)
text_processor = ShardedTextProcessor(self.config.dataset)
aligner = VariedLenAligner(self.config.dataset)
aligner.subsampling = self.config.dataset.clip_per_video
self.retri_data = MMDataset(
meta_processor, video_processor, text_processor, aligner
)
retri_sampler = DistributedSampler(self.retri_data)
infer_scale = 16
batch_size = self.config.dataset.num_video_per_batch \
* infer_scale
self.retri_dataloader = DataLoader(
self.retri_data,
collate_fn=self.retri_data.collater,
batch_size=batch_size,
shuffle=False,
sampler=retri_sampler,
num_workers=self.config.fairseq.dataset.num_workers
)
return self.retri_dataloader
def retrive_candidates(self, epoch, dataloader=None):
if get_local_rank() == 0:
print("running retrieval model.")
out_dir = os.path.join(
self.config.fairseq.checkpoint.save_dir, "retri")
os.makedirs(out_dir, exist_ok=True)
if not os.path.isfile(
os.path.join(
out_dir, "batched_e" + str(epoch) + "_videos0.pkl")
):
if dataloader is None:
dataloader = self.retri_dataloader
self.model.eval()
self.model.is_train = False
assert self.retri_data.meta_processor.data == \
self.train_data.meta_processor.data # video_ids not mutated.
self._retri_predict(epoch, dataloader)
self.model.train()
self.model.is_train = True
torch.distributed.barrier()
output = self._retri_sync(epoch, out_dir)
torch.distributed.barrier()
self.train_data.meta_processor.set_candidates(output)
return output
class VideoRetriTask(RetriTask):
"""RetriTask on video level."""
def reshape_subsample(self, sample):
if (
hasattr(self.config.dataset, "clip_per_video")
and self.config.dataset.clip_per_video is not None
and self.config.dataset.clip_per_video > 1
):
for key in sample:
if torch.is_tensor(sample[key]):
sample[key] = self.flat_subsample(sample[key])
return sample
def flat_subsample(self, tensor):
if tensor.size(0) == 1:
tensor = tensor.squeeze(0)
return Task.flat_subsample(self, tensor)
def _retri_predict(self, epoch, dataloader):
set_seed(epoch)
# save for retrival.
predictor = VideoPredictor(self.config)
predictor.predict_loop(
self.model, dataloader)
set_seed(epoch) # get the same text clips.
# retrival.
retri_predictor = VideoRetriPredictor(
self.config)
retri_predictor.predict_loop(
self.model, predictor.vecpool.retriver, epoch)
del predictor
del retri_predictor
def _retri_sync(self, epoch, out_dir):
# gpu do the same merge.
batched_videos = []
for local_rank in range(get_world_size()):
fn = os.path.join(
out_dir,
"batched_e" + str(epoch) + "_videos" + str(local_rank) + ".pkl")
with open(fn, "rb") as fr:
batched_videos.extend(pickle.load(fr))
print(
"[INFO] batched_videos",
len(batched_videos), len(batched_videos[0]))
return batched_videos
class VideoPredictor(Predictor):
def __init__(self, config):
vectorpool_cls = getattr(vectorpool, config.vectorpool_cls)
self.vecpool = vectorpool_cls(config)
def predict_loop(
self,
model,
dataloader,
early_stop=-1,
):
with torch.no_grad():
if get_local_rank() == 0:
dataloader = tqdm(dataloader)
for batch_idx, batch in enumerate(dataloader):
if batch_idx == early_stop:
break
self(batch, model)
return self.finalize()
def __call__(self, sample, model, **kwargs):
param = next(model.parameters())
dtype = param.dtype
device = param.device
subsample = sample["vfeats"].size(1)
sample = self.to_ctx(sample, device, dtype)
for key in sample:
if torch.is_tensor(sample[key]):
size = sample[key].size()
if len(size) >= 2:
batch_size = size[0] * size[1]
expanded_size = (
(batch_size,) + size[2:] if len(size) > 2
else (batch_size,)
)
sample[key] = sample[key].view(expanded_size)
outputs = model(**sample)
sample.update(outputs)
self.vecpool(sample, subsample)
def finalize(self):
print("[INFO]", self.vecpool)
if not self.vecpool.retriver.db.is_trained:
self.vecpool.retriver.finalize_training()
return self.vecpool.retriver
class VideoRetriPredictor(Predictor):
"""
Online Retrieval Predictor for Clips (used by RetriTask).
TODO: merge this with VisPredictor?
"""
def __init__(self, config):
self.pred_dir = os.path.join(
config.fairseq.checkpoint.save_dir,
"retri")
self.num_cands = config.num_cands
self.num_video_per_batch = config.dataset.num_video_per_batch
def predict_loop(
self,
model,
retriver,
epoch,
early_stop=-1
):
        # a fake loop that only tries to recover video vectors
        # from video_ids.
batched_videos = []
# obtain available video_ids.
video_ids = list(retriver.videoid_to_vectoridx.keys())
dataloader = random.sample(
video_ids,
len(video_ids) // self.num_video_per_batch
)
if get_local_rank() == 0:
dataloader = tqdm(dataloader)
for batch_idx, batch in enumerate(dataloader):
# batch is one video id.
if batch_idx == early_stop:
break
video_ids = retriver.search_by_video_ids(
[batch], self.num_cands)[0]
if len(video_ids) > self.num_video_per_batch:
# we moved the center to make cluster robust.
video_ids = random.sample(video_ids, self.num_video_per_batch)
batched_videos.append(video_ids)
return self.finalize(batched_videos, epoch)
def finalize(self, batched_videos, epoch):
fn = os.path.join(
self.pred_dir,
"batched_e" + str(epoch) + "_videos" + str(get_local_rank()) + ".pkl")
with open(fn, "wb") as fw:
pickle.dump(batched_videos, fw, pickle.HIGHEST_PROTOCOL)
return batched_videos
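
# Hedged sketch (not part of the original file) of the retrieval flow that
# FairseqMMTask.get_batch_iterator triggers once `epoch >= retri_epoch`:
#
#   task.build_dataloader()         # sharded how2 data with a DistributedSampler
#   task.retrive_candidates(epoch)  # 1) VideoPredictor pools clip vectors into
#                                   #    the vectorpool retriver,
#                                   # 2) VideoRetriPredictor samples video centers
#                                   #    and searches num_cands neighbors each,
#                                   # 3) per-rank results are pickled, merged, and
#                                   #    passed to meta_processor.set_candidates.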
| EXA-1-master | exa/libraries/fairseq/examples/MMPT/mmpt/tasks/retritask.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
from .task import Task
class MILNCETask(Task):
def reshape_subsample(self, sample):
if (
hasattr(self.config.dataset, "subsampling")
and self.config.dataset.subsampling is not None
and self.config.dataset.subsampling > 1
):
for key in sample:
if torch.is_tensor(sample[key]):
tensor = self.flat_subsample(sample[key])
if key in ["caps", "cmasks"]:
size = tensor.size()
batch_size = size[0] * size[1]
expanded_size = (batch_size,) + size[2:]
tensor = tensor.view(expanded_size)
sample[key] = tensor
return sample
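
# Hedged shape sketch (not part of the original file), assuming a "caps" layout
# of (batch, subsample, n_cand, len) where n_cand candidate captions accompany
# each clip: `flat_subsample` first yields (batch * subsample, n_cand, len),
# and the extra reshape above then yields (batch * subsample * n_cand, len),
# so every candidate caption becomes its own row for the MIL-NCE loss.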
| EXA-1-master | exa/libraries/fairseq/examples/MMPT/mmpt/tasks/milncetask.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
from .task import Task
class VLMTask(Task):
"""A VLM task for reproducibility.
the collator split subsamples into two sub-batches.
This has should have no logic changes.
but changed the randomness in frame masking.
"""
def flat_subsample(self, tensor):
size = tensor.size()
if len(size) >= 2:
batch_size = size[0] * (size[1] // 2)
expanded_size = (
(batch_size, 2) + size[2:] if len(size) > 2
else (batch_size, 2)
)
tensor = tensor.view(expanded_size)
tensor = torch.cat([tensor[:, 0], tensor[:, 1]], dim=0)
return tensor
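

if __name__ == "__main__":
    # Hedged sketch (not part of the original file): show how a (batch, 2*k, ...)
    # tensor is split into two sub-batches and concatenated along dim 0.
    # `self` is unused by `flat_subsample`, so the unbound call is for
    # illustration only; run via `python -m mmpt.tasks.vlmtask` (assumed
    # package layout) so the relative import resolves.
    x = torch.arange(2 * 4 * 3).view(2, 4, 3)
    print(VLMTask.flat_subsample(None, x).shape)  # torch.Size([8, 3])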
| EXA-1-master | exa/libraries/fairseq/examples/MMPT/mmpt/tasks/vlmtask.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
TODO (huxu): fairseq wrapper class for all dataset you defined: mostly MMDataset.
"""
from collections import OrderedDict
from torch.utils.data import Dataset
from torch.utils.data.dataloader import default_collate
from fairseq.data import FairseqDataset, data_utils
class FairseqMMDataset(FairseqDataset):
"""
A wrapper class for MMDataset for fairseq.
"""
def __init__(self, mmdataset):
if not isinstance(mmdataset, Dataset):
raise TypeError("mmdataset must be of type `torch.utils.data.dataset`.")
self.mmdataset = mmdataset
def set_epoch(self, epoch, **unused):
super().set_epoch(epoch)
self.epoch = epoch
def __getitem__(self, idx):
with data_utils.numpy_seed(43211, self.epoch, idx):
return self.mmdataset[idx]
def __len__(self):
return len(self.mmdataset)
def collater(self, samples):
if hasattr(self.mmdataset, "collator"):
return self.mmdataset.collator(samples)
if len(samples) == 0:
return {}
if isinstance(samples[0], dict):
batch = OrderedDict()
for key in samples[0]:
if samples[0][key] is not None:
batch[key] = default_collate([sample[key] for sample in samples])
return batch
else:
return default_collate(samples)
def size(self, index):
"""dummy implementation: we don't use --max-tokens"""
return 1
def num_tokens(self, index):
"""dummy implementation: we don't use --max-tokens"""
return 1
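

if __name__ == "__main__":
    # Hedged sketch (not part of the original file): wrap a toy dataset and run
    # the dict branch of `collater`; the keys and shapes are illustrative
    # assumptions, not the real MMDataset schema. Requires fairseq to be
    # installed for the imports at the top of this file.
    import torch

    class _ToyDataset(Dataset):
        def __len__(self):
            return 5

        def __getitem__(self, idx):
            return {"caps": torch.zeros(3, dtype=torch.long),
                    "vfeats": torch.ones(2, 4)}

    ds = FairseqMMDataset(_ToyDataset())
    batch = ds.collater([ds.mmdataset[i] for i in range(len(ds))])
    print({k: tuple(v.shape) for k, v in batch.items()})
    # expected: {'caps': (5, 3), 'vfeats': (5, 2, 4)}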
| EXA-1-master | exa/libraries/fairseq/examples/MMPT/mmpt/datasets/fairseqmmdataset.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from .mmdataset import *
try:
from .fairseqmmdataset import *
except ImportError:
pass
| EXA-1-master | exa/libraries/fairseq/examples/MMPT/mmpt/datasets/__init__.py |