Dataset columns: python_code (string, 0–992k chars) · repo_name (string, 8–46 chars) · file_path (string, 5–162 chars)
#!/usr/bin/env python3 # Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import argparse import logging import os from pathlib import Path import shutil from itertools import groupby from tempfile import NamedTemporaryFile from typing import Tuple import numpy as np import pandas as pd import soundfile as sf from examples.speech_to_text.data_utils import ( create_zip, extract_fbank_features, filter_manifest_df, gen_config_yaml, gen_vocab, get_zip_manifest, load_df_from_tsv, save_df_to_tsv, cal_gcmvn_stats, ) import torch from torch.utils.data import Dataset from tqdm import tqdm from fairseq.data.audio.audio_utils import get_waveform, convert_waveform log = logging.getLogger(__name__) MANIFEST_COLUMNS = ["id", "audio", "n_frames", "tgt_text", "speaker"] class MUSTC(Dataset): """ Create a Dataset for MuST-C. Each item is a tuple of the form: waveform, sample_rate, source utterance, target utterance, speaker_id, utterance_id """ SPLITS = ["train", "dev", "tst-COMMON", "tst-HE"] LANGUAGES = ["de", "es", "fr", "it", "nl", "pt", "ro", "ru"] def __init__(self, root: str, lang: str, split: str) -> None: assert split in self.SPLITS and lang in self.LANGUAGES _root = Path(root) / f"en-{lang}" / "data" / split wav_root, txt_root = _root / "wav", _root / "txt" assert _root.is_dir() and wav_root.is_dir() and txt_root.is_dir() # Load audio segments try: import yaml except ImportError: print("Please install PyYAML to load the MuST-C YAML files") with open(txt_root / f"{split}.yaml") as f: segments = yaml.load(f, Loader=yaml.BaseLoader) # Load source and target utterances for _lang in ["en", lang]: with open(txt_root / f"{split}.{_lang}") as f: utterances = [r.strip() for r in f] assert len(segments) == len(utterances) for i, u in enumerate(utterances): segments[i][_lang] = u # Gather info self.data = [] for wav_filename, _seg_group in groupby(segments, lambda x: x["wav"]): wav_path = wav_root / wav_filename sample_rate = sf.info(wav_path.as_posix()).samplerate seg_group = sorted(_seg_group, key=lambda x: x["offset"]) for i, segment in enumerate(seg_group): offset = int(float(segment["offset"]) * sample_rate) n_frames = int(float(segment["duration"]) * sample_rate) _id = f"{wav_path.stem}_{i}" self.data.append( ( wav_path.as_posix(), offset, n_frames, sample_rate, segment["en"], segment[lang], segment["speaker_id"], _id, ) ) def __getitem__( self, n: int ) -> Tuple[torch.Tensor, int, str, str, str, str]: wav_path, offset, n_frames, sr, src_utt, tgt_utt, spk_id, \ utt_id = self.data[n] waveform, _ = get_waveform(wav_path, frames=n_frames, start=offset) waveform = torch.from_numpy(waveform) return waveform, sr, src_utt, tgt_utt, spk_id, utt_id def __len__(self) -> int: return len(self.data) def process(args): root = Path(args.data_root).absolute() for lang in MUSTC.LANGUAGES: cur_root = root / f"en-{lang}" if not cur_root.is_dir(): print(f"{cur_root.as_posix()} does not exist. 
Skipped.") continue # Extract features audio_root = cur_root / ("flac" if args.use_audio_input else "fbank80") audio_root.mkdir(exist_ok=True) for split in MUSTC.SPLITS: print(f"Fetching split {split}...") dataset = MUSTC(root.as_posix(), lang, split) if args.use_audio_input: print("Converting audios...") for waveform, sample_rate, _, _, _, utt_id in tqdm(dataset): tgt_sample_rate = 16_000 _wavform, _ = convert_waveform( waveform, sample_rate, to_mono=True, to_sample_rate=tgt_sample_rate ) sf.write( (audio_root / f"{utt_id}.flac").as_posix(), _wavform.T.numpy(), tgt_sample_rate ) else: print("Extracting log mel filter bank features...") gcmvn_feature_list = [] if split == 'train' and args.cmvn_type == "global": print("And estimating cepstral mean and variance stats...") for waveform, sample_rate, _, _, _, utt_id in tqdm(dataset): features = extract_fbank_features( waveform, sample_rate, audio_root / f"{utt_id}.npy" ) if split == 'train' and args.cmvn_type == "global": if len(gcmvn_feature_list) < args.gcmvn_max_num: gcmvn_feature_list.append(features) if split == 'train' and args.cmvn_type == "global": # Estimate and save cmv stats = cal_gcmvn_stats(gcmvn_feature_list) with open(cur_root / "gcmvn.npz", "wb") as f: np.savez(f, mean=stats["mean"], std=stats["std"]) # Pack features into ZIP zip_path = cur_root / f"{audio_root.name}.zip" print("ZIPing audios/features...") create_zip(audio_root, zip_path) print("Fetching ZIP manifest...") audio_paths, audio_lengths = get_zip_manifest( zip_path, is_audio=args.use_audio_input, ) # Generate TSV manifest print("Generating manifest...") train_text = [] for split in MUSTC.SPLITS: is_train_split = split.startswith("train") manifest = {c: [] for c in MANIFEST_COLUMNS} dataset = MUSTC(args.data_root, lang, split) for _, _, src_utt, tgt_utt, speaker_id, utt_id in tqdm(dataset): manifest["id"].append(utt_id) manifest["audio"].append(audio_paths[utt_id]) manifest["n_frames"].append(audio_lengths[utt_id]) manifest["tgt_text"].append( src_utt if args.task == "asr" else tgt_utt ) manifest["speaker"].append(speaker_id) if is_train_split: train_text.extend(manifest["tgt_text"]) df = pd.DataFrame.from_dict(manifest) df = filter_manifest_df(df, is_train_split=is_train_split) save_df_to_tsv(df, cur_root / f"{split}_{args.task}.tsv") # Generate vocab v_size_str = "" if args.vocab_type == "char" else str(args.vocab_size) spm_filename_prefix = f"spm_{args.vocab_type}{v_size_str}_{args.task}" with NamedTemporaryFile(mode="w") as f: for t in train_text: f.write(t + "\n") gen_vocab( Path(f.name), cur_root / spm_filename_prefix, args.vocab_type, args.vocab_size, ) # Generate config YAML if args.use_audio_input: gen_config_yaml( cur_root, spm_filename=spm_filename_prefix + ".model", yaml_filename=f"config_{args.task}.yaml", specaugment_policy=None, extra={"use_audio_input": True} ) else: gen_config_yaml( cur_root, spm_filename=spm_filename_prefix + ".model", yaml_filename=f"config_{args.task}.yaml", specaugment_policy="lb", cmvn_type=args.cmvn_type, gcmvn_path=( cur_root / "gcmvn.npz" if args.cmvn_type == "global" else None ), ) # Clean up shutil.rmtree(audio_root) def process_joint(args): cur_root = Path(args.data_root) assert all( (cur_root / f"en-{lang}").is_dir() for lang in MUSTC.LANGUAGES ), "do not have downloaded data available for all 8 languages" # Generate vocab vocab_size_str = "" if args.vocab_type == "char" else str(args.vocab_size) spm_filename_prefix = f"spm_{args.vocab_type}{vocab_size_str}_{args.task}" with NamedTemporaryFile(mode="w") as f: for lang in 
MUSTC.LANGUAGES: tsv_path = cur_root / f"en-{lang}" / f"train_{args.task}.tsv" df = load_df_from_tsv(tsv_path) for t in df["tgt_text"]: f.write(t + "\n") special_symbols = None if args.task == 'st': special_symbols = [f'<lang:{lang}>' for lang in MUSTC.LANGUAGES] gen_vocab( Path(f.name), cur_root / spm_filename_prefix, args.vocab_type, args.vocab_size, special_symbols=special_symbols ) # Generate config YAML gen_config_yaml( cur_root, spm_filename=spm_filename_prefix + ".model", yaml_filename=f"config_{args.task}.yaml", specaugment_policy="ld", prepend_tgt_lang_tag=(args.task == "st"), ) # Make symbolic links to manifests for lang in MUSTC.LANGUAGES: for split in MUSTC.SPLITS: src_path = cur_root / f"en-{lang}" / f"{split}_{args.task}.tsv" desc_path = cur_root / f"{split}_{lang}_{args.task}.tsv" if not desc_path.is_symlink(): os.symlink(src_path, desc_path) def main(): parser = argparse.ArgumentParser() parser.add_argument("--data-root", "-d", required=True, type=str) parser.add_argument( "--vocab-type", default="unigram", required=True, type=str, choices=["bpe", "unigram", "char"], ), parser.add_argument("--vocab-size", default=8000, type=int) parser.add_argument("--task", type=str, choices=["asr", "st"]) parser.add_argument("--joint", action="store_true", help="") parser.add_argument( "--cmvn-type", default="utterance", choices=["global", "utterance"], help="The type of cepstral mean and variance normalization" ) parser.add_argument( "--gcmvn-max-num", default=150000, type=int, help="Maximum number of sentences to use to estimate global mean and " "variance" ) parser.add_argument("--use-audio-input", action="store_true") args = parser.parse_args() if args.joint: process_joint(args) else: process(args) if __name__ == "__main__": main()
KosmosX-API-main
kosmosX/fairseq/examples/speech_to_text/prep_mustc_data.py
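For reference, a minimal sketch (not part of the file above; it assumes a MuST-C en-de release unpacked under ./mustc) showing how the MUSTC dataset class defined in prep_mustc_data.py is indexed:

from examples.speech_to_text.prep_mustc_data import MUSTC

# expects ./mustc/en-de/data/dev/{wav,txt} to exist
dataset = MUSTC("./mustc", lang="de", split="dev")
waveform, sample_rate, src_utt, tgt_utt, spk_id, utt_id = dataset[0]
print(utt_id, sample_rate, tuple(waveform.shape))
print(src_utt, "->", tgt_utt)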
#!/usr/bin/env python3 # Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import argparse import logging import os from pathlib import Path import shutil from itertools import groupby from tempfile import NamedTemporaryFile from typing import Tuple import pandas as pd import soundfile as sf from examples.speech_to_text.data_utils import ( create_zip, extract_fbank_features, filter_manifest_df, gen_config_yaml, gen_vocab, get_zip_manifest, load_df_from_tsv, save_df_to_tsv, ) import torch from torch.utils.data import Dataset from tqdm import tqdm from fairseq.data.audio.audio_utils import get_waveform, convert_waveform log = logging.getLogger(__name__) MANIFEST_COLUMNS = [ "id", "audio", "n_frames", "tgt_text", "speaker", "tgt_lang" ] class mTEDx(Dataset): """ Create a Dataset for Multilingual TEDx. Each item is a tuple of the form: waveform, sample_rate, source utterance, target utterance, speaker_id, utterance_id """ SPLITS = ["train", "valid", "test"] LANGPAIRS = ["es-es", "fr-fr", "pt-pt", "it-it", "ru-ru", "el-el", "ar-ar", "de-de", "es-en", "es-fr", "es-pt", "es-it", "fr-en", "fr-es", "fr-pt", "pt-en", "pt-es", "it-en", "it-es", "ru-en", "el-en"] def __init__(self, root: str, lang: str, split: str) -> None: assert split in self.SPLITS and lang in self.LANGPAIRS _root = Path(root) / f"{lang}" / "data" / split wav_root, txt_root = _root / "wav", _root / "txt" assert _root.is_dir() and wav_root.is_dir() and txt_root.is_dir() # Load audio segments try: import yaml except ImportError: print( "Please install PyYAML to load the Multilingual TEDx YAML files" ) with open(txt_root / f"{split}.yaml") as f: segments = yaml.load(f, Loader=yaml.BaseLoader) # Load source and target utterances src, tgt = lang.split("-") for _lang in [src, tgt]: with open(txt_root / f"{split}.{_lang}") as f: utterances = [r.strip() for r in f] assert len(segments) == len(utterances) for i, u in enumerate(utterances): segments[i][_lang] = u # Gather info self.data = [] for wav_filename, _seg_group in groupby(segments, lambda x: x["wav"]): wav_filename = wav_filename.replace(".wav", ".flac") wav_path = wav_root / wav_filename sample_rate = sf.info(wav_path.as_posix()).samplerate seg_group = sorted(_seg_group, key=lambda x: float(x["offset"])) for i, segment in enumerate(seg_group): offset = int(float(segment["offset"]) * sample_rate) n_frames = int(float(segment["duration"]) * sample_rate) _id = f"{wav_path.stem}_{i}" self.data.append( ( wav_path.as_posix(), offset, n_frames, sample_rate, segment[src], segment[tgt], segment["speaker_id"], tgt, _id, ) ) def __getitem__( self, n: int ) -> Tuple[torch.Tensor, int, str, str, str, str, str]: wav_path, offset, n_frames, sr, src_utt, tgt_utt, spk_id, tgt_lang, \ utt_id = self.data[n] waveform, _ = get_waveform(wav_path, frames=n_frames, start=offset) waveform = torch.from_numpy(waveform) return waveform, sr, src_utt, tgt_utt, spk_id, tgt_lang, utt_id def __len__(self) -> int: return len(self.data) def process(args): root = Path(args.data_root).absolute() for lang in mTEDx.LANGPAIRS: cur_root = root / f"{lang}" if not cur_root.is_dir(): print(f"{cur_root.as_posix()} does not exist. 
Skipped.") continue # Extract features audio_root = cur_root / ("flac" if args.use_audio_input else "fbank80") audio_root.mkdir(exist_ok=True) for split in mTEDx.SPLITS: print(f"Fetching split {split}...") dataset = mTEDx(root.as_posix(), lang, split) if args.use_audio_input: print("Converting audios...") for waveform, sample_rate, _, _, _, utt_id in tqdm(dataset): tgt_sample_rate = 16_000 _wavform, _ = convert_waveform( waveform, sample_rate, to_mono=True, to_sample_rate=tgt_sample_rate ) sf.write( (audio_root / f"{utt_id}.flac").as_posix(), _wavform.numpy(), tgt_sample_rate ) else: print("Extracting log mel filter bank features...") for waveform, sample_rate, _, _, _, _, utt_id in tqdm(dataset): extract_fbank_features( waveform, sample_rate, audio_root / f"{utt_id}.npy" ) # Pack features into ZIP zip_path = cur_root / f"{audio_root.name}.zip" print("ZIPing audios/features...") create_zip(audio_root, zip_path) print("Fetching ZIP manifest...") audio_paths, audio_lengths = get_zip_manifest(zip_path) # Generate TSV manifest print("Generating manifest...") train_text = [] for split in mTEDx.SPLITS: is_train_split = split.startswith("train") manifest = {c: [] for c in MANIFEST_COLUMNS} ds = mTEDx(args.data_root, lang, split) for _, _, src_utt, tgt_utt, spk_id, tgt_lang, utt_id in tqdm(ds): manifest["id"].append(utt_id) manifest["audio"].append(audio_paths[utt_id]) manifest["n_frames"].append(audio_lengths[utt_id]) manifest["tgt_text"].append( src_utt if args.task == "asr" else tgt_utt ) manifest["speaker"].append(spk_id) manifest["tgt_lang"].append(tgt_lang) if is_train_split: train_text.extend(manifest["tgt_text"]) df = pd.DataFrame.from_dict(manifest) df = filter_manifest_df(df, is_train_split=is_train_split) save_df_to_tsv(df, cur_root / f"{split}_{args.task}.tsv") # Generate vocab v_size_str = "" if args.vocab_type == "char" else str(args.vocab_size) spm_filename_prefix = f"spm_{args.vocab_type}{v_size_str}_{args.task}" with NamedTemporaryFile(mode="w") as f: for t in train_text: f.write(t + "\n") gen_vocab( Path(f.name), cur_root / spm_filename_prefix, args.vocab_type, args.vocab_size, ) # Generate config YAML if args.use_audio_input: gen_config_yaml( cur_root, spm_filename=spm_filename_prefix + ".model", yaml_filename=f"config_{args.task}.yaml", specaugment_policy=None, extra={"use_audio_input": True} ) else: gen_config_yaml( cur_root, spm_filename=spm_filename_prefix + ".model", yaml_filename=f"config_{args.task}.yaml", specaugment_policy="lb", ) # Clean up shutil.rmtree(audio_root) def process_joint(args): cur_root = Path(args.data_root) assert all((cur_root / f"{lang}").is_dir() for lang in mTEDx.LANGPAIRS), \ "do not have downloaded data available for all languages" # Generate vocab vocab_size_str = "" if args.vocab_type == "char" else str(args.vocab_size) spm_filename_prefix = f"spm_{args.vocab_type}{vocab_size_str}_{args.task}" with NamedTemporaryFile(mode="w") as f: for lang in mTEDx.LANGPAIRS: tsv_path = cur_root / f"{lang}" / f"train_{args.task}.tsv" df = load_df_from_tsv(tsv_path) for t in df["tgt_text"]: f.write(t + "\n") special_symbols = None if args.joint: # Add tgt_lang tags to dict special_symbols = list( {f'<lang:{lang.split("-")[1]}>' for lang in mTEDx.LANGPAIRS} ) gen_vocab( Path(f.name), cur_root / spm_filename_prefix, args.vocab_type, args.vocab_size, special_symbols=special_symbols ) # Generate config YAML gen_config_yaml( cur_root, spm_filename=spm_filename_prefix + ".model", yaml_filename=f"config_{args.task}.yaml", specaugment_policy="ld", 
prepend_tgt_lang_tag=(args.joint), ) # Make symbolic links to manifests for lang in mTEDx.LANGPAIRS: for split in mTEDx.SPLITS: src_path = cur_root / f"{lang}" / f"{split}_{args.task}.tsv" desc_path = cur_root / f"{split}_{lang}_{args.task}.tsv" if not desc_path.is_symlink(): os.symlink(src_path, desc_path) def main(): parser = argparse.ArgumentParser() parser.add_argument("--data-root", "-d", required=True, type=str) parser.add_argument( "--vocab-type", default="unigram", required=True, type=str, choices=["bpe", "unigram", "char"], ), parser.add_argument("--vocab-size", default=8000, type=int) parser.add_argument("--task", type=str, choices=["asr", "st"]) parser.add_argument("--joint", action="store_true", help="") parser.add_argument("--use-audio-input", action="store_true") args = parser.parse_args() if args.joint: process_joint(args) else: process(args) if __name__ == "__main__": main()
KosmosX-API-main
kosmosX/fairseq/examples/speech_to_text/prep_mtedx_data.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import csv from pathlib import Path import zipfile from functools import reduce from multiprocessing import cpu_count from typing import Any, Dict, List, Optional, Union import io import numpy as np import pandas as pd import sentencepiece as sp from fairseq.data.audio.audio_utils import ( convert_waveform, _get_kaldi_fbank, _get_torchaudio_fbank, is_npy_data, is_sf_audio_data ) import torch import soundfile as sf from tqdm import tqdm UNK_TOKEN, UNK_TOKEN_ID = "<unk>", 3 BOS_TOKEN, BOS_TOKEN_ID = "<s>", 0 EOS_TOKEN, EOS_TOKEN_ID = "</s>", 2 PAD_TOKEN, PAD_TOKEN_ID = "<pad>", 1 def gen_vocab( input_path: Path, output_path_prefix: Path, model_type="bpe", vocab_size=1000, special_symbols: Optional[List[str]] = None ): # Train SentencePiece Model arguments = [ f"--input={input_path.as_posix()}", f"--model_prefix={output_path_prefix.as_posix()}", f"--model_type={model_type}", f"--vocab_size={vocab_size}", "--character_coverage=1.0", f"--num_threads={cpu_count()}", f"--unk_id={UNK_TOKEN_ID}", f"--bos_id={BOS_TOKEN_ID}", f"--eos_id={EOS_TOKEN_ID}", f"--pad_id={PAD_TOKEN_ID}", ] if special_symbols is not None: _special_symbols = ",".join(special_symbols) arguments.append(f"--user_defined_symbols={_special_symbols}") sp.SentencePieceTrainer.Train(" ".join(arguments)) # Export fairseq dictionary spm = sp.SentencePieceProcessor() spm.Load(output_path_prefix.as_posix() + ".model") vocab = {i: spm.IdToPiece(i) for i in range(spm.GetPieceSize())} assert ( vocab.get(UNK_TOKEN_ID) == UNK_TOKEN and vocab.get(PAD_TOKEN_ID) == PAD_TOKEN and vocab.get(BOS_TOKEN_ID) == BOS_TOKEN and vocab.get(EOS_TOKEN_ID) == EOS_TOKEN ) vocab = { i: s for i, s in vocab.items() if s not in {UNK_TOKEN, BOS_TOKEN, EOS_TOKEN, PAD_TOKEN} } with open(output_path_prefix.as_posix() + ".txt", "w") as f_out: for _, s in sorted(vocab.items(), key=lambda x: x[0]): f_out.write(f"{s} 1\n") def extract_fbank_features( waveform: torch.FloatTensor, sample_rate: int, output_path: Optional[Path] = None, n_mel_bins: int = 80, overwrite: bool = False, ): if output_path is not None and output_path.is_file() and not overwrite: return _waveform, _ = convert_waveform(waveform, sample_rate, to_mono=True) # Kaldi compliance: 16-bit signed integers _waveform = _waveform * (2 ** 15) _waveform = _waveform[0].numpy() features = _get_kaldi_fbank(_waveform, sample_rate, n_mel_bins) if features is None: features = _get_torchaudio_fbank(_waveform, sample_rate, n_mel_bins) if features is None: raise ImportError( "Please install pyKaldi or torchaudio to enable fbank feature extraction" ) if output_path is not None: np.save(output_path.as_posix(), features) return features def create_zip(data_root: Path, zip_path: Path): paths = list(data_root.glob("*.npy")) paths.extend(data_root.glob("*.flac")) with zipfile.ZipFile(zip_path, "w", zipfile.ZIP_STORED) as f: for path in tqdm(paths): f.write(path, arcname=path.name) def get_zip_manifest( zip_path: Path, zip_root: Optional[Path] = None, is_audio=False ): _zip_path = Path.joinpath(zip_root or Path(""), zip_path) with zipfile.ZipFile(_zip_path, mode="r") as f: info = f.infolist() paths, lengths = {}, {} for i in tqdm(info): utt_id = Path(i.filename).stem offset, file_size = i.header_offset + 30 + len(i.filename), i.file_size paths[utt_id] = f"{zip_path.as_posix()}:{offset}:{file_size}" with open(_zip_path, "rb") as f: f.seek(offset) byte_data 
= f.read(file_size) assert len(byte_data) > 1 if is_audio: assert is_sf_audio_data(byte_data), i else: assert is_npy_data(byte_data), i byte_data_fp = io.BytesIO(byte_data) if is_audio: lengths[utt_id] = sf.info(byte_data_fp).frames else: lengths[utt_id] = np.load(byte_data_fp).shape[0] return paths, lengths def gen_config_yaml( manifest_root: Path, spm_filename: Optional[str] = None, vocab_name: Optional[str] = None, yaml_filename: str = "config.yaml", specaugment_policy: Optional[str] = "lb", prepend_tgt_lang_tag: bool = False, sampling_alpha: Optional[float] = None, input_channels: Optional[int] = 1, input_feat_per_channel: Optional[int] = 80, audio_root: str = "", cmvn_type: str = "utterance", gcmvn_path: Optional[Path] = None, extra=None ): manifest_root = manifest_root.absolute() writer = S2TDataConfigWriter(manifest_root / yaml_filename) assert spm_filename is not None or vocab_name is not None vocab_name = spm_filename.replace(".model", ".txt") if vocab_name is None \ else vocab_name writer.set_vocab_filename(vocab_name) if input_channels is not None: writer.set_input_channels(input_channels) if input_feat_per_channel is not None: writer.set_input_feat_per_channel(input_feat_per_channel) specaugment_setters = { "lb": writer.set_specaugment_lb_policy, "ld": writer.set_specaugment_ld_policy, "sm": writer.set_specaugment_sm_policy, "ss": writer.set_specaugment_ss_policy, } specaugment_setter = specaugment_setters.get(specaugment_policy, None) if specaugment_setter is not None: specaugment_setter() if spm_filename is not None: writer.set_bpe_tokenizer( { "bpe": "sentencepiece", "sentencepiece_model": (manifest_root / spm_filename).as_posix(), } ) if prepend_tgt_lang_tag: writer.set_prepend_tgt_lang_tag(True) if sampling_alpha is not None: writer.set_sampling_alpha(sampling_alpha) if cmvn_type not in ["global", "utterance"]: raise NotImplementedError if specaugment_policy is not None: writer.set_feature_transforms( "_train", [f"{cmvn_type}_cmvn", "specaugment"] ) writer.set_feature_transforms("*", [f"{cmvn_type}_cmvn"]) if cmvn_type == "global": if gcmvn_path is None: raise ValueError("Please provide path of global cmvn file.") else: writer.set_global_cmvn(gcmvn_path.as_posix()) if len(audio_root) > 0: writer.set_audio_root(audio_root) if extra is not None: writer.set_extra(extra) writer.flush() def load_df_from_tsv(path: Union[str, Path]) -> pd.DataFrame: _path = path if isinstance(path, str) else path.as_posix() return pd.read_csv( _path, sep="\t", header=0, encoding="utf-8", escapechar="\\", quoting=csv.QUOTE_NONE, na_filter=False, ) def save_df_to_tsv(dataframe, path: Union[str, Path]): _path = path if isinstance(path, str) else path.as_posix() dataframe.to_csv( _path, sep="\t", header=True, index=False, encoding="utf-8", escapechar="\\", quoting=csv.QUOTE_NONE, ) def load_tsv_to_dicts(path: Union[str, Path]) -> List[dict]: with open(path, "r") as f: reader = csv.DictReader( f, delimiter="\t", quotechar=None, doublequote=False, lineterminator="\n", quoting=csv.QUOTE_NONE, ) rows = [dict(e) for e in reader] return rows def filter_manifest_df( df, is_train_split=False, extra_filters=None, min_n_frames=5, max_n_frames=3000 ): filters = { "no speech": df["audio"] == "", f"short speech (<{min_n_frames} frames)": df["n_frames"] < min_n_frames, "empty sentence": df["tgt_text"] == "", } if is_train_split: filters[f"long speech (>{max_n_frames} frames)"] = df["n_frames"] > max_n_frames if extra_filters is not None: filters.update(extra_filters) invalid = reduce(lambda x, y: x | y, 
filters.values()) valid = ~invalid print( "| " + ", ".join(f"{n}: {f.sum()}" for n, f in filters.items()) + f", total {invalid.sum()} filtered, {valid.sum()} remained." ) return df[valid] def cal_gcmvn_stats(features_list): features = np.concatenate(features_list) square_sums = (features ** 2).sum(axis=0) mean = features.mean(axis=0) features = np.subtract(features, mean) var = square_sums / features.shape[0] - mean ** 2 std = np.sqrt(np.maximum(var, 1e-8)) return {"mean": mean.astype("float32"), "std": std.astype("float32")} class S2TDataConfigWriter(object): DEFAULT_VOCAB_FILENAME = "dict.txt" DEFAULT_INPUT_FEAT_PER_CHANNEL = 80 DEFAULT_INPUT_CHANNELS = 1 def __init__(self, yaml_path: Path): try: import yaml except ImportError: print("Please install PyYAML for S2T data config YAML files") self.yaml = yaml self.yaml_path = yaml_path self.config = {} def flush(self): with open(self.yaml_path, "w") as f: self.yaml.dump(self.config, f) def set_audio_root(self, audio_root=""): self.config["audio_root"] = audio_root def set_vocab_filename(self, vocab_filename: str = "dict.txt"): self.config["vocab_filename"] = vocab_filename def set_specaugment( self, time_wrap_w: int, freq_mask_n: int, freq_mask_f: int, time_mask_n: int, time_mask_t: int, time_mask_p: float, ): self.config["specaugment"] = { "time_wrap_W": time_wrap_w, "freq_mask_N": freq_mask_n, "freq_mask_F": freq_mask_f, "time_mask_N": time_mask_n, "time_mask_T": time_mask_t, "time_mask_p": time_mask_p, } def set_specaugment_lb_policy(self): self.set_specaugment( time_wrap_w=0, freq_mask_n=1, freq_mask_f=27, time_mask_n=1, time_mask_t=100, time_mask_p=1.0, ) def set_specaugment_ld_policy(self): self.set_specaugment( time_wrap_w=0, freq_mask_n=2, freq_mask_f=27, time_mask_n=2, time_mask_t=100, time_mask_p=1.0, ) def set_specaugment_sm_policy(self): self.set_specaugment( time_wrap_w=0, freq_mask_n=2, freq_mask_f=15, time_mask_n=2, time_mask_t=70, time_mask_p=0.2, ) def set_specaugment_ss_policy(self): self.set_specaugment( time_wrap_w=0, freq_mask_n=2, freq_mask_f=27, time_mask_n=2, time_mask_t=70, time_mask_p=0.2, ) def set_input_channels(self, input_channels: int = 1): self.config["input_channels"] = input_channels def set_input_feat_per_channel(self, input_feat_per_channel: int = 80): self.config["input_feat_per_channel"] = input_feat_per_channel def set_bpe_tokenizer(self, bpe_tokenizer: Dict[str, Any]): self.config["bpe_tokenizer"] = bpe_tokenizer def set_global_cmvn(self, stats_npz_path: str): self.config["global_cmvn"] = {"stats_npz_path": stats_npz_path} def set_feature_transforms(self, split: str, transforms: List[str]): if "transforms" not in self.config: self.config["transforms"] = {} self.config["transforms"][split] = transforms def set_prepend_tgt_lang_tag(self, flag: bool = True): self.config["prepend_tgt_lang_tag"] = flag def set_sampling_alpha(self, sampling_alpha: float = 1.0): self.config["sampling_alpha"] = sampling_alpha def set_extra(self, data): self.config.update(data)
KosmosX-API-main
kosmosX/fairseq/examples/speech_to_text/data_utils.py
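A small usage sketch (not from the repository) for cal_gcmvn_stats above, which pools per-utterance fbank matrices into global mean/std vectors; the random feature matrices are placeholders:

import numpy as np
from examples.speech_to_text.data_utils import cal_gcmvn_stats

# four fake utterances, each a (num_frames, 80) log-mel feature matrix
features_list = [np.random.rand(n, 80).astype("float32") for n in (93, 120, 75, 210)]
stats = cal_gcmvn_stats(features_list)
print(stats["mean"].shape, stats["std"].shape)  # (80,) (80,)
np.savez("gcmvn.npz", mean=stats["mean"], std=stats["std"])  # same format the prep scripts write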
#!/usr/bin/env python3 # Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import argparse import logging from pathlib import Path import shutil from tempfile import NamedTemporaryFile import pandas as pd from examples.speech_to_text.data_utils import ( create_zip, extract_fbank_features, gen_config_yaml, gen_vocab, get_zip_manifest, save_df_to_tsv, ) from torchaudio.datasets import LIBRISPEECH from tqdm import tqdm log = logging.getLogger(__name__) SPLITS = [ "train-clean-100", "train-clean-360", "train-other-500", "dev-clean", "dev-other", "test-clean", "test-other", ] MANIFEST_COLUMNS = ["id", "audio", "n_frames", "tgt_text", "speaker"] def process(args): out_root = Path(args.output_root).absolute() out_root.mkdir(exist_ok=True) # Extract features feature_root = out_root / "fbank80" feature_root.mkdir(exist_ok=True) for split in SPLITS: print(f"Fetching split {split}...") dataset = LIBRISPEECH(out_root.as_posix(), url=split, download=True) print("Extracting log mel filter bank features...") for wav, sample_rate, _, spk_id, chapter_no, utt_no in tqdm(dataset): sample_id = f"{spk_id}-{chapter_no}-{utt_no}" extract_fbank_features( wav, sample_rate, feature_root / f"{sample_id}.npy" ) # Pack features into ZIP zip_path = out_root / "fbank80.zip" print("ZIPing features...") create_zip(feature_root, zip_path) print("Fetching ZIP manifest...") audio_paths, audio_lengths = get_zip_manifest(zip_path) # Generate TSV manifest print("Generating manifest...") train_text = [] for split in SPLITS: manifest = {c: [] for c in MANIFEST_COLUMNS} dataset = LIBRISPEECH(out_root.as_posix(), url=split) for _, _, utt, spk_id, chapter_no, utt_no in tqdm(dataset): sample_id = f"{spk_id}-{chapter_no}-{utt_no}" manifest["id"].append(sample_id) manifest["audio"].append(audio_paths[sample_id]) manifest["n_frames"].append(audio_lengths[sample_id]) manifest["tgt_text"].append(utt.lower()) manifest["speaker"].append(spk_id) save_df_to_tsv( pd.DataFrame.from_dict(manifest), out_root / f"{split}.tsv" ) if split.startswith("train"): train_text.extend(manifest["tgt_text"]) # Generate vocab vocab_size = "" if args.vocab_type == "char" else str(args.vocab_size) spm_filename_prefix = f"spm_{args.vocab_type}{vocab_size}" with NamedTemporaryFile(mode="w") as f: for t in train_text: f.write(t + "\n") gen_vocab( Path(f.name), out_root / spm_filename_prefix, args.vocab_type, args.vocab_size, ) # Generate config YAML gen_config_yaml( out_root, spm_filename=spm_filename_prefix + ".model", specaugment_policy="ld" ) # Clean up shutil.rmtree(feature_root) def main(): parser = argparse.ArgumentParser() parser.add_argument("--output-root", "-o", required=True, type=str) parser.add_argument( "--vocab-type", default="unigram", required=True, type=str, choices=["bpe", "unigram", "char"], ), parser.add_argument("--vocab-size", default=10000, type=int) args = parser.parse_args() process(args) if __name__ == "__main__": main()
KosmosX-API-main
kosmosX/fairseq/examples/speech_to_text/prep_librispeech_data.py
import math import os import json import numpy as np import torch import torchaudio.compliance.kaldi as kaldi import yaml from fairseq import checkpoint_utils, tasks from fairseq.file_io import PathManager try: from simuleval import READ_ACTION, WRITE_ACTION, DEFAULT_EOS from simuleval.agents import SpeechAgent from simuleval.states import ListEntry, SpeechStates except ImportError: print("Please install simuleval 'pip install simuleval'") SHIFT_SIZE = 10 WINDOW_SIZE = 25 SAMPLE_RATE = 16000 FEATURE_DIM = 80 BOW_PREFIX = "\u2581" class OnlineFeatureExtractor: """ Extract speech feature on the fly. """ def __init__(self, args): self.shift_size = args.shift_size self.window_size = args.window_size assert self.window_size >= self.shift_size self.sample_rate = args.sample_rate self.feature_dim = args.feature_dim self.num_samples_per_shift = int(self.shift_size * self.sample_rate / 1000) self.num_samples_per_window = int(self.window_size * self.sample_rate / 1000) self.len_ms_to_samples = lambda x: x * self.sample_rate / 1000 self.previous_residual_samples = [] self.global_cmvn = args.global_cmvn def clear_cache(self): self.previous_residual_samples = [] def __call__(self, new_samples): samples = self.previous_residual_samples + new_samples if len(samples) < self.num_samples_per_window: self.previous_residual_samples = samples return # num_frames is the number of frames from the new segment num_frames = math.floor( (len(samples) - self.len_ms_to_samples(self.window_size - self.shift_size)) / self.num_samples_per_shift ) # the number of frames used for feature extraction # including some part of thte previous segment effective_num_samples = int( num_frames * self.len_ms_to_samples(self.shift_size) + self.len_ms_to_samples(self.window_size - self.shift_size) ) input_samples = samples[:effective_num_samples] self.previous_residual_samples = samples[ num_frames * self.num_samples_per_shift: ] torch.manual_seed(1) output = kaldi.fbank( torch.FloatTensor(input_samples).unsqueeze(0), num_mel_bins=self.feature_dim, frame_length=self.window_size, frame_shift=self.shift_size, ).numpy() output = self.transform(output) return torch.from_numpy(output) def transform(self, input): if self.global_cmvn is None: return input mean = self.global_cmvn["mean"] std = self.global_cmvn["std"] x = np.subtract(input, mean) x = np.divide(x, std) return x class TensorListEntry(ListEntry): """ Data structure to store a list of tensor. 
""" def append(self, value): if len(self.value) == 0: self.value = value return self.value = torch.cat([self.value] + [value], dim=0) def info(self): return { "type": str(self.new_value_type), "length": self.__len__(), "value": "" if type(self.value) is list else self.value.size(), } class FairseqSimulSTAgent(SpeechAgent): speech_segment_size = 40 # in ms, 4 pooling ratio * 10 ms step size def __init__(self, args): super().__init__(args) self.eos = DEFAULT_EOS self.gpu = getattr(args, "gpu", False) self.args = args self.load_model_vocab(args) if getattr( self.model.decoder.layers[0].encoder_attn, 'pre_decision_ratio', None ) is not None: self.speech_segment_size *= ( self.model.decoder.layers[0].encoder_attn.pre_decision_ratio ) args.global_cmvn = None if args.config: with open(os.path.join(args.data_bin, args.config), "r") as f: config = yaml.load(f, Loader=yaml.BaseLoader) if "global_cmvn" in config: args.global_cmvn = np.load(config["global_cmvn"]["stats_npz_path"]) if args.global_stats: with PathManager.open(args.global_stats, "r") as f: global_cmvn = json.loads(f.read()) self.global_cmvn = {"mean": global_cmvn["mean"], "std": global_cmvn["stddev"]} self.feature_extractor = OnlineFeatureExtractor(args) self.max_len = args.max_len self.force_finish = args.force_finish torch.set_grad_enabled(False) def build_states(self, args, client, sentence_id): # Initialize states here, for example add customized entry to states # This function will be called at beginning of every new sentence states = SpeechStates(args, client, sentence_id, self) self.initialize_states(states) return states def to_device(self, tensor): if self.gpu: return tensor.cuda() else: return tensor.cpu() @staticmethod def add_args(parser): # fmt: off parser.add_argument('--model-path', type=str, required=True, help='path to your pretrained model.') parser.add_argument("--data-bin", type=str, required=True, help="Path of data binary") parser.add_argument("--config", type=str, default=None, help="Path to config yaml file") parser.add_argument("--global-stats", type=str, default=None, help="Path to json file containing cmvn stats") parser.add_argument("--tgt-splitter-type", type=str, default="SentencePiece", help="Subword splitter type for target text") parser.add_argument("--tgt-splitter-path", type=str, default=None, help="Subword splitter model path for target text") parser.add_argument("--user-dir", type=str, default="examples/simultaneous_translation", help="User directory for simultaneous translation") parser.add_argument("--max-len", type=int, default=200, help="Max length of translation") parser.add_argument("--force-finish", default=False, action="store_true", help="Force the model to finish the hypothsis if the source is not finished") parser.add_argument("--shift-size", type=int, default=SHIFT_SIZE, help="Shift size of feature extraction window.") parser.add_argument("--window-size", type=int, default=WINDOW_SIZE, help="Window size of feature extraction window.") parser.add_argument("--sample-rate", type=int, default=SAMPLE_RATE, help="Sample rate") parser.add_argument("--feature-dim", type=int, default=FEATURE_DIM, help="Acoustic feature dimension.") # fmt: on return parser def load_model_vocab(self, args): filename = args.model_path if not os.path.exists(filename): raise IOError("Model file not found: {}".format(filename)) state = checkpoint_utils.load_checkpoint_to_cpu(filename) task_args = state["cfg"]["task"] task_args.data = args.data_bin if args.config is not None: task_args.config_yaml = args.config task = 
tasks.setup_task(task_args) # build model for ensemble state["cfg"]["model"].load_pretrained_encoder_from = None state["cfg"]["model"].load_pretrained_decoder_from = None self.model = task.build_model(state["cfg"]["model"]) self.model.load_state_dict(state["model"], strict=True) self.model.eval() self.model.share_memory() if self.gpu: self.model.cuda() # Set dictionary self.dict = {} self.dict["tgt"] = task.target_dictionary def initialize_states(self, states): self.feature_extractor.clear_cache() states.units.source = TensorListEntry() states.units.target = ListEntry() states.incremental_states = dict() def segment_to_units(self, segment, states): # Convert speech samples to features features = self.feature_extractor(segment) if features is not None: return [features] else: return [] def units_to_segment(self, units, states): # Merge sub word to full word. if self.model.decoder.dictionary.eos() == units[0]: return DEFAULT_EOS segment = [] if None in units.value: units.value.remove(None) for index in units: if index is None: units.pop() token = self.model.decoder.dictionary.string([index]) if token.startswith(BOW_PREFIX): if len(segment) == 0: segment += [token.replace(BOW_PREFIX, "")] else: for j in range(len(segment)): units.pop() string_to_return = ["".join(segment)] if self.model.decoder.dictionary.eos() == units[0]: string_to_return += [DEFAULT_EOS] return string_to_return else: segment += [token.replace(BOW_PREFIX, "")] if ( len(units) > 0 and self.model.decoder.dictionary.eos() == units[-1] or len(states.units.target) > self.max_len ): tokens = [self.model.decoder.dictionary.string([unit]) for unit in units] return ["".join(tokens).replace(BOW_PREFIX, "")] + [DEFAULT_EOS] return None def update_model_encoder(self, states): if len(states.units.source) == 0: return src_indices = self.to_device( states.units.source.value.unsqueeze(0) ) src_lengths = self.to_device( torch.LongTensor([states.units.source.value.size(0)]) ) states.encoder_states = self.model.encoder(src_indices, src_lengths) torch.cuda.empty_cache() def update_states_read(self, states): # Happens after a read action. self.update_model_encoder(states) def policy(self, states): if not getattr(states, "encoder_states", None): return READ_ACTION tgt_indices = self.to_device( torch.LongTensor( [self.model.decoder.dictionary.eos()] + [x for x in states.units.target.value if x is not None] ).unsqueeze(0) ) states.incremental_states["steps"] = { "src": states.encoder_states["encoder_out"][0].size(0), "tgt": 1 + len(states.units.target), } states.incremental_states["online"] = {"only": torch.tensor(not states.finish_read())} x, outputs = self.model.decoder.forward( prev_output_tokens=tgt_indices, encoder_out=states.encoder_states, incremental_state=states.incremental_states, ) states.decoder_out = x states.decoder_out_extra = outputs torch.cuda.empty_cache() if outputs.action == 0: return READ_ACTION else: return WRITE_ACTION def predict(self, states): decoder_states = states.decoder_out lprobs = self.model.get_normalized_probs( [decoder_states[:, -1:]], log_probs=True ) index = lprobs.argmax(dim=-1) index = index[0, 0].item() if ( self.force_finish and index == self.model.decoder.dictionary.eos() and not states.finish_read() ): # If we want to force finish the translation # (don't stop before finish reading), return a None # self.model.decoder.clear_cache(states.incremental_states) index = None return index
KosmosX-API-main
kosmosX/fairseq/examples/speech_to_text/simultaneous_translation/agents/fairseq_simul_st_agent.py
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

# Use: echo {text} | python tokenize_indic.py {language}

import sys

from indicnlp.normalize.indic_normalize import IndicNormalizerFactory
from indicnlp.tokenize.indic_tokenize import trivial_tokenize


factory = IndicNormalizerFactory()
normalizer = factory.get_normalizer(
    sys.argv[1], remove_nuktas=False, nasals_mode="do_nothing"
)

for line in sys.stdin:
    normalized_line = normalizer.normalize(line.strip())
    tokenized_line = " ".join(trivial_tokenize(normalized_line, sys.argv[1]))
    print(tokenized_line)
KosmosX-API-main
kosmosX/fairseq/examples/m2m_100/tokenizers/tokenize_indic.py
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

import sys

from pythainlp import word_tokenize


for line in sys.stdin:
    print(" ".join(word_tokenize(line.strip())))
KosmosX-API-main
kosmosX/fairseq/examples/m2m_100/tokenizers/tokenize_thai.py
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

import fileinput

import sacrebleu


for line in fileinput.input():
    print(sacrebleu.tokenize_zh(line))
KosmosX-API-main
kosmosX/fairseq/examples/m2m_100/tokenizers/tokenize_zh.py
import argparse from collections import namedtuple import os DATADIR = "/path/to/train_data" DEDUP_FROM_DIR = "/path/to/eval/data" OUTPUT_DIR = "/path/to/output/data" def main(args): languages = set() for language_directory in os.listdir(DATADIR): if "_" in language_directory: src, tgt = language_directory.split("_") languages.add(LanguagePair(src=src, tgt=tgt)) data = existing_data() train_languages = sorted(languages) for language_pair in train_languages[args.start_index:args.start_index + args.size]: print(language_pair) dedup(language_pair, data) LanguagePair = namedtuple("LanguagePair", ["src", "tgt"]) def existing_data(): data = set() for file in os.listdir(DEDUP_FROM_DIR): with open(os.path.join(DEDUP_FROM_DIR, file)) as f: data |= set(f.readlines()) return data def dedup(language_pair, data, verbose=True, output=True): train_filenames = LanguagePair( src=f"{DATADIR}/{language_pair.src}_{language_pair.tgt}/train.{language_pair.src}", tgt=f"{DATADIR}/{language_pair.src}_{language_pair.tgt}/train.{language_pair.tgt}", ) output_filenames = LanguagePair( src=f"{OUTPUT_DIR}/train.dedup.{language_pair.src}-{language_pair.tgt}.{language_pair.src}", tgt=f"{OUTPUT_DIR}/train.dedup.{language_pair.src}-{language_pair.tgt}.{language_pair.tgt}" ) # If output exists, skip this pair. It has already been done. if (os.path.exists(output_filenames.src) and os.path.exists(output_filenames.tgt)): if verbose: print(f"{language_pair.src}-{language_pair.tgt} already done.") return if verbose: print(f"{language_pair.src}-{language_pair.tgt} ready, will check dups.") # If there is no output, no need to actually do the loop. if not output: return if os.path.exists(train_filenames.src) and os.path.exists(train_filenames.tgt): with open(train_filenames.src) as f: train_source = f.readlines() with open(train_filenames.tgt) as f: train_target = f.readlines() # do dedup new_train_source = [] new_train_target = [] for i, train_line in enumerate(train_source): if train_line not in data and train_target[i] not in data: new_train_source.append(train_line) new_train_target.append(train_target[i]) assert len(train_source) == len(train_target) assert len(new_train_source) == len(new_train_target) assert len(new_train_source) <= len(train_source) with open(output_filenames.src, "w") as o: for line in new_train_source: o.write(line) with open(output_filenames.tgt, "w") as o: for line in new_train_target: o.write(line) if __name__ == '__main__': parser = argparse.ArgumentParser() parser.add_argument("-s", "--start-index", required=True, type=int) parser.add_argument("-n", "--size", required=True, type=int) main(parser.parse_args())
KosmosX-API-main
kosmosX/fairseq/examples/m2m_100/process_data/dedup_data.py
import gzip
import argparse
from string import punctuation


def len_no_punc(s, punc):
    return len([ch for ch in s if ch in punc])


def filter_overpunc(len_npunc, len_sen):
    return len_npunc < 0.5 * len_sen


def main(args):
    punc = punctuation + "—|–"
    print('Processing file {}'.format(args.input))
    with gzip.open(args.input, 'rt', encoding=args.encoding) as tsv:
        with open(args.bitext + '.' + args.src_lang, 'wt', encoding=args.encoding) as fsrc:
            with open(args.bitext + '.' + args.tgt_lang, 'wt', encoding=args.encoding) as ftgt:
                line = tsv.readline()
                fields = line.split('\t')

                src, tgt = fields[1], fields[2]
                nchar_npunc_src = len_no_punc(src, punc)
                nchar_npunc_tgt = len_no_punc(tgt, punc)

                if filter_overpunc(nchar_npunc_src, len(src)) and filter_overpunc(
                    nchar_npunc_tgt, len(tgt)
                ):
                    fsrc.write(src.strip() + '\n')
                    ftgt.write(tgt.strip() + '\n')


if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument("--input", required=True, type=str)
    parser.add_argument('--encoding', default='utf-8', help='character encoding for input/output')
    parser.add_argument('--bitext', type=str, required=True, help='language direction')
    parser.add_argument('--src-lang', type=str, required=True, help='Source language')
    parser.add_argument('--tgt-lang', type=str, required=True, help='Target language')
    main(parser.parse_args())
KosmosX-API-main
kosmosX/fairseq/examples/m2m_100/process_data/remove_too_much_punc.py
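As a quick illustration of the two helpers above (not part of the script): len_no_punc counts the punctuation characters in a string, and filter_overpunc keeps a sentence only when punctuation makes up less than half of its characters.

from string import punctuation

punc = punctuation + "—|–"
s = "Hello, world !!!"
n_punc = len([ch for ch in s if ch in punc])   # 4 punctuation characters
keep = n_punc < 0.5 * len(s)                   # True: 4 < 8.0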
import argparse

parser = argparse.ArgumentParser()
parser.add_argument('--src', type=str, help='Source language')
parser.add_argument('--tgt', type=str, help='Target language')
parser.add_argument('--src-file', type=str, help='Input source file')
parser.add_argument('--tgt-file', type=str, help='Input target file')
parser.add_argument('--src-output-file', type=str, help='Output source file')
parser.add_argument('--tgt-output-file', type=str, help='Output target file')
parser.add_argument('--threshold', type=float, default=0.5, help='Threshold')
parser.add_argument('--threshold-character', type=str, default=']', help='Threshold character')
parser.add_argument('--histograms', type=str, help='Path to histograms')
args = parser.parse_args()


def read_hist(f):
    ch = []
    for line in f:
        c = line[0]
        if c == args.threshold_character:
            break
        ch.append(c)
    return ch


with open("{}/{}".format(args.histograms, args.src), 'r', encoding='utf8') as f:
    ch1 = read_hist(f)

with open("{}/{}".format(args.histograms, args.tgt), 'r', encoding='utf8') as f:
    ch2 = read_hist(f)

print("Accepted characters for {}: {}".format(args.src, ch1))
print("Accepted characters for {}: {}".format(args.tgt, ch2))

with open(args.src_file, 'r', encoding='utf8') as fs1, \
        open(args.tgt_file, 'r', encoding='utf8') as fs2, \
        open(args.src_output_file, 'w', encoding='utf8') as fos1, \
        open(args.tgt_output_file, 'w', encoding='utf8') as fos2:
    ls1 = fs1.readline()
    ls2 = fs2.readline()

    while ls1 or ls2:
        cnt1 = len([c for c in ls1.strip() if c in ch1])
        cnt2 = len([c for c in ls2.strip() if c in ch2])

        if cnt1 / len(ls1) > args.threshold and cnt2 / len(ls2) > args.threshold:
            fos1.write(ls1)
            fos2.write(ls2)
        else:
            print("{} {} {} \n{} {} {}".format(
                args.src, cnt1 / len(ls1), ls1.strip(),
                args.tgt, cnt2 / len(ls2), ls2.strip()
            ))

        ls1 = fs1.readline()
        ls2 = fs2.readline()
KosmosX-API-main
kosmosX/fairseq/examples/m2m_100/process_data/clean_histogram.py
#!/usr/bin/env python3 -u import argparse import fileinput import logging import os from fairseq.models.transformer import TransformerModel logging.getLogger().setLevel(logging.INFO) def main(): parser = argparse.ArgumentParser(description="") parser.add_argument("--en2fr", required=True, help="path to en2fr model") parser.add_argument( "--fr2en", required=True, help="path to fr2en mixture of experts model" ) parser.add_argument( "--user-dir", help="path to fairseq examples/translation_moe/src directory" ) parser.add_argument( "--num-experts", type=int, default=10, help="(keep at 10 unless using a different model)", ) parser.add_argument( "files", nargs="*", default=["-"], help='input files to paraphrase; "-" for stdin', ) args = parser.parse_args() if args.user_dir is None: args.user_dir = os.path.join( os.path.dirname(os.path.dirname(os.path.abspath(__file__))), # examples/ "translation_moe", "src", ) if os.path.exists(args.user_dir): logging.info("found user_dir:" + args.user_dir) else: raise RuntimeError( "cannot find fairseq examples/translation_moe/src " "(tried looking here: {})".format(args.user_dir) ) logging.info("loading en2fr model from:" + args.en2fr) en2fr = TransformerModel.from_pretrained( model_name_or_path=args.en2fr, tokenizer="moses", bpe="sentencepiece", ).eval() logging.info("loading fr2en model from:" + args.fr2en) fr2en = TransformerModel.from_pretrained( model_name_or_path=args.fr2en, tokenizer="moses", bpe="sentencepiece", user_dir=args.user_dir, task="translation_moe", ).eval() def gen_paraphrases(en): fr = en2fr.translate(en) return [ fr2en.translate(fr, inference_step_args={"expert": i}) for i in range(args.num_experts) ] logging.info("Type the input sentence and press return:") for line in fileinput.input(args.files): line = line.strip() if len(line) == 0: continue for paraphrase in gen_paraphrases(line): print(paraphrase) if __name__ == "__main__": main()
KosmosX-API-main
kosmosX/fairseq/examples/paraphraser/paraphrase.py
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

import argparse
import sys

import numpy as np


aggregate_funcs = {
    "std": np.std,
    "var": np.var,
    "median": np.median,
    "mean": np.mean,
    "min": np.min,
    "max": np.max,
}


def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("-i", "--input_file", required=True, type=str)
    parser.add_argument("-n", "--repeat_times", required=True, type=int)
    parser.add_argument("-o", "--output_file", required=False)
    parser.add_argument("-f", "--func", required=False, default="mean")
    args = parser.parse_args()

    stream = open(args.output_file, "w") if args.output_file else sys.stdout

    segment_scores = []
    for line in open(args.input_file):
        segment_scores.append(float(line.strip()))
        if len(segment_scores) == args.repeat_times:
            stream.write("{}\n".format(aggregate_funcs[args.func](segment_scores)))
            segment_scores = []


if __name__ == "__main__":
    main()
KosmosX-API-main
kosmosX/fairseq/examples/unsupervised_quality_estimation/aggregate_scores.py
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

import argparse
import sys


def _normalize_spaces(line):
    return " ".join(line.split())


def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("-i", "--input_file", required=True, type=str)
    parser.add_argument("-n", "--repeat_times", required=True, type=int)
    parser.add_argument("-o", "--output_file", required=False, type=str)
    args = parser.parse_args()

    stream = open(args.output_file, "w") if args.output_file else sys.stdout

    for line in open(args.input_file):
        for _ in range(args.repeat_times):
            stream.write(_normalize_spaces(line) + "\n")


if __name__ == "__main__":
    main()
KosmosX-API-main
kosmosX/fairseq/examples/unsupervised_quality_estimation/repeat_lines.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import argparse import math import os import subprocess import sys import tempfile from collections import defaultdict from itertools import combinations def read_translations(path, n_repeats): segment_counter = 0 segment_translations = [] translations = defaultdict(list) for line in open(path): segment_translations.append(" ".join(line.split())) if len(segment_translations) == n_repeats: translations[segment_counter] = segment_translations segment_translations = [] segment_counter += 1 return translations def generate_input(translations, n_repeats): _, ref_path = tempfile.mkstemp() _, mt_path = tempfile.mkstemp() ref_fh = open(ref_path, "w") mt_fh = open(mt_path, "w") for segid in sorted(translations.keys()): assert len(translations[segid]) == n_repeats indexes = combinations(range(n_repeats), 2) for idx1, idx2 in indexes: mt_fh.write(translations[segid][idx1].strip() + "\n") ref_fh.write(translations[segid][idx2].strip() + "\n") sys.stderr.write("\nSaved translations to %s and %s" % (ref_path, mt_path)) return ref_path, mt_path def run_meteor(ref_path, mt_path, metric_path, lang="en"): _, out_path = tempfile.mkstemp() subprocess.call( [ "java", "-Xmx2G", "-jar", metric_path, mt_path, ref_path, "-p", "0.5 0.2 0.6 0.75", # default parameters, only changed alpha to give equal weight to P and R "-norm", "-l", lang, ], stdout=open(out_path, "w"), ) os.remove(ref_path) os.remove(mt_path) sys.stderr.write("\nSaved Meteor output to %s" % out_path) return out_path def read_output(meteor_output_path, n_repeats): n_combinations = math.factorial(n_repeats) / ( math.factorial(2) * math.factorial(n_repeats - 2) ) raw_scores = [] average_scores = [] for line in open(meteor_output_path): if not line.startswith("Segment "): continue score = float(line.strip().split("\t")[1]) raw_scores.append(score) if len(raw_scores) == n_combinations: average_scores.append(sum(raw_scores) / n_combinations) raw_scores = [] os.remove(meteor_output_path) return average_scores def main(): parser = argparse.ArgumentParser() parser.add_argument("-i", "--infile") parser.add_argument("-n", "--repeat_times", type=int) parser.add_argument("-m", "--meteor") parser.add_argument("-o", "--output") args = parser.parse_args() translations = read_translations(args.infile, args.repeat_times) sys.stderr.write("\nGenerating input for Meteor...") ref_path, mt_path = generate_input(translations, args.repeat_times) sys.stderr.write("\nRunning Meteor...") out_path = run_meteor(ref_path, mt_path, args.meteor) sys.stderr.write("\nReading output...") scores = read_output(out_path, args.repeat_times) sys.stderr.write("\nWriting results...") with open(args.output, "w") as o: for scr in scores: o.write("{}\n".format(scr)) o.close() if __name__ == "__main__": main()
KosmosX-API-main
kosmosX/fairseq/examples/unsupervised_quality_estimation/meteor.py
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

from . import models  # noqa
KosmosX-API-main
kosmosX/fairseq/examples/simultaneous_translation/__init__.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import torch def prob_check(tensor, eps=1e-10): assert not torch.isnan(tensor).any(), ( "Nan in a probability tensor." ) # Add the eps here to prevent errors introduced by precision assert tensor.le(1.0 + eps).all() and tensor.ge(0.0 - eps).all(), ( "Incorrect values in a probability tensor" ", 0.0 <= tensor <= 1.0" ) def exclusive_cumprod(tensor, dim: int, eps: float = 1e-10): """ Implementing exclusive cumprod. There is cumprod in pytorch, however there is no exclusive mode. cumprod(x) = [x1, x1x2, x2x3x4, ..., prod_{i=1}^n x_i] exclusive means cumprod(x) = [1, x1, x1x2, x1x2x3, ..., prod_{i=1}^{n-1} x_i] """ tensor_size = list(tensor.size()) tensor_size[dim] = 1 return_tensor = safe_cumprod( torch.cat([torch.ones(tensor_size).type_as(tensor), tensor], dim=dim), dim=dim, eps=eps, ) if dim == 0: return return_tensor[:-1] elif dim == 1: return return_tensor[:, :-1] elif dim == 2: return return_tensor[:, :, :-1] else: raise RuntimeError( "Cumprod on dimension 3 and more is not implemented" ) def safe_cumprod(tensor, dim: int, eps: float = 1e-10): """ An implementation of cumprod to prevent precision issue. cumprod(x) = [x1, x1x2, x1x2x3, ....] = [exp(log(x1)), exp(log(x1) + log(x2)), exp(log(x1) + log(x2) + log(x3)), ...] = exp(cumsum(log(x))) """ if (tensor + eps < 0).any().item(): raise RuntimeError( "Safe cumprod can only take non-negative tensors as input." "Consider use torch.cumprod if you want to calculate negative values." ) log_tensor = torch.log(tensor + eps) cumsum_log_tensor = torch.cumsum(log_tensor, dim) exp_cumsum_log_tensor = torch.exp(cumsum_log_tensor) return exp_cumsum_log_tensor def moving_sum(x, start_idx: int, end_idx: int): """ From MONOTONIC CHUNKWISE ATTENTION https://arxiv.org/pdf/1712.05382.pdf Equation (18) x = [x_1, x_2, ..., x_N] MovingSum(x, start_idx, end_idx)_n = Sigma_{m=n−(start_idx−1)}^{n+end_idx-1} x_m for n in {1, 2, 3, ..., N} x : src_len, batch_size start_idx : start idx end_idx : end idx Example src_len = 5 batch_size = 3 x = [[ 0, 5, 10], [ 1, 6, 11], [ 2, 7, 12], [ 3, 8, 13], [ 4, 9, 14]] MovingSum(x, 3, 1) = [[ 0, 5, 10], [ 1, 11, 21], [ 3, 18, 33], [ 6, 21, 36], [ 9, 24, 39]] MovingSum(x, 1, 3) = [[ 3, 18, 33], [ 6, 21, 36], [ 9, 24, 39], [ 7, 17, 27], [ 4, 9, 14]] """ # TODO: Make dimension configurable assert start_idx > 0 and end_idx > 0 batch_size, tgt_len, src_len = x.size() x = x.view(-1, src_len).unsqueeze(1) # batch_size, 1, src_len moving_sum_weight = torch.ones([1, 1, end_idx + start_idx - 1]).type_as(x) moving_sum = torch.nn.functional.conv1d( x, moving_sum_weight, padding=start_idx + end_idx - 1 ).squeeze(1) moving_sum = moving_sum[:, end_idx:-start_idx] assert src_len == moving_sum.size(1) assert batch_size * tgt_len == moving_sum.size(0) moving_sum = moving_sum.view(batch_size, tgt_len, src_len) return moving_sum
KosmosX-API-main
kosmosX/fairseq/examples/simultaneous_translation/utils/functions.py
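A small numeric check of the helpers above, assuming fairseq and its examples package are importable from the working tree:
import torch
from examples.simultaneous_translation.utils.functions import (
    exclusive_cumprod,
    moving_sum,
    safe_cumprod,
)

p = torch.tensor([[0.5, 0.5, 0.5]])
print(safe_cumprod(p, dim=1))       # ~[[0.5000, 0.2500, 0.1250]]
print(exclusive_cumprod(p, dim=1))  # ~[[1.0000, 0.5000, 0.2500]]

# moving_sum expects (batch, tgt_len, src_len); with start_idx=3, end_idx=1
# each source position sums itself plus the two previous positions.
x = torch.arange(5.0).view(1, 1, 5)
print(moving_sum(x, start_idx=3, end_idx=1))  # [[[0., 1., 3., 6., 9.]]]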
from typing import Optional, Dict from torch import Tensor import torch def waitk_p_choose( tgt_len: int, src_len: int, bsz: int, waitk_lagging: int, key_padding_mask: Optional[Tensor] = None, incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]] = None ): max_src_len = src_len if incremental_state is not None: # Retrieve target length from incremental states # For inference the length of query is always 1 max_tgt_len = incremental_state["steps"]["tgt"] assert max_tgt_len is not None max_tgt_len = int(max_tgt_len) else: max_tgt_len = tgt_len if max_src_len < waitk_lagging: if incremental_state is not None: max_tgt_len = 1 return torch.zeros( bsz, max_tgt_len, max_src_len ) # Assuming the p_choose looks like this for wait k=3 # src_len = 6, max_tgt_len = 5 # [0, 0, 1, 0, 0, 0, 0] # [0, 0, 0, 1, 0, 0, 0] # [0, 0, 0, 0, 1, 0, 0] # [0, 0, 0, 0, 0, 1, 0] # [0, 0, 0, 0, 0, 0, 1] # linearize the p_choose matrix: # [0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0...] # The indices of linearized matrix that equals 1 is # 2 + 6 * 0 # 3 + 6 * 1 # ... # n + src_len * n + k - 1 = n * (src_len + 1) + k - 1 # n from 0 to max_tgt_len - 1 # # First, generate the indices (activate_indices_offset: bsz, max_tgt_len) # Second, scatter a zeros tensor (bsz, max_tgt_len * src_len) # with activate_indices_offset # Third, resize the tensor to (bsz, max_tgt_len, src_len) activate_indices_offset = ( ( torch.arange(max_tgt_len) * (max_src_len + 1) + waitk_lagging - 1 ) .unsqueeze(0) .expand(bsz, max_tgt_len) .long() ) if key_padding_mask is not None: if key_padding_mask[:, 0].any(): # Left padding activate_indices_offset += ( key_padding_mask.sum(dim=1, keepdim=True) ) # Need to clamp the indices that are too large activate_indices_offset = ( activate_indices_offset .clamp( 0, min( [ max_tgt_len, max_src_len - waitk_lagging + 1 ] ) * max_src_len - 1 ) ) p_choose = torch.zeros(bsz, max_tgt_len * max_src_len) p_choose = p_choose.scatter( 1, activate_indices_offset, 1.0 ).view(bsz, max_tgt_len, max_src_len) if key_padding_mask is not None: p_choose = p_choose.to(key_padding_mask) p_choose = p_choose.masked_fill(key_padding_mask.unsqueeze(1), 0) if incremental_state is not None: p_choose = p_choose[:, -1:] return p_choose.float() def learnable_p_choose( energy, noise_mean: float = 0.0, noise_var: float = 0.0, training: bool = True ): """ Calculating step wise prob for reading and writing 1 to read, 0 to write energy: bsz, tgt_len, src_len """ noise = 0 if training: # add noise here to encourage discretness noise = ( torch.normal(noise_mean, noise_var, energy.size()) .type_as(energy) .to(energy.device) ) p_choose = torch.sigmoid(energy + noise) # p_choose: bsz * self.num_heads, tgt_len, src_len return p_choose
KosmosX-API-main
kosmosX/fairseq/examples/simultaneous_translation/utils/p_choose_strategy.py
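An offline check of the wait-k policy above (again assuming the examples package is importable): for k=3, target step n writes only after n+k-1 source tokens have been read, which shows up as a shifted diagonal in p_choose.
import torch
from examples.simultaneous_translation.utils.p_choose_strategy import waitk_p_choose

p = waitk_p_choose(tgt_len=4, src_len=6, bsz=1, waitk_lagging=3)
print(p.squeeze(0))
# tensor([[0., 0., 1., 0., 0., 0.],
#         [0., 0., 0., 1., 0., 0.],
#         [0., 0., 0., 0., 1., 0.],
#         [0., 0., 0., 0., 0., 1.]])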
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import importlib import os # automatically import any Python files in the utils/ directory for file in sorted(os.listdir(os.path.dirname(__file__))): if file.endswith(".py") and not file.startswith("_"): module = file[: file.find(".py")] importlib.import_module("examples.simultaneous_translation.utils." + module)
KosmosX-API-main
kosmosX/fairseq/examples/simultaneous_translation/utils/__init__.py
from typing import Optional import torch from torch import Tensor from examples.simultaneous_translation.utils.functions import ( prob_check, moving_sum, ) def expected_alignment_from_p_choose( p_choose: Tensor, padding_mask: Optional[Tensor] = None, eps: float = 1e-6 ): """ Calculating expected alignment for from stepwise probability Reference: Online and Linear-Time Attention by Enforcing Monotonic Alignments https://arxiv.org/pdf/1704.00784.pdf q_ij = (1 − p_{ij−1})q_{ij−1} + a+{i−1j} a_ij = p_ij q_ij Parallel solution: ai = p_i * cumprod(1 − pi) * cumsum(a_i / cumprod(1 − pi)) ============================================================ Expected input size p_choose: bsz, tgt_len, src_len """ prob_check(p_choose) # p_choose: bsz, tgt_len, src_len bsz, tgt_len, src_len = p_choose.size() dtype = p_choose.dtype p_choose = p_choose.float() if padding_mask is not None: p_choose = p_choose.masked_fill(padding_mask.unsqueeze(1), 0.0) if p_choose.is_cuda: p_choose = p_choose.contiguous() from alignment_train_cuda_binding import alignment_train_cuda as alignment_train else: from alignment_train_cpu_binding import alignment_train_cpu as alignment_train alpha = p_choose.new_zeros([bsz, tgt_len, src_len]) alignment_train(p_choose, alpha, eps) # Mix precision to prevent overflow for fp16 alpha = alpha.type(dtype) prob_check(alpha) return alpha def expected_soft_attention( alpha: Tensor, soft_energy: Tensor, padding_mask: Optional[Tensor] = None, chunk_size: Optional[int] = None, eps: float = 1e-10 ): """ Function to compute expected soft attention for monotonic infinite lookback attention from expected alignment and soft energy. Reference: Monotonic Chunkwise Attention https://arxiv.org/abs/1712.05382 Monotonic Infinite Lookback Attention for Simultaneous Machine Translation https://arxiv.org/abs/1906.05218 alpha: bsz, tgt_len, src_len soft_energy: bsz, tgt_len, src_len padding_mask: bsz, src_len left_padding: bool """ if padding_mask is not None: alpha = alpha.masked_fill(padding_mask.unsqueeze(1), 0.0) soft_energy = soft_energy.masked_fill( padding_mask.unsqueeze(1), -float("inf") ) prob_check(alpha) dtype = alpha.dtype alpha = alpha.float() soft_energy = soft_energy.float() soft_energy = soft_energy - soft_energy.max(dim=2, keepdim=True)[0] exp_soft_energy = torch.exp(soft_energy) + eps if chunk_size is not None: # Chunkwise beta = ( exp_soft_energy * moving_sum( alpha / (eps + moving_sum(exp_soft_energy, chunk_size, 1)), 1, chunk_size ) ) else: # Infinite lookback # Notice that infinite lookback is a special case of chunkwise # where chunksize = inf inner_items = alpha / (eps + torch.cumsum(exp_soft_energy, dim=2)) beta = ( exp_soft_energy * torch.cumsum(inner_items.flip(dims=[2]), dim=2) .flip(dims=[2]) ) if padding_mask is not None: beta = beta.masked_fill( padding_mask.unsqueeze(1).to(torch.bool), 0.0) # Mix precision to prevent overflow for fp16 beta = beta.type(dtype) beta = beta.clamp(0, 1) prob_check(beta) return beta def mass_preservation( alpha: Tensor, padding_mask: Optional[Tensor] = None, left_padding: bool = False ): """ Function to compute the mass perservation for alpha. This means that the residual weights of alpha will be assigned to the last token. 
Reference: Monotonic Infinite Lookback Attention for Simultaneous Machine Translation https://arxiv.org/abs/1906.05218 alpha: bsz, tgt_len, src_len padding_mask: bsz, src_len left_padding: bool """ prob_check(alpha) if padding_mask is not None: if not left_padding: assert not padding_mask[:, 0].any(), ( "Find padding on the beginning of the sequence." ) alpha = alpha.masked_fill(padding_mask.unsqueeze(1), 0.0) if left_padding or padding_mask is None: residuals = 1 - alpha[:, :, :-1].sum(dim=-1).clamp(0, 1) alpha[:, :, -1] = residuals else: # right padding _, tgt_len, src_len = alpha.size() residuals = 1 - alpha.sum(dim=-1, keepdim=True).clamp(0, 1) src_lens = src_len - padding_mask.sum(dim=1, keepdim=True) src_lens = src_lens.expand(-1, tgt_len).contiguous() # add back the last value residuals += alpha.gather(2, src_lens.unsqueeze(2) - 1) alpha = alpha.scatter(2, src_lens.unsqueeze(2) - 1, residuals) prob_check(alpha) return alpha
KosmosX-API-main
kosmosX/fairseq/examples/simultaneous_translation/utils/monotonic_attention.py
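A minimal check of mass_preservation on a toy alignment (assuming the examples package is importable). Note that expected_alignment_from_p_choose itself requires the compiled alignment_train_cpu/cuda binding, so only the pure-PyTorch helper is exercised here.
import torch
from examples.simultaneous_translation.utils.monotonic_attention import mass_preservation

# bsz=1, tgt_len=2, src_len=3; rows deliberately do not sum to 1
alpha = torch.tensor([[[0.6, 0.3, 0.0],
                       [0.2, 0.2, 0.1]]])
print(mass_preservation(alpha.clone()))
# tensor([[[0.6000, 0.3000, 0.1000],
#          [0.2000, 0.2000, 0.6000]]])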
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from typing import Dict, List, NamedTuple, Optional import torch import torch.nn as nn from examples.simultaneous_translation.modules.monotonic_transformer_layer import ( TransformerMonotonicDecoderLayer, TransformerMonotonicEncoderLayer, ) from fairseq.models import ( register_model, register_model_architecture, ) from fairseq.models.transformer import ( TransformerModel, TransformerEncoder, TransformerDecoder, base_architecture, transformer_iwslt_de_en, transformer_vaswani_wmt_en_de_big, tiny_architecture ) from torch import Tensor DEFAULT_MAX_SOURCE_POSITIONS = 1024 DEFAULT_MAX_TARGET_POSITIONS = 1024 READ_ACTION = 0 WRITE_ACTION = 1 TransformerMonotonicDecoderOut = NamedTuple( "TransformerMonotonicDecoderOut", [ ("action", int), ("p_choose", Optional[Tensor]), ("attn_list", Optional[List[Optional[Dict[str, Tensor]]]]), ("encoder_out", Optional[Dict[str, List[Tensor]]]), ("encoder_padding_mask", Optional[Tensor]), ], ) @register_model("transformer_unidirectional") class TransformerUnidirectionalModel(TransformerModel): @classmethod def build_encoder(cls, args, src_dict, embed_tokens): return TransformerMonotonicEncoder(args, src_dict, embed_tokens) @register_model("transformer_monotonic") class TransformerModelSimulTrans(TransformerModel): @classmethod def build_encoder(cls, args, src_dict, embed_tokens): return TransformerMonotonicEncoder(args, src_dict, embed_tokens) @classmethod def build_decoder(cls, args, tgt_dict, embed_tokens): return TransformerMonotonicDecoder(args, tgt_dict, embed_tokens) class TransformerMonotonicEncoder(TransformerEncoder): def __init__(self, args, dictionary, embed_tokens): super().__init__(args, dictionary, embed_tokens) self.dictionary = dictionary self.layers = nn.ModuleList([]) self.layers.extend( [ TransformerMonotonicEncoderLayer(args) for i in range(args.encoder_layers) ] ) class TransformerMonotonicDecoder(TransformerDecoder): """ Transformer decoder consisting of *args.decoder_layers* layers. Each layer is a :class:`TransformerDecoderLayer`. Args: args (argparse.Namespace): parsed command-line arguments dictionary (~fairseq.data.Dictionary): decoding dictionary embed_tokens (torch.nn.Embedding): output embedding no_encoder_attn (bool, optional): whether to attend to encoder outputs (default: False). 
""" def __init__(self, args, dictionary, embed_tokens, no_encoder_attn=False): super().__init__(args, dictionary, embed_tokens, no_encoder_attn=False) self.dictionary = dictionary self.layers = nn.ModuleList([]) self.layers.extend( [ TransformerMonotonicDecoderLayer(args) for _ in range(args.decoder_layers) ] ) self.policy_criterion = getattr(args, "policy_criterion", "any") self.num_updates = None def set_num_updates(self, num_updates): self.num_updates = num_updates def pre_attention( self, prev_output_tokens, encoder_out_dict: Dict[str, List[Tensor]], incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]] = None, ): positions = ( self.embed_positions( prev_output_tokens, incremental_state=incremental_state, ) if self.embed_positions is not None else None ) if incremental_state is not None: prev_output_tokens = prev_output_tokens[:, -1:] if positions is not None: positions = positions[:, -1:] # embed tokens and positions x = self.embed_scale * self.embed_tokens(prev_output_tokens) if self.project_in_dim is not None: x = self.project_in_dim(x) if positions is not None: x += positions x = self.dropout_module(x) # B x T x C -> T x B x C x = x.transpose(0, 1) encoder_out = encoder_out_dict["encoder_out"][0] if "encoder_padding_mask" in encoder_out_dict: encoder_padding_mask = ( encoder_out_dict["encoder_padding_mask"][0] if encoder_out_dict["encoder_padding_mask"] and len(encoder_out_dict["encoder_padding_mask"]) > 0 else None ) else: encoder_padding_mask = None return x, encoder_out, encoder_padding_mask def post_attention(self, x): if self.layer_norm is not None: x = self.layer_norm(x) # T x B x C -> B x T x C x = x.transpose(0, 1) if self.project_out_dim is not None: x = self.project_out_dim(x) return x def clean_cache( self, incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]], end_id: Optional[int] = None, ): """ Clean cache in the monotonic layers. The cache is generated because of a forward pass of decoder has run but no prediction, so that the self attention key value in decoder is written in the incremental state. end_id is the last idx of the layers """ if end_id is None: end_id = len(self.layers) for index, layer in enumerate(self.layers): if index < end_id: layer.prune_incremental_state(incremental_state) def extract_features( self, prev_output_tokens, encoder_out: Optional[Dict[str, List[Tensor]]], incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]] = None, full_context_alignment: bool = False, # unused alignment_layer: Optional[int] = None, # unused alignment_heads: Optional[int] = None, # unsed ): """ Similar to *forward* but only return features. 
Returns: tuple: - the decoder's features of shape `(batch, tgt_len, embed_dim)` - a dictionary with any model-specific outputs """ # incremental_state = None assert encoder_out is not None (x, encoder_outs, encoder_padding_mask) = self.pre_attention( prev_output_tokens, encoder_out, incremental_state ) attn = None inner_states = [x] attn_list: List[Optional[Dict[str, Tensor]]] = [] p_choose = torch.tensor([1.0]) for i, layer in enumerate(self.layers): x, attn, _ = layer( x=x, encoder_out=encoder_outs, encoder_padding_mask=encoder_padding_mask, incremental_state=incremental_state, self_attn_mask=self.buffered_future_mask(x) if incremental_state is None else None, ) inner_states.append(x) attn_list.append(attn) if incremental_state is not None: if_online = incremental_state["online"]["only"] assert if_online is not None if if_online.to(torch.bool): # Online indicates that the encoder states are still changing assert attn is not None if self.policy_criterion == "any": # Any head decide to read than read head_read = layer.encoder_attn._get_monotonic_buffer(incremental_state)["head_read"] assert head_read is not None if head_read.any(): # We need to prune the last self_attn saved_state # if model decide not to read # otherwise there will be duplicated saved_state self.clean_cache(incremental_state, i + 1) return x, TransformerMonotonicDecoderOut( action=0, p_choose=p_choose, attn_list=None, encoder_out=None, encoder_padding_mask=None, ) x = self.post_attention(x) return x, TransformerMonotonicDecoderOut( action=1, p_choose=p_choose, attn_list=attn_list, encoder_out=encoder_out, encoder_padding_mask=encoder_padding_mask, ) @register_model_architecture("transformer_monotonic", "transformer_monotonic") def base_monotonic_architecture(args): base_architecture(args) args.encoder_unidirectional = getattr(args, "encoder_unidirectional", False) @register_model_architecture( "transformer_monotonic", "transformer_monotonic_iwslt_de_en" ) def transformer_monotonic_iwslt_de_en(args): transformer_iwslt_de_en(args) base_monotonic_architecture(args) # parameters used in the "Attention Is All You Need" paper (Vaswani et al., 2017) @register_model_architecture( "transformer_monotonic", "transformer_monotonic_vaswani_wmt_en_de_big" ) def transformer_monotonic_vaswani_wmt_en_de_big(args): transformer_vaswani_wmt_en_de_big(args) @register_model_architecture( "transformer_monotonic", "transformer_monotonic_vaswani_wmt_en_fr_big" ) def transformer_monotonic_vaswani_wmt_en_fr_big(args): transformer_monotonic_vaswani_wmt_en_fr_big(args) @register_model_architecture( "transformer_unidirectional", "transformer_unidirectional_iwslt_de_en" ) def transformer_unidirectional_iwslt_de_en(args): transformer_iwslt_de_en(args) @register_model_architecture("transformer_monotonic", "transformer_monotonic_tiny") def monotonic_tiny_architecture(args): tiny_architecture(args) base_monotonic_architecture(args)
KosmosX-API-main
kosmosX/fairseq/examples/simultaneous_translation/models/transformer_monotonic_attention.py
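The decorators above only register names with fairseq's global registries; nothing is instantiated until fairseq-train builds the model. A quick way to confirm the registration, assuming fairseq is installed and the examples package is importable:
from fairseq.models import ARCH_MODEL_REGISTRY

# importing the module runs the @register_model_architecture decorators
import examples.simultaneous_translation.models.transformer_monotonic_attention  # noqa: F401

print(sorted(a for a in ARCH_MODEL_REGISTRY if a.startswith("transformer_monotonic")))
# expected to include 'transformer_monotonic', 'transformer_monotonic_iwslt_de_en', ...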
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import importlib import os for file in sorted(os.listdir(os.path.dirname(__file__))): if file.endswith(".py") and not file.startswith("_"): model_name = file[: file.find(".py")] importlib.import_module( "examples.simultaneous_translation.models." + model_name )
KosmosX-API-main
kosmosX/fairseq/examples/simultaneous_translation/models/__init__.py
# Copyright (c) 2017-present, Facebook, Inc. # All rights reserved. # # This source code is licensed under the license found in the LICENSE file in # the root directory of this source tree. An additional grant of patent rights # can be found in the PATENTS file in the same directory. from fairseq import checkpoint_utils from fairseq.models import ( register_model, register_model_architecture, ) from fairseq.models.speech_to_text import ( ConvTransformerModel, convtransformer_espnet, ConvTransformerEncoder, ) from fairseq.models.speech_to_text.modules.augmented_memory_attention import ( augmented_memory, SequenceEncoder, AugmentedMemoryConvTransformerEncoder, ) from torch import nn, Tensor from typing import Dict, List from fairseq.models.speech_to_text.modules.emformer import NoSegAugmentedMemoryTransformerEncoderLayer @register_model("convtransformer_simul_trans") class SimulConvTransformerModel(ConvTransformerModel): """ Implementation of the paper: SimulMT to SimulST: Adapting Simultaneous Text Translation to End-to-End Simultaneous Speech Translation https://www.aclweb.org/anthology/2020.aacl-main.58.pdf """ @staticmethod def add_args(parser): super(SimulConvTransformerModel, SimulConvTransformerModel).add_args(parser) parser.add_argument( "--train-monotonic-only", action="store_true", default=False, help="Only train monotonic attention", ) @classmethod def build_decoder(cls, args, task, embed_tokens): tgt_dict = task.tgt_dict from examples.simultaneous_translation.models.transformer_monotonic_attention import ( TransformerMonotonicDecoder, ) decoder = TransformerMonotonicDecoder(args, tgt_dict, embed_tokens) if getattr(args, "load_pretrained_decoder_from", None): decoder = checkpoint_utils.load_pretrained_component_from_model( component=decoder, checkpoint=args.load_pretrained_decoder_from ) return decoder @register_model_architecture( "convtransformer_simul_trans", "convtransformer_simul_trans_espnet" ) def convtransformer_simul_trans_espnet(args): convtransformer_espnet(args) @register_model("convtransformer_augmented_memory") @augmented_memory class AugmentedMemoryConvTransformerModel(SimulConvTransformerModel): @classmethod def build_encoder(cls, args): encoder = SequenceEncoder(args, AugmentedMemoryConvTransformerEncoder(args)) if getattr(args, "load_pretrained_encoder_from", None) is not None: encoder = checkpoint_utils.load_pretrained_component_from_model( component=encoder, checkpoint=args.load_pretrained_encoder_from ) return encoder @register_model_architecture( "convtransformer_augmented_memory", "convtransformer_augmented_memory" ) def augmented_memory_convtransformer_espnet(args): convtransformer_espnet(args) # ============================================================================ # # Convtransformer # with monotonic attention decoder # with emformer encoder # ============================================================================ # class ConvTransformerEmformerEncoder(ConvTransformerEncoder): def __init__(self, args): super().__init__(args) stride = self.conv_layer_stride(args) trf_left_context = args.segment_left_context // stride trf_right_context = args.segment_right_context // stride context_config = [trf_left_context, trf_right_context] self.transformer_layers = nn.ModuleList( [ NoSegAugmentedMemoryTransformerEncoderLayer( input_dim=args.encoder_embed_dim, num_heads=args.encoder_attention_heads, ffn_dim=args.encoder_ffn_embed_dim, num_layers=args.encoder_layers, dropout_in_attn=args.dropout, dropout_on_attn=args.dropout, dropout_on_fc1=args.dropout, 
dropout_on_fc2=args.dropout, activation_fn=args.activation_fn, context_config=context_config, segment_size=args.segment_length, max_memory_size=args.max_memory_size, scaled_init=True, # TODO: use constant for now. tanh_on_mem=args.amtrf_tanh_on_mem, ) ] ) self.conv_transformer_encoder = ConvTransformerEncoder(args) def forward(self, src_tokens, src_lengths): encoder_out: Dict[str, List[Tensor]] = self.conv_transformer_encoder(src_tokens, src_lengths.to(src_tokens.device)) output = encoder_out["encoder_out"][0] encoder_padding_masks = encoder_out["encoder_padding_mask"] return { "encoder_out": [output], # This is because that in the original implementation # the output didn't consider the last segment as right context. "encoder_padding_mask": [encoder_padding_masks[0][:, : output.size(0)]] if len(encoder_padding_masks) > 0 else [], "encoder_embedding": [], "encoder_states": [], "src_tokens": [], "src_lengths": [], } @staticmethod def conv_layer_stride(args): # TODO: make it configurable from the args return 4 @register_model("convtransformer_emformer") class ConvtransformerEmformer(SimulConvTransformerModel): @staticmethod def add_args(parser): super(ConvtransformerEmformer, ConvtransformerEmformer).add_args(parser) parser.add_argument( "--segment-length", type=int, metavar="N", help="length of each segment (not including left context / right context)", ) parser.add_argument( "--segment-left-context", type=int, help="length of left context in a segment", ) parser.add_argument( "--segment-right-context", type=int, help="length of right context in a segment", ) parser.add_argument( "--max-memory-size", type=int, default=-1, help="Right context for the segment.", ) parser.add_argument( "--amtrf-tanh-on-mem", default=False, action="store_true", help="whether to use tanh on memory vector", ) @classmethod def build_encoder(cls, args): encoder = ConvTransformerEmformerEncoder(args) if getattr(args, "load_pretrained_encoder_from", None): encoder = checkpoint_utils.load_pretrained_component_from_model( component=encoder, checkpoint=args.load_pretrained_encoder_from ) return encoder @register_model_architecture( "convtransformer_emformer", "convtransformer_emformer", ) def convtransformer_emformer_base(args): convtransformer_espnet(args)
KosmosX-API-main
kosmosX/fairseq/examples/simultaneous_translation/models/convtransformer_simul_trans.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import os from fairseq import checkpoint_utils, tasks import sentencepiece as spm import torch try: from simuleval import READ_ACTION, WRITE_ACTION, DEFAULT_EOS from simuleval.agents import TextAgent except ImportError: print("Please install simuleval 'pip install simuleval'") BOS_PREFIX = "\u2581" class SimulTransTextAgentJA(TextAgent): """ Simultaneous Translation Text agent for Japanese """ def __init__(self, args): # Whether use gpu self.gpu = getattr(args, "gpu", False) # Max len self.max_len = args.max_len # Load Model self.load_model_vocab(args) # build word splitter self.build_word_splitter(args) self.eos = DEFAULT_EOS def initialize_states(self, states): states.incremental_states = dict() states.incremental_states["online"] = dict() def to_device(self, tensor): if self.gpu: return tensor.cuda() else: return tensor.cpu() def load_model_vocab(self, args): filename = args.model_path if not os.path.exists(filename): raise IOError("Model file not found: {}".format(filename)) state = checkpoint_utils.load_checkpoint_to_cpu(filename) task_args = state["cfg"]["task"] task_args.data = args.data_bin task = tasks.setup_task(task_args) # build model for ensemble state["cfg"]["model"].load_pretrained_encoder_from = None state["cfg"]["model"].load_pretrained_decoder_from = None self.model = task.build_model(state["cfg"]["model"]) self.model.load_state_dict(state["model"], strict=True) self.model.eval() self.model.share_memory() if self.gpu: self.model.cuda() # Set dictionary self.dict = {} self.dict["tgt"] = task.target_dictionary self.dict["src"] = task.source_dictionary @staticmethod def add_args(parser): # fmt: off parser.add_argument('--model-path', type=str, required=True, help='path to your pretrained model.') parser.add_argument("--data-bin", type=str, required=True, help="Path of data binary") parser.add_argument("--max-len", type=int, default=100, help="Max length of translation") parser.add_argument("--tgt-splitter-type", type=str, default="SentencePiece", help="Subword splitter type for target text.") parser.add_argument("--tgt-splitter-path", type=str, default=None, help="Subword splitter model path for target text.") parser.add_argument("--src-splitter-type", type=str, default="SentencePiece", help="Subword splitter type for source text.") parser.add_argument("--src-splitter-path", type=str, default=None, help="Subword splitter model path for source text.") # fmt: on return parser def build_word_splitter(self, args): self.spm = {} for lang in ['src', 'tgt']: if getattr(args, f'{lang}_splitter_type', None): path = getattr(args, f'{lang}_splitter_path', None) if path: self.spm[lang] = spm.SentencePieceProcessor() self.spm[lang].Load(path) def segment_to_units(self, segment, states): # Split a full word (segment) into subwords (units) return self.spm['src'].EncodeAsPieces(segment) def update_model_encoder(self, states): if len(states.units.source) == 0: return src_indices = [ self.dict['src'].index(x) for x in states.units.source.value ] if states.finish_read(): # Append the eos index when the prediction is over src_indices += [self.dict["tgt"].eos_index] src_indices = self.to_device( torch.LongTensor(src_indices).unsqueeze(0) ) src_lengths = self.to_device( torch.LongTensor([src_indices.size(1)]) ) states.encoder_states = self.model.encoder(src_indices, src_lengths) torch.cuda.empty_cache() def 
update_states_read(self, states): # Happens after a read action. self.update_model_encoder(states) def units_to_segment(self, units, states): # Merge sub words (units) to full word (segment). # For Japanese, we can directly send # the untokenized token to server except the BOS token # with following option # --sacrebleu-tokenizer MeCab # --eval-latency-unit char # --no-space token = units.value.pop() if ( token == self.dict["tgt"].eos_word or len(states.segments.target) > self.max_len ): return DEFAULT_EOS if BOS_PREFIX == token: return None if token[0] == BOS_PREFIX: return token[1:] else: return token def policy(self, states): if not getattr(states, "encoder_states", None): # No encoder states, read a token first return READ_ACTION # encode previous predicted target tokens tgt_indices = self.to_device( torch.LongTensor( [self.model.decoder.dictionary.eos()] + [ self.dict['tgt'].index(x) for x in states.units.target.value if x is not None ] ).unsqueeze(0) ) # Current steps states.incremental_states["steps"] = { "src": states.encoder_states["encoder_out"][0].size(0), "tgt": 1 + len(states.units.target), } # Online only means the reading is not finished states.incremental_states["online"]["only"] = ( torch.BoolTensor([not states.finish_read()]) ) x, outputs = self.model.decoder.forward( prev_output_tokens=tgt_indices, encoder_out=states.encoder_states, incremental_state=states.incremental_states, ) states.decoder_out = x torch.cuda.empty_cache() if outputs.action == 0: return READ_ACTION else: return WRITE_ACTION def predict(self, states): # Predict target token from decoder states decoder_states = states.decoder_out lprobs = self.model.get_normalized_probs( [decoder_states[:, -1:]], log_probs=True ) index = lprobs.argmax(dim=-1)[0, 0].item() if index != self.dict['tgt'].eos_index: token = self.dict['tgt'].string([index]) else: token = self.dict['tgt'].eos_word return token
KosmosX-API-main
kosmosX/fairseq/examples/simultaneous_translation/eval/agents/simul_t2t_enja.py
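The agent above is driven by simuleval, but its units_to_segment rule is easy to illustrate in isolation: SentencePiece marks word starts with "\u2581", and for Japanese output the marker is simply stripped (a bare marker yields nothing, EOS ends the hypothesis). A standalone mirror of that rule, not the agent's exact code path:
BOS_PREFIX = "\u2581"

def detok(token: str, eos_word: str = "</s>"):
    """Mirror of units_to_segment for a single subword token (illustrative only)."""
    if token == eos_word:
        return "<EOS>"          # stand-in for simuleval's DEFAULT_EOS
    if token == BOS_PREFIX:
        return None             # the marker alone carries no text
    return token[1:] if token.startswith(BOS_PREFIX) else token

for t in ["\u2581こんにちは", "世界", "\u2581", "</s>"]:
    print(repr(detok(t)))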
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from fairseq.modules import TransformerDecoderLayer, TransformerEncoderLayer from . import build_monotonic_attention from typing import Dict, Optional, List from torch import Tensor import torch class TransformerMonotonicEncoderLayer(TransformerEncoderLayer): def forward(self, x, encoder_padding_mask): seq_len, _, _ = x.size() attn_mask = x.new_ones([seq_len, seq_len]).triu(1) attn_mask = attn_mask.masked_fill(attn_mask.bool(), float("-inf")) return super().forward(x, encoder_padding_mask, attn_mask) class TransformerMonotonicDecoderLayer(TransformerDecoderLayer): def __init__(self, args): super().__init__(args) assert args.simul_type is not None, "A --simul-type is needed." self.encoder_attn = build_monotonic_attention(args) def prune_incremental_state( self, incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]] ): input_buffer = self.self_attn._get_input_buffer(incremental_state) for key in ["prev_key", "prev_value"]: input_buffer_key = input_buffer[key] assert input_buffer_key is not None if input_buffer_key.size(2) > 1: input_buffer[key] = input_buffer_key[:, :, :-1, :] else: typed_empty_dict: Dict[str, Optional[Tensor]] = {} input_buffer = typed_empty_dict break assert incremental_state is not None self.self_attn._set_input_buffer(incremental_state, input_buffer) def forward( self, x, encoder_out: Optional[Tensor] = None, encoder_padding_mask: Optional[Tensor] = None, incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]] = None, prev_self_attn_state: Optional[List[Tensor]] = None, prev_attn_state: Optional[List[Tensor]] = None, self_attn_mask: Optional[Tensor] = None, self_attn_padding_mask: Optional[Tensor] = None, need_attn: bool = False, need_head_weights: bool = False, ): """ Args: x (Tensor): input to the layer of shape `(seq_len, batch, embed_dim)` encoder_padding_mask (ByteTensor, optional): binary ByteTensor of shape `(batch, src_len)` where padding elements are indicated by ``1``. need_attn (bool, optional): return attention weights need_head_weights (bool, optional): return attention weights for each head (default: return average over heads). 
Returns: encoded output of shape `(seq_len, batch, embed_dim)` """ if need_head_weights: need_attn = True residual = x if self.normalize_before: x = self.self_attn_layer_norm(x) if prev_self_attn_state is not None: prev_key, prev_value = prev_self_attn_state[:2] saved_state: Dict[str, Optional[Tensor]] = { "prev_key": prev_key, "prev_value": prev_value, } if len(prev_self_attn_state) >= 3: saved_state["prev_key_padding_mask"] = prev_self_attn_state[2] assert incremental_state is not None self.self_attn._set_input_buffer(incremental_state, saved_state) _self_attn_input_buffer = self.self_attn._get_input_buffer(incremental_state) if self.cross_self_attention and not ( incremental_state is not None and _self_attn_input_buffer is not None and "prev_key" in _self_attn_input_buffer ): if self_attn_mask is not None: assert encoder_out is not None self_attn_mask = torch.cat( (x.new_zeros(x.size(0), encoder_out.size(0)), self_attn_mask), dim=1 ) if self_attn_padding_mask is not None: if encoder_padding_mask is None: assert encoder_out is not None encoder_padding_mask = self_attn_padding_mask.new_zeros( encoder_out.size(1), encoder_out.size(0) ) self_attn_padding_mask = torch.cat( (encoder_padding_mask, self_attn_padding_mask), dim=1 ) assert encoder_out is not None y = torch.cat((encoder_out, x), dim=0) else: y = x x, attn = self.self_attn( query=x, key=y, value=y, key_padding_mask=self_attn_padding_mask, incremental_state=incremental_state, need_weights=False, attn_mask=self_attn_mask, ) x = self.dropout_module(x) x = self.residual_connection(x, residual) if not self.normalize_before: x = self.self_attn_layer_norm(x) assert self.encoder_attn is not None residual = x if self.normalize_before: x = self.encoder_attn_layer_norm(x) if prev_attn_state is not None: prev_key, prev_value = prev_attn_state[:2] saved_state: Dict[str, Optional[Tensor]] = { "prev_key": prev_key, "prev_value": prev_value, } if len(prev_attn_state) >= 3: saved_state["prev_key_padding_mask"] = prev_attn_state[2] assert incremental_state is not None self.encoder_attn._set_input_buffer(incremental_state, saved_state) x, attn = self.encoder_attn( query=x, key=encoder_out, value=encoder_out, key_padding_mask=encoder_padding_mask, incremental_state=incremental_state, static_kv=True, need_weights=need_attn or (not self.training and self.need_attn), need_head_weights=need_head_weights, ) x = self.dropout_module(x) x = self.residual_connection(x, residual) if not self.normalize_before: x = self.encoder_attn_layer_norm(x) residual = x if self.normalize_before: x = self.final_layer_norm(x) x = self.activation_fn(self.fc1(x)) x = self.activation_dropout_module(x) x = self.fc2(x) x = self.dropout_module(x) x = self.residual_connection(x, residual) if not self.normalize_before: x = self.final_layer_norm(x) if self.onnx_trace and incremental_state is not None: saved_state = self.self_attn._get_input_buffer(incremental_state) assert saved_state is not None if self_attn_padding_mask is not None: self_attn_state = [ saved_state["prev_key"], saved_state["prev_value"], saved_state["prev_key_padding_mask"], ] else: self_attn_state = [saved_state["prev_key"], saved_state["prev_value"]] return x, attn, self_attn_state return x, attn, None
KosmosX-API-main
kosmosX/fairseq/examples/simultaneous_translation/modules/monotonic_transformer_layer.py
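The encoder layer above turns the bidirectional Transformer encoder into a unidirectional one by masking future positions. The mask construction on its own:
import torch

seq_len = 4
attn_mask = torch.ones(seq_len, seq_len).triu(1)
attn_mask = attn_mask.masked_fill(attn_mask.bool(), float("-inf"))
print(attn_mask)
# tensor([[0., -inf, -inf, -inf],
#         [0., 0., -inf, -inf],
#         [0., 0., 0., -inf],
#         [0., 0., 0., 0.]])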
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import math import torch from torch import Tensor import torch.nn as nn from examples.simultaneous_translation.utils.p_choose_strategy import ( learnable_p_choose, waitk_p_choose ) from examples.simultaneous_translation.utils.monotonic_attention import ( expected_alignment_from_p_choose, expected_soft_attention, mass_preservation, ) from fairseq.modules import MultiheadAttention from . import register_monotonic_attention from typing import Dict, Optional @register_monotonic_attention("hard_aligned") class MonotonicAttention(MultiheadAttention): """ Abstract class of monotonic attentions """ k_in_proj: Dict[str, nn.Linear] q_in_proj: Dict[str, nn.Linear] def __init__(self, args): super().__init__( embed_dim=args.decoder_embed_dim, num_heads=args.decoder_attention_heads, kdim=getattr(args, "encoder_embed_dim", None), vdim=getattr(args, "encoder_embed_dim", None), dropout=args.attention_dropout, encoder_decoder_attention=True, ) self.soft_attention = False self.eps = getattr(args, "attention_eps", True) self.mass_preservation = getattr(args, "mass_preservation", True) self.noise_type = args.noise_type self.noise_mean = args.noise_mean self.noise_var = args.noise_var self.energy_bias_init = args.energy_bias_init self.energy_bias = ( nn.Parameter(self.energy_bias_init * torch.ones([1])) if args.energy_bias is True else 0 ) self.k_in_proj = {"monotonic": self.k_proj} self.q_in_proj = {"monotonic": self.q_proj} self.chunk_size = None @staticmethod def add_args(parser): # fmt: off parser.add_argument('--no-mass-preservation', action="store_false", dest="mass_preservation", help='Do not stay on the last token when decoding') parser.add_argument('--mass-preservation', action="store_true", dest="mass_preservation", help='Stay on the last token when decoding') parser.set_defaults(mass_preservation=True) parser.add_argument('--noise-var', type=float, default=1.0, help='Variance of discretness noise') parser.add_argument('--noise-mean', type=float, default=0.0, help='Mean of discretness noise') parser.add_argument('--noise-type', type=str, default="flat", help='Type of discretness noise') parser.add_argument('--energy-bias', action="store_true", default=False, help='Bias for energy') parser.add_argument('--energy-bias-init', type=float, default=-2.0, help='Initial value of the bias for energy') parser.add_argument('--attention-eps', type=float, default=1e-6, help='Epsilon when calculating expected attention') def energy_from_qk( self, query: Tensor, key: Tensor, energy_type: str, key_padding_mask: Optional[Tensor] = None, bias: int = 0 ): """ Compute energy from query and key q_func_value is a tuple looks like (q_proj_func, q_tensor) q_tensor size: bsz, tgt_len, emb_dim k_tensor size: bsz, src_len, emb_dim key_padding_mask size: bsz, src_len attn_mask: bsz, src_len """ length, bsz, _ = query.size() q = self.q_in_proj[energy_type].forward(query) q = ( q.contiguous() .view(length, bsz * self.num_heads, self.head_dim) .transpose(0, 1) ) q = q * self.scaling length, bsz, _ = key.size() k = self.k_in_proj[energy_type].forward(key) k = ( k.contiguous() .view(length, bsz * self.num_heads, self.head_dim) .transpose(0, 1) ) energy = torch.bmm(q, k.transpose(1, 2)) + bias if key_padding_mask is not None: energy = energy.masked_fill( key_padding_mask.unsqueeze(1).to(torch.bool), - float("inf") ) return energy def p_choose_from_qk(self, 
query, key, key_padding_mask, incremental_states=None): monotonic_energy = self.energy_from_qk( query, key, "monotonic", key_padding_mask=key_padding_mask, bias=self.energy_bias, ) p_choose = learnable_p_choose( monotonic_energy, self.noise_mean, self.noise_var, self.training ) return p_choose def p_choose(self, query, key, key_padding_mask, incremental_states=None): return self.p_choose_from_qk(self, query, key, key_padding_mask) def monotonic_attention_process_infer( self, query: Optional[Tensor], key: Optional[Tensor], incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]], ): """ Monotonic attention at inference time Notice that this function is designed for simuleval not sequence_generator """ assert query is not None assert key is not None if query.size(1) != 1: raise RuntimeError( "Simultaneous translation models don't support batch decoding." ) # 1. compute stepwise probability p_choose = self.p_choose( query, key, None, incremental_state ).squeeze(1) # 2. Compute the alpha src_len = key.size(0) # Maximum steps allows in this iteration max_steps = src_len - 1 if self.mass_preservation else src_len monotonic_cache = self._get_monotonic_buffer(incremental_state) # Step for each head monotonic_step = monotonic_cache.get( 'head_step', p_choose.new_zeros(1, self.num_heads).long() ) assert monotonic_step is not None finish_read = monotonic_step.eq(max_steps) p_choose_i = torch.tensor(1) while finish_read.sum().item() < self.num_heads: # p_choose: self.num_heads, src_len # only choose the p at monotonic steps # p_choose_i: 1, self.num_heads p_choose_i = ( p_choose.gather( 1, monotonic_step .clamp(0, src_len - 1), ) ) read_one_step = ( (p_choose_i < 0.5) .type_as(monotonic_step) .masked_fill(finish_read, 0) ) # 1 x bsz # sample actions on unfinished seq # 0 means stay, finish reading # 1 means leave, continue reading monotonic_step += read_one_step finish_read = monotonic_step.eq(max_steps) | (read_one_step == 0) # p_choose at last steps p_choose_i = ( p_choose.gather( 1, monotonic_step .clamp(0, src_len - 1), ) ) monotonic_cache["head_step"] = monotonic_step # Whether a head is looking for new input monotonic_cache["head_read"] = ( monotonic_step.eq(max_steps) & (p_choose_i < 0.5) ) self._set_monotonic_buffer(incremental_state, monotonic_cache) # 2. Update alpha alpha = ( p_choose .new_zeros([self.num_heads, src_len]) .scatter( 1, (monotonic_step) .view(self.num_heads, 1).clamp(0, src_len - 1), 1 ) ) if not self.mass_preservation: alpha = alpha.masked_fill( (monotonic_step == max_steps) .view(self.num_heads, 1), 0 ) # 4. Compute Beta if self.soft_attention: monotonic_step = monotonic_step.t() beta_mask = torch.arange(src_len).expand_as(alpha).gt(monotonic_step).unsqueeze(1) # If it's soft attention just do softmax on current context soft_energy = self.energy_from_qk( query, key, "soft" ) beta = torch.nn.functional.softmax( soft_energy.masked_fill(beta_mask, -float("inf")), dim=-1 ) # It could happen that a head doesn't move at all beta = beta.masked_fill(monotonic_step.eq(0).unsqueeze(1), 0) else: # If it's hard attention just select the last state beta = alpha return p_choose, alpha, beta def monotonic_attention_process_train( self, query: Optional[Tensor], key: Optional[Tensor], key_padding_mask: Optional[Tensor] = None, ): """ Calculating monotonic attention process for training Including: stepwise probability: p_choose expected hard alignment: alpha expected soft attention: beta """ assert query is not None assert key is not None # 1. 
compute stepwise probability p_choose = self.p_choose_from_qk(query, key, key_padding_mask) # 2. compute expected_alignment alpha = expected_alignment_from_p_choose( p_choose, key_padding_mask, eps=self.eps, ) if self.mass_preservation: alpha = mass_preservation( alpha, key_padding_mask ) # 3. compute expected soft attention (soft aligned model only) if self.soft_attention: soft_energy = self.energy_from_qk( query, key, "soft", key_padding_mask=None, ) beta = expected_soft_attention( alpha, soft_energy, padding_mask=key_padding_mask, chunk_size=self.chunk_size, eps=self.eps, ) else: beta = alpha soft_energy = alpha return p_choose, alpha, beta, soft_energy def forward( self, query: Optional[Tensor], key: Optional[Tensor], value: Optional[Tensor], key_padding_mask: Optional[Tensor] = None, attn_mask: Optional[Tensor] = None, incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]] = None, need_weights: bool = True, static_kv: bool = False, need_head_weights: bool = False, ): """ query: tgt_len, bsz, embed_dim key: src_len, bsz, embed_dim value: src_len, bsz, embed_dim """ assert attn_mask is None assert query is not None assert key is not None assert value is not None tgt_len, bsz, embed_dim = query.size() src_len = value.size(0) if key_padding_mask is not None: assert not key_padding_mask[:, 0].any(), ( "Only right padding is supported." ) key_padding_mask = ( key_padding_mask .unsqueeze(1) .expand([bsz, self.num_heads, src_len]) .contiguous() .view(-1, src_len) ) if incremental_state is not None: # Inference ( p_choose, alpha, beta ) = self.monotonic_attention_process_infer( query, key, incremental_state ) soft_energy = beta else: # Train ( p_choose, alpha, beta, soft_energy ) = self.monotonic_attention_process_train( query, key, key_padding_mask ) v = self.v_proj(value) length, bsz, _ = v.size() v = ( v.contiguous() .view(length, bsz * self.num_heads, self.head_dim) .transpose(0, 1) ) attn = torch.bmm(beta.type_as(v), v) attn = attn.transpose(0, 1).contiguous().view(tgt_len, bsz, embed_dim) attn = self.out_proj(attn) p_choose = p_choose.view(bsz, self.num_heads, tgt_len, src_len) alpha = alpha.view(bsz, self.num_heads, tgt_len, src_len) beta = beta.view(bsz, self.num_heads, tgt_len, src_len) return attn, { "p_choose": p_choose, "alpha": alpha, "beta": beta, } def _get_monotonic_buffer(self, incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]]): maybe_incremental_state = self.get_incremental_state( incremental_state, 'monotonic', ) if maybe_incremental_state is None: typed_empty_dict: Dict[str, Optional[Tensor]] = {} return typed_empty_dict else: return maybe_incremental_state def _set_monotonic_buffer(self, incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]], buffer: Dict[str, Optional[Tensor]]): self.set_incremental_state( incremental_state, 'monotonic', buffer, ) @register_monotonic_attention("infinite_lookback") class MonotonicInfiniteLookbackAttention( MonotonicAttention ): def __init__(self, args): super().__init__(args) self.soft_attention = True self.init_soft_attention() def init_soft_attention(self): self.k_proj_soft = nn.Linear(self.kdim, self.embed_dim, bias=True) self.q_proj_soft = nn.Linear(self.embed_dim, self.embed_dim, bias=True) self.k_in_proj["soft"] = self.k_proj_soft self.q_in_proj["soft"] = self.q_proj_soft if self.qkv_same_dim: # Empirically observed the convergence to be much better with # the scaled initialization nn.init.xavier_uniform_( self.k_in_proj["soft"].weight, gain=1 / math.sqrt(2) ) nn.init.xavier_uniform_( 
self.q_in_proj["soft"].weight, gain=1 / math.sqrt(2) ) else: nn.init.xavier_uniform_(self.k_in_proj["soft"].weight) nn.init.xavier_uniform_(self.q_in_proj["soft"].weight) @register_monotonic_attention("waitk") class WaitKAttention( MonotonicInfiniteLookbackAttention ): """ STACL: Simultaneous Translation with Implicit Anticipation and Controllable Latency using Prefix-to-Prefix Framework https://www.aclweb.org/anthology/P19-1289/ """ def __init__(self, args): super().__init__(args) self.q_in_proj["soft"] = self.q_in_proj["monotonic"] self.k_in_proj["soft"] = self.k_in_proj["monotonic"] self.waitk_lagging = args.waitk_lagging assert self.waitk_lagging > 0, ( f"Lagging has to been larger than 0, get {self.waitk_lagging}." ) @staticmethod def add_args(parser): super( MonotonicInfiniteLookbackAttention, MonotonicInfiniteLookbackAttention ).add_args(parser) parser.add_argument( "--waitk-lagging", type=int, required=True, help="Wait K lagging" ) def p_choose_from_qk( self, query: Optional[Tensor], key: Optional[Tensor], key_padding_mask: Optional[Tensor] = None, incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]] = None, ): assert query is not None assert key is not None p_choose = waitk_p_choose( tgt_len=query.size(0), src_len=key.size(0), bsz=query.size(1) * self.num_heads, waitk_lagging=self.waitk_lagging, key_padding_mask=key_padding_mask, incremental_state=incremental_state, ) return p_choose.to(query) @register_monotonic_attention("chunkwise") class ChunkwiseAttention( MonotonicInfiniteLookbackAttention ): def __init__(self, args): super().__init__(args) self.chunk_size = args.mocha_chunk_size assert self.chunk_size > 1 @staticmethod def add_args(parser): super( MonotonicInfiniteLookbackAttention ).add_args(parser) parser.add_argument( "--mocha-chunk-size", type=int, required=True, help="Mocha chunk size" )
KosmosX-API-main
kosmosX/fairseq/examples/simultaneous_translation/modules/monotonic_multihead_attention.py
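At inference time, monotonic_attention_process_infer advances each head along the source while its stepwise probability says "keep reading" (p_choose < 0.5). A simplified single-head sketch of that loop, standalone and not the module's exact code path:
import torch

def step_head(p_choose_row: torch.Tensor, start: int, max_steps: int):
    """Advance one head: keep reading while p_choose < 0.5, up to max_steps."""
    step = start
    while step < max_steps and p_choose_row[step] < 0.5:
        step += 1
    # mirrors head_read: at the boundary and still preferring to read
    wants_more_source = (
        step == max_steps
        and p_choose_row[min(step, p_choose_row.numel() - 1)].item() < 0.5
    )
    return step, wants_more_source

p = torch.tensor([0.1, 0.2, 0.9, 0.4])
print(step_head(p, start=0, max_steps=3))  # (2, False): writes after reading 3 tokens
print(step_head(p, start=3, max_steps=3))  # (3, True): asks the agent to READ more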
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import os import importlib from fairseq import registry ( build_monotonic_attention, register_monotonic_attention, MONOTONIC_ATTENTION_REGISTRY, _, ) = registry.setup_registry("--simul-type") for file in sorted(os.listdir(os.path.dirname(__file__))): if file.endswith(".py") and not file.startswith("_"): model_name = file[: file.find(".py")] importlib.import_module( "examples.simultaneous_translation.modules." + model_name )
KosmosX-API-main
kosmosX/fairseq/examples/simultaneous_translation/modules/__init__.py
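This registry is what backs the --simul-type command-line flag. Once the package has been imported (assuming fairseq and the examples package are on the path), the available policies can be listed:
from examples.simultaneous_translation.modules import MONOTONIC_ATTENTION_REGISTRY

print(sorted(MONOTONIC_ATTENTION_REGISTRY.keys()))
# expected to include 'hard_aligned', 'infinite_lookback', 'waitk', 'chunkwise'
# plus the *_fixed_pre_decision variants defined in fixed_pre_decision.py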
from functools import partial import torch from torch import Tensor import math import torch.nn.functional as F from . import register_monotonic_attention from .monotonic_multihead_attention import ( MonotonicAttention, MonotonicInfiniteLookbackAttention, WaitKAttention ) from typing import Dict, Optional def fixed_pooling_monotonic_attention(monotonic_attention): def create_model(monotonic_attention, klass): class FixedStrideMonotonicAttention(monotonic_attention): def __init__(self, args): self.waitk_lagging = 0 self.num_heads = 0 self.noise_mean = 0.0 self.noise_var = 0.0 super().__init__(args) self.pre_decision_type = args.fixed_pre_decision_type self.pre_decision_ratio = args.fixed_pre_decision_ratio self.pre_decision_pad_threshold = args.fixed_pre_decision_pad_threshold assert self.pre_decision_ratio > 1 if args.fixed_pre_decision_type == "average": self.pooling_layer = torch.nn.AvgPool1d( kernel_size=self.pre_decision_ratio, stride=self.pre_decision_ratio, ceil_mode=True, ) elif args.fixed_pre_decision_type == "last": def last(key): if key.size(2) < self.pre_decision_ratio: return key else: k = key[ :, :, self.pre_decision_ratio - 1:: self.pre_decision_ratio, ].contiguous() if key.size(-1) % self.pre_decision_ratio != 0: k = torch.cat([k, key[:, :, -1:]], dim=-1).contiguous() return k self.pooling_layer = last else: raise NotImplementedError @staticmethod def add_args(parser): super( FixedStrideMonotonicAttention, FixedStrideMonotonicAttention ).add_args(parser) parser.add_argument( "--fixed-pre-decision-ratio", type=int, required=True, help=( "Ratio for the fixed pre-decision," "indicating how many encoder steps will start" "simultaneous decision making process." ), ) parser.add_argument( "--fixed-pre-decision-type", default="average", choices=["average", "last"], help="Pooling type", ) parser.add_argument( "--fixed-pre-decision-pad-threshold", type=float, default=0.3, help="If a part of the sequence has pad" ",the threshold the pooled part is a pad.", ) def insert_zeros(self, x): bsz_num_heads, tgt_len, src_len = x.size() stride = self.pre_decision_ratio weight = F.pad(torch.ones(1, 1, 1).to(x), (stride - 1, 0)) x_upsample = F.conv_transpose1d( x.view(-1, src_len).unsqueeze(1), weight, stride=stride, padding=0, ) return x_upsample.squeeze(1).view(bsz_num_heads, tgt_len, -1) def p_choose( self, query: Optional[Tensor], key: Optional[Tensor], key_padding_mask: Optional[Tensor] = None, incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]] = None, ): assert key is not None assert query is not None src_len = key.size(0) tgt_len = query.size(0) batch_size = query.size(1) key_pool = self.pooling_layer(key.transpose(0, 2)).transpose(0, 2) if key_padding_mask is not None: key_padding_mask_pool = ( self.pooling_layer(key_padding_mask.unsqueeze(0).float()) .squeeze(0) .gt(self.pre_decision_pad_threshold) ) # Make sure at least one element is not pad key_padding_mask_pool[:, 0] = 0 else: key_padding_mask_pool = None if incremental_state is not None: # The floor instead of ceil is used for inference # But make sure the length key_pool at least 1 if ( max(1, math.floor(key.size(0) / self.pre_decision_ratio)) ) < key_pool.size(0): key_pool = key_pool[:-1] if key_padding_mask_pool is not None: key_padding_mask_pool = key_padding_mask_pool[:-1] p_choose_pooled = self.p_choose_from_qk( query, key_pool, key_padding_mask_pool, incremental_state=incremental_state, ) # Upsample, interpolate zeros p_choose = self.insert_zeros(p_choose_pooled) if p_choose.size(-1) < src_len: # Append zeros 
if the upsampled p_choose is shorter than src_len p_choose = torch.cat( [ p_choose, torch.zeros( p_choose.size(0), tgt_len, src_len - p_choose.size(-1) ).to(p_choose) ], dim=2 ) else: # can be larger than src_len because we used ceil before p_choose = p_choose[:, :, :src_len] p_choose[:, :, -1] = p_choose_pooled[:, :, -1] assert list(p_choose.size()) == [ batch_size * self.num_heads, tgt_len, src_len, ] return p_choose FixedStrideMonotonicAttention.__name__ = klass.__name__ return FixedStrideMonotonicAttention return partial(create_model, monotonic_attention) @register_monotonic_attention("waitk_fixed_pre_decision") @fixed_pooling_monotonic_attention(WaitKAttention) class WaitKAttentionFixedStride: pass @register_monotonic_attention("hard_aligned_fixed_pre_decision") @fixed_pooling_monotonic_attention(MonotonicAttention) class MonotonicAttentionFixedStride: pass @register_monotonic_attention("infinite_lookback_fixed_pre_decision") @fixed_pooling_monotonic_attention(MonotonicInfiniteLookbackAttention) class MonotonicInfiniteLookbackAttentionFixedStride: pass
KosmosX-API-main
kosmosX/fairseq/examples/simultaneous_translation/modules/fixed_pre_decision.py
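The core trick in insert_zeros above is a transposed 1-D convolution whose kernel is [0, ..., 0, 1]: each pooled decision is scattered back to the last frame of its block, with zeros everywhere else. A standalone demo with ratio=2:
import torch
import torch.nn.functional as F

ratio = 2
pooled = torch.tensor([[[0.9, 0.1, 0.7]]])            # (bsz*heads=1, tgt_len=1, pooled_src=3)
weight = F.pad(torch.ones(1, 1, 1), (ratio - 1, 0))   # kernel [0., 1.]
upsampled = F.conv_transpose1d(
    pooled.view(-1, pooled.size(-1)).unsqueeze(1), weight, stride=ratio
).squeeze(1).view(1, 1, -1)
print(upsampled)
# tensor([[[0.0000, 0.9000, 0.0000, 0.1000, 0.0000, 0.7000]]])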
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from .models import linformer_roberta # noqa
KosmosX-API-main
kosmosX/fairseq/examples/linformer/linformer_src/__init__.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. """ Linformer: Self-Attention with Linear Complexity """ import logging import torch from fairseq import utils from fairseq.models import register_model, register_model_architecture from fairseq.models.roberta import ( init_bert_params, roberta_base_architecture, roberta_large_architecture, RobertaEncoder, RobertaModel, ) from fairseq.utils import safe_hasattr from ..modules.linformer_sentence_encoder import LinformerTransformerEncoder logger = logging.getLogger(__name__) @register_model("linformer_roberta") class LinformerModel(RobertaModel): @staticmethod def add_args(parser): RobertaModel.add_args(parser) # add args for Linformer parser.add_argument( "--compressed", type=int, help="compressed ratio of sequence length" ) parser.add_argument( "--shared-kv-compressed", type=int, help="share compressed matrix between k and v, in each layer", ) parser.add_argument( "--shared-layer-kv-compressed", type=int, help="share compressed matrix between k and v and across all layers", ) parser.add_argument( "--freeze-compress", type=int, help="freeze the parameters in compressed layer", ) @classmethod def build_model(cls, args, task): """Build a new model instance.""" # make sure all arguments are present base_architecture(args) if not safe_hasattr(args, "max_positions"): args.max_positions = args.tokens_per_sample encoder = LinformerEncoder(args, task.source_dictionary) return cls(args, encoder) class LinformerEncoder(RobertaEncoder): """Linformer encoder.""" def __init__(self, args, dictionary): super().__init__(args, dictionary) self.register_buffer("version", torch.tensor(2)) def build_encoder(self, args, dictionary, embed_tokens): encoder = LinformerTransformerEncoder(args, dictionary, embed_tokens) encoder.apply(init_bert_params) return encoder def upgrade_state_dict_named(self, state_dict, name): super().upgrade_state_dict_named(state_dict, name) prefix = name + "." if name != "" else "" # some old checkpoints had weight sharing implemented incorrectly # (note: this was correct in the original paper code) if utils.item(state_dict.get(f"{prefix}version", torch.tensor(1))) < 2: state_dict[f"{prefix}version"] = torch.tensor(1) # check if input embeddings and output embeddings were tied if not torch.allclose( state_dict[f"{prefix}sentence_encoder.embed_tokens.weight"], state_dict[f"{prefix}lm_head.weight"], ): # they weren't tied, re-init the LM head without weight sharing self.lm_head = self.build_lm_head( embed_dim=self.args.encoder_embed_dim, output_dim=len(self.dictionary), activation_fn=self.args.activation_fn, weight=None, # don't share weights ) @register_model_architecture("linformer_roberta", "linformer_roberta") def base_architecture(args): args.compressed = getattr(args, "compressed", 4) args.shared_kv_compressed = getattr(args, "shared_kv_compressed", 0) args.shared_layer_kv_compressed = getattr(args, "shared_layer_kv_compressed", 0) args.freeze_compress = getattr(args, "freeze_compress", 0) roberta_base_architecture(args) @register_model_architecture("linformer_roberta", "linformer_roberta_base") def linformer_roberta_base_architecture(args): base_architecture(args) @register_model_architecture("linformer_roberta", "linformer_roberta_large") def linformer_roberta_large_architecture(args): roberta_large_architecture(args) base_architecture(args)
KosmosX-API-main
kosmosX/fairseq/examples/linformer/linformer_src/models/linformer_roberta.py
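What --compressed controls, in isolation: keys and values are projected along the sequence dimension from n positions down to k = n // compressed before attention, so the score matrix is n x k rather than n x n. A minimal sketch with toy sizes, not the module's exact code path (which also handles heads, padding and weight slicing):
import torch
import torch.nn as nn

n, d, compressed = 256, 64, 4
k = n // compressed

x = torch.randn(n, d)                      # one sequence of n token embeddings
compress_k = nn.Linear(n, k, bias=False)   # learned projection over positions

q = x                                      # (n, d)
keys = compress_k(x.t()).t()               # (k, d): sequence length reduced
scores = q @ keys.t() / d ** 0.5           # (n, k) instead of (n, n)
print(scores.shape)                        # torch.Size([256, 64])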
KosmosX-API-main
kosmosX/fairseq/examples/linformer/linformer_src/models/__init__.py
KosmosX-API-main
kosmosX/fairseq/examples/linformer/linformer_src/modules/__init__.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import math from typing import Dict, Optional, Tuple import torch import torch.nn.functional as F from fairseq import utils from fairseq.incremental_decoding_utils import with_incremental_state from fairseq.modules.quant_noise import quant_noise from torch import Tensor, nn from torch.nn import Parameter @with_incremental_state class MultiheadLinearAttention(nn.Module): """Multi-headed linformer attention. Projects the key and values down to the compressed dimension, before computing self-attention. See "Linformer: Self-Attention with Linear Complexity" for more details. """ def __init__( self, embed_dim, num_heads, kdim=None, vdim=None, dropout=0.0, bias=True, add_bias_kv=False, add_zero_attn=False, self_attention=False, encoder_decoder_attention=False, q_noise=0.0, qn_block_size=8, compressed=1, max_seq_len=256, shared_kv_compressed=0, shared_compress_layer=None, freeze_compress=0, ): super().__init__() self.embed_dim = embed_dim self.kdim = kdim if kdim is not None else embed_dim self.vdim = vdim if vdim is not None else embed_dim self.qkv_same_dim = self.kdim == embed_dim and self.vdim == embed_dim self.num_heads = num_heads self.dropout = dropout self.head_dim = embed_dim // num_heads assert ( self.head_dim * num_heads == self.embed_dim ), "embed_dim must be divisible by num_heads" self.scaling = self.head_dim ** -0.5 self.self_attention = self_attention self.encoder_decoder_attention = encoder_decoder_attention assert not self.self_attention or self.qkv_same_dim, ( "Self-attention requires query, key and " "value to be of the same size" ) self.k_proj = quant_noise( nn.Linear(self.kdim, embed_dim, bias=bias), q_noise, qn_block_size ) self.v_proj = quant_noise( nn.Linear(self.vdim, embed_dim, bias=bias), q_noise, qn_block_size ) self.q_proj = quant_noise( nn.Linear(embed_dim, embed_dim, bias=bias), q_noise, qn_block_size ) # used for compress sequence to subsequence if shared_compress_layer is None: self.compress_seq_len = max_seq_len // compressed self.compress_k = nn.Linear(max_seq_len, self.compress_seq_len, bias=False) if shared_kv_compressed == 0: self.compress_v = nn.Linear( max_seq_len, self.compress_seq_len, bias=False ) self.layerwise_sharing = False else: self.compress_k = shared_compress_layer if shared_kv_compressed == 0: self.compress_v = shared_compress_layer self.layerwise_sharing = True self.shared_kv_compressed = shared_kv_compressed self.out_proj = quant_noise( nn.Linear(embed_dim, embed_dim, bias=bias), q_noise, qn_block_size ) if add_bias_kv: self.bias_k = Parameter(torch.Tensor(1, 1, embed_dim)) self.bias_v = Parameter(torch.Tensor(1, 1, embed_dim)) else: self.bias_k = self.bias_v = None self.add_zero_attn = add_zero_attn self.reset_parameters() if freeze_compress == 1: self.compress_k.weight.requires_grad = False if shared_kv_compressed == 0: self.compress_v.weight.requires_grad = False self.onnx_trace = False def prepare_for_onnx_export_(self): self.onnx_trace = True def reset_parameters(self): if self.qkv_same_dim: # Empirically observed the convergence to be much better with # the scaled initialization nn.init.xavier_uniform_(self.k_proj.weight, gain=1 / math.sqrt(2)) nn.init.xavier_uniform_(self.v_proj.weight, gain=1 / math.sqrt(2)) nn.init.xavier_uniform_(self.q_proj.weight, gain=1 / math.sqrt(2)) if ( not self.layerwise_sharing ): # otherwise, we already initialize the parameters 
nn.init.xavier_uniform_(self.compress_k.weight, gain=1 / math.sqrt(2)) if self.shared_kv_compressed == 0: nn.init.xavier_uniform_( self.compress_v.weight, gain=1 / math.sqrt(2) ) else: nn.init.xavier_uniform_(self.k_proj.weight) nn.init.xavier_uniform_(self.v_proj.weight) nn.init.xavier_uniform_(self.q_proj.weight) if ( not self.layerwise_sharing ): # otherwise, we already initialize the parameters nn.init.xavier_uniform_(self.compress_k.weight) if self.shared_kv_compressed == 0: nn.init.xavier_uniform_(self.compress_v.weight) nn.init.xavier_uniform_(self.out_proj.weight) if self.out_proj.bias is not None: nn.init.constant_(self.out_proj.bias, 0.0) if self.bias_k is not None: nn.init.xavier_normal_(self.bias_k) if self.bias_v is not None: nn.init.xavier_normal_(self.bias_v) def forward( self, query, key: Optional[Tensor], value: Optional[Tensor], key_padding_mask: Optional[Tensor] = None, incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]] = None, need_weights: bool = True, static_kv: bool = False, attn_mask: Optional[Tensor] = None, before_softmax: bool = False, need_head_weights: bool = False, ) -> Tuple[Tensor, Optional[Tensor]]: """Input shape: Time x Batch x Channel Args: key_padding_mask (ByteTensor, optional): mask to exclude keys that are pads, of shape `(batch, src_len)`, where padding elements are indicated by 1s. need_weights (bool, optional): return the attention weights, averaged over heads (default: False). attn_mask (ByteTensor, optional): typically used to implement causal attention, where the mask prevents the attention from looking forward in time (default: None). before_softmax (bool, optional): return the raw attention weights and values before the attention softmax. need_head_weights (bool, optional): return the attention weights for each head. Implies *need_weights*. Default: return the average attention weights over all heads. 
""" if need_head_weights: need_weights = True tgt_len, bsz, embed_dim = query.size() assert embed_dim == self.embed_dim assert list(query.size()) == [tgt_len, bsz, embed_dim] if incremental_state is not None: saved_state = self._get_input_buffer(incremental_state) if saved_state is not None and "prev_key" in saved_state: # previous time steps are cached - no need to recompute # key and value if they are static if static_kv: assert self.encoder_decoder_attention and not self.self_attention key = value = None else: saved_state = None if self.self_attention: q = self.q_proj(query) k_input = query.permute(1, 2, 0).contiguous() # B * C * T k_input = ( F.linear(k_input, self.compress_k.weight[:, 0:tgt_len]) .permute(2, 0, 1) .contiguous() ) k = self.k_proj(k_input) v_input = query.permute(1, 2, 0).contiguous() # B * C * T if self.shared_kv_compressed == 0: v_input = ( F.linear(v_input, self.compress_v.weight[:, 0:tgt_len]) .permute(2, 0, 1) .contiguous() ) if self.shared_kv_compressed == 1: # use shared kv compressed linear layer v_input = ( F.linear(v_input, self.compress_k.weight[:, 0:tgt_len]) .permute(2, 0, 1) .contiguous() ) v = self.v_proj(v_input) elif self.encoder_decoder_attention: # encoder-decoder attention q = self.q_proj(query) if key is None: assert value is None k = v = None else: k = self.k_proj(key) v = self.v_proj(key) else: assert key is not None and value is not None q = self.q_proj(query) k = self.k_proj(key) v = self.v_proj(value) q *= self.scaling if self.bias_k is not None: assert self.bias_v is not None k = torch.cat([k, self.bias_k.repeat(1, bsz, 1)]) v = torch.cat([v, self.bias_v.repeat(1, bsz, 1)]) if attn_mask is not None: attn_mask = torch.cat( [attn_mask, attn_mask.new_zeros(attn_mask.size(0), 1)], dim=1 ) if key_padding_mask is not None: key_padding_mask = torch.cat( [ key_padding_mask, key_padding_mask.new_zeros(key_padding_mask.size(0), 1), ], dim=1, ) q = ( q.contiguous() .view(tgt_len, bsz * self.num_heads, self.head_dim) .transpose(0, 1) ) if k is not None: k = ( k.contiguous() .view(-1, bsz * self.num_heads, self.head_dim) .transpose(0, 1) ) if v is not None: v = ( v.contiguous() .view(-1, bsz * self.num_heads, self.head_dim) .transpose(0, 1) ) if saved_state is not None: # saved states are stored with shape (bsz, num_heads, seq_len, head_dim) if "prev_key" in saved_state: _prev_key = saved_state["prev_key"] assert _prev_key is not None prev_key = _prev_key.view(bsz * self.num_heads, -1, self.head_dim) if static_kv: k = prev_key else: assert k is not None k = torch.cat([prev_key, k], dim=1) if "prev_value" in saved_state: _prev_value = saved_state["prev_value"] assert _prev_value is not None prev_value = _prev_value.view(bsz * self.num_heads, -1, self.head_dim) if static_kv: v = prev_value else: assert v is not None v = torch.cat([prev_value, v], dim=1) prev_key_padding_mask: Optional[Tensor] = None if "prev_key_padding_mask" in saved_state: prev_key_padding_mask = saved_state["prev_key_padding_mask"] assert k is not None and v is not None key_padding_mask = MultiheadLinearAttention._append_prev_key_padding_mask( key_padding_mask=key_padding_mask, prev_key_padding_mask=prev_key_padding_mask, batch_size=bsz, src_len=k.size(1), static_kv=static_kv, ) saved_state["prev_key"] = k.view(bsz, self.num_heads, -1, self.head_dim) saved_state["prev_value"] = v.view(bsz, self.num_heads, -1, self.head_dim) saved_state["prev_key_padding_mask"] = key_padding_mask # In this branch incremental_state is never None assert incremental_state is not None incremental_state = 
self._set_input_buffer(incremental_state, saved_state) assert k is not None src_len = k.size(1) if self.add_zero_attn: assert v is not None src_len += 1 k = torch.cat([k, k.new_zeros((k.size(0), 1) + k.size()[2:])], dim=1) v = torch.cat([v, v.new_zeros((v.size(0), 1) + v.size()[2:])], dim=1) if attn_mask is not None: attn_mask = torch.cat( [attn_mask, attn_mask.new_zeros(attn_mask.size(0), 1)], dim=1 ) attn_weights = torch.bmm(q, k.transpose(1, 2)) attn_weights = MultiheadLinearAttention.apply_sparse_mask( attn_weights, tgt_len, src_len, bsz ) assert list(attn_weights.size()) == [bsz * self.num_heads, tgt_len, src_len] if attn_mask is not None: attn_mask = attn_mask.unsqueeze(0) if self.onnx_trace: attn_mask = attn_mask.repeat(attn_weights.size(0), 1, 1) attn_weights += attn_mask if before_softmax: return attn_weights, v attn_weights_float = utils.softmax( attn_weights, dim=-1, onnx_trace=self.onnx_trace ) attn_weights = attn_weights_float.type_as(attn_weights) attn_probs = F.dropout( attn_weights, p=self.dropout, training=self.training, ) assert v is not None attn = torch.bmm(attn_probs, v) assert list(attn.size()) == [bsz * self.num_heads, tgt_len, self.head_dim] if self.onnx_trace and attn.size(1) == 1: # when ONNX tracing a single decoder step (sequence length == 1) # the transpose is a no-op copy before view, thus unnecessary attn = attn.contiguous().view(tgt_len, bsz, embed_dim) else: attn = attn.transpose(0, 1).contiguous().view(tgt_len, bsz, embed_dim) attn = self.out_proj(attn) attn_weights: Optional[Tensor] = None if need_weights: attn_weights = attn_weights_float.view( bsz, self.num_heads, tgt_len, src_len ).transpose(1, 0) if not need_head_weights: # average attention weights over heads attn_weights = attn_weights.mean(dim=0) return attn, attn_weights @staticmethod def _append_prev_key_padding_mask( key_padding_mask: Optional[Tensor], prev_key_padding_mask: Optional[Tensor], batch_size: int, src_len: int, static_kv: bool, ) -> Optional[Tensor]: # saved key padding masks have shape (bsz, seq_len) if prev_key_padding_mask is not None and static_kv: new_key_padding_mask = prev_key_padding_mask elif prev_key_padding_mask is not None and key_padding_mask is not None: new_key_padding_mask = torch.cat( [prev_key_padding_mask.float(), key_padding_mask.float()], dim=1 ) # During incremental decoding, as the padding token enters and # leaves the frame, there will be a time when prev or current # is None elif prev_key_padding_mask is not None: filler = torch.zeros( (batch_size, src_len - prev_key_padding_mask.size(1)), device=prev_key_padding_mask.device, ) new_key_padding_mask = torch.cat( [prev_key_padding_mask.float(), filler.float()], dim=1 ) elif key_padding_mask is not None: filler = torch.zeros( (batch_size, src_len - key_padding_mask.size(1)), device=key_padding_mask.device, ) new_key_padding_mask = torch.cat( [filler.float(), key_padding_mask.float()], dim=1 ) else: new_key_padding_mask = prev_key_padding_mask return new_key_padding_mask @torch.jit.export def reorder_incremental_state( self, incremental_state: Dict[str, Dict[str, Optional[Tensor]]], new_order: Tensor, ): """Reorder buffered internal state (for incremental generation).""" input_buffer = self._get_input_buffer(incremental_state) if input_buffer is not None: for k in input_buffer.keys(): input_buffer_k = input_buffer[k] if input_buffer_k is not None: if self.encoder_decoder_attention and input_buffer_k.size( 0 ) == new_order.size(0): break input_buffer[k] = input_buffer_k.index_select(0, new_order) incremental_state 
= self._set_input_buffer(incremental_state, input_buffer) return incremental_state def _get_input_buffer( self, incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]] ) -> Dict[str, Optional[Tensor]]: result = self.get_incremental_state(incremental_state, "attn_state") if result is not None: return result else: empty_result: Dict[str, Optional[Tensor]] = {} return empty_result def _set_input_buffer( self, incremental_state: Dict[str, Dict[str, Optional[Tensor]]], buffer: Dict[str, Optional[Tensor]], ): return self.set_incremental_state(incremental_state, "attn_state", buffer) def apply_sparse_mask(attn_weights, tgt_len: int, src_len: int, bsz: int): return attn_weights def upgrade_state_dict_named(self, state_dict, name): prefix = name + "." if name != "" else "" items_to_add = {} keys_to_remove = [] for k in state_dict.keys(): if k.endswith(prefix + "in_proj_weight"): # in_proj_weight used to be q + k + v with same dimensions dim = int(state_dict[k].shape[0] / 3) items_to_add[prefix + "q_proj.weight"] = state_dict[k][:dim] items_to_add[prefix + "k_proj.weight"] = state_dict[k][dim : 2 * dim] items_to_add[prefix + "v_proj.weight"] = state_dict[k][2 * dim :] keys_to_remove.append(k) k_bias = prefix + "in_proj_bias" if k_bias in state_dict.keys(): dim = int(state_dict[k].shape[0] / 3) items_to_add[prefix + "q_proj.bias"] = state_dict[k_bias][:dim] items_to_add[prefix + "k_proj.bias"] = state_dict[k_bias][ dim : 2 * dim ] items_to_add[prefix + "v_proj.bias"] = state_dict[k_bias][2 * dim :] keys_to_remove.append(prefix + "in_proj_bias") for k in keys_to_remove: del state_dict[k] for key, value in items_to_add.items(): state_dict[key] = value
KosmosX-API-main
kosmosX/fairseq/examples/linformer/linformer_src/modules/multihead_linear_attention.py
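The record above implements Linformer attention by projecting keys and values down along the sequence (time) dimension before computing self-attention. A minimal, self-contained sketch of just that compression step (hypothetical shapes and names, not the fairseq module itself):

import torch
import torch.nn.functional as F

# Hypothetical sizes for illustration only.
max_seq_len, compressed, embed_dim = 256, 4, 64
tgt_len, bsz = 100, 2

# One projection over the sequence axis: T -> T // compressed.
compress_k = torch.nn.Linear(max_seq_len, max_seq_len // compressed, bias=False)

x = torch.randn(tgt_len, bsz, embed_dim)            # T x B x C, as in forward()
k_input = x.permute(1, 2, 0)                        # B x C x T
# Slice the projection to the actual length, as the module does with
# compress_k.weight[:, 0:tgt_len], then project T down to T // compressed.
k_input = F.linear(k_input, compress_k.weight[:, :tgt_len])
k_input = k_input.permute(2, 0, 1)                  # (T // compressed) x B x C
print(k_input.shape)                                # torch.Size([64, 2, 64])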
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

import math

import torch.nn as nn
from fairseq.models.transformer import TransformerEncoder

from .linformer_sentence_encoder_layer import LinformerTransformerEncoderLayer


class LinformerTransformerEncoder(TransformerEncoder):
    """
    Implementation for a Bi-directional Linformer based Sentence Encoder used
    in BERT/XLM style pre-trained models.

    This first computes the token embedding using the token embedding matrix,
    position embeddings (if specified) and segment embeddings
    (if specified). After applying the specified number of
    LinformerEncoderLayers, it outputs all the internal states of the
    encoder as well as the final representation associated with the first
    token (usually CLS token).

    Input:
        - tokens: B x T matrix representing sentences
        - segment_labels: B x T matrix representing segment label for tokens

    Output:
        - a tuple of the following:
            - a list of internal model states used to compute the
              predictions where each tensor has shape T x B x C
            - sentence representation associated with first input token
              in format B x C.
    """

    def __init__(self, args, dictionary, embed_tokens):
        self.compress_layer = None
        super().__init__(args, dictionary, embed_tokens)

    def build_encoder_layer(self, args):
        if self.args.shared_layer_kv_compressed == 1 and self.compress_layer is None:
            compress_layer = nn.Linear(
                self.args.max_positions,
                self.args.max_positions // self.args.compressed,
            )
            # initialize parameters for the compression layer
            nn.init.xavier_uniform_(compress_layer.weight, gain=1 / math.sqrt(2))
            if self.args.freeze_compress == 1:
                compress_layer.weight.requires_grad = False
            self.compress_layer = compress_layer

        return LinformerTransformerEncoderLayer(args, self.compress_layer)
KosmosX-API-main
kosmosX/fairseq/examples/linformer/linformer_src/modules/linformer_sentence_encoder.py
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

import torch

from fairseq import utils
from fairseq.modules import TransformerEncoderLayer

from .multihead_linear_attention import MultiheadLinearAttention


class LinformerTransformerEncoderLayer(TransformerEncoderLayer):
    """
    Implements a Linformer Encoder Layer used in BERT/XLM style pre-trained
    models.
    """

    def __init__(self, args, shared_compress_layer):
        # wrap in a list so it's not automatically registered by PyTorch
        self.shared_compress_layer = [shared_compress_layer]

        super().__init__(args)

        self.register_buffer("version", torch.tensor(2))

    def build_self_attention(self, embed_dim, args):
        return MultiheadLinearAttention(
            embed_dim,
            args.encoder_attention_heads,
            dropout=args.dropout,
            self_attention=True,
            q_noise=args.quant_noise_pq,
            qn_block_size=args.quant_noise_pq_block_size,
            compressed=args.compressed,
            max_seq_len=args.max_positions,
            shared_kv_compressed=args.shared_kv_compressed,
            shared_compress_layer=self.shared_compress_layer[0],
            freeze_compress=args.freeze_compress,
        )

    def upgrade_state_dict_named(self, state_dict, name):
        super().upgrade_state_dict_named(state_dict, name)
        prefix = name + "." if name != "" else ""

        # some old checkpoints had weight sharing implemented incorrectly
        # (note: this was correct in the original paper code)
        if utils.item(state_dict.get(f"{prefix}version", torch.tensor(1))) < 2:
            state_dict[f"{prefix}version"] = torch.tensor(1)
            # check compression layer sharing
            if f"{prefix}shared_compress_layer.weight" in state_dict:
                # reinitialize block without sharing compression layer to match
                # old behavior
                self.shared_compress_layer = [
                    torch.nn.Linear(
                        self.shared_compress_layer[0].weight.size(1),
                        self.shared_compress_layer[0].weight.size(0),
                    )
                ]
                self.self_attn = self.build_self_attention(self.embed_dim, self.args)
                # delete shared_compress_layer, since it's already copied to
                # self_attn.compress_k.weight
                del state_dict[f"{prefix}shared_compress_layer.weight"]
                if f"{prefix}shared_compress_layer.bias" in state_dict:
                    del state_dict[f"{prefix}shared_compress_layer.bias"]
KosmosX-API-main
kosmosX/fairseq/examples/linformer/linformer_src/modules/linformer_sentence_encoder_layer.py
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

import torch

src_ckpt = "/checkpoint/wnhsu/w2v/archived/hubert_base_ls960_it2.pt"
ref_ckpt = "/checkpoint/wnhsu/w2v/hubert_icassp_oss_v3/iter2_km100-400k-grp-L6/oss.km500_p0_1_s334.pmw1_0.puw0_0.grpnorm.ml10.mp0_8.untie.mxsz250000.ufreq1.maxtok1400000.MU100k.s1337.ngpu32/checkpoint_last.pt"
new_ckpt = "/checkpoint/wnhsu/w2v/archived/hubert_base_ls960_it2_updated.pt"


def update_state(state):
    state["model"]["label_embs_concat"] = state["model"].pop("label_embs")
    state["args"].task = "hubert_pretraining"
    state["args"].labels = f"['{state['args'].labels}']"
    return state


src_state = torch.load(src_ckpt)
src_state = update_state(src_state)
torch.save(src_state, new_ckpt)
KosmosX-API-main
kosmosX/fairseq/examples/hubert/update_ckpt.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import numpy as np import os.path as op import re from tabulate import tabulate from collections import Counter def comp_purity(p_xy, axis): max_p = p_xy.max(axis=axis) marg_p = p_xy.sum(axis=axis) indv_pur = max_p / marg_p aggr_pur = max_p.sum() return indv_pur, aggr_pur def comp_entropy(p): return (-p * np.log(p + 1e-8)).sum() def comp_norm_mutual_info(p_xy): p_x = p_xy.sum(axis=1, keepdims=True) p_y = p_xy.sum(axis=0, keepdims=True) pmi = np.log(p_xy / np.matmul(p_x, p_y) + 1e-8) mi = (p_xy * pmi).sum() h_x = comp_entropy(p_x) h_y = comp_entropy(p_y) return mi, mi / h_x, mi / h_y, h_x, h_y def pad(labs, n): if n == 0: return np.array(labs) return np.concatenate([[labs[0]] * n, labs, [labs[-1]] * n]) def comp_avg_seg_dur(labs_list): n_frms = 0 n_segs = 0 for labs in labs_list: labs = np.array(labs) edges = np.zeros(len(labs)).astype(bool) edges[0] = True edges[1:] = labs[1:] != labs[:-1] n_frms += len(edges) n_segs += edges.astype(int).sum() return n_frms / n_segs def comp_joint_prob(uid2refs, uid2hyps): """ Args: pad: padding for spliced-feature derived labels """ cnts = Counter() skipped = [] abs_frmdiff = 0 for uid in uid2refs: if uid not in uid2hyps: skipped.append(uid) continue refs = uid2refs[uid] hyps = uid2hyps[uid] abs_frmdiff += abs(len(refs) - len(hyps)) min_len = min(len(refs), len(hyps)) refs = refs[:min_len] hyps = hyps[:min_len] cnts.update(zip(refs, hyps)) tot = sum(cnts.values()) ref_set = sorted({ref for ref, _ in cnts.keys()}) hyp_set = sorted({hyp for _, hyp in cnts.keys()}) ref2pid = dict(zip(ref_set, range(len(ref_set)))) hyp2lid = dict(zip(hyp_set, range(len(hyp_set)))) # print(hyp_set) p_xy = np.zeros((len(ref2pid), len(hyp2lid)), dtype=float) for (ref, hyp), cnt in cnts.items(): p_xy[ref2pid[ref], hyp2lid[hyp]] = cnt p_xy /= p_xy.sum() return p_xy, ref2pid, hyp2lid, tot, abs_frmdiff, skipped def read_phn(tsv_path, rm_stress=True): uid2phns = {} with open(tsv_path) as f: for line in f: uid, phns = line.rstrip().split("\t") phns = phns.split(",") if rm_stress: phns = [re.sub("[0-9]", "", phn) for phn in phns] uid2phns[uid] = phns return uid2phns def read_lab(tsv_path, lab_path, pad_len=0, upsample=1): """ tsv is needed to retrieve the uids for the labels """ with open(tsv_path) as f: f.readline() uids = [op.splitext(op.basename(line.rstrip().split()[0]))[0] for line in f] with open(lab_path) as f: labs_list = [pad(line.rstrip().split(), pad_len).repeat(upsample) for line in f] assert len(uids) == len(labs_list) return dict(zip(uids, labs_list)) def main_lab_lab( tsv_dir, lab_dir, lab_name, lab_sets, ref_dir, ref_name, pad_len=0, upsample=1, verbose=False, ): # assume tsv_dir is the same for both the reference and the hypotheses tsv_dir = lab_dir if tsv_dir is None else tsv_dir uid2refs = {} for s in lab_sets: uid2refs.update(read_lab(f"{tsv_dir}/{s}.tsv", f"{ref_dir}/{s}.{ref_name}")) uid2hyps = {} for s in lab_sets: uid2hyps.update( read_lab( f"{tsv_dir}/{s}.tsv", f"{lab_dir}/{s}.{lab_name}", pad_len, upsample ) ) _main(uid2refs, uid2hyps, verbose) def main_phn_lab( tsv_dir, lab_dir, lab_name, lab_sets, phn_dir, phn_sets, pad_len=0, upsample=1, verbose=False, ): uid2refs = {} for s in phn_sets: uid2refs.update(read_phn(f"{phn_dir}/{s}.tsv")) uid2hyps = {} tsv_dir = lab_dir if tsv_dir is None else tsv_dir for s in lab_sets: uid2hyps.update( read_lab( f"{tsv_dir}/{s}.tsv", 
f"{lab_dir}/{s}.{lab_name}", pad_len, upsample ) ) _main(uid2refs, uid2hyps, verbose) def _main(uid2refs, uid2hyps, verbose): (p_xy, ref2pid, hyp2lid, tot, frmdiff, skipped) = comp_joint_prob( uid2refs, uid2hyps ) ref_pur_by_hyp, ref_pur = comp_purity(p_xy, axis=0) hyp_pur_by_ref, hyp_pur = comp_purity(p_xy, axis=1) (mi, mi_norm_by_ref, mi_norm_by_hyp, h_ref, h_hyp) = comp_norm_mutual_info(p_xy) outputs = { "ref pur": ref_pur, "hyp pur": hyp_pur, "H(ref)": h_ref, "H(hyp)": h_hyp, "MI": mi, "MI/H(ref)": mi_norm_by_ref, "ref segL": comp_avg_seg_dur(uid2refs.values()), "hyp segL": comp_avg_seg_dur(uid2hyps.values()), "p_xy shape": p_xy.shape, "frm tot": tot, "frm diff": frmdiff, "utt tot": len(uid2refs), "utt miss": len(skipped), } print(tabulate([outputs.values()], outputs.keys(), floatfmt=".4f")) if __name__ == "__main__": """ compute quality of labels with respect to phone or another labels if set """ import argparse parser = argparse.ArgumentParser() parser.add_argument("tsv_dir") parser.add_argument("lab_dir") parser.add_argument("lab_name") parser.add_argument("--lab_sets", default=["valid"], type=str, nargs="+") parser.add_argument( "--phn_dir", default="/checkpoint/wnhsu/data/librispeech/960h/fa/raw_phn/phone_frame_align_v1", ) parser.add_argument( "--phn_sets", default=["dev-clean", "dev-other"], type=str, nargs="+" ) parser.add_argument("--pad_len", default=0, type=int, help="padding for hypotheses") parser.add_argument( "--upsample", default=1, type=int, help="upsample factor for hypotheses" ) parser.add_argument("--ref_lab_dir", default="") parser.add_argument("--ref_lab_name", default="") parser.add_argument("--verbose", action="store_true") args = parser.parse_args() if args.ref_lab_dir and args.ref_lab_name: main_lab_lab( args.tsv_dir, args.lab_dir, args.lab_name, args.lab_sets, args.ref_lab_dir, args.ref_lab_name, args.pad_len, args.upsample, args.verbose, ) else: main_phn_lab( args.tsv_dir, args.lab_dir, args.lab_name, args.lab_sets, args.phn_dir, args.phn_sets, args.pad_len, args.upsample, args.verbose, )
KosmosX-API-main
kosmosX/fairseq/examples/hubert/measure_teacher_quality.py
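comp_purity and comp_norm_mutual_info above operate on a joint probability matrix p_xy over (reference label, hypothesis label) pairs. A small worked example with a made-up 2x2 matrix (illustrative numbers only) shows how purity and normalized mutual information fall out of the same formulas:

import numpy as np

# Toy joint distribution: rows = reference phones, columns = hypothesis units.
p_xy = np.array([[0.4, 0.1],
                 [0.1, 0.4]])

# Purity with axis=0: for each hypothesis unit (column), take its dominant
# reference mass, then sum; here 0.4 + 0.4 = 0.8.
max_p = p_xy.max(axis=0)
print("purity:", max_p.sum())            # 0.8

# Normalized mutual information, as in comp_norm_mutual_info.
p_x = p_xy.sum(axis=1, keepdims=True)
p_y = p_xy.sum(axis=0, keepdims=True)
pmi = np.log(p_xy / np.matmul(p_x, p_y) + 1e-8)
mi = (p_xy * pmi).sum()
h_x = (-p_x * np.log(p_x + 1e-8)).sum()
print("MI/H(ref):", mi / h_x)            # ~0.28 for this toy matrix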
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import logging import os import sys import numpy as np import joblib import torch import tqdm logging.basicConfig( format="%(asctime)s | %(levelname)s | %(name)s | %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=os.environ.get("LOGLEVEL", "INFO").upper(), stream=sys.stdout, ) logger = logging.getLogger("dump_km_label") class ApplyKmeans(object): def __init__(self, km_path): self.km_model = joblib.load(km_path) self.C_np = self.km_model.cluster_centers_.transpose() self.Cnorm_np = (self.C_np ** 2).sum(0, keepdims=True) self.C = torch.from_numpy(self.C_np) self.Cnorm = torch.from_numpy(self.Cnorm_np) if torch.cuda.is_available(): self.C = self.C.cuda() self.Cnorm = self.Cnorm.cuda() def __call__(self, x): if isinstance(x, torch.Tensor): dist = ( x.pow(2).sum(1, keepdim=True) - 2 * torch.matmul(x, self.C) + self.Cnorm ) return dist.argmin(dim=1).cpu().numpy() else: dist = ( (x ** 2).sum(1, keepdims=True) - 2 * np.matmul(x, self.C_np) + self.Cnorm_np ) return np.argmin(dist, axis=1) def get_feat_iterator(feat_dir, split, nshard, rank): feat_path = f"{feat_dir}/{split}_{rank}_{nshard}.npy" leng_path = f"{feat_dir}/{split}_{rank}_{nshard}.len" with open(leng_path, "r") as f: lengs = [int(line.rstrip()) for line in f] offsets = [0] + np.cumsum(lengs[:-1]).tolist() def iterate(): feat = np.load(feat_path, mmap_mode="r") assert feat.shape[0] == (offsets[-1] + lengs[-1]) for offset, leng in zip(offsets, lengs): yield feat[offset: offset + leng] return iterate, len(lengs) def dump_label(feat_dir, split, km_path, nshard, rank, lab_dir): apply_kmeans = ApplyKmeans(km_path) generator, num = get_feat_iterator(feat_dir, split, nshard, rank) iterator = generator() lab_path = f"{lab_dir}/{split}_{rank}_{nshard}.km" os.makedirs(lab_dir, exist_ok=True) with open(lab_path, "w") as f: for feat in tqdm.tqdm(iterator, total=num): # feat = torch.from_numpy(feat).cuda() lab = apply_kmeans(feat).tolist() f.write(" ".join(map(str, lab)) + "\n") logger.info("finished successfully") if __name__ == "__main__": import argparse parser = argparse.ArgumentParser() parser.add_argument("feat_dir") parser.add_argument("split") parser.add_argument("km_path") parser.add_argument("nshard", type=int) parser.add_argument("rank", type=int) parser.add_argument("lab_dir") args = parser.parse_args() logging.info(str(args)) dump_label(**vars(args))
KosmosX-API-main
kosmosX/fairseq/examples/hubert/simple_kmeans/dump_km_label.py
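ApplyKmeans above avoids materializing per-pair differences by expanding the squared Euclidean distance, ||x - c||^2 = ||x||^2 - 2 x.c + ||c||^2, with ||c||^2 precomputed per centroid. A tiny numpy sketch (random data, hypothetical sizes) checking that expansion against the naive pairwise computation:

import numpy as np

rng = np.random.default_rng(0)
x = rng.normal(size=(5, 8))        # 5 frames, 8-dim features
C = rng.normal(size=(8, 3))        # 3 centroids, stored transposed as in ApplyKmeans
Cnorm = (C ** 2).sum(0, keepdims=True)

# Expanded form used in __call__ for the numpy branch.
dist = (x ** 2).sum(1, keepdims=True) - 2 * np.matmul(x, C) + Cnorm
labels = np.argmin(dist, axis=1)

# Naive reference: explicit pairwise squared distances.
naive = ((x[:, None, :] - C.T[None, :, :]) ** 2).sum(-1)
assert np.allclose(dist, naive)
assert (labels == np.argmin(naive, axis=1)).all()
print(labels)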
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import logging import os import sys import fairseq import soundfile as sf import torch import torch.nn.functional as F from feature_utils import get_path_iterator, dump_feature logging.basicConfig( format="%(asctime)s | %(levelname)s | %(name)s | %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=os.environ.get("LOGLEVEL", "INFO").upper(), stream=sys.stdout, ) logger = logging.getLogger("dump_hubert_feature") class HubertFeatureReader(object): def __init__(self, ckpt_path, layer, max_chunk=1600000): ( model, cfg, task, ) = fairseq.checkpoint_utils.load_model_ensemble_and_task([ckpt_path]) self.model = model[0].eval().cuda() self.task = task self.layer = layer self.max_chunk = max_chunk logger.info(f"TASK CONFIG:\n{self.task.cfg}") logger.info(f" max_chunk = {self.max_chunk}") def read_audio(self, path, ref_len=None): wav, sr = sf.read(path) assert sr == self.task.cfg.sample_rate, sr if wav.ndim == 2: wav = wav.mean(-1) assert wav.ndim == 1, wav.ndim if ref_len is not None and abs(ref_len - len(wav)) > 160: logging.warning(f"ref {ref_len} != read {len(wav)} ({path})") return wav def get_feats(self, path, ref_len=None): x = self.read_audio(path, ref_len) with torch.no_grad(): x = torch.from_numpy(x).float().cuda() if self.task.cfg.normalize: x = F.layer_norm(x, x.shape) x = x.view(1, -1) feat = [] for start in range(0, x.size(1), self.max_chunk): x_chunk = x[:, start: start + self.max_chunk] feat_chunk, _ = self.model.extract_features( source=x_chunk, padding_mask=None, mask=False, output_layer=self.layer, ) feat.append(feat_chunk) return torch.cat(feat, 1).squeeze(0) def main(tsv_dir, split, ckpt_path, layer, nshard, rank, feat_dir, max_chunk): reader = HubertFeatureReader(ckpt_path, layer, max_chunk) generator, num = get_path_iterator(f"{tsv_dir}/{split}.tsv", nshard, rank) dump_feature(reader, generator, num, split, nshard, rank, feat_dir) if __name__ == "__main__": import argparse parser = argparse.ArgumentParser() parser.add_argument("tsv_dir") parser.add_argument("split") parser.add_argument("ckpt_path") parser.add_argument("layer", type=int) parser.add_argument("nshard", type=int) parser.add_argument("rank", type=int) parser.add_argument("feat_dir") parser.add_argument("--max_chunk", type=int, default=1600000) args = parser.parse_args() logger.info(args) main(**vars(args))
KosmosX-API-main
kosmosX/fairseq/examples/hubert/simple_kmeans/dump_hubert_feature.py
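HubertFeatureReader.get_feats above walks over the waveform in max_chunk-sized windows and concatenates the per-chunk outputs along the time axis. A stripped-down sketch of that loop with a stand-in extract function (a placeholder, not the real model call) makes the bookkeeping explicit:

import torch

def extract(chunk):
    # Stand-in for model.extract_features(...): pretend every 320 samples
    # become one feature frame of dimension 4.
    n_frames = chunk.size(1) // 320
    return torch.zeros(1, n_frames, 4)

x = torch.randn(1, 1_000_000)        # 1 x T waveform, already normalized
max_chunk = 160_000                  # hypothetical chunk size for the sketch

feat = []
for start in range(0, x.size(1), max_chunk):
    x_chunk = x[:, start:start + max_chunk]
    feat.append(extract(x_chunk))

feat = torch.cat(feat, 1).squeeze(0)  # (total_frames, feature_dim)
print(feat.shape)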
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import csv import io import logging import os import os.path as op import sys from dump_hubert_feature import HubertFeatureReader from feature_utils import get_shard_range, dump_feature from fairseq.data.audio.audio_utils import get_waveform from fairseq.data.audio.speech_to_text_dataset import ( read_from_uncompressed_zip, ) logging.basicConfig( format="%(asctime)s | %(levelname)s | %(name)s | %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=os.environ.get("LOGLEVEL", "INFO").upper(), stream=sys.stdout, ) logger = logging.getLogger("dump_hubert_feature_s2t") class HubertFeatureReaderS2T(HubertFeatureReader): def read_audio(self, path, ref_len=None): path, *extra = path.split(":") assert len(extra) == 2 assert path.endswith(".zip") data = read_from_uncompressed_zip(path, int(extra[0]), int(extra[1])) f = io.BytesIO(data) wav, sr = get_waveform(f) assert sr == self.task.cfg.sample_rate, sr if wav.ndim == 2: wav = wav.mean(-1) assert wav.ndim == 1, wav.ndim if ref_len is not None and abs(ref_len - len(wav)) > 160: logging.warning(f"ref {ref_len} != read {len(wav)} ({path})") return wav def get_path_iterator(root, tsv, nshard, rank): with open(tsv) as f: reader = csv.DictReader( f, delimiter="\t", quotechar=None, doublequote=False, lineterminator="\n", quoting=csv.QUOTE_NONE, ) subpaths = [op.join(root, e["audio"]) for e in reader] start, end = get_shard_range(len(subpaths), nshard, rank) subpaths = subpaths[start:end] def iterate(): for subpath in subpaths: yield op.join(root, subpath), None return iterate, len(subpaths) def main( root, tsv_path, ckpt_path, layer, nshard, rank, feat_dir, split, max_chunk ): reader = HubertFeatureReaderS2T(ckpt_path, layer, max_chunk) generator, num = get_path_iterator(root, tsv_path, nshard, rank) dump_feature(reader, generator, num, split, nshard, rank, feat_dir) if __name__ == "__main__": import argparse parser = argparse.ArgumentParser() parser.add_argument("root") parser.add_argument("tsv_path") parser.add_argument("ckpt_path") parser.add_argument("layer", type=int) parser.add_argument("nshard", type=int) parser.add_argument("rank", type=int) parser.add_argument("feat_dir") parser.add_argument("split") parser.add_argument("--max_chunk", type=int, default=1600000) args = parser.parse_args() logger.info(args) main(**vars(args))
KosmosX-API-main
kosmosX/fairseq/examples/hubert/simple_kmeans/dump_hubert_feature_s2t.py
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

import logging
import os
import sys

import tqdm
from npy_append_array import NpyAppendArray

logging.basicConfig(
    format="%(asctime)s | %(levelname)s | %(name)s | %(message)s",
    datefmt="%Y-%m-%d %H:%M:%S",
    level=os.environ.get("LOGLEVEL", "INFO").upper(),
    stream=sys.stdout,
)
logger = logging.getLogger("feature_utils")


def get_shard_range(tot, nshard, rank):
    assert rank < nshard and rank >= 0, f"invalid rank/nshard {rank}/{nshard}"
    start = round(tot / nshard * rank)
    end = round(tot / nshard * (rank + 1))
    assert start < end, f"start={start}, end={end}"
    logger.info(
        f"rank {rank} of {nshard}, process {end-start} "
        f"({start}-{end}) out of {tot}"
    )
    return start, end


def get_path_iterator(tsv, nshard, rank):
    with open(tsv, "r") as f:
        root = f.readline().rstrip()
        lines = [line.rstrip() for line in f]
        start, end = get_shard_range(len(lines), nshard, rank)
        lines = lines[start:end]

        def iterate():
            for line in lines:
                subpath, nsample = line.split("\t")
                yield f"{root}/{subpath}", int(nsample)

    return iterate, len(lines)


def dump_feature(reader, generator, num, split, nshard, rank, feat_dir):
    iterator = generator()

    feat_path = f"{feat_dir}/{split}_{rank}_{nshard}.npy"
    leng_path = f"{feat_dir}/{split}_{rank}_{nshard}.len"

    os.makedirs(feat_dir, exist_ok=True)
    if os.path.exists(feat_path):
        os.remove(feat_path)

    feat_f = NpyAppendArray(feat_path)
    with open(leng_path, "w") as leng_f:
        for path, nsample in tqdm.tqdm(iterator, total=num):
            feat = reader.get_feats(path, nsample)
            feat_f.append(feat.cpu().numpy())
            leng_f.write(f"{len(feat)}\n")
    logger.info("finished successfully")
KosmosX-API-main
kosmosX/fairseq/examples/hubert/simple_kmeans/feature_utils.py
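get_shard_range above splits tot items into nshard roughly equal contiguous ranges via rounding; for example, tot=10 and nshard=3 gives the ranks [0,3), [3,7), [7,10). A quick standalone check of just that arithmetic (plain Python, no fairseq needed):

def shard_range(tot, nshard, rank):
    # Same rounding scheme as get_shard_range, without logging/asserts.
    start = round(tot / nshard * rank)
    end = round(tot / nshard * (rank + 1))
    return start, end

print([shard_range(10, 3, r) for r in range(3)])
# [(0, 3), (3, 7), (7, 10)]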
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import logging import os import sys import numpy as np from sklearn.cluster import MiniBatchKMeans import joblib logging.basicConfig( format="%(asctime)s | %(levelname)s | %(name)s | %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=os.environ.get("LOGLEVEL", "INFO").upper(), stream=sys.stdout, ) logger = logging.getLogger("learn_kmeans") def get_km_model( n_clusters, init, max_iter, batch_size, tol, max_no_improvement, n_init, reassignment_ratio, ): return MiniBatchKMeans( n_clusters=n_clusters, init=init, max_iter=max_iter, batch_size=batch_size, verbose=1, compute_labels=False, tol=tol, max_no_improvement=max_no_improvement, init_size=None, n_init=n_init, reassignment_ratio=reassignment_ratio, ) def load_feature_shard(feat_dir, split, nshard, rank, percent): feat_path = f"{feat_dir}/{split}_{rank}_{nshard}.npy" leng_path = f"{feat_dir}/{split}_{rank}_{nshard}.len" with open(leng_path, "r") as f: lengs = [int(line.rstrip()) for line in f] offsets = [0] + np.cumsum(lengs[:-1]).tolist() if percent < 0: return np.load(feat_path, mmap_mode="r") else: nsample = int(np.ceil(len(lengs) * percent)) indices = np.random.choice(len(lengs), nsample, replace=False) feat = np.load(feat_path, mmap_mode="r") sampled_feat = np.concatenate( [feat[offsets[i]: offsets[i] + lengs[i]] for i in indices], axis=0 ) logger.info( ( f"sampled {nsample} utterances, {len(sampled_feat)} frames " f"from shard {rank}/{nshard}" ) ) return sampled_feat def load_feature(feat_dir, split, nshard, seed, percent): assert percent <= 1.0 feat = np.concatenate( [ load_feature_shard(feat_dir, split, nshard, r, percent) for r in range(nshard) ], axis=0, ) logging.info(f"loaded feature with dimension {feat.shape}") return feat def learn_kmeans( feat_dir, split, nshard, km_path, n_clusters, seed, percent, init, max_iter, batch_size, tol, n_init, reassignment_ratio, max_no_improvement, ): np.random.seed(seed) feat = load_feature(feat_dir, split, nshard, seed, percent) km_model = get_km_model( n_clusters, init, max_iter, batch_size, tol, max_no_improvement, n_init, reassignment_ratio, ) km_model.fit(feat) joblib.dump(km_model, km_path) inertia = -km_model.score(feat) / len(feat) logger.info("total intertia: %.5f", inertia) logger.info("finished successfully") if __name__ == "__main__": import argparse parser = argparse.ArgumentParser() parser.add_argument("feat_dir", type=str) parser.add_argument("split", type=str) parser.add_argument("nshard", type=int) parser.add_argument("km_path", type=str) parser.add_argument("n_clusters", type=int) parser.add_argument("--seed", default=0, type=int) parser.add_argument( "--percent", default=-1, type=float, help="sample a subset; -1 for all" ) parser.add_argument("--init", default="k-means++") parser.add_argument("--max_iter", default=100, type=int) parser.add_argument("--batch_size", default=10000, type=int) parser.add_argument("--tol", default=0.0, type=float) parser.add_argument("--max_no_improvement", default=100, type=int) parser.add_argument("--n_init", default=20, type=int) parser.add_argument("--reassignment_ratio", default=0.0, type=float) args = parser.parse_args() logging.info(str(args)) learn_kmeans(**vars(args))
KosmosX-API-main
kosmosX/fairseq/examples/hubert/simple_kmeans/learn_kmeans.py
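learn_kmeans above subsamples a fraction of utterances and fits a scikit-learn MiniBatchKMeans model to the pooled frames. A minimal end-to-end sketch on random features (toy sizes; same scikit-learn API as get_km_model, but not the script's full argument set):

import numpy as np
from sklearn.cluster import MiniBatchKMeans

rng = np.random.default_rng(0)
feat = rng.normal(size=(2000, 39)).astype(np.float32)   # e.g. MFCC+delta frames

km = MiniBatchKMeans(
    n_clusters=10,
    init="k-means++",
    max_iter=100,
    batch_size=500,
    compute_labels=False,
    n_init=5,
)
km.fit(feat)

# Average inertia per frame, mirroring `-km_model.score(feat) / len(feat)`.
print(-km.score(feat) / len(feat))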
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

import logging
import os
import sys

import soundfile as sf
import torch
import torchaudio

from feature_utils import get_path_iterator, dump_feature

logging.basicConfig(
    format="%(asctime)s | %(levelname)s | %(name)s | %(message)s",
    datefmt="%Y-%m-%d %H:%M:%S",
    level=os.environ.get("LOGLEVEL", "INFO").upper(),
    stream=sys.stdout,
)
logger = logging.getLogger("dump_mfcc_feature")


class MfccFeatureReader(object):
    def __init__(self, sample_rate):
        self.sample_rate = sample_rate

    def read_audio(self, path, ref_len=None):
        wav, sr = sf.read(path)
        assert sr == self.sample_rate, sr
        if wav.ndim == 2:
            wav = wav.mean(-1)
        assert wav.ndim == 1, wav.ndim
        if ref_len is not None and abs(ref_len - len(wav)) > 160:
            logging.warning(f"ref {ref_len} != read {len(wav)} ({path})")
        return wav

    def get_feats(self, path, ref_len=None):
        x = self.read_audio(path, ref_len)
        with torch.no_grad():
            x = torch.from_numpy(x).float()
            x = x.view(1, -1)

            mfccs = torchaudio.compliance.kaldi.mfcc(
                waveform=x,
                sample_frequency=self.sample_rate,
                use_energy=False,
            )  # (time, freq)
            mfccs = mfccs.transpose(0, 1)  # (freq, time)
            deltas = torchaudio.functional.compute_deltas(mfccs)
            ddeltas = torchaudio.functional.compute_deltas(deltas)
            concat = torch.cat([mfccs, deltas, ddeltas], dim=0)
            concat = concat.transpose(0, 1).contiguous()  # (time, freq)
            return concat


def main(tsv_dir, split, nshard, rank, feat_dir, sample_rate):
    reader = MfccFeatureReader(sample_rate)
    generator, num = get_path_iterator(f"{tsv_dir}/{split}.tsv", nshard, rank)
    dump_feature(reader, generator, num, split, nshard, rank, feat_dir)


if __name__ == "__main__":
    import argparse

    parser = argparse.ArgumentParser()
    parser.add_argument("tsv_dir")
    parser.add_argument("split")
    parser.add_argument("nshard", type=int)
    parser.add_argument("rank", type=int)
    parser.add_argument("feat_dir")
    parser.add_argument("--sample_rate", type=int, default=16000)
    args = parser.parse_args()
    logger.info(args)

    main(**vars(args))
KosmosX-API-main
kosmosX/fairseq/examples/hubert/simple_kmeans/dump_mfcc_feature.py
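MfccFeatureReader stacks Kaldi-style MFCCs (13 cepstra with torchaudio's defaults) with their first and second derivatives, giving 39-dimensional frames. A short check on a synthetic waveform (one second of noise at 16 kHz, illustrative only):

import torch
import torchaudio

x = torch.randn(1, 16000)  # 1 x T, one second at 16 kHz
mfccs = torchaudio.compliance.kaldi.mfcc(
    waveform=x, sample_frequency=16000.0, use_energy=False
)  # (time, 13)
mfccs = mfccs.transpose(0, 1)                           # (13, time)
deltas = torchaudio.functional.compute_deltas(mfccs)
ddeltas = torchaudio.functional.compute_deltas(deltas)
feat = torch.cat([mfccs, deltas, ddeltas], dim=0).transpose(0, 1)
print(feat.shape)  # (time, 39)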
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import logging import os import sys import fairseq import soundfile as sf import torch import torch.nn.functional as F from feature_utils import get_path_iterator, dump_feature logging.basicConfig( format="%(asctime)s | %(levelname)s | %(name)s | %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=os.environ.get("LOGLEVEL", "INFO").upper(), stream=sys.stdout, ) logger = logging.getLogger("dump_w2v2_feature") class Wav2Vec2FeatureReader(object): def __init__(self, ckpt_path, layer, max_chunk=1600000): ( model, cfg, task, ) = fairseq.checkpoint_utils.load_model_ensemble_and_task([ckpt_path]) self.model = model[0].eval().cuda() self.task = task self.layer = layer # assume this is 1-based like HuBERT self.max_chunk = max_chunk logger.info(f"TASK CONFIG:\n{self.task.cfg}") logger.info(f" max_chunk = {self.max_chunk}") logger.info(f" model:\n{self.model}") def read_audio(self, path, ref_len=None): wav, sr = sf.read(path) assert sr == self.task.cfg.sample_rate, sr if wav.ndim == 2: wav = wav.mean(-1) assert wav.ndim == 1, wav.ndim if ref_len is not None and abs(ref_len - len(wav)) > 160: logging.warning(f"ref {ref_len} != read {len(wav)} ({path})") return wav def get_feats(self, path, ref_len=None): x = self.read_audio(path, ref_len) with torch.no_grad(): x = torch.from_numpy(x).float().cuda() if self.task.cfg.normalize: x = F.layer_norm(x, x.shape) x = x.view(1, -1) feat = [] for start in range(0, x.size(1), self.max_chunk): x_chunk = x[:, start: start + self.max_chunk] res = self.model.extract_features( source=x_chunk, padding_mask=None, mask=False, layer=self.layer - 1, ) feat_chunk = res["x"] feat.append(feat_chunk) return torch.cat(feat, 1).squeeze(0) def main(tsv_dir, split, ckpt_path, layer, nshard, rank, feat_dir, max_chunk): reader = Wav2Vec2FeatureReader(ckpt_path, layer, max_chunk) generator, num = get_path_iterator(f"{tsv_dir}/{split}.tsv", nshard, rank) dump_feature(reader, generator, num, split, nshard, rank, feat_dir) if __name__ == "__main__": import argparse parser = argparse.ArgumentParser() parser.add_argument("tsv_dir") parser.add_argument("split") parser.add_argument("ckpt_path") parser.add_argument("layer", type=int) parser.add_argument("nshard", type=int) parser.add_argument("rank", type=int) parser.add_argument("feat_dir") parser.add_argument("--max_chunk", type=int, default=1600000) args = parser.parse_args() logger.info(args) main(**vars(args))
KosmosX-API-main
kosmosX/fairseq/examples/hubert/simple_kmeans/dump_w2v2_feature.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import argparse import logging import os import joblib import numpy as np from examples.textless_nlp.gslm.speech2unit.clustering.utils import get_audio_files from examples.textless_nlp.gslm.speech2unit.pretrained.utils import get_features def get_logger(): log_format = "[%(asctime)s] [%(levelname)s]: %(message)s" logging.basicConfig(format=log_format, level=logging.INFO) logger = logging.getLogger(__name__) return logger def get_parser(): parser = argparse.ArgumentParser( description="Quantize using K-means clustering over acoustic features." ) parser.add_argument( "--feature_type", type=str, choices=["logmel", "hubert", "w2v2", "cpc"], default=None, required=True, help="Acoustic feature type", ) parser.add_argument( "--kmeans_model_path", type=str, required=True, help="K-means model file path to use for inference", ) parser.add_argument( "--manifest_path", type=str, default=None, help="Manifest file containing the root dir and file names", ) parser.add_argument( "--checkpoint_path", type=str, help="Pretrained model checkpoint", ) parser.add_argument( "--layer", type=int, help="The layer of the pretrained model to extract features from", default=-1, ) parser.add_argument( "--out_dir_path", required=True, type=str, help="File path of quantized output.", ) parser.add_argument( "--extension", type=str, default=".flac", help="Features file path" ) return parser def one_hot(feat, n_clusters): return np.eye(n_clusters)[feat] def main(args, logger): # Feature extraction logger.info(f"Extracting {args.feature_type} acoustic features...") features_batch = get_features( feature_type=args.feature_type, checkpoint_path=args.checkpoint_path, layer=args.layer, manifest_path=args.manifest_path, sample_pct=1.0, flatten=False, ) logger.info(f"Features extracted for {len(features_batch)} utterances.\n") logger.info(f"Dimensionality of representation = {features_batch[0].shape[1]}") logger.info(f"Loading K-means model from {args.kmeans_model_path} ...") kmeans_model = joblib.load(open(args.kmeans_model_path, "rb")) kmeans_model.verbose = False _, fnames, _ = get_audio_files(args.manifest_path) os.makedirs(args.out_dir_path, exist_ok=True) logger.info(f"Writing quantized features to {args.out_dir_path}") for i, feats in enumerate(features_batch): pred = kmeans_model.predict(feats) emb = one_hot(pred, kmeans_model.n_clusters) base_fname = os.path.basename(fnames[i]).rstrip(args.extension) output_path = os.path.join(args.out_dir_path, f"{base_fname}.npy") with open(output_path, "wb") as f: np.save(f, emb) if __name__ == "__main__": parser = get_parser() args = parser.parse_args() logger = get_logger() logger.info(args) main(args, logger)
KosmosX-API-main
kosmosX/fairseq/examples/textless_nlp/gslm/metrics/abx_metrics/dump_abx_feats.py
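The one_hot helper above turns k-means cluster ids into one-hot rows by indexing an identity matrix. For example:

import numpy as np

def one_hot(feat, n_clusters):
    return np.eye(n_clusters)[feat]

pred = np.array([2, 0, 1])
print(one_hot(pred, 3))
# [[0. 0. 1.]
#  [1. 0. 0.]
#  [0. 1. 0.]]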
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import numpy as np import nltk from misc.bleu_utils import sentence_bleu import warnings def get_target_sequences(manifest, ground_truth, to_take=1000): import json import pathlib with open(ground_truth, 'r') as fin: original_continuations = json.loads(fin.read()) sequence2length = [(k, v[0]) for k, v in original_continuations.items()] assert all(float(v) >= 6.0 for (_, v) in sequence2length) # 6 seconds sequence2length.sort(key=lambda x: x[1]) to_take_sequences = set(v[0] for v in sequence2length[:to_take]) to_take_ids = [] with open(manifest, 'r') as f: f.readline() for i, line in enumerate(f.readlines()): seq_id = line.split()[0] seq_id = pathlib.Path(seq_id).name.split('__')[0] if seq_id in to_take_sequences: to_take_ids.append(i) print(f'Took {len(to_take_ids)} ids') return set(to_take_ids) def get_args(): import argparse parser = argparse.ArgumentParser() parser.add_argument('--asr-transcript', type=str, help='Path to the transcript file.') parser.add_argument('--manifest', required=True) parser.add_argument('--prompts-description', required=True) parser.add_argument('--cut-id', action='store_true', help='Whether cut the first token (typically a seq id)') parser.add_argument('--cut-tail', action='store_true', help='Whether cut the last token (typically a speaker id)') parser.add_argument('--debug', action='store_true') args = parser.parse_args() return args def get_self_bleu(utterances, averaging_mode, weights): self_bleu = [] for i in range(len(utterances)): hypo = utterances[i] rest = utterances[:i] + utterances[i+1:] self_bleu.append(sentence_bleu(rest, hypo, weights, no_length_penalty=True, averaging_mode=averaging_mode)) return self_bleu def get_self_bleu2_arithmetic(utterances): weights = (0.5, 0.5) # equal weight for unigrams and bigrams return get_self_bleu(utterances, averaging_mode='arithmetic', weights=weights) def get_self_bleu2_geometric(utterances): weights = (0.5, 0.5) return get_self_bleu(utterances, averaging_mode='geometric', weights=weights) def get_auto_bleu2_arithmetic(utterances): weights = (0.5, 0.5) return [auto_bleu(u, mean_mode='arithmetic', weights=weights) for u in utterances] def get_auto_bleu2_geometric(utterances): weights = (0.5, 0.5) return [auto_bleu(u, mean_mode='geometric', weights=weights) for u in utterances] def get_auto_bleu3_geometric(utterances): weights = (1./3, 1./3, 1./3) return [auto_bleu(u, mean_mode='geometric', weights=weights) for u in utterances] def get_auto_bleu3_arithmetic(utterances): weights = (1./3, 1./3, 1./3) return [auto_bleu(u, mean_mode='arithmetic', weights=weights) for u in utterances] def get_self_bleu3_arithmetic(utterances): weights = (1./3, 1./3, 1./3) return get_self_bleu(utterances, averaging_mode='arithmetic', weights=weights) def get_self_bleu3_geometric(utterances): weights = (1./3, 1./3, 1./3) return get_self_bleu(utterances, averaging_mode='geometric', weights=weights) def auto_bleu(sentence, weights, mean_mode='arithmetic'): if len(sentence) <= 1: return 0 N = len(weights) bleu_n = np.zeros([N]) for n in range(N): targ_ngrams = list(nltk.ngrams(sentence, n+1)) for p in range(len(targ_ngrams)): left = sentence[:p] right = sentence[(p+n+1):] rest_ngrams = list(nltk.ngrams(left, n+1)) + \ list(nltk.ngrams(right, n+1)) # compute the nb of matching ngrams bleu_n[n] += targ_ngrams[p] in rest_ngrams bleu_n[n] /= len(targ_ngrams) # average 
them to get a proportion weights = np.array(weights) if mean_mode == 'arithmetic': return (bleu_n * weights).sum() elif mean_mode == 'geometric': return (bleu_n ** weights).prod() else: raise ValueError(f'Unknown agggregation mode {mean_mode}') def main(): from multiprocessing import Pool args = get_args() target_ids = get_target_sequences(args.manifest, args.prompts_description) with open(args.asr_transcript, 'r') as fin: lines = fin.readlines() terms = [x.strip().split() for x in lines] filtered = [] for term in terms: line_id = int(term[-1].split('-')[1][:-1]) if line_id in target_ids: filtered.append(term) terms = filtered if args.cut_id: terms = [x[1:] for x in terms] if args.cut_tail: terms = [x[:-1] for x in terms] if args.debug: terms = terms[:10] tasks = [ ('Self-BLEU2-arithmetic', get_self_bleu2_arithmetic), ('Self-BLEU2-geometric', get_self_bleu2_geometric), ('Auto-BLEU2-arithmetic', get_auto_bleu2_arithmetic), ('Auto-BLEU2-geometric', get_auto_bleu2_geometric), ('Self-BLEU3-arithmetic', get_self_bleu3_arithmetic), ('Self-BLEU3-geometric', get_self_bleu3_geometric), ('Auto-BLEU3-arithmetic', get_auto_bleu3_arithmetic), ('Auto-BLEU3-geometric', get_auto_bleu3_geometric), ] n_processes = min(16, len(tasks)) with Pool(n_processes) as pool: metrics = pool.map(run_f, [(t[1], terms) for t in tasks]) for (metric_name, _), metric in zip(tasks, metrics): metric, sem = np.mean(metric), np.std(metric) / np.sqrt(len(metric)) metric, sem = [ round(100 * x, 2) for x in [metric, sem] ] print(f'{metric_name} {metric} +- {sem}') def run_f(task_params): f, terms = task_params return f(terms) if __name__ == '__main__': # NLTK produces warnings warnings.filterwarnings("ignore") main()
KosmosX-API-main
kosmosX/fairseq/examples/textless_nlp/gslm/metrics/asr_metrics/self_auto_bleu.py
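auto_bleu above measures, for each n-gram in a sentence, whether the same n-gram occurs elsewhere in that sentence, then combines the per-order proportions with arithmetic or geometric weighting. A tiny hand-checkable case (toy sentence, unigram and bigram weights), using the same logic restated standalone:

import numpy as np
import nltk

def auto_bleu(sentence, weights, mean_mode="arithmetic"):
    # Same computation as the script's auto_bleu, minus the length-1 guard.
    N = len(weights)
    bleu_n = np.zeros([N])
    for n in range(N):
        targ_ngrams = list(nltk.ngrams(sentence, n + 1))
        for p in range(len(targ_ngrams)):
            left = sentence[:p]
            right = sentence[(p + n + 1):]
            rest_ngrams = list(nltk.ngrams(left, n + 1)) + list(nltk.ngrams(right, n + 1))
            bleu_n[n] += targ_ngrams[p] in rest_ngrams
        bleu_n[n] /= len(targ_ngrams)
    weights = np.array(weights)
    if mean_mode == "arithmetic":
        return (bleu_n * weights).sum()
    return (bleu_n ** weights).prod()

sent = ["a", "b", "a", "b"]
print(auto_bleu(sent, (0.5, 0.5), "arithmetic"))  # 0.5*1 + 0.5*(2/3) ≈ 0.833
print(auto_bleu(sent, (0.5, 0.5), "geometric"))   # sqrt(1 * 2/3)     ≈ 0.816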
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from collections import defaultdict import numpy as np from misc.bleu_utils import sentence_bleu import json import warnings def get_args(): import argparse parser = argparse.ArgumentParser("Tool to calculate Continuation-BLEU2") parser.add_argument('--asr-transcript', type=str, help='Path to the transcript file.') parser.add_argument('--prompts-description', type=str, help='Path to the ground-truth continuation') parser.add_argument('--manifest', type=str, required=True) parser.add_argument('--take-shortest', type=int, default=1000) args = parser.parse_args() return args def main(): # NLTK produces warnings warnings.filterwarnings("ignore") args = get_args() with open(args.prompts_description, 'r') as fin: original_continuations = json.loads(fin.read()) sequence2length = [(k, v[0]) for k, v in original_continuations.items()] assert all(float(v) >= 6.0 for (_, v) in sequence2length) # 6 seconds sequence2length.sort(key=lambda x: x[1]) to_take = set(v[0] for v in sequence2length[:args.take_shortest]) with open(args.manifest, 'r') as fin: fin.readline() linenum2file = dict([ (i, l.split("__")[0]) for (i, l) in enumerate(fin) ]) max_files = max(linenum2file.keys()) continuations = defaultdict(list) mean_length_after = 0 n_examples = 0 with open(args.asr_transcript, 'r') as fin: for line in fin: n_examples += 1 line = line.split() sequence_id = int(line[-1].split('-')[1][:-1]) assert sequence_id <= max_files sequence_name = linenum2file[sequence_id] continuations[sequence_name].append(line[:-1]) mean_length_after += len(line) mean_length_after /= n_examples print(f'Mean length of continuations, in words: {mean_length_after}') metric_values = [] mean_ground_truth_words = 0 n_examples = 0 n_candidates = 0 for k, candidates in continuations.items(): if k not in to_take: continue n_examples += 1 ground_truth = original_continuations[k][1].split() n_candidates += len(candidates) bleu = sentence_bleu(candidates, ground_truth, weights=( 0.5, 0.5), no_length_penalty=True, averaging_mode="geometric") mean_ground_truth_words += len(ground_truth) metric_values.append(bleu) n = len(metric_values) print( f'Median BLEU over {n} examples: {np.median(metric_values)} +- {np.std(metric_values) / np.sqrt(n)}') if __name__ == '__main__': main()
KosmosX-API-main
kosmosX/fairseq/examples/textless_nlp/gslm/metrics/asr_metrics/continuation_eval.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import torch import numpy as np import warnings def get_target_sequences(manifest, ground_truth, to_take=1000): import json import pathlib with open(ground_truth, 'r') as fin: original_continuations = json.loads(fin.read()) sequence2length = [(k, v[0]) for k, v in original_continuations.items()] assert all(float(v) >= 6.0 for (_, v) in sequence2length) # 6 seconds sequence2length.sort(key=lambda x: x[1]) to_take_sequences = set(v[0] for v in sequence2length[:to_take]) to_take_ids = [] with open(manifest, 'r') as f: f.readline() for i, line in enumerate(f.readlines()): seq_id = line.split()[0] seq_id = pathlib.Path(seq_id).name.split('__')[0] if seq_id in to_take_sequences: to_take_ids.append(i) print(f'Took {len(to_take_ids)} ids') return set(to_take_ids) def get_args(): import argparse parser = argparse.ArgumentParser("Evaluate PPX metric of a transcript.") parser.add_argument('--asr-transcript', type=str, help='Path to the transcript file.') parser.add_argument('--cut-id', action='store_true', help='Whether cut the first token (typically a seq id)') parser.add_argument('--cut-tail', action='store_true', help='Whether cut the last token (typically a speaker id)') parser.add_argument('--manifest', type=str, default=None) parser.add_argument('--prompts-description', type=str, default=None) args = parser.parse_args() return args def main(): args = get_args() lm = torch.hub.load( 'pytorch/fairseq', 'transformer_lm.wmt19.en', tokenizer='moses', bpe='fastbpe') lm.eval().cuda() # disable dropout if args.manifest is None and args.prompts_description is None: target_ids = None else: target_ids = get_target_sequences( args.manifest, args.prompts_description) with open(args.asr_transcript, 'r') as fin: lines = fin.readlines() if target_ids is not None: filtered = [] for line in lines: line_id = line.split()[-1] line_id = int(line_id.split('-')[1][:-1]) if line_id in target_ids: filtered.append(line) lines = filtered else: pass if args.cut_id: lines = [' '.join(x.split()[1:]) for x in lines] if args.cut_tail: lines = [' '.join(x.split()[:-1]) for x in lines] lines = [x.strip().lower() for x in lines] def get_logprob(sent): return \ lm.score(sent)['positional_scores'].mean().neg().item() logprobs = [get_logprob(l) for l in lines] filtered = [x for x in logprobs if not np.isnan(x)] if len(filtered) != len(logprobs): warnings.warn("NaNs detected!") logprobs = filtered perplexities = [np.exp(l) for l in logprobs] for name, stats in [('logprob', logprobs), ('perplexity', perplexities)]: mean = np.mean(stats) sem = np.std(stats) / np.sqrt(len(stats)) median = np.median(stats) interval = list(np.percentile(stats, [10, 90])) mean, sem, median, percentile10, percentile90 = [ round(x, 2) for x in [mean, sem, median] + interval] print(name) print(f"\tMean {mean} +- {sem}") print( f"\tMedian {median}, 90% confidence interval {percentile10}...{percentile90}") if __name__ == '__main__': main()
KosmosX-API-main
kosmosX/fairseq/examples/textless_nlp/gslm/metrics/asr_metrics/ppx.py
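The ppx script converts the language model's mean positional log-probability into perplexity via exp(-mean log p). A toy check with made-up per-token log-probabilities (illustrative numbers, not fairseq output):

import numpy as np

# Hypothetical per-token natural-log probabilities for one transcript.
positional_scores = np.array([-2.3, -1.1, -0.7, -3.0])

neg_mean_logprob = -positional_scores.mean()
perplexity = np.exp(neg_mean_logprob)
print(neg_mean_logprob, perplexity)   # 1.775, ~5.9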
""" TODO: the code is take from Apache-2 Licensed NLTK: make sure we do this properly! Copied over from nltk.tranlate.bleu_score. This code has two major changes: - allows to turn off length/brevity penalty --- it has no sense for self-bleu, - allows to use arithmetic instead of geometric mean """ import math from fractions import Fraction from collections import Counter from nltk.translate.bleu_score import modified_precision, closest_ref_length, brevity_penalty, SmoothingFunction def corpus_bleu( list_of_references, hypotheses, weights=(0.25, 0.25, 0.25, 0.25), smoothing_function=None, auto_reweigh=False, averaging_mode="geometric", no_length_penalty=False ): """ Calculate a single corpus-level BLEU score (aka. system-level BLEU) for all the hypotheses and their respective references. Instead of averaging the sentence level BLEU scores (i.e. marco-average precision), the original BLEU metric (Papineni et al. 2002) accounts for the micro-average precision (i.e. summing the numerators and denominators for each hypothesis-reference(s) pairs before the division). >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which', ... 'ensures', 'that', 'the', 'military', 'always', ... 'obeys', 'the', 'commands', 'of', 'the', 'party'] >>> ref1a = ['It', 'is', 'a', 'guide', 'to', 'action', 'that', ... 'ensures', 'that', 'the', 'military', 'will', 'forever', ... 'heed', 'Party', 'commands'] >>> ref1b = ['It', 'is', 'the', 'guiding', 'principle', 'which', ... 'guarantees', 'the', 'military', 'forces', 'always', ... 'being', 'under', 'the', 'command', 'of', 'the', 'Party'] >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the', ... 'army', 'always', 'to', 'heed', 'the', 'directions', ... 'of', 'the', 'party'] >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was', ... 'interested', 'in', 'world', 'history'] >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history', ... 'because', 'he', 'read', 'the', 'book'] >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]] >>> hypotheses = [hyp1, hyp2] >>> corpus_bleu(list_of_references, hypotheses) # doctest: +ELLIPSIS 0.5920... The example below show that corpus_bleu() is different from averaging sentence_bleu() for hypotheses >>> score1 = sentence_bleu([ref1a, ref1b, ref1c], hyp1) >>> score2 = sentence_bleu([ref2a], hyp2) >>> (score1 + score2) / 2 # doctest: +ELLIPSIS 0.6223... :param list_of_references: a corpus of lists of reference sentences, w.r.t. hypotheses :type list_of_references: list(list(list(str))) :param hypotheses: a list of hypothesis sentences :type hypotheses: list(list(str)) :param weights: weights for unigrams, bigrams, trigrams and so on :type weights: list(float) :param smoothing_function: :type smoothing_function: SmoothingFunction :param auto_reweigh: Option to re-normalize the weights uniformly. :type auto_reweigh: bool :return: The corpus-level BLEU score. :rtype: float """ # Before proceeding to compute BLEU, perform sanity checks. p_numerators = Counter() # Key = ngram order, and value = no. of ngram matches. p_denominators = Counter() # Key = ngram order, and value = no. of ngram in ref. hyp_lengths, ref_lengths = 0, 0 assert len(list_of_references) == len(hypotheses), ( "The number of hypotheses and their reference(s) should be the " "same " ) # Iterate through each hypothesis and their corresponding references. for references, hypothesis in zip(list_of_references, hypotheses): # For each order of ngram, calculate the numerator and # denominator for the corpus-level modified precision. 
for i, _ in enumerate(weights, start=1): p_i = modified_precision(references, hypothesis, i) p_numerators[i] += p_i.numerator p_denominators[i] += p_i.denominator # Calculate the hypothesis length and the closest reference length. # Adds them to the corpus-level hypothesis and reference counts. hyp_len = len(hypothesis) hyp_lengths += hyp_len ref_lengths += closest_ref_length(references, hyp_len) # Calculate corpus-level brevity penalty. if no_length_penalty and averaging_mode == 'geometric': bp = 1.0 elif no_length_penalty and averaging_mode == 'arithmetic': bp = 0.0 else: assert not no_length_penalty assert averaging_mode != 'arithmetic', 'Not sure how to apply length penalty when aurithmetic mode' bp = brevity_penalty(ref_lengths, hyp_lengths) # Uniformly re-weighting based on maximum hypothesis lengths if largest # order of n-grams < 4 and weights is set at default. if auto_reweigh: if hyp_lengths < 4 and weights == (0.25, 0.25, 0.25, 0.25): weights = (1 / hyp_lengths,) * hyp_lengths # Collects the various precision values for the different ngram orders. p_n = [ Fraction(p_numerators[i], p_denominators[i], _normalize=False) for i, _ in enumerate(weights, start=1) ] # Returns 0 if there's no matching n-grams # We only need to check for p_numerators[1] == 0, since if there's # no unigrams, there won't be any higher order ngrams. if p_numerators[1] == 0: return 0 # If there's no smoothing, set use method0 from SmoothinFunction class. if not smoothing_function: smoothing_function = SmoothingFunction().method0 # Smoothen the modified precision. # Note: smoothing_function() may convert values into floats; # it tries to retain the Fraction object as much as the # smoothing method allows. p_n = smoothing_function( p_n, references=references, hypothesis=hypothesis, hyp_len=hyp_lengths ) if averaging_mode == "geometric": s = (w_i * math.log(p_i) for w_i, p_i in zip(weights, p_n)) s = bp * math.exp(math.fsum(s)) elif averaging_mode == "arithmetic": s = (w_i * p_i for w_i, p_i in zip(weights, p_n)) s = math.fsum(s) return s def sentence_bleu( references, hypothesis, weights=(0.25, 0.25, 0.25, 0.25), smoothing_function=None, auto_reweigh=False, averaging_mode="geometric", no_length_penalty=False ): return corpus_bleu( [references], [hypothesis], weights, smoothing_function, auto_reweigh, averaging_mode, no_length_penalty )
KosmosX-API-main
kosmosX/fairseq/examples/textless_nlp/gslm/metrics/asr_metrics/misc/bleu_utils.py
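The corpus_bleu variant above differs from stock NLTK in the two ways its header note describes: the brevity penalty can be switched off and the n-gram precisions can be combined with an arithmetic rather than a geometric mean, both of which matter for self-BLEU. Below is a minimal usage sketch, not taken from the repo: the tokenized sentences are illustrative, and it assumes the fairseq examples tree is importable on PYTHONPATH.

# Self-BLEU sketch: each sentence is scored against all the others, with the
# brevity penalty disabled as the header note above suggests for self-BLEU.
from examples.textless_nlp.gslm.metrics.asr_metrics.misc.bleu_utils import corpus_bleu

sentences = [
    ["the", "cat", "sat", "on", "the", "mat"],
    ["a", "dog", "ran", "in", "the", "park"],
    ["the", "cat", "ran", "on", "the", "grass"],
]

list_of_references, hypotheses = [], []
for i, hyp in enumerate(sentences):
    # every other sentence acts as a reference for sentence i
    list_of_references.append([s for j, s in enumerate(sentences) if j != i])
    hypotheses.append(hyp)

self_bleu = corpus_bleu(
    list_of_references,
    hypotheses,
    weights=(0.5, 0.5),           # unigrams and bigrams only
    no_length_penalty=True,       # the brevity penalty has no meaning for self-BLEU
    averaging_mode="arithmetic",  # arithmetic mean tolerates missing higher-order matches
)
print(f"self-BLEU: {self_bleu:.4f}")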
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import torchaudio import argparse import json import pathlib def get_args(): parser = argparse.ArgumentParser( "Assuring generated audio have the same length as ground-truth audio") parser.add_argument('--samples_dir', required=True, type=str) parser.add_argument('--out_dir', required=True, type=str) parser.add_argument('--prompts_description', required=True, type=str) return parser.parse_args() def cut(src, tgt, l): x, sr = torchaudio.load(str(src)) assert sr == 16_000 x = x.squeeze() target_frames = int(l * sr) flag = 0 if target_frames <= x.size(0): x = x[:target_frames] flag = 1 else: flag = 0 torchaudio.save(str(tgt), x.unsqueeze(0), sr) return flag def main(): args = get_args() tgt_dir = pathlib.Path(args.out_dir) tgt_dir.mkdir(exist_ok=True, parents=True) total_files, sufficiently_long = 0, 0 with open(args.prompts_description, 'r') as f: description = json.loads(f.read()) for src_f in pathlib.Path(args.samples_dir).glob('*.wav'): name_prompt = src_f.with_suffix('').name.split('__')[0] assert name_prompt in description, f'Cannot find {name_prompt}!' target_length = description[name_prompt][0] tgt_f = tgt_dir / (src_f.name) is_long_enough = cut(src_f, tgt_f, target_length) sufficiently_long += is_long_enough if not is_long_enough: print(f'{src_f} is not long enough') total_files += 1 print( f'Total files: {total_files}; sufficiently long: {sufficiently_long}') if __name__ == '__main__': main()
KosmosX-API-main
kosmosX/fairseq/examples/textless_nlp/gslm/metrics/asr_metrics/misc/cut_as.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import argparse import gc import logging import os import joblib import soundfile as sf import torch from examples.textless_nlp.gslm.speech2unit.pretrained.utils import get_feature_reader from examples.textless_nlp.gslm.unit2speech.tts_data import TacotronInputDataset from examples.textless_nlp.gslm.unit2speech.utils import ( load_tacotron, load_waveglow, synthesize_audio, ) def get_logger(): log_format = "[%(asctime)s] [%(levelname)s]: %(message)s" logging.basicConfig(format=log_format, level=logging.INFO) logger = logging.getLogger(__name__) return logger def get_parser(): parser = argparse.ArgumentParser(description="GSLM U2S tool") parser.add_argument( "--feature_type", type=str, choices=["logmel", "hubert", "w2v2", "cpc"], default=None, required=True, help="Acoustic feature type", ) parser.add_argument( "--acoustic_model_path", type=str, help="Pretrained acoustic model checkpoint", ) parser.add_argument("--layer", type=int, help="Layer of acoustic model") parser.add_argument( "--kmeans_model_path", type=str, required=True, help="K-means model file path to use for inference", ) parser.add_argument( "--tts_model_path", type=str, help="TTS model file path to use for inference", ) parser.add_argument( "--code_dict_path", type=str, help="Code dict file path to use for inference", ) parser.add_argument( "--waveglow_path", type=str, help="Waveglow (vocoder) model file path to use for inference", ) parser.add_argument("--max_decoder_steps", type=int, default=2000) parser.add_argument("--denoiser_strength", type=float, default=0.1) return parser ################################################ def main(args, logger): # Acoustic Model logger.info(f"Loading acoustic model from {args.tts_model_path}...") feature_reader_cls = get_feature_reader(args.feature_type) reader = feature_reader_cls( checkpoint_path=args.acoustic_model_path, layer=args.layer ) # K-means Model logger.info(f"Loading K-means model from {args.kmeans_model_path} ...") kmeans_model = joblib.load(open(args.kmeans_model_path, "rb")) kmeans_model.verbose = False # TTS Model logger.info(f"Loading TTS model from {args.tts_model_path}...") tacotron_model, sample_rate, hparams = load_tacotron( tacotron_model_path=args.tts_model_path, max_decoder_steps=args.max_decoder_steps, ) # Waveglow Model logger.info(f"Loading Waveglow model from {args.waveglow_path}...") waveglow, denoiser = load_waveglow(waveglow_path=args.waveglow_path) # Dataset if not os.path.exists(hparams.code_dict): hparams.code_dict = args.code_dict_path tts_dataset = TacotronInputDataset(hparams) iters = 0 while True: in_file_path = input("Input: Enter the full file path of audio file...\n") out_file_path = input("Output: Enter the full file path of audio file...\n") feats = reader.get_feats(in_file_path).cpu().numpy() iters += 1 if iters == 1000: gc.collect() torch.cuda.empty_cache() quantized_units = kmeans_model.predict(feats) quantized_units_str = " ".join(map(str, quantized_units)) tts_input = tts_dataset.get_tensor(quantized_units_str) mel, aud, aud_dn, has_eos = synthesize_audio( tacotron_model, waveglow, denoiser, tts_input.unsqueeze(0), strength=args.denoiser_strength, ) sf.write(f"{out_file_path}", aud_dn[0].cpu().float().numpy(), sample_rate) logger.info("Resynthesis done!\n") if __name__ == "__main__": parser = get_parser() args = parser.parse_args() logger = get_logger() 
logger.info(args) main(args, logger)
KosmosX-API-main
kosmosX/fairseq/examples/textless_nlp/gslm/tools/resynthesize_speech.py
KosmosX-API-main
kosmosX/fairseq/examples/textless_nlp/gslm/speech2unit/__init__.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import argparse import logging from examples.textless_nlp.gslm.speech2unit.pretrained.utils import ( get_and_dump_features, ) def get_parser(): parser = argparse.ArgumentParser( description="Compute and dump log mel fbank features." ) parser.add_argument( "--feature_type", type=str, choices=["logmel", "hubert", "w2v2", "cpc"], default=None, help="Acoustic feature type", ) parser.add_argument( "--manifest_path", type=str, default=None, help="Manifest file containing the root dir and file names", ) parser.add_argument( "--out_features_path", type=str, default=None, help="Features file path to write to", ) parser.add_argument( "--checkpoint_path", type=str, help="Pretrained acoustic model checkpoint", ) parser.add_argument( "--layer", type=int, help="The layer of the pretrained model to extract features from", default=-1, ) parser.add_argument( "--sample_pct", type=float, help="Percent data to use for K-means training", default=0.1, ) return parser def get_logger(): log_format = "[%(asctime)s] [%(levelname)s]: %(message)s" logging.basicConfig(format=log_format, level=logging.INFO) logger = logging.getLogger(__name__) return logger if __name__ == "__main__": """ Example command: python ~/speechbot/clustering/dump_logmelfank_feats.py \ --manifest_path /checkpoint/kushall/data/LJSpeech-1.1/asr_input_wavs_16k/train.tsv --out_features_path /checkpoint/kushall/experiments/speechbot/logmelfbank/features/ljspeech/train.npy """ parser = get_parser() args = parser.parse_args() logger = get_logger() logger.info(args) logger.info(f"Extracting {args.feature_type} acoustic features...") get_and_dump_features( feature_type=args.feature_type, checkpoint_path=args.checkpoint_path, layer=args.layer, manifest_path=args.manifest_path, sample_pct=args.sample_pct, flatten=True, out_features_path=args.out_features_path, ) logger.info(f"Saved extracted features at {args.out_features_path}")
KosmosX-API-main
kosmosX/fairseq/examples/textless_nlp/gslm/speech2unit/clustering/dump_feats.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import argparse import logging import os import time import numpy as np from sklearn.cluster import MiniBatchKMeans import joblib from examples.textless_nlp.gslm.speech2unit.pretrained.utils import ( get_and_dump_features, get_features, ) def get_logger(): log_format = "[%(asctime)s] [%(levelname)s]: %(message)s" logging.basicConfig(format=log_format, level=logging.INFO) logger = logging.getLogger(__name__) return logger def get_parser(): parser = argparse.ArgumentParser( description="Learn K-means clustering over acoustic features." ) # Features arguments parser.add_argument( "--in_features_path", type=str, default=None, help="Features file path" ) parser.add_argument( "--feature_type", type=str, choices=["logmel", "hubert", "w2v2", "cpc"], default=None, help="Acoustic feature type", ) parser.add_argument( "--manifest_path", type=str, default=None, help="Manifest file containing the root dir and file names", ) parser.add_argument( "--out_features_path", type=str, default=None, help="Features file path to write to", ) parser.add_argument( "--checkpoint_path", type=str, help="Pretrained acoustic model checkpoint", ) parser.add_argument( "--layer", type=int, help="The layer of the pretrained model to extract features from", default=-1, ) parser.add_argument( "--sample_pct", type=float, help="Percent data to use for K-means training", default=0.1, ) # K-means arguments parser.add_argument( "--num_clusters", type=int, help="Nubmer of clusters", default=50 ) parser.add_argument("--init", default="k-means++") parser.add_argument( "--max_iter", type=int, help="Maximum number of iterations for K-means training", default=150, ) parser.add_argument( "--batch_size", type=int, help="Batch size for K-means training", default=10000, ) parser.add_argument("--tol", default=0.0, type=float) parser.add_argument("--max_no_improvement", default=100, type=int) parser.add_argument("--n_init", default=20, type=int) parser.add_argument("--reassignment_ratio", default=0.5, type=float) parser.add_argument( "--out_kmeans_model_path", type=str, required=True, help="Path to save K-means model", ) # Leftovers parser.add_argument( "--seed", type=int, help="Random seed to use for K-means training", default=1369, ) return parser def get_kmeans_model( n_clusters, init, max_iter, batch_size, tol, max_no_improvement, n_init, reassignment_ratio, random_state, ): return MiniBatchKMeans( n_clusters=n_clusters, init=init, max_iter=max_iter, batch_size=batch_size, tol=tol, max_no_improvement=max_no_improvement, n_init=n_init, reassignment_ratio=reassignment_ratio, random_state=random_state, verbose=1, compute_labels=True, init_size=None, ) def train_kmeans(kmeans_model, features_batch): start_time = time.time() kmeans_model.fit(features_batch) time_taken = round((time.time() - start_time) // 60, 2) return kmeans_model, time_taken def main(args, logger): # Features loading/extraction for K-means if args.in_features_path: # Feature loading logger.info(f"Loading features from {args.in_features_path}...") features_batch = np.load(args.in_features_path, allow_pickle=True) else: # Feature extraction logger.info(f"Extracting {args.feature_type} acoustic features...") features_batch = ( get_features( feature_type=args.feature_type, checkpoint_path=args.checkpoint_path, layer=args.layer, manifest_path=args.manifest_path, sample_pct=args.sample_pct, flatten=True, ) 
if not args.out_features_path else get_and_dump_features( feature_type=args.feature_type, checkpoint_path=args.checkpoint_path, layer=args.layer, manifest_path=args.manifest_path, sample_pct=args.sample_pct, flatten=True, out_features_path=args.out_features_path, ) ) if args.out_features_path: logger.info( f"Saved extracted features at {args.out_features_path}" ) logger.info(f"Features shape = {features_batch.shape}\n") # Learn and save K-means model kmeans_model = get_kmeans_model( n_clusters=args.num_clusters, init=args.init, max_iter=args.max_iter, batch_size=args.batch_size, tol=args.tol, max_no_improvement=args.max_no_improvement, n_init=args.n_init, reassignment_ratio=args.reassignment_ratio, random_state=args.seed, ) logger.info("Starting k-means training...") kmeans_model, time_taken = train_kmeans( kmeans_model=kmeans_model, features_batch=features_batch ) logger.info(f"...done k-means training in {time_taken} minutes") inertia = -kmeans_model.score(features_batch) / len(features_batch) logger.info(f"Total inertia: {round(inertia, 2)}\n") logger.info(f"Saving k-means model to {args.out_kmeans_model_path}") os.makedirs(os.path.dirname(args.out_kmeans_model_path), exist_ok=True) joblib.dump(kmeans_model, open(args.out_kmeans_model_path, "wb")) if __name__ == "__main__": parser = get_parser() args = parser.parse_args() logger = get_logger() logger.info(args) main(args, logger)
KosmosX-API-main
kosmosX/fairseq/examples/textless_nlp/gslm/speech2unit/clustering/cluster_kmeans.py
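For the clustering step in isolation, here is a minimal sketch of what main() above does once features are in memory; the .npy and output paths and the cluster count are illustrative, and the MiniBatchKMeans settings mirror the parser defaults above.

# Fit MiniBatchKMeans on pre-dumped features and persist it with joblib,
# matching how cluster_kmeans.py builds, trains and saves its model.
import joblib
import numpy as np
from sklearn.cluster import MiniBatchKMeans

features = np.load("features/train_hubert_l6.npy")  # (num_frames, feat_dim); illustrative path
kmeans = MiniBatchKMeans(
    n_clusters=100,              # illustrative; the script default is 50
    init="k-means++",
    max_iter=150,
    batch_size=10000,
    tol=0.0,
    max_no_improvement=100,
    n_init=20,
    reassignment_ratio=0.5,
    random_state=1369,
    verbose=1,
    compute_labels=True,
)
kmeans.fit(features)
joblib.dump(kmeans, open("kmeans_100.bin", "wb"))    # later loaded by quantize_with_kmeans.py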
KosmosX-API-main
kosmosX/fairseq/examples/textless_nlp/gslm/speech2unit/clustering/__init__.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from typing import List, Tuple def get_audio_files(manifest_path: str) -> Tuple[str, List[str], List[int]]: fnames, sizes = [], [] with open(manifest_path, "r") as f: root_dir = f.readline().strip() for line in f: items = line.strip().split("\t") assert ( len(items) == 2 ), f"File must have two columns separated by tab. Got {line}" fnames.append(items[0]) sizes.append(int(items[1])) return root_dir, fnames, sizes
KosmosX-API-main
kosmosX/fairseq/examples/textless_nlp/gslm/speech2unit/clustering/utils.py
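get_audio_files() above expects a manifest whose first line is the audio root directory and whose remaining lines are a tab-separated relative path and sample count. A small sketch, with illustrative paths, that writes a manifest in that shape:

# Build the manifest format parsed by get_audio_files(): first line is the
# root dir, each following line is "<relative path>\t<num samples>".
import os
import soundfile as sf

root = "/data/LJSpeech-1.1/wavs"          # illustrative root directory
with open("train_manifest.tsv", "w") as fout:
    fout.write(root + "\n")
    for fname in sorted(os.listdir(root)):
        if not fname.endswith(".wav"):
            continue
        num_samples = sf.info(os.path.join(root, fname)).frames
        fout.write(f"{fname}\t{num_samples}\n")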
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import argparse import logging import os import numpy as np import joblib from examples.textless_nlp.gslm.speech2unit.clustering.utils import ( get_audio_files, ) from examples.textless_nlp.gslm.speech2unit.pretrained.utils import ( get_features, ) def get_logger(): log_format = "[%(asctime)s] [%(levelname)s]: %(message)s" logging.basicConfig(format=log_format, level=logging.INFO) logger = logging.getLogger(__name__) return logger def get_parser(): parser = argparse.ArgumentParser( description="Quantize using K-means clustering over acoustic features." ) parser.add_argument( "--feature_type", type=str, choices=["logmel", "hubert", "w2v2", "cpc"], default=None, required=True, help="Acoustic feature type", ) parser.add_argument( "--acoustic_model_path", type=str, help="Pretrained acoustic model checkpoint" ) parser.add_argument( "--layer", type=int, help="The layer of the pretrained model to extract features from", default=-1, ) parser.add_argument( "--kmeans_model_path", type=str, required=True, help="K-means model file path to use for inference", ) parser.add_argument( "--features_path", type=str, default=None, help="Features file path. You don't need to enter acoustic model details if you have dumped features", ) parser.add_argument( "--manifest_path", type=str, default=None, help="Manifest file containing the root dir and file names", ) parser.add_argument( "--out_quantized_file_path", required=True, type=str, help="File path of quantized output.", ) parser.add_argument( "--extension", type=str, default=".flac", help="Features file path" ) return parser def main(args, logger): # Feature extraction if args.features_path is not None: logger.info(f"Loading acoustic features from {args.features_path}...") features_batch = np.load(args.features_path) else: logger.info(f"Extracting {args.feature_type} acoustic features...") features_batch = get_features( feature_type=args.feature_type, checkpoint_path=args.acoustic_model_path, layer=args.layer, manifest_path=args.manifest_path, sample_pct=1.0, flatten=False, ) logger.info( f"Features extracted for {len(features_batch)} utterances.\n" ) logger.info( f"Dimensionality of representation = {features_batch[0].shape[1]}" ) # K-means model logger.info(f"Loading K-means model from {args.kmeans_model_path} ...") kmeans_model = joblib.load(open(args.kmeans_model_path, "rb")) kmeans_model.verbose = False _, fnames, _ = get_audio_files(args.manifest_path) os.makedirs(os.path.dirname(args.out_quantized_file_path), exist_ok=True) print(f"Writing quantized predictions to {args.out_quantized_file_path}") with open(args.out_quantized_file_path, "w") as fout: for i, feats in enumerate(features_batch): pred = kmeans_model.predict(feats) pred_str = " ".join(str(p) for p in pred) base_fname = os.path.basename(fnames[i]).rstrip(args.extension) fout.write(f"{base_fname}|{pred_str}\n") if __name__ == "__main__": parser = get_parser() args = parser.parse_args() logger = get_logger() logger.info(args) main(args, logger)
KosmosX-API-main
kosmosX/fairseq/examples/textless_nlp/gslm/speech2unit/clustering/quantize_with_kmeans.py
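A minimal per-utterance version of the quantization loop in main() above, assuming a HuBERT checkpoint and an already-fitted k-means model (the file names are illustrative); the feature readers above move tensors to GPU, so this needs CUDA. The output line uses the same "name|unit unit ..." format that load_quantized_audio_from_file() in unit2speech/utils.py reads back.

# Quantize one utterance into discrete units: extract features with the
# HuBERT reader, then map each frame to its nearest k-means centroid.
import joblib
from examples.textless_nlp.gslm.speech2unit.pretrained.hubert_feature_reader import (
    HubertFeatureReader,
)

reader = HubertFeatureReader("hubert_base_ls960.pt", layer=6)  # illustrative checkpoint
kmeans = joblib.load(open("kmeans_100.bin", "rb"))
kmeans.verbose = False

feats = reader.get_feats("LJ001-0001.wav").cpu().numpy()       # (num_frames, feat_dim)
units = kmeans.predict(feats)
print("LJ001-0001|" + " ".join(str(u) for u in units))         # format consumed by unit2speech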
import soundfile as sf import torch import torch.nn as nn import torch.nn.functional as F class CpcFeatureReader: """ Wrapper class to run inference on CPC model. Helps extract features for a given audio file. """ def __init__( self, checkpoint_path, layer, use_encoder_layer=False, norm_features=False, sample_rate=16000, max_chunk=64000, ): self.model = load_cpc_model(checkpoint_path, layer).eval().cuda() self.sample_rate = sample_rate self.max_chunk = max_chunk self.norm_features = norm_features self.use_encoder_layer = use_encoder_layer def read_audio(self, path, ref_len=None): wav, sr = sf.read(path) if wav.ndim == 2: wav = wav.mean(-1) assert wav.ndim == 1, wav.ndim assert sr == self.sample_rate, sr if ref_len is not None and abs(ref_len - len(wav)) > 160: print(f"ref {ref_len} != read {len(wav)} ({path})") return wav def get_feats(self, file_path, ref_len=None): x = self.read_audio(file_path, ref_len) # Inspired from CPC_audio feature_loader.py with torch.no_grad(): x = torch.from_numpy(x).float().cuda() x = x.view(1, 1, -1) size = x.size(2) feat = [] start = 0 while start < size: if start + self.max_chunk > size: break x_chunk = x[..., start : start + self.max_chunk] feat_chunk = self.model.extract_features( source=x_chunk, get_encoded=self.use_encoder_layer, norm_output=self.norm_features, ) feat.append(feat_chunk) start += self.max_chunk if start < size: x_chunk = x[:, -self.max_chunk :] feat_chunk = self.model.extract_features( source=x_chunk, get_encoded=self.use_encoder_layer, norm_output=self.norm_features, ) df = x_chunk.size(2) // feat_chunk.size(1) delta = (size - start) // df feat.append(feat_chunk[:, -delta:]) return torch.cat(feat, 1).squeeze(0) def load_cpc_model(checkpoint_path, layer=None): state_dict = torch.load(checkpoint_path) weights = state_dict["weights"] config = state_dict["config"] if layer is not None: config["nLevelsGRU"] = layer encoder = CPCEncoder(config["hiddenEncoder"]) ar_net = CPCAR( config["hiddenEncoder"], config["hiddenGar"], False, config["nLevelsGRU"] ) model = CPCModel(encoder, ar_net) model.load_state_dict(weights, strict=False) model.config = config return model class ChannelNorm(nn.Module): def __init__(self, num_features, epsilon=1e-05, affine=True): super(ChannelNorm, self).__init__() if affine: self.weight = nn.parameter.Parameter(torch.Tensor(1, num_features, 1)) self.bias = nn.parameter.Parameter(torch.Tensor(1, num_features, 1)) else: self.weight = None self.bias = None self.epsilon = epsilon self.p = 0 self.affine = affine self.reset_parameters() def reset_parameters(self): if self.affine: torch.nn.init.ones_(self.weight) torch.nn.init.zeros_(self.bias) def forward(self, x): cum_mean = x.mean(dim=1, keepdim=True) cum_var = x.var(dim=1, keepdim=True) x = (x - cum_mean) * torch.rsqrt(cum_var + self.epsilon) if self.weight is not None: x = x * self.weight + self.bias return x class CPCEncoder(nn.Module): def __init__(self, hidden_dim=512): super(CPCEncoder, self).__init__() self.conv0 = nn.Conv1d(1, hidden_dim, 10, stride=5, padding=3) self.batchNorm0 = ChannelNorm(hidden_dim) self.conv1 = nn.Conv1d(hidden_dim, hidden_dim, 8, stride=4, padding=2) self.batchNorm1 = ChannelNorm(hidden_dim) self.conv2 = nn.Conv1d(hidden_dim, hidden_dim, 4, stride=2, padding=1) self.batchNorm2 = ChannelNorm(hidden_dim) self.conv3 = nn.Conv1d(hidden_dim, hidden_dim, 4, stride=2, padding=1) self.batchNorm3 = ChannelNorm(hidden_dim) self.conv4 = nn.Conv1d(hidden_dim, hidden_dim, 4, stride=2, padding=1) self.batchNorm4 = ChannelNorm(hidden_dim) self.DOWNSAMPLING 
= 160 def get_output_dim(self): return self.conv4.out_channels def forward(self, x): x = F.relu(self.batchNorm0(self.conv0(x))) x = F.relu(self.batchNorm1(self.conv1(x))) x = F.relu(self.batchNorm2(self.conv2(x))) x = F.relu(self.batchNorm3(self.conv3(x))) x = F.relu(self.batchNorm4(self.conv4(x))) return x class CPCAR(nn.Module): def __init__(self, dim_encoded, dim_output, keep_hidden, num_layers): super(CPCAR, self).__init__() self.baseNet = nn.LSTM( dim_encoded, dim_output, num_layers=num_layers, batch_first=True ) self.hidden = None self.keep_hidden = keep_hidden def get_output_dim(self): return self.baseNet.hidden_size def forward(self, x): try: self.baseNet.flatten_parameters() except RuntimeError: pass x, h = self.baseNet(x, self.hidden) if self.keep_hidden: if isinstance(h, tuple): self.hidden = tuple(x.detach() for x in h) else: self.hidden = h.detach() return x class CPCModel(nn.Module): def __init__(self, encoder, ar_net): super(CPCModel, self).__init__() self.gEncoder = encoder self.gAR = ar_net self.config = None def forward(self, x, label): encoded = self.gEncoder(x).permute(0, 2, 1) cpc_feature = self.gAR(encoded) return cpc_feature, encoded, label def extract_features(self, source, get_encoded=False, norm_output=False): cpc_feature, encoded, _ = self.forward(source, None) if get_encoded: cpc_feature = encoded if norm_output: mean = cpc_feature.mean(dim=1, keepdim=True) var = cpc_feature.var(dim=1, keepdim=True) cpc_feature = (cpc_feature - mean) / torch.sqrt(var + 1e-08) return cpc_feature
KosmosX-API-main
kosmosX/fairseq/examples/textless_nlp/gslm/speech2unit/pretrained/cpc_feature_reader.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import torch import fairseq import soundfile as sf import torch.nn.functional as F class HubertFeatureReader: """ Wrapper class to run inference on HuBERT model. Helps extract features for a given audio file. """ def __init__(self, checkpoint_path, layer, max_chunk=1600000): ( model, cfg, task, ) = fairseq.checkpoint_utils.load_model_ensemble_and_task( [checkpoint_path] ) self.model = model[0].eval().cuda() self.task = task self.layer = layer self.max_chunk = max_chunk def read_audio(self, path, ref_len=None): wav, sr = sf.read(path) if wav.ndim == 2: wav = wav.mean(-1) assert wav.ndim == 1, wav.ndim assert sr == self.task.cfg.sample_rate, sr if ref_len is not None and abs(ref_len - len(wav)) > 160: print(f"ref {ref_len} != read {len(wav)} ({path})") return wav def get_feats(self, file_path, ref_len=None): x = self.read_audio(file_path, ref_len) with torch.no_grad(): x = torch.from_numpy(x).float().cuda() if self.task.cfg.normalize: x = F.layer_norm(x, x.shape) x = x.view(1, -1) feat = [] for start in range(0, x.size(1), self.max_chunk): x_chunk = x[:, start: start + self.max_chunk] feat_chunk, _ = self.model.extract_features( source=x_chunk, padding_mask=None, mask=False, output_layer=self.layer, ) feat.append(feat_chunk) return torch.cat(feat, 1).squeeze(0)
KosmosX-API-main
kosmosX/fairseq/examples/textless_nlp/gslm/speech2unit/pretrained/hubert_feature_reader.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import gc import os import random import shutil import numpy as np import torch import tqdm from examples.textless_nlp.gslm.speech2unit.pretrained.cpc_feature_reader import ( CpcFeatureReader, ) from examples.textless_nlp.gslm.speech2unit.pretrained.hubert_feature_reader import ( HubertFeatureReader, ) from examples.textless_nlp.gslm.speech2unit.pretrained.logmel_feature_reader import ( LogMelFeatureReader, ) from examples.textless_nlp.gslm.speech2unit.pretrained.w2v2_feature_reader import ( Wav2VecFeatureReader, ) def get_feature_reader(feature_type): if feature_type == "logmel": return LogMelFeatureReader elif feature_type == "hubert": return HubertFeatureReader elif feature_type == "w2v2": return Wav2VecFeatureReader elif feature_type == "cpc": return CpcFeatureReader else: raise NotImplementedError(f"{feature_type} is not supported.") def get_feature_iterator( feature_type, checkpoint_path, layer, manifest_path, sample_pct ): feature_reader_cls = get_feature_reader(feature_type) with open(manifest_path, "r") as fp: lines = fp.read().split("\n") root = lines.pop(0).strip() file_path_list = [ os.path.join(root, line.split("\t")[0]) for line in lines if len(line) > 0 ] if sample_pct < 1.0: file_path_list = random.sample( file_path_list, int(sample_pct * len(file_path_list)) ) num_files = len(file_path_list) reader = feature_reader_cls( checkpoint_path=checkpoint_path, layer=layer ) def iterate(): for file_path in file_path_list: feats = reader.get_feats(file_path) yield feats.cpu().numpy() return iterate, num_files def get_features( feature_type, checkpoint_path, layer, manifest_path, sample_pct, flatten ): generator, num_files = get_feature_iterator( feature_type=feature_type, checkpoint_path=checkpoint_path, layer=layer, manifest_path=manifest_path, sample_pct=sample_pct, ) iterator = generator() features_list = [] for features in tqdm.tqdm(iterator, total=num_files): features_list.append(features) # Explicit clean up del iterator del generator gc.collect() torch.cuda.empty_cache() if flatten: return np.concatenate(features_list) return features_list def get_and_dump_features( feature_type, checkpoint_path, layer, manifest_path, sample_pct, flatten, out_features_path, ): # Feature extraction features_batch = get_features( feature_type=feature_type, checkpoint_path=checkpoint_path, layer=layer, manifest_path=manifest_path, sample_pct=sample_pct, flatten=flatten, ) # Save features out_dir_path = os.path.dirname(out_features_path) os.makedirs(out_dir_path, exist_ok=True) shutil.copyfile( manifest_path, os.path.join(out_dir_path, os.path.basename(manifest_path)), ) np.save(out_features_path, features_batch) return features_batch
KosmosX-API-main
kosmosX/fairseq/examples/textless_nlp/gslm/speech2unit/pretrained/utils.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import torch import fairseq import soundfile as sf class Wav2VecFeatureReader: """ Wrapper class to run inference on Wav2Vec 2.0 model. Helps extract features for a given audio file. """ def __init__(self, checkpoint_path, layer): state = fairseq.checkpoint_utils.load_checkpoint_to_cpu( checkpoint_path ) w2v_args = state["args"] self.task = fairseq.tasks.setup_task(w2v_args) model = self.task.build_model(w2v_args) model.load_state_dict(state["model"], strict=True) model.eval() model.cuda() self.model = model self.layer = layer def read_audio(self, fname): wav, sr = sf.read(fname) if wav.ndim == 2: wav = wav.mean(-1) assert wav.ndim == 1, wav.ndim assert sr == self.task.cfg.sample_rate, sr return wav def get_feats(self, file_path): x = self.read_audio(file_path) with torch.no_grad(): source = torch.from_numpy(x).view(1, -1).float().cuda() res = self.model( source=source, mask=False, features_only=True, layer=self.layer ) return res["layer_results"][self.layer][0].squeeze(1)
KosmosX-API-main
kosmosX/fairseq/examples/textless_nlp/gslm/speech2unit/pretrained/w2v2_feature_reader.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import soundfile as sf import torch import torchaudio.compliance.kaldi as kaldi class LogMelFeatureReader: """ Wrapper class to compute log mel filterbank features. Helps extract features for a given audio file. """ def __init__(self, *args, **kwargs): self.num_mel_bins = kwargs.get("num_mel_bins", 80) self.frame_length = kwargs.get("frame_length", 25.0) def get_feats(self, file_path): wav, sr = sf.read(file_path) feats = torch.from_numpy(wav).float() feats = kaldi.fbank( feats.unsqueeze(0), num_mel_bins=self.num_mel_bins, frame_length=self.frame_length, sample_frequency=sr, ) return feats
KosmosX-API-main
kosmosX/fairseq/examples/textless_nlp/gslm/speech2unit/pretrained/logmel_feature_reader.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import argparse import logging import os import soundfile as sf from examples.textless_nlp.gslm.unit2speech.tts_data import ( TacotronInputDataset, ) from examples.textless_nlp.gslm.unit2speech.utils import ( load_quantized_audio_from_file, load_tacotron, load_waveglow, synthesize_audio, ) def get_logger(): log_format = "[%(asctime)s] [%(levelname)s]: %(message)s" logging.basicConfig(format=log_format, level=logging.INFO) logger = logging.getLogger(__name__) return logger def get_parser(): parser = argparse.ArgumentParser( description="Wav2Vec 2.0 speech generator." ) parser.add_argument( "--quantized_unit_path", type=str, help="K-means model file path to use for inference", ) parser.add_argument( "--tts_model_path", type=str, help="TTS model file path to use for inference", ) parser.add_argument( "--waveglow_path", type=str, help="Path to the waveglow checkpoint (vocoder).", ) parser.add_argument( "--code_dict_path", type=str, help="Code dict file path to use for inference", ) parser.add_argument("--max_decoder_steps", type=int, default=2000) parser.add_argument("--denoiser_strength", type=float, default=0.1) parser.add_argument( "--out_audio_dir", type=str, help="Output directory to dump audio files", ) return parser def main(args, logger): # Load quantized audio logger.info(f"Loading quantized audio from {args.quantized_unit_path}...") names_batch, quantized_units_batch = load_quantized_audio_from_file( file_path=args.quantized_unit_path ) logger.info(f"Loading TTS model from {args.tts_model_path}...") tacotron_model, sample_rate, hparams = load_tacotron( tacotron_model_path=args.tts_model_path, max_decoder_steps=args.max_decoder_steps, ) logger.info(f"Loading Waveglow model from {args.waveglow_path}...") waveglow, denoiser = load_waveglow(waveglow_path=args.waveglow_path) if not os.path.exists(hparams.code_dict): hparams.code_dict = args.code_dict_path tts_dataset = TacotronInputDataset(hparams) for name, quantized_units in zip(names_batch, quantized_units_batch): quantized_units_str = " ".join(map(str, quantized_units)) tts_input = tts_dataset.get_tensor(quantized_units_str) mel, aud, aud_dn, has_eos = synthesize_audio( tacotron_model, waveglow, denoiser, tts_input.unsqueeze(0), strength=args.denoiser_strength, ) out_file_path = os.path.join(args.out_audio_dir, f"{name}.wav") sf.write( f"{out_file_path}", aud_dn[0].cpu().float().numpy(), sample_rate ) if __name__ == "__main__": parser = get_parser() args = parser.parse_args() logger = get_logger() logger.info(args) main(args, logger)
KosmosX-API-main
kosmosX/fairseq/examples/textless_nlp/gslm/unit2speech/synthesize_audio_from_units.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import torch import numpy as np from examples.textless_nlp.gslm.unit2speech.tacotron2.text import ( EOS_TOK, SOS_TOK, code_to_sequence, text_to_sequence, ) from examples.textless_nlp.gslm.unit2speech.tacotron2.utils import ( load_code_dict, ) class TacotronInputDataset: def __init__(self, hparams, append_str=""): self.is_text = getattr(hparams, "text_or_code", "text") == "text" if not self.is_text: self.code_dict = load_code_dict( hparams.code_dict, hparams.add_sos, hparams.add_eos ) self.code_key = hparams.code_key self.add_sos = hparams.add_sos self.add_eos = hparams.add_eos self.collapse_code = hparams.collapse_code self.append_str = append_str def process_code(self, inp_str): inp_toks = inp_str.split() if self.add_sos: inp_toks = [SOS_TOK] + inp_toks if self.add_eos: inp_toks = inp_toks + [EOS_TOK] return code_to_sequence(inp_toks, self.code_dict, self.collapse_code) def process_text(self, inp_str): return text_to_sequence(inp_str, ["english_cleaners"]) def get_tensor(self, inp_str): # uid, txt, inp_str = self._get_data(idx) inp_str = inp_str + self.append_str if self.is_text: inp_toks = self.process_text(inp_str) else: inp_toks = self.process_code(inp_str) return torch.from_numpy(np.array(inp_toks)).long() def __len__(self): return len(self.data)
KosmosX-API-main
kosmosX/fairseq/examples/textless_nlp/gslm/unit2speech/tts_data.py
# ***************************************************************************** # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # * Neither the name of the NVIDIA CORPORATION nor the # names of its contributors may be used to endorse or promote products # derived from this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND # ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED # WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY # DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES # (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; # LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND # ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS # SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # # ***************************************************************************** import torch from torch.autograd import Variable import torch.nn.functional as F @torch.jit.script def fused_add_tanh_sigmoid_multiply(input_a, input_b, n_channels): n_channels_int = n_channels[0] in_act = input_a+input_b t_act = torch.tanh(in_act[:, :n_channels_int, :]) s_act = torch.sigmoid(in_act[:, n_channels_int:, :]) acts = t_act * s_act return acts class WaveGlowLoss(torch.nn.Module): def __init__(self, sigma=1.0): super(WaveGlowLoss, self).__init__() self.sigma = sigma def forward(self, model_output): z, log_s_list, log_det_W_list = model_output for i, log_s in enumerate(log_s_list): if i == 0: log_s_total = torch.sum(log_s) log_det_W_total = log_det_W_list[i] else: log_s_total = log_s_total + torch.sum(log_s) log_det_W_total += log_det_W_list[i] loss = torch.sum(z*z)/(2*self.sigma*self.sigma) - log_s_total - log_det_W_total return loss/(z.size(0)*z.size(1)*z.size(2)) class Invertible1x1Conv(torch.nn.Module): """ The layer outputs both the convolution, and the log determinant of its weight matrix. 
If reverse=True it does convolution with inverse """ def __init__(self, c): super(Invertible1x1Conv, self).__init__() self.conv = torch.nn.Conv1d(c, c, kernel_size=1, stride=1, padding=0, bias=False) # Sample a random orthonormal matrix to initialize weights W = torch.qr(torch.FloatTensor(c, c).normal_())[0] # Ensure determinant is 1.0 not -1.0 if torch.det(W) < 0: W[:,0] = -1*W[:,0] W = W.view(c, c, 1) self.conv.weight.data = W def forward(self, z, reverse=False): # shape batch_size, group_size, n_of_groups = z.size() W = self.conv.weight.squeeze() if reverse: if not hasattr(self, 'W_inverse'): # Reverse computation W_inverse = W.float().inverse() W_inverse = Variable(W_inverse[..., None]) if z.type() == 'torch.cuda.HalfTensor': W_inverse = W_inverse.half() self.W_inverse = W_inverse z = F.conv1d(z, self.W_inverse, bias=None, stride=1, padding=0) return z else: # Forward computation log_det_W = batch_size * n_of_groups * torch.logdet(W) z = self.conv(z) return z, log_det_W class WN(torch.nn.Module): """ This is the WaveNet like layer for the affine coupling. The primary difference from WaveNet is the convolutions need not be causal. There is also no dilation size reset. The dilation only doubles on each layer """ def __init__(self, n_in_channels, n_mel_channels, n_layers, n_channels, kernel_size): super(WN, self).__init__() assert(kernel_size % 2 == 1) assert(n_channels % 2 == 0) self.n_layers = n_layers self.n_channels = n_channels self.in_layers = torch.nn.ModuleList() self.res_skip_layers = torch.nn.ModuleList() start = torch.nn.Conv1d(n_in_channels, n_channels, 1) start = torch.nn.utils.weight_norm(start, name='weight') self.start = start # Initializing last layer to 0 makes the affine coupling layers # do nothing at first. This helps with training stability end = torch.nn.Conv1d(n_channels, 2*n_in_channels, 1) end.weight.data.zero_() end.bias.data.zero_() self.end = end cond_layer = torch.nn.Conv1d(n_mel_channels, 2*n_channels*n_layers, 1) self.cond_layer = torch.nn.utils.weight_norm(cond_layer, name='weight') for i in range(n_layers): dilation = 2 ** i padding = int((kernel_size*dilation - dilation)/2) in_layer = torch.nn.Conv1d(n_channels, 2*n_channels, kernel_size, dilation=dilation, padding=padding) in_layer = torch.nn.utils.weight_norm(in_layer, name='weight') self.in_layers.append(in_layer) # last one is not necessary if i < n_layers - 1: res_skip_channels = 2*n_channels else: res_skip_channels = n_channels res_skip_layer = torch.nn.Conv1d(n_channels, res_skip_channels, 1) res_skip_layer = torch.nn.utils.weight_norm(res_skip_layer, name='weight') self.res_skip_layers.append(res_skip_layer) def forward(self, forward_input): audio, spect = forward_input audio = self.start(audio) output = torch.zeros_like(audio) n_channels_tensor = torch.IntTensor([self.n_channels]) spect = self.cond_layer(spect) for i in range(self.n_layers): spect_offset = i*2*self.n_channels acts = fused_add_tanh_sigmoid_multiply( self.in_layers[i](audio), spect[:,spect_offset:spect_offset+2*self.n_channels,:], n_channels_tensor) res_skip_acts = self.res_skip_layers[i](acts) if i < self.n_layers - 1: audio = audio + res_skip_acts[:,:self.n_channels,:] output = output + res_skip_acts[:,self.n_channels:,:] else: output = output + res_skip_acts return self.end(output) class WaveGlow(torch.nn.Module): def __init__(self, n_mel_channels, n_flows, n_group, n_early_every, n_early_size, WN_config): super(WaveGlow, self).__init__() self.upsample = torch.nn.ConvTranspose1d(n_mel_channels, n_mel_channels, 1024, stride=256) 
assert(n_group % 2 == 0) self.n_flows = n_flows self.n_group = n_group self.n_early_every = n_early_every self.n_early_size = n_early_size self.WN = torch.nn.ModuleList() self.convinv = torch.nn.ModuleList() n_half = int(n_group/2) # Set up layers with the right sizes based on how many dimensions # have been output already n_remaining_channels = n_group for k in range(n_flows): if k % self.n_early_every == 0 and k > 0: n_half = n_half - int(self.n_early_size/2) n_remaining_channels = n_remaining_channels - self.n_early_size self.convinv.append(Invertible1x1Conv(n_remaining_channels)) self.WN.append(WN(n_half, n_mel_channels*n_group, **WN_config)) self.n_remaining_channels = n_remaining_channels # Useful during inference def forward(self, forward_input): """ forward_input[0] = mel_spectrogram: batch x n_mel_channels x frames forward_input[1] = audio: batch x time """ spect, audio = forward_input # Upsample spectrogram to size of audio spect = self.upsample(spect) assert(spect.size(2) >= audio.size(1)) if spect.size(2) > audio.size(1): spect = spect[:, :, :audio.size(1)] spect = spect.unfold(2, self.n_group, self.n_group).permute(0, 2, 1, 3) spect = spect.contiguous().view(spect.size(0), spect.size(1), -1).permute(0, 2, 1) audio = audio.unfold(1, self.n_group, self.n_group).permute(0, 2, 1) output_audio = [] log_s_list = [] log_det_W_list = [] for k in range(self.n_flows): if k % self.n_early_every == 0 and k > 0: output_audio.append(audio[:,:self.n_early_size,:]) audio = audio[:,self.n_early_size:,:] audio, log_det_W = self.convinv[k](audio) log_det_W_list.append(log_det_W) n_half = int(audio.size(1)/2) audio_0 = audio[:,:n_half,:] audio_1 = audio[:,n_half:,:] output = self.WN[k]((audio_0, spect)) log_s = output[:, n_half:, :] b = output[:, :n_half, :] audio_1 = torch.exp(log_s)*audio_1 + b log_s_list.append(log_s) audio = torch.cat([audio_0, audio_1],1) output_audio.append(audio) return torch.cat(output_audio,1), log_s_list, log_det_W_list def infer(self, spect, sigma=1.0): spect = self.upsample(spect) # trim conv artifacts. 
maybe pad spec to kernel multiple time_cutoff = self.upsample.kernel_size[0] - self.upsample.stride[0] spect = spect[:, :, :-time_cutoff] spect = spect.unfold(2, self.n_group, self.n_group).permute(0, 2, 1, 3) spect = spect.contiguous().view(spect.size(0), spect.size(1), -1).permute(0, 2, 1) if spect.type() == 'torch.cuda.HalfTensor': audio = torch.cuda.HalfTensor(spect.size(0), self.n_remaining_channels, spect.size(2)).normal_() else: audio = torch.cuda.FloatTensor(spect.size(0), self.n_remaining_channels, spect.size(2)).normal_() audio = torch.autograd.Variable(sigma*audio) for k in reversed(range(self.n_flows)): n_half = int(audio.size(1)/2) audio_0 = audio[:,:n_half,:] audio_1 = audio[:,n_half:,:] output = self.WN[k]((audio_0, spect)) s = output[:, n_half:, :] b = output[:, :n_half, :] audio_1 = (audio_1 - b)/torch.exp(s) audio = torch.cat([audio_0, audio_1],1) audio = self.convinv[k](audio, reverse=True) if k % self.n_early_every == 0 and k > 0: if spect.type() == 'torch.cuda.HalfTensor': z = torch.cuda.HalfTensor(spect.size(0), self.n_early_size, spect.size(2)).normal_() else: z = torch.cuda.FloatTensor(spect.size(0), self.n_early_size, spect.size(2)).normal_() audio = torch.cat((sigma*z, audio),1) audio = audio.permute(0,2,1).contiguous().view(audio.size(0), -1).data return audio @staticmethod def remove_weightnorm(model): waveglow = model for WN in waveglow.WN: WN.start = torch.nn.utils.remove_weight_norm(WN.start) WN.in_layers = remove(WN.in_layers) WN.cond_layer = torch.nn.utils.remove_weight_norm(WN.cond_layer) WN.res_skip_layers = remove(WN.res_skip_layers) return waveglow def remove(conv_list): new_conv_list = torch.nn.ModuleList() for old_conv in conv_list: old_conv = torch.nn.utils.remove_weight_norm(old_conv) new_conv_list.append(old_conv) return new_conv_list
KosmosX-API-main
kosmosX/fairseq/examples/textless_nlp/gslm/unit2speech/glow.py
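The flow in glow.py couples the two halves of the audio with audio_1 = exp(log_s) * audio_1 + b in forward() and undoes it with (audio_1 - b) / exp(s) in infer(). A tiny self-contained check, using random tensors in place of the WN output, that this affine coupling round-trips exactly:

# Verify the affine coupling used by WaveGlow.forward()/infer() is invertible.
import torch

torch.manual_seed(0)
n_half, frames = 4, 10
audio_1 = torch.randn(1, n_half, frames)
log_s = 0.1 * torch.randn(1, n_half, frames)  # stands in for the log-scale half of the WN output
b = torch.randn(1, n_half, frames)            # stands in for the bias half of the WN output

coupled = torch.exp(log_s) * audio_1 + b      # forward (training) direction
recovered = (coupled - b) / torch.exp(log_s)  # inverse (inference) direction

print(torch.allclose(recovered, audio_1, atol=1e-6))  # True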
import os import shlex import subprocess import progressbar from time import time from pathlib import Path def find_all_files(path_dir, extension): out = [] for root, dirs, filenames in os.walk(path_dir): for f in filenames: if f.endswith(extension): out.append(((str(Path(f).stem)), os.path.join(root, f))) return out def convert16k(inputfile, outputfile16k): command = ('sox -c 1 -b 16 {} -t wav {} rate 16k'.format(inputfile, outputfile16k)) subprocess.call(shlex.split(command)) if __name__ == "__main__": import argparse parser = argparse.ArgumentParser(description='Convert to wav 16k audio using sox.') parser.add_argument('input_dir', type=str, help='Path to the input dir.') parser.add_argument('output_dir', type=str, help='Path to the output dir.') parser.add_argument('--extension', type=str, default='wav', help='Audio file extension in the input. Default: wav') args = parser.parse_args() # Find all sequences print(f"Finding all audio files with extension '{args.extension}' from {args.input_dir}...") audio_files = find_all_files(args.input_dir, args.extension) print(f"Done! Found {len(audio_files)} files.") # Convert to relative path audio_files = [os.path.relpath(file[-1], start=args.input_dir) for file in audio_files] # Create all the directories needed rel_dirs_set = set([os.path.dirname(file) for file in audio_files]) for rel_dir in rel_dirs_set: Path(os.path.join(args.output_dir, rel_dir)).mkdir(parents=True, exist_ok=True) # Converting wavs files print("Converting the audio to wav files...") bar = progressbar.ProgressBar(maxval=len(audio_files)) bar.start() start_time = time() for index, file in enumerate(audio_files): bar.update(index) input_file = os.path.join(args.input_dir, file) output_file = os.path.join(args.output_dir, os.path.splitext(file)[0]+".wav") convert16k(input_file, output_file) bar.finish() print(f"...done {len(audio_files)} files in {time()-start_time} seconds.")
KosmosX-API-main
kosmosX/fairseq/examples/textless_nlp/gslm/unit2speech/convert_to_16k.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import torch from examples.textless_nlp.gslm.unit2speech.tacotron2.model import Tacotron2 from examples.textless_nlp.gslm.unit2speech.tacotron2.waveglow_denoiser import ( Denoiser, ) def load_quantized_audio_from_file(file_path): base_fname_batch, quantized_units_batch = [], [] with open(file_path) as f: for line in f: base_fname, quantized_units_str = line.rstrip().split("|") quantized_units = [int(q) for q in quantized_units_str.split(" ")] base_fname_batch.append(base_fname) quantized_units_batch.append(quantized_units) return base_fname_batch, quantized_units_batch def synthesize_audio(model, waveglow, denoiser, inp, lab=None, strength=0.0): assert inp.size(0) == 1 inp = inp.cuda() if lab is not None: lab = torch.LongTensor(1).cuda().fill_(lab) with torch.no_grad(): _, mel, _, ali, has_eos = model.inference(inp, lab, ret_has_eos=True) aud = waveglow.infer(mel, sigma=0.666) aud_dn = denoiser(aud, strength=strength).squeeze(1) return mel, aud, aud_dn, has_eos def load_tacotron(tacotron_model_path, max_decoder_steps): ckpt_dict = torch.load(tacotron_model_path) hparams = ckpt_dict["hparams"] hparams.max_decoder_steps = max_decoder_steps sr = hparams.sampling_rate model = Tacotron2(hparams) model.load_state_dict(ckpt_dict["model_dict"]) model = model.cuda().eval().half() return model, sr, hparams def load_waveglow(waveglow_path): waveglow = torch.load(waveglow_path)["model"] waveglow = waveglow.cuda().eval().half() for k in waveglow.convinv: k.float() denoiser = Denoiser(waveglow) return waveglow, denoiser
KosmosX-API-main
kosmosX/fairseq/examples/textless_nlp/gslm/unit2speech/utils.py
import os import time import torch import sys import subprocess argslist = list(sys.argv)[1:] log_dir = argslist[-1] num_gpus = torch.cuda.device_count() argslist.append('--n_gpus={}'.format(num_gpus)) workers = [] job_id = time.strftime("%Y_%m_%d-%H%M%S") argslist.append("--group_name=group_{}".format(job_id)) print("GPU log directory is {}".format(log_dir)) os.makedirs(log_dir, exist_ok=True) for i in range(num_gpus): argslist.append('--rank={}'.format(i)) stdout = None if i == 0 else open("{}/{}_GPU_{}.log".format(log_dir, job_id, i), "w") print(argslist) p = subprocess.Popen([str(sys.executable)]+argslist, stdout=stdout) workers.append(p) argslist = argslist[:-1] for p in workers: p.wait()
KosmosX-API-main
kosmosX/fairseq/examples/textless_nlp/gslm/unit2speech/multiproc.py
""" from https://github.com/keithito/tacotron """ import re valid_symbols = [ 'AA', 'AA0', 'AA1', 'AA2', 'AE', 'AE0', 'AE1', 'AE2', 'AH', 'AH0', 'AH1', 'AH2', 'AO', 'AO0', 'AO1', 'AO2', 'AW', 'AW0', 'AW1', 'AW2', 'AY', 'AY0', 'AY1', 'AY2', 'B', 'CH', 'D', 'DH', 'EH', 'EH0', 'EH1', 'EH2', 'ER', 'ER0', 'ER1', 'ER2', 'EY', 'EY0', 'EY1', 'EY2', 'F', 'G', 'HH', 'IH', 'IH0', 'IH1', 'IH2', 'IY', 'IY0', 'IY1', 'IY2', 'JH', 'K', 'L', 'M', 'N', 'NG', 'OW', 'OW0', 'OW1', 'OW2', 'OY', 'OY0', 'OY1', 'OY2', 'P', 'R', 'S', 'SH', 'T', 'TH', 'UH', 'UH0', 'UH1', 'UH2', 'UW', 'UW0', 'UW1', 'UW2', 'V', 'W', 'Y', 'Z', 'ZH' ] _valid_symbol_set = set(valid_symbols) class CMUDict: '''Thin wrapper around CMUDict data. http://www.speech.cs.cmu.edu/cgi-bin/cmudict''' def __init__(self, file_or_path, keep_ambiguous=True): if isinstance(file_or_path, str): with open(file_or_path, encoding='latin-1') as f: entries = _parse_cmudict(f) else: entries = _parse_cmudict(file_or_path) if not keep_ambiguous: entries = {word: pron for word, pron in entries.items() if len(pron) == 1} self._entries = entries def __len__(self): return len(self._entries) def lookup(self, word): '''Returns list of ARPAbet pronunciations of the given word.''' return self._entries.get(word.upper()) _alt_re = re.compile(r'\([0-9]+\)') def _parse_cmudict(file): cmudict = {} for line in file: if len(line) and (line[0] >= 'A' and line[0] <= 'Z' or line[0] == "'"): parts = line.split(' ') word = re.sub(_alt_re, '', parts[0]) pronunciation = _get_pronunciation(parts[1]) if pronunciation: if word in cmudict: cmudict[word].append(pronunciation) else: cmudict[word] = [pronunciation] return cmudict def _get_pronunciation(s): parts = s.strip().split(' ') for part in parts: if part not in _valid_symbol_set: return None return ' '.join(parts)
KosmosX-API-main
kosmosX/fairseq/examples/textless_nlp/gslm/unit2speech/tacotron2/cmudict.py
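A small usage sketch for the CMUDict wrapper above; the dictionary file name refers to the standard cmudict-0.7b distribution and is an assumption, as are the lookup words.

# Load the CMU pronouncing dictionary and look up ARPAbet pronunciations.
from examples.textless_nlp.gslm.unit2speech.tacotron2.cmudict import CMUDict

cmu = CMUDict("cmudict-0.7b", keep_ambiguous=True)  # assumed local copy of the dictionary
print(len(cmu))               # number of parsed entries
print(cmu.lookup("READ"))     # ambiguous word: may return more than one pronunciation
print(cmu.lookup("qwzxv"))    # None for out-of-vocabulary strings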
# import sys # sys.path.append('tacotron2') import torch from .layers import STFT class Denoiser(torch.nn.Module): """ Removes model bias from audio produced with waveglow """ def __init__(self, waveglow, filter_length=1024, n_overlap=4, win_length=1024, mode='zeros'): super(Denoiser, self).__init__() self.stft = STFT(filter_length=filter_length, hop_length=int(filter_length/n_overlap), win_length=win_length).cuda() if mode == 'zeros': mel_input = torch.zeros( (1, 80, 88), dtype=waveglow.upsample.weight.dtype, device=waveglow.upsample.weight.device) elif mode == 'normal': mel_input = torch.randn( (1, 80, 88), dtype=waveglow.upsample.weight.dtype, device=waveglow.upsample.weight.device) else: raise Exception("Mode {} is not supported".format(mode)) with torch.no_grad(): bias_audio = waveglow.infer(mel_input, sigma=0.0).float() bias_spec, _ = self.stft.transform(bias_audio) self.register_buffer('bias_spec', bias_spec[:, :, 0][:, :, None]) def forward(self, audio, strength=0.1): audio_spec, audio_angles = self.stft.transform(audio.cuda().float()) audio_spec_denoised = audio_spec - self.bias_spec * strength audio_spec_denoised = torch.clamp(audio_spec_denoised, 0.0) audio_denoised = self.stft.inverse(audio_spec_denoised, audio_angles) return audio_denoised
KosmosX-API-main
kosmosX/fairseq/examples/textless_nlp/gslm/unit2speech/tacotron2/waveglow_denoiser.py
KosmosX-API-main
kosmosX/fairseq/examples/textless_nlp/gslm/unit2speech/tacotron2/__init__.py
import torch import numpy as np from scipy.signal import get_window import librosa.util as librosa_util def window_sumsquare(window, n_frames, hop_length=200, win_length=800, n_fft=800, dtype=np.float32, norm=None): """ # from librosa 0.6 Compute the sum-square envelope of a window function at a given hop length. This is used to estimate modulation effects induced by windowing observations in short-time fourier transforms. Parameters ---------- window : string, tuple, number, callable, or list-like Window specification, as in `get_window` n_frames : int > 0 The number of analysis frames hop_length : int > 0 The number of samples to advance between frames win_length : [optional] The length of the window function. By default, this matches `n_fft`. n_fft : int > 0 The length of each analysis frame. dtype : np.dtype The data type of the output Returns ------- wss : np.ndarray, shape=`(n_fft + hop_length * (n_frames - 1))` The sum-squared envelope of the window function """ if win_length is None: win_length = n_fft n = n_fft + hop_length * (n_frames - 1) x = np.zeros(n, dtype=dtype) # Compute the squared window at the desired length win_sq = get_window(window, win_length, fftbins=True) win_sq = librosa_util.normalize(win_sq, norm=norm)**2 win_sq = librosa_util.pad_center(win_sq, n_fft) # Fill the envelope for i in range(n_frames): sample = i * hop_length x[sample:min(n, sample + n_fft)] += win_sq[:max(0, min(n_fft, n - sample))] return x def griffin_lim(magnitudes, stft_fn, n_iters=30): """ PARAMS ------ magnitudes: spectrogram magnitudes stft_fn: STFT class with transform (STFT) and inverse (ISTFT) methods """ angles = np.angle(np.exp(2j * np.pi * np.random.rand(*magnitudes.size()))) angles = angles.astype(np.float32) angles = torch.autograd.Variable(torch.from_numpy(angles)) signal = stft_fn.inverse(magnitudes, angles).squeeze(1) for i in range(n_iters): _, angles = stft_fn.transform(signal) signal = stft_fn.inverse(magnitudes, angles).squeeze(1) return signal def dynamic_range_compression(x, C=1, clip_val=1e-5): """ PARAMS ------ C: compression factor """ return torch.log(torch.clamp(x, min=clip_val) * C) def dynamic_range_decompression(x, C=1): """ PARAMS ------ C: compression factor used to compress """ return torch.exp(x) / C
KosmosX-API-main
kosmosX/fairseq/examples/textless_nlp/gslm/unit2speech/tacotron2/audio_processing.py
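dynamic_range_compression() and dynamic_range_decompression() above are a log/exp pair; the following check with synthetic values (an assumption, not repo code) confirms they invert each other for inputs above clip_val.

# Round-trip check for the dynamic range compression pair defined above.
import torch
from examples.textless_nlp.gslm.unit2speech.tacotron2.audio_processing import (
    dynamic_range_compression,
    dynamic_range_decompression,
)

x = torch.tensor([1e-3, 0.1, 1.0, 10.0])                   # all above clip_val=1e-5
compressed = dynamic_range_compression(x, C=1)             # log(clamp(x, min=1e-5) * C)
recovered = dynamic_range_decompression(compressed, C=1)   # exp(y) / C
print(torch.allclose(recovered, x))                        # True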
""" from https://github.com/keithito/tacotron """ import inflect import re _inflect = inflect.engine() _comma_number_re = re.compile(r'([0-9][0-9\,]+[0-9])') _decimal_number_re = re.compile(r'([0-9]+\.[0-9]+)') _pounds_re = re.compile(r'£([0-9\,]*[0-9]+)') _dollars_re = re.compile(r'\$([0-9\.\,]*[0-9]+)') _ordinal_re = re.compile(r'[0-9]+(st|nd|rd|th)') _number_re = re.compile(r'[0-9]+') def _remove_commas(m): return m.group(1).replace(',', '') def _expand_decimal_point(m): return m.group(1).replace('.', ' point ') def _expand_dollars(m): match = m.group(1) parts = match.split('.') if len(parts) > 2: return match + ' dollars' # Unexpected format dollars = int(parts[0]) if parts[0] else 0 cents = int(parts[1]) if len(parts) > 1 and parts[1] else 0 if dollars and cents: dollar_unit = 'dollar' if dollars == 1 else 'dollars' cent_unit = 'cent' if cents == 1 else 'cents' return '%s %s, %s %s' % (dollars, dollar_unit, cents, cent_unit) elif dollars: dollar_unit = 'dollar' if dollars == 1 else 'dollars' return '%s %s' % (dollars, dollar_unit) elif cents: cent_unit = 'cent' if cents == 1 else 'cents' return '%s %s' % (cents, cent_unit) else: return 'zero dollars' def _expand_ordinal(m): return _inflect.number_to_words(m.group(0)) def _expand_number(m): num = int(m.group(0)) if num > 1000 and num < 3000: if num == 2000: return 'two thousand' elif num > 2000 and num < 2010: return 'two thousand ' + _inflect.number_to_words(num % 100) elif num % 100 == 0: return _inflect.number_to_words(num // 100) + ' hundred' else: return _inflect.number_to_words(num, andword='', zero='oh', group=2).replace(', ', ' ') else: return _inflect.number_to_words(num, andword='') def normalize_numbers(text): text = re.sub(_comma_number_re, _remove_commas, text) text = re.sub(_pounds_re, r'\1 pounds', text) text = re.sub(_dollars_re, _expand_dollars, text) text = re.sub(_decimal_number_re, _expand_decimal_point, text) text = re.sub(_ordinal_re, _expand_ordinal, text) text = re.sub(_number_re, _expand_number, text) return text
KosmosX-API-main
kosmosX/fairseq/examples/textless_nlp/gslm/unit2speech/tacotron2/numbers.py
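A small usage sketch for normalize_numbers above, under the same hypothetical tacotron2 package layout and with inflect installed; the expected expansions are indicative rather than verified output.

from tacotron2.numbers import normalize_numbers

print(normalize_numbers("The book costs $4.50 and came out in 2008."))
# roughly: "The book costs four dollars, fifty cents and came out in two thousand eight."
print(normalize_numbers("She finished 3rd out of 1,000 runners."))
# roughly: "She finished third out of one thousand runners."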
from math import sqrt import torch import torch.distributions as distr from torch.autograd import Variable from torch import nn from torch.nn import functional as F from .layers import ConvNorm, LinearNorm, GlobalAvgPool from .utils import to_gpu, get_mask_from_lengths class LocationLayer(nn.Module): def __init__(self, attention_n_filters, attention_kernel_size, attention_dim): super(LocationLayer, self).__init__() padding = int((attention_kernel_size - 1) / 2) self.location_conv = ConvNorm(2, attention_n_filters, kernel_size=attention_kernel_size, padding=padding, bias=False, stride=1, dilation=1) self.location_dense = LinearNorm(attention_n_filters, attention_dim, bias=False, w_init_gain='tanh') def forward(self, attention_weights_cat): processed_attention = self.location_conv(attention_weights_cat) processed_attention = processed_attention.transpose(1, 2) processed_attention = self.location_dense(processed_attention) return processed_attention class Attention(nn.Module): def __init__(self, attention_rnn_dim, embedding_dim, attention_dim, attention_location_n_filters, attention_location_kernel_size): super(Attention, self).__init__() self.query_layer = LinearNorm(attention_rnn_dim, attention_dim, bias=False, w_init_gain='tanh') self.memory_layer = LinearNorm(embedding_dim, attention_dim, bias=False, w_init_gain='tanh') self.v = LinearNorm(attention_dim, 1, bias=False) self.location_layer = LocationLayer(attention_location_n_filters, attention_location_kernel_size, attention_dim) self.score_mask_value = -float("inf") def get_alignment_energies(self, query, processed_memory, attention_weights_cat): """ PARAMS ------ query: decoder output (batch, n_mel_channels * n_frames_per_step) processed_memory: processed encoder outputs (B, T_in, attention_dim) attention_weights_cat: cumulative and prev. 
att weights (B, 2, max_time) RETURNS ------- alignment (batch, max_time) """ processed_query = self.query_layer(query.unsqueeze(1)) processed_attention_weights = self.location_layer(attention_weights_cat) energies = self.v(torch.tanh( processed_query + processed_attention_weights + processed_memory)) energies = energies.squeeze(-1) return energies def forward(self, attention_hidden_state, memory, processed_memory, attention_weights_cat, mask): """ PARAMS ------ attention_hidden_state: attention rnn last output memory: encoder outputs processed_memory: processed encoder outputs attention_weights_cat: previous and cummulative attention weights mask: binary mask for padded data """ alignment = self.get_alignment_energies( attention_hidden_state, processed_memory, attention_weights_cat) if mask is not None: alignment.data.masked_fill_(mask, self.score_mask_value) attention_weights = F.softmax(alignment, dim=1) attention_context = torch.bmm(attention_weights.unsqueeze(1), memory) attention_context = attention_context.squeeze(1) return attention_context, attention_weights class Prenet(nn.Module): def __init__(self, in_dim, sizes): super(Prenet, self).__init__() in_sizes = [in_dim] + sizes[:-1] self.layers = nn.ModuleList( [LinearNorm(in_size, out_size, bias=False) for (in_size, out_size) in zip(in_sizes, sizes)]) def forward(self, x): for linear in self.layers: x = F.dropout(F.relu(linear(x)), p=0.5, training=True) return x class Postnet(nn.Module): """Postnet - Five 1-d convolution with 512 channels and kernel size 5 """ def __init__(self, hparams): super(Postnet, self).__init__() self.convolutions = nn.ModuleList() self.convolutions.append( nn.Sequential( ConvNorm(hparams.n_mel_channels, hparams.postnet_embedding_dim, kernel_size=hparams.postnet_kernel_size, stride=1, padding=int((hparams.postnet_kernel_size - 1) / 2), dilation=1, w_init_gain='tanh'), nn.BatchNorm1d(hparams.postnet_embedding_dim)) ) for i in range(1, hparams.postnet_n_convolutions - 1): self.convolutions.append( nn.Sequential( ConvNorm(hparams.postnet_embedding_dim, hparams.postnet_embedding_dim, kernel_size=hparams.postnet_kernel_size, stride=1, padding=int((hparams.postnet_kernel_size - 1) / 2), dilation=1, w_init_gain='tanh'), nn.BatchNorm1d(hparams.postnet_embedding_dim)) ) self.convolutions.append( nn.Sequential( ConvNorm(hparams.postnet_embedding_dim, hparams.n_mel_channels, kernel_size=hparams.postnet_kernel_size, stride=1, padding=int((hparams.postnet_kernel_size - 1) / 2), dilation=1, w_init_gain='linear'), nn.BatchNorm1d(hparams.n_mel_channels)) ) def forward(self, x): for i in range(len(self.convolutions) - 1): x = F.dropout(torch.tanh(self.convolutions[i](x)), 0.5, self.training) x = F.dropout(self.convolutions[-1](x), 0.5, self.training) return x class Encoder(nn.Module): """Encoder module: - Three 1-d convolution banks - Bidirectional LSTM """ def __init__(self, hparams): super(Encoder, self).__init__() convolutions = [] for _ in range(hparams.encoder_n_convolutions): conv_layer = nn.Sequential( ConvNorm(hparams.encoder_embedding_dim, hparams.encoder_embedding_dim, kernel_size=hparams.encoder_kernel_size, stride=1, padding=int((hparams.encoder_kernel_size - 1) / 2), dilation=1, w_init_gain='relu'), nn.BatchNorm1d(hparams.encoder_embedding_dim)) convolutions.append(conv_layer) self.convolutions = nn.ModuleList(convolutions) self.lstm = nn.LSTM(hparams.encoder_embedding_dim, int(hparams.encoder_embedding_dim / 2), 1, batch_first=True, bidirectional=True) def forward(self, x, input_lengths): for conv in 
self.convolutions: x = F.dropout(F.relu(conv(x)), 0.5, self.training) x = x.transpose(1, 2) # pytorch tensor are not reversible, hence the conversion input_lengths = input_lengths.cpu().numpy() x = nn.utils.rnn.pack_padded_sequence( x, input_lengths, batch_first=True) self.lstm.flatten_parameters() outputs, _ = self.lstm(x) outputs, _ = nn.utils.rnn.pad_packed_sequence( outputs, batch_first=True) return outputs def inference(self, x): for conv in self.convolutions: x = F.dropout(F.relu(conv(x)), 0.5, self.training) x = x.transpose(1, 2) self.lstm.flatten_parameters() outputs, _ = self.lstm(x) return outputs class AudioEncoder(nn.Module): def __init__(self, hparams): super(AudioEncoder, self).__init__() assert hparams.lat_dim > 0 convolutions = [] inp_dim = hparams.n_mel_channels for _ in range(hparams.lat_n_convolutions): conv_layer = nn.Sequential( ConvNorm(inp_dim, hparams.lat_n_filters, kernel_size=hparams.lat_kernel_size, stride=1, padding=int((hparams.lat_kernel_size - 1) / 2), dilation=1, w_init_gain='tanh'), nn.BatchNorm1d(hparams.lat_n_filters)) inp_dim = hparams.lat_n_filters convolutions.append(conv_layer) self.convolutions = nn.ModuleList(convolutions) self.lstm = nn.LSTM(hparams.lat_n_filters, int(hparams.lat_n_filters / 2), hparams.lat_n_blstms, batch_first=True, bidirectional=True) self.pool = GlobalAvgPool() self.mu_proj = LinearNorm(hparams.lat_n_filters, hparams.lat_dim) self.logvar_proj = LinearNorm(hparams.lat_n_filters, hparams.lat_dim) self.lat_dim = hparams.lat_dim def forward(self, x, lengths): """ Args: x (torch.Tensor): (B, F, T) """ for conv in self.convolutions: x = F.dropout(F.tanh(conv(x)), 0.5, self.training) x = x.transpose(1, 2) # (B, T, D) # x may not be sorted by length. Sort->process->unsort max_len = x.size(1) assert max_len == torch.max(lengths).item() lengths, perm_idx = lengths.sort(0, descending=True) x = x[perm_idx] x = nn.utils.rnn.pack_padded_sequence(x, lengths, batch_first=True) self.lstm.flatten_parameters() outputs, _ = self.lstm(x) outputs, _ = nn.utils.rnn.pad_packed_sequence(outputs, batch_first=True) _, unperm_idx = perm_idx.sort(0) outputs = outputs[unperm_idx] # (B, T, D) lengths = lengths[unperm_idx] # (B, T, D) outputs = self.pool(outputs, lengths) # (B, D) mu = self.mu_proj(outputs) logvar = self.logvar_proj(outputs) z = distr.Normal(mu, logvar).rsample() return z, mu, logvar class Decoder(nn.Module): def __init__(self, hparams): super(Decoder, self).__init__() self.n_mel_channels = hparams.n_mel_channels self.n_frames_per_step = hparams.n_frames_per_step self.encoder_embedding_dim = hparams.encoder_embedding_dim self.obs_dim = hparams.obs_dim self.lat_dim = hparams.lat_dim self.attention_rnn_dim = hparams.attention_rnn_dim self.decoder_rnn_dim = hparams.decoder_rnn_dim self.prenet_dim = hparams.prenet_dim self.max_decoder_steps = hparams.max_decoder_steps self.gate_threshold = hparams.gate_threshold self.p_attention_dropout = hparams.p_attention_dropout self.p_decoder_dropout = hparams.p_decoder_dropout self.prenet = Prenet( hparams.n_mel_channels * hparams.n_frames_per_step, [hparams.prenet_dim, hparams.prenet_dim]) self.attention_rnn = nn.LSTMCell( hparams.prenet_dim + hparams.encoder_embedding_dim, hparams.attention_rnn_dim) self.attention_layer = Attention( hparams.attention_rnn_dim, hparams.encoder_embedding_dim, hparams.attention_dim, hparams.attention_location_n_filters, hparams.attention_location_kernel_size) encoder_tot_dim = (hparams.encoder_embedding_dim + \ hparams.lat_dim + hparams.obs_dim) self.decoder_rnn = 
nn.LSTMCell( hparams.attention_rnn_dim + encoder_tot_dim, hparams.decoder_rnn_dim, 1) self.linear_projection = LinearNorm( hparams.decoder_rnn_dim + encoder_tot_dim, hparams.n_mel_channels * hparams.n_frames_per_step) self.gate_layer = LinearNorm( hparams.decoder_rnn_dim + encoder_tot_dim, 1, bias=True, w_init_gain='sigmoid') def get_go_frame(self, memory): """ Gets all zeros frames to use as first decoder input PARAMS ------ memory: decoder outputs RETURNS ------- decoder_input: all zeros frames """ B = memory.size(0) decoder_input = Variable(memory.data.new( B, self.n_mel_channels * self.n_frames_per_step).zero_()) return decoder_input def initialize_decoder_states(self, memory, obs_and_lat, mask): """ Initializes attention rnn states, decoder rnn states, attention weights, attention cumulative weights, attention context, stores memory and stores processed memory PARAMS ------ memory: Encoder outputs obs_and_lat: Observed and latent attribute embeddings mask: Mask for padded data if training, expects None for inference """ B = memory.size(0) MAX_TIME = memory.size(1) self.attention_hidden = Variable(memory.data.new( B, self.attention_rnn_dim).zero_()) self.attention_cell = Variable(memory.data.new( B, self.attention_rnn_dim).zero_()) self.decoder_hidden = Variable(memory.data.new( B, self.decoder_rnn_dim).zero_()) self.decoder_cell = Variable(memory.data.new( B, self.decoder_rnn_dim).zero_()) self.attention_weights = Variable(memory.data.new( B, MAX_TIME).zero_()) self.attention_weights_cum = Variable(memory.data.new( B, MAX_TIME).zero_()) self.attention_context = Variable(memory.data.new( B, self.encoder_embedding_dim).zero_()) self.memory = memory self.processed_memory = self.attention_layer.memory_layer(memory) self.obs_and_lat = obs_and_lat self.mask = mask def parse_decoder_inputs(self, decoder_inputs): """ Prepares decoder inputs, i.e. mel outputs PARAMS ------ decoder_inputs: inputs used for teacher-forced training, i.e. 
mel-specs RETURNS ------- inputs: processed decoder inputs """ # (B, n_mel_channels, T_out) -> (B, T_out, n_mel_channels) decoder_inputs = decoder_inputs.transpose(1, 2) decoder_inputs = decoder_inputs.view( decoder_inputs.size(0), int(decoder_inputs.size(1)/self.n_frames_per_step), -1) # (B, T_out, n_mel_channels) -> (T_out, B, n_mel_channels) decoder_inputs = decoder_inputs.transpose(0, 1) return decoder_inputs def parse_decoder_outputs(self, mel_outputs, gate_outputs, alignments): """ Prepares decoder outputs for output PARAMS ------ mel_outputs: gate_outputs: gate output energies alignments: RETURNS ------- mel_outputs: gate_outpust: gate output energies alignments: """ # (T_out, B) -> (B, T_out) alignments = torch.stack(alignments).transpose(0, 1) # (T_out, B) -> (B, T_out) gate_outputs = torch.stack(gate_outputs).transpose(0, 1) gate_outputs = gate_outputs.contiguous() # (T_out, B, n_mel_channels) -> (B, T_out, n_mel_channels) mel_outputs = torch.stack(mel_outputs).transpose(0, 1).contiguous() # decouple frames per step mel_outputs = mel_outputs.view( mel_outputs.size(0), -1, self.n_mel_channels) # (B, T_out, n_mel_channels) -> (B, n_mel_channels, T_out) mel_outputs = mel_outputs.transpose(1, 2) return mel_outputs, gate_outputs, alignments def decode(self, decoder_input): """ Decoder step using stored states, attention and memory PARAMS ------ decoder_input: previous mel output RETURNS ------- mel_output: gate_output: gate output energies attention_weights: """ cell_input = torch.cat((decoder_input, self.attention_context), -1) self.attention_hidden, self.attention_cell = self.attention_rnn( cell_input, (self.attention_hidden, self.attention_cell)) self.attention_hidden = F.dropout( self.attention_hidden, self.p_attention_dropout, self.training) attention_weights_cat = torch.cat( (self.attention_weights.unsqueeze(1), self.attention_weights_cum.unsqueeze(1)), dim=1) self.attention_context, self.attention_weights = self.attention_layer( self.attention_hidden, self.memory, self.processed_memory, attention_weights_cat, self.mask) self.attention_weights_cum += self.attention_weights decoder_input = torch.cat( (self.attention_hidden, self.attention_context), -1) if self.obs_and_lat is not None: decoder_input = torch.cat((decoder_input, self.obs_and_lat), -1) self.decoder_hidden, self.decoder_cell = self.decoder_rnn( decoder_input, (self.decoder_hidden, self.decoder_cell)) self.decoder_hidden = F.dropout( self.decoder_hidden, self.p_decoder_dropout, self.training) decoder_hidden_attention_context = torch.cat( (self.decoder_hidden, self.attention_context), dim=1) if self.obs_and_lat is not None: decoder_hidden_attention_context = torch.cat( (decoder_hidden_attention_context, self.obs_and_lat), dim=1) decoder_output = self.linear_projection( decoder_hidden_attention_context) gate_prediction = self.gate_layer(decoder_hidden_attention_context) return decoder_output, gate_prediction, self.attention_weights def forward(self, memory, obs_and_lat, decoder_inputs, memory_lengths): """ Decoder forward pass for training PARAMS ------ memory: Encoder outputs obs_and_lat: Observed and latent attribute embeddings decoder_inputs: Decoder inputs for teacher forcing. i.e. mel-specs memory_lengths: Encoder output lengths for attention masking. 
RETURNS ------- mel_outputs: mel outputs from the decoder gate_outputs: gate outputs from the decoder alignments: sequence of attention weights from the decoder """ decoder_input = self.get_go_frame(memory).unsqueeze(0) decoder_inputs = self.parse_decoder_inputs(decoder_inputs) decoder_inputs = torch.cat((decoder_input, decoder_inputs), dim=0) decoder_inputs = self.prenet(decoder_inputs) self.initialize_decoder_states( memory, obs_and_lat, mask=~get_mask_from_lengths(memory_lengths)) mel_outputs, gate_outputs, alignments = [], [], [] while len(mel_outputs) < decoder_inputs.size(0) - 1: decoder_input = decoder_inputs[len(mel_outputs)] mel_output, gate_output, attention_weights = self.decode( decoder_input) mel_outputs += [mel_output.squeeze(1)] gate_outputs += [gate_output.squeeze()] alignments += [attention_weights] mel_outputs, gate_outputs, alignments = self.parse_decoder_outputs( mel_outputs, gate_outputs, alignments) return mel_outputs, gate_outputs, alignments def inference(self, memory, obs_and_lat, ret_has_eos=False): """ Decoder inference PARAMS ------ memory: Encoder outputs obs_and_lat: Observed and latent attribute embeddings RETURNS ------- mel_outputs: mel outputs from the decoder gate_outputs: gate outputs from the decoder alignments: sequence of attention weights from the decoder """ decoder_input = self.get_go_frame(memory) self.initialize_decoder_states(memory, obs_and_lat, mask=None) mel_outputs, gate_outputs, alignments = [], [], [] has_eos = False while True: decoder_input = self.prenet(decoder_input) mel_output, gate_output, alignment = self.decode(decoder_input) mel_outputs += [mel_output.squeeze(1)] gate_outputs += [gate_output] alignments += [alignment] if torch.sigmoid(gate_output.data) > self.gate_threshold: has_eos = True break elif len(mel_outputs) == self.max_decoder_steps: # print("Warning! 
Reached max decoder steps") break decoder_input = mel_output mel_outputs, gate_outputs, alignments = self.parse_decoder_outputs( mel_outputs, gate_outputs, alignments) if ret_has_eos: return mel_outputs, gate_outputs, alignments, has_eos else: return mel_outputs, gate_outputs, alignments class Tacotron2(nn.Module): def __init__(self, hparams): super(Tacotron2, self).__init__() self.mask_padding = hparams.mask_padding self.fp16_run = hparams.fp16_run self.n_mel_channels = hparams.n_mel_channels self.n_frames_per_step = hparams.n_frames_per_step # initialize text encoder embedding self.embedding = nn.Embedding( hparams.n_symbols, hparams.symbols_embedding_dim) std = sqrt(2.0 / (hparams.n_symbols + hparams.symbols_embedding_dim)) val = sqrt(3.0) * std # uniform bounds for std self.embedding.weight.data.uniform_(-val, val) # initialize observed attribute embedding self.obs_embedding = None if hparams.obs_dim > 0: self.obs_embedding = nn.Embedding( hparams.obs_n_class, hparams.obs_dim) std = sqrt(2.0 / (hparams.obs_n_class + hparams.obs_dim)) val = sqrt(3.0) * std # uniform bounds for std self.obs_embedding.weight.data.uniform_(-val, val) self.encoder = Encoder(hparams) self.decoder = Decoder(hparams) self.postnet = Postnet(hparams) self.lat_encoder = None if hparams.lat_dim > 0: self.lat_encoder = AudioEncoder(hparams) def parse_batch(self, batch): (text_padded, input_lengths, obs_labels, mel_padded, gate_padded, output_lengths) = batch text_padded = to_gpu(text_padded).long() input_lengths = to_gpu(input_lengths).long() obs_labels = to_gpu(obs_labels).long() max_len = torch.max(input_lengths.data).item() mel_padded = to_gpu(mel_padded).float() gate_padded = to_gpu(gate_padded).float() output_lengths = to_gpu(output_lengths).long() return ( (text_padded, input_lengths, obs_labels, mel_padded, max_len, output_lengths), (mel_padded, gate_padded)) def parse_output(self, outputs, output_lengths=None): if self.mask_padding and output_lengths is not None: mask = ~get_mask_from_lengths(output_lengths) mask = mask.expand(self.n_mel_channels, mask.size(0), mask.size(1)) mask = mask.permute(1, 0, 2) outputs[0].data.masked_fill_(mask, 0.0) outputs[1].data.masked_fill_(mask, 0.0) outputs[2].data.masked_fill_(mask[:, 0, :], 1e3) # gate energies return outputs def forward(self, inputs): (text_inputs, text_lengths, obs_labels, mels, max_len, output_lengths) = inputs text_lengths, output_lengths = text_lengths.data, output_lengths.data embedded_inputs = self.embedding(text_inputs).transpose(1, 2) encoder_outputs = self.encoder(embedded_inputs, text_lengths) obs = None if self.obs_embedding is not None: obs = self.obs_embedding(obs_labels) lat, lat_mu, lat_logvar = None, None, None if self.lat_encoder is not None: (lat, lat_mu, lat_logvar) = self.lat_encoder(mels, output_lengths) obs_and_lat = [x for x in [obs, lat] if x is not None] if bool(obs_and_lat): obs_and_lat = torch.cat(obs_and_lat, dim=-1) else: obs_and_lat = None mel_outputs, gate_outputs, alignments = self.decoder( encoder_outputs, obs_and_lat, mels, memory_lengths=text_lengths) mel_outputs_postnet = self.postnet(mel_outputs) mel_outputs_postnet = mel_outputs + mel_outputs_postnet return self.parse_output( [mel_outputs, mel_outputs_postnet, gate_outputs, alignments, lat_mu, lat_logvar], output_lengths) def inference(self, inputs, obs_labels=None, lat=None, ret_has_eos=False): embedded_inputs = self.embedding(inputs).transpose(1, 2) encoder_outputs = self.encoder.inference(embedded_inputs) if obs_labels is None: obs_labels = 
torch.LongTensor(len(inputs)) obs_labels = obs_labels.to(inputs.device).zero_() obs = None if self.obs_embedding is not None: obs = self.obs_embedding(obs_labels) if self.lat_encoder is not None: if lat is None: lat = torch.FloatTensor(len(inputs), self.lat_encoder.lat_dim) lat = lat.to(inputs.device).zero_().type(encoder_outputs.type()) obs_and_lat = [x for x in [obs, lat] if x is not None] if bool(obs_and_lat): obs_and_lat = torch.cat(obs_and_lat, dim=-1) else: obs_and_lat = None mel_outputs, gate_outputs, alignments, has_eos = self.decoder.inference( encoder_outputs, obs_and_lat, ret_has_eos=True) mel_outputs_postnet = self.postnet(mel_outputs) mel_outputs_postnet = mel_outputs + mel_outputs_postnet outputs = self.parse_output( [mel_outputs, mel_outputs_postnet, gate_outputs, alignments]) if ret_has_eos: return outputs + [has_eos] else: return outputs
KosmosX-API-main
kosmosX/fairseq/examples/textless_nlp/gslm/unit2speech/tacotron2/model.py
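A standalone sketch of the alignment-energy computation inside Attention above, e = v^T tanh(W q + U f + processed memory); the tensor sizes are illustrative and not taken from any hparams object.

import torch
import torch.nn.functional as F

B, T, attn_dim = 2, 7, 16
processed_query = torch.randn(B, 1, attn_dim)      # W q, broadcast over encoder time steps
processed_location = torch.randn(B, T, attn_dim)   # U f, from the location convolution
processed_memory = torch.randn(B, T, attn_dim)     # projected encoder outputs

v = torch.randn(attn_dim, 1)
energies = torch.tanh(processed_query + processed_location + processed_memory) @ v
energies = energies.squeeze(-1)                    # (B, T) alignment energies
weights = F.softmax(energies, dim=1)               # attention weights for one decoder step
print(weights.shape, float(weights.sum(dim=1)[0])) # torch.Size([2, 7]) ~1.0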
""" BSD 3-Clause License Copyright (c) 2017, Prem Seetharaman All rights reserved. * Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. """ import torch import numpy as np import torch.nn.functional as F from torch.autograd import Variable from scipy.signal import get_window from librosa.util import pad_center, tiny from .audio_processing import window_sumsquare class STFT(torch.nn.Module): """adapted from Prem Seetharaman's https://github.com/pseeth/pytorch-stft""" def __init__(self, filter_length=800, hop_length=200, win_length=800, window='hann'): super(STFT, self).__init__() self.filter_length = filter_length self.hop_length = hop_length self.win_length = win_length self.window = window self.forward_transform = None scale = self.filter_length / self.hop_length fourier_basis = np.fft.fft(np.eye(self.filter_length)) cutoff = int((self.filter_length / 2 + 1)) fourier_basis = np.vstack([np.real(fourier_basis[:cutoff, :]), np.imag(fourier_basis[:cutoff, :])]) forward_basis = torch.FloatTensor(fourier_basis[:, None, :]) inverse_basis = torch.FloatTensor( np.linalg.pinv(scale * fourier_basis).T[:, None, :]) if window is not None: assert(filter_length >= win_length) # get window and zero center pad it to filter_length fft_window = get_window(window, win_length, fftbins=True) fft_window = pad_center(fft_window, filter_length) fft_window = torch.from_numpy(fft_window).float() # window the bases forward_basis *= fft_window inverse_basis *= fft_window self.register_buffer('forward_basis', forward_basis.float()) self.register_buffer('inverse_basis', inverse_basis.float()) def transform(self, input_data): num_batches = input_data.size(0) num_samples = input_data.size(1) self.num_samples = num_samples # similar to librosa, reflect-pad the input input_data = input_data.view(num_batches, 1, num_samples) input_data = F.pad( input_data.unsqueeze(1), (int(self.filter_length / 2), int(self.filter_length / 2), 0, 0), mode='reflect') input_data = input_data.squeeze(1) forward_transform = F.conv1d( input_data, Variable(self.forward_basis, requires_grad=False), stride=self.hop_length, padding=0) cutoff = int((self.filter_length / 2) + 1) real_part = 
forward_transform[:, :cutoff, :] imag_part = forward_transform[:, cutoff:, :] magnitude = torch.sqrt(real_part**2 + imag_part**2) phase = torch.autograd.Variable( torch.atan2(imag_part.data, real_part.data)) return magnitude, phase def inverse(self, magnitude, phase): recombine_magnitude_phase = torch.cat( [magnitude*torch.cos(phase), magnitude*torch.sin(phase)], dim=1) inverse_transform = F.conv_transpose1d( recombine_magnitude_phase, Variable(self.inverse_basis, requires_grad=False), stride=self.hop_length, padding=0) if self.window is not None: window_sum = window_sumsquare( self.window, magnitude.size(-1), hop_length=self.hop_length, win_length=self.win_length, n_fft=self.filter_length, dtype=np.float32) # remove modulation effects approx_nonzero_indices = torch.from_numpy( np.where(window_sum > tiny(window_sum))[0]) window_sum = torch.autograd.Variable( torch.from_numpy(window_sum), requires_grad=False) window_sum = window_sum.cuda() if magnitude.is_cuda else window_sum inverse_transform[:, :, approx_nonzero_indices] /= window_sum[approx_nonzero_indices] # scale by hop ratio inverse_transform *= float(self.filter_length) / self.hop_length inverse_transform = inverse_transform[:, :, int(self.filter_length/2):] inverse_transform = inverse_transform[:, :, :-int(self.filter_length/2):] return inverse_transform def forward(self, input_data): self.magnitude, self.phase = self.transform(input_data) reconstruction = self.inverse(self.magnitude, self.phase) return reconstruction
KosmosX-API-main
kosmosX/fairseq/examples/textless_nlp/gslm/unit2speech/tacotron2/stft.py
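An analysis/synthesis sketch for the STFT module above, assuming the hypothetical tacotron2 package layout and an older librosa whose pad_center accepts a positional size, as the module itself expects.

import torch
from tacotron2.stft import STFT

stft = STFT(filter_length=800, hop_length=200, win_length=800, window='hann')
audio = torch.rand(1, 16000) * 2 - 1              # one second of fake audio in [-1, 1]
magnitude, phase = stft.transform(audio)           # each (1, filter_length // 2 + 1, n_frames)
reconstruction = stft.inverse(magnitude, phase)    # approximate inverse STFT, (1, 1, n_samples')
print(magnitude.shape, reconstruction.shape)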
""" from https://github.com/keithito/tacotron """ ''' Defines the set of symbols used in text input to the model. The default is a set of ASCII characters that works well for English or text that has been run through Unidecode. For other data, you can modify _characters. See TRAINING_DATA.md for details. ''' from . import cmudict _pad = '_' _punctuation = '!\'(),.:;? ' _special = '-' _letters = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz' # Prepend "@" to ARPAbet symbols to ensure uniqueness (some are the same as uppercase letters): _arpabet = ['@' + s for s in cmudict.valid_symbols] # Export all symbols: symbols = [_pad] + list(_special) + list(_punctuation) + list(_letters) + _arpabet
KosmosX-API-main
kosmosX/fairseq/examples/textless_nlp/gslm/unit2speech/tacotron2/symbols.py
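A quick look at the symbol inventory defined above, under the same hypothetical package layout (the module needs its sibling cmudict).

from tacotron2.symbols import symbols

print(len(symbols))    # pad + special + punctuation + letters + '@'-prefixed ARPAbet
print(symbols[:8])     # ['_', '-', '!', "'", '(', ')', ',', '.']
print(symbols[-3:])    # a few trailing ARPAbet entries such as '@Z', '@ZH'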
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import collections import io import json import librosa import numpy as np import soundfile as sf import time import torch from .text import SOS_TOK, EOS_TOK def get_mask_from_lengths(lengths): max_len = torch.max(lengths).item() ids = torch.arange(0, max_len, out=torch.cuda.LongTensor(max_len)) mask = (ids < lengths.unsqueeze(1)) return mask def load_wav_to_torch(full_path, sr=None): data, sr = librosa.load(full_path, sr=sr) data = np.clip(data, -1, 1) # potentially out of [-1, 1] due to resampling data = data * 32768.0 # match values loaded by scipy return torch.FloatTensor(data.astype(np.float32)), sr def read_binary_audio(bin_data, tar_sr=None): """ read binary audio (`bytes` or `uint8` `numpy.ndarray`) to `float32` `numpy.ndarray` RETURNS: data (np.ndarray) : audio of shape (n,) or (2, n) tar_sr (int) : sample rate """ data, ori_sr = sf.read(io.BytesIO(bin_data), dtype='float32') data = data.T if (tar_sr is not None) and (ori_sr != tar_sr): data = librosa.resample(data, ori_sr, tar_sr) else: tar_sr = ori_sr data = np.clip(data, -1, 1) data = data * 32768.0 return torch.FloatTensor(data.astype(np.float32)), tar_sr def load_filepaths_and_text(filename): with open(filename, encoding='utf-8') as f: data = [json.loads(line.rstrip()) for line in f] return data def to_gpu(x): x = x.contiguous() if torch.cuda.is_available(): x = x.cuda(non_blocking=True) return torch.autograd.Variable(x) def load_code_dict(path, add_sos=False, add_eos=False): if not path: return {} with open(path, 'r') as f: codes = ['_'] + [line.rstrip() for line in f] # '_' for pad code_dict = {c: i for i, c in enumerate(codes)} if add_sos: code_dict[SOS_TOK] = len(code_dict) if add_eos: code_dict[EOS_TOK] = len(code_dict) assert(set(code_dict.values()) == set(range(len(code_dict)))) return code_dict def load_obs_label_dict(path): if not path: return {} with open(path, 'r') as f: obs_labels = [line.rstrip() for line in f] return {c: i for i, c in enumerate(obs_labels)} # A simple timer class inspired from `tnt.TimeMeter` class CudaTimer: def __init__(self, keys): self.keys = keys self.reset() def start(self, key): s = torch.cuda.Event(enable_timing=True) s.record() self.start_events[key].append(s) return self def stop(self, key): e = torch.cuda.Event(enable_timing=True) e.record() self.end_events[key].append(e) return self def reset(self): self.start_events = collections.defaultdict(list) self.end_events = collections.defaultdict(list) self.running_times = collections.defaultdict(float) self.n = collections.defaultdict(int) return self def value(self): self._synchronize() return {k: self.running_times[k] / self.n[k] for k in self.keys} def _synchronize(self): torch.cuda.synchronize() for k in self.keys: starts = self.start_events[k] ends = self.end_events[k] if len(starts) == 0: raise ValueError("Trying to divide by zero in TimeMeter") if len(ends) != len(starts): raise ValueError("Call stop before checking value!") time = 0 for start, end in zip(starts, ends): time += start.elapsed_time(end) self.running_times[k] += time * 1e-3 self.n[k] += len(starts) self.start_events = collections.defaultdict(list) self.end_events = collections.defaultdict(list) # Used to measure the time taken for multiple events class Timer: def __init__(self, keys): self.keys = keys self.n = {} self.running_time = {} self.total_time = {} self.reset() def start(self, key): 
self.running_time[key] = time.time() return self def stop(self, key): self.total_time[key] = time.time() - self.running_time[key] self.n[key] += 1 self.running_time[key] = None return self def reset(self): for k in self.keys: self.total_time[k] = 0 self.running_time[k] = None self.n[k] = 0 return self def value(self): vals = {} for k in self.keys: if self.n[k] == 0: raise ValueError("Trying to divide by zero in TimeMeter") else: vals[k] = self.total_time[k] / self.n[k] return vals
KosmosX-API-main
kosmosX/fairseq/examples/textless_nlp/gslm/unit2speech/tacotron2/utils.py
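A small usage sketch for load_code_dict above (one unit symbol per line, with '_' reserved for padding), again under the hypothetical tacotron2 package layout.

import tempfile
from tacotron2.utils import load_code_dict

with tempfile.NamedTemporaryFile("w", suffix=".txt", delete=False) as f:
    f.write("10\n27\n42\n")                 # three fake discrete unit ids
    path = f.name

code_dict = load_code_dict(path, add_sos=True, add_eos=True)
print(code_dict)  # {'_': 0, '10': 1, '27': 2, '42': 3, '<s>': 4, '</s>': 5}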
""" from https://github.com/keithito/tacotron """ import numpy as np import re from . import cleaners from .symbols import symbols # Mappings from symbol to numeric ID and vice versa: _symbol_to_id = {s: i for i, s in enumerate(symbols)} _id_to_symbol = {i: s for i, s in enumerate(symbols)} # Regular expression matching text enclosed in curly braces: _curly_re = re.compile(r'(.*?)\{(.+?)\}(.*)') # Special symbols SOS_TOK = '<s>' EOS_TOK = '</s>' def text_to_sequence(text, cleaner_names): '''Converts a string of text to a sequence of IDs corresponding to the symbols in the text. The text can optionally have ARPAbet sequences enclosed in curly braces embedded in it. For example, "Turn left on {HH AW1 S S T AH0 N} Street." Args: text: string to convert to a sequence cleaner_names: names of the cleaner functions to run the text through Returns: List of integers corresponding to the symbols in the text ''' sequence = [] # Check for curly braces and treat their contents as ARPAbet: while len(text): m = _curly_re.match(text) if not m: sequence += _symbols_to_sequence(_clean_text(text, cleaner_names)) break sequence += _symbols_to_sequence(_clean_text(m.group(1), cleaner_names)) sequence += _arpabet_to_sequence(m.group(2)) text = m.group(3) return sequence def sample_code_chunk(code, size): assert(size > 0 and size <= len(code)) start = np.random.randint(len(code) - size + 1) end = start + size return code[start:end], start, end def code_to_sequence(code, code_dict, collapse_code): if collapse_code: prev_c = None sequence = [] for c in code: if c in code_dict and c != prev_c: sequence.append(code_dict[c]) prev_c = c else: sequence = [code_dict[c] for c in code if c in code_dict] if len(sequence) < 0.95 * len(code): print('WARNING : over 5%% codes are OOV') return sequence def sequence_to_text(sequence): '''Converts a sequence of IDs back to a string''' result = '' for symbol_id in sequence: if symbol_id in _id_to_symbol: s = _id_to_symbol[symbol_id] # Enclose ARPAbet back in curly braces: if len(s) > 1 and s[0] == '@': s = '{%s}' % s[1:] result += s return result.replace('}{', ' ') def sequence_to_code(sequence, code_dict): '''Analogous to sequence_to_text''' id_to_code = {i: c for c, i in code_dict.items()} return ' '.join([id_to_code[i] for i in sequence]) def _clean_text(text, cleaner_names): for name in cleaner_names: cleaner = getattr(cleaners, name) if not cleaner: raise Exception('Unknown cleaner: %s' % name) text = cleaner(text) return text def _symbols_to_sequence(symbols): return [_symbol_to_id[s] for s in symbols if _should_keep_symbol(s)] def _arpabet_to_sequence(text): return _symbols_to_sequence(['@' + s for s in text.split()]) def _should_keep_symbol(s): return s in _symbol_to_id and s != '_' and s != '~'
KosmosX-API-main
kosmosX/fairseq/examples/textless_nlp/gslm/unit2speech/tacotron2/text.py
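A usage sketch for text_to_sequence / sequence_to_text above, under the same hypothetical package layout; curly-braced ARPAbet is kept as single symbols while the surrounding text goes through the named cleaners.

from tacotron2.text import sequence_to_text, text_to_sequence

seq = text_to_sequence("Turn left on {HH AW1 S S T AH0 N} Street.", ["english_cleaners"])
print(seq)                    # list of integer symbol ids
print(sequence_to_text(seq))  # roughly: "turn left on {HH AW1 S S T AH0 N} street."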
""" from https://github.com/keithito/tacotron """ ''' Cleaners are transformations that run over the input text at both training and eval time. Cleaners can be selected by passing a comma-delimited list of cleaner names as the "cleaners" hyperparameter. Some cleaners are English-specific. You'll typically want to use: 1. "english_cleaners" for English text 2. "transliteration_cleaners" for non-English text that can be transliterated to ASCII using the Unidecode library (https://pypi.python.org/pypi/Unidecode) 3. "basic_cleaners" if you do not want to transliterate (in this case, you should also update the symbols in symbols.py to match your data). ''' import re from unidecode import unidecode from .numbers import normalize_numbers # Regular expression matching whitespace: _whitespace_re = re.compile(r'\s+') # List of (regular expression, replacement) pairs for abbreviations: _abbreviations = [(re.compile('\\b%s\\.' % x[0], re.IGNORECASE), x[1]) for x in [ ('mrs', 'misess'), ('mr', 'mister'), ('dr', 'doctor'), ('st', 'saint'), ('co', 'company'), ('jr', 'junior'), ('maj', 'major'), ('gen', 'general'), ('drs', 'doctors'), ('rev', 'reverend'), ('lt', 'lieutenant'), ('hon', 'honorable'), ('sgt', 'sergeant'), ('capt', 'captain'), ('esq', 'esquire'), ('ltd', 'limited'), ('col', 'colonel'), ('ft', 'fort'), ]] def expand_abbreviations(text): for regex, replacement in _abbreviations: text = re.sub(regex, replacement, text) return text def expand_numbers(text): return normalize_numbers(text) def lowercase(text): return text.lower() def collapse_whitespace(text): return re.sub(_whitespace_re, ' ', text) def convert_to_ascii(text): return unidecode(text) def basic_cleaners(text): '''Basic pipeline that lowercases and collapses whitespace without transliteration.''' text = lowercase(text) text = collapse_whitespace(text) return text def transliteration_cleaners(text): '''Pipeline for non-English text that transliterates to ASCII.''' text = convert_to_ascii(text) text = lowercase(text) text = collapse_whitespace(text) return text def english_cleaners(text): '''Pipeline for English text, including number and abbreviation expansion.''' text = convert_to_ascii(text) text = lowercase(text) text = expand_numbers(text) text = expand_abbreviations(text) text = collapse_whitespace(text) return text
KosmosX-API-main
kosmosX/fairseq/examples/textless_nlp/gslm/unit2speech/tacotron2/cleaners.py
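A before/after sketch for english_cleaners above (needs unidecode and inflect installed), under the same hypothetical package layout; the expected output is indicative.

from tacotron2.cleaners import english_cleaners

print(english_cleaners("Mr. Smith bought 3 apples for $1.50."))
# roughly: "mister smith bought three apples for one dollar, fifty cents."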
import torch from librosa.filters import mel as librosa_mel_fn from .audio_processing import dynamic_range_compression from .audio_processing import dynamic_range_decompression from .stft import STFT from .utils import get_mask_from_lengths class LinearNorm(torch.nn.Module): def __init__(self, in_dim, out_dim, bias=True, w_init_gain='linear'): super(LinearNorm, self).__init__() self.linear_layer = torch.nn.Linear(in_dim, out_dim, bias=bias) torch.nn.init.xavier_uniform_( self.linear_layer.weight, gain=torch.nn.init.calculate_gain(w_init_gain)) def forward(self, x): return self.linear_layer(x) class ConvNorm(torch.nn.Module): def __init__(self, in_channels, out_channels, kernel_size=1, stride=1, padding=None, dilation=1, bias=True, w_init_gain='linear'): super(ConvNorm, self).__init__() if padding is None: assert(kernel_size % 2 == 1) padding = int(dilation * (kernel_size - 1) / 2) self.conv = torch.nn.Conv1d(in_channels, out_channels, kernel_size=kernel_size, stride=stride, padding=padding, dilation=dilation, bias=bias) torch.nn.init.xavier_uniform_( self.conv.weight, gain=torch.nn.init.calculate_gain(w_init_gain)) def forward(self, signal): conv_signal = self.conv(signal) return conv_signal class GlobalAvgPool(torch.nn.Module): def __init__(self): super(GlobalAvgPool, self).__init__() def forward(self, x, lengths=None): """Average pooling across time steps (dim=1) with optionally lengths. Args: x: torch.Tensor of shape (N, T, ...) lengths: None or torch.Tensor of shape (N,) dim: dimension to pool """ if lengths is None: return x.mean(dim=1, keepdim=False) else: mask = get_mask_from_lengths(lengths).type(x.type()).to(x.device) mask_shape = list(mask.size()) + [1 for _ in range(x.ndimension()-2)] mask = mask.reshape(*mask_shape) numer = (x * mask).sum(dim=1, keepdim=False) denom = mask.sum(dim=1, keepdim=False) return numer / denom class TacotronSTFT(torch.nn.Module): def __init__(self, filter_length=1024, hop_length=256, win_length=1024, n_mel_channels=80, sampling_rate=22050, mel_fmin=0.0, mel_fmax=8000.0): super(TacotronSTFT, self).__init__() self.n_mel_channels = n_mel_channels self.sampling_rate = sampling_rate self.stft_fn = STFT(filter_length, hop_length, win_length) mel_basis = librosa_mel_fn( sampling_rate, filter_length, n_mel_channels, mel_fmin, mel_fmax) mel_basis = torch.from_numpy(mel_basis).float() self.register_buffer('mel_basis', mel_basis) def spectral_normalize(self, magnitudes): output = dynamic_range_compression(magnitudes) return output def spectral_de_normalize(self, magnitudes): output = dynamic_range_decompression(magnitudes) return output def mel_spectrogram(self, y): """Computes mel-spectrograms from a batch of waves PARAMS ------ y: Variable(torch.FloatTensor) with shape (B, T) in range [-1, 1] RETURNS ------- mel_output: torch.FloatTensor of shape (B, n_mel_channels, T) """ assert(torch.min(y.data) >= -1) assert(torch.max(y.data) <= 1) magnitudes, phases = self.stft_fn.transform(y) magnitudes = magnitudes.data mel_output = torch.matmul(self.mel_basis, magnitudes) mel_output = self.spectral_normalize(mel_output) return mel_output
KosmosX-API-main
kosmosX/fairseq/examples/textless_nlp/gslm/unit2speech/tacotron2/layers.py
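A mel-spectrogram extraction sketch for TacotronSTFT above, assuming the hypothetical package layout and an older librosa whose mel() accepts positional arguments, as the module expects.

import torch
from tacotron2.layers import TacotronSTFT

taco_stft = TacotronSTFT(filter_length=1024, hop_length=256, win_length=1024,
                         n_mel_channels=80, sampling_rate=22050)
wav = torch.rand(1, 22050) * 2 - 1    # one second of fake audio in [-1, 1]
mel = taco_stft.mel_spectrogram(wav)  # (1, 80, n_frames), log-compressed magnitudes
print(mel.shape)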
#!/usr/bin/env python3 -u # Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. """ Sample from a trained LM; hacked fairseq-interactive """ from collections import namedtuple import os import ast import numpy as np from fairseq import checkpoint_utils, options, tasks, utils import tqdm Batch = namedtuple('Batch', 'ids src_tokens src_lengths') Translation = namedtuple('Translation', 'src_str hypos pos_scores alignments') def make_batches(lines, args, task, max_positions): tokens = [ task.source_dictionary.encode_line( src_str, add_if_not_exist=False ).long() for src_str in lines ] lengths = [t.numel() for t in tokens] itr = task.get_batch_iterator( dataset=task.build_dataset_for_inference(tokens, lengths), max_tokens=args.dataset.max_tokens, max_sentences=args.dataset.batch_size, max_positions=max_positions, ignore_invalid_inputs=args.dataset.skip_invalid_size_inputs_valid_test ).next_epoch_itr(shuffle=False) for batch in itr: yield Batch( ids=batch['id'], src_tokens=batch['net_input']['src_tokens'], src_lengths=batch['net_input']['src_lengths'], ) def main(args): arg_prompts = args.prompts arg_output = args.output arg_debug = args.debug arg_sample_size = args.samples_per_prompt try: from fairseq.dataclass.utils import convert_namespace_to_omegaconf args = convert_namespace_to_omegaconf(args) except: pass # if args.max_tokens is None and args.max_sentences is None: if args.common.seed is not None: np.random.seed(args.common.seed) utils.set_torch_seed(args.common.seed) if args.generation.sampling: args.generation.nbest = args.generation.beam = arg_sample_size task = tasks.setup_task(args.task) overrides = ast.literal_eval(args.common_eval.model_overrides) models, _model_args = checkpoint_utils.load_model_ensemble( args.common_eval.path.split(os.pathsep), arg_overrides=overrides, task=task, suffix=getattr(args, "checkpoint_suffix", ""), ) # Set dictionaries src_dict = task.source_dictionary tgt_dict = task.target_dictionary # Optimize ensemble for generation for model in models: model.prepare_for_inference_(args) model.cuda() # Load alignment dictionary for unknown word replacement # (None if no unknown word replacement, empty if no path to align dictionary) align_dict = utils.load_align_dict(args.generation.replace_unk) max_positions = utils.resolve_max_positions( task.max_positions(), *[model.max_positions() for model in models] ) output_file = open(arg_output, 'w') with open(arg_prompts, 'r') as fin: lines = fin.readlines() split = [x.split('|', 1) for x in lines] seq_id = [x[0] for x in split] prompts = [x[1] for x in split] if args.generation.prefix_size >= 0: prompts = [' '.join(l.split()[:args.generation.prefix_size]) for l in prompts] if arg_debug: prompts = prompts[:10] generator = task.build_generator(models, args.generation) start_id = 0 pbar = tqdm.tqdm(total=len(prompts)) for batch in make_batches(prompts, args, task, max_positions): src_tokens = batch.src_tokens src_lengths = batch.src_lengths src_tokens = src_tokens.cuda() src_lengths = src_lengths.cuda() sample = { 'net_input': { 'src_tokens': src_tokens, 'src_lengths': src_lengths, }, } results = [] translations = task.inference_step(generator, models, sample) for i, (id, hypos) in enumerate(zip(batch.ids.tolist(), translations)): src_tokens_i = utils.strip_pad(src_tokens[i], tgt_dict.pad()) results.append((i + start_id, src_tokens_i, hypos)) # sort output to match input order for id, src_tokens, hypos 
in sorted(results, key=lambda x: x[0]): if src_dict is not None: src_str = src_dict.string( src_tokens, args.common_eval.post_process) # Process top predictions for hypo_id, hypo in enumerate(hypos): _hypo_tokens, hypo_str, _alignment = utils.post_process_prediction( hypo_tokens=hypo['tokens'].int().cpu(), src_str=src_str, alignment=hypo['alignment'], align_dict=align_dict, tgt_dict=tgt_dict, remove_bpe=args.common_eval.post_process, ) detok_hypo_str = hypo_str utterance = detok_hypo_str print(f'{seq_id[id]}__{hypo_id}|{utterance}', file=output_file) pbar.update(1) start_id += len(results) # output_file.close() def cli_main(): parser = options.get_interactive_generation_parser() parser.add_argument('--prompts', type=str, default=None, required=True) parser.add_argument('--output', type=str, default=None, required=True) parser.add_argument('--debug', action='store_true') parser.add_argument('--samples-per-prompt', type=int, default=1) args = options.parse_args_and_arch(parser) np.random.seed(args.seed) utils.set_torch_seed(args.seed) main(args) if __name__ == '__main__': cli_main()
KosmosX-API-main
kosmosX/fairseq/examples/textless_nlp/gslm/ulm/sample.py
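A sketch of the prompt-file format the sampling script above consumes: one "<sequence id>|<unit prompt>" pair per line, split on the first "|"; the unit ids and output path here are made up for illustration.

prompts = [
    ("utt_0001", "12 12 54 54 54 3 3 91"),
    ("utt_0002", "7 7 7 20 20 64"),
]
with open("prompts.txt", "w") as f:
    for seq_id, units in prompts:
        f.write(f"{seq_id}|{units}\n")
# The script then writes "<seq_id>__<hypo_id>|<continuation>" lines to the --output file.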
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from fairseq.tasks import register_task from fairseq.tasks.multilingual_translation import MultilingualTranslationTask from fairseq.utils import safe_hasattr from .loss.latent_depth import LatentLayersKLLoss, LatentLayersSparsityLoss @register_task("multilingual_translation_latent_depth") class MultilingualTranslationTaskLatentDepth(MultilingualTranslationTask): """A task for multiple translation with latent depth. See `"Deep Transformer with Latent Depth" (Li et al., 2020) <https://arxiv.org/pdf/2009.13102.pdf>`_. """ @staticmethod def add_args(parser): """Add task-specific arguments to the parser.""" # fmt: off MultilingualTranslationTask.add_args(parser) parser.add_argument('--encoder-latent-layer', action='store_true', help='latent layer selection in encoder') parser.add_argument('--decoder-latent-layer', action='store_true', help='latent layer selection in decoder') parser.add_argument('--target-layers', default=-1, type=int, help='number of effective layers to learn; -1 means no constraint') parser.add_argument('--sparsity-weight', default=0.0, type=float, help='weight for sparsity loss') parser.add_argument('--share-weight', default=0.0, type=float, help='weight for sharing loss') parser.add_argument('--soft-update', default=1, type=int, help='number of updates with soft sampling') parser.add_argument('--anneal-updates', default=1, type=int, help='number of updates to anneal the KL loss weight') parser.add_argument('--prior', default="uniform", type=str, help='prior used for computing KL loss') # fmt: on def __init__(self, args, dicts, training): super().__init__(args, dicts, training) self.src_langs, self.tgt_langs = zip( *[(lang.split("-")[0], lang.split("-")[1]) for lang in args.lang_pairs] ) if self.training and self.encoder_latent_layer: assert self.args.share_encoders if self.training and self.decoder_latent_layer: assert self.args.share_decoders if training or self.encoder_latent_layer or self.decoder_latent_layer: self.lang_pairs = args.lang_pairs else: self.lang_pairs = ["{}-{}".format(args.source_lang, args.target_lang)] self.eval_lang_pairs = self.lang_pairs self.model_lang_pairs = self.lang_pairs if self.training and (self.encoder_latent_layer or self.decoder_latent_layer): self.kl_loss = LatentLayersKLLoss(self.args) self.sparsity_loss = LatentLayersSparsityLoss(self.args) def _per_lang_pair_train_loss( self, lang_pair, model, update_num, criterion, sample, optimizer, ignore_grad ): src, tgt = lang_pair.split("-") if self.encoder_latent_layer: src_lang_idx = self.src_lang_idx_dict[src] model.models[lang_pair].encoder.set_lang_idx(src_lang_idx) model.models[lang_pair].encoder.layer_select.hard_select = ( update_num > self.args.soft_update ) if self.decoder_latent_layer: tgt_lang_idx = self.tgt_lang_idx_dict[tgt] model.models[lang_pair].decoder.set_lang_idx(tgt_lang_idx) model.models[lang_pair].decoder.layer_select.hard_select = ( update_num > self.args.soft_update ) loss, sample_size, logging_output = criterion( model.models[lang_pair], sample[lang_pair] ) if self.encoder_latent_layer: none_samples = sum( 1 if x is None else 0 for x in model.models[lang_pair].encoder.layer_select.layer_samples ) if none_samples == 0 or self.args.prior != "agged_posterior": loss += self.kl_loss( model.models[lang_pair].encoder.layer_select.layer_samples, src_lang_idx, update_num, sample_size, ) if 
self.decoder_latent_layer: none_samples = sum( 1 if x is None else 0 for x in model.models[lang_pair].decoder.layer_select.layer_samples ) if none_samples == 0 or self.args.prior != "agged_posterior": loss += self.kl_loss( model.models[lang_pair].decoder.layer_select.layer_samples, tgt_lang_idx, update_num, sample_size, ) if ignore_grad: loss *= 0 if hasattr(self, "sparsity_loss") and self.sparsity_loss.is_valid(update_num): # need to retain the graph if sparsity loss needs to be added loss.backward(retain_graph=True) else: optimizer.backward(loss) return loss, sample_size, logging_output def train_step( self, sample, model, criterion, optimizer, update_num, ignore_grad=False ): agg_loss, agg_sample_size, agg_logging_output = super().train_step( sample, model, criterion, optimizer, update_num, ignore_grad ) # compute auxiliary loss from layere sparsity, based on all samples from all languages if hasattr(self, "sparsity_loss") and self.sparsity_loss.is_valid(update_num): sparsity_loss = 0 if self.encoder_latent_layer: sparsity_loss += self.sparsity_loss( next( iter(model.models.values()) ).encoder.layer_select.layer_samples, update_num, agg_sample_size, ) if self.decoder_latent_layer: sparsity_loss += self.sparsity_loss( next( iter(model.models.values()) ).decoder.layer_select.layer_samples, update_num, agg_sample_size, ) if sparsity_loss > 0: optimizer.backward(sparsity_loss) return agg_loss, agg_sample_size, agg_logging_output def _per_lang_pair_valid_loss(self, lang_pair, model, criterion, sample): src, tgt = lang_pair.split("-") if self.encoder_latent_layer: src_lang_idx = self.src_lang_idx_dict[src] model.models[lang_pair].encoder.set_lang_idx(src_lang_idx) if self.decoder_latent_layer: tgt_lang_idx = self.tgt_lang_idx_dict[tgt] model.models[lang_pair].decoder.set_lang_idx(tgt_lang_idx) loss, sample_size, logging_output = criterion( model.models[lang_pair], sample[lang_pair] ) return loss, sample_size, logging_output def inference_step( self, generator, models, sample, prefix_tokens=None, constraints=None ): if self.encoder_latent_layer or self.decoder_latent_layer: for model in models: if self.encoder_latent_layer: assert model.encoder.layer_select is not None src_lang_idx = self.src_lang_idx_dict[self.args.source_lang] model.encoder.set_lang_idx(src_lang_idx) if self.decoder_latent_layer: assert model.decoder.layer_select is not None tgt_lang_idx = self.tgt_lang_idx_dict[self.args.target_lang] model.decoder.set_lang_idx(tgt_lang_idx) return super().inference_step( generator, models, sample, prefix_tokens, constraints ) @property def encoder_latent_layer(self): return ( safe_hasattr(self.args, "encoder_latent_layer") and self.args.encoder_latent_layer ) @property def decoder_latent_layer(self): return ( safe_hasattr(self.args, "decoder_latent_layer") and self.args.decoder_latent_layer ) @property def src_lang_idx_dict(self): return {lang: lang_idx for lang_idx, lang in enumerate(self.src_langs)} @property def tgt_lang_idx_dict(self): return {lang: lang_idx for lang_idx, lang in enumerate(self.tgt_langs)}
KosmosX-API-main
kosmosX/fairseq/examples/latent_depth/latent_depth_src/multilingual_translation_latent_depth.py
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

from . import multilingual_translation_latent_depth  # noqa
from .loss import latent_depth  # noqa
from .models import latent_multilingual_transformer  # noqa
from .modules import latent_layers  # noqa
KosmosX-API-main
kosmosX/fairseq/examples/latent_depth/latent_depth_src/__init__.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import math import torch from torch.nn.modules.loss import _Loss class LatentLayersKLLoss(_Loss): def __init__(self, args): super().__init__() self.args = args def forward(self, layer_samples, lang_idx, update_num, sample_size): prior = self.args.prior samples = layer_samples[lang_idx] eps = 1e-7 if prior == "uniform": # uniform prior kl_loss = (samples * (torch.log(samples + eps) - math.log(0.5))).sum(-1) elif prior == "agged_posterior": # aggregated posterior y_t = torch.stack([x.detach() for x in layer_samples], dim=0) agged_q = torch.sum(y_t, dim=0) row_norm = agged_q.sum(-1) normed_agg_q = agged_q / row_norm kl_loss = ( samples * (torch.log(samples + eps) - torch.log(normed_agg_q + eps)) ).sum(-1) else: raise NotImplementedError("The specified prior is not implemented.") # normalized by number of layers kl_loss /= layer_samples[0].size()[0] kl_weight = min( self.args.sparsity_weight, (update_num - self.args.soft_update) * self.args.sparsity_weight / self.args.anneal_updates, ) kl_loss *= kl_weight * sample_size return kl_loss class LatentLayersSparsityLoss(_Loss): def __init__(self, args): super().__init__() self.args = args def is_valid(self, update_num): if self.args.target_layers <= 0: return False return update_num > (self.args.soft_update + self.args.anneal_updates) def forward(self, layer_samples_list, update_num, sample_size): batch_loss = 0 share_loss = 0 global_sparsity_loss = 0 layer_samples = torch.stack(layer_samples_list, dim=0) if ( self.args.target_layers > 0 or self.args.share_weight > 0 ) and update_num > (self.args.soft_update + self.args.anneal_updates): # anneal sparsity weight if update_num < (self.args.anneal_updates + self.args.soft_update): weight_anneal = 0 elif update_num < (2 * self.args.anneal_updates + self.args.soft_update): weight_anneal = ( (update_num - self.args.soft_update - self.args.anneal_updates) * self.args.share_weight / self.args.anneal_updates ) else: weight_anneal = 1 # compute ratio among languages layer_utilization = torch.sum(layer_samples, dim=0) layer_utilization /= layer_samples.size()[0] if self.args.share_weight > 0: # encouraging sharing across languages share_loss = sum( -1.0 * v * math.log(v) for v in layer_utilization if v > 0 ) batch_loss += ( weight_anneal * self.args.share_weight * sample_size * share_loss ) if self.args.target_layers > 0: # computed expected number of layers selected expeted_layers = sum(layer_utilization) # compute l2 loss wrt target number of layers global_sparsity_loss = (expeted_layers - self.args.target_layers) ** 2 batch_loss += ( weight_anneal * self.args.share_weight * sample_size * global_sparsity_loss ) return batch_loss
KosmosX-API-main
kosmosX/fairseq/examples/latent_depth/latent_depth_src/loss/latent_depth.py
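A standalone numeric sketch of the uniform-prior branch of LatentLayersKLLoss above: a per-layer two-way (keep, skip) distribution is penalized by its KL divergence to a uniform prior and normalized by the layer count; the shapes and the final reduction are one plausible reading, not taken from the training code.

import math
import torch

num_layers = 6
samples = torch.softmax(torch.randn(num_layers, 2), dim=-1)  # per-layer (keep, skip) probabilities
eps = 1e-7
kl_per_layer = (samples * (torch.log(samples + eps) - math.log(0.5))).sum(-1)
kl = kl_per_layer.sum() / num_layers                         # normalized by the number of layers
print(float(kl))  # non-negative; zero only when every layer sits exactly at (0.5, 0.5)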
KosmosX-API-main
kosmosX/fairseq/examples/latent_depth/latent_depth_src/loss/__init__.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from fairseq.models import register_model, register_model_architecture from fairseq.models.multilingual_transformer import MultilingualTransformerModel from fairseq.models.transformer import ( TransformerDecoder, TransformerEncoder, base_architecture, ) from fairseq.utils import safe_hasattr from .latent_transformer import LatentTransformerDecoder, LatentTransformerEncoder @register_model("latent_multilingual_transformer") class LatentMultilingualTransformerModel(MultilingualTransformerModel): """A variant of standard multilingual Transformer models which encoder and/or decoders supports latent depth, as is in "Deep Transformer with Latent Depth" (https://arxiv.org/abs/2009.13102). """ @staticmethod def add_args(parser): """Add model-specific arguments to the parser.""" MultilingualTransformerModel.add_args(parser) parser.add_argument( '--soft-select', action='store_true', help='use soft samples in training an inference', ) parser.add_argument( '--sampling-tau', type=float, default=5., help='sampling temperature', ) @classmethod def _get_module_class(cls, is_encoder, args, lang_dict, embed_tokens, langs): if is_encoder: if safe_hasattr(args, "encoder_latent_layer") and args.encoder_latent_layer: return LatentTransformerEncoder( args, lang_dict, embed_tokens, num_logits=len(langs) ) else: return TransformerEncoder(args, lang_dict, embed_tokens) else: if safe_hasattr(args, "decoder_latent_layer") and args.decoder_latent_layer: return LatentTransformerDecoder( args, lang_dict, embed_tokens, num_logits=len(langs) ) else: return TransformerDecoder(args, lang_dict, embed_tokens) @register_model_architecture( "latent_multilingual_transformer", "latent_multilingual_transformer" ) def latent_multilingual_architecture(args): args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 512) args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 1024) args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 4) args.encoder_layers = getattr(args, "encoder_layers", 12) args.decoder_embed_dim = getattr(args, "decoder_embed_dim", 512) args.decoder_ffn_embed_dim = getattr(args, "decoder_ffn_embed_dim", 1024) args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 4) args.decoder_layers = getattr(args, "decoder_layers", 24) args.share_encoders = getattr(args, "share_encoders", True) args.share_decoders = getattr(args, "share_decoders", True) args.share_encoder_embeddings = getattr(args, "share_encoder_embeddings", True) args.share_decoder_embeddings = getattr(args, "share_decoder_embeddings", True) base_architecture(args)
KosmosX-API-main
kosmosX/fairseq/examples/latent_depth/latent_depth_src/models/latent_multilingual_transformer.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from typing import Any, Dict, Optional import torch.nn as nn from fairseq.models.fairseq_encoder import EncoderOut from fairseq.models.transformer import TransformerDecoder, TransformerEncoder from fairseq.modules import TransformerDecoderLayer, TransformerEncoderLayer from torch import Tensor from ..modules.latent_layers import LayerSelect class LatentTransformerEncoder(TransformerEncoder): """Latent depth (https://arxiv.org/abs/2009.13102) implemented in TransformerEncoder. """ def __init__(self, args, dictionary, embed_tokens, num_logits=1): self.num_logits = num_logits self.num_layers = args.encoder_layers super().__init__(args, dictionary, embed_tokens) self.layer_select = LayerSelect( num_layers=self.num_layers, num_logits=self.num_logits, soft_select=getattr(args, "soft_select", False), sampling_tau=getattr(args, "sampling_tau", 5.), ) self.lang_idx = None self.layers = nn.ModuleList( [self._build_encoder_layer(args, idx) for idx in range(args.encoder_layers)] ) def set_lang_idx(self, lang_idx): self.lang_idx = lang_idx def _build_encoder_layer(self, args, idx=None): return LatentTransformerEncoderLayer(args, idx, layer_select=self.layer_select) def forward(self, src_tokens, src_lengths, return_all_hiddens: bool = False): self.layer_select.sample(self.lang_idx) return super().forward(src_tokens, src_lengths, return_all_hiddens) class LatentTransformerEncoderLayer(TransformerEncoderLayer): """Encoder layer with each (non_residual) block weighted by samples of Bernouli or Gumbel Signmoid samples. Args: args (argparse.Namespace): parsed command-line arguments from standard TransformerEncoderLayer. idx (int): layer index (used to retrieve samples). layer_select (LayerSelect, optional): instance of LayerSelect module with logits parameters and sampling method. """ def __init__(self, args, idx, layer_select=None): super().__init__(args) self.idx = idx self.layer_select = layer_select def residual_connection(self, x, residual): return residual + x * self.layer_select(self.idx) class LatentTransformerDecoder(TransformerDecoder): """Latent depth (https://arxiv.org/abs/2009.13102) implemented in TransformerDecoder. 
""" def __init__( self, args, dictionary, embed_tokens, no_encoder_attn=False, num_logits=1 ): self.num_logits = num_logits self.num_layers = args.decoder_layers super().__init__( args, dictionary, embed_tokens, no_encoder_attn=no_encoder_attn ) self.layer_select = LayerSelect( num_layers=self.num_layers, num_logits=self.num_logits, soft_select=getattr(args, "soft_select", False), sampling_tau=getattr(args, "sampling_tau", 5.), ) self.lang_idx = None self.layers = nn.ModuleList( [ self._build_decoder_layer(args, no_encoder_attn, idx) for idx in range(args.decoder_layers) ] ) def set_lang_idx(self, lang_idx): self.lang_idx = lang_idx def _build_decoder_layer(self, args, no_encoder_attn=False, idx=None): return LatentTransformerDecoderLayer( args, idx, layer_select=self.layer_select, no_encoder_attn=no_encoder_attn ) def forward( self, prev_output_tokens, encoder_out: Optional[EncoderOut] = None, incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]] = None, features_only: bool = False, alignment_layer: Optional[int] = None, alignment_heads: Optional[int] = None, src_lengths: Optional[Any] = None, return_all_hiddens: bool = False, ): self.layer_select.sample(self.lang_idx) return super().forward( prev_output_tokens=prev_output_tokens, encoder_out=encoder_out, incremental_state=incremental_state, features_only=features_only, alignment_layer=alignment_layer, src_lengths=src_lengths, return_all_hiddens=return_all_hiddens, ) class LatentTransformerDecoderLayer(TransformerDecoderLayer): """Decoder layer with each (non_residual) block weighted by samples of Bernouli or Gumbel Signmoid samples. Args: args (argparse.Namespace): parsed command-line arguments from standard TransformerDecoderLayer. idx (int): layer index (used to retrieve samples). layer_select (LayerSelect, optional): instance of LayerSelect module with logits parameters and sampling method. no_encoder_attn (bool, optional): whether to attend to encoder outputs (default: False). """ def __init__( self, args, idx, layer_select=None, no_encoder_attn=False, add_bias_kv=False, add_zero_attn=False, ): super().__init__(args, no_encoder_attn, add_bias_kv, add_zero_attn) self.idx = idx self.layer_select = layer_select def residual_connection(self, x, residual): return residual + x * self.layer_select(self.idx)
KosmosX-API-main
kosmosX/fairseq/examples/latent_depth/latent_depth_src/models/latent_transformer.py
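A standalone sketch of the weighted residual connection that both latent layers above use: when the per-layer sample is close to 0 the block output is suppressed and the layer is effectively skipped. The tensors here are dummies.

import torch

def weighted_residual(x, residual, sample):
    # Same form as residual_connection() in the latent encoder/decoder layers.
    return residual + x * sample

residual = torch.randn(4, 8)
block_out = torch.randn(4, 8)
print(torch.allclose(weighted_residual(block_out, residual, 0.0), residual))              # True: layer skipped
print(torch.allclose(weighted_residual(block_out, residual, 1.0), residual + block_out))  # True: normal layer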
KosmosX-API-main
kosmosX/fairseq/examples/latent_depth/latent_depth_src/models/__init__.py
KosmosX-API-main
kosmosX/fairseq/examples/latent_depth/latent_depth_src/modules/__init__.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import torch import torch.nn as nn class LayerSelect(nn.Module): """Compute samples (from a Gumbel-Sigmoid distribution) which is used as either (soft) weighting or (hard) selection of residual connection. https://arxiv.org/abs/2009.13102 """ def __init__(self, num_layers, num_logits, soft_select=False, sampling_tau=5.): super(LayerSelect, self).__init__() self.layer_logits = torch.nn.Parameter( torch.Tensor(num_logits, num_layers), requires_grad=True, ) self.hard_select = not soft_select self.tau = sampling_tau self.detach_grad = False self.layer_samples = [None] * num_logits def sample(self, logit_idx): """To leverage the efficiency of distributed training, samples for all layers are computed at once for each logit_idx. Logits are parameters learnt independent of each other. Args: logit_idx: The index of logit parameters used for sampling. """ assert logit_idx is not None self.samples = self._gumbel_sigmoid( self.layer_logits[logit_idx, :].detach() if self.detach_grad else self.layer_logits[logit_idx, :], dim=-1, tau=self.tau, hard=self.hard_select, ) self.layer_samples[logit_idx] = self.samples def forward(self, i): sample = self.samples[i] return sample def _gumbel_sigmoid( self, logits, tau=1, hard=False, eps=1e-10, dim=-1, threshold=0.5 ): # ~Gumbel(0,1) gumbels1 = ( -torch.empty_like(logits, memory_format=torch.legacy_contiguous_format) .exponential_() .log() ) gumbels2 = ( -torch.empty_like(logits, memory_format=torch.legacy_contiguous_format) .exponential_() .log() ) # Difference of two gumbels because we apply a sigmoid gumbels1 = (logits + gumbels1 - gumbels2) / tau y_soft = gumbels1.sigmoid() if hard: # Straight through. y_hard = torch.zeros_like( logits, memory_format=torch.legacy_contiguous_format ).masked_fill(y_soft > threshold, 1.0) ret = y_hard - y_soft.detach() + y_soft else: # Reparametrization trick. ret = y_soft return ret
KosmosX-API-main
kosmosX/fairseq/examples/latent_depth/latent_depth_src/modules/latent_layers.py
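A hypothetical usage sketch for LayerSelect above; the import path mirrors the file path shown, and the logits are zero-initialized here only so the example is well defined (the parameter is created from uninitialized memory).

import torch

from examples.latent_depth.latent_depth_src.modules.latent_layers import LayerSelect

layer_select = LayerSelect(num_layers=6, num_logits=2, soft_select=False)
torch.nn.init.zeros_(layer_select.layer_logits)  # zero logits -> roughly Bernoulli(0.5) gates
layer_select.sample(0)                           # one Gumbel-Sigmoid sample per layer for logit 0
gates = torch.stack([layer_select(i) for i in range(6)])
print(gates)  # hard 0/1 values with a straight-through gradient path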
#!/usr/bin/env python3 # Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. """ Scoring script for computing pairwise BLEU and multi-ref BLEU over a set of candidate hypotheses. See `"Mixture Models for Diverse Machine Translation: Tricks of the Trade" (Shen et al., 2019) <https://arxiv.org/abs/1902.07816>`_. """ import argparse import random import sys from itertools import chain import numpy as np from sacrebleu import compute_bleu, corpus_bleu as _corpus_bleu def main(): parser = argparse.ArgumentParser(sys.argv[0]) parser.add_argument( "--sys", nargs="*", default="", metavar="FILE", help="path to system output" ) parser.add_argument("--ref", default="", metavar="FILE", help="path to references") parser.add_argument( "--output", default="", metavar="FILE", help="print outputs into a pretty format", ) args = parser.parse_args() if args.sys: src, tgt, hypos, log_probs = load_sys(args.sys) print("pairwise BLEU: %.2f" % pairwise(hypos)) if args.output: merge(src, tgt, hypos, log_probs, args.output) if args.ref: _, _, refs = load_ref(args.ref) if args.sys: multi_ref(refs, hypos) else: intra_ref(refs) def dictolist(d): a = sorted(d.items(), key=lambda i: i[0]) return [i[1] for i in a] def load_sys(paths): src, tgt, hypos, log_probs = {}, {}, {}, {} for path in paths: with open(path) as f: for line in f: line = line.rstrip() # S: source # T: target # D: detokenized system output if line.startswith(("S-", "T-", "D-")): i = int(line[line.find("-") + 1 : line.find("\t")]) if line.startswith("S-"): src[i] = line.split("\t")[1] if line.startswith("T-"): tgt[i] = line.split("\t")[1] if line.startswith("D-"): if i not in hypos: hypos[i] = [] log_probs[i] = [] hypos[i].append(line.split("\t")[2]) log_probs[i].append(float(line.split("\t")[1])) return dictolist(src), dictolist(tgt), dictolist(hypos), dictolist(log_probs) def load_ref(path): with open(path) as f: lines = f.readlines() src, tgt, refs = [], [], [] i = 0 while i < len(lines): if lines[i].startswith("S-"): src.append(lines[i].split("\t")[1].rstrip()) i += 1 elif lines[i].startswith("T-"): tgt.append(lines[i].split("\t")[1].rstrip()) i += 1 else: a = [] while i < len(lines) and lines[i].startswith("R"): a.append(lines[i].split("\t")[1].rstrip()) i += 1 refs.append(a) return src, tgt, refs def merge(src, tgt, hypos, log_probs, path): with open(path, "w") as f: for s, t, hs, lps in zip(src, tgt, hypos, log_probs): f.write(s + "\n") f.write(t + "\n") f.write("\n") for h, lp in zip(hs, lps): f.write("\t%f\t%s\n" % (lp, h.strip())) f.write("------------------------------------------------------\n") def corpus_bleu(sys_stream, ref_streams): bleu = _corpus_bleu(sys_stream, ref_streams, tokenize="none") return bleu.score def sentence_bleu(hypothesis, reference): bleu = _corpus_bleu(hypothesis, reference) for i in range(1, 4): bleu.counts[i] += 1 bleu.totals[i] += 1 bleu = compute_bleu( bleu.counts, bleu.totals, bleu.sys_len, bleu.ref_len, smooth_method="exp", ) return bleu.score def pairwise(sents): _ref, _hypo = [], [] for s in sents: for i in range(len(s)): for j in range(len(s)): if i != j: _ref.append(s[i]) _hypo.append(s[j]) return corpus_bleu(_hypo, [_ref]) def multi_ref(refs, hypos): _ref, _hypo = [], [] ref_cnt = 0 assert len(refs) == len(hypos) # count number of refs covered for rs, hs in zip(refs, hypos): a = set() for h in hs: s = [sentence_bleu(h, r) for r in rs] j = np.argmax(s) _ref.append(rs[j]) _hypo.append(h) 
best = [k for k in range(len(rs)) if s[k] == s[j]] a.add(random.choice(best)) ref_cnt += len(a) print("#refs covered: %.2f" % (ref_cnt / len(refs))) # transpose refs and hypos refs = list(zip(*refs)) hypos = list(zip(*hypos)) # compute multi-ref corpus BLEU (leave-one-out to be comparable to intra_ref) k = len(hypos) m = len(refs) flat_hypos = [hypos[j][i] for i in range(len(hypos[0])) for j in range(k)] duplicated_refs = [[ref for ref in refs_i for _ in range(k)] for refs_i in refs] loo_bleus = [] for held_out_ref in range(m): remaining_refs = ( duplicated_refs[:held_out_ref] + duplicated_refs[held_out_ref + 1 :] ) assert len(remaining_refs) == m - 1 loo_bleus.append(corpus_bleu(flat_hypos, remaining_refs)) print("average multi-reference BLEU (leave-one-out): %.2f" % np.mean(loo_bleus)) def intra_ref(refs): print("ref pairwise BLEU: %.2f" % pairwise(refs)) refs = list(zip(*refs)) m = len(refs) concat_h = [] concat_rest = [[] for j in range(m - 1)] for i, h in enumerate(refs): rest = refs[:i] + refs[i + 1 :] concat_h.append(h) for j in range(m - 1): concat_rest[j].extend(rest[j]) concat_h = list(chain.from_iterable(concat_h)) bleu = corpus_bleu(concat_h, concat_rest) print("multi-reference BLEU (leave-one-out): %.2f" % bleu) if __name__ == "__main__": main()
KosmosX-API-main
kosmosX/fairseq/examples/translation_moe/score.py
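Sketch of the pairing that pairwise() above builds before calling corpus BLEU: every hypothesis for a sentence is treated as a candidate against every other hypothesis of the same sentence. The strings are toy data and the BLEU call itself is omitted so the snippet stays dependency-free.

sents = [["a cat sat", "a cat sits", "the cat sat"]]  # hypotheses for one source sentence
_ref, _hypo = [], []
for s in sents:
    for i in range(len(s)):
        for j in range(len(s)):
            if i != j:
                _ref.append(s[i])
                _hypo.append(s[j])
print(len(_hypo))  # 6 ordered (hypothesis, reference) pairs for 3 hypotheses
# pairwise BLEU would now be corpus_bleu(_hypo, [_ref]) with tokenize="none"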
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import torch import torch.nn.functional as F class MeanPoolGatingNetwork(torch.nn.Module): """A simple mean-pooling gating network for selecting experts. This module applies mean pooling over an encoder's output and returns responsibilities for each expert. The encoder format is expected to match :class:`fairseq.models.transformer.TransformerEncoder`. """ def __init__(self, embed_dim, num_experts, dropout=None): super().__init__() self.embed_dim = embed_dim self.num_experts = num_experts self.fc1 = torch.nn.Linear(embed_dim, embed_dim) self.dropout = torch.nn.Dropout(dropout) if dropout is not None else None self.fc2 = torch.nn.Linear(embed_dim, num_experts) def forward(self, encoder_out): if not ( "encoder_out" in encoder_out and "encoder_padding_mask" in encoder_out and encoder_out["encoder_out"][0].size(2) == self.embed_dim ): raise ValueError("Unexpected format for encoder_out") # mean pooling over time encoder_padding_mask = encoder_out["encoder_padding_mask"][0] # B x T encoder_out = encoder_out["encoder_out"][0].transpose(0, 1) # B x T x C if encoder_padding_mask is not None: encoder_out = encoder_out.clone() # required because of transpose above encoder_out[encoder_padding_mask] = 0 ntokens = torch.sum(~encoder_padding_mask, dim=1, keepdim=True) x = torch.sum(encoder_out, dim=1) / ntokens.type_as(encoder_out) else: x = torch.mean(encoder_out, dim=1) x = torch.tanh(self.fc1(x)) if self.dropout is not None: x = self.dropout(x) x = self.fc2(x) return F.log_softmax(x, dim=-1, dtype=torch.float32).type_as(x)
KosmosX-API-main
kosmosX/fairseq/examples/translation_moe/translation_moe_src/mean_pool_gating_network.py
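A usage sketch for the gating network above; the encoder_out dict is hand-built in the list-of-tensors format returned by fairseq's TransformerEncoder, with T x B x C outputs and a B x T padding mask.

import torch

from examples.translation_moe.translation_moe_src.mean_pool_gating_network import (
    MeanPoolGatingNetwork,
)

gate = MeanPoolGatingNetwork(embed_dim=16, num_experts=3, dropout=0.1)
encoder_out = {
    "encoder_out": [torch.randn(7, 2, 16)],                         # T x B x C
    "encoder_padding_mask": [torch.zeros(2, 7, dtype=torch.bool)],  # B x T, no padding
}
log_prior = gate(encoder_out)        # B x num_experts expert log-probabilities
print(log_prior.exp().sum(dim=-1))   # ~1.0 for each sentence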
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from . import translation_moe # noqa
KosmosX-API-main
kosmosX/fairseq/examples/translation_moe/translation_moe_src/__init__.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import torch class LogSumExpMoE(torch.autograd.Function): """Standard LogSumExp forward pass, but use *posterior* for the backward. See `"Mixture Models for Diverse Machine Translation: Tricks of the Trade" (Shen et al., 2019) <https://arxiv.org/abs/1902.07816>`_. """ @staticmethod def forward(ctx, logp, posterior, dim=-1): ctx.save_for_backward(posterior) ctx.dim = dim return torch.logsumexp(logp, dim=dim) @staticmethod def backward(ctx, grad_output): (posterior,) = ctx.saved_tensors grad_logp = grad_output.unsqueeze(ctx.dim) * posterior return grad_logp, None, None
KosmosX-API-main
kosmosX/fairseq/examples/translation_moe/translation_moe_src/logsumexp_moe.py
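A quick check of the custom autograd function above with dummy tensors: the forward pass matches torch.logsumexp, while the backward pass distributes the incoming gradient according to the supplied posterior.

import torch

from examples.translation_moe.translation_moe_src.logsumexp_moe import LogSumExpMoE

logp = torch.randn(4, 3, requires_grad=True)
posterior = torch.softmax(torch.randn(4, 3), dim=-1)
out = LogSumExpMoE.apply(logp, posterior, 1)
print(torch.allclose(out, torch.logsumexp(logp, dim=1)))  # True
out.sum().backward()
print(torch.allclose(logp.grad, posterior))               # True: the gradient is the posterior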
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from dataclasses import dataclass, field import torch from omegaconf import II from fairseq import metrics, utils from fairseq.dataclass import ChoiceEnum from fairseq.tasks import register_task from fairseq.tasks.translation import TranslationConfig, TranslationTask from .logsumexp_moe import LogSumExpMoE from .mean_pool_gating_network import MeanPoolGatingNetwork METHOD_CHOICES = ChoiceEnum(["sMoElp", "sMoEup", "hMoElp", "hMoEup"]) @dataclass class TranslationMoEConfig(TranslationConfig): method: METHOD_CHOICES = field( default="hMoEup", metadata={"help": "MoE method"}, ) num_experts: int = field( default=3, metadata={"help": "number of experts"}, ) mean_pool_gating_network: bool = field( default=False, metadata={"help": "use a simple mean-pooling gating network"}, ) mean_pool_gating_network_dropout: float = field( default=0, metadata={"help": "dropout for mean-pooling gating network"}, ) mean_pool_gating_network_encoder_dim: int = field( default=0, metadata={"help": "encoder output dim for mean-pooling gating network"}, ) gen_expert: int = field( default=0, metadata={"help": "which expert to use for generation"}, ) sentence_avg: bool = II("optimization.sentence_avg") @register_task("translation_moe", dataclass=TranslationMoEConfig) class TranslationMoETask(TranslationTask): """ Translation task for Mixture of Experts (MoE) models. See `"Mixture Models for Diverse Machine Translation: Tricks of the Trade" (Shen et al., 2019) <https://arxiv.org/abs/1902.07816>`_. Args: src_dict (~fairseq.data.Dictionary): dictionary for the source language tgt_dict (~fairseq.data.Dictionary): dictionary for the target language .. note:: The translation task is compatible with :mod:`fairseq-train`, :mod:`fairseq-generate` and :mod:`fairseq-interactive`. The translation task provides the following additional command-line arguments: .. 
argparse:: :ref: fairseq.tasks.translation_parser :prog: """ cfg: TranslationMoEConfig def __init__(self, cfg: TranslationMoEConfig, src_dict, tgt_dict): if cfg.method == "sMoElp": # soft MoE with learned prior self.uniform_prior = False self.hard_selection = False elif cfg.method == "sMoEup": # soft MoE with uniform prior self.uniform_prior = True self.hard_selection = False elif cfg.method == "hMoElp": # hard MoE with learned prior self.uniform_prior = False self.hard_selection = True elif cfg.method == "hMoEup": # hard MoE with uniform prior self.uniform_prior = True self.hard_selection = True # add indicator tokens for each expert for i in range(cfg.num_experts): # add to both dictionaries in case we're sharing embeddings src_dict.add_symbol("<expert_{}>".format(i)) tgt_dict.add_symbol("<expert_{}>".format(i)) super().__init__(cfg, src_dict, tgt_dict) def build_model(self, cfg, from_checkpoint=False): from fairseq import models model = models.build_model(cfg, self) if not self.uniform_prior and not hasattr(model, "gating_network"): if self.cfg.mean_pool_gating_network: if self.cfg.mean_pool_gating_network_encoder_dim > 0: encoder_dim = self.cfg.mean_pool_gating_network_encoder_dim elif getattr(cfg, "encoder_embed_dim", None): # assume that encoder_embed_dim is the encoder's output dimension encoder_dim = cfg.encoder_embed_dim else: raise ValueError( "Must specify --mean-pool-gating-network-encoder-dim" ) if self.cfg.mean_pool_gating_network_dropout > 0: dropout = self.cfg.mean_pool_gating_network_dropout elif getattr(cfg, "dropout", None): dropout = cfg.dropout else: raise ValueError("Must specify task.mean_pool_gating_network_dropout") model.gating_network = MeanPoolGatingNetwork( encoder_dim, self.cfg.num_experts, dropout, ) else: raise ValueError( "translation_moe task with learned prior requires the model to " "have a gating network; try using --mean-pool-gating-network" ) return model def expert_index(self, i): return i + self.tgt_dict.index("<expert_0>") def _get_loss(self, sample, model, criterion): assert hasattr( criterion, "compute_loss" ), "translation_moe task requires the criterion to implement the compute_loss() method" k = self.cfg.num_experts bsz = sample["target"].size(0) def get_lprob_y(encoder_out, prev_output_tokens_k): net_output = model.decoder( prev_output_tokens=prev_output_tokens_k, encoder_out=encoder_out, ) loss, _ = criterion.compute_loss(model, net_output, sample, reduce=False) loss = loss.view(bsz, -1) return -loss.sum(dim=1, keepdim=True) # -> B x 1 def get_lprob_yz(winners=None): encoder_out = model.encoder( src_tokens=sample["net_input"]["src_tokens"], src_lengths=sample["net_input"]["src_lengths"], ) if winners is None: lprob_y = [] for i in range(k): prev_output_tokens_k = sample["net_input"][ "prev_output_tokens" ].clone() assert not prev_output_tokens_k.requires_grad prev_output_tokens_k[:, 0] = self.expert_index(i) lprob_y.append(get_lprob_y(encoder_out, prev_output_tokens_k)) lprob_y = torch.cat(lprob_y, dim=1) # -> B x K else: prev_output_tokens_k = sample["net_input"]["prev_output_tokens"].clone() prev_output_tokens_k[:, 0] = self.expert_index(winners) lprob_y = get_lprob_y(encoder_out, prev_output_tokens_k) # -> B if self.uniform_prior: lprob_yz = lprob_y else: lprob_z = model.gating_network(encoder_out) # B x K if winners is not None: lprob_z = lprob_z.gather(dim=1, index=winners.unsqueeze(-1)) lprob_yz = lprob_y + lprob_z.type_as(lprob_y) # B x K return lprob_yz # compute responsibilities without dropout with utils.model_eval(model): # 
disable dropout with torch.no_grad(): # disable autograd lprob_yz = get_lprob_yz() # B x K prob_z_xy = torch.nn.functional.softmax(lprob_yz, dim=1) assert not prob_z_xy.requires_grad # compute loss with dropout if self.hard_selection: winners = prob_z_xy.max(dim=1)[1] loss = -get_lprob_yz(winners) else: lprob_yz = get_lprob_yz() # B x K loss = -LogSumExpMoE.apply(lprob_yz, prob_z_xy, 1) loss = loss.sum() sample_size = ( sample["target"].size(0) if self.cfg.sentence_avg else sample["ntokens"] ) logging_output = { "loss": utils.item(loss.data), "ntokens": sample["ntokens"], "nsentences": bsz, "sample_size": sample_size, "posterior": prob_z_xy.float().sum(dim=0).cpu(), } return loss, sample_size, logging_output def train_step( self, sample, model, criterion, optimizer, update_num, ignore_grad=False ): model.train() loss, sample_size, logging_output = self._get_loss(sample, model, criterion) if ignore_grad: loss *= 0 optimizer.backward(loss) return loss, sample_size, logging_output def valid_step(self, sample, model, criterion): model.eval() with torch.no_grad(): loss, sample_size, logging_output = self._get_loss(sample, model, criterion) return loss, sample_size, logging_output def inference_step( self, generator, models, sample, prefix_tokens=None, expert=None, constraints=None, ): expert = expert or self.cfg.gen_expert with torch.no_grad(): return generator.generate( models, sample, prefix_tokens=prefix_tokens, constraints=constraints, bos_token=self.expert_index(expert), ) def reduce_metrics(self, logging_outputs, criterion): super().reduce_metrics(logging_outputs, criterion) metrics.log_scalar( "posterior", sum(log["posterior"] for log in logging_outputs if "posterior" in log), )
KosmosX-API-main
kosmosX/fairseq/examples/translation_moe/translation_moe_src/translation_moe.py
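Sketch of the expert-indicator trick used by the task above: each expert gets a dedicated symbol in the dictionary, and the first position of prev_output_tokens is overwritten with it so the decoder is conditioned on the chosen expert. The dictionary and tensor here are dummies.

import torch

from fairseq.data import Dictionary

tgt_dict = Dictionary()
for i in range(3):
    tgt_dict.add_symbol("<expert_{}>".format(i))

def expert_index(i):
    return i + tgt_dict.index("<expert_0>")

prev_output_tokens = torch.full((2, 5), tgt_dict.eos(), dtype=torch.long)
prev_output_tokens[:, 0] = expert_index(2)  # condition both sentences on expert 2
print(prev_output_tokens[:, 0])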
from . import criterions, models, tasks # noqa
KosmosX-API-main
kosmosX/fairseq/examples/speech_recognition/__init__.py
#!/usr/bin/env python3 # Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. """ Flashlight decoders. """ import gc import itertools as it import os.path as osp from typing import List import warnings from collections import deque, namedtuple import numpy as np import torch from fairseq import tasks from fairseq.utils import apply_to_sample from omegaconf import open_dict from fairseq.dataclass.utils import convert_namespace_to_omegaconf try: from flashlight.lib.text.dictionary import create_word_dict, load_words from flashlight.lib.sequence.criterion import CpuViterbiPath, get_data_ptr_as_bytes from flashlight.lib.text.decoder import ( CriterionType, LexiconDecoderOptions, KenLM, LM, LMState, SmearingMode, Trie, LexiconDecoder, ) except: warnings.warn( "flashlight python bindings are required to use this functionality. Please install from https://github.com/facebookresearch/flashlight/tree/master/bindings/python" ) LM = object LMState = object class W2lDecoder(object): def __init__(self, args, tgt_dict): self.tgt_dict = tgt_dict self.vocab_size = len(tgt_dict) self.nbest = args.nbest # criterion-specific init self.criterion_type = CriterionType.CTC self.blank = ( tgt_dict.index("<ctc_blank>") if "<ctc_blank>" in tgt_dict.indices else tgt_dict.bos() ) if "<sep>" in tgt_dict.indices: self.silence = tgt_dict.index("<sep>") elif "|" in tgt_dict.indices: self.silence = tgt_dict.index("|") else: self.silence = tgt_dict.eos() self.asg_transitions = None def generate(self, models, sample, **unused): """Generate a batch of inferences.""" # model.forward normally channels prev_output_tokens into the decoder # separately, but SequenceGenerator directly calls model.encoder encoder_input = { k: v for k, v in sample["net_input"].items() if k != "prev_output_tokens" } emissions = self.get_emissions(models, encoder_input) return self.decode(emissions) def get_emissions(self, models, encoder_input): """Run encoder and normalize emissions""" model = models[0] encoder_out = model(**encoder_input) if hasattr(model, "get_logits"): emissions = model.get_logits(encoder_out) # no need to normalize emissions else: emissions = model.get_normalized_probs(encoder_out, log_probs=True) return emissions.transpose(0, 1).float().cpu().contiguous() def get_tokens(self, idxs): """Normalize tokens by handling CTC blank, ASG replabels, etc.""" idxs = (g[0] for g in it.groupby(idxs)) idxs = filter(lambda x: x != self.blank, idxs) return torch.LongTensor(list(idxs)) class W2lViterbiDecoder(W2lDecoder): def __init__(self, args, tgt_dict): super().__init__(args, tgt_dict) def decode(self, emissions): B, T, N = emissions.size() if self.asg_transitions is None: transitions = torch.FloatTensor(N, N).zero_() else: transitions = torch.FloatTensor(self.asg_transitions).view(N, N) viterbi_path = torch.IntTensor(B, T) workspace = torch.ByteTensor(CpuViterbiPath.get_workspace_size(B, T, N)) CpuViterbiPath.compute( B, T, N, get_data_ptr_as_bytes(emissions), get_data_ptr_as_bytes(transitions), get_data_ptr_as_bytes(viterbi_path), get_data_ptr_as_bytes(workspace), ) return [ [{"tokens": self.get_tokens(viterbi_path[b].tolist()), "score": 0}] for b in range(B) ] class W2lKenLMDecoder(W2lDecoder): def __init__(self, args, tgt_dict): super().__init__(args, tgt_dict) self.unit_lm = getattr(args, "unit_lm", False) if args.lexicon: self.lexicon = load_words(args.lexicon) self.word_dict = create_word_dict(self.lexicon) 
self.unk_word = self.word_dict.get_index("<unk>") self.lm = KenLM(args.kenlm_model, self.word_dict) self.trie = Trie(self.vocab_size, self.silence) start_state = self.lm.start(False) for i, (word, spellings) in enumerate(self.lexicon.items()): word_idx = self.word_dict.get_index(word) _, score = self.lm.score(start_state, word_idx) for spelling in spellings: spelling_idxs = [tgt_dict.index(token) for token in spelling] assert ( tgt_dict.unk() not in spelling_idxs ), f"{spelling} {spelling_idxs}" self.trie.insert(spelling_idxs, word_idx, score) self.trie.smear(SmearingMode.MAX) self.decoder_opts = LexiconDecoderOptions( beam_size=args.beam, beam_size_token=int(getattr(args, "beam_size_token", len(tgt_dict))), beam_threshold=args.beam_threshold, lm_weight=args.lm_weight, word_score=args.word_score, unk_score=args.unk_weight, sil_score=args.sil_weight, log_add=False, criterion_type=self.criterion_type, ) if self.asg_transitions is None: # self.asg_transitions = torch.FloatTensor(N, N).zero_() self.asg_transitions = [] self.decoder = LexiconDecoder( self.decoder_opts, self.trie, self.lm, self.silence, self.blank, self.unk_word, self.asg_transitions, self.unit_lm, ) else: assert args.unit_lm, "lexicon free decoding can only be done with a unit language model" from flashlight.lib.text.decoder import LexiconFreeDecoder, LexiconFreeDecoderOptions d = {w: [[w]] for w in tgt_dict.symbols} self.word_dict = create_word_dict(d) self.lm = KenLM(args.kenlm_model, self.word_dict) self.decoder_opts = LexiconFreeDecoderOptions( beam_size=args.beam, beam_size_token=int(getattr(args, "beam_size_token", len(tgt_dict))), beam_threshold=args.beam_threshold, lm_weight=args.lm_weight, sil_score=args.sil_weight, log_add=False, criterion_type=self.criterion_type, ) self.decoder = LexiconFreeDecoder( self.decoder_opts, self.lm, self.silence, self.blank, [] ) def get_timesteps(self, token_idxs: List[int]) -> List[int]: """Returns frame numbers corresponding to every non-blank token. Parameters ---------- token_idxs : List[int] IDs of decoded tokens. Returns ------- List[int] Frame numbers corresponding to every non-blank token. 
""" timesteps = [] for i, token_idx in enumerate(token_idxs): if token_idx == self.blank: continue if i == 0 or token_idx != token_idxs[i-1]: timesteps.append(i) return timesteps def decode(self, emissions): B, T, N = emissions.size() hypos = [] for b in range(B): emissions_ptr = emissions.data_ptr() + 4 * b * emissions.stride(0) results = self.decoder.decode(emissions_ptr, T, N) nbest_results = results[: self.nbest] hypos.append( [ { "tokens": self.get_tokens(result.tokens), "score": result.score, "timesteps": self.get_timesteps(result.tokens), "words": [ self.word_dict.get_entry(x) for x in result.words if x >= 0 ], } for result in nbest_results ] ) return hypos FairseqLMState = namedtuple("FairseqLMState", ["prefix", "incremental_state", "probs"]) class FairseqLM(LM): def __init__(self, dictionary, model): LM.__init__(self) self.dictionary = dictionary self.model = model self.unk = self.dictionary.unk() self.save_incremental = False # this currently does not work properly self.max_cache = 20_000 model.cuda() model.eval() model.make_generation_fast_() self.states = {} self.stateq = deque() def start(self, start_with_nothing): state = LMState() prefix = torch.LongTensor([[self.dictionary.eos()]]) incremental_state = {} if self.save_incremental else None with torch.no_grad(): res = self.model(prefix.cuda(), incremental_state=incremental_state) probs = self.model.get_normalized_probs(res, log_probs=True, sample=None) if incremental_state is not None: incremental_state = apply_to_sample(lambda x: x.cpu(), incremental_state) self.states[state] = FairseqLMState( prefix.numpy(), incremental_state, probs[0, -1].cpu().numpy() ) self.stateq.append(state) return state def score(self, state: LMState, token_index: int, no_cache: bool = False): """ Evaluate language model based on the current lm state and new word Parameters: ----------- state: current lm state token_index: index of the word (can be lexicon index then you should store inside LM the mapping between indices of lexicon and lm, or lm index of a word) Returns: -------- (LMState, float): pair of (new state, score for the current word) """ curr_state = self.states[state] def trim_cache(targ_size): while len(self.stateq) > targ_size: rem_k = self.stateq.popleft() rem_st = self.states[rem_k] rem_st = FairseqLMState(rem_st.prefix, None, None) self.states[rem_k] = rem_st if curr_state.probs is None: new_incremental_state = ( curr_state.incremental_state.copy() if curr_state.incremental_state is not None else None ) with torch.no_grad(): if new_incremental_state is not None: new_incremental_state = apply_to_sample( lambda x: x.cuda(), new_incremental_state ) elif self.save_incremental: new_incremental_state = {} res = self.model( torch.from_numpy(curr_state.prefix).cuda(), incremental_state=new_incremental_state, ) probs = self.model.get_normalized_probs( res, log_probs=True, sample=None ) if new_incremental_state is not None: new_incremental_state = apply_to_sample( lambda x: x.cpu(), new_incremental_state ) curr_state = FairseqLMState( curr_state.prefix, new_incremental_state, probs[0, -1].cpu().numpy() ) if not no_cache: self.states[state] = curr_state self.stateq.append(state) score = curr_state.probs[token_index].item() trim_cache(self.max_cache) outstate = state.child(token_index) if outstate not in self.states and not no_cache: prefix = np.concatenate( [curr_state.prefix, torch.LongTensor([[token_index]])], -1 ) incr_state = curr_state.incremental_state self.states[outstate] = FairseqLMState(prefix, incr_state, None) if token_index == 
self.unk: score = float("-inf") return outstate, score def finish(self, state: LMState): """ Evaluate eos for language model based on the current lm state Returns: -------- (LMState, float): pair of (new state, score for the current word) """ return self.score(state, self.dictionary.eos()) def empty_cache(self): self.states = {} self.stateq = deque() gc.collect() class W2lFairseqLMDecoder(W2lDecoder): def __init__(self, args, tgt_dict): super().__init__(args, tgt_dict) self.unit_lm = getattr(args, "unit_lm", False) self.lexicon = load_words(args.lexicon) if args.lexicon else None self.idx_to_wrd = {} checkpoint = torch.load(args.kenlm_model, map_location="cpu") if "cfg" in checkpoint and checkpoint["cfg"] is not None: lm_args = checkpoint["cfg"] else: lm_args = convert_namespace_to_omegaconf(checkpoint["args"]) with open_dict(lm_args.task): lm_args.task.data = osp.dirname(args.kenlm_model) task = tasks.setup_task(lm_args.task) model = task.build_model(lm_args.model) model.load_state_dict(checkpoint["model"], strict=False) self.trie = Trie(self.vocab_size, self.silence) self.word_dict = task.dictionary self.unk_word = self.word_dict.unk() self.lm = FairseqLM(self.word_dict, model) if self.lexicon: start_state = self.lm.start(False) for i, (word, spellings) in enumerate(self.lexicon.items()): if self.unit_lm: word_idx = i self.idx_to_wrd[i] = word score = 0 else: word_idx = self.word_dict.index(word) _, score = self.lm.score(start_state, word_idx, no_cache=True) for spelling in spellings: spelling_idxs = [tgt_dict.index(token) for token in spelling] assert ( tgt_dict.unk() not in spelling_idxs ), f"{spelling} {spelling_idxs}" self.trie.insert(spelling_idxs, word_idx, score) self.trie.smear(SmearingMode.MAX) self.decoder_opts = LexiconDecoderOptions( beam_size=args.beam, beam_size_token=int(getattr(args, "beam_size_token", len(tgt_dict))), beam_threshold=args.beam_threshold, lm_weight=args.lm_weight, word_score=args.word_score, unk_score=args.unk_weight, sil_score=args.sil_weight, log_add=False, criterion_type=self.criterion_type, ) self.decoder = LexiconDecoder( self.decoder_opts, self.trie, self.lm, self.silence, self.blank, self.unk_word, [], self.unit_lm, ) else: assert args.unit_lm, "lexicon free decoding can only be done with a unit language model" from flashlight.lib.text.decoder import LexiconFreeDecoder, LexiconFreeDecoderOptions d = {w: [[w]] for w in tgt_dict.symbols} self.word_dict = create_word_dict(d) self.lm = KenLM(args.kenlm_model, self.word_dict) self.decoder_opts = LexiconFreeDecoderOptions( beam_size=args.beam, beam_size_token=int(getattr(args, "beam_size_token", len(tgt_dict))), beam_threshold=args.beam_threshold, lm_weight=args.lm_weight, sil_score=args.sil_weight, log_add=False, criterion_type=self.criterion_type, ) self.decoder = LexiconFreeDecoder( self.decoder_opts, self.lm, self.silence, self.blank, [] ) def decode(self, emissions): B, T, N = emissions.size() hypos = [] def idx_to_word(idx): if self.unit_lm: return self.idx_to_wrd[idx] else: return self.word_dict[idx] def make_hypo(result): hypo = {"tokens": self.get_tokens(result.tokens), "score": result.score} if self.lexicon: hypo["words"] = [idx_to_word(x) for x in result.words if x >= 0] return hypo for b in range(B): emissions_ptr = emissions.data_ptr() + 4 * b * emissions.stride(0) results = self.decoder.decode(emissions_ptr, T, N) nbest_results = results[: self.nbest] hypos.append([make_hypo(result) for result in nbest_results]) self.lm.empty_cache() return hypos
KosmosX-API-main
kosmosX/fairseq/examples/speech_recognition/w2l_decoder.py
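A standalone sketch of the CTC post-processing performed by W2lDecoder.get_tokens above: merge runs of repeated indices, then drop the blank symbol. The index values are dummies and blank is assumed to be 0.

import itertools as it

import torch

def ctc_collapse(idxs, blank=0):
    idxs = (g[0] for g in it.groupby(idxs))                    # merge repeats
    return torch.LongTensor([i for i in idxs if i != blank])   # drop blanks

print(ctc_collapse([0, 3, 3, 0, 0, 5, 5, 5, 0, 3]))  # tensor([3, 5, 3])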
#!/usr/bin/env python3 -u # Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. """ Run inference for pre-processed data with a trained model. """ import ast import logging import math import os import sys import editdistance import numpy as np import torch from fairseq import checkpoint_utils, options, progress_bar, tasks, utils from fairseq.data.data_utils import post_process from fairseq.logging.meters import StopwatchMeter, TimeMeter logging.basicConfig() logging.root.setLevel(logging.INFO) logging.basicConfig(level=logging.INFO) logger = logging.getLogger(__name__) def add_asr_eval_argument(parser): parser.add_argument("--kspmodel", default=None, help="sentence piece model") parser.add_argument( "--wfstlm", default=None, help="wfstlm on dictonary output units" ) parser.add_argument( "--rnnt_decoding_type", default="greedy", help="wfstlm on dictonary\ output units", ) try: parser.add_argument( "--lm-weight", "--lm_weight", type=float, default=0.2, help="weight for lm while interpolating with neural score", ) except: pass parser.add_argument( "--rnnt_len_penalty", default=-0.5, help="rnnt length penalty on word level" ) parser.add_argument( "--w2l-decoder", choices=["viterbi", "kenlm", "fairseqlm"], help="use a w2l decoder", ) parser.add_argument("--lexicon", help="lexicon for w2l decoder") parser.add_argument("--unit-lm", action="store_true", help="if using a unit lm") parser.add_argument("--kenlm-model", "--lm-model", help="lm model for w2l decoder") parser.add_argument("--beam-threshold", type=float, default=25.0) parser.add_argument("--beam-size-token", type=float, default=100) parser.add_argument("--word-score", type=float, default=1.0) parser.add_argument("--unk-weight", type=float, default=-math.inf) parser.add_argument("--sil-weight", type=float, default=0.0) parser.add_argument( "--dump-emissions", type=str, default=None, help="if present, dumps emissions into this file and exits", ) parser.add_argument( "--dump-features", type=str, default=None, help="if present, dumps features into this file and exits", ) parser.add_argument( "--load-emissions", type=str, default=None, help="if present, loads emissions from this file", ) return parser def check_args(args): # assert args.path is not None, "--path required for generation!" # assert args.results_path is not None, "--results_path required for generation!" 
assert ( not args.sampling or args.nbest == args.beam ), "--sampling requires --nbest to be equal to --beam" assert ( args.replace_unk is None or args.raw_text ), "--replace-unk requires a raw text dataset (--raw-text)" def get_dataset_itr(args, task, models): return task.get_batch_iterator( dataset=task.dataset(args.gen_subset), max_tokens=args.max_tokens, max_sentences=args.batch_size, max_positions=(sys.maxsize, sys.maxsize), ignore_invalid_inputs=args.skip_invalid_size_inputs_valid_test, required_batch_size_multiple=args.required_batch_size_multiple, num_shards=args.num_shards, shard_id=args.shard_id, num_workers=args.num_workers, data_buffer_size=args.data_buffer_size, ).next_epoch_itr(shuffle=False) def process_predictions( args, hypos, sp, tgt_dict, target_tokens, res_files, speaker, id ): for hypo in hypos[: min(len(hypos), args.nbest)]: hyp_pieces = tgt_dict.string(hypo["tokens"].int().cpu()) if "words" in hypo: hyp_words = " ".join(hypo["words"]) else: hyp_words = post_process(hyp_pieces, args.post_process) if res_files is not None: print( "{} ({}-{})".format(hyp_pieces, speaker, id), file=res_files["hypo.units"], ) print( "{} ({}-{})".format(hyp_words, speaker, id), file=res_files["hypo.words"], ) tgt_pieces = tgt_dict.string(target_tokens) tgt_words = post_process(tgt_pieces, args.post_process) if res_files is not None: print( "{} ({}-{})".format(tgt_pieces, speaker, id), file=res_files["ref.units"], ) print( "{} ({}-{})".format(tgt_words, speaker, id), file=res_files["ref.words"] ) if not args.quiet: logger.info("HYPO:" + hyp_words) logger.info("TARGET:" + tgt_words) logger.info("___________________") hyp_words = hyp_words.split() tgt_words = tgt_words.split() return editdistance.eval(hyp_words, tgt_words), len(tgt_words) def prepare_result_files(args): def get_res_file(file_prefix): if args.num_shards > 1: file_prefix = f"{args.shard_id}_{file_prefix}" path = os.path.join( args.results_path, "{}-{}-{}.txt".format( file_prefix, os.path.basename(args.path), args.gen_subset ), ) return open(path, "w", buffering=1) if not args.results_path: return None return { "hypo.words": get_res_file("hypo.word"), "hypo.units": get_res_file("hypo.units"), "ref.words": get_res_file("ref.word"), "ref.units": get_res_file("ref.units"), } def optimize_models(args, use_cuda, models): """Optimize ensemble for generation""" for model in models: model.make_generation_fast_( beamable_mm_beam_size=None if args.no_beamable_mm else args.beam, need_attn=args.print_alignment, ) if args.fp16: model.half() if use_cuda: model.cuda() def apply_half(t): if t.dtype is torch.float32: return t.to(dtype=torch.half) return t class ExistingEmissionsDecoder(object): def __init__(self, decoder, emissions): self.decoder = decoder self.emissions = emissions def generate(self, models, sample, **unused): ids = sample["id"].cpu().numpy() try: emissions = np.stack(self.emissions[ids]) except: print([x.shape for x in self.emissions[ids]]) raise Exception("invalid sizes") emissions = torch.from_numpy(emissions) return self.decoder.decode(emissions) def main(args, task=None, model_state=None): check_args(args) use_fp16 = args.fp16 if args.max_tokens is None and args.batch_size is None: args.max_tokens = 4000000 logger.info(args) use_cuda = torch.cuda.is_available() and not args.cpu logger.info("| decoding with criterion {}".format(args.criterion)) task = tasks.setup_task(args) # Load ensemble if args.load_emissions: models, criterions = [], [] task.load_dataset(args.gen_subset) else: logger.info("| loading model(s) from 
{}".format(args.path)) models, saved_cfg, task = checkpoint_utils.load_model_ensemble_and_task( utils.split_paths(args.path, separator="\\"), arg_overrides=ast.literal_eval(args.model_overrides), task=task, suffix=args.checkpoint_suffix, strict=(args.checkpoint_shard_count == 1), num_shards=args.checkpoint_shard_count, state=model_state, ) optimize_models(args, use_cuda, models) task.load_dataset(args.gen_subset, task_cfg=saved_cfg.task) # Set dictionary tgt_dict = task.target_dictionary logger.info( "| {} {} {} examples".format( args.data, args.gen_subset, len(task.dataset(args.gen_subset)) ) ) # hack to pass transitions to W2lDecoder if args.criterion == "asg_loss": raise NotImplementedError("asg_loss is currently not supported") # trans = criterions[0].asg.trans.data # args.asg_transitions = torch.flatten(trans).tolist() # Load dataset (possibly sharded) itr = get_dataset_itr(args, task, models) # Initialize generator gen_timer = StopwatchMeter() def build_generator(args): w2l_decoder = getattr(args, "w2l_decoder", None) if w2l_decoder == "viterbi": from examples.speech_recognition.w2l_decoder import W2lViterbiDecoder return W2lViterbiDecoder(args, task.target_dictionary) elif w2l_decoder == "kenlm": from examples.speech_recognition.w2l_decoder import W2lKenLMDecoder return W2lKenLMDecoder(args, task.target_dictionary) elif w2l_decoder == "fairseqlm": from examples.speech_recognition.w2l_decoder import W2lFairseqLMDecoder return W2lFairseqLMDecoder(args, task.target_dictionary) else: print( "only flashlight decoders with (viterbi, kenlm, fairseqlm) options are supported at the moment" ) # please do not touch this unless you test both generate.py and infer.py with audio_pretraining task generator = build_generator(args) if args.load_emissions: generator = ExistingEmissionsDecoder( generator, np.load(args.load_emissions, allow_pickle=True) ) logger.info("loaded emissions from " + args.load_emissions) num_sentences = 0 if args.results_path is not None and not os.path.exists(args.results_path): os.makedirs(args.results_path) max_source_pos = ( utils.resolve_max_positions( task.max_positions(), *[model.max_positions() for model in models] ), ) if max_source_pos is not None: max_source_pos = max_source_pos[0] if max_source_pos is not None: max_source_pos = max_source_pos[0] - 1 if args.dump_emissions: emissions = {} if args.dump_features: features = {} models[0].bert.proj = None else: res_files = prepare_result_files(args) errs_t = 0 lengths_t = 0 with progress_bar.build_progress_bar(args, itr) as t: wps_meter = TimeMeter() for sample in t: sample = utils.move_to_cuda(sample) if use_cuda else sample if use_fp16: sample = utils.apply_to_sample(apply_half, sample) if "net_input" not in sample: continue prefix_tokens = None if args.prefix_size > 0: prefix_tokens = sample["target"][:, : args.prefix_size] gen_timer.start() if args.dump_emissions: with torch.no_grad(): encoder_out = models[0](**sample["net_input"]) emm = models[0].get_normalized_probs(encoder_out, log_probs=True) emm = emm.transpose(0, 1).cpu().numpy() for i, id in enumerate(sample["id"]): emissions[id.item()] = emm[i] continue elif args.dump_features: with torch.no_grad(): encoder_out = models[0](**sample["net_input"]) feat = encoder_out["encoder_out"].transpose(0, 1).cpu().numpy() for i, id in enumerate(sample["id"]): padding = ( encoder_out["encoder_padding_mask"][i].cpu().numpy() if encoder_out["encoder_padding_mask"] is not None else None ) features[id.item()] = (feat[i], padding) continue hypos = 
task.inference_step(generator, models, sample, prefix_tokens) num_generated_tokens = sum(len(h[0]["tokens"]) for h in hypos) gen_timer.stop(num_generated_tokens) for i, sample_id in enumerate(sample["id"].tolist()): speaker = None # id = task.dataset(args.gen_subset).ids[int(sample_id)] id = sample_id toks = ( sample["target"][i, :] if "target_label" not in sample else sample["target_label"][i, :] ) target_tokens = utils.strip_pad(toks, tgt_dict.pad()).int().cpu() # Process top predictions errs, length = process_predictions( args, hypos[i], None, tgt_dict, target_tokens, res_files, speaker, id, ) errs_t += errs lengths_t += length wps_meter.update(num_generated_tokens) t.log({"wps": round(wps_meter.avg)}) num_sentences += ( sample["nsentences"] if "nsentences" in sample else sample["id"].numel() ) wer = None if args.dump_emissions: emm_arr = [] for i in range(len(emissions)): emm_arr.append(emissions[i]) np.save(args.dump_emissions, emm_arr) logger.info(f"saved {len(emissions)} emissions to {args.dump_emissions}") elif args.dump_features: feat_arr = [] for i in range(len(features)): feat_arr.append(features[i]) np.save(args.dump_features, feat_arr) logger.info(f"saved {len(features)} emissions to {args.dump_features}") else: if lengths_t > 0: wer = errs_t * 100.0 / lengths_t logger.info(f"WER: {wer}") logger.info( "| Processed {} sentences ({} tokens) in {:.1f}s ({:.2f}" "sentences/s, {:.2f} tokens/s)".format( num_sentences, gen_timer.n, gen_timer.sum, num_sentences / gen_timer.sum, 1.0 / gen_timer.avg, ) ) logger.info("| Generate {} with beam={}".format(args.gen_subset, args.beam)) return task, wer def make_parser(): parser = options.get_generation_parser() parser = add_asr_eval_argument(parser) return parser def cli_main(): parser = make_parser() args = options.parse_args_and_arch(parser) main(args) if __name__ == "__main__": cli_main()
KosmosX-API-main
kosmosX/fairseq/examples/speech_recognition/infer.py
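A minimal sketch of the word error rate reported at the end of main() above: the word-level edit distance between hypothesis and reference, normalized by the reference length. The sentences are toy strings.

import editdistance

hyp_words = "the cat sat on mat".split()
tgt_words = "the cat sat on the mat".split()
errs = editdistance.eval(hyp_words, tgt_words)  # 1 deletion
wer = errs * 100.0 / len(tgt_words)
print(f"WER: {wer:.2f}")                        # 16.67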
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import json import os import re import sys from examples.speech_recognition.data import AsrDataset from examples.speech_recognition.data.replabels import replabel_symbol from fairseq.data import Dictionary from fairseq.tasks import LegacyFairseqTask, register_task def get_asr_dataset_from_json(data_json_path, tgt_dict): """ Parse data json and create dataset. See scripts/asr_prep_json.py which pack json from raw files Json example: { "utts": { "4771-29403-0025": { "input": { "length_ms": 170, "path": "/tmp/file1.flac" }, "output": { "text": "HELLO \n", "token": "HE LLO", "tokenid": "4815, 861" } }, "1564-142299-0096": { ... } } """ if not os.path.isfile(data_json_path): raise FileNotFoundError("Dataset not found: {}".format(data_json_path)) with open(data_json_path, "rb") as f: data_samples = json.load(f)["utts"] assert len(data_samples) != 0 sorted_samples = sorted( data_samples.items(), key=lambda sample: int(sample[1]["input"]["length_ms"]), reverse=True, ) aud_paths = [s[1]["input"]["path"] for s in sorted_samples] ids = [s[0] for s in sorted_samples] speakers = [] for s in sorted_samples: m = re.search("(.+?)-(.+?)-(.+?)", s[0]) speakers.append(m.group(1) + "_" + m.group(2)) frame_sizes = [s[1]["input"]["length_ms"] for s in sorted_samples] tgt = [ [int(i) for i in s[1]["output"]["tokenid"].split(", ")] for s in sorted_samples ] # append eos tgt = [[*t, tgt_dict.eos()] for t in tgt] return AsrDataset(aud_paths, frame_sizes, tgt, tgt_dict, ids, speakers) @register_task("speech_recognition") class SpeechRecognitionTask(LegacyFairseqTask): """ Task for training speech recognition model. """ @staticmethod def add_args(parser): """Add task-specific arguments to the parser.""" parser.add_argument("data", help="path to data directory") parser.add_argument( "--silence-token", default="\u2581", help="token for silence (used by w2l)" ) parser.add_argument( "--max-source-positions", default=sys.maxsize, type=int, metavar="N", help="max number of frames in the source sequence", ) parser.add_argument( "--max-target-positions", default=1024, type=int, metavar="N", help="max number of tokens in the target sequence", ) def __init__(self, args, tgt_dict): super().__init__(args) self.tgt_dict = tgt_dict @classmethod def setup_task(cls, args, **kwargs): """Setup the task (e.g., load dictionaries).""" dict_path = os.path.join(args.data, "dict.txt") if not os.path.isfile(dict_path): raise FileNotFoundError("Dict not found: {}".format(dict_path)) tgt_dict = Dictionary.load(dict_path) if args.criterion == "ctc_loss": tgt_dict.add_symbol("<ctc_blank>") elif args.criterion == "asg_loss": for i in range(1, args.max_replabel + 1): tgt_dict.add_symbol(replabel_symbol(i)) print("| dictionary: {} types".format(len(tgt_dict))) return cls(args, tgt_dict) def load_dataset(self, split, combine=False, **kwargs): """Load a given dataset split. 
Args: split (str): name of the split (e.g., train, valid, test) """ data_json_path = os.path.join(self.args.data, "{}.json".format(split)) self.datasets[split] = get_asr_dataset_from_json(data_json_path, self.tgt_dict) def build_generator(self, models, args, **unused): w2l_decoder = getattr(args, "w2l_decoder", None) if w2l_decoder == "viterbi": from examples.speech_recognition.w2l_decoder import W2lViterbiDecoder return W2lViterbiDecoder(args, self.target_dictionary) elif w2l_decoder == "kenlm": from examples.speech_recognition.w2l_decoder import W2lKenLMDecoder return W2lKenLMDecoder(args, self.target_dictionary) elif w2l_decoder == "fairseqlm": from examples.speech_recognition.w2l_decoder import W2lFairseqLMDecoder return W2lFairseqLMDecoder(args, self.target_dictionary) else: return super().build_generator(models, args) @property def target_dictionary(self): """Return the :class:`~fairseq.data.Dictionary` for the language model.""" return self.tgt_dict @property def source_dictionary(self): """Return the source :class:`~fairseq.data.Dictionary` (if applicable for this task).""" return None def max_positions(self): """Return the max speech and sentence length allowed by the task.""" return (self.args.max_source_positions, self.args.max_target_positions)
KosmosX-API-main
kosmosX/fairseq/examples/speech_recognition/tasks/speech_recognition.py
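A hedged sketch of the manifest consumed by get_asr_dataset_from_json above, following the structure documented in its docstring; the path and token ids are made up.

import json

manifest = {
    "utts": {
        "4771-29403-0025": {
            "input": {"length_ms": 170, "path": "/tmp/file1.flac"},
            "output": {"text": "HELLO", "token": "HE LLO", "tokenid": "4815, 861"},
        }
    }
}
with open("train.json", "w") as f:
    json.dump(manifest, f)
# "tokenid" is split on ", " and an eos index is appended to each target sequence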
import importlib import os for file in sorted(os.listdir(os.path.dirname(__file__))): if file.endswith(".py") and not file.startswith("_"): task_name = file[: file.find(".py")] importlib.import_module("examples.speech_recognition.tasks." + task_name)
KosmosX-API-main
kosmosX/fairseq/examples/speech_recognition/tasks/__init__.py