import os
from typing import Tuple, Union
from pathlib import Path

import torchaudio
from torch import Tensor
from torch.utils.data import Dataset
from torchaudio.datasets.utils import (
    download_url,
    extract_archive,
)

URL = "train-clean-100"
FOLDER_IN_ARCHIVE = "LibriTTS"
_CHECKSUMS = {
    "http://www.openslr.org/60/dev-clean.tar.gz": "0c3076c1e5245bb3f0af7d82087ee207",
    "http://www.openslr.org/60/dev-other.tar.gz": "815555d8d75995782ac3ccd7f047213d",
    "http://www.openslr.org/60/test-clean.tar.gz": "7bed3bdb047c4c197f1ad3bc412db59f",
    "http://www.openslr.org/60/test-other.tar.gz": "ae3258249472a13b5abef2a816f733e4",
    "http://www.openslr.org/60/train-clean-100.tar.gz": "4a8c202b78fe1bc0c47916a98f3a2ea8",
    "http://www.openslr.org/60/train-clean-360.tar.gz": "a84ef10ddade5fd25df69596a2767b2d",
    "http://www.openslr.org/60/train-other-500.tar.gz": "7b181dd5ace343a5f38427999684aa6f",
}


def load_libritts_item(
    fileid: str,
    path: str,
    ext_audio: str,
    ext_original_txt: str,
    ext_normalized_txt: str,
) -> Tuple[Tensor, int, str, str, int, int, str]:
    speaker_id, chapter_id, segment_id, utterance_id = fileid.split("_")
    utterance_id = fileid

    normalized_text = utterance_id + ext_normalized_txt
    normalized_text = os.path.join(path, speaker_id, chapter_id, normalized_text)

    original_text = utterance_id + ext_original_txt
    original_text = os.path.join(path, speaker_id, chapter_id, original_text)

    file_audio = utterance_id + ext_audio
    file_audio = os.path.join(path, speaker_id, chapter_id, file_audio)

    # Load audio
    waveform, sample_rate = torchaudio.load(file_audio)

    # Load original text
    with open(original_text) as ft:
        original_text = ft.readline()

    # Load normalized text
    with open(normalized_text, "r") as ft:
        normalized_text = ft.readline()

    return (
        waveform,
        sample_rate,
        original_text,
        normalized_text,
        int(speaker_id),
        int(chapter_id),
        utterance_id,
    )


class LIBRITTS(Dataset):
    """Create a Dataset for LibriTTS.

    Args:
        root (str or Path): Path to the directory where the dataset is found or downloaded.
        url (str, optional): The URL to download the dataset from,
            or the type of the dataset to download.
            Allowed type values are ``"dev-clean"``, ``"dev-other"``, ``"test-clean"``,
            ``"test-other"``, ``"train-clean-100"``, ``"train-clean-360"`` and
            ``"train-other-500"``. (default: ``"train-clean-100"``)
        folder_in_archive (str, optional):
            The top-level directory of the dataset. (default: ``"LibriTTS"``)
        download (bool, optional):
            Whether to download the dataset if it is not found at root path. (default: ``False``).
    """

    _ext_original_txt = ".original.txt"
    _ext_normalized_txt = ".normalized.txt"
    _ext_audio = ".wav"

    def __init__(
        self,
        root: Union[str, Path],
        url: str = URL,
        folder_in_archive: str = FOLDER_IN_ARCHIVE,
        download: bool = False,
    ) -> None:

        if url in [
            "dev-clean",
            "dev-other",
            "test-clean",
            "test-other",
            "train-clean-100",
            "train-clean-360",
            "train-other-500",
        ]:
            ext_archive = ".tar.gz"
            base_url = "http://www.openslr.org/resources/60/"

            url = os.path.join(base_url, url + ext_archive)

        # Get string representation of 'root' in case Path object is passed
        root = os.fspath(root)

        basename = os.path.basename(url)
        archive = os.path.join(root, basename)

        basename = basename.split(".")[0]
        folder_in_archive = os.path.join(folder_in_archive, basename)

        self._path = os.path.join(root, folder_in_archive)

        if download:
            if not os.path.isdir(self._path):
                if not os.path.isfile(archive):
                    checksum = _CHECKSUMS.get(url, None)
                    download_url(url, root, hash_value=checksum)
                extract_archive(archive)

        self._walker = sorted(str(p.stem) for p in Path(self._path).glob('*/*/*' + self._ext_audio))

    def __getitem__(self, n: int) -> Tuple[Tensor, int, str, str, int, int, str]:
        """Load the n-th sample from the dataset.

        Args:
            n (int): The index of the sample to be loaded

        Returns:
            (Tensor, int, str, str, int, int, str):
            ``(waveform, sample_rate, original_text, normalized_text, speaker_id, chapter_id, utterance_id)``
        """
        fileid = self._walker[n]
        return load_libritts_item(
            fileid,
            self._path,
            self._ext_audio,
            self._ext_original_txt,
            self._ext_normalized_txt,
        )

    def __len__(self) -> int:
        return len(self._walker)
import os from typing import Tuple, Union from pathlib import Path import torchaudio from torch import Tensor from torch.utils.data import Dataset from torchaudio.datasets.utils import ( download_url, extract_archive, ) _RELEASE_CONFIGS = { "release1": { "folder_in_archive": "TEDLIUM_release1", "url": "http://www.openslr.org/resources/7/TEDLIUM_release1.tar.gz", "checksum": "30301975fd8c5cac4040c261c0852f57cfa8adbbad2ce78e77e4986957445f27", "data_path": "", "subset": "train", "supported_subsets": ["train", "test", "dev"], "dict": "TEDLIUM.150K.dic", }, "release2": { "folder_in_archive": "TEDLIUM_release2", "url": "http://www.openslr.org/resources/19/TEDLIUM_release2.tar.gz", "checksum": "93281b5fcaaae5c88671c9d000b443cb3c7ea3499ad12010b3934ca41a7b9c58", "data_path": "", "subset": "train", "supported_subsets": ["train", "test", "dev"], "dict": "TEDLIUM.152k.dic", }, "release3": { "folder_in_archive": "TEDLIUM_release-3", "url": "http://www.openslr.org/resources/51/TEDLIUM_release-3.tgz", "checksum": "ad1e454d14d1ad550bc2564c462d87c7a7ec83d4dc2b9210f22ab4973b9eccdb", "data_path": "data/", "subset": None, "supported_subsets": [None], "dict": "TEDLIUM.152k.dic", }, } class TEDLIUM(Dataset): """ Create a Dataset for Tedlium. It supports releases 1,2 and 3. Args: root (str or Path): Path to the directory where the dataset is found or downloaded. release (str, optional): Release version. Allowed values are ``"release1"``, ``"release2"`` or ``"release3"``. (default: ``"release1"``). subset (str, optional): The subset of dataset to use. Valid options are ``"train"``, ``"dev"``, and ``"test"`` for releases 1&2, ``None`` for release3. Defaults to ``"train"`` or ``None``. download (bool, optional): Whether to download the dataset if it is not found at root path. (default: ``False``). 
audio_ext (str, optional): extension for audio file (default: ``"audio_ext"``) """ def __init__( self, root: Union[str, Path], release: str = "release1", subset: str = None, download: bool = False, audio_ext: str = ".sph" ) -> None: self._ext_audio = audio_ext if release in _RELEASE_CONFIGS.keys(): folder_in_archive = _RELEASE_CONFIGS[release]["folder_in_archive"] url = _RELEASE_CONFIGS[release]["url"] subset = subset if subset else _RELEASE_CONFIGS[release]["subset"] else: # Raise warning raise RuntimeError( "The release {} does not match any of the supported tedlium releases{} ".format( release, _RELEASE_CONFIGS.keys(), ) ) if subset not in _RELEASE_CONFIGS[release]["supported_subsets"]: # Raise warning raise RuntimeError( "The subset {} does not match any of the supported tedlium subsets{} ".format( subset, _RELEASE_CONFIGS[release]["supported_subsets"], ) ) # Get string representation of 'root' in case Path object is passed root = os.fspath(root) basename = os.path.basename(url) archive = os.path.join(root, basename) basename = basename.split(".")[0] self._path = os.path.join(root, folder_in_archive, _RELEASE_CONFIGS[release]["data_path"]) if subset in ["train", "dev", "test"]: self._path = os.path.join(self._path, subset) if download: if not os.path.isdir(self._path): if not os.path.isfile(archive): checksum = _RELEASE_CONFIGS[release]["checksum"] download_url(url, root, hash_value=checksum) extract_archive(archive) # Create list for all samples self._filelist = [] stm_path = os.path.join(self._path, "stm") for file in sorted(os.listdir(stm_path)): if file.endswith(".stm"): stm_path = os.path.join(self._path, "stm", file) with open(stm_path) as f: l = len(f.readlines()) file = file.replace(".stm", "") self._filelist.extend((file, line) for line in range(l)) # Create dict path for later read self._dict_path = os.path.join(root, folder_in_archive, _RELEASE_CONFIGS[release]["dict"]) self._phoneme_dict = None def _load_tedlium_item(self, fileid: str, line: int, path: str) -> Tuple[Tensor, int, str, int, int, int]: """Loads a TEDLIUM dataset sample given a file name and corresponding sentence name. Args: fileid (str): File id to identify both text and audio files corresponding to the sample line (int): Line identifier for the sample inside the text file path (str): Dataset root path Returns: (Tensor, int, str, int, int, int): ``(waveform, sample_rate, transcript, talk_id, speaker_id, identifier)`` """ transcript_path = os.path.join(path, "stm", fileid) with open(transcript_path + ".stm") as f: transcript = f.readlines()[line] talk_id, _, speaker_id, start_time, end_time, identifier, transcript = transcript.split(" ", 6) wave_path = os.path.join(path, "sph", fileid) waveform, sample_rate = self._load_audio(wave_path + self._ext_audio, start_time=start_time, end_time=end_time) return (waveform, sample_rate, transcript, talk_id, speaker_id, identifier) def _load_audio(self, path: str, start_time: float, end_time: float, sample_rate: int = 16000) -> [Tensor, int]: """Default load function used in TEDLIUM dataset, you can overwrite this function to customize functionality and load individual sentences from a full ted audio talk file. 
Args: path (str): Path to audio file start_time (int): Time in seconds where the sample sentence stars end_time (int): Time in seconds where the sample sentence finishes sample_rate (float, optional): Sampling rate Returns: [Tensor, int]: Audio tensor representation and sample rate """ start_time = int(float(start_time) * sample_rate) end_time = int(float(end_time) * sample_rate) kwargs = {"frame_offset": start_time, "num_frames": end_time - start_time} return torchaudio.load(path, **kwargs) def __getitem__(self, n: int) -> Tuple[Tensor, int, str, int, int, int]: """Load the n-th sample from the dataset. Args: n (int): The index of the sample to be loaded Returns: tuple: ``(waveform, sample_rate, transcript, talk_id, speaker_id, identifier)`` """ fileid, line = self._filelist[n] return self._load_tedlium_item(fileid, line, self._path) def __len__(self) -> int: """TEDLIUM dataset custom function overwritting len default behaviour. Returns: int: TEDLIUM dataset length """ return len(self._filelist) @property def phoneme_dict(self): """dict[str, tuple[str]]: Phonemes. Mapping from word to tuple of phonemes. Note that some words have empty phonemes. """ # Read phoneme dictionary if not self._phoneme_dict: self._phoneme_dict = {} with open(self._dict_path, "r", encoding="utf-8") as f: for line in f.readlines(): content = line.strip().split() self._phoneme_dict[content[0]] = tuple(content[1:]) # content[1:] can be empty list return self._phoneme_dict.copy()
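

# Usage sketch (added for illustration; not part of the original torchaudio module).
# The "./data" root, the `_example_tedlium_usage` name, and the dictionary lookup key
# are all illustrative assumptions.
def _example_tedlium_usage(root: str = "./data") -> None:
    dataset = TEDLIUM(root, release="release1", subset="dev", download=True)
    # Each item is (waveform, sample_rate, transcript, talk_id, speaker_id, identifier).
    waveform, sample_rate, transcript, talk_id, speaker_id, identifier = dataset[0]
    print(sample_rate, talk_id, transcript.strip())
    # The phoneme dictionary maps words to tuples of phonemes; returns None if absent.
    print(dataset.phoneme_dict.get("the"))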
import hashlib import logging import os import tarfile import urllib import urllib.request import zipfile from typing import Any, Iterable, List, Optional from torch.utils.model_zoo import tqdm def stream_url(url: str, start_byte: Optional[int] = None, block_size: int = 32 * 1024, progress_bar: bool = True) -> Iterable: """Stream url by chunk Args: url (str): Url. start_byte (int or None, optional): Start streaming at that point (Default: ``None``). block_size (int, optional): Size of chunks to stream (Default: ``32 * 1024``). progress_bar (bool, optional): Display a progress bar (Default: ``True``). """ # If we already have the whole file, there is no need to download it again req = urllib.request.Request(url, method="HEAD") with urllib.request.urlopen(req) as response: url_size = int(response.info().get("Content-Length", -1)) if url_size == start_byte: return req = urllib.request.Request(url) if start_byte: req.headers["Range"] = "bytes={}-".format(start_byte) with urllib.request.urlopen(req) as upointer, tqdm( unit="B", unit_scale=True, unit_divisor=1024, total=url_size, disable=not progress_bar, ) as pbar: num_bytes = 0 while True: chunk = upointer.read(block_size) if not chunk: break yield chunk num_bytes += len(chunk) pbar.update(len(chunk)) def download_url(url: str, download_folder: str, filename: Optional[str] = None, hash_value: Optional[str] = None, hash_type: str = "sha256", progress_bar: bool = True, resume: bool = False) -> None: """Download file to disk. Args: url (str): Url. download_folder (str): Folder to download file. filename (str or None, optional): Name of downloaded file. If None, it is inferred from the url (Default: ``None``). hash_value (str or None, optional): Hash for url (Default: ``None``). hash_type (str, optional): Hash type, among "sha256" and "md5" (Default: ``"sha256"``). progress_bar (bool, optional): Display a progress bar (Default: ``True``). resume (bool, optional): Enable resuming download (Default: ``False``). """ req = urllib.request.Request(url, method="HEAD") req_info = urllib.request.urlopen(req).info() # Detect filename filename = filename or req_info.get_filename() or os.path.basename(url) filepath = os.path.join(download_folder, filename) if resume and os.path.exists(filepath): mode = "ab" local_size: Optional[int] = os.path.getsize(filepath) elif not resume and os.path.exists(filepath): raise RuntimeError( "{} already exists. Delete the file manually and retry.".format(filepath) ) else: mode = "wb" local_size = None if hash_value and local_size == int(req_info.get("Content-Length", -1)): with open(filepath, "rb") as file_obj: if validate_file(file_obj, hash_value, hash_type): return raise RuntimeError( "The hash of {} does not match. Delete the file manually and retry.".format( filepath ) ) with open(filepath, mode) as fpointer: for chunk in stream_url(url, start_byte=local_size, progress_bar=progress_bar): fpointer.write(chunk) with open(filepath, "rb") as file_obj: if hash_value and not validate_file(file_obj, hash_value, hash_type): raise RuntimeError( "The hash of {} does not match. Delete the file manually and retry.".format( filepath ) ) def validate_file(file_obj: Any, hash_value: str, hash_type: str = "sha256") -> bool: """Validate a given file object with its hash. Args: file_obj: File object to read from. hash_value (str): Hash for url. hash_type (str, optional): Hash type, among "sha256" and "md5" (Default: ``"sha256"``). Returns: bool: return True if its a valid file, else False. 
""" if hash_type == "sha256": hash_func = hashlib.sha256() elif hash_type == "md5": hash_func = hashlib.md5() else: raise ValueError while True: # Read by chunk to avoid filling memory chunk = file_obj.read(1024 ** 2) if not chunk: break hash_func.update(chunk) return hash_func.hexdigest() == hash_value def extract_archive(from_path: str, to_path: Optional[str] = None, overwrite: bool = False) -> List[str]: """Extract archive. Args: from_path (str): the path of the archive. to_path (str or None, optional): the root path of the extraced files (directory of from_path) (Default: ``None``) overwrite (bool, optional): overwrite existing files (Default: ``False``) Returns: List[str]: List of paths to extracted files even if not overwritten. Examples: >>> url = 'http://www.quest.dcs.shef.ac.uk/wmt16_files_mmt/validation.tar.gz' >>> from_path = './validation.tar.gz' >>> to_path = './' >>> torchaudio.datasets.utils.download_from_url(url, from_path) >>> torchaudio.datasets.utils.extract_archive(from_path, to_path) """ if to_path is None: to_path = os.path.dirname(from_path) try: with tarfile.open(from_path, "r") as tar: logging.info("Opened tar file {}.".format(from_path)) files = [] for file_ in tar: # type: Any file_path = os.path.join(to_path, file_.name) if file_.isfile(): files.append(file_path) if os.path.exists(file_path): logging.info("{} already extracted.".format(file_path)) if not overwrite: continue tar.extract(file_, to_path) return files except tarfile.ReadError: pass try: with zipfile.ZipFile(from_path, "r") as zfile: logging.info("Opened zip file {}.".format(from_path)) files = zfile.namelist() for file_ in files: file_path = os.path.join(to_path, file_) if os.path.exists(file_path): logging.info("{} already extracted.".format(file_path)) if not overwrite: continue zfile.extract(file_, to_path) return files except zipfile.BadZipFile: pass raise NotImplementedError("We currently only support tar.gz, tgz, and zip achives.")
import os import csv from typing import Tuple, Union from pathlib import Path import torchaudio from torchaudio.datasets.utils import download_url, extract_archive from torch import Tensor from torch.utils.data import Dataset _RELEASE_CONFIGS = { "release1": { "folder_in_archive": "wavs", "url": "https://data.keithito.com/data/speech/LJSpeech-1.1.tar.bz2", "checksum": "be1a30453f28eb8dd26af4101ae40cbf2c50413b1bb21936cbcdc6fae3de8aa5", } } class LJSPEECH(Dataset): """Create a Dataset for LJSpeech-1.1. Args: root (str or Path): Path to the directory where the dataset is found or downloaded. url (str, optional): The URL to download the dataset from. (default: ``"https://data.keithito.com/data/speech/LJSpeech-1.1.tar.bz2"``) folder_in_archive (str, optional): The top-level directory of the dataset. (default: ``"wavs"``) download (bool, optional): Whether to download the dataset if it is not found at root path. (default: ``False``). """ def __init__(self, root: Union[str, Path], url: str = _RELEASE_CONFIGS["release1"]["url"], folder_in_archive: str = _RELEASE_CONFIGS["release1"]["folder_in_archive"], download: bool = False) -> None: self._parse_filesystem(root, url, folder_in_archive, download) def _parse_filesystem(self, root: str, url: str, folder_in_archive: str, download: bool) -> None: root = Path(root) basename = os.path.basename(url) archive = root / basename basename = Path(basename.split(".tar.bz2")[0]) folder_in_archive = basename / folder_in_archive self._path = root / folder_in_archive self._metadata_path = root / basename / 'metadata.csv' if download: if not os.path.isdir(self._path): if not os.path.isfile(archive): checksum = _RELEASE_CONFIGS["release1"]["checksum"] download_url(url, root, hash_value=checksum) extract_archive(archive) with open(self._metadata_path, "r", newline='') as metadata: flist = csv.reader(metadata, delimiter="|", quoting=csv.QUOTE_NONE) self._flist = list(flist) def __getitem__(self, n: int) -> Tuple[Tensor, int, str, str]: """Load the n-th sample from the dataset. Args: n (int): The index of the sample to be loaded Returns: (Tensor, int, str, str): ``(waveform, sample_rate, transcript, normalized_transcript)`` """ line = self._flist[n] fileid, transcript, normalized_transcript = line fileid_audio = self._path / (fileid + ".wav") # Load audio waveform, sample_rate = torchaudio.load(fileid_audio) return ( waveform, sample_rate, transcript, normalized_transcript, ) def __len__(self) -> int: return len(self._flist)
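

# Usage sketch (added for illustration; not part of the original torchaudio module).
# "./data" is a hypothetical root and `_example_ljspeech_usage` is a hypothetical helper.
def _example_ljspeech_usage(root: str = "./data") -> None:
    dataset = LJSPEECH(root, download=True)
    waveform, sample_rate, transcript, normalized_transcript = dataset[0]
    print(waveform.shape, sample_rate)
    print(normalized_transcript)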
import os
from pathlib import Path
from typing import List, Tuple, Union

from torch import Tensor
from torch.utils.data import Dataset

import torchaudio
from torchaudio.datasets.utils import (
    download_url,
    extract_archive,
)

_RELEASE_CONFIGS = {
    "release1": {
        "folder_in_archive": "waves_yesno",
        "url": "http://www.openslr.org/resources/1/waves_yesno.tar.gz",
        "checksum": "c3f49e0cca421f96b75b41640749167b52118f232498667ca7a5f9416aef8e73",
    }
}


class YESNO(Dataset):
    """Create a Dataset for YesNo.

    Args:
        root (str or Path): Path to the directory where the dataset is found or downloaded.
        url (str, optional): The URL to download the dataset from.
            (default: ``"http://www.openslr.org/resources/1/waves_yesno.tar.gz"``)
        folder_in_archive (str, optional):
            The top-level directory of the dataset. (default: ``"waves_yesno"``)
        download (bool, optional):
            Whether to download the dataset if it is not found at root path. (default: ``False``).
    """

    def __init__(
        self,
        root: Union[str, Path],
        url: str = _RELEASE_CONFIGS["release1"]["url"],
        folder_in_archive: str = _RELEASE_CONFIGS["release1"]["folder_in_archive"],
        download: bool = False
    ) -> None:
        self._parse_filesystem(root, url, folder_in_archive, download)

    def _parse_filesystem(self, root: str, url: str, folder_in_archive: str, download: bool) -> None:
        root = Path(root)
        archive = os.path.basename(url)
        archive = root / archive

        self._path = root / folder_in_archive
        if download:
            if not os.path.isdir(self._path):
                if not os.path.isfile(archive):
                    checksum = _RELEASE_CONFIGS["release1"]["checksum"]
                    download_url(url, root, hash_value=checksum)
                extract_archive(archive)

        if not os.path.isdir(self._path):
            raise RuntimeError(
                "Dataset not found. Please use `download=True` to download it."
            )

        self._walker = sorted(str(p.stem) for p in Path(self._path).glob("*.wav"))

    def _load_item(self, fileid: str, path: str):
        labels = [int(c) for c in fileid.split("_")]
        file_audio = os.path.join(path, fileid + ".wav")
        waveform, sample_rate = torchaudio.load(file_audio)
        return waveform, sample_rate, labels

    def __getitem__(self, n: int) -> Tuple[Tensor, int, List[int]]:
        """Load the n-th sample from the dataset.

        Args:
            n (int): The index of the sample to be loaded

        Returns:
            (Tensor, int, List[int]): ``(waveform, sample_rate, labels)``
        """
        fileid = self._walker[n]
        item = self._load_item(fileid, self._path)
        return item

    def __len__(self) -> int:
        return len(self._walker)
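

# Usage sketch (added for illustration; not part of the original torchaudio module).
# Each label list holds eight 0/1 values, one per spoken word (0 = "no", 1 = "yes"),
# parsed from the file name. "./data" is a hypothetical root.
def _example_yesno_usage(root: str = "./data") -> None:
    dataset = YESNO(root, download=True)
    waveform, sample_rate, labels = dataset[0]
    print(waveform.shape, sample_rate, labels)  # e.g. torch.Size([1, N]) 8000 [0, 0, 1, ...]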
import os from typing import Tuple, Union from pathlib import Path import torchaudio from torch import Tensor from torch.utils.data import Dataset from torchaudio.datasets.utils import ( download_url, extract_archive, ) URL = "train-clean-100" FOLDER_IN_ARCHIVE = "LibriSpeech" _CHECKSUMS = { "http://www.openslr.org/resources/12/dev-clean.tar.gz": "76f87d090650617fca0cac8f88b9416e0ebf80350acb97b343a85fa903728ab3", "http://www.openslr.org/resources/12/dev-other.tar.gz": "12661c48e8c3fe1de2c1caa4c3e135193bfb1811584f11f569dd12645aa84365", "http://www.openslr.org/resources/12/test-clean.tar.gz": "39fde525e59672dc6d1551919b1478f724438a95aa55f874b576be21967e6c23", "http://www.openslr.org/resources/12/test-other.tar.gz": "d09c181bba5cf717b3dee7d4d592af11a3ee3a09e08ae025c5506f6ebe961c29", "http://www.openslr.org/resources/12/train-clean-100.tar.gz": "d4ddd1d5a6ab303066f14971d768ee43278a5f2a0aa43dc716b0e64ecbbbf6e2", "http://www.openslr.org/resources/12/train-clean-360.tar.gz": "146a56496217e96c14334a160df97fffedd6e0a04e66b9c5af0d40be3c792ecf", "http://www.openslr.org/resources/12/train-other-500.tar.gz": "ddb22f27f96ec163645d53215559df6aa36515f26e01dd70798188350adcb6d2" } def load_librispeech_item(fileid: str, path: str, ext_audio: str, ext_txt: str) -> Tuple[Tensor, int, str, int, int, int]: speaker_id, chapter_id, utterance_id = fileid.split("-") file_text = speaker_id + "-" + chapter_id + ext_txt file_text = os.path.join(path, speaker_id, chapter_id, file_text) fileid_audio = speaker_id + "-" + chapter_id + "-" + utterance_id file_audio = fileid_audio + ext_audio file_audio = os.path.join(path, speaker_id, chapter_id, file_audio) # Load audio waveform, sample_rate = torchaudio.load(file_audio) # Load text with open(file_text) as ft: for line in ft: fileid_text, transcript = line.strip().split(" ", 1) if fileid_audio == fileid_text: break else: # Translation not found raise FileNotFoundError("Translation not found for " + fileid_audio) return ( waveform, sample_rate, transcript, int(speaker_id), int(chapter_id), int(utterance_id), ) class LIBRISPEECH(Dataset): """Create a Dataset for LibriSpeech. Args: root (str or Path): Path to the directory where the dataset is found or downloaded. url (str, optional): The URL to download the dataset from, or the type of the dataset to dowload. Allowed type values are ``"dev-clean"``, ``"dev-other"``, ``"test-clean"``, ``"test-other"``, ``"train-clean-100"``, ``"train-clean-360"`` and ``"train-other-500"``. (default: ``"train-clean-100"``) folder_in_archive (str, optional): The top-level directory of the dataset. (default: ``"LibriSpeech"``) download (bool, optional): Whether to download the dataset if it is not found at root path. (default: ``False``). 
""" _ext_txt = ".trans.txt" _ext_audio = ".flac" def __init__(self, root: Union[str, Path], url: str = URL, folder_in_archive: str = FOLDER_IN_ARCHIVE, download: bool = False) -> None: if url in [ "dev-clean", "dev-other", "test-clean", "test-other", "train-clean-100", "train-clean-360", "train-other-500", ]: ext_archive = ".tar.gz" base_url = "http://www.openslr.org/resources/12/" url = os.path.join(base_url, url + ext_archive) # Get string representation of 'root' in case Path object is passed root = os.fspath(root) basename = os.path.basename(url) archive = os.path.join(root, basename) basename = basename.split(".")[0] folder_in_archive = os.path.join(folder_in_archive, basename) self._path = os.path.join(root, folder_in_archive) if download: if not os.path.isdir(self._path): if not os.path.isfile(archive): checksum = _CHECKSUMS.get(url, None) download_url(url, root, hash_value=checksum) extract_archive(archive) self._walker = sorted(str(p.stem) for p in Path(self._path).glob('*/*/*' + self._ext_audio)) def __getitem__(self, n: int) -> Tuple[Tensor, int, str, int, int, int]: """Load the n-th sample from the dataset. Args: n (int): The index of the sample to be loaded Returns: (Tensor, int, str, int, int, int): ``(waveform, sample_rate, transcript, speaker_id, chapter_id, utterance_id)`` """ fileid = self._walker[n] return load_librispeech_item(fileid, self._path, self._ext_audio, self._ext_txt) def __len__(self) -> int: return len(self._walker)
from pathlib import Path from typing import Union, Tuple, List import torch from torch.utils.data import Dataset import torchaudio SampleType = Tuple[int, torch.Tensor, List[torch.Tensor]] class LibriMix(Dataset): r"""Create the LibriMix dataset. Args: root (str or Path): The path to the directory where the directory ``Libri2Mix`` or ``Libri3Mix`` is stored. subset (str, optional): The subset to use. Options: [``train-360`, ``train-100``, ``dev``, and ``test``] (Default: ``train-360``). num_speakers (int, optional): The number of speakers, which determines the directories to traverse. The Dataset will traverse ``s1`` to ``sN`` directories to collect N source audios. (Default: 2) sample_rate (int, optional): sample rate of audio files. The ``sample_rate`` determines which subdirectory the audio are fetched. If any of the audio has a different sample rate, raises ``ValueError``. Options: [8000, 16000] (Default: 8000) task (str, optional): the task of LibriMix. Options: [``enh_single``, ``enh_both``, ``sep_clean``, ``sep_noisy``] (Default: ``sep_clean``) Note: The LibriMix dataset needs to be manually generated. Please check https://github.com/JorisCos/LibriMix """ def __init__( self, root: Union[str, Path], subset: str = "train-360", num_speakers: int = 2, sample_rate: int = 8000, task: str = "sep_clean", ): self.root = Path(root) / f"Libri{num_speakers}Mix" if sample_rate == 8000: self.root = self.root / "wav8k/min" / subset elif sample_rate == 16000: self.root = self.root / "wav16k/min" / subset else: raise ValueError( f"Unsupported sample rate. Found {sample_rate}." ) self.sample_rate = sample_rate self.task = task self.mix_dir = (self.root / f"mix_{task.split('_')[1]}").resolve() self.src_dirs = [(self.root / f"s{i+1}").resolve() for i in range(num_speakers)] self.files = [p.name for p in self.mix_dir.glob("*wav")] self.files.sort() def _load_audio(self, path) -> torch.Tensor: waveform, sample_rate = torchaudio.load(path) if sample_rate != self.sample_rate: raise ValueError( f"The dataset contains audio file of sample rate {sample_rate}, " f"but the requested sample rate is {self.sample_rate}." ) return waveform def _load_sample(self, filename) -> SampleType: mixed = self._load_audio(str(self.mix_dir / filename)) srcs = [] for i, dir_ in enumerate(self.src_dirs): src = self._load_audio(str(dir_ / filename)) if mixed.shape != src.shape: raise ValueError( f"Different waveform shapes. mixed: {mixed.shape}, src[{i}]: {src.shape}" ) srcs.append(src) return self.sample_rate, mixed, srcs def __len__(self) -> int: return len(self.files) def __getitem__(self, key: int) -> SampleType: """Load the n-th sample from the dataset. Args: key (int): The index of the sample to be loaded Returns: (int, Tensor, List[Tensor]): ``(sample_rate, mix_waveform, list_of_source_waveforms)`` """ return self._load_sample(self.files[key])
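

# Usage sketch (added for illustration; not part of the original torchaudio module).
# LibriMix must be generated beforehand (see the Note in the class docstring);
# "./data" and `_example_librimix_usage` are hypothetical names.
def _example_librimix_usage(root: str = "./data") -> None:
    dataset = LibriMix(root, subset="dev", num_speakers=2, sample_rate=8000, task="sep_clean")
    sample_rate, mixture, sources = dataset[0]
    # `mixture` is the mixed waveform; `sources` holds one waveform per speaker.
    print(sample_rate, mixture.shape, len(sources))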
import os from typing import Tuple from torch import Tensor from torch.utils.data import Dataset import torchaudio from torchaudio.datasets.utils import ( download_url, extract_archive, ) URL = "https://datashare.is.ed.ac.uk/bitstream/handle/10283/3443/VCTK-Corpus-0.92.zip" _CHECKSUMS = { "https://datashare.is.ed.ac.uk/bitstream/handle/10283/3443/VCTK-Corpus-0.92.zip": "8a6ba2946b36fcbef0212cad601f4bfa" } SampleType = Tuple[Tensor, int, str, str, str] class VCTK_092(Dataset): """Create VCTK 0.92 Dataset Args: root (str): Root directory where the dataset's top level directory is found. mic_id (str, optional): Microphone ID. Either ``"mic1"`` or ``"mic2"``. (default: ``"mic2"``) download (bool, optional): Whether to download the dataset if it is not found at root path. (default: ``False``). url (str, optional): The URL to download the dataset from. (default: ``"https://datashare.is.ed.ac.uk/bitstream/handle/10283/3443/VCTK-Corpus-0.92.zip"``) audio_ext (str, optional): Custom audio extension if dataset is converted to non-default audio format. Note: * All the speeches from speaker ``p315`` will be skipped due to the lack of the corresponding text files. * All the speeches from ``p280`` will be skipped for ``mic_id="mic2"`` due to the lack of the audio files. * Some of the speeches from speaker ``p362`` will be skipped due to the lack of the audio files. * See Also: https://datashare.is.ed.ac.uk/handle/10283/3443 """ def __init__( self, root: str, mic_id: str = "mic2", download: bool = False, url: str = URL, audio_ext=".flac", ): if mic_id not in ["mic1", "mic2"]: raise RuntimeError( f'`mic_id` has to be either "mic1" or "mic2". Found: {mic_id}' ) archive = os.path.join(root, "VCTK-Corpus-0.92.zip") self._path = os.path.join(root, "VCTK-Corpus-0.92") self._txt_dir = os.path.join(self._path, "txt") self._audio_dir = os.path.join(self._path, "wav48_silence_trimmed") self._mic_id = mic_id self._audio_ext = audio_ext if download: if not os.path.isdir(self._path): if not os.path.isfile(archive): checksum = _CHECKSUMS.get(url, None) download_url(url, root, hash_value=checksum, hash_type="md5") extract_archive(archive, self._path) if not os.path.isdir(self._path): raise RuntimeError( "Dataset not found. Please use `download=True` to download it." ) # Extracting speaker IDs from the folder structure self._speaker_ids = sorted(os.listdir(self._txt_dir)) self._sample_ids = [] """ Due to some insufficient data complexity in the 0.92 version of this dataset, we start traversing the audio folder structure in accordance with the text folder. As some of the audio files are missing of either ``mic_1`` or ``mic_2`` but the text is present for the same, we first check for the existence of the audio file before adding it to the ``sample_ids`` list. Once the ``audio_ids`` are loaded into memory we can quickly access the list for different parameters required by the user. 
""" for speaker_id in self._speaker_ids: if speaker_id == "p280" and mic_id == "mic2": continue utterance_dir = os.path.join(self._txt_dir, speaker_id) for utterance_file in sorted( f for f in os.listdir(utterance_dir) if f.endswith(".txt") ): utterance_id = os.path.splitext(utterance_file)[0] audio_path_mic = os.path.join( self._audio_dir, speaker_id, f"{utterance_id}_{mic_id}{self._audio_ext}", ) if speaker_id == "p362" and not os.path.isfile(audio_path_mic): continue self._sample_ids.append(utterance_id.split("_")) def _load_text(self, file_path) -> str: with open(file_path) as file_path: return file_path.readlines()[0] def _load_audio(self, file_path) -> Tuple[Tensor, int]: return torchaudio.load(file_path) def _load_sample(self, speaker_id: str, utterance_id: str, mic_id: str) -> SampleType: transcript_path = os.path.join( self._txt_dir, speaker_id, f"{speaker_id}_{utterance_id}.txt" ) audio_path = os.path.join( self._audio_dir, speaker_id, f"{speaker_id}_{utterance_id}_{mic_id}{self._audio_ext}", ) # Reading text transcript = self._load_text(transcript_path) # Reading FLAC waveform, sample_rate = self._load_audio(audio_path) return (waveform, sample_rate, transcript, speaker_id, utterance_id) def __getitem__(self, n: int) -> SampleType: """Load the n-th sample from the dataset. Args: n (int): The index of the sample to be loaded Returns: (Tensor, int, str, str, str): ``(waveform, sample_rate, transcript, speaker_id, utterance_id)`` """ speaker_id, utterance_id = self._sample_ids[n] return self._load_sample(speaker_id, utterance_id, self._mic_id) def __len__(self) -> int: return len(self._sample_ids)
from ._wav2vec2.impl import ( Wav2Vec2Bundle, Wav2Vec2ASRBundle, WAV2VEC2_BASE, WAV2VEC2_LARGE, WAV2VEC2_LARGE_LV60K, WAV2VEC2_ASR_BASE_10M, WAV2VEC2_ASR_BASE_100H, WAV2VEC2_ASR_BASE_960H, WAV2VEC2_ASR_LARGE_10M, WAV2VEC2_ASR_LARGE_100H, WAV2VEC2_ASR_LARGE_960H, WAV2VEC2_ASR_LARGE_LV60K_10M, WAV2VEC2_ASR_LARGE_LV60K_100H, WAV2VEC2_ASR_LARGE_LV60K_960H, WAV2VEC2_XLSR53, VOXPOPULI_ASR_BASE_10K_EN, VOXPOPULI_ASR_BASE_10K_ES, VOXPOPULI_ASR_BASE_10K_DE, VOXPOPULI_ASR_BASE_10K_FR, VOXPOPULI_ASR_BASE_10K_IT, HUBERT_BASE, HUBERT_LARGE, HUBERT_XLARGE, HUBERT_ASR_LARGE, HUBERT_ASR_XLARGE, ) from ._tts import ( Tacotron2TTSBundle, TACOTRON2_GRIFFINLIM_CHAR_LJSPEECH, TACOTRON2_GRIFFINLIM_PHONE_LJSPEECH, TACOTRON2_WAVERNN_CHAR_LJSPEECH, TACOTRON2_WAVERNN_PHONE_LJSPEECH, ) __all__ = [ 'Wav2Vec2Bundle', 'Wav2Vec2ASRBundle', 'WAV2VEC2_BASE', 'WAV2VEC2_LARGE', 'WAV2VEC2_LARGE_LV60K', 'WAV2VEC2_ASR_BASE_10M', 'WAV2VEC2_ASR_BASE_100H', 'WAV2VEC2_ASR_BASE_960H', 'WAV2VEC2_ASR_LARGE_10M', 'WAV2VEC2_ASR_LARGE_100H', 'WAV2VEC2_ASR_LARGE_960H', 'WAV2VEC2_ASR_LARGE_LV60K_10M', 'WAV2VEC2_ASR_LARGE_LV60K_100H', 'WAV2VEC2_ASR_LARGE_LV60K_960H', 'WAV2VEC2_XLSR53', 'VOXPOPULI_ASR_BASE_10K_EN', 'VOXPOPULI_ASR_BASE_10K_ES', 'VOXPOPULI_ASR_BASE_10K_DE', 'VOXPOPULI_ASR_BASE_10K_FR', 'VOXPOPULI_ASR_BASE_10K_IT', 'HUBERT_BASE', 'HUBERT_LARGE', 'HUBERT_XLARGE', 'HUBERT_ASR_LARGE', 'HUBERT_ASR_XLARGE', 'Tacotron2TTSBundle', 'TACOTRON2_GRIFFINLIM_CHAR_LJSPEECH', 'TACOTRON2_GRIFFINLIM_PHONE_LJSPEECH', 'TACOTRON2_WAVERNN_CHAR_LJSPEECH', 'TACOTRON2_WAVERNN_PHONE_LJSPEECH', ]
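

# Usage sketch (added for illustration; not part of the original torchaudio module).
# It mirrors the feature-extraction flow documented on Wav2Vec2Bundle; "speech.wav" is a
# hypothetical input file and `_example_feature_extraction` is a hypothetical helper.
def _example_feature_extraction(path: str = "speech.wav"):
    import torch
    import torchaudio

    bundle = HUBERT_BASE
    model = bundle.get_model()
    waveform, sample_rate = torchaudio.load(path)
    if sample_rate != bundle.sample_rate:
        waveform = torchaudio.functional.resample(waveform, sample_rate, bundle.sample_rate)
    with torch.inference_mode():
        # One tensor per transformer layer, each of shape (batch, frames, feature dim).
        features, _ = model.extract_features(waveform)
    return features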
def _get_en_labels():
    return (
        '|',
        'E', 'T', 'A', 'O', 'N', 'I', 'H', 'S', 'R', 'D', 'L', 'U', 'M', 'W',
        'C', 'F', 'G', 'Y', 'P', 'B', 'V', 'K', "'", 'X', 'J', 'Q', 'Z',
    )


def _get_de_labels():
    return (
        "|",
        "e", "n", "i", "r", "s", "t", "a", "d", "h", "u", "l", "g", "c", "m",
        "o", "b", "w", "f", "k", "z", "p", "v", "ü", "ä", "ö", "j", "ß", "y",
        "x", "q",
    )


def _get_vp_en_labels():
    return (
        "|",
        "e", "t", "o", "i", "a", "n", "s", "r", "h", "l", "d", "c", "u", "m",
        "p", "f", "g", "w", "y", "b", "v", "k", "x", "j", "q", "z",
    )


def _get_es_labels():
    return (
        "|",
        "e", "a", "o", "s", "n", "r", "i", "l", "d", "c", "t", "u", "p", "m",
        "b", "q", "y", "g", "v", "h", "ó", "f", "í", "á", "j", "z", "ñ", "é",
        "x", "ú", "k", "w", "ü",
    )


def _get_fr_labels():
    return (
        "|",
        "e", "s", "n", "i", "t", "r", "a", "o", "u", "l", "d", "c", "p", "m",
        "é", "v", "q", "f", "g", "b", "h", "x", "à", "j", "è", "y", "ê", "z",
        "ô", "k", "ç", "œ", "û", "ù", "î", "â", "w", "ï", "ë", "ü", "æ",
    )


def _get_it_labels():
    return (
        "|",
        "e", "i", "a", "o", "n", "t", "r", "l", "s", "c", "d", "u", "p", "m",
        "g", "v", "h", "z", "f", "b", "q", "à", "è", "ù", "é", "ò", "ì", "k",
        "y", "x", "w", "j", "ó", "í", "ï",
    )
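

# Decoder sketch (added for illustration; not part of the original torchaudio module).
# The ASR bundles prepend a blank token ('-') at index 0 to these label sets
# (see Wav2Vec2ASRBundle.get_labels); `_greedy_ctc_decode` is a hypothetical helper
# that collapses repeated indices, drops blanks, and maps '|' back to spaces.
def _greedy_ctc_decode(emission, labels):
    # emission: Tensor of shape (frames, num_labels); labels: tuple with blank at index 0.
    import torch

    indices = torch.argmax(emission, dim=-1)
    indices = torch.unique_consecutive(indices)
    tokens = [labels[int(i)] for i in indices if labels[int(i)] != "-"]
    return "".join(tokens).replace("|", " ").strip()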
from dataclasses import dataclass from typing import Dict, Tuple, Any import torch from torchaudio._internal import load_state_dict_from_url from torchaudio.models import wav2vec2_model, Wav2Vec2Model from . import utils __all__ = [] @dataclass class Wav2Vec2Bundle: """torchaudio.pipelines.Wav2Vec2Bundle() Data class that bundles associated information to use pretrained Wav2Vec2Model. This class provides interfaces for instantiating the pretrained model along with the information necessary to retrieve pretrained weights and additional data to be used with the model. Torchaudio library instantiates objects of this class, each of which represents a different pretrained model. Client code should access pretrained models via these instances. Please see below for the usage and the available values. Example - Feature Extraction >>> import torchaudio >>> >>> bundle = torchaudio.pipelines.HUBERT_BASE >>> >>> # Build the model and load pretrained weight. >>> model = bundle.get_model() Downloading: 100%|███████████████████████████████| 360M/360M [00:06<00:00, 60.6MB/s] >>> >>> # Resample audio to the expected sampling rate >>> waveform = torchaudio.functional.resample(waveform, sample_rate, bundle.sample_rate) >>> >>> # Extract acoustic features >>> features, _ = model.extract_features(waveform) """ # noqa: E501 _path: str _params: Dict[str, Any] _sample_rate: float @property def sample_rate(self) -> float: """Sample rate of the audio that the model is trained on. :type: float """ return self._sample_rate def _get_state_dict(self, dl_kwargs): url = f'https://download.pytorch.org/torchaudio/models/{self._path}' dl_kwargs = {} if dl_kwargs is None else dl_kwargs state_dict = load_state_dict_from_url(url, **dl_kwargs) return state_dict def get_model(self, *, dl_kwargs=None) -> Wav2Vec2Model: # Overriding the signature so that the return type is correct on Sphinx """get_model(self, *, dl_kwargs=None) -> torchaudio.models.Wav2Vec2Model Construct the model and load the pretrained weight. The weight file is downloaded from the internet and cached with :func:`torch.hub.load_state_dict_from_url` Args: dl_kwargs (dictionary of keyword arguments): Passed to :func:`torch.hub.load_state_dict_from_url`. """ model = wav2vec2_model(**self._params) model.load_state_dict(self._get_state_dict(dl_kwargs)) model.eval() return model @dataclass class Wav2Vec2ASRBundle(Wav2Vec2Bundle): """torchaudio.pipelines.Wav2Vec2ASRBundle() Data class that bundles associated information to use pretrained Wav2Vec2Model. This class provides interfaces for instantiating the pretrained model along with the information necessary to retrieve pretrained weights and additional data to be used with the model. Torchaudio library instantiates objects of this class, each of which represents a different pretrained model. Client code should access pretrained models via these instances. Please see below for the usage and the available values. Example - ASR >>> import torchaudio >>> >>> bundle = torchaudio.pipelines.HUBERT_ASR_LARGE >>> >>> # Build the model and load pretrained weight. >>> model = bundle.get_model() Downloading: 100%|███████████████████████████████| 1.18G/1.18G [00:17<00:00, 73.8MB/s] >>> >>> # Check the corresponding labels of the output. 
>>> labels = bundle.get_labels() >>> print(labels) ('-', '|', 'E', 'T', 'A', 'O', 'N', 'I', 'H', 'S', 'R', 'D', 'L', 'U', 'M', 'W', 'C', 'F', 'G', 'Y', 'P', 'B', 'V', 'K', "'", 'X', 'J', 'Q', 'Z') >>> >>> # Resample audio to the expected sampling rate >>> waveform = torchaudio.functional.resample(waveform, sample_rate, bundle.sample_rate) >>> >>> # Infer the label probability distribution >>> emissions, _ = model(waveform) >>> >>> # Pass emission to decoder >>> # `ctc_decode` is for illustration purpose only >>> transcripts = ctc_decode(emissions, labels) """ # noqa: E501 _labels: Tuple[str] _remove_aux_axis: Tuple[int] = (1, 2, 3) def get_labels( self, *, blank: str = '-', ) -> Tuple[str]: """The output class labels (only applicable to fine-tuned bundles) The first is blank token, and it is customizable. Args: blank (str, optional): Blank token. (default: ``'-'``) Returns: Tuple[str]: For models fine-tuned on ASR, returns the tuple of strings representing the output class labels. Example >>> import torchaudio >>> torchaudio.models.HUBERT_ASR_LARGE.get_labels() ('-', '|', 'E', 'T', 'A', 'O', 'N', 'I', 'H', 'S', 'R', 'D', 'L', 'U', 'M', 'W', 'C', 'F', 'G', 'Y', 'P', 'B', 'V', 'K', "'", 'X', 'J', 'Q', 'Z') """ # noqa: E501 return (blank, *self._labels) def _get_state_dict(self, dl_kwargs): state_dict = super()._get_state_dict(dl_kwargs) if self._remove_aux_axis: # Remove the seemingly unnecessary axis # For ASR task, the pretrained weights originated from fairseq has unrelated dimensions at index 1, 2, 3 # It's originated from the Dictionary implementation of fairseq, which was intended for NLP tasks, # but not used during the ASR training. # https://github.com/pytorch/fairseq/blob/c5ff181125c7e6126b49a85e5ebdd5f5b6a07914/fairseq/data/dictionary.py#L21-L37 # https://github.com/pytorch/fairseq/blob/c5ff181125c7e6126b49a85e5ebdd5f5b6a07914/fairseq/criterions/ctc.py#L126-L129 # # Also, some pretrained weights originated from voxpopuli has an extra dimensions that almost never used and # that resembles mistake. # The label `1` shows up in the training dataset of German (1 out of 16M), # English (1 / 28M), Spanish (1 / 9.4M), Romanian (1 / 4.7M) and Polish (6 / 5.8M) for key in ['aux.weight', 'aux.bias']: t = state_dict[key] state_dict[key] = torch.stack([t[i] for i in range(t.size(0)) if i not in self._remove_aux_axis]) return state_dict WAV2VEC2_BASE = Wav2Vec2Bundle( _path='wav2vec2_fairseq_base_ls960.pth', _params={ 'extractor_mode': 'group_norm', 'extractor_conv_layer_config': [ (512, 10, 5), (512, 3, 2), (512, 3, 2), (512, 3, 2), (512, 3, 2), (512, 2, 2), (512, 2, 2), ], 'extractor_conv_bias': False, 'encoder_embed_dim': 768, 'encoder_projection_dropout': 0.1, 'encoder_pos_conv_kernel': 128, 'encoder_pos_conv_groups': 16, 'encoder_num_layers': 12, 'encoder_num_heads': 12, 'encoder_attention_dropout': 0.1, 'encoder_ff_interm_features': 3072, 'encoder_ff_interm_dropout': 0.0, 'encoder_dropout': 0.1, 'encoder_layer_norm_first': False, 'encoder_layer_drop': 0.05, "aux_num_out": None, }, _sample_rate=16000, ) WAV2VEC2_BASE.__doc__ = """wav2vec 2.0 model with "Base" configuration. Pre-trained on 960 hours of unlabeled audio from *LibriSpeech* dataset [:footcite:`7178964`] (the combination of "train-clean-100", "train-clean-360", and "train-other-500"). Not fine-tuned. Originally published by the authors of *wav2vec 2.0* [:footcite:`baevski2020wav2vec`] under MIT License and redistributed with the same license. 
[`License <https://github.com/pytorch/fairseq/blob/ce6c9eeae163ac04b79539c78e74f292f29eaa18/LICENSE>`__, `Source <https://github.com/pytorch/fairseq/blob/ce6c9eeae163ac04b79539c78e74f292f29eaa18/examples/wav2vec#pre-trained-models>`__] Please refer to :func:`torchaudio.pipelines.Wav2Vec2Bundle` for the usage. """ # noqa: E501 WAV2VEC2_ASR_BASE_10M = Wav2Vec2ASRBundle( _path='wav2vec2_fairseq_base_ls960_asr_ll10m.pth', _params={ 'extractor_mode': 'group_norm', 'extractor_conv_layer_config': [ (512, 10, 5), (512, 3, 2), (512, 3, 2), (512, 3, 2), (512, 3, 2), (512, 2, 2), (512, 2, 2), ], 'extractor_conv_bias': False, 'encoder_embed_dim': 768, 'encoder_projection_dropout': 0.1, 'encoder_pos_conv_kernel': 128, 'encoder_pos_conv_groups': 16, 'encoder_num_layers': 12, 'encoder_num_heads': 12, 'encoder_attention_dropout': 0.1, 'encoder_ff_interm_features': 3072, 'encoder_ff_interm_dropout': 0.0, 'encoder_dropout': 0.1, 'encoder_layer_norm_first': False, 'encoder_layer_drop': 0.05, "aux_num_out": 29, }, _labels=utils._get_en_labels(), _sample_rate=16000, ) WAV2VEC2_ASR_BASE_10M.__doc__ = """Build "base" wav2vec2 model with an extra linear module Pre-trained on 960 hours of unlabeled audio from *LibriSpeech* dataset [:footcite:`7178964`] (the combination of "train-clean-100", "train-clean-360", and "train-other-500"), and fine-tuned for ASR on 10 minutes of transcribed audio from *Libri-Light* dataset [:footcite:`librilight`] ("train-10min" subset). Originally published by the authors of *wav2vec 2.0* [:footcite:`baevski2020wav2vec`] under MIT License and redistributed with the same license. [`License <https://github.com/pytorch/fairseq/blob/ce6c9eeae163ac04b79539c78e74f292f29eaa18/LICENSE>`__, `Source <https://github.com/pytorch/fairseq/blob/ce6c9eeae163ac04b79539c78e74f292f29eaa18/examples/wav2vec#pre-trained-models>`__] Please refer to :func:`torchaudio.pipelines.Wav2Vec2ASRBundle` for the usage. """ # noqa: E501 WAV2VEC2_ASR_BASE_100H = Wav2Vec2ASRBundle( 'wav2vec2_fairseq_base_ls960_asr_ls100.pth', { 'extractor_mode': 'group_norm', 'extractor_conv_layer_config': [ (512, 10, 5), (512, 3, 2), (512, 3, 2), (512, 3, 2), (512, 3, 2), (512, 2, 2), (512, 2, 2), ], 'extractor_conv_bias': False, 'encoder_embed_dim': 768, 'encoder_projection_dropout': 0.1, 'encoder_pos_conv_kernel': 128, 'encoder_pos_conv_groups': 16, 'encoder_num_layers': 12, 'encoder_num_heads': 12, 'encoder_attention_dropout': 0.1, 'encoder_ff_interm_features': 3072, 'encoder_ff_interm_dropout': 0.0, 'encoder_dropout': 0.1, 'encoder_layer_norm_first': False, 'encoder_layer_drop': 0.05, "aux_num_out": 29, }, _labels=utils._get_en_labels(), _sample_rate=16000, ) WAV2VEC2_ASR_BASE_100H.__doc__ = """Build "base" wav2vec2 model with an extra linear module Pre-trained on 960 hours of unlabeled audio from *LibriSpeech* dataset [:footcite:`7178964`] (the combination of "train-clean-100", "train-clean-360", and "train-other-500"), and fine-tuned for ASR on 100 hours of transcribed audio from "train-clean-100" subset. Originally published by the authors of *wav2vec 2.0* [:footcite:`baevski2020wav2vec`] under MIT License and redistributed with the same license. [`License <https://github.com/pytorch/fairseq/blob/ce6c9eeae163ac04b79539c78e74f292f29eaa18/LICENSE>`__, `Source <https://github.com/pytorch/fairseq/blob/ce6c9eeae163ac04b79539c78e74f292f29eaa18/examples/wav2vec#pre-trained-models>`__] Please refer to :func:`torchaudio.pipelines.Wav2Vec2ASRBundle` for the usage. 
""" # noqa: E501 WAV2VEC2_ASR_BASE_960H = Wav2Vec2ASRBundle( 'wav2vec2_fairseq_base_ls960_asr_ls960.pth', { "extractor_mode": "group_norm", "extractor_conv_layer_config": [ (512, 10, 5), (512, 3, 2), (512, 3, 2), (512, 3, 2), (512, 3, 2), (512, 2, 2), (512, 2, 2), ], "extractor_conv_bias": False, "encoder_embed_dim": 768, "encoder_projection_dropout": 0.1, "encoder_pos_conv_kernel": 128, "encoder_pos_conv_groups": 16, "encoder_num_layers": 12, "encoder_num_heads": 12, "encoder_attention_dropout": 0.1, "encoder_ff_interm_features": 3072, "encoder_ff_interm_dropout": 0.0, "encoder_dropout": 0.1, "encoder_layer_norm_first": False, "encoder_layer_drop": 0.05, "aux_num_out": 29, }, _labels=utils._get_en_labels(), _sample_rate=16000, ) WAV2VEC2_ASR_BASE_960H.__doc__ = """Build "base" wav2vec2 model with an extra linear module Pre-trained on 960 hours of unlabeled audio from *LibriSpeech* dataset [:footcite:`7178964`] (the combination of "train-clean-100", "train-clean-360", and "train-other-500"), and fine-tuned for ASR on the same audio with the corresponding transcripts. Originally published by the authors of *wav2vec 2.0* [:footcite:`baevski2020wav2vec`] under MIT License and redistributed with the same license. [`License <https://github.com/pytorch/fairseq/blob/ce6c9eeae163ac04b79539c78e74f292f29eaa18/LICENSE>`__, `Source <https://github.com/pytorch/fairseq/blob/ce6c9eeae163ac04b79539c78e74f292f29eaa18/examples/wav2vec#pre-trained-models>`__] Please refer to :func:`torchaudio.pipelines.Wav2Vec2ASRBundle` for the usage. """ # noqa: E501 WAV2VEC2_LARGE = Wav2Vec2Bundle( 'wav2vec2_fairseq_large_ls960.pth', { "extractor_mode": "group_norm", "extractor_conv_layer_config": [ (512, 10, 5), (512, 3, 2), (512, 3, 2), (512, 3, 2), (512, 3, 2), (512, 2, 2), (512, 2, 2), ], "extractor_conv_bias": False, "encoder_embed_dim": 1024, "encoder_projection_dropout": 0.1, "encoder_pos_conv_kernel": 128, "encoder_pos_conv_groups": 16, "encoder_num_layers": 24, "encoder_num_heads": 16, "encoder_attention_dropout": 0.1, "encoder_ff_interm_features": 4096, "encoder_ff_interm_dropout": 0.0, "encoder_dropout": 0.0, "encoder_layer_norm_first": False, "encoder_layer_drop": 0.2, "aux_num_out": None, }, _sample_rate=16000, ) WAV2VEC2_LARGE.__doc__ = """Build "large" wav2vec2 model. Pre-trained on 960 hours of unlabeled audio from *LibriSpeech* dataset [:footcite:`7178964`] (the combination of "train-clean-100", "train-clean-360", and "train-other-500"). Not fine-tuned. Originally published by the authors of *wav2vec 2.0* [:footcite:`baevski2020wav2vec`] under MIT License and redistributed with the same license. [`License <https://github.com/pytorch/fairseq/blob/ce6c9eeae163ac04b79539c78e74f292f29eaa18/LICENSE>`__, `Source <https://github.com/pytorch/fairseq/blob/ce6c9eeae163ac04b79539c78e74f292f29eaa18/examples/wav2vec#pre-trained-models>`__] Please refer to :func:`torchaudio.pipelines.Wav2Vec2Bundle` for the usage. 
""" # noqa: E501 WAV2VEC2_ASR_LARGE_10M = Wav2Vec2ASRBundle( 'wav2vec2_fairseq_large_ls960_asr_ll10m.pth', { "extractor_mode": "group_norm", "extractor_conv_layer_config": [ (512, 10, 5), (512, 3, 2), (512, 3, 2), (512, 3, 2), (512, 3, 2), (512, 2, 2), (512, 2, 2), ], "extractor_conv_bias": False, "encoder_embed_dim": 1024, "encoder_projection_dropout": 0.1, "encoder_pos_conv_kernel": 128, "encoder_pos_conv_groups": 16, "encoder_num_layers": 24, "encoder_num_heads": 16, "encoder_attention_dropout": 0.1, "encoder_ff_interm_features": 4096, "encoder_ff_interm_dropout": 0.0, "encoder_dropout": 0.0, "encoder_layer_norm_first": False, "encoder_layer_drop": 0.2, "aux_num_out": 29, }, _labels=utils._get_en_labels(), _sample_rate=16000, ) WAV2VEC2_ASR_LARGE_10M.__doc__ = """Build "large" wav2vec2 model with an extra linear module Pre-trained on 960 hours of unlabeled audio from *LibriSpeech* dataset [:footcite:`7178964`] (the combination of "train-clean-100", "train-clean-360", and "train-other-500"), and fine-tuned for ASR on 10 minutes of transcribed audio from *Libri-Light* dataset [:footcite:`librilight`] ("train-10min" subset). Originally published by the authors of *wav2vec 2.0* [:footcite:`baevski2020wav2vec`] under MIT License and redistributed with the same license. [`License <https://github.com/pytorch/fairseq/blob/ce6c9eeae163ac04b79539c78e74f292f29eaa18/LICENSE>`__, `Source <https://github.com/pytorch/fairseq/blob/ce6c9eeae163ac04b79539c78e74f292f29eaa18/examples/wav2vec#pre-trained-models>`__] Please refer to :func:`torchaudio.pipelines.Wav2Vec2ASRBundle` for the usage. """ # noqa: E501 WAV2VEC2_ASR_LARGE_100H = Wav2Vec2ASRBundle( 'wav2vec2_fairseq_large_ls960_asr_ls100.pth', { "extractor_mode": "group_norm", "extractor_conv_layer_config": [ (512, 10, 5), (512, 3, 2), (512, 3, 2), (512, 3, 2), (512, 3, 2), (512, 2, 2), (512, 2, 2), ], "extractor_conv_bias": False, "encoder_embed_dim": 1024, "encoder_projection_dropout": 0.1, "encoder_pos_conv_kernel": 128, "encoder_pos_conv_groups": 16, "encoder_num_layers": 24, "encoder_num_heads": 16, "encoder_attention_dropout": 0.1, "encoder_ff_interm_features": 4096, "encoder_ff_interm_dropout": 0.0, "encoder_dropout": 0.0, "encoder_layer_norm_first": False, "encoder_layer_drop": 0.2, "aux_num_out": 29, }, _labels=utils._get_en_labels(), _sample_rate=16000, ) WAV2VEC2_ASR_LARGE_100H.__doc__ = """Build "large" wav2vec2 model with an extra linear module Pre-trained on 960 hours of unlabeled audio from *LibriSpeech* dataset [:footcite:`7178964`] (the combination of "train-clean-100", "train-clean-360", and "train-other-500"), and fine-tuned for ASR on 100 hours of transcribed audio from the same dataset ("train-clean-100" subset). Originally published by the authors of *wav2vec 2.0* [:footcite:`baevski2020wav2vec`] under MIT License and redistributed with the same license. [`License <https://github.com/pytorch/fairseq/blob/ce6c9eeae163ac04b79539c78e74f292f29eaa18/LICENSE>`__, `Source <https://github.com/pytorch/fairseq/blob/ce6c9eeae163ac04b79539c78e74f292f29eaa18/examples/wav2vec#pre-trained-models>`__] Please refer to :func:`torchaudio.pipelines.Wav2Vec2ASRBundle` for the usage. 
""" # noqa: E501 WAV2VEC2_ASR_LARGE_960H = Wav2Vec2ASRBundle( 'wav2vec2_fairseq_large_ls960_asr_ls960.pth', { "extractor_mode": "group_norm", "extractor_conv_layer_config": [ (512, 10, 5), (512, 3, 2), (512, 3, 2), (512, 3, 2), (512, 3, 2), (512, 2, 2), (512, 2, 2), ], "extractor_conv_bias": False, "encoder_embed_dim": 1024, "encoder_projection_dropout": 0.1, "encoder_pos_conv_kernel": 128, "encoder_pos_conv_groups": 16, "encoder_num_layers": 24, "encoder_num_heads": 16, "encoder_attention_dropout": 0.1, "encoder_ff_interm_features": 4096, "encoder_ff_interm_dropout": 0.0, "encoder_dropout": 0.0, "encoder_layer_norm_first": False, "encoder_layer_drop": 0.2, "aux_num_out": 29, }, _labels=utils._get_en_labels(), _sample_rate=16000, ) WAV2VEC2_ASR_LARGE_960H.__doc__ = """Build "large" wav2vec2 model with an extra linear module Pre-trained on 960 hours of unlabeled audio from *LibriSpeech* dataset [:footcite:`7178964`] (the combination of "train-clean-100", "train-clean-360", and "train-other-500"), and fine-tuned for ASR on the same audio with the corresponding transcripts. Originally published by the authors of *wav2vec 2.0* [:footcite:`baevski2020wav2vec`] under MIT License and redistributed with the same license. [`License <https://github.com/pytorch/fairseq/blob/ce6c9eeae163ac04b79539c78e74f292f29eaa18/LICENSE>`__, `Source <https://github.com/pytorch/fairseq/blob/ce6c9eeae163ac04b79539c78e74f292f29eaa18/examples/wav2vec#pre-trained-models>`__] Please refer to :func:`torchaudio.pipelines.Wav2Vec2ASRBundle` for the usage. """ # noqa: E501 WAV2VEC2_LARGE_LV60K = Wav2Vec2Bundle( 'wav2vec2_fairseq_large_lv60k.pth', { "extractor_mode": "layer_norm", "extractor_conv_layer_config": [ (512, 10, 5), (512, 3, 2), (512, 3, 2), (512, 3, 2), (512, 3, 2), (512, 2, 2), (512, 2, 2), ], "extractor_conv_bias": True, "encoder_embed_dim": 1024, "encoder_projection_dropout": 0.1, "encoder_pos_conv_kernel": 128, "encoder_pos_conv_groups": 16, "encoder_num_layers": 24, "encoder_num_heads": 16, "encoder_attention_dropout": 0.1, "encoder_ff_interm_features": 4096, "encoder_ff_interm_dropout": 0.0, "encoder_dropout": 0.0, "encoder_layer_norm_first": True, "encoder_layer_drop": 0.0, "aux_num_out": None, }, _sample_rate=16000, ) WAV2VEC2_LARGE_LV60K.__doc__ = """Build "large-lv60k" wav2vec2 model. Pre-trained on 60,000 hours of unlabeled audio from *Libri-Light* dataset [:footcite:`librilight`]. Not fine-tuned. Originally published by the authors of *wav2vec 2.0* [:footcite:`baevski2020wav2vec`] under MIT License and redistributed with the same license. [`License <https://github.com/pytorch/fairseq/blob/ce6c9eeae163ac04b79539c78e74f292f29eaa18/LICENSE>`__, `Source <https://github.com/pytorch/fairseq/blob/ce6c9eeae163ac04b79539c78e74f292f29eaa18/examples/wav2vec#pre-trained-models>`__] Please refer to :func:`torchaudio.pipelines.Wav2Vec2Bundle` for the usage. 
""" # noqa: E501 WAV2VEC2_ASR_LARGE_LV60K_10M = Wav2Vec2ASRBundle( 'wav2vec2_fairseq_large_lv60k_asr_ll10m.pth', { "extractor_mode": "layer_norm", "extractor_conv_layer_config": [ (512, 10, 5), (512, 3, 2), (512, 3, 2), (512, 3, 2), (512, 3, 2), (512, 2, 2), (512, 2, 2), ], "extractor_conv_bias": True, "encoder_embed_dim": 1024, "encoder_projection_dropout": 0.1, "encoder_pos_conv_kernel": 128, "encoder_pos_conv_groups": 16, "encoder_num_layers": 24, "encoder_num_heads": 16, "encoder_attention_dropout": 0.1, "encoder_ff_interm_features": 4096, "encoder_ff_interm_dropout": 0.0, "encoder_dropout": 0.0, "encoder_layer_norm_first": True, "encoder_layer_drop": 0.0, "aux_num_out": 29, }, _labels=utils._get_en_labels(), _sample_rate=16000, ) WAV2VEC2_ASR_LARGE_LV60K_10M.__doc__ = """Build "large-lv60k" wav2vec2 model with an extra linear module Pre-trained on 60,000 hours of unlabeled audio from *Libri-Light* dataset [:footcite:`librilight`], and fine-tuned for ASR on 10 minutes of transcribed audio from the same dataset ("train-10min" subset). Originally published by the authors of *wav2vec 2.0* [:footcite:`baevski2020wav2vec`] under MIT License and redistributed with the same license. [`License <https://github.com/pytorch/fairseq/blob/ce6c9eeae163ac04b79539c78e74f292f29eaa18/LICENSE>`__, `Source <https://github.com/pytorch/fairseq/blob/ce6c9eeae163ac04b79539c78e74f292f29eaa18/examples/wav2vec#pre-trained-models>`__] Please refer to :func:`torchaudio.pipelines.Wav2Vec2ASRBundle` for the usage. """ # noqa: E501 WAV2VEC2_ASR_LARGE_LV60K_100H = Wav2Vec2ASRBundle( 'wav2vec2_fairseq_large_lv60k_asr_ls100.pth', { "extractor_mode": "layer_norm", "extractor_conv_layer_config": [ (512, 10, 5), (512, 3, 2), (512, 3, 2), (512, 3, 2), (512, 3, 2), (512, 2, 2), (512, 2, 2), ], "extractor_conv_bias": True, "encoder_embed_dim": 1024, "encoder_projection_dropout": 0.1, "encoder_pos_conv_kernel": 128, "encoder_pos_conv_groups": 16, "encoder_num_layers": 24, "encoder_num_heads": 16, "encoder_attention_dropout": 0.1, "encoder_ff_interm_features": 4096, "encoder_ff_interm_dropout": 0.0, "encoder_dropout": 0.0, "encoder_layer_norm_first": True, "encoder_layer_drop": 0.0, "aux_num_out": 29, }, _labels=utils._get_en_labels(), _sample_rate=16000, ) WAV2VEC2_ASR_LARGE_LV60K_100H.__doc__ = """Build "large-lv60k" wav2vec2 model with an extra linear module Pre-trained on 60,000 hours of unlabeled audio from *Libri-Light* dataset [:footcite:`librilight`], and fine-tuned for ASR on 100 hours of transcribed audio from *LibriSpeech* dataset [:footcite:`7178964`] ("train-clean-100" subset). Originally published by the authors of *wav2vec 2.0* [:footcite:`baevski2020wav2vec`] under MIT License and redistributed with the same license. [`License <https://github.com/pytorch/fairseq/blob/ce6c9eeae163ac04b79539c78e74f292f29eaa18/LICENSE>`__, `Source <https://github.com/pytorch/fairseq/blob/ce6c9eeae163ac04b79539c78e74f292f29eaa18/examples/wav2vec#pre-trained-models>`__] Please refer to :func:`torchaudio.pipelines.Wav2Vec2ASRBundle` for the usage. 
""" # noqa: E501 WAV2VEC2_ASR_LARGE_LV60K_960H = Wav2Vec2ASRBundle( 'wav2vec2_fairseq_large_lv60k_asr_ls960.pth', { "extractor_mode": "layer_norm", "extractor_conv_layer_config": [ (512, 10, 5), (512, 3, 2), (512, 3, 2), (512, 3, 2), (512, 3, 2), (512, 2, 2), (512, 2, 2), ], "extractor_conv_bias": True, "encoder_embed_dim": 1024, "encoder_projection_dropout": 0.1, "encoder_pos_conv_kernel": 128, "encoder_pos_conv_groups": 16, "encoder_num_layers": 24, "encoder_num_heads": 16, "encoder_attention_dropout": 0.1, "encoder_ff_interm_features": 4096, "encoder_ff_interm_dropout": 0.0, "encoder_dropout": 0.0, "encoder_layer_norm_first": True, "encoder_layer_drop": 0.0, "aux_num_out": 29, }, _labels=utils._get_en_labels(), _sample_rate=16000, ) WAV2VEC2_ASR_LARGE_LV60K_960H.__doc__ = """Build "large-lv60k" wav2vec2 model with an extra linear module Pre-trained on 60,000 hours of unlabeled audio from *Libri-Light* [:footcite:`librilight`] dataset, and fine-tuned for ASR on 960 hours of transcribed audio from *LibriSpeech* dataset [:footcite:`7178964`] (the combination of "train-clean-100", "train-clean-360", and "train-other-500"). Originally published by the authors of *wav2vec 2.0* [:footcite:`baevski2020wav2vec`] under MIT License and redistributed with the same license. [`License <https://github.com/pytorch/fairseq/blob/ce6c9eeae163ac04b79539c78e74f292f29eaa18/LICENSE>`__, `Source <https://github.com/pytorch/fairseq/blob/ce6c9eeae163ac04b79539c78e74f292f29eaa18/examples/wav2vec#pre-trained-models>`__] Please refer to :func:`torchaudio.pipelines.Wav2Vec2ASRBundle` for the usage. """ # noqa: E501 WAV2VEC2_XLSR53 = Wav2Vec2Bundle( 'wav2vec2_fairseq_large_xlsr53.pth', { "extractor_mode": "layer_norm", "extractor_conv_layer_config": [ (512, 10, 5), (512, 3, 2), (512, 3, 2), (512, 3, 2), (512, 3, 2), (512, 2, 2), (512, 2, 2), ], "extractor_conv_bias": True, "encoder_embed_dim": 1024, "encoder_projection_dropout": 0.0, "encoder_pos_conv_kernel": 128, "encoder_pos_conv_groups": 16, "encoder_num_layers": 24, "encoder_num_heads": 16, "encoder_attention_dropout": 0.0, "encoder_ff_interm_features": 4096, "encoder_ff_interm_dropout": 0.0, "encoder_dropout": 0.0, "encoder_layer_norm_first": True, "encoder_layer_drop": 0.0, "aux_num_out": None, }, _sample_rate=16000, ) WAV2VEC2_XLSR53.__doc__ = """wav2vec 2.0 model with "Base" configuration. Trained on 56,000 hours of unlabeled audio from multiple datasets ( *Multilingual LibriSpeech* [:footcite:`Pratap_2020`], *CommonVoice* [:footcite:`ardila2020common`] and *BABEL* [:footcite:`Gales2014SpeechRA`]). Not fine-tuned. Originally published by the authors of *Unsupervised Cross-lingual Representation Learning for Speech Recognition* [:footcite:`conneau2020unsupervised`] under MIT License and redistributed with the same license. [`License <https://github.com/pytorch/fairseq/blob/ce6c9eeae163ac04b79539c78e74f292f29eaa18/LICENSE>`__, `Source <https://github.com/pytorch/fairseq/blob/ce6c9eeae163ac04b79539c78e74f292f29eaa18/examples/wav2vec#pre-trained-models>`__] Please refer to :func:`torchaudio.pipelines.Wav2Vec2Bundle` for the usage. 
""" # noqa: E501 HUBERT_BASE = Wav2Vec2Bundle( 'hubert_fairseq_base_ls960.pth', { 'extractor_mode': 'group_norm', 'extractor_conv_layer_config': [ (512, 10, 5), (512, 3, 2), (512, 3, 2), (512, 3, 2), (512, 3, 2), (512, 2, 2), (512, 2, 2), ], 'extractor_conv_bias': False, 'encoder_embed_dim': 768, 'encoder_projection_dropout': 0.1, 'encoder_pos_conv_kernel': 128, 'encoder_pos_conv_groups': 16, 'encoder_num_layers': 12, 'encoder_num_heads': 12, 'encoder_attention_dropout': 0.1, 'encoder_ff_interm_features': 3072, 'encoder_ff_interm_dropout': 0.0, 'encoder_dropout': 0.1, 'encoder_layer_norm_first': False, 'encoder_layer_drop': 0.05, 'aux_num_out': None, }, _sample_rate=16000, ) HUBERT_BASE.__doc__ = """HuBERT model with "Base" configuration. Pre-trained on 960 hours of unlabeled audio from *LibriSpeech* dataset [:footcite:`7178964`] (the combination of "train-clean-100", "train-clean-360", and "train-other-500"). Not fine-tuned. Originally published by the authors of *HuBERT* [:footcite:`hsu2021hubert`] under MIT License and redistributed with the same license. [`License <https://github.com/pytorch/fairseq/blob/ce6c9eeae163ac04b79539c78e74f292f29eaa18/LICENSE>`__, `Source <https://github.com/pytorch/fairseq/blob/ce6c9eeae163ac04b79539c78e74f292f29eaa18/examples/hubert#pre-trained-and-fine-tuned-asr-models>`__] Please refer to :func:`torchaudio.pipelines.Wav2Vec2Bundle` for the usage. """ # noqa: E501 HUBERT_LARGE = Wav2Vec2Bundle( 'hubert_fairseq_large_ll60k.pth', { 'extractor_mode': 'layer_norm', 'extractor_conv_layer_config': [ (512, 10, 5), (512, 3, 2), (512, 3, 2), (512, 3, 2), (512, 3, 2), (512, 2, 2), (512, 2, 2), ], 'extractor_conv_bias': False, 'encoder_embed_dim': 1024, 'encoder_projection_dropout': 0.0, 'encoder_pos_conv_kernel': 128, 'encoder_pos_conv_groups': 16, 'encoder_num_layers': 24, 'encoder_num_heads': 16, 'encoder_attention_dropout': 0.0, 'encoder_ff_interm_features': 4096, 'encoder_ff_interm_dropout': 0.0, 'encoder_dropout': 0.0, 'encoder_layer_norm_first': True, 'encoder_layer_drop': 0.0, 'aux_num_out': None, }, _sample_rate=16000, ) HUBERT_LARGE.__doc__ = """HuBERT model with "Large" configuration. Pre-trained on 60,000 hours of unlabeled audio from *Libri-Light* dataset [:footcite:`librilight`]. Not fine-tuned. Originally published by the authors of *HuBERT* [:footcite:`hsu2021hubert`] under MIT License and redistributed with the same license. [`License <https://github.com/pytorch/fairseq/blob/ce6c9eeae163ac04b79539c78e74f292f29eaa18/LICENSE>`__, `Source <https://github.com/pytorch/fairseq/blob/ce6c9eeae163ac04b79539c78e74f292f29eaa18/examples/hubert#pre-trained-and-fine-tuned-asr-models>`__] Please refer to :func:`torchaudio.pipelines.Wav2Vec2Bundle` for the usage. """ # noqa: E501 HUBERT_XLARGE = Wav2Vec2Bundle( 'hubert_fairseq_xlarge_ll60k.pth', { 'extractor_mode': 'layer_norm', 'extractor_conv_layer_config': [ (512, 10, 5), (512, 3, 2), (512, 3, 2), (512, 3, 2), (512, 3, 2), (512, 2, 2), (512, 2, 2), ], 'extractor_conv_bias': False, 'encoder_embed_dim': 1280, 'encoder_projection_dropout': 0.0, 'encoder_pos_conv_kernel': 128, 'encoder_pos_conv_groups': 16, 'encoder_num_layers': 48, 'encoder_num_heads': 16, 'encoder_attention_dropout': 0.0, 'encoder_ff_interm_features': 5120, 'encoder_ff_interm_dropout': 0.0, 'encoder_dropout': 0.0, 'encoder_layer_norm_first': True, 'encoder_layer_drop': 0.0, 'aux_num_out': None, }, _sample_rate=16000, ) HUBERT_XLARGE.__doc__ = """HuBERT model with "Extra Large" configuration. 
Pre-trained on 60,000 hours of unlabeled audio from *Libri-Light* dataset [:footcite:`librilight`]. Not fine-tuned. Originally published by the authors of *HuBERT* [:footcite:`hsu2021hubert`] under MIT License and redistributed with the same license. [`License <https://github.com/pytorch/fairseq/blob/ce6c9eeae163ac04b79539c78e74f292f29eaa18/LICENSE>`__, `Source <https://github.com/pytorch/fairseq/blob/ce6c9eeae163ac04b79539c78e74f292f29eaa18/examples/hubert#pre-trained-and-fine-tuned-asr-models>`__] Please refer to :func:`torchaudio.pipelines.Wav2Vec2Bundle` for the usage. """ # noqa: E501 HUBERT_ASR_LARGE = Wav2Vec2ASRBundle( 'hubert_fairseq_large_ll60k_asr_ls960.pth', { 'extractor_mode': 'layer_norm', 'extractor_conv_layer_config': [ (512, 10, 5), (512, 3, 2), (512, 3, 2), (512, 3, 2), (512, 3, 2), (512, 2, 2), (512, 2, 2), ], 'extractor_conv_bias': False, 'encoder_embed_dim': 1024, 'encoder_projection_dropout': 0.0, 'encoder_pos_conv_kernel': 128, 'encoder_pos_conv_groups': 16, 'encoder_num_layers': 24, 'encoder_num_heads': 16, 'encoder_attention_dropout': 0.0, 'encoder_ff_interm_features': 4096, 'encoder_ff_interm_dropout': 0.1, 'encoder_dropout': 0.0, 'encoder_layer_norm_first': True, 'encoder_layer_drop': 0.1, 'aux_num_out': 29, }, _labels=utils._get_en_labels(), _sample_rate=16000, ) HUBERT_ASR_LARGE.__doc__ = """HuBERT model with "Large" configuration. Pre-trained on 60,000 hours of unlabeled audio from *Libri-Light* dataset [:footcite:`librilight`], and fine-tuned for ASR on 960 hours of transcribed audio from *LibriSpeech* dataset [:footcite:`7178964`] (the combination of "train-clean-100", "train-clean-360", and "train-other-500"). Originally published by the authors of *HuBERT* [:footcite:`hsu2021hubert`] under MIT License and redistributed with the same license. [`License <https://github.com/pytorch/fairseq/blob/ce6c9eeae163ac04b79539c78e74f292f29eaa18/LICENSE>`__, `Source <https://github.com/pytorch/fairseq/blob/ce6c9eeae163ac04b79539c78e74f292f29eaa18/examples/hubert#pre-trained-and-fine-tuned-asr-models>`__] Please refer to :func:`torchaudio.pipelines.Wav2Vec2ASRBundle` for the usage. """ # noqa: E501 HUBERT_ASR_XLARGE = Wav2Vec2ASRBundle( 'hubert_fairseq_xlarge_ll60k_asr_ls960.pth', { 'extractor_mode': 'layer_norm', 'extractor_conv_layer_config': [ (512, 10, 5), (512, 3, 2), (512, 3, 2), (512, 3, 2), (512, 3, 2), (512, 2, 2), (512, 2, 2), ], 'extractor_conv_bias': False, 'encoder_embed_dim': 1280, 'encoder_projection_dropout': 0.0, 'encoder_pos_conv_kernel': 128, 'encoder_pos_conv_groups': 16, 'encoder_num_layers': 48, 'encoder_num_heads': 16, 'encoder_attention_dropout': 0.0, 'encoder_ff_interm_features': 5120, 'encoder_ff_interm_dropout': 0.1, 'encoder_dropout': 0.0, 'encoder_layer_norm_first': True, 'encoder_layer_drop': 0.1, 'aux_num_out': 29, }, _labels=utils._get_en_labels(), _sample_rate=16000, ) HUBERT_ASR_XLARGE.__doc__ = """HuBERT model with "Extra Large" configuration. Pre-trained on 60,000 hours of unlabeled audio from *Libri-Light* dataset [:footcite:`librilight`], and fine-tuned for ASR on 960 hours of transcribed audio from *LibriSpeech* dataset [:footcite:`7178964`] (the combination of "train-clean-100", "train-clean-360", and "train-other-500"). Originally published by the authors of *HuBERT* [:footcite:`hsu2021hubert`] under MIT License and redistributed with the same license. 
[`License <https://github.com/pytorch/fairseq/blob/ce6c9eeae163ac04b79539c78e74f292f29eaa18/LICENSE>`__, `Source <https://github.com/pytorch/fairseq/blob/ce6c9eeae163ac04b79539c78e74f292f29eaa18/examples/hubert#pre-trained-and-fine-tuned-asr-models>`__] Please refer to :func:`torchaudio.pipelines.Wav2Vec2ASRBundle` for the usage. """ # noqa: E501 VOXPOPULI_ASR_BASE_10K_DE = Wav2Vec2ASRBundle( 'wav2vec2_voxpopuli_base_10k_asr_de.pt', { "extractor_mode": "group_norm", "extractor_conv_layer_config": [ (512, 10, 5), (512, 3, 2), (512, 3, 2), (512, 3, 2), (512, 3, 2), (512, 2, 2), (512, 2, 2), ], "extractor_conv_bias": False, "encoder_embed_dim": 768, "encoder_projection_dropout": 0.0, "encoder_pos_conv_kernel": 128, "encoder_pos_conv_groups": 16, "encoder_num_layers": 12, "encoder_num_heads": 12, "encoder_attention_dropout": 0.0, "encoder_ff_interm_features": 3072, "encoder_ff_interm_dropout": 0.1, "encoder_dropout": 0.0, "encoder_layer_norm_first": False, "encoder_layer_drop": 0.1, "aux_num_out": 32, }, _labels=utils._get_de_labels(), _sample_rate=16000, _remove_aux_axis=(1, 2, 3, 35), ) VOXPOPULI_ASR_BASE_10K_DE.__doc__ = """wav2vec 2.0 model with "Base" configuration. Pre-trained on 10k hours of unlabeled audio from *VoxPopuli* dataset [:footcite:`voxpopuli`] ("10k" subset, consisting of 23 languages). Fine-tuned for ASR on 282 hours of transcribed audio from "de" subset. Originally published by the authors of *VoxPopuli* [:footcite:`voxpopuli`] under CC BY-NC 4.0 and redistributed with the same license. [`License <https://github.com/facebookresearch/voxpopuli/tree/160e4d7915bad9f99b2c35b1d3833e51fd30abf2#license>`__, `Source <https://github.com/facebookresearch/voxpopuli/tree/160e4d7915bad9f99b2c35b1d3833e51fd30abf2#asr-and-lm>`__] Please refer to :func:`torchaudio.pipelines.Wav2Vec2ASRBundle` for the usage. """ # noqa: E501 VOXPOPULI_ASR_BASE_10K_EN = Wav2Vec2ASRBundle( 'wav2vec2_voxpopuli_base_10k_asr_en.pt', { "extractor_mode": "group_norm", "extractor_conv_layer_config": [ (512, 10, 5), (512, 3, 2), (512, 3, 2), (512, 3, 2), (512, 3, 2), (512, 2, 2), (512, 2, 2), ], "extractor_conv_bias": False, "encoder_embed_dim": 768, "encoder_projection_dropout": 0.0, "encoder_pos_conv_kernel": 128, "encoder_pos_conv_groups": 16, "encoder_num_layers": 12, "encoder_num_heads": 12, "encoder_attention_dropout": 0.0, "encoder_ff_interm_features": 3072, "encoder_ff_interm_dropout": 0.1, "encoder_dropout": 0.0, "encoder_layer_norm_first": False, "encoder_layer_drop": 0.1, "aux_num_out": 28 }, _labels=utils._get_vp_en_labels(), _sample_rate=16000, _remove_aux_axis=(1, 2, 3, 31), ) VOXPOPULI_ASR_BASE_10K_EN.__doc__ = """wav2vec 2.0 model with "Base" configuration. Pre-trained on 10k hours of unlabeled audio from *VoxPopuli* dataset [:footcite:`voxpopuli`] ("10k" subset, consisting of 23 languages). Fine-tuned for ASR on 543 hours of transcribed audio from "en" subset. Originally published by the authors of *VoxPopuli* [:footcite:`voxpopuli`] under CC BY-NC 4.0 and redistributed with the same license. [`License <https://github.com/facebookresearch/voxpopuli/tree/160e4d7915bad9f99b2c35b1d3833e51fd30abf2#license>`__, `Source <https://github.com/facebookresearch/voxpopuli/tree/160e4d7915bad9f99b2c35b1d3833e51fd30abf2#asr-and-lm>`__] Please refer to :func:`torchaudio.pipelines.Wav2Vec2ASRBundle` for the usage. 
""" # noqa: E501 VOXPOPULI_ASR_BASE_10K_ES = Wav2Vec2ASRBundle( 'wav2vec2_voxpopuli_base_10k_asr_es.pt', { "extractor_mode": "group_norm", "extractor_conv_layer_config": [ (512, 10, 5), (512, 3, 2), (512, 3, 2), (512, 3, 2), (512, 3, 2), (512, 2, 2), (512, 2, 2), ], "extractor_conv_bias": False, "encoder_embed_dim": 768, "encoder_projection_dropout": 0.0, "encoder_pos_conv_kernel": 128, "encoder_pos_conv_groups": 16, "encoder_num_layers": 12, "encoder_num_heads": 12, "encoder_attention_dropout": 0.0, "encoder_ff_interm_features": 3072, "encoder_ff_interm_dropout": 0.1, "encoder_dropout": 0.0, "encoder_layer_norm_first": False, "encoder_layer_drop": 0.1, "aux_num_out": 35 }, _labels=utils._get_es_labels(), _sample_rate=16000, _remove_aux_axis=(1, 2, 3, 35), ) VOXPOPULI_ASR_BASE_10K_ES.__doc__ = """wav2vec 2.0 model with "Base" configuration. Pre-trained on 10k hours of unlabeled audio from *VoxPopuli* dataset [:footcite:`voxpopuli`] ("10k" subset, consisting of 23 languages). Fine-tuned for ASR on 166 hours of transcribed audio from "es" subset. Originally published by the authors of *VoxPopuli* [:footcite:`voxpopuli`] under CC BY-NC 4.0 and redistributed with the same license. [`License <https://github.com/facebookresearch/voxpopuli/tree/160e4d7915bad9f99b2c35b1d3833e51fd30abf2#license>`__, `Source <https://github.com/facebookresearch/voxpopuli/tree/160e4d7915bad9f99b2c35b1d3833e51fd30abf2#asr-and-lm>`__] Please refer to :func:`torchaudio.pipelines.Wav2Vec2ASRBundle` for the usage. """ # noqa: E501 VOXPOPULI_ASR_BASE_10K_FR = Wav2Vec2ASRBundle( 'wav2vec2_voxpopuli_base_10k_asr_fr.pt', { "extractor_mode": "group_norm", "extractor_conv_layer_config": [ (512, 10, 5), (512, 3, 2), (512, 3, 2), (512, 3, 2), (512, 3, 2), (512, 2, 2), (512, 2, 2), ], "extractor_conv_bias": False, "encoder_embed_dim": 768, "encoder_projection_dropout": 0.0, "encoder_pos_conv_kernel": 128, "encoder_pos_conv_groups": 16, "encoder_num_layers": 12, "encoder_num_heads": 12, "encoder_attention_dropout": 0.0, "encoder_ff_interm_features": 3072, "encoder_ff_interm_dropout": 0.1, "encoder_dropout": 0.0, "encoder_layer_norm_first": False, "encoder_layer_drop": 0.1, "aux_num_out": 43 }, _labels=utils._get_fr_labels(), _sample_rate=16000, ) VOXPOPULI_ASR_BASE_10K_FR.__doc__ = """wav2vec 2.0 model with "Base" configuration. Pre-trained on 10k hours of unlabeled audio from *VoxPopuli* dataset [:footcite:`voxpopuli`] ("10k" subset, consisting of 23 languages). Fine-tuned for ASR on 211 hours of transcribed audio from "fr" subset. Originally published by the authors of *VoxPopuli* [:footcite:`voxpopuli`] under CC BY-NC 4.0 and redistributed with the same license. [`License <https://github.com/facebookresearch/voxpopuli/tree/160e4d7915bad9f99b2c35b1d3833e51fd30abf2#license>`__, `Source <https://github.com/facebookresearch/voxpopuli/tree/160e4d7915bad9f99b2c35b1d3833e51fd30abf2#asr-and-lm>`__] Please refer to :func:`torchaudio.pipelines.Wav2Vec2ASRBundle` for the usage. 
""" # noqa: E501 VOXPOPULI_ASR_BASE_10K_IT = Wav2Vec2ASRBundle( 'wav2vec2_voxpopuli_base_10k_asr_it.pt', { "extractor_mode": "group_norm", "extractor_conv_layer_config": [ (512, 10, 5), (512, 3, 2), (512, 3, 2), (512, 3, 2), (512, 3, 2), (512, 2, 2), (512, 2, 2), ], "extractor_conv_bias": False, "encoder_embed_dim": 768, "encoder_projection_dropout": 0.0, "encoder_pos_conv_kernel": 128, "encoder_pos_conv_groups": 16, "encoder_num_layers": 12, "encoder_num_heads": 12, "encoder_attention_dropout": 0.0, "encoder_ff_interm_features": 3072, "encoder_ff_interm_dropout": 0.1, "encoder_dropout": 0.0, "encoder_layer_norm_first": False, "encoder_layer_drop": 0.1, "aux_num_out": 37, }, _labels=utils._get_it_labels(), _sample_rate=16000, _remove_aux_axis=(1, 2, 3), ) VOXPOPULI_ASR_BASE_10K_IT.__doc__ = """wav2vec 2.0 model with "Base" configuration. Pre-trained on 10k hours of unlabeled audio from *VoxPopuli* dataset [:footcite:`voxpopuli`] ("10k" subset, consisting of 23 languages). Fine-tuned for ASR on 91 hours of transcribed audio from "it" subset. Originally published by the authors of *VoxPopuli* [:footcite:`voxpopuli`] under CC BY-NC 4.0 and redistributed with the same license. [`License <https://github.com/facebookresearch/voxpopuli/tree/160e4d7915bad9f99b2c35b1d3833e51fd30abf2#license>`__, `Source <https://github.com/facebookresearch/voxpopuli/tree/160e4d7915bad9f99b2c35b1d3833e51fd30abf2#asr-and-lm>`__] Please refer to :func:`torchaudio.pipelines.Wav2Vec2ASRBundle` for the usage. """ # noqa: E501
from abc import ABC, abstractmethod from typing import Union, List, Tuple, Optional from torch import Tensor from torchaudio.models import Tacotron2 class _TextProcessor(ABC): @property @abstractmethod def tokens(self): """The tokens that the each value in the processed tensor represent. See :func:`torchaudio.pipelines.Tacotron2TTSBundle.get_text_processor` for the usage. :type: List[str] """ @abstractmethod def __call__(self, texts: Union[str, List[str]]) -> Tuple[Tensor, Tensor]: """Encode the given (batch of) texts into numerical tensors See :func:`torchaudio.pipelines.Tacotron2TTSBundle.get_text_processor` for the usage. Args: text (str or list of str): The input texts. Returns: (Tensor, Tensor): Tensor: The encoded texts. Shape: `(batch, max length)` Tensor: The valid length of each sample in the batch. Shape: `(batch, )`. """ class _Vocoder(ABC): @property @abstractmethod def sample_rate(self): """The sample rate of the resulting waveform See :func:`torchaudio.pipelines.Tacotron2TTSBundle.get_vocoder` for the usage. :type: float """ @abstractmethod def __call__(self, specgrams: Tensor, lengths: Optional[Tensor] = None) -> Tuple[Tensor, Optional[Tensor]]: """Generate waveform from the given input, such as spectrogram See :func:`torchaudio.pipelines.Tacotron2TTSBundle.get_vocoder` for the usage. Args: specgrams (Tensor): The input spectrogram. Shape: `(batch, frequency bins, time)`. The expected shape depends on the implementation. lengths (Tensor, or None, optional): The valid length of each sample in the batch. Shape: `(batch, )`. (Default: `None`) Returns: (Tensor, Optional[Tensor]): Tensor: The generated waveform. Shape: `(batch, max length)` Tensor or None: The valid length of each sample in the batch. Shape: `(batch, )`. """ class Tacotron2TTSBundle(ABC): """Data class that bundles associated information to use pretrained Tacotron2 and vocoder. This class provides interfaces for instantiating the pretrained model along with the information necessary to retrieve pretrained weights and additional data to be used with the model. Torchaudio library instantiates objects of this class, each of which represents a different pretrained model. Client code should access pretrained models via these instances. Please see below for the usage and the available values. Example - Character-based TTS pipeline with Tacotron2 and WaveRNN >>> import torchaudio >>> >>> text = "Hello, T T S !" >>> bundle = torchaudio.pipelines.TACOTRON2_WAVERNN_CHAR_LJSPEECH >>> >>> # Build processor, Tacotron2 and WaveRNN model >>> processor = bundle.get_text_processor() >>> tacotron2 = bundle.get_tacotron2() Downloading: 100%|███████████████████████████████| 107M/107M [00:01<00:00, 87.9MB/s] >>> vocoder = bundle.get_vocoder() Downloading: 100%|███████████████████████████████| 16.7M/16.7M [00:00<00:00, 78.1MB/s] >>> >>> # Encode text >>> input, lengths = processor(text) >>> >>> # Generate (mel-scale) spectrogram >>> specgram, lengths, _ = tacotron2.infer(input, lengths) >>> >>> # Convert spectrogram to waveform >>> waveforms, lengths = vocoder(specgram, lengths) >>> >>> torchaudio.save('hello-tts.wav', waveforms, vocoder.sample_rate) Example - Phoneme-based TTS pipeline with Tacotron2 and WaveRNN >>> >>> # Note: >>> # This bundle uses pre-trained DeepPhonemizer as >>> # the text pre-processor. >>> # Please install deep-phonemizer. >>> # See https://github.com/as-ideas/DeepPhonemizer >>> # The pretrained weight is automatically downloaded. >>> >>> import torchaudio >>> >>> text = "Hello, TTS!" 
>>> bundle = torchaudio.pipelines.TACOTRON2_WAVERNN_PHONE_LJSPEECH >>> >>> # Build processor, Tacotron2 and WaveRNN model >>> processor = bundle.get_text_processor() Downloading: 100%|███████████████████████████████| 63.6M/63.6M [00:04<00:00, 15.3MB/s] >>> tacotron2 = bundle.get_tacotron2() Downloading: 100%|███████████████████████████████| 107M/107M [00:01<00:00, 87.9MB/s] >>> vocoder = bundle.get_vocoder() Downloading: 100%|███████████████████████████████| 16.7M/16.7M [00:00<00:00, 78.1MB/s] >>> >>> # Encode text >>> input, lengths = processor(text) >>> >>> # Generate (mel-scale) spectrogram >>> specgram, lengths, _ = tacotron2.infer(input, lengths) >>> >>> # Convert spectrogram to waveform >>> waveforms, lengths = vocoder(specgram, lengths) >>> >>> torchaudio.save('hello-tts.wav', waveforms, vocoder.sample_rate) """ # Using the inner class so that these interfaces are not directly exposed on # `torchaudio.pipelines`, but still listed in documentation. # The thing is, text processing and vocoder are generic and we do not know what kind of # new text processing and vocoder will be added in the future, so we want to make these # interfaces specific to this Tacotron2TTS pipeline. class TextProcessor(_TextProcessor): """Interface of the text processing part of Tacotron2TTS pipeline See :func:`torchaudio.pipelines.Tacotron2TTSBundle.get_text_processor` for the usage. """ class Vocoder(_Vocoder): """Interface of the vocoder part of Tacotron2TTS pipeline See :func:`torchaudio.pipelines.Tacotron2TTSBundle.get_vocoder` for the usage. """ @abstractmethod def get_text_processor(self, *, dl_kwargs=None) -> TextProcessor: # Overriding the signature so that the return type is correct on Sphinx """get_text_processor(self, *, dl_kwargs=None) -> torchaudio.pipelines.Tacotron2TTSBundle.TextProcessor Create a text processor For character-based pipeline, this processor splits the input text by character. For phoneme-based pipeline, this processor converts the input text (grapheme) to phonemes. If a pre-trained weight file is necessary, :func:`torch.hub.download_url_to_file` is used to downloaded it. Args: dl_kwargs (dictionary of keyword arguments,): Passed to :func:`torch.hub.download_url_to_file`. Returns: TTSTextProcessor: A callable which takes a string or a list of strings as input and returns Tensor of encoded texts and Tensor of valid lengths. The object also has ``tokens`` property, which allows to recover the tokenized form. 
Example - Character-based >>> text = [ >>> "Hello World!", >>> "Text-to-speech!", >>> ] >>> bundle = torchaudio.pipelines.TACOTRON2_WAVERNN_CHAR_LJSPEECH >>> processor = bundle.get_text_processor() >>> input, lengths = processor(text) >>> >>> print(input) tensor([[19, 16, 23, 23, 26, 11, 34, 26, 29, 23, 15, 2, 0, 0, 0], [31, 16, 35, 31, 1, 31, 26, 1, 30, 27, 16, 16, 14, 19, 2]], dtype=torch.int32) >>> >>> print(lengths) tensor([12, 15], dtype=torch.int32) >>> >>> print([processor.tokens[i] for i in input[0, :lengths[0]]]) ['h', 'e', 'l', 'l', 'o', ' ', 'w', 'o', 'r', 'l', 'd', '!'] >>> print([processor.tokens[i] for i in input[1, :lengths[1]]]) ['t', 'e', 'x', 't', '-', 't', 'o', '-', 's', 'p', 'e', 'e', 'c', 'h', '!'] Example - Phoneme-based >>> text = [ >>> "Hello, T T S !", >>> "Text-to-speech!", >>> ] >>> bundle = torchaudio.pipelines.TACOTRON2_WAVERNN_PHONE_LJSPEECH >>> processor = bundle.get_text_processor() Downloading: 100%|███████████████████████████████| 63.6M/63.6M [00:04<00:00, 15.3MB/s] >>> input, lengths = processor(text) >>> >>> print(input) tensor([[54, 20, 65, 69, 11, 92, 44, 65, 38, 2, 0, 0, 0, 0], [81, 40, 64, 79, 81, 1, 81, 20, 1, 79, 77, 59, 37, 2]], dtype=torch.int32) >>> >>> print(lengths) tensor([10, 14], dtype=torch.int32) >>> >>> print([processor.tokens[i] for i in input[0]]) ['HH', 'AH', 'L', 'OW', ' ', 'W', 'ER', 'L', 'D', '!', '_', '_', '_', '_'] >>> print([processor.tokens[i] for i in input[1]]) ['T', 'EH', 'K', 'S', 'T', '-', 'T', 'AH', '-', 'S', 'P', 'IY', 'CH', '!'] """ @abstractmethod def get_vocoder(self, *, dl_kwargs=None) -> Vocoder: # Overriding the signature so that the return type is correct on Sphinx """get_vocoder(self, *, dl_kwargs=None) -> torchaudio.pipelines.Tacotron2TTSBundle.Vocoder Create a vocoder module, based off of either WaveRNN or GriffinLim. If a pre-trained weight file is necessary, :func:`torch.hub.load_state_dict_from_url` is used to downloaded it. Args: dl_kwargs (dictionary of keyword arguments): Passed to :func:`torch.hub.load_state_dict_from_url`. Returns: Callable[[Tensor, Optional[Tensor]], Tuple[Tensor, Optional[Tensor]]]: A vocoder module, which takes spectrogram Tensor and an optional length Tensor, then returns resulting waveform Tensor and an optional length Tensor. """ @abstractmethod def get_tacotron2(self, *, dl_kwargs=None) -> Tacotron2: # Overriding the signature so that the return type is correct on Sphinx """get_tacotron2(self, *, dl_kwargs=None) -> torchaudio.models.Tacotron2 Create a Tacotron2 model with pre-trained weight. Args: dl_kwargs (dictionary of keyword arguments): Passed to :func:`torch.hub.load_state_dict_from_url`. Returns: Tacotron2: The resulting model. """
from .interface import Tacotron2TTSBundle from .impl import ( TACOTRON2_GRIFFINLIM_CHAR_LJSPEECH, TACOTRON2_GRIFFINLIM_PHONE_LJSPEECH, TACOTRON2_WAVERNN_CHAR_LJSPEECH, TACOTRON2_WAVERNN_PHONE_LJSPEECH, ) __all__ = [ 'Tacotron2TTSBundle', 'TACOTRON2_GRIFFINLIM_CHAR_LJSPEECH', 'TACOTRON2_GRIFFINLIM_PHONE_LJSPEECH', 'TACOTRON2_WAVERNN_CHAR_LJSPEECH', 'TACOTRON2_WAVERNN_PHONE_LJSPEECH', ]
import os import logging import torch from torchaudio._internal import ( download_url_to_file, module_utils as _mod_utils, ) def _get_chars(): return ( '_', '-', '!', "'", '(', ')', ',', '.', ':', ';', '?', ' ', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z', ) def _get_phones(): return ( "_", "-", "!", "'", "(", ")", ",", ".", ":", ";", "?", " ", "AA", "AA0", "AA1", "AA2", "AE", "AE0", "AE1", "AE2", "AH", "AH0", "AH1", "AH2", "AO", "AO0", "AO1", "AO2", "AW", "AW0", "AW1", "AW2", "AY", "AY0", "AY1", "AY2", "B", "CH", "D", "DH", "EH", "EH0", "EH1", "EH2", "ER", "ER0", "ER1", "ER2", "EY", "EY0", "EY1", "EY2", "F", "G", "HH", "IH", "IH0", "IH1", "IH2", "IY", "IY0", "IY1", "IY2", "JH", "K", "L", "M", "N", "NG", "OW", "OW0", "OW1", "OW2", "OY", "OY0", "OY1", "OY2", "P", "R", "S", "SH", "T", "TH", "UH", "UH0", "UH1", "UH2", "UW", "UW0", "UW1", "UW2", "V", "W", "Y", "Z", "ZH" ) def _to_tensor(indices): lengths = torch.tensor([len(i) for i in indices], dtype=torch.int32) values = [torch.tensor(i) for i in indices] values = torch.nn.utils.rnn.pad_sequence(values, batch_first=True) return values, lengths def _load_phonemizer(file, dl_kwargs): if not _mod_utils.is_module_available('dp'): raise RuntimeError('DeepPhonemizer is not installed. Please install it.') from dp.phonemizer import Phonemizer # By default, dp issues DEBUG level log. logger = logging.getLogger('dp') orig_level = logger.level logger.setLevel(logging.INFO) try: url = f'https://public-asai-dl-models.s3.eu-central-1.amazonaws.com/DeepPhonemizer/{file}' directory = os.path.join(torch.hub.get_dir(), 'checkpoints') os.makedirs(directory, exist_ok=True) path = os.path.join(directory, file) if not os.path.exists(path): dl_kwargs = {} if dl_kwargs is None else dl_kwargs download_url_to_file(url, path, **dl_kwargs) return Phonemizer.from_checkpoint(path) finally: logger.setLevel(orig_level) def _unnormalize_waveform(waveform: torch.Tensor, bits: int) -> torch.Tensor: r"""Transform waveform [-1, 1] to label [0, 2 ** bits - 1]""" waveform = torch.clamp(waveform, -1, 1) waveform = (waveform + 1.0) * (2 ** bits - 1) / 2 return torch.clamp(waveform, 0, 2 ** bits - 1).int() def _get_taco_params(n_symbols): return { 'mask_padding': False, 'n_mels': 80, 'n_frames_per_step': 1, 'symbol_embedding_dim': 512, 'encoder_embedding_dim': 512, 'encoder_n_convolution': 3, 'encoder_kernel_size': 5, 'decoder_rnn_dim': 1024, 'decoder_max_step': 2000, 'decoder_dropout': 0.1, 'decoder_early_stopping': True, 'attention_rnn_dim': 1024, 'attention_hidden_dim': 128, 'attention_location_n_filter': 32, 'attention_location_kernel_size': 31, 'attention_dropout': 0.1, 'prenet_dim': 256, 'postnet_n_convolution': 5, 'postnet_kernel_size': 5, 'postnet_embedding_dim': 512, 'gate_threshold': 0.5, 'n_symbol': n_symbols, } def _get_wrnn_params(): return { 'upsample_scales': [5, 5, 11], 'n_classes': 2 ** 8, # n_bits = 8 'hop_length': 275, 'n_res_block': 10, 'n_rnn': 512, 'n_fc': 512, 'kernel_size': 5, 'n_freq': 80, 'n_hidden': 128, 'n_output': 128 }
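A brief illustration (not part of the library source) of two helpers defined above: ``_to_tensor`` pads variable-length index sequences into a batch, and ``_unnormalize_waveform`` maps a ``[-1, 1]`` waveform onto integer labels. The import path is an assumption about where this private module lives inside torchaudio and may change between releases.

import torch
from torchaudio.pipelines._tts import utils  # assumed location of this private module

values, lengths = utils._to_tensor([[1, 2, 3], [4, 5]])
print(values)   # tensor([[1, 2, 3], [4, 5, 0]]) -- the shorter sequence is zero-padded
print(lengths)  # tensor([3, 2], dtype=torch.int32)

wave = torch.tensor([-1.0, 0.0, 1.0])
print(utils._unnormalize_waveform(wave, 8))  # tensor([  0, 127, 255], dtype=torch.int32) for 8-bit labels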
from dataclasses import dataclass import re from typing import Union, Optional, Dict, Any, Tuple, List import torch from torch import Tensor from torchaudio._internal import load_state_dict_from_url from torchaudio.models import Tacotron2, WaveRNN from torchaudio.functional import mu_law_decoding from torchaudio.transforms import InverseMelScale, GriffinLim from . import utils from .interface import Tacotron2TTSBundle __all__ = [] _BASE_URL = 'https://download.pytorch.org/torchaudio/models' ################################################################################ # Pipeline implementation - Text Processor ################################################################################ class _EnglishCharProcessor(Tacotron2TTSBundle.TextProcessor): def __init__(self): super().__init__() self._tokens = utils._get_chars() self._mapping = {s: i for i, s in enumerate(self._tokens)} @property def tokens(self): return self._tokens def __call__(self, texts: Union[str, List[str]]) -> Tuple[Tensor, Tensor]: if isinstance(texts, str): texts = [texts] indices = [[self._mapping[c] for c in t.lower() if c in self._mapping] for t in texts] return utils._to_tensor(indices) class _EnglishPhoneProcessor(Tacotron2TTSBundle.TextProcessor): def __init__(self, *, dl_kwargs=None): super().__init__() self._tokens = utils._get_phones() self._mapping = {p: i for i, p in enumerate(self._tokens)} self._phonemizer = utils._load_phonemizer( 'en_us_cmudict_forward.pt', dl_kwargs=dl_kwargs) self._pattern = r"(\[[A-Z]+?\]|[_!'(),.:;? -])" @property def tokens(self): return self._tokens def __call__(self, texts: Union[str, List[str]]) -> Tuple[Tensor, Tensor]: if isinstance(texts, str): texts = [texts] indices = [] for phones in self._phonemizer(texts, lang='en_us'): # '[F][UW][B][AA][R]!' 
-> ['F', 'UW', 'B', 'AA', 'R', '!'] ret = [re.sub(r'[\[\]]', '', r) for r in re.findall(self._pattern, phones)] indices.append([self._mapping[p] for p in ret]) return utils._to_tensor(indices) ################################################################################ # Pipeline implementation - Vocoder ################################################################################ class _WaveRNNVocoder(torch.nn.Module, Tacotron2TTSBundle.Vocoder): def __init__( self, model: WaveRNN, min_level_db: Optional[float] = -100 ): super().__init__() self._sample_rate = 22050 self._model = model self._min_level_db = min_level_db @property def sample_rate(self): return self._sample_rate def forward(self, mel_spec, lengths=None): mel_spec = torch.exp(mel_spec) mel_spec = 20 * torch.log10(torch.clamp(mel_spec, min=1e-5)) if self._min_level_db is not None: mel_spec = (self._min_level_db - mel_spec) / self._min_level_db mel_spec = torch.clamp(mel_spec, min=0, max=1) waveform, lengths = self._model.infer(mel_spec, lengths) waveform = utils._unnormalize_waveform(waveform, self._model.n_bits) waveform = mu_law_decoding(waveform, self._model.n_classes) waveform = waveform.squeeze(1) return waveform, lengths class _GriffinLimVocoder(torch.nn.Module, Tacotron2TTSBundle.Vocoder): def __init__(self): super().__init__() self._sample_rate = 22050 self._inv_mel = InverseMelScale( n_stft=(1024 // 2 + 1), n_mels=80, sample_rate=self.sample_rate, f_min=0., f_max=8000., mel_scale="slaney", norm='slaney', ) self._griffin_lim = GriffinLim( n_fft=1024, power=1, hop_length=256, win_length=1024, ) @property def sample_rate(self): return self._sample_rate def forward(self, mel_spec, lengths=None): mel_spec = torch.exp(mel_spec) mel_spec = mel_spec.clone().detach().requires_grad_(True) spec = self._inv_mel(mel_spec) spec = spec.detach().requires_grad_(False) waveforms = self._griffin_lim(spec) return waveforms, lengths ################################################################################ # Bundle classes mixins ################################################################################ class _CharMixin: def get_text_processor(self) -> Tacotron2TTSBundle.TextProcessor: return _EnglishCharProcessor() class _PhoneMixin: def get_text_processor(self, *, dl_kwargs=None) -> Tacotron2TTSBundle.TextProcessor: return _EnglishPhoneProcessor(dl_kwargs=dl_kwargs) @dataclass class _Tacotron2Mixin: _tacotron2_path: str _tacotron2_params: Dict[str, Any] def get_tacotron2(self, *, dl_kwargs=None) -> Tacotron2: model = Tacotron2(**self._tacotron2_params) url = f'{_BASE_URL}/{self._tacotron2_path}' dl_kwargs = {} if dl_kwargs is None else dl_kwargs state_dict = load_state_dict_from_url(url, **dl_kwargs) model.load_state_dict(state_dict) model.eval() return model @dataclass class _WaveRNNMixin: _wavernn_path: Optional[str] _wavernn_params: Optional[Dict[str, Any]] def get_vocoder(self, *, dl_kwargs=None): wavernn = self._get_wavernn(dl_kwargs=dl_kwargs) return _WaveRNNVocoder(wavernn) def _get_wavernn(self, *, dl_kwargs=None): model = WaveRNN(**self._wavernn_params) url = f'{_BASE_URL}/{self._wavernn_path}' dl_kwargs = {} if dl_kwargs is None else dl_kwargs state_dict = load_state_dict_from_url(url, **dl_kwargs) model.load_state_dict(state_dict) model.eval() return model class _GriffinLimMixin: def get_vocoder(self, **_): return _GriffinLimVocoder() ################################################################################ # Bundle classes 
################################################################################ @dataclass class _Tacotron2WaveRNNCharBundle(_WaveRNNMixin, _Tacotron2Mixin, _CharMixin, Tacotron2TTSBundle): pass @dataclass class _Tacotron2WaveRNNPhoneBundle(_WaveRNNMixin, _Tacotron2Mixin, _PhoneMixin, Tacotron2TTSBundle): pass @dataclass class _Tacotron2GriffinLimCharBundle(_GriffinLimMixin, _Tacotron2Mixin, _CharMixin, Tacotron2TTSBundle): pass @dataclass class _Tacotron2GriffinLimPhoneBundle(_GriffinLimMixin, _Tacotron2Mixin, _PhoneMixin, Tacotron2TTSBundle): pass ################################################################################ # Instantiate bundle objects ################################################################################ TACOTRON2_GRIFFINLIM_CHAR_LJSPEECH = _Tacotron2GriffinLimCharBundle( _tacotron2_path='tacotron2_english_characters_1500_epochs_ljspeech.pth', _tacotron2_params=utils._get_taco_params(n_symbols=38), ) TACOTRON2_GRIFFINLIM_CHAR_LJSPEECH.__doc__ = ( '''Character-based TTS pipeline with :py:class:`torchaudio.models.Tacotron2` and :py:class:`torchaudio.transforms.GriffinLim`. The text processor encodes the input texts character-by-character. Tacotron2 was trained on *LJSpeech* [:footcite:`ljspeech17`] for 1,500 epochs. You can find the training script `here <https://github.com/pytorch/audio/tree/main/examples/pipeline_tacotron2>`__. The default parameters were used. The vocoder is based on :py:class:`torchaudio.transforms.GriffinLim`. Please refer to :func:`torchaudio.pipelines.Tacotron2TTSBundle` for the usage. Example - "Hello world! T T S stands for Text to Speech!" .. image:: https://download.pytorch.org/torchaudio/doc-assets/TACOTRON2_GRIFFINLIM_CHAR_LJSPEECH.png :alt: Spectrogram generated by Tacotron2 .. raw:: html <audio controls="controls"> <source src="https://download.pytorch.org/torchaudio/doc-assets/TACOTRON2_GRIFFINLIM_CHAR_LJSPEECH.wav" type="audio/wav"> Your browser does not support the <code>audio</code> element. </audio> Example - "The examination and testimony of the experts enabled the Commission to conclude that five shots may have been fired," .. image:: https://download.pytorch.org/torchaudio/doc-assets/TACOTRON2_GRIFFINLIM_CHAR_LJSPEECH_v2.png :alt: Spectrogram generated by Tacotron2 .. raw:: html <audio controls="controls"> <source src="https://download.pytorch.org/torchaudio/doc-assets/TACOTRON2_GRIFFINLIM_CHAR_LJSPEECH_v2.wav" type="audio/wav"> Your browser does not support the <code>audio</code> element. </audio> ''') # noqa: E501 TACOTRON2_GRIFFINLIM_PHONE_LJSPEECH = _Tacotron2GriffinLimPhoneBundle( _tacotron2_path='tacotron2_english_phonemes_1500_epochs_ljspeech.pth', _tacotron2_params=utils._get_taco_params(n_symbols=96), ) TACOTRON2_GRIFFINLIM_PHONE_LJSPEECH.__doc__ = ( '''Phoneme-based TTS pipeline with :py:class:`torchaudio.models.Tacotron2` and :py:class:`torchaudio.transforms.GriffinLim`. The text processor encodes the input texts based on phoneme. It uses `DeepPhonemizer <https://github.com/as-ideas/DeepPhonemizer>`__ to convert graphemes to phonemes. The model (*en_us_cmudict_forward*) was trained on `CMUDict <http://www.speech.cs.cmu.edu/cgi-bin/cmudict>`__. Tacotron2 was trained on *LJSpeech* [:footcite:`ljspeech17`] for 1,500 epochs. You can find the training script `here <https://github.com/pytorch/audio/tree/main/examples/pipeline_tacotron2>`__. The text processor is set to the *"english_phonemes"*. The vocoder is based on :py:class:`torchaudio.transforms.GriffinLim`. 
Please refer to :func:`torchaudio.pipelines.Tacotron2TTSBundle` for the usage. Example - "Hello world! T T S stands for Text to Speech!" .. image:: https://download.pytorch.org/torchaudio/doc-assets/TACOTRON2_GRIFFINLIM_PHONE_LJSPEECH.png :alt: Spectrogram generated by Tacotron2 .. raw:: html <audio controls="controls"> <source src="https://download.pytorch.org/torchaudio/doc-assets/TACOTRON2_GRIFFINLIM_PHONE_LJSPEECH.wav" type="audio/wav"> Your browser does not support the <code>audio</code> element. </audio> Example - "The examination and testimony of the experts enabled the Commission to conclude that five shots may have been fired," .. image:: https://download.pytorch.org/torchaudio/doc-assets/TACOTRON2_GRIFFINLIM_PHONE_LJSPEECH_v2.png :alt: Spectrogram generated by Tacotron2 .. raw:: html <audio controls="controls"> <source src="https://download.pytorch.org/torchaudio/doc-assets/TACOTRON2_GRIFFINLIM_PHONE_LJSPEECH_v2.wav" type="audio/wav"> Your browser does not support the <code>audio</code> element. </audio> ''') # noqa: E501 TACOTRON2_WAVERNN_CHAR_LJSPEECH = _Tacotron2WaveRNNCharBundle( _tacotron2_path='tacotron2_english_characters_1500_epochs_wavernn_ljspeech.pth', _tacotron2_params=utils._get_taco_params(n_symbols=38), _wavernn_path='wavernn_10k_epochs_8bits_ljspeech.pth', _wavernn_params=utils._get_wrnn_params(), ) TACOTRON2_WAVERNN_CHAR_LJSPEECH.__doc__ = ( '''Character-based TTS pipeline with :py:class:`torchaudio.models.Tacotron2` and :py:class:`torchaudio.models.WaveRNN`. The text processor encodes the input texts character-by-character. Tacotron2 was trained on *LJSpeech* [:footcite:`ljspeech17`] for 1,500 epochs. You can find the training script `here <https://github.com/pytorch/audio/tree/main/examples/pipeline_tacotron2>`__. The following parameters were used; ``win_length=1100``, ``hop_length=275``, ``n_fft=2048``, ``mel_fmin=40``, and ``mel_fmax=11025``. The vocder is based on :py:class:`torchaudio.models.WaveRNN`. It was trained on 8 bits depth waveform of *LJSpeech* [:footcite:`ljspeech17`] for 10,000 epochs. You can find the training script `here <https://github.com/pytorch/audio/tree/main/examples/pipeline_wavernn>`__. Please refer to :func:`torchaudio.pipelines.Tacotron2TTSBundle` for the usage. Example - "Hello world! T T S stands for Text to Speech!" .. image:: https://download.pytorch.org/torchaudio/doc-assets/TACOTRON2_WAVERNN_CHAR_LJSPEECH.png :alt: Spectrogram generated by Tacotron2 .. raw:: html <audio controls="controls"> <source src="https://download.pytorch.org/torchaudio/doc-assets/TACOTRON2_WAVERNN_CHAR_LJSPEECH.wav" type="audio/wav"> Your browser does not support the <code>audio</code> element. </audio> Example - "The examination and testimony of the experts enabled the Commission to conclude that five shots may have been fired," .. image:: https://download.pytorch.org/torchaudio/doc-assets/TACOTRON2_WAVERNN_CHAR_LJSPEECH_v2.png :alt: Spectrogram generated by Tacotron2 .. raw:: html <audio controls="controls"> <source src="https://download.pytorch.org/torchaudio/doc-assets/TACOTRON2_WAVERNN_CHAR_LJSPEECH_v2.wav" type="audio/wav"> Your browser does not support the <code>audio</code> element. 
</audio> ''') # noqa: E501 TACOTRON2_WAVERNN_PHONE_LJSPEECH = _Tacotron2WaveRNNPhoneBundle( _tacotron2_path='tacotron2_english_phonemes_1500_epochs_wavernn_ljspeech.pth', _tacotron2_params=utils._get_taco_params(n_symbols=96), _wavernn_path='wavernn_10k_epochs_8bits_ljspeech.pth', _wavernn_params=utils._get_wrnn_params(), ) TACOTRON2_WAVERNN_PHONE_LJSPEECH.__doc__ = ( '''Phoneme-based TTS pipeline with :py:class:`torchaudio.models.Tacotron2` and :py:class:`torchaudio.models.WaveRNN`. The text processor encodes the input texts based on phoneme. It uses `DeepPhonemizer <https://github.com/as-ideas/DeepPhonemizer>`__ to convert graphemes to phonemes. The model (*en_us_cmudict_forward*) was trained on `CMUDict <http://www.speech.cs.cmu.edu/cgi-bin/cmudict>`__. Tacotron2 was trained on *LJSpeech* [:footcite:`ljspeech17`] for 1,500 epochs. You can find the training script `here <https://github.com/pytorch/audio/tree/main/examples/pipeline_tacotron2>`__. The following parameters were used; ``win_length=1100``, ``hop_length=275``, ``n_fft=2048``, ``mel_fmin=40``, and ``mel_fmax=11025``. The vocder is based on :py:class:`torchaudio.models.WaveRNN`. It was trained on 8 bits depth waveform of *LJSpeech* [:footcite:`ljspeech17`] for 10,000 epochs. You can find the training script `here <https://github.com/pytorch/audio/tree/main/examples/pipeline_wavernn>`__. Please refer to :func:`torchaudio.pipelines.Tacotron2TTSBundle` for the usage. Example - "Hello world! T T S stands for Text to Speech!" .. image:: https://download.pytorch.org/torchaudio/doc-assets/TACOTRON2_WAVERNN_PHONE_LJSPEECH.png :alt: Spectrogram generated by Tacotron2 .. raw:: html <audio controls="controls"> <source src="https://download.pytorch.org/torchaudio/doc-assets/TACOTRON2_WAVERNN_PHONE_LJSPEECH.wav" type="audio/wav"> Your browser does not support the <code>audio</code> element. </audio> Example - "The examination and testimony of the experts enabled the Commission to conclude that five shots may have been fired," .. image:: https://download.pytorch.org/torchaudio/doc-assets/TACOTRON2_WAVERNN_PHONE_LJSPEECH_v2.png :alt: Spectrogram generated by Tacotron2 .. raw:: html <audio controls="controls"> <source src="https://download.pytorch.org/torchaudio/doc-assets/TACOTRON2_WAVERNN_PHONE_LJSPEECH_v2.wav" type="audio/wav"> Your browser does not support the <code>audio</code> element. </audio> ''') # noqa: E501
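A minimal end-to-end sketch (not part of the library source) for the Griffin-Lim character bundle defined above, mirroring the interface documented in ``Tacotron2TTSBundle``; the output file name is illustrative. Unlike the phoneme bundles, no DeepPhonemizer installation is needed here.

import torchaudio

bundle = torchaudio.pipelines.TACOTRON2_GRIFFINLIM_CHAR_LJSPEECH

processor = bundle.get_text_processor()   # character-level encoder, nothing to download
tacotron2 = bundle.get_tacotron2()        # downloads pretrained weights on first use
vocoder = bundle.get_vocoder()            # Griffin-Lim based, no weights to download

tokens, lengths = processor("Hello world! T T S stands for Text to Speech!")
specgram, spec_lengths, _ = tacotron2.infer(tokens, lengths)
waveforms, _ = vocoder(specgram, spec_lengths)

torchaudio.save("griffinlim_char.wav", waveforms, vocoder.sample_rate)  # hypothetical output path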
from . import ( sox_utils, ) from torchaudio._internal import module_utils as _mod_utils if _mod_utils.is_sox_available(): sox_utils.set_verbosity(1)
from typing import List, Dict import torch from torchaudio._internal import module_utils as _mod_utils @_mod_utils.requires_sox() def set_seed(seed: int): """Set libsox's PRNG Args: seed (int): seed value. valid range is int32. See Also: http://sox.sourceforge.net/sox.html """ torch.ops.torchaudio.sox_utils_set_seed(seed) @_mod_utils.requires_sox() def set_verbosity(verbosity: int): """Set libsox's verbosity Args: verbosity (int): Set verbosity level of libsox. * ``1`` failure messages * ``2`` warnings * ``3`` details of processing * ``4``-``6`` increasing levels of debug messages See Also: http://sox.sourceforge.net/sox.html """ torch.ops.torchaudio.sox_utils_set_verbosity(verbosity) @_mod_utils.requires_sox() def set_buffer_size(buffer_size: int): """Set buffer size for sox effect chain Args: buffer_size (int): Set the size in bytes of the buffers used for processing audio. See Also: http://sox.sourceforge.net/sox.html """ torch.ops.torchaudio.sox_utils_set_buffer_size(buffer_size) @_mod_utils.requires_sox() def set_use_threads(use_threads: bool): """Set multithread option for sox effect chain Args: use_threads (bool): When ``True``, enables ``libsox``'s parallel effects channels processing. To use mutlithread, the underlying ``libsox`` has to be compiled with OpenMP support. See Also: http://sox.sourceforge.net/sox.html """ torch.ops.torchaudio.sox_utils_set_use_threads(use_threads) @_mod_utils.requires_sox() def list_effects() -> Dict[str, str]: """List the available sox effect names Returns: Dict[str, str]: Mapping from ``effect name`` to ``usage`` """ return dict(torch.ops.torchaudio.sox_utils_list_effects()) @_mod_utils.requires_sox() def list_read_formats() -> List[str]: """List the supported audio formats for read Returns: List[str]: List of supported audio formats """ return torch.ops.torchaudio.sox_utils_list_read_formats() @_mod_utils.requires_sox() def list_write_formats() -> List[str]: """List the supported audio formats for write Returns: List[str]: List of supported audio formats """ return torch.ops.torchaudio.sox_utils_list_write_formats() @_mod_utils.requires_sox() def get_buffer_size() -> int: """Get buffer size for sox effect chain Returns: int: size in bytes of buffers used for processing audio. """ return torch.ops.torchaudio.sox_utils_get_buffer_size()
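An illustrative snippet (not part of the library source) exercising the utilities above through their public location, ``torchaudio.utils.sox_utils``; it assumes torchaudio was built with libsox support.

import torchaudio

torchaudio.utils.sox_utils.set_verbosity(2)          # show warnings in addition to failures

effects = torchaudio.utils.sox_utils.list_effects()  # dict: effect name -> usage string
print(len(effects), "effects available")
print(torchaudio.utils.sox_utils.list_read_formats()[:5])

print(torchaudio.utils.sox_utils.get_buffer_size())  # current effect-chain buffer size in bytes
torchaudio.utils.sox_utils.set_buffer_size(16384)

torchaudio.utils.sox_utils.set_verbosity(1)          # restore the value set at import time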
# flake8: noqa from . import utils from .utils import ( list_audio_backends, get_audio_backend, set_audio_backend, ) utils._init_audio_backend()
import os from typing import Tuple, Optional import torch from torchaudio._internal import ( module_utils as _mod_utils, ) import torchaudio from .common import AudioMetaData @_mod_utils.requires_sox() def info( filepath: str, format: Optional[str] = None, ) -> AudioMetaData: """Get signal information of an audio file. Args: filepath (path-like object or file-like object): Source of audio data. When the function is not compiled by TorchScript, (e.g. ``torch.jit.script``), the following types are accepted; * ``path-like``: file path * ``file-like``: Object with ``read(size: int) -> bytes`` method, which returns byte string of at most ``size`` length. When the function is compiled by TorchScript, only ``str`` type is allowed. Note: * When the input type is file-like object, this function cannot get the correct length (``num_samples``) for certain formats, such as ``mp3`` and ``vorbis``. In this case, the value of ``num_samples`` is ``0``. * This argument is intentionally annotated as ``str`` only due to TorchScript compiler compatibility. format (str or None, optional): Override the format detection with the given format. Providing the argument might help when libsox can not infer the format from header or extension, Returns: AudioMetaData: Metadata of the given audio. """ if not torch.jit.is_scripting(): if hasattr(filepath, 'read'): sinfo = torchaudio._torchaudio.get_info_fileobj(filepath, format) return AudioMetaData(*sinfo) filepath = os.fspath(filepath) sinfo = torch.ops.torchaudio.sox_io_get_info(filepath, format) return AudioMetaData(*sinfo) @_mod_utils.requires_sox() def load( filepath: str, frame_offset: int = 0, num_frames: int = -1, normalize: bool = True, channels_first: bool = True, format: Optional[str] = None, ) -> Tuple[torch.Tensor, int]: """Load audio data from file. Note: This function can handle all the codecs that underlying libsox can handle, however it is tested on the following formats; * WAV, AMB * 32-bit floating-point * 32-bit signed integer * 24-bit signed integer * 16-bit signed integer * 8-bit unsigned integer (WAV only) * MP3 * FLAC * OGG/VORBIS * OPUS * SPHERE * AMR-NB To load ``MP3``, ``FLAC``, ``OGG/VORBIS``, ``OPUS`` and other codecs ``libsox`` does not handle natively, your installation of ``torchaudio`` has to be linked to ``libsox`` and corresponding codec libraries such as ``libmad`` or ``libmp3lame`` etc. By default (``normalize=True``, ``channels_first=True``), this function returns Tensor with ``float32`` dtype and the shape of `[channel, time]`. The samples are normalized to fit in the range of ``[-1.0, 1.0]``. When the input format is WAV with integer type, such as 32-bit signed integer, 16-bit signed integer, 24-bit signed integer, and 8-bit unsigned integer, by providing ``normalize=False``, this function can return integer Tensor, where the samples are expressed within the whole range of the corresponding dtype, that is, ``int32`` tensor for 32-bit signed PCM, ``int16`` for 16-bit signed PCM and ``uint8`` for 8-bit unsigned PCM. Since torch does not support ``int24`` dtype, 24-bit signed PCM are converted to ``int32`` tensors. ``normalize`` parameter has no effect on 32-bit floating-point WAV and other formats, such as ``flac`` and ``mp3``. For these formats, this function always returns ``float32`` Tensor with values normalized to ``[-1.0, 1.0]``. Args: filepath (path-like object or file-like object): Source of audio data. When the function is not compiled by TorchScript, (e.g. 
``torch.jit.script``), the following types are accepted; * ``path-like``: file path * ``file-like``: Object with ``read(size: int) -> bytes`` method, which returns byte string of at most ``size`` length. When the function is compiled by TorchScript, only ``str`` type is allowed. Note: This argument is intentionally annotated as ``str`` only due to TorchScript compiler compatibility. frame_offset (int): Number of frames to skip before start reading data. num_frames (int, optional): Maximum number of frames to read. ``-1`` reads all the remaining samples, starting from ``frame_offset``. This function may return the less number of frames if there is not enough frames in the given file. normalize (bool, optional): When ``True``, this function always return ``float32``, and sample values are normalized to ``[-1.0, 1.0]``. If input file is integer WAV, giving ``False`` will change the resulting Tensor type to integer type. This argument has no effect for formats other than integer WAV type. channels_first (bool, optional): When True, the returned Tensor has dimension `[channel, time]`. Otherwise, the returned Tensor's dimension is `[time, channel]`. format (str or None, optional): Override the format detection with the given format. Providing the argument might help when libsox can not infer the format from header or extension, Returns: (torch.Tensor, int): Resulting Tensor and sample rate. If the input file has integer wav format and normalization is off, then it has integer type, else ``float32`` type. If ``channels_first=True``, it has `[channel, time]` else `[time, channel]`. """ if not torch.jit.is_scripting(): if hasattr(filepath, 'read'): return torchaudio._torchaudio.load_audio_fileobj( filepath, frame_offset, num_frames, normalize, channels_first, format) filepath = os.fspath(filepath) return torch.ops.torchaudio.sox_io_load_audio_file( filepath, frame_offset, num_frames, normalize, channels_first, format) @_mod_utils.requires_sox() def save( filepath: str, src: torch.Tensor, sample_rate: int, channels_first: bool = True, compression: Optional[float] = None, format: Optional[str] = None, encoding: Optional[str] = None, bits_per_sample: Optional[int] = None, ): """Save audio data to file. Args: filepath (str or pathlib.Path): Path to save file. This function also handles ``pathlib.Path`` objects, but is annotated as ``str`` for TorchScript compiler compatibility. src (torch.Tensor): Audio data to save. must be 2D tensor. sample_rate (int): sampling rate channels_first (bool, optional): If ``True``, the given tensor is interpreted as `[channel, time]`, otherwise `[time, channel]`. compression (float or None, optional): Used for formats other than WAV. This corresponds to ``-C`` option of ``sox`` command. ``"mp3"`` Either bitrate (in ``kbps``) with quality factor, such as ``128.2``, or VBR encoding with quality factor such as ``-4.2``. Default: ``-4.5``. ``"flac"`` Whole number from ``0`` to ``8``. ``8`` is default and highest compression. ``"ogg"``, ``"vorbis"`` Number from ``-1`` to ``10``; ``-1`` is the highest compression and lowest quality. Default: ``3``. See the detail at http://sox.sourceforge.net/soxformat.html. format (str or None, optional): Override the audio format. When ``filepath`` argument is path-like object, audio format is infered from file extension. If file extension is missing or different, you can specify the correct format with this argument. When ``filepath`` argument is file-like object, this argument is required. 
Valid values are ``"wav"``, ``"mp3"``, ``"ogg"``, ``"vorbis"``, ``"amr-nb"``, ``"amb"``, ``"flac"``, ``"sph"``, ``"gsm"``, and ``"htk"``. encoding (str or None, optional): Changes the encoding for the supported formats. This argument is effective only for supported formats, such as ``"wav"``, ``""amb"`` and ``"sph"``. Valid values are; - ``"PCM_S"`` (signed integer Linear PCM) - ``"PCM_U"`` (unsigned integer Linear PCM) - ``"PCM_F"`` (floating point PCM) - ``"ULAW"`` (mu-law) - ``"ALAW"`` (a-law) Default values If not provided, the default value is picked based on ``format`` and ``bits_per_sample``. ``"wav"``, ``"amb"`` - | If both ``encoding`` and ``bits_per_sample`` are not provided, the ``dtype`` of the | Tensor is used to determine the default value. - ``"PCM_U"`` if dtype is ``uint8`` - ``"PCM_S"`` if dtype is ``int16`` or ``int32`` - ``"PCM_F"`` if dtype is ``float32`` - ``"PCM_U"`` if ``bits_per_sample=8`` - ``"PCM_S"`` otherwise ``"sph"`` format; - the default value is ``"PCM_S"`` bits_per_sample (int or None, optional): Changes the bit depth for the supported formats. When ``format`` is one of ``"wav"``, ``"flac"``, ``"sph"``, or ``"amb"``, you can change the bit depth. Valid values are ``8``, ``16``, ``32`` and ``64``. Default Value; If not provided, the default values are picked based on ``format`` and ``"encoding"``; ``"wav"``, ``"amb"``; - | If both ``encoding`` and ``bits_per_sample`` are not provided, the ``dtype`` of the | Tensor is used. - ``8`` if dtype is ``uint8`` - ``16`` if dtype is ``int16`` - ``32`` if dtype is ``int32`` or ``float32`` - ``8`` if ``encoding`` is ``"PCM_U"``, ``"ULAW"`` or ``"ALAW"`` - ``16`` if ``encoding`` is ``"PCM_S"`` - ``32`` if ``encoding`` is ``"PCM_F"`` ``"flac"`` format; - the default value is ``24`` ``"sph"`` format; - ``16`` if ``encoding`` is ``"PCM_U"``, ``"PCM_S"``, ``"PCM_F"`` or not provided. - ``8`` if ``encoding`` is ``"ULAW"`` or ``"ALAW"`` ``"amb"`` format; - ``8`` if ``encoding`` is ``"PCM_U"``, ``"ULAW"`` or ``"ALAW"`` - ``16`` if ``encoding`` is ``"PCM_S"`` or not provided. - ``32`` if ``encoding`` is ``"PCM_F"`` Supported formats/encodings/bit depth/compression are; ``"wav"``, ``"amb"`` - 32-bit floating-point PCM - 32-bit signed integer PCM - 24-bit signed integer PCM - 16-bit signed integer PCM - 8-bit unsigned integer PCM - 8-bit mu-law - 8-bit a-law Note: Default encoding/bit depth is determined by the dtype of the input Tensor. ``"mp3"`` Fixed bit rate (such as 128kHz) and variable bit rate compression. Default: VBR with high quality. ``"flac"`` - 8-bit - 16-bit - 24-bit (default) ``"ogg"``, ``"vorbis"`` - Different quality level. Default: approx. 112kbps ``"sph"`` - 8-bit signed integer PCM - 16-bit signed integer PCM - 24-bit signed integer PCM - 32-bit signed integer PCM (default) - 8-bit mu-law - 8-bit a-law - 16-bit a-law - 24-bit a-law - 32-bit a-law ``"amr-nb"`` Bitrate ranging from 4.75 kbit/s to 12.2 kbit/s. Default: 4.75 kbit/s ``"gsm"`` Lossy Speech Compression, CPU intensive. ``"htk"`` Uses a default single-channel 16-bit PCM format. Note: To save into formats that ``libsox`` does not handle natively, (such as ``"mp3"``, ``"flac"``, ``"ogg"`` and ``"vorbis"``), your installation of ``torchaudio`` has to be linked to ``libsox`` and corresponding codec libraries such as ``libmad`` or ``libmp3lame`` etc. 
""" if not torch.jit.is_scripting(): if hasattr(filepath, 'write'): torchaudio._torchaudio.save_audio_fileobj( filepath, src, sample_rate, channels_first, compression, format, encoding, bits_per_sample) return filepath = os.fspath(filepath) torch.ops.torchaudio.sox_io_save_audio_file( filepath, src, sample_rate, channels_first, compression, format, encoding, bits_per_sample)
class AudioMetaData:
    """Return type of ``torchaudio.info`` function.

    This class is used by :ref:`"sox_io" backend<sox_io_backend>` and
    :ref:`"soundfile" backend with the new interface<soundfile_backend>`.

    :ivar int sample_rate: Sample rate
    :ivar int num_frames: The number of frames
    :ivar int num_channels: The number of channels
    :ivar int bits_per_sample: The number of bits per sample. This is 0 for lossy formats,
        or when it cannot be accurately inferred.
    :ivar str encoding: Audio encoding
        The values encoding can take are one of the following:

        * ``PCM_S``: Signed integer linear PCM
        * ``PCM_U``: Unsigned integer linear PCM
        * ``PCM_F``: Floating point linear PCM
        * ``FLAC``: Flac, Free Lossless Audio Codec
        * ``ULAW``: Mu-law
        * ``ALAW``: A-law
        * ``MP3``: MP3, MPEG-1 Audio Layer III
        * ``VORBIS``: OGG Vorbis
        * ``AMR_WB``: Adaptive Multi-Rate Wideband
        * ``AMR_NB``: Adaptive Multi-Rate Narrowband
        * ``OPUS``: Opus
        * ``HTK``: Single channel 16-bit PCM
        * ``UNKNOWN``: None of the above
    """
    def __init__(
            self,
            sample_rate: int,
            num_frames: int,
            num_channels: int,
            bits_per_sample: int,
            encoding: str,
    ):
        self.sample_rate = sample_rate
        self.num_frames = num_frames
        self.num_channels = num_channels
        self.bits_per_sample = bits_per_sample
        self.encoding = encoding

    def __str__(self):
        return (
            f"AudioMetaData("
            f"sample_rate={self.sample_rate}, "
            f"num_frames={self.num_frames}, "
            f"num_channels={self.num_channels}, "
            f"bits_per_sample={self.bits_per_sample}, "
            f"encoding={self.encoding}"
            f")"
        )
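# --- Illustrative usage sketch (not part of the original source) ---
# Inspecting a file's metadata via ``torchaudio.info``, which returns the
# ``AudioMetaData`` object defined above. "clip.wav" is a placeholder path.
import torchaudio

metadata = torchaudio.info("clip.wav")
print(metadata)                     # uses AudioMetaData.__str__
print(metadata.sample_rate, metadata.num_frames, metadata.num_channels)
print(metadata.bits_per_sample, metadata.encoding)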
"""Defines utilities for switching audio backends""" import warnings from typing import Optional, List import torchaudio from torchaudio._internal import module_utils as _mod_utils from . import ( no_backend, sox_io_backend, soundfile_backend, ) __all__ = [ 'list_audio_backends', 'get_audio_backend', 'set_audio_backend', ] def list_audio_backends() -> List[str]: """List available backends Returns: List[str]: The list of available backends. """ backends = [] if _mod_utils.is_module_available('soundfile'): backends.append('soundfile') if _mod_utils.is_sox_available(): backends.append('sox_io') return backends def set_audio_backend(backend: Optional[str]): """Set the backend for I/O operation Args: backend (str or None): Name of the backend. One of ``"sox_io"`` or ``"soundfile"`` based on availability of the system. If ``None`` is provided the current backend is unassigned. """ if backend is not None and backend not in list_audio_backends(): raise RuntimeError( f'Backend "{backend}" is not one of ' f'available backends: {list_audio_backends()}.') if backend is None: module = no_backend elif backend == 'sox_io': module = sox_io_backend elif backend == 'soundfile': module = soundfile_backend else: raise NotImplementedError(f'Unexpected backend "{backend}"') for func in ['save', 'load', 'info']: setattr(torchaudio, func, getattr(module, func)) def _init_audio_backend(): backends = list_audio_backends() if 'sox_io' in backends: set_audio_backend('sox_io') elif 'soundfile' in backends: set_audio_backend('soundfile') else: warnings.warn('No audio backend is available.') set_audio_backend(None) def get_audio_backend() -> Optional[str]: """Get the name of the current backend Returns: Optional[str]: The name of the current backend or ``None`` if no backend is assigned. """ if torchaudio.load == no_backend.load: return None if torchaudio.load == sox_io_backend.load: return 'sox_io' if torchaudio.load == soundfile_backend.load: return 'soundfile' raise ValueError('Unknown backend.')
from pathlib import Path from typing import Callable, Optional, Tuple, Union from torch import Tensor def load(filepath: Union[str, Path], out: Optional[Tensor] = None, normalization: Union[bool, float, Callable] = True, channels_first: bool = True, num_frames: int = 0, offset: int = 0, filetype: Optional[str] = None) -> Tuple[Tensor, int]: raise RuntimeError('No audio I/O backend is available.') def save(filepath: str, src: Tensor, sample_rate: int, precision: int = 16, channels_first: bool = True) -> None: raise RuntimeError('No audio I/O backend is available.') def info(filepath: str) -> None: raise RuntimeError('No audio I/O backend is available.')
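# --- Illustrative usage sketch (not part of the original source) ---
# With no backend assigned, the placeholder functions above are bound to
# ``torchaudio.load``/``save``/``info`` and raise on use.
import torchaudio

torchaudio.set_audio_backend(None)
try:
    torchaudio.load("clip.wav")     # "clip.wav" is a placeholder path
except RuntimeError as err:
    print(err)                      # "No audio I/O backend is available."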
"""The new soundfile backend which will become default in 0.8.0 onward""" from typing import Tuple, Optional import warnings import torch from torchaudio._internal import module_utils as _mod_utils from .common import AudioMetaData if _mod_utils.is_soundfile_available(): import soundfile # Mapping from soundfile subtype to number of bits per sample. # This is mostly heuristical and the value is set to 0 when it is irrelevant # (lossy formats) or when it can't be inferred. # For ADPCM (and G72X) subtypes, it's hard to infer the bit depth because it's not part of the standard: # According to https://en.wikipedia.org/wiki/Adaptive_differential_pulse-code_modulation#In_telephony, # the default seems to be 8 bits but it can be compressed further to 4 bits. # The dict is inspired from # https://github.com/bastibe/python-soundfile/blob/744efb4b01abc72498a96b09115b42a4cabd85e4/soundfile.py#L66-L94 _SUBTYPE_TO_BITS_PER_SAMPLE = { 'PCM_S8': 8, # Signed 8 bit data 'PCM_16': 16, # Signed 16 bit data 'PCM_24': 24, # Signed 24 bit data 'PCM_32': 32, # Signed 32 bit data 'PCM_U8': 8, # Unsigned 8 bit data (WAV and RAW only) 'FLOAT': 32, # 32 bit float data 'DOUBLE': 64, # 64 bit float data 'ULAW': 8, # U-Law encoded. See https://en.wikipedia.org/wiki/G.711#Types 'ALAW': 8, # A-Law encoded. See https://en.wikipedia.org/wiki/G.711#Types 'IMA_ADPCM': 0, # IMA ADPCM. 'MS_ADPCM': 0, # Microsoft ADPCM. 'GSM610': 0, # GSM 6.10 encoding. (Wikipedia says 1.625 bit depth?? https://en.wikipedia.org/wiki/Full_Rate) 'VOX_ADPCM': 0, # OKI / Dialogix ADPCM 'G721_32': 0, # 32kbs G721 ADPCM encoding. 'G723_24': 0, # 24kbs G723 ADPCM encoding. 'G723_40': 0, # 40kbs G723 ADPCM encoding. 'DWVW_12': 12, # 12 bit Delta Width Variable Word encoding. 'DWVW_16': 16, # 16 bit Delta Width Variable Word encoding. 'DWVW_24': 24, # 24 bit Delta Width Variable Word encoding. 'DWVW_N': 0, # N bit Delta Width Variable Word encoding. 'DPCM_8': 8, # 8 bit differential PCM (XI only) 'DPCM_16': 16, # 16 bit differential PCM (XI only) 'VORBIS': 0, # Xiph Vorbis encoding. (lossy) 'ALAC_16': 16, # Apple Lossless Audio Codec (16 bit). 'ALAC_20': 20, # Apple Lossless Audio Codec (20 bit). 'ALAC_24': 24, # Apple Lossless Audio Codec (24 bit). 'ALAC_32': 32, # Apple Lossless Audio Codec (32 bit). } def _get_bit_depth(subtype): if subtype not in _SUBTYPE_TO_BITS_PER_SAMPLE: warnings.warn( f"The {subtype} subtype is unknown to TorchAudio. As a result, the bits_per_sample " "attribute will be set to 0. If you are seeing this warning, please " "report by opening an issue on github (after checking for existing/closed ones). " "You may otherwise ignore this warning." ) return _SUBTYPE_TO_BITS_PER_SAMPLE.get(subtype, 0) _SUBTYPE_TO_ENCODING = { 'PCM_S8': 'PCM_S', 'PCM_16': 'PCM_S', 'PCM_24': 'PCM_S', 'PCM_32': 'PCM_S', 'PCM_U8': 'PCM_U', 'FLOAT': 'PCM_F', 'DOUBLE': 'PCM_F', 'ULAW': 'ULAW', 'ALAW': 'ALAW', 'VORBIS': 'VORBIS', } def _get_encoding(format: str, subtype: str): if format == 'FLAC': return 'FLAC' return _SUBTYPE_TO_ENCODING.get(subtype, 'UNKNOWN') @_mod_utils.requires_soundfile() def info(filepath: str, format: Optional[str] = None) -> AudioMetaData: """Get signal information of an audio file. Note: ``filepath`` argument is intentionally annotated as ``str`` only, even though it accepts ``pathlib.Path`` object as well. This is for the consistency with ``"sox_io"`` backend, which has a restriction on type annotation due to TorchScript compiler compatiblity. Args: filepath (path-like object or file-like object): Source of audio data. 
format (str or None, optional): Not used. PySoundFile does not accept format hint. Returns: AudioMetaData: meta data of the given audio. """ sinfo = soundfile.info(filepath) return AudioMetaData( sinfo.samplerate, sinfo.frames, sinfo.channels, bits_per_sample=_get_bit_depth(sinfo.subtype), encoding=_get_encoding(sinfo.format, sinfo.subtype), ) _SUBTYPE2DTYPE = { "PCM_S8": "int8", "PCM_U8": "uint8", "PCM_16": "int16", "PCM_32": "int32", "FLOAT": "float32", "DOUBLE": "float64", } @_mod_utils.requires_soundfile() def load( filepath: str, frame_offset: int = 0, num_frames: int = -1, normalize: bool = True, channels_first: bool = True, format: Optional[str] = None, ) -> Tuple[torch.Tensor, int]: """Load audio data from file. Note: The formats this function can handle depend on the soundfile installation. This function is tested on the following formats; * WAV * 32-bit floating-point * 32-bit signed integer * 16-bit signed integer * 8-bit unsigned integer * FLAC * OGG/VORBIS * SPHERE By default (``normalize=True``, ``channels_first=True``), this function returns Tensor with ``float32`` dtype and the shape of `[channel, time]`. The samples are normalized to fit in the range of ``[-1.0, 1.0]``. When the input format is WAV with integer type, such as 32-bit signed integer, 16-bit signed integer and 8-bit unsigned integer (24-bit signed integer is not supported), by providing ``normalize=False``, this function can return integer Tensor, where the samples are expressed within the whole range of the corresponding dtype, that is, ``int32`` tensor for 32-bit signed PCM, ``int16`` for 16-bit signed PCM and ``uint8`` for 8-bit unsigned PCM. ``normalize`` parameter has no effect on 32-bit floating-point WAV and other formats, such as ``flac`` and ``mp3``. For these formats, this function always returns ``float32`` Tensor with values normalized to ``[-1.0, 1.0]``. Note: ``filepath`` argument is intentionally annotated as ``str`` only, even though it accepts ``pathlib.Path`` object as well. This is for the consistency with ``"sox_io"`` backend, which has a restriction on type annotation due to TorchScript compiler compatiblity. Args: filepath (path-like object or file-like object): Source of audio data. frame_offset (int, optional): Number of frames to skip before start reading data. num_frames (int, optional): Maximum number of frames to read. ``-1`` reads all the remaining samples, starting from ``frame_offset``. This function may return the less number of frames if there is not enough frames in the given file. normalize (bool, optional): When ``True``, this function always return ``float32``, and sample values are normalized to ``[-1.0, 1.0]``. If input file is integer WAV, giving ``False`` will change the resulting Tensor type to integer type. This argument has no effect for formats other than integer WAV type. channels_first (bool, optional): When True, the returned Tensor has dimension `[channel, time]`. Otherwise, the returned Tensor's dimension is `[time, channel]`. format (str or None, optional): Not used. PySoundFile does not accept format hint. Returns: (torch.Tensor, int): Resulting Tensor and sample rate. If the input file has integer wav format and normalization is off, then it has integer type, else ``float32`` type. If ``channels_first=True``, it has `[channel, time]` else `[time, channel]`. 
""" with soundfile.SoundFile(filepath, "r") as file_: if file_.format != "WAV" or normalize: dtype = "float32" elif file_.subtype not in _SUBTYPE2DTYPE: raise ValueError(f"Unsupported subtype: {file_.subtype}") else: dtype = _SUBTYPE2DTYPE[file_.subtype] frames = file_._prepare_read(frame_offset, None, num_frames) waveform = file_.read(frames, dtype, always_2d=True) sample_rate = file_.samplerate waveform = torch.from_numpy(waveform) if channels_first: waveform = waveform.t() return waveform, sample_rate def _get_subtype_for_wav( dtype: torch.dtype, encoding: str, bits_per_sample: int): if not encoding: if not bits_per_sample: subtype = { torch.uint8: "PCM_U8", torch.int16: "PCM_16", torch.int32: "PCM_32", torch.float32: "FLOAT", torch.float64: "DOUBLE", }.get(dtype) if not subtype: raise ValueError(f"Unsupported dtype for wav: {dtype}") return subtype if bits_per_sample == 8: return "PCM_U8" return f"PCM_{bits_per_sample}" if encoding == "PCM_S": if not bits_per_sample: return "PCM_32" if bits_per_sample == 8: raise ValueError("wav does not support 8-bit signed PCM encoding.") return f"PCM_{bits_per_sample}" if encoding == "PCM_U": if bits_per_sample in (None, 8): return "PCM_U8" raise ValueError("wav only supports 8-bit unsigned PCM encoding.") if encoding == "PCM_F": if bits_per_sample in (None, 32): return "FLOAT" if bits_per_sample == 64: return "DOUBLE" raise ValueError("wav only supports 32/64-bit float PCM encoding.") if encoding == "ULAW": if bits_per_sample in (None, 8): return "ULAW" raise ValueError("wav only supports 8-bit mu-law encoding.") if encoding == "ALAW": if bits_per_sample in (None, 8): return "ALAW" raise ValueError("wav only supports 8-bit a-law encoding.") raise ValueError(f"wav does not support {encoding}.") def _get_subtype_for_sphere(encoding: str, bits_per_sample: int): if encoding in (None, "PCM_S"): return f"PCM_{bits_per_sample}" if bits_per_sample else "PCM_32" if encoding in ("PCM_U", "PCM_F"): raise ValueError(f"sph does not support {encoding} encoding.") if encoding == "ULAW": if bits_per_sample in (None, 8): return "ULAW" raise ValueError("sph only supports 8-bit for mu-law encoding.") if encoding == "ALAW": return "ALAW" raise ValueError(f"sph does not support {encoding}.") def _get_subtype( dtype: torch.dtype, format: str, encoding: str, bits_per_sample: int): if format == "wav": return _get_subtype_for_wav(dtype, encoding, bits_per_sample) if format == "flac": if encoding: raise ValueError("flac does not support encoding.") if not bits_per_sample: return "PCM_16" if bits_per_sample > 24: raise ValueError("flac does not support bits_per_sample > 24.") return "PCM_S8" if bits_per_sample == 8 else f"PCM_{bits_per_sample}" if format in ("ogg", "vorbis"): if encoding or bits_per_sample: raise ValueError( "ogg/vorbis does not support encoding/bits_per_sample.") return "VORBIS" if format == "sph": return _get_subtype_for_sphere(encoding, bits_per_sample) if format in ("nis", "nist"): return "PCM_16" raise ValueError(f"Unsupported format: {format}") @_mod_utils.requires_soundfile() def save( filepath: str, src: torch.Tensor, sample_rate: int, channels_first: bool = True, compression: Optional[float] = None, format: Optional[str] = None, encoding: Optional[str] = None, bits_per_sample: Optional[int] = None, ): """Save audio data to file. Note: The formats this function can handle depend on the soundfile installation. 
    This function is tested on the following formats;

    * WAV

        * 32-bit floating-point
        * 32-bit signed integer
        * 16-bit signed integer
        * 8-bit unsigned integer

    * FLAC
    * OGG/VORBIS
    * SPHERE

    Note:
        ``filepath`` argument is intentionally annotated as ``str`` only, even though it accepts
        ``pathlib.Path`` object as well. This is for the consistency with ``"sox_io"`` backend,
        which has a restriction on type annotation due to TorchScript compiler compatibility.

    Args:
        filepath (str or pathlib.Path): Path to audio file.
        src (torch.Tensor): Audio data to save. Must be a 2D tensor.
        sample_rate (int): Sampling rate.
        channels_first (bool, optional): If ``True``, the given tensor is interpreted as
            `[channel, time]`, otherwise `[time, channel]`.
        compression (float or None, optional): Not used.
            It is present only for interface compatibility with the "sox_io" backend.
        format (str or None, optional): Override the audio format.
            When ``filepath`` argument is path-like object, audio format is inferred from file extension.
            If the file extension is missing or different, you can specify the correct format with this
            argument.

            When ``filepath`` argument is file-like object, this argument is required.

            Valid values are ``"wav"``, ``"ogg"``, ``"vorbis"``, ``"flac"`` and ``"sph"``.
        encoding (str or None, optional): Changes the encoding for supported formats.
            This argument is effective only for supported formats, such as
            ``"wav"``, ``"flac"`` and ``"sph"``. Valid values are;

            - ``"PCM_S"`` (signed integer Linear PCM)
            - ``"PCM_U"`` (unsigned integer Linear PCM)
            - ``"PCM_F"`` (floating point PCM)
            - ``"ULAW"`` (mu-law)
            - ``"ALAW"`` (a-law)

        bits_per_sample (int or None, optional): Changes the bit depth for the supported formats.
            When ``format`` is one of ``"wav"``, ``"flac"`` or ``"sph"``,
            you can change the bit depth.
            Valid values are ``8``, ``16``, ``24``, ``32`` and ``64``.

    Supported formats/encodings/bit depth/compression are:

    ``"wav"``
        - 32-bit floating-point PCM
        - 32-bit signed integer PCM
        - 24-bit signed integer PCM
        - 16-bit signed integer PCM
        - 8-bit unsigned integer PCM
        - 8-bit mu-law
        - 8-bit a-law

        Note:
            Default encoding/bit depth is determined by the dtype of the input Tensor.

    ``"flac"``
        - 8-bit
        - 16-bit (default)
        - 24-bit

    ``"ogg"``, ``"vorbis"``
        - Doesn't accept changing configuration.

    ``"sph"``
        - 8-bit signed integer PCM
        - 16-bit signed integer PCM
        - 24-bit signed integer PCM
        - 32-bit signed integer PCM (default)
        - 8-bit mu-law
        - 8-bit a-law
        - 16-bit a-law
        - 24-bit a-law
        - 32-bit a-law
    """
    if src.ndim != 2:
        raise ValueError(f"Expected 2D Tensor, got {src.ndim}D.")
    if compression is not None:
        warnings.warn(
            '`save` function of "soundfile" backend does not support "compression" parameter. '
            "The argument is silently ignored."
        )
    if hasattr(filepath, 'write'):
        if format is None:
            raise RuntimeError('`format` is required when saving to file object.')
        ext = format.lower()
    else:
        ext = str(filepath).split(".")[-1].lower()

    if bits_per_sample not in (None, 8, 16, 24, 32, 64):
        raise ValueError("Invalid bits_per_sample.")
    if bits_per_sample == 24:
        warnings.warn("Saving audio with 24 bits per sample might warp samples near -1. "
                      "Using 16 bits per sample might be able to avoid this.")
    subtype = _get_subtype(src.dtype, ext, encoding, bits_per_sample)

    # sph is an extension used in TED-LIUM but soundfile does not recognize it as NIST format,
    # so we extend the extensions manually here
    if ext in ["nis", "nist", "sph"] and format is None:
        format = "NIST"

    if channels_first:
        src = src.t()

    soundfile.write(
        file=filepath, data=src, samplerate=sample_rate, subtype=subtype, format=format
    )
from torch import Tensor from torch import nn __all__ = [ "Wav2Letter", ] class Wav2Letter(nn.Module): r"""Wav2Letter model architecture from *Wav2Letter: an End-to-End ConvNet-based Speech Recognition System* [:footcite:`collobert2016wav2letter`]. :math:`\text{padding} = \frac{\text{ceil}(\text{kernel} - \text{stride})}{2}` Args: num_classes (int, optional): Number of classes to be classified. (Default: ``40``) input_type (str, optional): Wav2Letter can use as input: ``waveform``, ``power_spectrum`` or ``mfcc`` (Default: ``waveform``). num_features (int, optional): Number of input features that the network will receive (Default: ``1``). """ def __init__(self, num_classes: int = 40, input_type: str = "waveform", num_features: int = 1) -> None: super(Wav2Letter, self).__init__() acoustic_num_features = 250 if input_type == "waveform" else num_features acoustic_model = nn.Sequential( nn.Conv1d(in_channels=acoustic_num_features, out_channels=250, kernel_size=48, stride=2, padding=23), nn.ReLU(inplace=True), nn.Conv1d(in_channels=250, out_channels=250, kernel_size=7, stride=1, padding=3), nn.ReLU(inplace=True), nn.Conv1d(in_channels=250, out_channels=250, kernel_size=7, stride=1, padding=3), nn.ReLU(inplace=True), nn.Conv1d(in_channels=250, out_channels=250, kernel_size=7, stride=1, padding=3), nn.ReLU(inplace=True), nn.Conv1d(in_channels=250, out_channels=250, kernel_size=7, stride=1, padding=3), nn.ReLU(inplace=True), nn.Conv1d(in_channels=250, out_channels=250, kernel_size=7, stride=1, padding=3), nn.ReLU(inplace=True), nn.Conv1d(in_channels=250, out_channels=250, kernel_size=7, stride=1, padding=3), nn.ReLU(inplace=True), nn.Conv1d(in_channels=250, out_channels=250, kernel_size=7, stride=1, padding=3), nn.ReLU(inplace=True), nn.Conv1d(in_channels=250, out_channels=2000, kernel_size=32, stride=1, padding=16), nn.ReLU(inplace=True), nn.Conv1d(in_channels=2000, out_channels=2000, kernel_size=1, stride=1, padding=0), nn.ReLU(inplace=True), nn.Conv1d(in_channels=2000, out_channels=num_classes, kernel_size=1, stride=1, padding=0), nn.ReLU(inplace=True) ) if input_type == "waveform": waveform_model = nn.Sequential( nn.Conv1d(in_channels=num_features, out_channels=250, kernel_size=250, stride=160, padding=45), nn.ReLU(inplace=True) ) self.acoustic_model = nn.Sequential(waveform_model, acoustic_model) if input_type in ["power_spectrum", "mfcc"]: self.acoustic_model = acoustic_model def forward(self, x: Tensor) -> Tensor: r""" Args: x (torch.Tensor): Tensor of dimension (batch_size, num_features, input_length). Returns: Tensor: Predictor tensor of dimension (batch_size, number_of_classes, input_length). """ x = self.acoustic_model(x) x = nn.functional.log_softmax(x, dim=1) return x
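# --- Illustrative usage sketch (not part of the original source) ---
# A forward pass through Wav2Letter on a batch of raw waveforms; the batch size
# and input length are arbitrary. The model is also exposed as
# ``torchaudio.models.Wav2Letter``.
import torch

model = Wav2Letter(num_classes=40, input_type="waveform", num_features=1)
waveforms = torch.randn(3, 1, 16000)   # (batch_size, num_features, input_length)
log_probs = model(waveforms)           # (batch_size, num_classes, output_length)
print(log_probs.shape)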
# ***************************************************************************** # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # * Neither the name of the NVIDIA CORPORATION nor the # names of its contributors may be used to endorse or promote products # derived from this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND # ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED # WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY # DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES # (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; # LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND # ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS # SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # # ***************************************************************************** import warnings from math import sqrt from typing import Tuple, List, Optional, Union import torch from torch import nn from torch import Tensor from torch.nn import functional as F __all__ = [ "Tacotron2", ] def _get_linear_layer( in_dim: int, out_dim: int, bias: bool = True, w_init_gain: str = "linear" ) -> torch.nn.Linear: r"""Linear layer with xavier uniform initialization. Args: in_dim (int): Size of each input sample. out_dim (int): Size of each output sample. bias (bool, optional): If set to ``False``, the layer will not learn an additive bias. (Default: ``True``) w_init_gain (str, optional): Parameter passed to ``torch.nn.init.calculate_gain`` for setting the gain parameter of ``xavier_uniform_``. (Default: ``linear``) Returns: (torch.nn.Linear): The corresponding linear layer. """ linear = torch.nn.Linear(in_dim, out_dim, bias=bias) torch.nn.init.xavier_uniform_( linear.weight, gain=torch.nn.init.calculate_gain(w_init_gain) ) return linear def _get_conv1d_layer( in_channels: int, out_channels: int, kernel_size: int = 1, stride: int = 1, padding: Optional[Union[str, int, Tuple[int]]] = None, dilation: int = 1, bias: bool = True, w_init_gain: str = "linear", ) -> torch.nn.Conv1d: r"""1D convolution with xavier uniform initialization. Args: in_channels (int): Number of channels in the input image. out_channels (int): Number of channels produced by the convolution. kernel_size (int, optional): Number of channels in the input image. (Default: ``1``) stride (int, optional): Number of channels in the input image. (Default: ``1``) padding (str, int or tuple, optional): Padding added to both sides of the input. (Default: dilation * (kernel_size - 1) / 2) dilation (int, optional): Number of channels in the input image. 
(Default: ``1``) w_init_gain (str, optional): Parameter passed to ``torch.nn.init.calculate_gain`` for setting the gain parameter of ``xavier_uniform_``. (Default: ``linear``) Returns: (torch.nn.Conv1d): The corresponding Conv1D layer. """ if padding is None: assert kernel_size % 2 == 1 padding = int(dilation * (kernel_size - 1) / 2) conv1d = torch.nn.Conv1d( in_channels, out_channels, kernel_size=kernel_size, stride=stride, padding=padding, dilation=dilation, bias=bias, ) torch.nn.init.xavier_uniform_( conv1d.weight, gain=torch.nn.init.calculate_gain(w_init_gain) ) return conv1d def _get_mask_from_lengths(lengths: Tensor) -> Tensor: r"""Returns a binary mask based on ``lengths``. The ``i``-th row and ``j``-th column of the mask is ``1`` if ``j`` is smaller than ``i``-th element of ``lengths. Args: lengths (Tensor): The length of each element in the batch, with shape (n_batch, ). Returns: mask (Tensor): The binary mask, with shape (n_batch, max of ``lengths``). """ max_len = torch.max(lengths).item() ids = torch.arange(0, max_len, device=lengths.device, dtype=lengths.dtype) mask = (ids < lengths.unsqueeze(1)).byte() mask = torch.le(mask, 0) return mask class _LocationLayer(nn.Module): r"""Location layer used in the Attention model. Args: attention_n_filter (int): Number of filters for attention model. attention_kernel_size (int): Kernel size for attention model. attention_hidden_dim (int): Dimension of attention hidden representation. """ def __init__( self, attention_n_filter: int, attention_kernel_size: int, attention_hidden_dim: int, ): super().__init__() padding = int((attention_kernel_size - 1) / 2) self.location_conv = _get_conv1d_layer( 2, attention_n_filter, kernel_size=attention_kernel_size, padding=padding, bias=False, stride=1, dilation=1, ) self.location_dense = _get_linear_layer( attention_n_filter, attention_hidden_dim, bias=False, w_init_gain="tanh" ) def forward(self, attention_weights_cat: Tensor) -> Tensor: r"""Location layer used in the Attention model. Args: attention_weights_cat (Tensor): Cumulative and previous attention weights with shape (n_batch, 2, max of ``text_lengths``). Returns: processed_attention (Tensor): Cumulative and previous attention weights with shape (n_batch, ``attention_hidden_dim``). """ # (n_batch, attention_n_filter, text_lengths.max()) processed_attention = self.location_conv(attention_weights_cat) processed_attention = processed_attention.transpose(1, 2) # (n_batch, text_lengths.max(), attention_hidden_dim) processed_attention = self.location_dense(processed_attention) return processed_attention class _Attention(nn.Module): r"""Locally sensitive attention model. Args: attention_rnn_dim (int): Number of hidden units for RNN. encoder_embedding_dim (int): Number of embedding dimensions in the Encoder. attention_hidden_dim (int): Dimension of attention hidden representation. attention_location_n_filter (int): Number of filters for Attention model. attention_location_kernel_size (int): Kernel size for Attention model. 
""" def __init__( self, attention_rnn_dim: int, encoder_embedding_dim: int, attention_hidden_dim: int, attention_location_n_filter: int, attention_location_kernel_size: int, ) -> None: super().__init__() self.query_layer = _get_linear_layer( attention_rnn_dim, attention_hidden_dim, bias=False, w_init_gain="tanh" ) self.memory_layer = _get_linear_layer( encoder_embedding_dim, attention_hidden_dim, bias=False, w_init_gain="tanh" ) self.v = _get_linear_layer(attention_hidden_dim, 1, bias=False) self.location_layer = _LocationLayer( attention_location_n_filter, attention_location_kernel_size, attention_hidden_dim, ) self.score_mask_value = -float("inf") def _get_alignment_energies( self, query: Tensor, processed_memory: Tensor, attention_weights_cat: Tensor ) -> Tensor: r"""Get the alignment vector. Args: query (Tensor): Decoder output with shape (n_batch, n_mels * n_frames_per_step). processed_memory (Tensor): Processed Encoder outputs with shape (n_batch, max of ``text_lengths``, attention_hidden_dim). attention_weights_cat (Tensor): Cumulative and previous attention weights with shape (n_batch, 2, max of ``text_lengths``). Returns: alignment (Tensor): attention weights, it is a tensor with shape (batch, max of ``text_lengths``). """ processed_query = self.query_layer(query.unsqueeze(1)) processed_attention_weights = self.location_layer(attention_weights_cat) energies = self.v( torch.tanh(processed_query + processed_attention_weights + processed_memory) ) alignment = energies.squeeze(2) return alignment def forward( self, attention_hidden_state: Tensor, memory: Tensor, processed_memory: Tensor, attention_weights_cat: Tensor, mask: Tensor, ) -> Tuple[Tensor, Tensor]: r"""Pass the input through the Attention model. Args: attention_hidden_state (Tensor): Attention rnn last output with shape (n_batch, ``attention_rnn_dim``). memory (Tensor): Encoder outputs with shape (n_batch, max of ``text_lengths``, ``encoder_embedding_dim``). processed_memory (Tensor): Processed Encoder outputs with shape (n_batch, max of ``text_lengths``, ``attention_hidden_dim``). attention_weights_cat (Tensor): Previous and cumulative attention weights with shape (n_batch, current_num_frames * 2, max of ``text_lengths``). mask (Tensor): Binary mask for padded data with shape (n_batch, current_num_frames). Returns: attention_context (Tensor): Context vector with shape (n_batch, ``encoder_embedding_dim``). attention_weights (Tensor): Attention weights with shape (n_batch, max of ``text_lengths``). """ alignment = self._get_alignment_energies( attention_hidden_state, processed_memory, attention_weights_cat ) alignment = alignment.masked_fill(mask, self.score_mask_value) attention_weights = F.softmax(alignment, dim=1) attention_context = torch.bmm(attention_weights.unsqueeze(1), memory) attention_context = attention_context.squeeze(1) return attention_context, attention_weights class _Prenet(nn.Module): r"""Prenet Module. It is consists of ``len(output_size)`` linear layers. Args: in_dim (int): The size of each input sample. output_sizes (list): The output dimension of each linear layers. """ def __init__(self, in_dim: int, out_sizes: List[int]) -> None: super().__init__() in_sizes = [in_dim] + out_sizes[:-1] self.layers = nn.ModuleList( [ _get_linear_layer(in_size, out_size, bias=False) for (in_size, out_size) in zip(in_sizes, out_sizes) ] ) def forward(self, x: Tensor) -> Tensor: r"""Pass the input through Prenet. Args: x (Tensor): The input sequence to Prenet with shape (n_batch, in_dim). 
Return: x (Tensor): Tensor with shape (n_batch, sizes[-1]) """ for linear in self.layers: x = F.dropout(F.relu(linear(x)), p=0.5, training=True) return x class _Postnet(nn.Module): r"""Postnet Module. Args: n_mels (int): Number of mel bins. postnet_embedding_dim (int): Postnet embedding dimension. postnet_kernel_size (int): Postnet kernel size. postnet_n_convolution (int): Number of postnet convolutions. """ def __init__( self, n_mels: int, postnet_embedding_dim: int, postnet_kernel_size: int, postnet_n_convolution: int, ): super().__init__() self.convolutions = nn.ModuleList() for i in range(postnet_n_convolution): in_channels = n_mels if i == 0 else postnet_embedding_dim out_channels = n_mels if i == (postnet_n_convolution - 1) else postnet_embedding_dim init_gain = "linear" if i == (postnet_n_convolution - 1) else "tanh" num_features = n_mels if i == (postnet_n_convolution - 1) else postnet_embedding_dim self.convolutions.append( nn.Sequential( _get_conv1d_layer( in_channels, out_channels, kernel_size=postnet_kernel_size, stride=1, padding=int((postnet_kernel_size - 1) / 2), dilation=1, w_init_gain=init_gain, ), nn.BatchNorm1d(num_features), ) ) self.n_convs = len(self.convolutions) def forward(self, x: Tensor) -> Tensor: r"""Pass the input through Postnet. Args: x (Tensor): The input sequence with shape (n_batch, ``n_mels``, max of ``mel_specgram_lengths``). Return: x (Tensor): Tensor with shape (n_batch, ``n_mels``, max of ``mel_specgram_lengths``). """ for i, conv in enumerate(self.convolutions): if i < self.n_convs - 1: x = F.dropout(torch.tanh(conv(x)), 0.5, training=self.training) else: x = F.dropout(conv(x), 0.5, training=self.training) return x class _Encoder(nn.Module): r"""Encoder Module. Args: encoder_embedding_dim (int): Number of embedding dimensions in the encoder. encoder_n_convolution (int): Number of convolution layers in the encoder. encoder_kernel_size (int): The kernel size in the encoder. Examples >>> encoder = _Encoder(3, 512, 5) >>> input = torch.rand(10, 20, 30) >>> output = encoder(input) # shape: (10, 30, 512) """ def __init__( self, encoder_embedding_dim: int, encoder_n_convolution: int, encoder_kernel_size: int, ) -> None: super().__init__() self.convolutions = nn.ModuleList() for _ in range(encoder_n_convolution): conv_layer = nn.Sequential( _get_conv1d_layer( encoder_embedding_dim, encoder_embedding_dim, kernel_size=encoder_kernel_size, stride=1, padding=int((encoder_kernel_size - 1) / 2), dilation=1, w_init_gain="relu", ), nn.BatchNorm1d(encoder_embedding_dim), ) self.convolutions.append(conv_layer) self.lstm = nn.LSTM( encoder_embedding_dim, int(encoder_embedding_dim / 2), 1, batch_first=True, bidirectional=True, ) self.lstm.flatten_parameters() def forward(self, x: Tensor, input_lengths: Tensor) -> Tensor: r"""Pass the input through the Encoder. Args: x (Tensor): The input sequences with shape (n_batch, encoder_embedding_dim, n_seq). input_lengths (Tensor): The length of each input sequence with shape (n_batch, ). Return: x (Tensor): A tensor with shape (n_batch, n_seq, encoder_embedding_dim). """ for conv in self.convolutions: x = F.dropout(F.relu(conv(x)), 0.5, self.training) x = x.transpose(1, 2) input_lengths = input_lengths.cpu() x = nn.utils.rnn.pack_padded_sequence(x, input_lengths, batch_first=True) outputs, _ = self.lstm(x) outputs, _ = nn.utils.rnn.pad_packed_sequence(outputs, batch_first=True) return outputs class _Decoder(nn.Module): r"""Decoder with Attention model. 
Args: n_mels (int): number of mel bins n_frames_per_step (int): number of frames processed per step, only 1 is supported encoder_embedding_dim (int): the number of embedding dimensions in the encoder. decoder_rnn_dim (int): number of units in decoder LSTM decoder_max_step (int): maximum number of output mel spectrograms decoder_dropout (float): dropout probability for decoder LSTM decoder_early_stopping (bool): stop decoding when all samples are finished attention_rnn_dim (int): number of units in attention LSTM attention_hidden_dim (int): dimension of attention hidden representation attention_location_n_filter (int): number of filters for attention model attention_location_kernel_size (int): kernel size for attention model attention_dropout (float): dropout probability for attention LSTM prenet_dim (int): number of ReLU units in prenet layers gate_threshold (float): probability threshold for stop token """ def __init__( self, n_mels: int, n_frames_per_step: int, encoder_embedding_dim: int, decoder_rnn_dim: int, decoder_max_step: int, decoder_dropout: float, decoder_early_stopping: bool, attention_rnn_dim: int, attention_hidden_dim: int, attention_location_n_filter: int, attention_location_kernel_size: int, attention_dropout: float, prenet_dim: int, gate_threshold: float, ) -> None: super().__init__() self.n_mels = n_mels self.n_frames_per_step = n_frames_per_step self.encoder_embedding_dim = encoder_embedding_dim self.attention_rnn_dim = attention_rnn_dim self.decoder_rnn_dim = decoder_rnn_dim self.prenet_dim = prenet_dim self.decoder_max_step = decoder_max_step self.gate_threshold = gate_threshold self.attention_dropout = attention_dropout self.decoder_dropout = decoder_dropout self.decoder_early_stopping = decoder_early_stopping self.prenet = _Prenet(n_mels * n_frames_per_step, [prenet_dim, prenet_dim]) self.attention_rnn = nn.LSTMCell( prenet_dim + encoder_embedding_dim, attention_rnn_dim ) self.attention_layer = _Attention( attention_rnn_dim, encoder_embedding_dim, attention_hidden_dim, attention_location_n_filter, attention_location_kernel_size, ) self.decoder_rnn = nn.LSTMCell( attention_rnn_dim + encoder_embedding_dim, decoder_rnn_dim, True ) self.linear_projection = _get_linear_layer( decoder_rnn_dim + encoder_embedding_dim, n_mels * n_frames_per_step ) self.gate_layer = _get_linear_layer( decoder_rnn_dim + encoder_embedding_dim, 1, bias=True, w_init_gain="sigmoid" ) def _get_initial_frame(self, memory: Tensor) -> Tensor: r"""Gets all zeros frames to use as the first decoder input. Args: memory (Tensor): Encoder outputs with shape (n_batch, max of ``text_lengths``, ``encoder_embedding_dim``). Returns: decoder_input (Tensor): all zeros frames with shape (n_batch, max of ``text_lengths``, ``n_mels * n_frames_per_step``). """ n_batch = memory.size(0) dtype = memory.dtype device = memory.device decoder_input = torch.zeros( n_batch, self.n_mels * self.n_frames_per_step, dtype=dtype, device=device ) return decoder_input def _initialize_decoder_states( self, memory: Tensor ) -> Tuple[Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, Tensor]: r"""Initializes attention rnn states, decoder rnn states, attention weights, attention cumulative weights, attention context, stores memory and stores processed memory. Args: memory (Tensor): Encoder outputs with shape (n_batch, max of ``text_lengths``, ``encoder_embedding_dim``). Returns: attention_hidden (Tensor): Hidden state of the attention LSTM with shape (n_batch, ``attention_rnn_dim``). 
attention_cell (Tensor): Hidden state of the attention LSTM with shape (n_batch, ``attention_rnn_dim``). decoder_hidden (Tensor): Hidden state of the decoder LSTM with shape (n_batch, ``decoder_rnn_dim``). decoder_cell (Tensor): Hidden state of the decoder LSTM with shape (n_batch, ``decoder_rnn_dim``). attention_weights (Tensor): Attention weights with shape (n_batch, max of ``text_lengths``). attention_weights_cum (Tensor): Cumulated attention weights with shape (n_batch, max of ``text_lengths``). attention_context (Tensor): Context vector with shape (n_batch, ``encoder_embedding_dim``). processed_memory (Tensor): Processed encoder outputs with shape (n_batch, max of ``text_lengths``, ``attention_hidden_dim``). """ n_batch = memory.size(0) max_time = memory.size(1) dtype = memory.dtype device = memory.device attention_hidden = torch.zeros( n_batch, self.attention_rnn_dim, dtype=dtype, device=device ) attention_cell = torch.zeros( n_batch, self.attention_rnn_dim, dtype=dtype, device=device ) decoder_hidden = torch.zeros( n_batch, self.decoder_rnn_dim, dtype=dtype, device=device ) decoder_cell = torch.zeros( n_batch, self.decoder_rnn_dim, dtype=dtype, device=device ) attention_weights = torch.zeros(n_batch, max_time, dtype=dtype, device=device) attention_weights_cum = torch.zeros( n_batch, max_time, dtype=dtype, device=device ) attention_context = torch.zeros( n_batch, self.encoder_embedding_dim, dtype=dtype, device=device ) processed_memory = self.attention_layer.memory_layer(memory) return ( attention_hidden, attention_cell, decoder_hidden, decoder_cell, attention_weights, attention_weights_cum, attention_context, processed_memory, ) def _parse_decoder_inputs(self, decoder_inputs: Tensor) -> Tensor: r"""Prepares decoder inputs. Args: decoder_inputs (Tensor): Inputs used for teacher-forced training, i.e. mel-specs, with shape (n_batch, ``n_mels``, max of ``mel_specgram_lengths``) Returns: inputs (Tensor): Processed decoder inputs with shape (max of ``mel_specgram_lengths``, n_batch, ``n_mels``). 
""" # (n_batch, n_mels, mel_specgram_lengths.max()) -> (n_batch, mel_specgram_lengths.max(), n_mels) decoder_inputs = decoder_inputs.transpose(1, 2) decoder_inputs = decoder_inputs.view( decoder_inputs.size(0), int(decoder_inputs.size(1) / self.n_frames_per_step), -1, ) # (n_batch, mel_specgram_lengths.max(), n_mels) -> (mel_specgram_lengths.max(), n_batch, n_mels) decoder_inputs = decoder_inputs.transpose(0, 1) return decoder_inputs def _parse_decoder_outputs( self, mel_specgram: Tensor, gate_outputs: Tensor, alignments: Tensor ) -> Tuple[Tensor, Tensor, Tensor]: r"""Prepares decoder outputs for output Args: mel_specgram (Tensor): mel spectrogram with shape (max of ``mel_specgram_lengths``, n_batch, ``n_mels``) gate_outputs (Tensor): predicted stop token with shape (max of ``mel_specgram_lengths``, n_batch) alignments (Tensor): sequence of attention weights from the decoder with shape (max of ``mel_specgram_lengths``, n_batch, max of ``text_lengths``) Returns: mel_specgram (Tensor): mel spectrogram with shape (n_batch, ``n_mels``, max of ``mel_specgram_lengths``) gate_outputs (Tensor): predicted stop token with shape (n_batch, max of ``mel_specgram_lengths``) alignments (Tensor): sequence of attention weights from the decoder with shape (n_batch, max of ``mel_specgram_lengths``, max of ``text_lengths``) """ # (mel_specgram_lengths.max(), n_batch, text_lengths.max()) # -> (n_batch, mel_specgram_lengths.max(), text_lengths.max()) alignments = alignments.transpose(0, 1).contiguous() # (mel_specgram_lengths.max(), n_batch) -> (n_batch, mel_specgram_lengths.max()) gate_outputs = gate_outputs.transpose(0, 1).contiguous() # (mel_specgram_lengths.max(), n_batch, n_mels) -> (n_batch, mel_specgram_lengths.max(), n_mels) mel_specgram = mel_specgram.transpose(0, 1).contiguous() # decouple frames per step shape = (mel_specgram.shape[0], -1, self.n_mels) mel_specgram = mel_specgram.view(*shape) # (n_batch, mel_specgram_lengths.max(), n_mels) -> (n_batch, n_mels, T_out) mel_specgram = mel_specgram.transpose(1, 2) return mel_specgram, gate_outputs, alignments def decode( self, decoder_input: Tensor, attention_hidden: Tensor, attention_cell: Tensor, decoder_hidden: Tensor, decoder_cell: Tensor, attention_weights: Tensor, attention_weights_cum: Tensor, attention_context: Tensor, memory: Tensor, processed_memory: Tensor, mask: Tensor, ) -> Tuple[Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, Tensor]: r"""Decoder step using stored states, attention and memory Args: decoder_input (Tensor): Output of the Prenet with shape (n_batch, ``prenet_dim``). attention_hidden (Tensor): Hidden state of the attention LSTM with shape (n_batch, ``attention_rnn_dim``). attention_cell (Tensor): Hidden state of the attention LSTM with shape (n_batch, ``attention_rnn_dim``). decoder_hidden (Tensor): Hidden state of the decoder LSTM with shape (n_batch, ``decoder_rnn_dim``). decoder_cell (Tensor): Hidden state of the decoder LSTM with shape (n_batch, ``decoder_rnn_dim``). attention_weights (Tensor): Attention weights with shape (n_batch, max of ``text_lengths``). attention_weights_cum (Tensor): Cumulated attention weights with shape (n_batch, max of ``text_lengths``). attention_context (Tensor): Context vector with shape (n_batch, ``encoder_embedding_dim``). memory (Tensor): Encoder output with shape (n_batch, max of ``text_lengths``, ``encoder_embedding_dim``). processed_memory (Tensor): Processed Encoder outputs with shape (n_batch, max of ``text_lengths``, ``attention_hidden_dim``). 
mask (Tensor): Binary mask for padded data with shape (n_batch, current_num_frames). Returns: decoder_output: Predicted mel spectrogram for the current frame with shape (n_batch, ``n_mels``). gate_prediction (Tensor): Prediction of the stop token with shape (n_batch, ``1``). attention_hidden (Tensor): Hidden state of the attention LSTM with shape (n_batch, ``attention_rnn_dim``). attention_cell (Tensor): Hidden state of the attention LSTM with shape (n_batch, ``attention_rnn_dim``). decoder_hidden (Tensor): Hidden state of the decoder LSTM with shape (n_batch, ``decoder_rnn_dim``). decoder_cell (Tensor): Hidden state of the decoder LSTM with shape (n_batch, ``decoder_rnn_dim``). attention_weights (Tensor): Attention weights with shape (n_batch, max of ``text_lengths``). attention_weights_cum (Tensor): Cumulated attention weights with shape (n_batch, max of ``text_lengths``). attention_context (Tensor): Context vector with shape (n_batch, ``encoder_embedding_dim``). """ cell_input = torch.cat((decoder_input, attention_context), -1) attention_hidden, attention_cell = self.attention_rnn( cell_input, (attention_hidden, attention_cell) ) attention_hidden = F.dropout( attention_hidden, self.attention_dropout, self.training ) attention_weights_cat = torch.cat( (attention_weights.unsqueeze(1), attention_weights_cum.unsqueeze(1)), dim=1 ) attention_context, attention_weights = self.attention_layer( attention_hidden, memory, processed_memory, attention_weights_cat, mask ) attention_weights_cum += attention_weights decoder_input = torch.cat((attention_hidden, attention_context), -1) decoder_hidden, decoder_cell = self.decoder_rnn( decoder_input, (decoder_hidden, decoder_cell) ) decoder_hidden = F.dropout(decoder_hidden, self.decoder_dropout, self.training) decoder_hidden_attention_context = torch.cat( (decoder_hidden, attention_context), dim=1 ) decoder_output = self.linear_projection(decoder_hidden_attention_context) gate_prediction = self.gate_layer(decoder_hidden_attention_context) return ( decoder_output, gate_prediction, attention_hidden, attention_cell, decoder_hidden, decoder_cell, attention_weights, attention_weights_cum, attention_context, ) def forward( self, memory: Tensor, mel_specgram_truth: Tensor, memory_lengths: Tensor ) -> Tuple[Tensor, Tensor, Tensor]: r"""Decoder forward pass for training. Args: memory (Tensor): Encoder outputs with shape (n_batch, max of ``text_lengths``, ``encoder_embedding_dim``). mel_specgram_truth (Tensor): Decoder ground-truth mel-specs for teacher forcing with shape (n_batch, ``n_mels``, max of ``mel_specgram_lengths``). memory_lengths (Tensor): Encoder output lengths for attention masking (the same as ``text_lengths``) with shape (n_batch, ). Returns: mel_specgram (Tensor): Predicted mel spectrogram with shape (n_batch, ``n_mels``, max of ``mel_specgram_lengths``). gate_outputs (Tensor): Predicted stop token for each timestep with shape (n_batch, max of ``mel_specgram_lengths``). alignments (Tensor): Sequence of attention weights from the decoder with shape (n_batch, max of ``mel_specgram_lengths``, max of ``text_lengths``). 
""" decoder_input = self._get_initial_frame(memory).unsqueeze(0) decoder_inputs = self._parse_decoder_inputs(mel_specgram_truth) decoder_inputs = torch.cat((decoder_input, decoder_inputs), dim=0) decoder_inputs = self.prenet(decoder_inputs) mask = _get_mask_from_lengths(memory_lengths) ( attention_hidden, attention_cell, decoder_hidden, decoder_cell, attention_weights, attention_weights_cum, attention_context, processed_memory, ) = self._initialize_decoder_states(memory) mel_outputs, gate_outputs, alignments = [], [], [] while len(mel_outputs) < decoder_inputs.size(0) - 1: decoder_input = decoder_inputs[len(mel_outputs)] ( mel_output, gate_output, attention_hidden, attention_cell, decoder_hidden, decoder_cell, attention_weights, attention_weights_cum, attention_context, ) = self.decode( decoder_input, attention_hidden, attention_cell, decoder_hidden, decoder_cell, attention_weights, attention_weights_cum, attention_context, memory, processed_memory, mask, ) mel_outputs += [mel_output.squeeze(1)] gate_outputs += [gate_output.squeeze()] alignments += [attention_weights] mel_specgram, gate_outputs, alignments = self._parse_decoder_outputs( torch.stack(mel_outputs), torch.stack(gate_outputs), torch.stack(alignments) ) return mel_specgram, gate_outputs, alignments def _get_go_frame(self, memory: Tensor) -> Tensor: """Gets all zeros frames to use as the first decoder input args: memory (Tensor): Encoder outputs with shape (n_batch, max of ``text_lengths``, ``encoder_embedding_dim``). returns: decoder_input (Tensor): All zeros frames with shape(n_batch, ``n_mels`` * ``n_frame_per_step``). """ n_batch = memory.size(0) dtype = memory.dtype device = memory.device decoder_input = torch.zeros( n_batch, self.n_mels * self.n_frames_per_step, dtype=dtype, device=device ) return decoder_input @torch.jit.export def infer(self, memory: Tensor, memory_lengths: Tensor) -> Tuple[Tensor, Tensor, Tensor, Tensor]: """Decoder inference Args: memory (Tensor): Encoder outputs with shape (n_batch, max of ``text_lengths``, ``encoder_embedding_dim``). memory_lengths (Tensor): Encoder output lengths for attention masking (the same as ``text_lengths``) with shape (n_batch, ). Returns: mel_specgram (Tensor): Predicted mel spectrogram with shape (n_batch, ``n_mels``, max of ``mel_specgram_lengths``). mel_specgram_lengths (Tensor): the length of the predicted mel spectrogram (n_batch, )) gate_outputs (Tensor): Predicted stop token for each timestep with shape (n_batch, max of ``mel_specgram_lengths``). alignments (Tensor): Sequence of attention weights from the decoder with shape (n_batch, max of ``mel_specgram_lengths``, max of ``text_lengths``). 
""" batch_size, device = memory.size(0), memory.device decoder_input = self._get_go_frame(memory) mask = _get_mask_from_lengths(memory_lengths) ( attention_hidden, attention_cell, decoder_hidden, decoder_cell, attention_weights, attention_weights_cum, attention_context, processed_memory, ) = self._initialize_decoder_states(memory) mel_specgram_lengths = torch.zeros([batch_size], dtype=torch.int32, device=device) finished = torch.zeros([batch_size], dtype=torch.bool, device=device) mel_specgrams: List[Tensor] = [] gate_outputs: List[Tensor] = [] alignments: List[Tensor] = [] for _ in range(self.decoder_max_step): decoder_input = self.prenet(decoder_input) ( mel_specgram, gate_output, attention_hidden, attention_cell, decoder_hidden, decoder_cell, attention_weights, attention_weights_cum, attention_context, ) = self.decode( decoder_input, attention_hidden, attention_cell, decoder_hidden, decoder_cell, attention_weights, attention_weights_cum, attention_context, memory, processed_memory, mask, ) mel_specgrams.append(mel_specgram.unsqueeze(0)) gate_outputs.append(gate_output.transpose(0, 1)) alignments.append(attention_weights) mel_specgram_lengths[~finished] += 1 finished |= torch.sigmoid(gate_output.squeeze(1)) > self.gate_threshold if self.decoder_early_stopping and torch.all(finished): break decoder_input = mel_specgram if len(mel_specgrams) == self.decoder_max_step: warnings.warn( "Reached max decoder steps. The generated spectrogram might not cover " "the whole transcript.") mel_specgrams = torch.cat(mel_specgrams, dim=0) gate_outputs = torch.cat(gate_outputs, dim=0) alignments = torch.cat(alignments, dim=0) mel_specgrams, gate_outputs, alignments = self._parse_decoder_outputs( mel_specgrams, gate_outputs, alignments ) return mel_specgrams, mel_specgram_lengths, gate_outputs, alignments class Tacotron2(nn.Module): r"""Tacotron2 model based on the implementation from `Nvidia <https://github.com/NVIDIA/DeepLearningExamples/>`_. The original implementation was introduced in *Natural TTS Synthesis by Conditioning WaveNet on Mel Spectrogram Predictions* [:footcite:`shen2018natural`]. Args: mask_padding (bool, optional): Use mask padding (Default: ``False``). n_mels (int, optional): Number of mel bins (Default: ``80``). n_symbol (int, optional): Number of symbols for the input text (Default: ``148``). n_frames_per_step (int, optional): Number of frames processed per step, only 1 is supported (Default: ``1``). symbol_embedding_dim (int, optional): Input embedding dimension (Default: ``512``). encoder_n_convolution (int, optional): Number of encoder convolutions (Default: ``3``). encoder_kernel_size (int, optional): Encoder kernel size (Default: ``5``). encoder_embedding_dim (int, optional): Encoder embedding dimension (Default: ``512``). decoder_rnn_dim (int, optional): Number of units in decoder LSTM (Default: ``1024``). decoder_max_step (int, optional): Maximum number of output mel spectrograms (Default: ``2000``). decoder_dropout (float, optional): Dropout probability for decoder LSTM (Default: ``0.1``). decoder_early_stopping (bool, optional): Continue decoding after all samples are finished (Default: ``True``). attention_rnn_dim (int, optional): Number of units in attention LSTM (Default: ``1024``). attention_hidden_dim (int, optional): Dimension of attention hidden representation (Default: ``128``). attention_location_n_filter (int, optional): Number of filters for attention model (Default: ``32``). 
attention_location_kernel_size (int, optional): Kernel size for attention model (Default: ``31``). attention_dropout (float, optional): Dropout probability for attention LSTM (Default: ``0.1``). prenet_dim (int, optional): Number of ReLU units in prenet layers (Default: ``256``). postnet_n_convolution (int, optional): Number of postnet convolutions (Default: ``5``). postnet_kernel_size (int, optional): Postnet kernel size (Default: ``5``). postnet_embedding_dim (int, optional): Postnet embedding dimension (Default: ``512``). gate_threshold (float, optional): Probability threshold for stop token (Default: ``0.5``). """ def __init__( self, mask_padding: bool = False, n_mels: int = 80, n_symbol: int = 148, n_frames_per_step: int = 1, symbol_embedding_dim: int = 512, encoder_embedding_dim: int = 512, encoder_n_convolution: int = 3, encoder_kernel_size: int = 5, decoder_rnn_dim: int = 1024, decoder_max_step: int = 2000, decoder_dropout: float = 0.1, decoder_early_stopping: bool = True, attention_rnn_dim: int = 1024, attention_hidden_dim: int = 128, attention_location_n_filter: int = 32, attention_location_kernel_size: int = 31, attention_dropout: float = 0.1, prenet_dim: int = 256, postnet_n_convolution: int = 5, postnet_kernel_size: int = 5, postnet_embedding_dim: int = 512, gate_threshold: float = 0.5, ) -> None: super().__init__() self.mask_padding = mask_padding self.n_mels = n_mels self.n_frames_per_step = n_frames_per_step self.embedding = nn.Embedding(n_symbol, symbol_embedding_dim) std = sqrt(2.0 / (n_symbol + symbol_embedding_dim)) val = sqrt(3.0) * std self.embedding.weight.data.uniform_(-val, val) self.encoder = _Encoder( encoder_embedding_dim, encoder_n_convolution, encoder_kernel_size ) self.decoder = _Decoder( n_mels, n_frames_per_step, encoder_embedding_dim, decoder_rnn_dim, decoder_max_step, decoder_dropout, decoder_early_stopping, attention_rnn_dim, attention_hidden_dim, attention_location_n_filter, attention_location_kernel_size, attention_dropout, prenet_dim, gate_threshold, ) self.postnet = _Postnet( n_mels, postnet_embedding_dim, postnet_kernel_size, postnet_n_convolution ) def forward( self, tokens: Tensor, token_lengths: Tensor, mel_specgram: Tensor, mel_specgram_lengths: Tensor, ) -> Tuple[Tensor, Tensor, Tensor, Tensor]: r"""Pass the input through the Tacotron2 model. This is in teacher forcing mode, which is generally used for training. The input ``tokens`` should be padded with zeros to length max of ``token_lengths``. The input ``mel_specgram`` should be padded with zeros to length max of ``mel_specgram_lengths``. Args: tokens (Tensor): The input tokens to Tacotron2 with shape `(n_batch, max of token_lengths)`. token_lengths (Tensor): The valid length of each sample in ``tokens`` with shape `(n_batch, )`. mel_specgram (Tensor): The target mel spectrogram with shape `(n_batch, n_mels, max of mel_specgram_lengths)`. mel_specgram_lengths (Tensor): The length of each mel spectrogram with shape `(n_batch, )`. Returns: [Tensor, Tensor, Tensor, Tensor]: Tensor Mel spectrogram before Postnet with shape `(n_batch, n_mels, max of mel_specgram_lengths)`. Tensor Mel spectrogram after Postnet with shape `(n_batch, n_mels, max of mel_specgram_lengths)`. Tensor The output for stop token at each time step with shape `(n_batch, max of mel_specgram_lengths)`. Tensor Sequence of attention weights from the decoder with shape `(n_batch, max of mel_specgram_lengths, max of token_lengths)`. 
""" embedded_inputs = self.embedding(tokens).transpose(1, 2) encoder_outputs = self.encoder(embedded_inputs, token_lengths) mel_specgram, gate_outputs, alignments = self.decoder( encoder_outputs, mel_specgram, memory_lengths=token_lengths ) mel_specgram_postnet = self.postnet(mel_specgram) mel_specgram_postnet = mel_specgram + mel_specgram_postnet if self.mask_padding: mask = _get_mask_from_lengths(mel_specgram_lengths) mask = mask.expand(self.n_mels, mask.size(0), mask.size(1)) mask = mask.permute(1, 0, 2) mel_specgram.masked_fill_(mask, 0.0) mel_specgram_postnet.masked_fill_(mask, 0.0) gate_outputs.masked_fill_(mask[:, 0, :], 1e3) return mel_specgram, mel_specgram_postnet, gate_outputs, alignments @torch.jit.export def infer(self, tokens: Tensor, lengths: Optional[Tensor] = None) -> Tuple[Tensor, Tensor, Tensor]: r"""Using Tacotron2 for inference. The input is a batch of encoded sentences (``tokens``) and its corresponding lengths (``lengths``). The output is the generated mel spectrograms, its corresponding lengths, and the attention weights from the decoder. The input `tokens` should be padded with zeros to length max of ``lengths``. Args: tokens (Tensor): The input tokens to Tacotron2 with shape `(n_batch, max of lengths)`. lengths (Tensor or None, optional): The valid length of each sample in ``tokens`` with shape `(n_batch, )`. If ``None``, it is assumed that the all the tokens are valid. Default: ``None`` Returns: (Tensor, Tensor, Tensor): Tensor The predicted mel spectrogram with shape `(n_batch, n_mels, max of mel_specgram_lengths)`. Tensor The length of the predicted mel spectrogram with shape `(n_batch, )`. Tensor Sequence of attention weights from the decoder with shape `(n_batch, max of mel_specgram_lengths, max of lengths)`. """ n_batch, max_length = tokens.shape if lengths is None: lengths = torch.tensor([max_length]).expand(n_batch).to(tokens.device, tokens.dtype) assert lengths is not None # For TorchScript compiler embedded_inputs = self.embedding(tokens).transpose(1, 2) encoder_outputs = self.encoder(embedded_inputs, lengths) mel_specgram, mel_specgram_lengths, _, alignments = self.decoder.infer( encoder_outputs, lengths ) mel_outputs_postnet = self.postnet(mel_specgram) mel_outputs_postnet = mel_specgram + mel_outputs_postnet alignments = alignments.unfold(1, n_batch, n_batch).transpose(0, 2) return mel_outputs_postnet, mel_specgram_lengths, alignments
"""Implements Conv-TasNet with building blocks of it. Based on https://github.com/naplab/Conv-TasNet/tree/e66d82a8f956a69749ec8a4ae382217faa097c5c """ from typing import Tuple, Optional import torch class ConvBlock(torch.nn.Module): """1D Convolutional block. Args: io_channels (int): The number of input/output channels, <B, Sc> hidden_channels (int): The number of channels in the internal layers, <H>. kernel_size (int): The convolution kernel size of the middle layer, <P>. padding (int): Padding value of the convolution in the middle layer. dilation (int, optional): Dilation value of the convolution in the middle layer. no_redisual (bool, optional): Disable residual block/output. Note: This implementation corresponds to the "non-causal" setting in the paper. """ def __init__( self, io_channels: int, hidden_channels: int, kernel_size: int, padding: int, dilation: int = 1, no_residual: bool = False, ): super().__init__() self.conv_layers = torch.nn.Sequential( torch.nn.Conv1d( in_channels=io_channels, out_channels=hidden_channels, kernel_size=1 ), torch.nn.PReLU(), torch.nn.GroupNorm(num_groups=1, num_channels=hidden_channels, eps=1e-08), torch.nn.Conv1d( in_channels=hidden_channels, out_channels=hidden_channels, kernel_size=kernel_size, padding=padding, dilation=dilation, groups=hidden_channels, ), torch.nn.PReLU(), torch.nn.GroupNorm(num_groups=1, num_channels=hidden_channels, eps=1e-08), ) self.res_out = ( None if no_residual else torch.nn.Conv1d( in_channels=hidden_channels, out_channels=io_channels, kernel_size=1 ) ) self.skip_out = torch.nn.Conv1d( in_channels=hidden_channels, out_channels=io_channels, kernel_size=1 ) def forward( self, input: torch.Tensor ) -> Tuple[Optional[torch.Tensor], torch.Tensor]: feature = self.conv_layers(input) if self.res_out is None: residual = None else: residual = self.res_out(feature) skip_out = self.skip_out(feature) return residual, skip_out class MaskGenerator(torch.nn.Module): """TCN (Temporal Convolution Network) Separation Module Generates masks for separation. Args: input_dim (int): Input feature dimension, <N>. num_sources (int): The number of sources to separate. kernel_size (int): The convolution kernel size of conv blocks, <P>. num_featrs (int): Input/output feature dimenstion of conv blocks, <B, Sc>. num_hidden (int): Intermediate feature dimention of conv blocks, <H> num_layers (int): The number of conv blocks in one stack, <X>. num_stacks (int): The number of conv block stacks, <R>. msk_activate (str): The activation function of the mask output. Note: This implementation corresponds to the "non-causal" setting in the paper. 
""" def __init__( self, input_dim: int, num_sources: int, kernel_size: int, num_feats: int, num_hidden: int, num_layers: int, num_stacks: int, msk_activate: str, ): super().__init__() self.input_dim = input_dim self.num_sources = num_sources self.input_norm = torch.nn.GroupNorm( num_groups=1, num_channels=input_dim, eps=1e-8 ) self.input_conv = torch.nn.Conv1d( in_channels=input_dim, out_channels=num_feats, kernel_size=1 ) self.receptive_field = 0 self.conv_layers = torch.nn.ModuleList([]) for s in range(num_stacks): for l in range(num_layers): multi = 2 ** l self.conv_layers.append( ConvBlock( io_channels=num_feats, hidden_channels=num_hidden, kernel_size=kernel_size, dilation=multi, padding=multi, # The last ConvBlock does not need residual no_residual=(l == (num_layers - 1) and s == (num_stacks - 1)), ) ) self.receptive_field += ( kernel_size if s == 0 and l == 0 else (kernel_size - 1) * multi ) self.output_prelu = torch.nn.PReLU() self.output_conv = torch.nn.Conv1d( in_channels=num_feats, out_channels=input_dim * num_sources, kernel_size=1, ) if msk_activate == "sigmoid": self.mask_activate = torch.nn.Sigmoid() elif msk_activate == "relu": self.mask_activate = torch.nn.ReLU() else: raise ValueError(f"Unsupported activation {msk_activate}") def forward(self, input: torch.Tensor) -> torch.Tensor: """Generate separation mask. Args: input (torch.Tensor): 3D Tensor with shape [batch, features, frames] Returns: Tensor: shape [batch, num_sources, features, frames] """ batch_size = input.shape[0] feats = self.input_norm(input) feats = self.input_conv(feats) output = 0.0 for layer in self.conv_layers: residual, skip = layer(feats) if residual is not None: # the last conv layer does not produce residual feats = feats + residual output = output + skip output = self.output_prelu(output) output = self.output_conv(output) output = self.mask_activate(output) return output.view(batch_size, self.num_sources, self.input_dim, -1) class ConvTasNet(torch.nn.Module): """Conv-TasNet: a fully-convolutional time-domain audio separation network *Conv-TasNet: Surpassing Ideal Time–Frequency Magnitude Masking for Speech Separation* [:footcite:`Luo_2019`]. Args: num_sources (int, optional): The number of sources to split. enc_kernel_size (int, optional): The convolution kernel size of the encoder/decoder, <L>. enc_num_feats (int, optional): The feature dimensions passed to mask generator, <N>. msk_kernel_size (int, optional): The convolution kernel size of the mask generator, <P>. msk_num_feats (int, optional): The input/output feature dimension of conv block in the mask generator, <B, Sc>. msk_num_hidden_feats (int, optional): The internal feature dimension of conv block of the mask generator, <H>. msk_num_layers (int, optional): The number of layers in one conv block of the mask generator, <X>. msk_num_stacks (int, optional): The numbr of conv blocks of the mask generator, <R>. msk_activate (str, optional): The activation function of the mask output (Default: ``sigmoid``). Note: This implementation corresponds to the "non-causal" setting in the paper. 
""" def __init__( self, num_sources: int = 2, # encoder/decoder parameters enc_kernel_size: int = 16, enc_num_feats: int = 512, # mask generator parameters msk_kernel_size: int = 3, msk_num_feats: int = 128, msk_num_hidden_feats: int = 512, msk_num_layers: int = 8, msk_num_stacks: int = 3, msk_activate: str = "sigmoid", ): super().__init__() self.num_sources = num_sources self.enc_num_feats = enc_num_feats self.enc_kernel_size = enc_kernel_size self.enc_stride = enc_kernel_size // 2 self.encoder = torch.nn.Conv1d( in_channels=1, out_channels=enc_num_feats, kernel_size=enc_kernel_size, stride=self.enc_stride, padding=self.enc_stride, bias=False, ) self.mask_generator = MaskGenerator( input_dim=enc_num_feats, num_sources=num_sources, kernel_size=msk_kernel_size, num_feats=msk_num_feats, num_hidden=msk_num_hidden_feats, num_layers=msk_num_layers, num_stacks=msk_num_stacks, msk_activate=msk_activate, ) self.decoder = torch.nn.ConvTranspose1d( in_channels=enc_num_feats, out_channels=1, kernel_size=enc_kernel_size, stride=self.enc_stride, padding=self.enc_stride, bias=False, ) def _align_num_frames_with_strides( self, input: torch.Tensor ) -> Tuple[torch.Tensor, int]: """Pad input Tensor so that the end of the input tensor corresponds with 1. (if kernel size is odd) the center of the last convolution kernel or 2. (if kernel size is even) the end of the first half of the last convolution kernel Assumption: The resulting Tensor will be padded with the size of stride (== kernel_width // 2) on the both ends in Conv1D |<--- k_1 --->| | | |<-- k_n-1 -->| | | | |<--- k_n --->| | | | | | | | | | | | v v v | |<---->|<--- input signal --->|<--->|<---->| stride PAD stride Args: input (torch.Tensor): 3D Tensor with shape (batch_size, channels==1, frames) Returns: Tensor: Padded Tensor int: Number of paddings performed """ batch_size, num_channels, num_frames = input.shape is_odd = self.enc_kernel_size % 2 num_strides = (num_frames - is_odd) // self.enc_stride num_remainings = num_frames - (is_odd + num_strides * self.enc_stride) if num_remainings == 0: return input, 0 num_paddings = self.enc_stride - num_remainings pad = torch.zeros( batch_size, num_channels, num_paddings, dtype=input.dtype, device=input.device, ) return torch.cat([input, pad], 2), num_paddings def forward(self, input: torch.Tensor) -> torch.Tensor: """Perform source separation. Generate audio source waveforms. Args: input (torch.Tensor): 3D Tensor with shape [batch, channel==1, frames] Returns: Tensor: 3D Tensor with shape [batch, channel==num_sources, frames] """ if input.ndim != 3 or input.shape[1] != 1: raise ValueError( f"Expected 3D tensor (batch, channel==1, frames). Found: {input.shape}" ) # B: batch size # L: input frame length # L': padded input frame length # F: feature dimension # M: feature frame length # S: number of sources padded, num_pads = self._align_num_frames_with_strides(input) # B, 1, L' batch_size, num_padded_frames = padded.shape[0], padded.shape[2] feats = self.encoder(padded) # B, F, M masked = self.mask_generator(feats) * feats.unsqueeze(1) # B, S, F, M masked = masked.view( batch_size * self.num_sources, self.enc_num_feats, -1 ) # B*S, F, M decoded = self.decoder(masked) # B*S, 1, L' output = decoded.view( batch_size, self.num_sources, num_padded_frames ) # B, S, L' if num_pads > 0: output = output[..., :-num_pads] # B, S, L return output
from .wav2letter import Wav2Letter from .wavernn import WaveRNN from .conv_tasnet import ConvTasNet from .deepspeech import DeepSpeech from .tacotron2 import Tacotron2 from .wav2vec2 import ( Wav2Vec2Model, wav2vec2_model, wav2vec2_base, wav2vec2_large, wav2vec2_large_lv60k, hubert_base, hubert_large, hubert_xlarge, ) __all__ = [ 'Wav2Letter', 'WaveRNN', 'ConvTasNet', 'DeepSpeech', 'Wav2Vec2Model', 'wav2vec2_model', 'wav2vec2_base', 'wav2vec2_large', 'wav2vec2_large_lv60k', 'hubert_base', 'hubert_large', 'hubert_xlarge', 'Tacotron2', ]
import torch __all__ = ["DeepSpeech"] class FullyConnected(torch.nn.Module): """ Args: n_feature: Number of input features n_hidden: Internal hidden unit size. """ def __init__(self, n_feature: int, n_hidden: int, dropout: float, relu_max_clip: int = 20) -> None: super(FullyConnected, self).__init__() self.fc = torch.nn.Linear(n_feature, n_hidden, bias=True) self.relu_max_clip = relu_max_clip self.dropout = dropout def forward(self, x: torch.Tensor) -> torch.Tensor: x = self.fc(x) x = torch.nn.functional.relu(x) x = torch.nn.functional.hardtanh(x, 0, self.relu_max_clip) if self.dropout: x = torch.nn.functional.dropout(x, self.dropout, self.training) return x class DeepSpeech(torch.nn.Module): """ DeepSpeech model architecture from *Deep Speech: Scaling up end-to-end speech recognition* [:footcite:`hannun2014deep`]. Args: n_feature: Number of input features n_hidden: Internal hidden unit size. n_class: Number of output classes """ def __init__( self, n_feature: int, n_hidden: int = 2048, n_class: int = 40, dropout: float = 0.0, ) -> None: super(DeepSpeech, self).__init__() self.n_hidden = n_hidden self.fc1 = FullyConnected(n_feature, n_hidden, dropout) self.fc2 = FullyConnected(n_hidden, n_hidden, dropout) self.fc3 = FullyConnected(n_hidden, n_hidden, dropout) self.bi_rnn = torch.nn.RNN( n_hidden, n_hidden, num_layers=1, nonlinearity="relu", bidirectional=True ) self.fc4 = FullyConnected(n_hidden, n_hidden, dropout) self.out = torch.nn.Linear(n_hidden, n_class) def forward(self, x: torch.Tensor) -> torch.Tensor: """ Args: x (torch.Tensor): Tensor of dimension (batch, channel, time, feature). Returns: Tensor: Predictor tensor of dimension (batch, time, class). """ # N x C x T x F x = self.fc1(x) # N x C x T x H x = self.fc2(x) # N x C x T x H x = self.fc3(x) # N x C x T x H x = x.squeeze(1) # N x T x H x = x.transpose(0, 1) # T x N x H x, _ = self.bi_rnn(x) # The fifth (non-recurrent) layer takes both the forward and backward units as inputs x = x[:, :, :self.n_hidden] + x[:, :, self.n_hidden:] # T x N x H x = self.fc4(x) # T x N x H x = self.out(x) # T x N x n_class x = x.permute(1, 0, 2) # N x T x n_class x = torch.nn.functional.log_softmax(x, dim=2) # N x T x n_class return x
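

# Usage sketch (editor's addition, not part of the library): run the model on a
# random single-channel feature tensor. Input is (batch, channel, time, feature);
# the output holds per-frame log-probabilities over ``n_class`` labels.
if __name__ == "__main__":
    _model = DeepSpeech(n_feature=64, n_class=40)
    _features = torch.rand(4, 1, 100, 64)   # (batch, channel == 1, time, feature)
    _log_probs = _model(_features)          # (batch, time, n_class)
    print(_log_probs.shape)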
from typing import List, Tuple, Optional import math import torch from torch import Tensor from torch import nn import torch.nn.functional as F __all__ = [ "ResBlock", "MelResNet", "Stretch2d", "UpsampleNetwork", "WaveRNN", ] class ResBlock(nn.Module): r"""ResNet block based on *Efficient Neural Audio Synthesis* [:footcite:`kalchbrenner2018efficient`]. Args: n_freq: the number of bins in a spectrogram. (Default: ``128``) Examples >>> resblock = ResBlock() >>> input = torch.rand(10, 128, 512) # a random spectrogram >>> output = resblock(input) # shape: (10, 128, 512) """ def __init__(self, n_freq: int = 128) -> None: super().__init__() self.resblock_model = nn.Sequential( nn.Conv1d(in_channels=n_freq, out_channels=n_freq, kernel_size=1, bias=False), nn.BatchNorm1d(n_freq), nn.ReLU(inplace=True), nn.Conv1d(in_channels=n_freq, out_channels=n_freq, kernel_size=1, bias=False), nn.BatchNorm1d(n_freq) ) def forward(self, specgram: Tensor) -> Tensor: r"""Pass the input through the ResBlock layer. Args: specgram (Tensor): the input sequence to the ResBlock layer (n_batch, n_freq, n_time). Return: Tensor shape: (n_batch, n_freq, n_time) """ return self.resblock_model(specgram) + specgram class MelResNet(nn.Module): r"""MelResNet layer uses a stack of ResBlocks on spectrogram. Args: n_res_block: the number of ResBlock in stack. (Default: ``10``) n_freq: the number of bins in a spectrogram. (Default: ``128``) n_hidden: the number of hidden dimensions of resblock. (Default: ``128``) n_output: the number of output dimensions of melresnet. (Default: ``128``) kernel_size: the number of kernel size in the first Conv1d layer. (Default: ``5``) Examples >>> melresnet = MelResNet() >>> input = torch.rand(10, 128, 512) # a random spectrogram >>> output = melresnet(input) # shape: (10, 128, 508) """ def __init__(self, n_res_block: int = 10, n_freq: int = 128, n_hidden: int = 128, n_output: int = 128, kernel_size: int = 5) -> None: super().__init__() ResBlocks = [ResBlock(n_hidden) for _ in range(n_res_block)] self.melresnet_model = nn.Sequential( nn.Conv1d(in_channels=n_freq, out_channels=n_hidden, kernel_size=kernel_size, bias=False), nn.BatchNorm1d(n_hidden), nn.ReLU(inplace=True), *ResBlocks, nn.Conv1d(in_channels=n_hidden, out_channels=n_output, kernel_size=1) ) def forward(self, specgram: Tensor) -> Tensor: r"""Pass the input through the MelResNet layer. Args: specgram (Tensor): the input sequence to the MelResNet layer (n_batch, n_freq, n_time). Return: Tensor shape: (n_batch, n_output, n_time - kernel_size + 1) """ return self.melresnet_model(specgram) class Stretch2d(nn.Module): r"""Upscale the frequency and time dimensions of a spectrogram. Args: time_scale: the scale factor in time dimension freq_scale: the scale factor in frequency dimension Examples >>> stretch2d = Stretch2d(time_scale=10, freq_scale=5) >>> input = torch.rand(10, 100, 512) # a random spectrogram >>> output = stretch2d(input) # shape: (10, 500, 5120) """ def __init__(self, time_scale: int, freq_scale: int) -> None: super().__init__() self.freq_scale = freq_scale self.time_scale = time_scale def forward(self, specgram: Tensor) -> Tensor: r"""Pass the input through the Stretch2d layer. Args: specgram (Tensor): the input sequence to the Stretch2d layer (..., n_freq, n_time). Return: Tensor shape: (..., n_freq * freq_scale, n_time * time_scale) """ return specgram.repeat_interleave(self.freq_scale, -2).repeat_interleave(self.time_scale, -1) class UpsampleNetwork(nn.Module): r"""Upscale the dimensions of a spectrogram. 
Args: upsample_scales: the list of upsample scales. n_res_block: the number of ResBlock in stack. (Default: ``10``) n_freq: the number of bins in a spectrogram. (Default: ``128``) n_hidden: the number of hidden dimensions of resblock. (Default: ``128``) n_output: the number of output dimensions of melresnet. (Default: ``128``) kernel_size: the number of kernel size in the first Conv1d layer. (Default: ``5``) Examples >>> upsamplenetwork = UpsampleNetwork(upsample_scales=[4, 4, 16]) >>> input = torch.rand(10, 128, 10) # a random spectrogram >>> output = upsamplenetwork(input) # shape: (10, 1536, 128), (10, 1536, 128) """ def __init__(self, upsample_scales: List[int], n_res_block: int = 10, n_freq: int = 128, n_hidden: int = 128, n_output: int = 128, kernel_size: int = 5) -> None: super().__init__() total_scale = 1 for upsample_scale in upsample_scales: total_scale *= upsample_scale self.total_scale: int = total_scale self.indent = (kernel_size - 1) // 2 * total_scale self.resnet = MelResNet(n_res_block, n_freq, n_hidden, n_output, kernel_size) self.resnet_stretch = Stretch2d(total_scale, 1) up_layers = [] for scale in upsample_scales: stretch = Stretch2d(scale, 1) conv = nn.Conv2d(in_channels=1, out_channels=1, kernel_size=(1, scale * 2 + 1), padding=(0, scale), bias=False) conv.weight.data.fill_(1. / (scale * 2 + 1)) up_layers.append(stretch) up_layers.append(conv) self.upsample_layers = nn.Sequential(*up_layers) def forward(self, specgram: Tensor) -> Tuple[Tensor, Tensor]: r"""Pass the input through the UpsampleNetwork layer. Args: specgram (Tensor): the input sequence to the UpsampleNetwork layer (n_batch, n_freq, n_time) Return: Tensor shape: (n_batch, n_freq, (n_time - kernel_size + 1) * total_scale), (n_batch, n_output, (n_time - kernel_size + 1) * total_scale) where total_scale is the product of all elements in upsample_scales. """ resnet_output = self.resnet(specgram).unsqueeze(1) resnet_output = self.resnet_stretch(resnet_output) resnet_output = resnet_output.squeeze(1) specgram = specgram.unsqueeze(1) upsampling_output = self.upsample_layers(specgram) upsampling_output = upsampling_output.squeeze(1)[:, :, self.indent:-self.indent] return upsampling_output, resnet_output class WaveRNN(nn.Module): r"""WaveRNN model based on the implementation from `fatchord <https://github.com/fatchord/WaveRNN>`_. The original implementation was introduced in *Efficient Neural Audio Synthesis* [:footcite:`kalchbrenner2018efficient`]. The input channels of waveform and spectrogram have to be 1. The product of `upsample_scales` must equal `hop_length`. Args: upsample_scales: the list of upsample scales. n_classes: the number of output classes. hop_length: the number of samples between the starts of consecutive frames. n_res_block: the number of ResBlock in stack. (Default: ``10``) n_rnn: the dimension of RNN layer. (Default: ``512``) n_fc: the dimension of fully connected layer. (Default: ``512``) kernel_size: the number of kernel size in the first Conv1d layer. (Default: ``5``) n_freq: the number of bins in a spectrogram. (Default: ``128``) n_hidden: the number of hidden dimensions of resblock. (Default: ``128``) n_output: the number of output dimensions of melresnet. 
(Default: ``128``) Example >>> wavernn = WaveRNN(upsample_scales=[5,5,8], n_classes=512, hop_length=200) >>> waveform, sample_rate = torchaudio.load(file) >>> # waveform shape: (n_batch, n_channel, (n_time - kernel_size + 1) * hop_length) >>> specgram = MelSpectrogram(sample_rate)(waveform) # shape: (n_batch, n_channel, n_freq, n_time) >>> output = wavernn(waveform, specgram) >>> # output shape: (n_batch, n_channel, (n_time - kernel_size + 1) * hop_length, n_classes) """ def __init__(self, upsample_scales: List[int], n_classes: int, hop_length: int, n_res_block: int = 10, n_rnn: int = 512, n_fc: int = 512, kernel_size: int = 5, n_freq: int = 128, n_hidden: int = 128, n_output: int = 128) -> None: super().__init__() self.kernel_size = kernel_size self._pad = (kernel_size - 1 if kernel_size % 2 else kernel_size) // 2 self.n_rnn = n_rnn self.n_aux = n_output // 4 self.hop_length = hop_length self.n_classes = n_classes self.n_bits: int = int(math.log2(self.n_classes)) total_scale = 1 for upsample_scale in upsample_scales: total_scale *= upsample_scale if total_scale != self.hop_length: raise ValueError(f"Expected: total_scale == hop_length, but found {total_scale} != {hop_length}") self.upsample = UpsampleNetwork(upsample_scales, n_res_block, n_freq, n_hidden, n_output, kernel_size) self.fc = nn.Linear(n_freq + self.n_aux + 1, n_rnn) self.rnn1 = nn.GRU(n_rnn, n_rnn, batch_first=True) self.rnn2 = nn.GRU(n_rnn + self.n_aux, n_rnn, batch_first=True) self.relu1 = nn.ReLU(inplace=True) self.relu2 = nn.ReLU(inplace=True) self.fc1 = nn.Linear(n_rnn + self.n_aux, n_fc) self.fc2 = nn.Linear(n_fc + self.n_aux, n_fc) self.fc3 = nn.Linear(n_fc, self.n_classes) def forward(self, waveform: Tensor, specgram: Tensor) -> Tensor: r"""Pass the input through the WaveRNN model. Args: waveform: the input waveform to the WaveRNN layer (n_batch, 1, (n_time - kernel_size + 1) * hop_length) specgram: the input spectrogram to the WaveRNN layer (n_batch, 1, n_freq, n_time) Return: Tensor: shape (n_batch, 1, (n_time - kernel_size + 1) * hop_length, n_classes) """ assert waveform.size(1) == 1, 'Require the input channel of waveform is 1' assert specgram.size(1) == 1, 'Require the input channel of specgram is 1' # remove channel dimension until the end waveform, specgram = waveform.squeeze(1), specgram.squeeze(1) batch_size = waveform.size(0) h1 = torch.zeros(1, batch_size, self.n_rnn, dtype=waveform.dtype, device=waveform.device) h2 = torch.zeros(1, batch_size, self.n_rnn, dtype=waveform.dtype, device=waveform.device) # output of upsample: # specgram: (n_batch, n_freq, (n_time - kernel_size + 1) * total_scale) # aux: (n_batch, n_output, (n_time - kernel_size + 1) * total_scale) specgram, aux = self.upsample(specgram) specgram = specgram.transpose(1, 2) aux = aux.transpose(1, 2) aux_idx = [self.n_aux * i for i in range(5)] a1 = aux[:, :, aux_idx[0]:aux_idx[1]] a2 = aux[:, :, aux_idx[1]:aux_idx[2]] a3 = aux[:, :, aux_idx[2]:aux_idx[3]] a4 = aux[:, :, aux_idx[3]:aux_idx[4]] x = torch.cat([waveform.unsqueeze(-1), specgram, a1], dim=-1) x = self.fc(x) res = x x, _ = self.rnn1(x, h1) x = x + res res = x x = torch.cat([x, a2], dim=-1) x, _ = self.rnn2(x, h2) x = x + res x = torch.cat([x, a3], dim=-1) x = self.fc1(x) x = self.relu1(x) x = torch.cat([x, a4], dim=-1) x = self.fc2(x) x = self.relu2(x) x = self.fc3(x) # bring back channel dimension return x.unsqueeze(1) @torch.jit.export def infer(self, specgram: Tensor, lengths: Optional[Tensor] = None) -> Tuple[Tensor, Optional[Tensor]]: r"""Inference method of WaveRNN. 
        This function currently only supports multinomial sampling, which assumes the
        network is trained on cross entropy loss.

        Args:
            specgram (Tensor):
                Batch of spectrograms. Shape: `(n_batch, n_freq, n_time)`.
            lengths (Tensor or None, optional):
                Indicates the valid length of each spectrogram in the batch.
                Shape: `(batch, )`.
                When the ``specgram`` contains spectrograms with different durations,
                by providing ``lengths`` argument, the model will compute
                the corresponding valid output lengths.
                If ``None``, it is assumed that all the spectrograms in ``specgram``
                have valid length. Default: ``None``.

        Returns:
            (Tensor, Optional[Tensor]):
            Tensor
                The inferred waveform of size `(n_batch, 1, n_time)`.
                1 stands for a single channel.
            Tensor or None
                If ``lengths`` argument was provided, a Tensor of shape `(batch, )`
                is returned.
                It indicates the valid length in time axis of the output Tensor.
        """
        device = specgram.device
        dtype = specgram.dtype

        specgram = torch.nn.functional.pad(specgram, (self._pad, self._pad))
        specgram, aux = self.upsample(specgram)
        if lengths is not None:
            lengths = lengths * self.upsample.total_scale

        output: List[Tensor] = []
        b_size, _, seq_len = specgram.size()

        h1 = torch.zeros((1, b_size, self.n_rnn), device=device, dtype=dtype)
        h2 = torch.zeros((1, b_size, self.n_rnn), device=device, dtype=dtype)
        x = torch.zeros((b_size, 1), device=device, dtype=dtype)

        aux_split = [aux[:, self.n_aux * i: self.n_aux * (i + 1), :] for i in range(4)]

        for i in range(seq_len):
            m_t = specgram[:, :, i]

            a1_t, a2_t, a3_t, a4_t = [a[:, :, i] for a in aux_split]

            x = torch.cat([x, m_t, a1_t], dim=1)
            x = self.fc(x)
            _, h1 = self.rnn1(x.unsqueeze(1), h1)

            x = x + h1[0]
            inp = torch.cat([x, a2_t], dim=1)
            _, h2 = self.rnn2(inp.unsqueeze(1), h2)

            x = x + h2[0]
            x = torch.cat([x, a3_t], dim=1)
            x = F.relu(self.fc1(x))

            x = torch.cat([x, a4_t], dim=1)
            x = F.relu(self.fc2(x))

            logits = self.fc3(x)

            posterior = F.softmax(logits, dim=1)

            x = torch.multinomial(posterior, 1).float()
            # Transform label [0, 2 ** n_bits - 1] to waveform [-1, 1]
            x = 2 * x / (2 ** self.n_bits - 1.0) - 1.0

            output.append(x)

        return torch.stack(output).permute(1, 2, 0), lengths
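

# Usage sketch (editor's addition, not part of the library): teacher-forced forward
# pass with random data. The product of ``upsample_scales`` must equal ``hop_length``,
# and the waveform length must match the upsampled spectrogram length, i.e.
# (n_time - kernel_size + 1) * hop_length with the default kernel_size of 5.
if __name__ == "__main__":
    _model = WaveRNN(upsample_scales=[5, 5, 8], n_classes=256, hop_length=200)
    _specgram = torch.rand(2, 1, 128, 16)                 # (n_batch, 1, n_freq, n_time)
    _waveform = torch.rand(2, 1, (16 - 5 + 1) * 200)      # (n_batch, 1, 2400)
    _output = _model(_waveform, _specgram)                # (n_batch, 1, 2400, n_classes)
    print(_output.shape)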
from .model import ( Wav2Vec2Model, wav2vec2_model, wav2vec2_base, wav2vec2_large, wav2vec2_large_lv60k, hubert_base, hubert_large, hubert_xlarge, ) from . import utils __all__ = [ 'Wav2Vec2Model', 'wav2vec2_model', 'wav2vec2_base', 'wav2vec2_large', 'wav2vec2_large_lv60k', 'hubert_base', 'hubert_large', 'hubert_xlarge', 'utils', ]
from typing import Optional, Tuple, List import torch from torch import Tensor from torch.nn import Module from . import components class Wav2Vec2Model(Module): """torchaudio.models.Wav2Vec2Model(feature_extractor: torch.nn.Module, encoder: torch.nn.Module, aux: Optional[torch.nn.Module] = None) Encoder model used in *wav2vec 2.0* [:footcite:`baevski2020wav2vec`]. Note: To build the model, please use one of the factory functions. Args: feature_extractor (torch.nn.Module): Feature extractor that extracts feature vectors from raw audio Tensor. encoder (torch.nn.Module): Encoder that converts the audio features into the sequence of probability distribution (in negative log-likelihood) over labels. aux (torch.nn.Module or None, optional): Auxiliary module. If provided, the output from encoder is passed to this module. """ # noqa: E501 def __init__( self, feature_extractor: Module, encoder: Module, aux: Optional[Module] = None, ): super().__init__() self.feature_extractor = feature_extractor self.encoder = encoder self.aux = aux @torch.jit.export def extract_features( self, waveforms: Tensor, lengths: Optional[Tensor] = None, num_layers: Optional[int] = None, ) -> Tuple[List[Tensor], Optional[Tensor]]: """Extract feature vectors from raw waveforms This returns the list of outputs from the intermediate layers of transformer block in encoder. Args: waveforms (Tensor): Audio tensor of shape `(batch, frames)`. lengths (Tensor or None, optional): Indicates the valid length of each audio in the batch. Shape: `(batch, )`. When the ``waveforms`` contains audios with different durations, by providing ``lengths`` argument, the model will compute the corresponding valid output lengths and apply proper mask in transformer attention layer. If ``None``, it is assumed that the entire audio waveform length is valid. num_layers (int or None, optional): If given, limit the number of intermediate layers to go through. Providing `1` will stop the computation after going through one intermediate layers. If not given, the outputs from all the intermediate layers are returned. Returns: (List[Tensor], Optional[Tensor]): List of Tensors Features from requested layers. Each Tensor is of shape: `(batch, time frame, feature dimension)` Tensor or None If ``lengths`` argument was provided, a Tensor of shape `(batch, )` is returned. It indicates the valid length in time axis of each feature Tensor. """ x, lengths = self.feature_extractor(waveforms, lengths) x = self.encoder.extract_features(x, lengths, num_layers) return x, lengths def forward( self, waveforms: Tensor, lengths: Optional[Tensor] = None, ) -> Tuple[Tensor, Optional[Tensor]]: """Compute the sequence of probability distribution over labels. Args: waveforms (Tensor): Audio tensor of shape `(batch, frames)`. lengths (Tensor or None, optional): Indicates the valid length of each audio in the batch. Shape: `(batch, )`. When the ``waveforms`` contains audios with different durations, by providing ``lengths`` argument, the model will compute the corresponding valid output lengths and apply proper mask in transformer attention layer. If ``None``, it is assumed that all the audio in ``waveforms`` have valid length. Default: ``None``. Returns: (Tensor, Optional[Tensor]): Tensor The sequences of probability distribution (in logit) over labels. Shape: `(batch, frames, num labels)`. Tensor or None If ``lengths`` argument was provided, a Tensor of shape `(batch, )` is returned. It indicates the valid length in time axis of the output Tensor. 
""" x, lengths = self.feature_extractor(waveforms, lengths) x = self.encoder(x, lengths) if self.aux is not None: x = self.aux(x) return x, lengths def wav2vec2_model( extractor_mode: str, extractor_conv_layer_config: Optional[List[Tuple[int, int, int]]], extractor_conv_bias: bool, encoder_embed_dim: int, encoder_projection_dropout: float, encoder_pos_conv_kernel: int, encoder_pos_conv_groups: int, encoder_num_layers: int, encoder_num_heads: int, encoder_attention_dropout: float, encoder_ff_interm_features: int, encoder_ff_interm_dropout: float, encoder_dropout: float, encoder_layer_norm_first: bool, encoder_layer_drop: float, aux_num_out: Optional[int], ) -> Wav2Vec2Model: # Overriding the signature so that the return type is correct on Sphinx """wav2vec2_model(extractor_mode: str, extractor_conv_layer_config: Optional[List[Tuple[int, int, int]]], extractor_conv_bias: bool, encoder_embed_dim: int, encoder_projection_dropout: float, encoder_pos_conv_kernel: int, encoder_pos_conv_groups: int, encoder_num_layers: int, encoder_num_heads: int, encoder_attention_dropout: float, encoder_ff_interm_features: int, encoder_ff_interm_dropout: float, encoder_dropout: float, encoder_layer_norm_first: bool, encoder_layer_drop: float, aux_num_out: Optional[int]) -> torchaudio.models.Wav2Vec2Model Build a custom Wav2Vec2Model Note: The "feature extractor" below corresponds to `ConvFeatureExtractionModel <https://github.com/pytorch/fairseq/blob/dd3bd3c0497ae9a7ae7364404a6b0a4c501780b3/fairseq/models/wav2vec/wav2vec2.py#L736>`__ in the original ``fairseq`` implementation. This is referred as "(convolutional) feature encoder" in the *wav2vec 2.0* [:footcite:`baevski2020wav2vec`] paper. The "encoder" below corresponds to `TransformerEncoder <https://github.com/pytorch/fairseq/blob/dd3bd3c0497ae9a7ae7364404a6b0a4c501780b3/fairseq/models/wav2vec/wav2vec2.py#L817>`__, and this is referred as "Transformer" in the paper. Args: extractor_mode (str): Operation mode of feature extractor. Valid values are ``"group_norm"`` or ``"layer_norm"``. If ``"group_norm"``, then a single normalization is applied in the first convolution block. Otherwise, all the convolution blocks will have layer normalization. This option corresponds to ``extractor_mode`` from ``fairseq``. extractor_conv_layer_config (list of integer tuples or None): Configuration of convolution layers in feature extractor. List of convolution configuration, i.e. ``[(output_channel, kernel_size, stride), ...]`` If ``None`` is provided, then the following default value is used. .. code-block:: python [ (512, 10, 5), (512, 3, 2), (512, 3, 2), (512, 3, 2), (512, 3, 2), (512, 2, 2), (512, 2, 2), ] This option corresponds to ``conv_feature_layers`` from ``fairseq``. extractor_conv_bias (bool): Whether to include bias term to each convolution operation. This option corresponds to ``conv_bias`` from ``fairseq``. encoder_embed_dim (int): The dimension of embedding in encoder. This option corresponds to ``encoder_embed_dim`` from ``fairseq``. encoder_projection_dropout (float): The dropout probability applied after the input feature is projected to ``encoder_embed_dim``. This option corresponds to ``dropout_input`` from ``fairseq``. encoder_pos_conv_kernel (int): The kernel size of convolutional positional embeddings. This option corresponds to ``conv_pos`` from ``fairseq``. encoder_pos_conv_groups (int): The number of groups of convolutional positional embeddings. This option corresponds to ``conv_pos_groups`` from ``fairseq``. 
encoder_num_layers (int): The number of self attention layers in transformer block. This option corresponds to ``encoder_layers`` from ``fairseq``. encoder_num_heads (int): The number of heads in self attention layers. This option corresponds to ``encoder_attention_heads`` from ``fairseq``. encoder_attention_dropout (float): The dropout probability applied after softmax in self-attention layer. This option corresponds to ``attention_dropout`` from ``fairseq``. encoder_ff_interm_features (int): The dimension of hidden features in feed forward layer. This option corresponds to ``encoder_ffn_embed_dim`` from ``fairseq``. encoder_ff_interm_dropout (float): The dropout probability applied in feedforward layer. This option correspinds to ``activation_dropout`` from ``fairseq``. encoder_dropout (float): The dropout probability applied at the end of feed forward layer. This option corresponds to ``dropout`` from ``fairseq``. encoder_layer_norm_first (bool): Control the order of layer norm in transformer layer and each encoder layer. If True, in transformer layer, layer norm is applied before features are fed to encoder layers. In encoder layer, two layer norms are applied before and after self attention. If False, in transformer layer, layer norm is applied after features are fed to encoder layers. In encoder layer, two layer norms are applied after self attention, before and after feed forward. This option corresponds to ``layer_norm_first`` from ``fairseq``. encoder_layer_drop (float): Probability to drop each encoder layer during training. This option corresponds to ``layerdrop`` from ``fairseq``. aux_num_out (int or None): When provided, attach an extra linear layer on top of encoder, which can be used for fine-tuning. Returns: Wav2Vec2Model: The resulting model. """ # noqa: E501 if extractor_conv_layer_config is None: extractor_conv_layer_config = [(512, 10, 5)] + [(512, 3, 2)] * 4 + [(512, 2, 2)] * 2 feature_extractor = components._get_feature_extractor( extractor_mode, extractor_conv_layer_config, extractor_conv_bias) encoder = components._get_encoder( in_features=extractor_conv_layer_config[-1][0], embed_dim=encoder_embed_dim, dropout_input=encoder_projection_dropout, pos_conv_kernel=encoder_pos_conv_kernel, pos_conv_groups=encoder_pos_conv_groups, num_layers=encoder_num_layers, num_heads=encoder_num_heads, attention_dropout=encoder_attention_dropout, ff_interm_features=encoder_ff_interm_features, ff_interm_dropout=encoder_ff_interm_dropout, dropout=encoder_dropout, layer_norm_first=encoder_layer_norm_first, layer_drop=encoder_layer_drop, ) aux = None if aux_num_out is not None: aux = torch.nn.Linear(in_features=encoder_embed_dim, out_features=aux_num_out) return Wav2Vec2Model(feature_extractor, encoder, aux) def wav2vec2_base( encoder_projection_dropout: float = 0.1, encoder_attention_dropout: float = 0.1, encoder_ff_interm_dropout: float = 0.1, encoder_dropout: float = 0.1, encoder_layer_drop: float = 0.1, aux_num_out: Optional[int] = None, ) -> Wav2Vec2Model: # Overriding the signature so that the return type is correct on Sphinx """wav2vec2_base(encoder_projection_dropout: float = 0.1, encoder_attention_dropout: float = 0.1, encoder_ff_interm_dropout: float = 0.1, encoder_dropout: float = 0.1, encoder_layer_drop: float = 0.1, aux_num_out: Optional[int] = None) -> torchaudio.models.Wav2Vec2Model Build Wav2Vec2Model with "base" architecture from *wav2vec 2.0* [:footcite:`baevski2020wav2vec`] Args: encoder_projection_dropout (float): See :py:func:`wav2vec2_model`. 
encoder_attention_dropout (float): See :py:func:`wav2vec2_model`. encoder_ff_interm_dropout (float): See :py:func:`wav2vec2_model`. encoder_dropout (float): See :py:func:`wav2vec2_model`. encoder_layer_drop (float): See :py:func:`wav2vec2_model`. aux_num_out (int or None, optional): See :py:func:`wav2vec2_model`. Returns: Wav2Vec2Model: The resulting model. """ # noqa: E501 return wav2vec2_model( extractor_mode="group_norm", extractor_conv_layer_config=None, extractor_conv_bias=False, encoder_embed_dim=768, encoder_projection_dropout=encoder_projection_dropout, encoder_pos_conv_kernel=128, encoder_pos_conv_groups=16, encoder_num_layers=12, encoder_num_heads=12, encoder_attention_dropout=encoder_attention_dropout, encoder_ff_interm_features=3072, encoder_ff_interm_dropout=encoder_ff_interm_dropout, encoder_dropout=encoder_dropout, encoder_layer_norm_first=False, encoder_layer_drop=encoder_layer_drop, aux_num_out=aux_num_out, ) def wav2vec2_large( encoder_projection_dropout: float = 0.1, encoder_attention_dropout: float = 0.1, encoder_ff_interm_dropout: float = 0.1, encoder_dropout: float = 0.1, encoder_layer_drop: float = 0.1, aux_num_out: Optional[int] = None, ) -> Wav2Vec2Model: # Overriding the signature so that the return type is correct on Sphinx """wav2vec2_large(encoder_projection_dropout: float = 0.1, encoder_attention_dropout: float = 0.1, encoder_ff_interm_dropout: float = 0.1, encoder_dropout: float = 0.1, encoder_layer_drop: float = 0.1, aux_num_out: Optional[int] = None) -> torchaudio.models.Wav2Vec2Model Build Wav2Vec2Model with "large" architecture from *wav2vec 2.0* [:footcite:`baevski2020wav2vec`] Args: encoder_projection_dropout (float): See :py:func:`wav2vec2_model`. encoder_attention_dropout (float): See :py:func:`wav2vec2_model`. encoder_ff_interm_dropout (float): See :py:func:`wav2vec2_model`. encoder_dropout (float): See :py:func:`wav2vec2_model`. encoder_layer_drop (float): See :py:func:`wav2vec2_model`. aux_num_out (int or None, optional): See :py:func:`wav2vec2_model`. Returns: Wav2Vec2Model: The resulting model. """ # noqa: E501 return wav2vec2_model( extractor_mode="group_norm", extractor_conv_layer_config=None, extractor_conv_bias=False, encoder_embed_dim=1024, encoder_projection_dropout=encoder_projection_dropout, encoder_pos_conv_kernel=128, encoder_pos_conv_groups=16, encoder_num_layers=24, encoder_num_heads=16, encoder_attention_dropout=encoder_attention_dropout, encoder_ff_interm_features=4096, encoder_ff_interm_dropout=encoder_ff_interm_dropout, encoder_dropout=encoder_dropout, encoder_layer_norm_first=False, encoder_layer_drop=encoder_layer_drop, aux_num_out=aux_num_out, ) def wav2vec2_large_lv60k( encoder_projection_dropout: float = 0.1, encoder_attention_dropout: float = 0.0, encoder_ff_interm_dropout: float = 0.1, encoder_dropout: float = 0.0, encoder_layer_drop: float = 0.1, aux_num_out: Optional[int] = None, ) -> Wav2Vec2Model: # Overriding the signature so that the return type is correct on Sphinx """wav2vec2_large_lv60k( encoder_projection_dropout: float = 0.1, encoder_attention_dropout: float = 0.0, encoder_ff_interm_dropout: float = 0.1, encoder_dropout: float = 0.0, encoder_layer_drop: float = 0.1, aux_num_out: Optional[int] = None) -> torchaudio.models.Wav2Vec2Model Build Wav2Vec2Model with "large lv-60k" architecture from *wav2vec 2.0* [:footcite:`baevski2020wav2vec`] Args: encoder_projection_dropout (float): See :py:func:`wav2vec2_model`. encoder_attention_dropout (float): See :py:func:`wav2vec2_model`. 
encoder_ff_interm_dropout (float): See :py:func:`wav2vec2_model`. encoder_dropout (float): See :py:func:`wav2vec2_model`. encoder_layer_drop (float): See :py:func:`wav2vec2_model`. aux_num_out (int or None, optional): See :py:func:`wav2vec2_model`. Returns: Wav2Vec2Model: The resulting model. """ # noqa: E501 return wav2vec2_model( extractor_mode="layer_norm", extractor_conv_layer_config=None, extractor_conv_bias=True, encoder_embed_dim=1024, encoder_projection_dropout=encoder_projection_dropout, encoder_pos_conv_kernel=128, encoder_pos_conv_groups=16, encoder_num_layers=24, encoder_num_heads=16, encoder_attention_dropout=encoder_attention_dropout, encoder_ff_interm_features=4096, encoder_ff_interm_dropout=encoder_ff_interm_dropout, encoder_dropout=encoder_dropout, encoder_layer_norm_first=True, encoder_layer_drop=encoder_layer_drop, aux_num_out=aux_num_out, ) def hubert_base( encoder_projection_dropout: float = 0.1, encoder_attention_dropout: float = 0.1, encoder_ff_interm_dropout: float = 0.0, encoder_dropout: float = 0.1, encoder_layer_drop: float = 0.05, aux_num_out: Optional[int] = None, ) -> Wav2Vec2Model: # Overriding the signature so that the return type is correct on Sphinx """hubert_base(encoder_projection_dropout: float = 0.1, encoder_attention_dropout: float = 0.1, encoder_ff_interm_dropout: float = 0.0, encoder_dropout: float = 0.1, encoder_layer_drop: float = 0.05, aux_num_out: Optional[int] = None) -> torchaudio.models.Wav2Vec2Model Build HuBERT model with "base" architecture from *HuBERT* [:footcite:`hsu2021hubert`] Args: encoder_projection_dropout (float): See :py:func:`wav2vec2_model`. encoder_attention_dropout (float): See :py:func:`wav2vec2_model`. encoder_ff_interm_dropout (float): See :py:func:`wav2vec2_model`. encoder_dropout (float): See :py:func:`wav2vec2_model`. encoder_layer_drop (float): See :py:func:`wav2vec2_model`. aux_num_out (int or None, optional): See :py:func:`wav2vec2_model`. Returns: Wav2Vec2Model: The resulting model. """ # noqa: E501 return wav2vec2_model( extractor_mode='group_norm', extractor_conv_layer_config=None, extractor_conv_bias=False, encoder_embed_dim=768, encoder_projection_dropout=encoder_projection_dropout, encoder_pos_conv_kernel=128, encoder_pos_conv_groups=16, encoder_num_layers=12, encoder_num_heads=12, encoder_attention_dropout=encoder_attention_dropout, encoder_ff_interm_features=3072, encoder_ff_interm_dropout=encoder_ff_interm_dropout, encoder_dropout=encoder_dropout, encoder_layer_norm_first=False, encoder_layer_drop=encoder_layer_drop, aux_num_out=aux_num_out, ) def hubert_large( encoder_projection_dropout: float = 0.0, encoder_attention_dropout: float = 0.0, encoder_ff_interm_dropout: float = 0.0, encoder_dropout: float = 0.0, encoder_layer_drop: float = 0.0, aux_num_out: Optional[int] = None, ) -> Wav2Vec2Model: # Overriding the signature so that the return type is correct on Sphinx """hubert_large(encoder_projection_dropout: float = 0.0, encoder_attention_dropout: float = 0.0, encoder_ff_interm_dropout: float = 0.0, encoder_dropout: float = 0.0, encoder_layer_drop: float = 0.0, aux_num_out: Optional[int] = None) -> torchaudio.models.Wav2Vec2Model Build HuBERT model with "large" architecture from *HuBERT* [:footcite:`hsu2021hubert`] Args: encoder_projection_dropout (float): See :py:func:`wav2vec2_model`. encoder_attention_dropout (float): See :py:func:`wav2vec2_model`. encoder_ff_interm_dropout (float): See :py:func:`wav2vec2_model`. encoder_dropout (float): See :py:func:`wav2vec2_model`. 
encoder_layer_drop (float): See :py:func:`wav2vec2_model`. aux_num_out (int or None, optional): See :py:func:`wav2vec2_model`. Returns: Wav2Vec2Model: The resulting model. """ # noqa: E501 return wav2vec2_model( extractor_mode='layer_norm', extractor_conv_layer_config=None, extractor_conv_bias=False, encoder_embed_dim=1024, encoder_projection_dropout=encoder_projection_dropout, encoder_pos_conv_kernel=128, encoder_pos_conv_groups=16, encoder_num_layers=24, encoder_num_heads=16, encoder_attention_dropout=encoder_attention_dropout, encoder_ff_interm_features=4096, encoder_ff_interm_dropout=encoder_ff_interm_dropout, encoder_dropout=encoder_dropout, encoder_layer_norm_first=True, encoder_layer_drop=encoder_layer_drop, aux_num_out=aux_num_out, ) def hubert_xlarge( encoder_projection_dropout: float = 0.0, encoder_attention_dropout: float = 0.0, encoder_ff_interm_dropout: float = 0.0, encoder_dropout: float = 0.0, encoder_layer_drop: float = 0.0, aux_num_out: Optional[int] = None, ) -> Wav2Vec2Model: # Overriding the signature so that the return type is correct on Sphinx """hubert_xlarge(encoder_projection_dropout: float = 0.0, encoder_attention_dropout: float = 0.0, encoder_ff_interm_dropout: float = 0.0, encoder_dropout: float = 0.0, encoder_layer_drop: float = 0.0, aux_num_out: Optional[int] = None) -> torchaudio.models.Wav2Vec2Model Build HuBERT model with "extra large" architecture from *HuBERT* [:footcite:`hsu2021hubert`] Args: encoder_projection_dropout (float): See :py:func:`wav2vec2_model`. encoder_attention_dropout (float): See :py:func:`wav2vec2_model`. encoder_ff_interm_dropout (float): See :py:func:`wav2vec2_model`. encoder_dropout (float): See :py:func:`wav2vec2_model`. encoder_layer_drop (float): See :py:func:`wav2vec2_model`. aux_num_out (int or None, optional): See :py:func:`wav2vec2_model`. Returns: Wav2Vec2Model: The resulting model. """ # noqa: E501 return wav2vec2_model( extractor_mode='layer_norm', extractor_conv_layer_config=None, extractor_conv_bias=False, encoder_embed_dim=1280, encoder_projection_dropout=encoder_projection_dropout, encoder_pos_conv_kernel=128, encoder_pos_conv_groups=16, encoder_num_layers=48, encoder_num_heads=16, encoder_attention_dropout=encoder_attention_dropout, encoder_ff_interm_features=5120, encoder_ff_interm_dropout=encoder_ff_interm_dropout, encoder_dropout=encoder_dropout, encoder_layer_norm_first=True, encoder_layer_drop=encoder_layer_drop, aux_num_out=aux_num_out, )
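

# Usage sketch (editor's addition, not part of the library): build the "base"
# architecture with a hypothetical 32-label output head and run it on random
# waveforms. ``lengths`` marks the valid duration of each item so that padding
# is masked inside the transformer.
if __name__ == "__main__":
    _model = wav2vec2_base(aux_num_out=32)
    _model.eval()
    _waveforms = torch.rand(2, 16000)                  # (batch, frames): 1 second at 16 kHz
    _lengths = torch.tensor([16000, 12800])
    with torch.no_grad():
        _logits, _out_lengths = _model(_waveforms, _lengths)                       # (batch, frames', 32)
        _feats, _ = _model.extract_features(_waveforms, _lengths, num_layers=4)    # list of 4 feature Tensors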
import logging from typing import Optional, Tuple, List import torch from torch import Tensor, nn from torch.nn import Module _LG = logging.getLogger(__name__) class LayerNorm(nn.LayerNorm): """Layer norm with transpose""" def forward(self, input: Tensor) -> Tensor: x = input.transpose(-2, -1) x = nn.functional.layer_norm( x, self.normalized_shape, self.weight, self.bias, self.eps) x = x.transpose(-2, -1) return x class ConvLayerBlock(Module): """Convolution unit of FeatureExtractor""" def __init__( self, in_channels: int, out_channels: int, kernel_size: int, stride: int, bias: bool, layer_norm: Optional[Module], ): super().__init__() self.kernel_size = kernel_size self.stride = stride self.layer_norm = layer_norm self.conv = nn.Conv1d( in_channels=in_channels, out_channels=out_channels, kernel_size=kernel_size, stride=stride, bias=bias, ) def forward( self, x: Tensor, length: Optional[Tensor], ) -> Tuple[Tensor, Optional[Tensor]]: """ Args: x (Tensor): Shape: ``[batch, in_channels, in_frame]``. length (Tensor or None, optional): Shape ``[batch, ]``. Returns: Tensor: Shape ``[batch, out_channels, out_frames]``. Optional[Tensor]: Shape ``[batch, ]``. """ x = self.conv(x) if self.layer_norm is not None: x = self.layer_norm(x) x = nn.functional.gelu(x) if length is not None: length = torch.div(length - self.kernel_size, self.stride, rounding_mode='floor') + 1 # When input length is 0, the resulting length can be negative. So fix it here. length = torch.max(torch.zeros_like(length), length) return x, length class FeatureExtractor(Module): """Extract features from audio Args: conv_layers (nn.ModuleList): convolution layers """ def __init__( self, conv_layers: nn.ModuleList, ): super().__init__() self.conv_layers = conv_layers def forward( self, x: Tensor, length: Optional[Tensor], ) -> Tuple[Tensor, Optional[Tensor]]: """ Args: x (Tensor): Input Tensor representing a batch of audio, shape: ``[batch, time]``. length (Tensor or None, optional): Valid length of each input sample. shape: ``[batch, ]``. Returns: Tensor: The resulting feature, shape: ``[batch, frame, feature]`` Optional[Tensor]: Valid length of each output sample. shape: ``[batch, ]``. """ if x.ndim != 2: raise ValueError( "Expected the input Tensor to be 2D (batch, time), " "but received {list(x.shape)}") x = x.unsqueeze(1) # (batch, channel==1, frame) for layer in self.conv_layers: x, length = layer(x, length) # (batch, feature, frame) x = x.transpose(1, 2) # (batch, frame, feature) return x, length class FeatureProjection(Module): """Layer that connects FeatureExtractor and Encoder Projects features to encoder dimension. Args: in_features (int): Input feature dim. out_features (int): Output feature dim. dropout (float): Dropout probability. """ def __init__( self, in_features: int, out_features: int, dropout: float, ): super().__init__() self.layer_norm = nn.LayerNorm(in_features) self.projection = nn.Linear(in_features, out_features,) self.dropout = nn.Dropout(dropout) def forward(self, x): """ Args: x (Tensor): Feature Tensor. shape: ``[batch, frame, in_feature]`` Returns: Tensor: Projected features. ``[batch, frame, out_feature]``. """ x = self.layer_norm(x) x = self.projection(x) x = self.dropout(x) return x class ConvolutionalPositionalEmbedding(Module): """Positional embedding which is placed at the beginning of Transformer. Args: embed_dim (int): Feature dimension of the input Tensor. kernel_size (int): The number of frames to be use. groups (int): The number of groups in feature dimensions. 
""" def __init__( self, embed_dim: int, kernel_size: int, groups: int, ): super().__init__() self.embed_dim = embed_dim self.conv = nn.Conv1d( in_channels=embed_dim, out_channels=embed_dim, kernel_size=kernel_size, padding=kernel_size // 2, groups=groups, ) self.conv = nn.utils.weight_norm(self.conv, name="weight", dim=2) self.num_remove: int = 1 if kernel_size % 2 == 0 else 0 def __prepare_scriptable__(self): for hook in self.conv._forward_pre_hooks.values(): # The hook we want to remove is an instance of WeightNorm class, so # normally we would do `if isinstance(...)` but this class is not accessible # because of shadowing, so we check the module name directly. # https://github.com/pytorch/pytorch/blob/be0ca00c5ce260eb5bcec3237357f7a30cc08983/torch/nn/utils/__init__.py#L3 if ( hook.__module__ == 'torch.nn.utils.weight_norm' and hook.__class__.__name__ == 'WeightNorm' ): _LG.warning('Removing weight_norm from %s', self.__class__.__name__) torch.nn.utils.remove_weight_norm(self.conv) return self def forward(self, x): """ Args: x (Tensor): shape ``[batch, frame, feature]``. Returns: Tensor: The resulting feature. Shape ``[batch, frame, feature]``. """ x = x.transpose(-2, -1) x = self.conv(x) if self.num_remove > 0: x = x[..., :-self.num_remove] x = torch.nn.functional.gelu(x) x = x.transpose(-2, -1) return x class SelfAttention(Module): """Multihead Self Attention module Args: embed_dim (int): Total dimension of the model. num_heads (int): The number of heads. dropout (float, optional): Dropout probabiliry on attn_output_weights. Default: ``0.0`` """ def __init__( self, embed_dim: int, num_heads: int, dropout: float = 0.0, ): super().__init__() head_dim = embed_dim // num_heads if head_dim * num_heads != embed_dim: raise ValueError(f"`embed_dim ({embed_dim})` is not divisible by `num_heads ({num_heads})`") self.embed_dim = embed_dim self.num_heads = num_heads self.dropout = torch.nn.Dropout(dropout) self.head_dim = head_dim self.scaling = self.head_dim ** -0.5 self.k_proj = nn.Linear(embed_dim, embed_dim, bias=True) self.v_proj = nn.Linear(embed_dim, embed_dim, bias=True) self.q_proj = nn.Linear(embed_dim, embed_dim, bias=True) self.out_proj = nn.Linear(embed_dim, embed_dim, bias=True) def forward( self, x: Tensor, attention_mask: Optional[Tensor] = None, ) -> Tensor: """ Args: x (Tensor): shape: ``[batch_size, sequence_length, embed_dim]``. attention_mask (Tensor or None, optional): shape: ``[batch_size, 1, sequence_length, sequence_length]`` Returns: Tensor: The resulting tensor. shape: ``[batch, sequence_length, embed_dim]`` """ if x.ndim != 3 or x.shape[2] != self.embed_dim: raise ValueError( f"The expected input shape is (batch, sequence, embed_dim=={self.embed_dim}). " f"Found {x.shape}." ) batch_size, length, embed_dim = x.size() if attention_mask is not None: shape_ = (batch_size, 1, length, length) if attention_mask.size() != shape_: raise ValueError( f"The expected attention mask shape is {shape_}. " f"Found {attention_mask.size()}." 
) shape = (batch_size, length, self.num_heads, self.head_dim) q = self.q_proj(x).view(*shape).transpose(2, 1) # B, nH, L, Hd k = self.k_proj(x).view(*shape).permute(0, 2, 3, 1) # B, nH, Hd, L v = self.v_proj(x).view(*shape).transpose(2, 1) # B, nH, L, Hd weights = self.scaling * (q @ k) # B, nH, L, L if attention_mask is not None: weights += attention_mask weights = torch.nn.functional.softmax(weights, dim=-1) weights = self.dropout(weights) output = weights @ v # B, nH, L, Hd output = output.transpose(2, 1).reshape(batch_size, length, embed_dim) output = self.out_proj(output) return output class FeedForward(Module): """Layer that follows attention layer in encoder layer. """ def __init__( self, io_features: int, intermediate_features: int, intermediate_dropout: float, output_dropout: float, ): super().__init__() self.intermediate_dense = nn.Linear(io_features, intermediate_features) self.intermediate_dropout = nn.Dropout(intermediate_dropout) self.output_dense = nn.Linear(intermediate_features, io_features) self.output_dropout = nn.Dropout(output_dropout) def forward(self, x): """ Args: x (Tensor): shape: `(batch, sequence_length, io_features)` Returns: x (Tensor): shape: `(batch, sequence_length, io_features)` """ x = self.intermediate_dense(x) x = torch.nn.functional.gelu(x) x = self.intermediate_dropout(x) x = self.output_dense(x) x = self.output_dropout(x) return x class EncoderLayer(Module): """A layer unit in encoder. Combines multihead self attention and feed forward. """ def __init__( self, attention: Module, dropout: float, layer_norm_first: bool, feed_forward: Module, ): super().__init__() self.attention = attention self.dropout = nn.Dropout(dropout) self.layer_norm = nn.LayerNorm(attention.embed_dim) self.layer_norm_first = layer_norm_first self.feed_forward = feed_forward self.final_layer_norm = nn.LayerNorm(attention.embed_dim) def forward( self, x: Tensor, attention_mask: Optional[Tensor] = None, ): """ Args: x (Tensor): shape: `(batch, sequence_length, embed_dim)` attention_mask (Tensor or None, optional): shape: `(batch, 1, sequence_length, sequence_length)` """ residual = x if self.layer_norm_first: x = self.layer_norm(x) x = self.attention(x, attention_mask) x = self.dropout(x) x = residual + x if self.layer_norm_first: x = x + self.feed_forward(self.final_layer_norm(x)) else: x = self.layer_norm(x) x = self.final_layer_norm(x + self.feed_forward(x)) return x class Transformer(Module): def __init__( self, pos_conv_embed: Module, dropout: float, layers: Module, layer_norm_first: bool, layer_drop: float, ): super().__init__() self.pos_conv_embed = pos_conv_embed self.layer_norm = nn.LayerNorm(pos_conv_embed.embed_dim) self.layer_norm_first = layer_norm_first self.layer_drop = layer_drop self.dropout = nn.Dropout(dropout) self.layers = layers def _preprocess(self, x: Tensor): x = x + self.pos_conv_embed(x) if self.layer_norm_first: x = self.layer_norm(x) x = self.dropout(x) return x def forward( self, x: Tensor, attention_mask: Optional[Tensor] = None, ): x = self._preprocess(x) for layer in self.layers: if not (self.training and torch.rand(1).item() <= self.layer_drop): x = layer(x, attention_mask) if not self.layer_norm_first: x = self.layer_norm(x) return x def get_intermediate_outputs( self, x: Tensor, attention_mask: Optional[Tensor] = None, num_layers: Optional[int] = None, ) -> List[Tensor]: if num_layers is not None: if not 0 < num_layers <= len(self.layers): raise ValueError(f'`num_layers` must be between [1, {len(self.layers)}]') ret: List[Tensor] = [] x = 
self._preprocess(x) for layer in self.layers: x = layer(x, attention_mask) ret.append(x) if num_layers is not None and len(ret) >= num_layers: return ret return ret class Encoder(Module): def __init__( self, feature_projection: Module, transformer: Module, ): super().__init__() self.feature_projection = feature_projection self.transformer = transformer def _preprocess( self, features: Tensor, lengths: Optional[Tensor] = None, ) -> Tuple[Tensor, Optional[Tensor]]: x = self.feature_projection(features) mask: Optional[Tensor] = None if lengths is not None: batch_size, max_len, _ = x.shape # create mask for padded elements and zero-out them mask = torch.arange(max_len, device=lengths.device).expand(batch_size, max_len) >= lengths[:, None] x[mask] = 0.0 # extend the mask to attention shape and set weight mask = -10000.0 * mask[:, None, None, :].to(dtype=features.dtype) mask = mask.expand(batch_size, 1, max_len, max_len) return x, mask def forward( self, features: Tensor, lengths: Optional[Tensor] = None, ) -> Tensor: x, mask = self._preprocess(features, lengths) x = self.transformer(x, attention_mask=mask) return x def extract_features( self, features: Tensor, lengths: Optional[Tensor] = None, num_layers: Optional[int] = None, ) -> List[Tensor]: x, masks = self._preprocess(features, lengths) return self.transformer.get_intermediate_outputs( x, attention_mask=masks, num_layers=num_layers) ################################################################################ def _get_feature_extractor( norm_mode: str, shapes: List[Tuple[int, int, int]], bias: bool, ) -> FeatureExtractor: """ Args: norm_mode (str): Either "group_norm" or "layer_norm". If "group_norm", then a single normalization is applied in the first convolution block. Otherwise, all the convolution blocks will have layer normalization. This option corresponds to "extractor_mode" from fairseq. Expected values are "group_norm" for Base arch, and "layer_norm" for Large arch. shapes (list of tuple of int): Configuration of convolution layers. List of convolution configuration, i.e. ``[(output_channel, kernel_size, stride), ...]`` This option corresponds to "conv_feature_layers" from fairseq. Expected values are ``[(512, 10, 5)] + [(512, 3, 2)] * 4 + [(512, 2, 2)] * 2`` for all the architectures. bias (bool): Whether to include bias term to each convolution operation. This option corresponds to "conv_bias" from fairseq. Expected values are False for Base arch, and True for Large arch. 
See Also: * Original implementation https://github.com/pytorch/fairseq/blob/425c36eafff535fe7337f8bdd5ace22ebacc78cb/fairseq/models/wav2vec/wav2vec2.py#L666-L733 * "extractor_mode" - Def and base: https://github.com/pytorch/fairseq/blob/425c36eafff535fe7337f8bdd5ace22ebacc78cb/fairseq/models/wav2vec/wav2vec2.py#L38-L45 - Large: https://github.com/pytorch/fairseq/blob/425c36eafff535fe7337f8bdd5ace22ebacc78cb/examples/wav2vec/config/pretraining/wav2vec2_large_librivox.yaml#L52 * "conv_feature_layers" - Def, base and large: https://github.com/pytorch/fairseq/blob/425c36eafff535fe7337f8bdd5ace22ebacc78cb/fairseq/models/wav2vec/wav2vec2.py#L94-L100 * "conv_bias" - Def and base: https://github.com/pytorch/fairseq/blob/425c36eafff535fe7337f8bdd5ace22ebacc78cb/fairseq/models/wav2vec/wav2vec2.py#L101-L103 - Large: https://github.com/pytorch/fairseq/blob/425c36eafff535fe7337f8bdd5ace22ebacc78cb/examples/wav2vec/config/pretraining/wav2vec2_large_librivox.yaml#L61 """ assert norm_mode in ["group_norm", "layer_norm"] blocks = [] in_channels = 1 for i, (out_channels, kernel_size, stride) in enumerate(shapes): normalization = None if norm_mode == "group_norm" and i == 0: normalization = nn.GroupNorm( num_groups=out_channels, num_channels=out_channels, affine=True, ) elif norm_mode == "layer_norm": normalization = LayerNorm( normalized_shape=out_channels, elementwise_affine=True, ) blocks.append( ConvLayerBlock( in_channels=in_channels, out_channels=out_channels, kernel_size=kernel_size, stride=stride, bias=bias, layer_norm=normalization, ) ) in_channels = out_channels return FeatureExtractor(nn.ModuleList(blocks)) def _get_encoder( in_features: int, embed_dim: int, dropout_input: float, pos_conv_kernel: int, pos_conv_groups: int, num_layers: int, num_heads: int, attention_dropout: float, ff_interm_features: int, ff_interm_dropout: float, dropout: float, layer_norm_first: bool, layer_drop: float, ) -> Encoder: """ Args: in_features (int): The number of input features. embed_dim (int): The dimension of embedding. This option corresponds to "encoder_embed_dim" from fairseq. Expected values are 768 for Base arch, and 1024 for Large arch. dropout_input (float): The dropout probability applied after the input feature is projected to ``embed_dim``. This option corresponds to "dropout_input" from fairseq. Expected values are 0.1 for both Base and Large arch. pos_conv_kernel (int): The kernel size of convolutional positional embeddings. This option corresponds to "conv_pos" from fairseq. Expected values are 128 for both Base and Large arch. pos_conv_groups (int): The number of groups of convolutional positional embeddings. This option corresponds to "conv_pos_groups" from fairseq. Expected values are 16 for both Base and Large arch. num_layers (int): The number of self attention layers in transformer block. This option corresponds to "encoder_layers" from fairseq. Expected values are 12 for Base and 24 for Large arch. num_heads (int): The number of heads in self attention layers. This option corresponds to "encoder_attention_heads" from fairseq. Expected values are 12 for Base and 16 for Large arch. attention_dropout (float): The dropout probability applied after softmax in self-attention layer. This option corresponds to "attention_dropout" from fairseq. Expected values are 0.1 for Base and 0.0 for Large arch. ff_interm_features (int): The dimension of hidden features in feed forward layer. This option corresponds to "encoder_ffn_embed_dim" from fairseq. Expected values are 3072 for Base and 4096 for Large arch. 
ff_interm_dropout (float): The dropout probability applied in feedforward layer. This option correspinds to "activation_dropout" from fairseq. Expected values are 0.1 for both Base and Large arch. dropout (float): The dropout probability applied at the end of feed forward layer. This option corresponds to "dropout" from fairseq. Expected values are 0.1 for Base and 0.0 for Large arch. layer_norm_first (bool): Control the order of layer norm in transformer layer and each encoder layer. If True, in transformer layer, layer norm is applied before features are fed to encoder layers. In encoder layer, two layer norms are applied before and after self attention. If False, in transformer layer, layer norm is applied after features are fed to encoder layers. In encoder layer, two layer norms are applied after self attention, before and after feed forward. This option corresponds to "layer_norm_first" from fairseq. Expected values are False for Base and True for Large arch. layer_drop (float): Probability to drop each encoder layer during training. This option corresponds to "layerdrop" from fairseq. Expected values are 0.1 for both Base and Large arch. See Also: * "encoder_embed_dim" - Def and base https://github.com/pytorch/fairseq/blob/425c36eafff535fe7337f8bdd5ace22ebacc78cb/fairseq/models/wav2vec/wav2vec2.py#L49-L51 - Large https://github.com/pytorch/fairseq/blob/425c36eafff535fe7337f8bdd5ace22ebacc78cb/examples/wav2vec/config/pretraining/wav2vec2_large_librivox.yaml#L64 * "dropout_input" - Def, base and large https://github.com/pytorch/fairseq/blob/425c36eafff535fe7337f8bdd5ace22ebacc78cb/fairseq/models/wav2vec/wav2vec2.py#L75-L78 * "conv_pos" - Def, base and large NOTE: The description is wrong. https://github.com/pytorch/fairseq/blob/425c36eafff535fe7337f8bdd5ace22ebacc78cb/fairseq/models/wav2vec/wav2vec2.py#L204-L207 - Usage https://github.com/pytorch/fairseq/blob/425c36eafff535fe7337f8bdd5ace22ebacc78cb/fairseq/models/wav2vec/wav2vec2.py#L756 * "conv_pos_groups" - Def, base and large https://github.com/pytorch/fairseq/blob/425c36eafff535fe7337f8bdd5ace22ebacc78cb/fairseq/models/wav2vec/wav2vec2.py#L208-L211 * "encoder_layers" - Def and base https://github.com/pytorch/fairseq/blob/425c36eafff535fe7337f8bdd5ace22ebacc78cb/fairseq/models/wav2vec/wav2vec2.py#L46-L48 - Large https://github.com/pytorch/fairseq/blob/425c36eafff535fe7337f8bdd5ace22ebacc78cb/examples/wav2vec/config/pretraining/wav2vec2_large_librivox.yaml#L63 * "encoder_attention_heads" - Def and base https://github.com/pytorch/fairseq/blob/425c36eafff535fe7337f8bdd5ace22ebacc78cb/fairseq/models/wav2vec/wav2vec2.py#L55-L57 - Large https://github.com/pytorch/fairseq/blob/425c36eafff535fe7337f8bdd5ace22ebacc78cb/examples/wav2vec/config/pretraining/wav2vec2_large_librivox.yaml#L66 * "attention_dropout" - Def and base https://github.com/pytorch/fairseq/blob/425c36eafff535fe7337f8bdd5ace22ebacc78cb/fairseq/models/wav2vec/wav2vec2.py#L66-L68 - Large https://github.com/pytorch/fairseq/blob/425c36eafff535fe7337f8bdd5ace22ebacc78cb/examples/wav2vec/config/pretraining/wav2vec2_large_librivox.yaml#L60 * "encoder_ffn_embed_dim" - Def and base https://github.com/pytorch/fairseq/blob/425c36eafff535fe7337f8bdd5ace22ebacc78cb/fairseq/models/wav2vec/wav2vec2.py#L52-L54 - Large https://github.com/pytorch/fairseq/blob/425c36eafff535fe7337f8bdd5ace22ebacc78cb/examples/wav2vec/config/pretraining/wav2vec2_large_librivox.yaml#L65 * "activation_dropout" - Def 
https://github.com/pytorch/fairseq/blob/425c36eafff535fe7337f8bdd5ace22ebacc78cb/fairseq/models/wav2vec/wav2vec2.py#L69-L71 - Base https://github.com/pytorch/fairseq/blob/425c36eafff535fe7337f8bdd5ace22ebacc78cb/examples/wav2vec/config/finetuning/base_960h.yaml#L55 - Large https://github.com/pytorch/fairseq/blob/425c36eafff535fe7337f8bdd5ace22ebacc78cb/examples/wav2vec/config/finetuning/vox_960h.yaml#L55 * "dropout" - Def and base https://github.com/pytorch/fairseq/blob/425c36eafff535fe7337f8bdd5ace22ebacc78cb/fairseq/models/wav2vec/wav2vec2.py#L63-L65 - Large https://github.com/pytorch/fairseq/blob/425c36eafff535fe7337f8bdd5ace22ebacc78cb/examples/wav2vec/config/pretraining/wav2vec2_large_librivox.yaml#L59 * "layer_norm_first" - Def and base https://github.com/pytorch/fairseq/blob/425c36eafff535fe7337f8bdd5ace22ebacc78cb/fairseq/models/wav2vec/wav2vec2.py#L91-L93 - Large https://github.com/pytorch/fairseq/blob/425c36eafff535fe7337f8bdd5ace22ebacc78cb/examples/wav2vec/config/pretraining/wav2vec2_large_librivox.yaml#L53 * "layerdrop" - Def https://github.com/pytorch/fairseq/blob/425c36eafff535fe7337f8bdd5ace22ebacc78cb/fairseq/models/wav2vec/wav2vec2.py#L72-L74 - Base https://github.com/pytorch/fairseq/blob/425c36eafff535fe7337f8bdd5ace22ebacc78cb/examples/wav2vec/config/finetuning/base_960h.yaml#L54 - Large https://github.com/pytorch/fairseq/blob/425c36eafff535fe7337f8bdd5ace22ebacc78cb/examples/wav2vec/config/finetuning/vox_960h.yaml#L54 """ feature_projection = FeatureProjection(in_features, embed_dim, dropout_input) pos_conv = ConvolutionalPositionalEmbedding(embed_dim, pos_conv_kernel, pos_conv_groups) # Original impl # https://github.com/pytorch/fairseq/blob/425c36eafff535fe7337f8bdd5ace22ebacc78cb/fairseq/models/wav2vec/wav2vec2.py#L768-L782 encoder_layers = nn.ModuleList() for _ in range(num_layers): attention = SelfAttention( embed_dim=embed_dim, num_heads=num_heads, dropout=attention_dropout, ) feed_forward = FeedForward( io_features=embed_dim, intermediate_features=ff_interm_features, intermediate_dropout=ff_interm_dropout, output_dropout=dropout, ) encoder_layers.append( EncoderLayer( attention=attention, dropout=dropout, layer_norm_first=layer_norm_first, feed_forward=feed_forward, ) ) transformer = Transformer( pos_conv_embed=pos_conv, dropout=dropout, layers=encoder_layers, layer_norm_first=not layer_norm_first, layer_drop=layer_drop, ) return Encoder(feature_projection, transformer)
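# The two factories above are easiest to read with concrete numbers plugged in.
# Below is a minimal sketch, assuming the Base-architecture values quoted in their
# docstrings ("group_norm" extractor, 512-channel conv stack, 768-dim / 12-layer /
# 12-head encoder) and assuming that the encoder's ``in_features`` equals the last
# conv output channel (512). The helper name is illustrative, not part of the library.
def _sketch_base_backbone():
    extractor = _get_feature_extractor(
        norm_mode="group_norm",
        shapes=[(512, 10, 5)] + [(512, 3, 2)] * 4 + [(512, 2, 2)] * 2,
        bias=False,
    )
    encoder = _get_encoder(
        in_features=512,  # output channels of the last conv block
        embed_dim=768,
        dropout_input=0.1,
        pos_conv_kernel=128,
        pos_conv_groups=16,
        num_layers=12,
        num_heads=12,
        attention_dropout=0.1,
        ff_interm_features=3072,
        ff_interm_dropout=0.1,
        dropout=0.1,
        layer_norm_first=False,
        layer_drop=0.1,
    )
    # ``encoder(features, lengths)`` maps the extractor's (batch, frames, 512)
    # features to (batch, frames, 768) context vectors.
    return extractor, encoder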
from .import_huggingface import import_huggingface_model from .import_fairseq import import_fairseq_model __all__ = [ 'import_huggingface_model', 'import_fairseq_model', ]
"""Import Hugging Face transformers's wav2vec2.0 pretrained weights to torchaudios's format. """ import logging from torch.nn import Module from ..model import Wav2Vec2Model, wav2vec2_model _LG = logging.getLogger(__name__) def _get_config(cfg): config = { 'extractor_mode': f'{cfg.feat_extract_norm}_norm', 'extractor_conv_layer_config': list(zip(cfg.conv_dim, cfg.conv_kernel, cfg.conv_stride)), 'extractor_conv_bias': cfg.conv_bias, 'encoder_embed_dim': cfg.hidden_size, 'encoder_projection_dropout': cfg.feat_proj_dropout, 'encoder_pos_conv_kernel': cfg.num_conv_pos_embeddings, 'encoder_pos_conv_groups': cfg.num_conv_pos_embedding_groups, 'encoder_num_layers': cfg.num_hidden_layers, 'encoder_num_heads': cfg.num_attention_heads, 'encoder_attention_dropout': cfg.attention_dropout, 'encoder_ff_interm_features': cfg.intermediate_size, 'encoder_ff_interm_dropout': cfg.activation_dropout, 'encoder_dropout': cfg.hidden_dropout, 'encoder_layer_norm_first': cfg.do_stable_layer_norm, 'encoder_layer_drop': cfg.layerdrop, } return config def _build(config, original): if original.__class__.__name__ == 'Wav2Vec2ForCTC': aux_num_out = original.config.vocab_size wav2vec2 = original.wav2vec2 else: _LG.warning( 'The model is not an instance of Wav2Vec2ForCTC. ' '"lm_head" module is not imported.') aux_num_out = None wav2vec2 = original imported = wav2vec2_model(**config, aux_num_out=aux_num_out) imported.feature_extractor.load_state_dict(wav2vec2.feature_extractor.state_dict()) imported.encoder.feature_projection.load_state_dict(wav2vec2.feature_projection.state_dict()) imported.encoder.transformer.load_state_dict(wav2vec2.encoder.state_dict()) if original.__class__.__name__ == 'Wav2Vec2ForCTC': imported.aux.load_state_dict(original.lm_head.state_dict()) return imported def import_huggingface_model(original: Module) -> Wav2Vec2Model: """import_huggingface_model(original: torch.nn.Module) -> torchaudio.models.Wav2Vec2Model Build Wav2Vec2Model from the corresponding model object of Hugging Face's `Transformers`_. Args: original (torch.nn.Module): An instance of ``Wav2Vec2ForCTC`` from ``transformers``. Returns: Wav2Vec2Model: Imported model. Example >>> from torchaudio.models.wav2vec2.utils import import_huggingface_model >>> >>> original = Wav2Vec2ForCTC.from_pretrained("facebook/wav2vec2-base-960h") >>> model = import_huggingface_model(original) >>> >>> waveforms, _ = torchaudio.load("audio.wav") >>> logits, _ = model(waveforms) .. _Transformers: https://huggingface.co/transformers/ """ _LG.info('Importing model.') _LG.info('Loading model configuration.') config = _get_config(original.config) _LG.debug(' - config: %s', config) _LG.info('Building model.') imported = _build(config, original) return imported
"""Import fariseq's wav2vec2.0 pretrained weights to torchaudios's format. For this module to work, you need `fairseq`. """ import re from torch.nn import Module from ..model import Wav2Vec2Model, wav2vec2_model def _parse_config(w2v_model): encoder = w2v_model.encoder conv_layers = w2v_model.feature_extractor.conv_layers extractor_mode = 'layer_norm' if 'GroupNorm' in conv_layers[0][2].__class__.__name__: extractor_mode = 'group_norm' else: extractor_mode = 'layer_norm' conv_layer_config = [(l[0].out_channels, l[0].kernel_size[0], l[0].stride[0]) for l in conv_layers] if all(l[0].bias is None for l in conv_layers): conv_bias = False elif all(l[0].bias is not None for l in conv_layers): conv_bias = True else: raise ValueError( 'Either all the convolutions layers have bias term or none of them should.') config = { 'extractor_mode': extractor_mode, 'extractor_conv_layer_config': conv_layer_config, 'extractor_conv_bias': conv_bias, 'encoder_embed_dim': w2v_model.post_extract_proj.out_features, 'encoder_projection_dropout': w2v_model.dropout_input.p, 'encoder_pos_conv_kernel': encoder.pos_conv[0].kernel_size[0], 'encoder_pos_conv_groups': encoder.pos_conv[0].groups, 'encoder_num_layers': len(encoder.layers), 'encoder_num_heads': encoder.layers[0].self_attn.num_heads, 'encoder_attention_dropout': encoder.layers[0].self_attn.dropout_module.p, 'encoder_ff_interm_features': encoder.layers[0].fc1.out_features, 'encoder_ff_interm_dropout': encoder.layers[0].dropout2.p, 'encoder_dropout': encoder.layers[0].dropout3.p, 'encoder_layer_norm_first': encoder.layer_norm_first, 'encoder_layer_drop': encoder.layerdrop, } return config def _map_key(key): key_ = key if key.startswith('w2v_model.'): key = key.replace('w2v_model.', '') if re.match(r'(mask_emb|quantizer|project_q|final_proj|mask_emb)', key): return None # Feature Extractor # Group norm when "extractor_mode" is "default". # (Only the first layer) # "conv_layers.0.2.weight" -> "conv_layers.0.layer_norm.weight" # "conv_layers.0.2.bias" -> "conv_layers.0.layer_norm.bias" match = re.match(r'feature_extractor\.conv_layers\.0\.2\.(weight|bias)', key) if match: return f"feature_extractor.conv_layers.0.layer_norm.{match.group(1)}" # Convolutions # "conv_layers.X.0.weight" -> "conv_layers.X.conv.weight" # "conv_layers.X.0.bias" -> "conv_layers.X.conv.bias" match = re.match(r'feature_extractor\.conv_layers\.(\d+)\.0\.(weight|bias)', key) if match: return f"feature_extractor.conv_layers.{match.group(1)}.conv.{match.group(2)}" # Layer norm when "extractor_mode" is "layer_norm". 
# "conv_layers.X.2.1.weight" -> "conv_layers.X.layer_norm.weight" # "conv_layers.X.2.1.bias" -> "conv_layers.X.layer_norm.bias" match = re.match(r'feature_extractor\.conv_layers\.(\d+)\.2\.1\.(weight|bias)', key) if match: return f"feature_extractor.conv_layers.{match.group(1)}.layer_norm.{match.group(2)}" match = re.match(r"post_extract_proj\.(weight|bias)", key) # Encoder - Feature projection if match: return f"encoder.feature_projection.projection.{match.group(1)}" match = re.match(r"layer_norm\.(weight|bias)", key) if match: return f"encoder.feature_projection.layer_norm.{match.group(1)}" # Encoder - Transformer - Convolutional positional embedding match = re.match(r"encoder\.pos_conv\.0\.(bias|weight_g|weight_v)", key) if match: return f"encoder.transformer.pos_conv_embed.conv.{match.group(1)}" match = re.match(r"encoder\.layer_norm\.(weight|bias)", key) if match: return f"encoder.transformer.layer_norm.{match.group(1)}" # Encoder - Transformer - Self attention layers match = re.match(r"encoder\.layers\.(\d+)\.self_attn\.((k_|v_|q_|out_)proj\.(weight|bias))", key) if match: return f"encoder.transformer.layers.{match.group(1)}.attention.{match.group(2)}" match = re.match(r"encoder\.layers\.(\d+)\.self_attn_layer_norm\.(weight|bias)", key) if match: return f"encoder.transformer.layers.{match.group(1)}.layer_norm.{match.group(2)}" match = re.match(r"encoder\.layers\.(\d+)\.fc1\.(weight|bias)", key) if match: return f"encoder.transformer.layers.{match.group(1)}.feed_forward.intermediate_dense.{match.group(2)}" match = re.match(r"encoder\.layers\.(\d+)\.fc2\.(weight|bias)", key) if match: return f"encoder.transformer.layers.{match.group(1)}.feed_forward.output_dense.{match.group(2)}" match = re.match(r"encoder\.layers\.(\d+)\.final_layer_norm\.(weight|bias)", key) if match: return f"encoder.transformer.layers.{match.group(1)}.final_layer_norm.{match.group(2)}" match = re.match(r"proj\.(weight|bias)", key) # Auxiliary Module # Only relevant when loading fine-tuned models if match: return f"aux.{match.group(1)}" # HuBERT Extension if key in ['label_embs_concat']: return key raise ValueError(f'Unexpected key: {key_}') def _convert_state_dict(state_dict): converted = {} for k, v in state_dict.items(): k = _map_key(k) if k is not None: converted[k] = v return converted def import_fairseq_model(original: Module) -> Wav2Vec2Model: # Overriding the signature so that the types are correct on Sphinx """import_fairseq_model(original: torch.nn.Module) -> torchaudio.models.Wav2Vec2Model Build Wav2Vec2Model from the corresponding model object of `fairseq`_. Args: original (torch.nn.Module): An instance of fairseq's Wav2Vec2.0 or HuBERT model. One of ``fairseq.models.wav2vec.wav2vec2_asr.Wav2VecEncoder``, ``fairseq.models.wav2vec.wav2vec2.Wav2Vec2Model`` or ``fairseq.models.hubert.hubert_asr.HubertEncoder``. Returns: Wav2Vec2Model: Imported model. 
Example - Loading pretrain-only model >>> from torchaudio.models.wav2vec2.utils import import_fairseq_model >>> >>> # Load model using fairseq >>> model_file = 'wav2vec_small.pt' >>> model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([model_file]) >>> original = model[0] >>> imported = import_fairseq_model(original) >>> >>> # Perform feature extraction >>> waveform, _ = torchaudio.load('audio.wav') >>> features, _ = imported.extract_features(waveform) >>> >>> # Compare result with the original model from fairseq >>> reference = original.feature_extractor(waveform).transpose(1, 2) >>> torch.testing.assert_allclose(features, reference) Example - Fine-tuned model >>> from torchaudio.models.wav2vec2.utils import import_fairseq_model >>> >>> # Load model using fairseq >>> model_file = 'wav2vec_small_960h.pt' >>> model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([model_file]) >>> original = model[0] >>> imported = import_fairseq_model(original.w2v_encoder) >>> >>> # Perform encoding >>> waveform, _ = torchaudio.load('audio.wav') >>> emission, _ = imported(waveform) >>> >>> # Compare result with the original model from fairseq >>> mask = torch.zeros_like(waveform) >>> reference = original(waveform, mask)['encoder_out'].transpose(0, 1) >>> torch.testing.assert_allclose(emission, reference) .. _fairseq: https://github.com/pytorch/fairseq """ class_ = original.__class__.__name__ if class_ == 'Wav2Vec2Model': return _import_wav2vec2_pretraining(original) if class_ == 'Wav2VecEncoder': return _import_wav2vec2_finetuning(original) if class_ == 'HubertModel': return _import_hubert_pretraining(original) if class_ == 'HubertEncoder': return _import_hubert_finetuning(original) raise ValueError( f'Expected an instance of `Wav2Vec2Model` or `Wav2VecEncoder`. Found: {class_}') def _import_wav2vec2_finetuning(original: Module) -> Wav2Vec2Model: config = _parse_config(original.w2v_model) model = wav2vec2_model(**config, aux_num_out=original.proj.out_features) model.load_state_dict(_convert_state_dict(original.state_dict())) return model def _import_wav2vec2_pretraining(original: Module) -> Wav2Vec2Model: config = _parse_config(original) model = wav2vec2_model(**config, aux_num_out=None) model.load_state_dict(_convert_state_dict(original.state_dict()), strict=False) return model def _import_hubert_finetuning(original: Module) -> Wav2Vec2Model: config = _parse_config(original.w2v_model) model = wav2vec2_model(**config, aux_num_out=original.proj.out_features) model.load_state_dict(_convert_state_dict(original.state_dict()), strict=False) return model def _import_hubert_pretraining(original: Module) -> Wav2Vec2Model: config = _parse_config(original) model = wav2vec2_model(**config, aux_num_out=None) model.load_state_dict(_convert_state_dict(original.state_dict()), strict=False) return model
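# A short illustration of the key-mapping rules above, showing how representative
# fairseq parameter names are renamed to the torchaudio layout (``None`` means the
# parameter is dropped). The sample keys are illustrative.
def _sketch_key_mapping():
    samples = [
        'w2v_model.feature_extractor.conv_layers.0.0.weight',
        'encoder.pos_conv.0.weight_g',
        'encoder.layers.0.self_attn.k_proj.weight',
        'encoder.layers.0.fc1.bias',
        'quantizer.vars',  # pretraining-only parameter, dropped
    ]
    for key in samples:
        print(f'{key} -> {_map_key(key)}')
    # Expected mappings, in order:
    #   feature_extractor.conv_layers.0.conv.weight
    #   encoder.transformer.pos_conv_embed.conv.weight_g
    #   encoder.transformer.layers.0.attention.k_proj.weight
    #   encoder.transformer.layers.0.feed_forward.intermediate_dense.bias
    #   None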
import math from typing import List, Optional, Tuple import torch __all__ = ["Emformer"] def _lengths_to_padding_mask(lengths: torch.Tensor) -> torch.Tensor: batch_size = lengths.shape[0] max_length = int(torch.max(lengths).item()) padding_mask = torch.arange( max_length, device=lengths.device, dtype=lengths.dtype ).expand(batch_size, max_length) >= lengths.unsqueeze(1) return padding_mask def _gen_padding_mask( utterance: torch.Tensor, right_context: torch.Tensor, summary: torch.Tensor, lengths: torch.Tensor, mems: torch.Tensor, left_context_key: Optional[torch.Tensor] = None, ) -> Optional[torch.Tensor]: T = right_context.size(0) + utterance.size(0) + summary.size(0) B = right_context.size(1) if B == 1: padding_mask = None else: right_context_blocks_length = T - torch.max(lengths).int() - summary.size(0) left_context_blocks_length = ( left_context_key.size(0) if left_context_key is not None else 0 ) klengths = ( lengths + mems.size(0) + right_context_blocks_length + left_context_blocks_length ) padding_mask = _lengths_to_padding_mask(lengths=klengths) return padding_mask def _get_activation_module(activation: str) -> torch.nn.Module: if activation == "relu": return torch.nn.ReLU() elif activation == "gelu": return torch.nn.GELU() elif activation == "silu": return torch.nn.SiLU() else: raise ValueError(f"Unsupported activation {activation}") def _get_weight_init_gains( weight_init_scale_strategy: Optional[str], num_layers: int ) -> List[Optional[float]]: if weight_init_scale_strategy is None: return [None for _ in range(num_layers)] elif weight_init_scale_strategy == "depthwise": return [1.0 / math.sqrt(layer_idx + 1) for layer_idx in range(num_layers)] elif weight_init_scale_strategy == "constant": return [1.0 / math.sqrt(2) for layer_idx in range(num_layers)] else: raise ValueError( f"Unsupported weight_init_scale_strategy value {weight_init_scale_strategy}" ) def _gen_attention_mask_block( col_widths: List[int], col_mask: List[bool], num_rows: int, device: torch.device ) -> torch.Tensor: assert len(col_widths) == len( col_mask ), "Length of col_widths must match that of col_mask" mask_block = [ torch.ones(num_rows, col_width, device=device) if is_ones_col else torch.zeros(num_rows, col_width, device=device) for col_width, is_ones_col in zip(col_widths, col_mask) ] return torch.cat(mask_block, dim=1) class _EmformerAttention(torch.nn.Module): r"""Emformer layer attention module. Args: input_dim (int): input dimension. num_heads (int): number of attention heads in each Emformer layer. dropout (float, optional): dropout probability. (Default: 0.0) weight_init_gain (float or None, optional): scale factor to apply when initializing attention module parameters. (Default: ``None``) tanh_on_mem (bool, optional): if ``True``, applies tanh to memory elements. (Default: ``False``) negative_inf (float, optional): value to use for negative infinity in attention weights. (Default: -1e8) """ def __init__( self, input_dim: int, num_heads: int, dropout: float = 0.0, weight_init_gain: Optional[float] = None, tanh_on_mem: bool = False, negative_inf: float = -1e8, ): super().__init__() if input_dim % num_heads != 0: raise ValueError( f"input_dim ({input_dim}) is not a multiple of num_heads ({num_heads})." 
) self.input_dim = input_dim self.num_heads = num_heads self.dropout = dropout self.tanh_on_mem = tanh_on_mem self.negative_inf = negative_inf self.scaling = (self.input_dim // self.num_heads) ** -0.5 self.emb_to_key_value = torch.nn.Linear(input_dim, 2 * input_dim, bias=True) self.emb_to_query = torch.nn.Linear(input_dim, input_dim, bias=True) self.out_proj = torch.nn.Linear(input_dim, input_dim, bias=True) if weight_init_gain: torch.nn.init.xavier_uniform_( self.emb_to_key_value.weight, gain=weight_init_gain ) torch.nn.init.xavier_uniform_( self.emb_to_query.weight, gain=weight_init_gain ) def _gen_key_value( self, input: torch.Tensor, mems: torch.Tensor ) -> Tuple[torch.Tensor, torch.Tensor]: T, _, _ = input.shape summary_length = mems.size(0) + 1 right_ctx_utterance_block = input[: T - summary_length] mems_right_ctx_utterance_block = torch.cat([mems, right_ctx_utterance_block]) key, value = self.emb_to_key_value(mems_right_ctx_utterance_block).chunk( chunks=2, dim=2 ) return key, value def _gen_attention_probs( self, attention_weights: torch.Tensor, attention_mask: torch.Tensor, padding_mask: Optional[torch.Tensor], ) -> torch.Tensor: attention_weights_float = attention_weights.float() attention_weights_float = attention_weights_float.masked_fill( attention_mask.unsqueeze(0), self.negative_inf ) T = attention_weights.size(1) B = attention_weights.size(0) // self.num_heads if padding_mask is not None: attention_weights_float = attention_weights_float.view( B, self.num_heads, T, -1 ) attention_weights_float = attention_weights_float.masked_fill( padding_mask.unsqueeze(1).unsqueeze(2).to(torch.bool), self.negative_inf ) attention_weights_float = attention_weights_float.view( B * self.num_heads, T, -1 ) attention_probs = torch.nn.functional.softmax( attention_weights_float, dim=-1 ).type_as(attention_weights) return torch.nn.functional.dropout( attention_probs, p=float(self.dropout), training=self.training ) def _forward_impl( self, utterance: torch.Tensor, lengths: torch.Tensor, right_context: torch.Tensor, summary: torch.Tensor, mems: torch.Tensor, attention_mask: torch.Tensor, left_context_key: Optional[torch.Tensor] = None, left_context_val: Optional[torch.Tensor] = None, ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]: B = utterance.size(1) T = right_context.size(0) + utterance.size(0) + summary.size(0) # Compute query with [right context, utterance, summary]. query = self.emb_to_query(torch.cat([right_context, utterance, summary])) # Compute key and value with [mems, right context, utterance]. key, value = self.emb_to_key_value( torch.cat([mems, right_context, utterance]) ).chunk(chunks=2, dim=2) if left_context_key is not None and left_context_val is not None: right_context_blocks_length = T - torch.max(lengths).int() - summary.size(0) key = torch.cat( [ key[: mems.size(0) + right_context_blocks_length], left_context_key, key[mems.size(0) + right_context_blocks_length:], ], ) value = torch.cat( [ value[: mems.size(0) + right_context_blocks_length], left_context_val, value[mems.size(0) + right_context_blocks_length:], ], ) # Compute attention weights from query, key, and value. reshaped_query, reshaped_key, reshaped_value = [ tensor.contiguous() .view(-1, B * self.num_heads, self.input_dim // self.num_heads) .transpose(0, 1) for tensor in [query, key, value] ] attention_weights = torch.bmm( reshaped_query * self.scaling, reshaped_key.transpose(1, 2) ) # Compute padding mask. 
padding_mask = _gen_padding_mask( utterance, right_context, summary, lengths, mems, left_context_key ) # Compute attention probabilities. attention_probs = self._gen_attention_probs( attention_weights, attention_mask, padding_mask ) # Compute attention. attention = torch.bmm(attention_probs, reshaped_value) assert attention.shape == ( B * self.num_heads, T, self.input_dim // self.num_heads, ) attention = attention.transpose(0, 1).contiguous().view(T, B, self.input_dim) # Apply output projection. output_right_context_mems = self.out_proj(attention) summary_length = summary.size(0) output_right_context = output_right_context_mems[: T - summary_length] output_mems = output_right_context_mems[T - summary_length:] if self.tanh_on_mem: output_mems = torch.tanh(output_mems) else: output_mems = torch.clamp(output_mems, min=-10, max=10) return output_right_context, output_mems, key, value def forward( self, utterance: torch.Tensor, lengths: torch.Tensor, right_context: torch.Tensor, summary: torch.Tensor, mems: torch.Tensor, attention_mask: torch.Tensor, ) -> Tuple[torch.Tensor, torch.Tensor]: r"""Forward pass for training. B: batch size; D: feature dimension of each frame; T: number of utterance frames; R: number of right context frames; S: number of summary elements; M: number of memory elements. Args: utterance (torch.Tensor): utterance frames, with shape `(T, B, D)`. lengths (torch.Tensor): with shape `(B,)` and i-th element representing number of valid frames for i-th batch element in ``utterance``. right_context (torch.Tensor): right context frames, with shape `(R, B, D)`. summary (torch.Tensor): summary elements, with shape `(S, B, D)`. mems (torch.Tensor): memory elements, with shape `(M, B, D)`. attention_mask (torch.Tensor): attention mask for underlying attention module. Returns: (Tensor, Tensor): Tensor output frames corresponding to utterance and right_context, with shape `(T + R, B, D)`. Tensor updated memory elements, with shape `(M, B, D)`. """ output, output_mems, _, _ = self._forward_impl( utterance, lengths, right_context, summary, mems, attention_mask ) return output, output_mems[:-1] @torch.jit.export def infer( self, utterance: torch.Tensor, lengths: torch.Tensor, right_context: torch.Tensor, summary: torch.Tensor, mems: torch.Tensor, left_context_key: torch.Tensor, left_context_val: torch.Tensor, ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]: r"""Forward pass for inference. B: batch size; D: feature dimension of each frame; T: number of utterance frames; R: number of right context frames; S: number of summary elements; M: number of memory elements. Args: utterance (torch.Tensor): utterance frames, with shape `(T, B, D)`. lengths (torch.Tensor): with shape `(B,)` and i-th element representing number of valid frames for i-th batch element in ``utterance``. right_context (torch.Tensor): right context frames, with shape `(R, B, D)`. summary (torch.Tensor): summary elements, with shape `(S, B, D)`. mems (torch.Tensor): memory elements, with shape `(M, B, D)`. left_context_key (torch.Tensor): left context attention key computed from preceding invocation. left_context_val (torch.Tensor): left context attention value computed from preceding invocation. Returns: (Tensor, Tensor, Tensor, and Tensor): Tensor output frames corresponding to utterance and right_context, with shape `(T + R, B, D)`. Tensor updated memory elements, with shape `(M, B, D)`. Tensor attention key computed for left context and utterance. 
Tensor attention value computed for left context and utterance. """ query_dim = right_context.size(0) + utterance.size(0) + summary.size(0) key_dim = ( right_context.size(0) + utterance.size(0) + mems.size(0) + left_context_key.size(0) ) attention_mask = torch.zeros(query_dim, key_dim).to( dtype=torch.bool, device=utterance.device ) attention_mask[-1, : mems.size(0)] = True output, output_mems, key, value = self._forward_impl( utterance, lengths, right_context, summary, mems, attention_mask, left_context_key=left_context_key, left_context_val=left_context_val, ) return ( output, output_mems, key[mems.size(0) + right_context.size(0):], value[mems.size(0) + right_context.size(0):], ) class _EmformerLayer(torch.nn.Module): r"""Emformer layer that constitutes Emformer. Args: input_dim (int): input dimension. num_heads (int): number of attention heads. ffn_dim: (int): hidden layer dimension of feedforward network. dropout (float, optional): dropout probability. (Default: 0.0) activation (str, optional): activation function to use in feedforward network. Must be one of ("relu", "gelu", "silu"). (Default: "relu") left_context_length (int, optional): length of left context. (Default: 0) segment_length (int, optional): length of each input segment. (Default: 128) max_memory_size (int, optional): maximum number of memory elements to use. (Default: 0) weight_init_gain (float or None, optional): scale factor to apply when initializing attention module parameters. (Default: ``None``) tanh_on_mem (bool, optional): if ``True``, applies tanh to memory elements. (Default: ``False``) negative_inf (float, optional): value to use for negative infinity in attention weights. (Default: -1e8) """ def __init__( self, input_dim: int, num_heads: int, ffn_dim: int, dropout: float = 0.0, activation: str = "relu", left_context_length: int = 0, segment_length: int = 128, max_memory_size: int = 0, weight_init_gain: Optional[float] = None, tanh_on_mem: bool = False, negative_inf: float = -1e8, ): super().__init__() self.attention = _EmformerAttention( input_dim=input_dim, num_heads=num_heads, dropout=dropout, weight_init_gain=weight_init_gain, tanh_on_mem=tanh_on_mem, negative_inf=negative_inf, ) self.dropout = torch.nn.Dropout(dropout) self.memory_op = torch.nn.AvgPool1d( kernel_size=segment_length, stride=segment_length, ceil_mode=True ) activation_module = _get_activation_module(activation) self.pos_ff = torch.nn.Sequential( torch.nn.LayerNorm(input_dim), torch.nn.Linear(input_dim, ffn_dim), activation_module, torch.nn.Dropout(dropout), torch.nn.Linear(ffn_dim, input_dim), torch.nn.Dropout(dropout), ) self.layer_norm_input = torch.nn.LayerNorm(input_dim) self.layer_norm_output = torch.nn.LayerNorm(input_dim) self.left_context_length = left_context_length self.segment_length = segment_length self.max_memory_size = max_memory_size self.input_dim = input_dim self.use_mem = max_memory_size > 0 def _init_state( self, batch_size: int, device: Optional[torch.device] ) -> List[torch.Tensor]: empty_memory = torch.zeros( self.max_memory_size, batch_size, self.input_dim, device=device ) left_context_key = torch.zeros( self.left_context_length, batch_size, self.input_dim, device=device ) left_context_val = torch.zeros( self.left_context_length, batch_size, self.input_dim, device=device ) past_length = torch.zeros(1, batch_size, dtype=torch.int32, device=device) return [empty_memory, left_context_key, left_context_val, past_length] def _unpack_state( self, utterance: torch.Tensor, mems: torch.Tensor, state: List[torch.Tensor] ) -> 
Tuple[torch.Tensor, torch.Tensor, torch.Tensor]: past_length = state[3][0][0].item() past_left_context_length = min(self.left_context_length, past_length) past_mem_length = min( self.max_memory_size, math.ceil(past_length / self.segment_length) ) pre_mems = state[0][self.max_memory_size - past_mem_length:] lc_key = state[1][self.left_context_length - past_left_context_length:] lc_val = state[2][self.left_context_length - past_left_context_length:] return pre_mems, lc_key, lc_val def _pack_state( self, next_k: torch.Tensor, next_v: torch.Tensor, update_length: int, mems: torch.Tensor, state: List[torch.Tensor], ) -> List[torch.Tensor]: new_k = torch.cat([state[1], next_k]) new_v = torch.cat([state[2], next_v]) state[0] = torch.cat([state[0], mems])[-self.max_memory_size:] state[1] = new_k[new_k.shape[0] - self.left_context_length:] state[2] = new_v[new_v.shape[0] - self.left_context_length:] state[3] = state[3] + update_length return state def _process_attention_output( self, rc_output: torch.Tensor, utterance: torch.Tensor, right_context: torch.Tensor, ) -> torch.Tensor: result = self.dropout(rc_output) + torch.cat([right_context, utterance]) result = self.pos_ff(result) + result result = self.layer_norm_output(result) return result def _apply_pre_attention_layer_norm( self, utterance: torch.Tensor, right_context: torch.Tensor ) -> Tuple[torch.Tensor, torch.Tensor]: layer_norm_input = self.layer_norm_input(torch.cat([right_context, utterance])) return ( layer_norm_input[right_context.size(0):], layer_norm_input[: right_context.size(0)], ) def _apply_post_attention_ffn( self, rc_output: torch.Tensor, utterance: torch.Tensor, right_context: torch.Tensor ) -> Tuple[torch.Tensor, torch.Tensor]: rc_output = self._process_attention_output(rc_output, utterance, right_context) return rc_output[right_context.size(0):], rc_output[: right_context.size(0)] def _apply_attention_forward( self, utterance: torch.Tensor, lengths: torch.Tensor, right_context: torch.Tensor, mems: torch.Tensor, attention_mask: Optional[torch.Tensor], ) -> Tuple[torch.Tensor, torch.Tensor]: if attention_mask is None: raise ValueError( "attention_mask must be not None when for_inference is False" ) if self.use_mem: summary = self.memory_op(utterance.permute(1, 2, 0)).permute(2, 0, 1) else: summary = torch.empty(0).to(dtype=utterance.dtype, device=utterance.device) rc_output, next_m = self.attention( utterance=utterance, lengths=lengths, right_context=right_context, summary=summary, mems=mems, attention_mask=attention_mask, ) return rc_output, next_m def _apply_attention_infer( self, utterance: torch.Tensor, lengths: torch.Tensor, right_context: torch.Tensor, mems: torch.Tensor, state: Optional[List[torch.Tensor]], ) -> Tuple[torch.Tensor, torch.Tensor, List[torch.Tensor]]: if state is None: state = self._init_state(utterance.size(1), device=utterance.device) pre_mems, lc_key, lc_val = self._unpack_state(utterance, mems, state) if self.use_mem: summary = self.memory_op(utterance.permute(1, 2, 0)).permute(2, 0, 1) summary = summary[:1] else: summary = torch.empty(0).to(dtype=utterance.dtype, device=utterance.device) rc_output, next_m, next_k, next_v = self.attention.infer( utterance=utterance, lengths=lengths, right_context=right_context, summary=summary, mems=pre_mems, left_context_key=lc_key, left_context_val=lc_val, ) state = self._pack_state(next_k, next_v, utterance.size(0), mems, state) return rc_output, next_m, state def forward( self, utterance: torch.Tensor, lengths: torch.Tensor, right_context: torch.Tensor, mems: 
torch.Tensor, attention_mask: torch.Tensor, ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]: r"""Forward pass for training. B: batch size; D: feature dimension of each frame; T: number of utterance frames; R: number of right context frames; M: number of memory elements. Args: utterance (torch.Tensor): utterance frames, with shape `(T, B, D)`. lengths (torch.Tensor): with shape `(B,)` and i-th element representing number of valid frames for i-th batch element in ``utterance``. right_context (torch.Tensor): right context frames, with shape `(R, B, D)`. mems (torch.Tensor): memory elements, with shape `(M, B, D)`. attention_mask (torch.Tensor): attention mask for underlying attention module. Returns: (Tensor, Tensor, Tensor): Tensor encoded utterance frames, with shape `(T, B, D)`. Tensor updated right context frames, with shape `(R, B, D)`. Tensor updated memory elements, with shape `(M, B, D)`. """ ( layer_norm_utterance, layer_norm_right_context, ) = self._apply_pre_attention_layer_norm(utterance, right_context) rc_output, output_mems = self._apply_attention_forward( layer_norm_utterance, lengths, layer_norm_right_context, mems, attention_mask, ) output_utterance, output_right_context = self._apply_post_attention_ffn( rc_output, utterance, right_context ) return output_utterance, output_right_context, output_mems @torch.jit.export def infer( self, utterance: torch.Tensor, lengths: torch.Tensor, right_context: torch.Tensor, state: Optional[List[torch.Tensor]], mems: torch.Tensor, ) -> Tuple[torch.Tensor, torch.Tensor, List[torch.Tensor], torch.Tensor]: r"""Forward pass for inference. B: batch size; D: feature dimension of each frame; T: number of utterance frames; R: number of right context frames; M: number of memory elements. Args: utterance (torch.Tensor): utterance frames, with shape `(T, B, D)`. lengths (torch.Tensor): with shape `(B,)` and i-th element representing number of valid frames for i-th batch element in ``utterance``. right_context (torch.Tensor): right context frames, with shape `(R, B, D)`. state (List[torch.Tensor] or None): list of tensors representing layer internal state generated in preceding invocation of ``infer``. mems (torch.Tensor): memory elements, with shape `(M, B, D)`. Returns: (Tensor, Tensor, List[torch.Tensor], Tensor): Tensor encoded utterance frames, with shape `(T, B, D)`. Tensor updated right context frames, with shape `(R, B, D)`. List[Tensor] list of tensors representing layer internal state generated in current invocation of ``infer``. Tensor updated memory elements, with shape `(M, B, D)`. """ ( layer_norm_utterance, layer_norm_right_context, ) = self._apply_pre_attention_layer_norm(utterance, right_context) rc_output, output_mems, output_state = self._apply_attention_infer( layer_norm_utterance, lengths, layer_norm_right_context, mems, state ) output_utterance, output_right_context = self._apply_post_attention_ffn( rc_output, utterance, right_context ) return output_utterance, output_right_context, output_state, output_mems class Emformer(torch.nn.Module): r"""Implements the Emformer architecture introduced in *Emformer: Efficient Memory Transformer Based Acoustic Model for Low Latency Streaming Speech Recognition* [:footcite:`shi2021emformer`]. Args: input_dim (int): input dimension. num_heads (int): number of attention heads in each Emformer layer. ffn_dim (int): hidden layer dimension of each Emformer layer's feedforward network. num_layers (int): number of Emformer layers to instantiate. dropout (float, optional): dropout probability. 
(Default: 0.0) activation (str, optional): activation function to use in each Emformer layer's feedforward network. Must be one of ("relu", "gelu", "silu"). (Default: "relu") left_context_length (int, optional): length of left context. (Default: 0) right_context_length (int, optional): length of right context. (Default: 0) segment_length (int, optional): length of each input segment. (Default: 128) max_memory_size (int, optional): maximum number of memory elements to use. (Default: 0) weight_init_scale_strategy (str, optional): per-layer weight initialization scaling strategy. Must be one of ("depthwise", "constant", ``None``). (Default: "depthwise") tanh_on_mem (bool, optional): if ``True``, applies tanh to memory elements. (Default: ``False``) negative_inf (float, optional): value to use for negative infinity in attention weights. (Default: -1e8) Examples: >>> emformer = Emformer(512, 8, 2048, 20) >>> input = torch.rand(128, 400, 512) # batch, num_frames, feature_dim >>> lengths = torch.randint(1, 200, (128,)) # batch >>> output = emformer(input, lengths) >>> output, lengths, states = emformer.infer(input, lengths, None) """ def __init__( self, input_dim: int, num_heads: int, ffn_dim: int, num_layers: int, dropout: float = 0.0, activation: str = "relu", left_context_length: int = 0, right_context_length: int = 0, segment_length: int = 128, max_memory_size: int = 0, weight_init_scale_strategy: str = "depthwise", tanh_on_mem: bool = False, negative_inf: float = -1e8, ): super().__init__() self.use_mem = max_memory_size > 0 self.memory_op = torch.nn.AvgPool1d( kernel_size=segment_length, stride=segment_length, ceil_mode=True, ) weight_init_gains = _get_weight_init_gains( weight_init_scale_strategy, num_layers ) self.emformer_layers = torch.nn.ModuleList( [ _EmformerLayer( input_dim, num_heads, ffn_dim, dropout=dropout, activation=activation, left_context_length=left_context_length, segment_length=segment_length, max_memory_size=max_memory_size, weight_init_gain=weight_init_gains[layer_idx], tanh_on_mem=tanh_on_mem, negative_inf=negative_inf, ) for layer_idx in range(num_layers) ] ) self.left_context_length = left_context_length self.right_context_length = right_context_length self.segment_length = segment_length self.max_memory_size = max_memory_size def _gen_right_context(self, input: torch.Tensor) -> torch.Tensor: right_context_blocks = [] T, B, D = input.shape num_segs = math.ceil((T - self.right_context_length) / self.segment_length) right_context_blocks = [] for seg_idx in range(num_segs - 1): start = (seg_idx + 1) * self.segment_length end = start + self.right_context_length right_context_blocks.append(input[start:end]) right_context_blocks.append(input[T - self.right_context_length:]) return torch.cat(right_context_blocks) def _gen_attention_mask_col_widths( self, seg_idx: int, utterance_length: int ) -> List[int]: num_segs = math.ceil(utterance_length / self.segment_length) rc = self.right_context_length lc = self.left_context_length rc_start = seg_idx * rc rc_end = rc_start + rc seg_start = max(seg_idx * self.segment_length - lc, 0) seg_end = min((seg_idx + 1) * self.segment_length, utterance_length) rc_length = self.right_context_length * num_segs if self.use_mem: m_start = max(seg_idx - self.max_memory_size, 0) mem_length = num_segs - 1 col_widths = [ m_start, # before memory seg_idx - m_start, # memory mem_length - seg_idx, # after memory rc_start, # before right context rc, # right context rc_length - rc_end, # after right context seg_start, # before query segment seg_end - 
seg_start, # query segment utterance_length - seg_end, # after query segment ] else: col_widths = [ rc_start, # before right context rc, # right context rc_length - rc_end, # after right context seg_start, # before query segment seg_end - seg_start, # query segment utterance_length - seg_end, # after query segment ] return col_widths def _gen_attention_mask(self, input: torch.Tensor) -> torch.Tensor: utterance_length, batch_size, _ = input.shape num_segs = math.ceil(utterance_length / self.segment_length) rc_mask = [] query_mask = [] summary_mask = [] if self.use_mem: num_cols = 9 # memory, right context, query segment rc_q_cols_mask = [idx in [1, 4, 7] for idx in range(num_cols)] # right context, query segment s_cols_mask = [idx in [4, 7] for idx in range(num_cols)] masks_to_concat = [rc_mask, query_mask, summary_mask] else: num_cols = 6 # right context, query segment rc_q_cols_mask = [idx in [1, 4] for idx in range(num_cols)] s_cols_mask = None masks_to_concat = [rc_mask, query_mask] for seg_idx in range(num_segs): col_widths = self._gen_attention_mask_col_widths(seg_idx, utterance_length) rc_mask_block = _gen_attention_mask_block( col_widths, rc_q_cols_mask, self.right_context_length, input.device ) rc_mask.append(rc_mask_block) query_mask_block = _gen_attention_mask_block( col_widths, rc_q_cols_mask, min( self.segment_length, utterance_length - seg_idx * self.segment_length, ), input.device, ) query_mask.append(query_mask_block) if s_cols_mask is not None: summary_mask_block = _gen_attention_mask_block( col_widths, s_cols_mask, 1, input.device ) summary_mask.append(summary_mask_block) attention_mask = ( 1 - torch.cat([torch.cat(mask) for mask in masks_to_concat]) ).to(torch.bool) return attention_mask def forward( self, input: torch.Tensor, lengths: torch.Tensor ) -> Tuple[torch.Tensor, torch.Tensor]: r"""Forward pass for training. B: batch size; T: number of frames; D: feature dimension of each frame. Args: input (torch.Tensor): utterance frames right-padded with right context frames, with shape `(B, T, D)`. lengths (torch.Tensor): with shape `(B,)` and i-th element representing number of valid frames for i-th batch element in ``input``. Returns: (Tensor, Tensor): Tensor output frames, with shape `(B, T - ``right_context_length``, D)`. Tensor output lengths, with shape `(B,)` and i-th element representing number of valid frames for i-th batch element in output frames. """ input = input.permute(1, 0, 2) right_context = self._gen_right_context(input) utterance = input[: input.size(0) - self.right_context_length] attention_mask = self._gen_attention_mask(utterance) mems = ( self.memory_op(utterance.permute(1, 2, 0)).permute(2, 0, 1)[:-1] if self.use_mem else torch.empty(0).to(dtype=input.dtype, device=input.device) ) output = utterance for layer in self.emformer_layers: output, right_context, mems = layer( output, lengths, right_context, mems, attention_mask ) return output.permute(1, 0, 2), lengths @torch.jit.export def infer( self, input: torch.Tensor, lengths: torch.Tensor, states: Optional[List[List[torch.Tensor]]] = None, ) -> Tuple[torch.Tensor, torch.Tensor, List[List[torch.Tensor]]]: r"""Forward pass for inference. B: batch size; T: number of frames; D: feature dimension of each frame. Args: input (torch.Tensor): utterance frames right-padded with right context frames, with shape `(B, T, D)`. lengths (torch.Tensor): with shape `(B,)` and i-th element representing number of valid frames for i-th batch element in ``input``. 
states (List[List[torch.Tensor]] or None, optional): list of lists of tensors representing Emformer internal state generated in preceding invocation of ``infer``. (Default: ``None``) Returns: (Tensor, Tensor, List[List[Tensor]]): Tensor output frames, with shape `(B, T - ``right_context_length``, D)`. Tensor output lengths, with shape `(B,)` and i-th element representing number of valid frames for i-th batch element in output frames. List[List[Tensor]] output states; list of lists of tensors representing Emformer internal state generated in current invocation of ``infer``. """ input = input.permute(1, 0, 2) right_context_start_idx = input.size(0) - self.right_context_length right_context = input[right_context_start_idx:] utterance = input[:right_context_start_idx] output_lengths = torch.clamp(lengths - self.right_context_length, min=0) mems = ( self.memory_op(utterance.permute(1, 2, 0)).permute(2, 0, 1) if self.use_mem else torch.empty(0).to(dtype=input.dtype, device=input.device) ) output = utterance output_states: List[List[torch.Tensor]] = [] for layer_idx, layer in enumerate(self.emformer_layers): output, right_context, output_state, mems = layer.infer( output, output_lengths, right_context, None if states is None else states[layer_idx], mems, ) output_states.append(output_state) return output.permute(1, 0, 2), output_lengths, output_states
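# A minimal streaming sketch for the module above, assuming the default
# no-memory configuration: the utterance is cut into chunks of
# ``segment_length + right_context_length`` frames and fed to ``infer`` one
# chunk at a time, carrying the per-layer ``states`` between calls. All sizes
# are illustrative.
def _sketch_streaming_inference():
    feature_dim, segment_length, right_context_length = 64, 16, 4
    emformer = Emformer(
        feature_dim,
        num_heads=4,
        ffn_dim=256,
        num_layers=2,
        segment_length=segment_length,
        left_context_length=8,
        right_context_length=right_context_length,
    ).eval()

    utterance = torch.rand(1, 80, feature_dim)  # (batch, frames, feature_dim)
    chunk_size = segment_length + right_context_length
    states = None
    outputs = []
    with torch.no_grad():
        for start in range(0, utterance.size(1) - right_context_length, segment_length):
            chunk = utterance[:, start:start + chunk_size]
            lengths = torch.tensor([chunk.size(1)])
            output, output_lengths, states = emformer.infer(chunk, lengths, states)
            outputs.append(output)
    return torch.cat(outputs, dim=1)  # right-context frames are consumed, not emitted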
from .emformer import Emformer __all__ = ["Emformer"]
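# The ``@torch.jit.export`` annotations on ``infer`` suggest the class is meant to
# be TorchScript-able (e.g. for on-device streaming). A small sketch with
# illustrative sizes; scripting support is assumed here, not guaranteed by this file.
def _sketch_scripted_emformer():
    import torch

    emformer = Emformer(input_dim=80, num_heads=4, ffn_dim=256, num_layers=2,
                        segment_length=16, right_context_length=4)
    scripted = torch.jit.script(emformer)
    chunk = torch.rand(1, 20, 80)  # segment_length + right_context_length frames
    lengths = torch.tensor([20])
    output, output_lengths, states = scripted.infer(chunk, lengths, None)
    return output.shape  # expected: (1, 16, 80)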
from . import kaldi __all__ = [ 'kaldi', ]
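# A brief usage sketch for the ``kaldi`` submodule re-exported here: the functions
# defined in it mirror Kaldi's feature-extraction tools and expect a
# ``(channel, time)`` waveform. The file name and parameter choices are illustrative.
def _sketch_kaldi_spectrogram():
    import torchaudio

    waveform, sample_rate = torchaudio.load("audio.wav")  # (channel, time)
    features = kaldi.spectrogram(
        waveform,
        sample_frequency=float(sample_rate),
        frame_length=25.0,  # milliseconds
        frame_shift=10.0,   # milliseconds
    )
    return features  # (num_frames, padded_window_size // 2 + 1)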
from typing import Tuple import math import torch from torch import Tensor import torchaudio __all__ = [ 'get_mel_banks', 'inverse_mel_scale', 'inverse_mel_scale_scalar', 'mel_scale', 'mel_scale_scalar', 'spectrogram', 'fbank', 'mfcc', 'vtln_warp_freq', 'vtln_warp_mel_freq', ] # numeric_limits<float>::epsilon() 1.1920928955078125e-07 EPSILON = torch.tensor(torch.finfo(torch.float).eps) # 1 milliseconds = 0.001 seconds MILLISECONDS_TO_SECONDS = 0.001 # window types HAMMING = 'hamming' HANNING = 'hanning' POVEY = 'povey' RECTANGULAR = 'rectangular' BLACKMAN = 'blackman' WINDOWS = [HAMMING, HANNING, POVEY, RECTANGULAR, BLACKMAN] def _get_epsilon(device, dtype): return EPSILON.to(device=device, dtype=dtype) def _next_power_of_2(x: int) -> int: r"""Returns the smallest power of 2 that is greater than x """ return 1 if x == 0 else 2 ** (x - 1).bit_length() def _get_strided(waveform: Tensor, window_size: int, window_shift: int, snip_edges: bool) -> Tensor: r"""Given a waveform (1D tensor of size ``num_samples``), it returns a 2D tensor (m, ``window_size``) representing how the window is shifted along the waveform. Each row is a frame. Args: waveform (Tensor): Tensor of size ``num_samples`` window_size (int): Frame length window_shift (int): Frame shift snip_edges (bool): If True, end effects will be handled by outputting only frames that completely fit in the file, and the number of frames depends on the frame_length. If False, the number of frames depends only on the frame_shift, and we reflect the data at the ends. Returns: Tensor: 2D tensor of size (m, ``window_size``) where each row is a frame """ assert waveform.dim() == 1 num_samples = waveform.size(0) strides = (window_shift * waveform.stride(0), waveform.stride(0)) if snip_edges: if num_samples < window_size: return torch.empty((0, 0), dtype=waveform.dtype, device=waveform.device) else: m = 1 + (num_samples - window_size) // window_shift else: reversed_waveform = torch.flip(waveform, [0]) m = (num_samples + (window_shift // 2)) // window_shift pad = window_size // 2 - window_shift // 2 pad_right = reversed_waveform if pad > 0: # torch.nn.functional.pad returns [2,1,0,1,2] for 'reflect' # but we want [2, 1, 0, 0, 1, 2] pad_left = reversed_waveform[-pad:] waveform = torch.cat((pad_left, waveform, pad_right), dim=0) else: # pad is negative so we want to trim the waveform at the front waveform = torch.cat((waveform[-pad:], pad_right), dim=0) sizes = (m, window_size) return waveform.as_strided(sizes, strides) def _feature_window_function(window_type: str, window_size: int, blackman_coeff: float, device: torch.device, dtype: int, ) -> Tensor: r"""Returns a window function with the given type and size """ if window_type == HANNING: return torch.hann_window(window_size, periodic=False, device=device, dtype=dtype) elif window_type == HAMMING: return torch.hamming_window(window_size, periodic=False, alpha=0.54, beta=0.46, device=device, dtype=dtype) elif window_type == POVEY: # like hanning but goes to zero at edges return torch.hann_window(window_size, periodic=False, device=device, dtype=dtype).pow(0.85) elif window_type == RECTANGULAR: return torch.ones(window_size, device=device, dtype=dtype) elif window_type == BLACKMAN: a = 2 * math.pi / (window_size - 1) window_function = torch.arange(window_size, device=device, dtype=dtype) # can't use torch.blackman_window as they use different coefficients return (blackman_coeff - 0.5 * torch.cos(a * window_function) + (0.5 - blackman_coeff) * torch.cos(2 * a * window_function)).to(device=device, 
dtype=dtype) else: raise Exception('Invalid window type ' + window_type) def _get_log_energy(strided_input: Tensor, epsilon: Tensor, energy_floor: float) -> Tensor: r"""Returns the log energy of size (m) for a strided_input (m,*) """ device, dtype = strided_input.device, strided_input.dtype log_energy = torch.max(strided_input.pow(2).sum(1), epsilon).log() # size (m) if energy_floor == 0.0: return log_energy return torch.max( log_energy, torch.tensor(math.log(energy_floor), device=device, dtype=dtype)) def _get_waveform_and_window_properties(waveform: Tensor, channel: int, sample_frequency: float, frame_shift: float, frame_length: float, round_to_power_of_two: bool, preemphasis_coefficient: float) -> Tuple[Tensor, int, int, int]: r"""Gets the waveform and window properties """ channel = max(channel, 0) assert channel < waveform.size(0), ('Invalid channel {} for size {}'.format(channel, waveform.size(0))) waveform = waveform[channel, :] # size (n) window_shift = int(sample_frequency * frame_shift * MILLISECONDS_TO_SECONDS) window_size = int(sample_frequency * frame_length * MILLISECONDS_TO_SECONDS) padded_window_size = _next_power_of_2(window_size) if round_to_power_of_two else window_size assert 2 <= window_size <= len( waveform), ('choose a window size {} that is [2, {}]' .format(window_size, len(waveform))) assert 0 < window_shift, '`window_shift` must be greater than 0' assert padded_window_size % 2 == 0, 'the padded `window_size` must be divisible by two.' \ ' use `round_to_power_of_two` or change `frame_length`' assert 0. <= preemphasis_coefficient <= 1.0, '`preemphasis_coefficient` must be between [0,1]' assert sample_frequency > 0, '`sample_frequency` must be greater than zero' return waveform, window_shift, window_size, padded_window_size def _get_window(waveform: Tensor, padded_window_size: int, window_size: int, window_shift: int, window_type: str, blackman_coeff: float, snip_edges: bool, raw_energy: bool, energy_floor: float, dither: float, remove_dc_offset: bool, preemphasis_coefficient: float) -> Tuple[Tensor, Tensor]: r"""Gets a window and its log energy Returns: (Tensor, Tensor): strided_input of size (m, ``padded_window_size``) and signal_log_energy of size (m) """ device, dtype = waveform.device, waveform.dtype epsilon = _get_epsilon(device, dtype) # size (m, window_size) strided_input = _get_strided(waveform, window_size, window_shift, snip_edges) if dither != 0.0: # Returns a random number strictly between 0 and 1 x = torch.max(epsilon, torch.rand(strided_input.shape, device=device, dtype=dtype)) rand_gauss = torch.sqrt(-2 * x.log()) * torch.cos(2 * math.pi * x) strided_input = strided_input + rand_gauss * dither if remove_dc_offset: # Subtract each row/frame by its mean row_means = torch.mean(strided_input, dim=1).unsqueeze(1) # size (m, 1) strided_input = strided_input - row_means if raw_energy: # Compute the log energy of each row/frame before applying preemphasis and # window function signal_log_energy = _get_log_energy(strided_input, epsilon, energy_floor) # size (m) if preemphasis_coefficient != 0.0: # strided_input[i,j] -= preemphasis_coefficient * strided_input[i, max(0, j-1)] for all i,j offset_strided_input = torch.nn.functional.pad( strided_input.unsqueeze(0), (1, 0), mode='replicate').squeeze(0) # size (m, window_size + 1) strided_input = strided_input - preemphasis_coefficient * offset_strided_input[:, :-1] # Apply window_function to each row/frame window_function = _feature_window_function( window_type, window_size, blackman_coeff, device, 
dtype).unsqueeze(0) # size (1, window_size) strided_input = strided_input * window_function # size (m, window_size) # Pad columns with zero until we reach size (m, padded_window_size) if padded_window_size != window_size: padding_right = padded_window_size - window_size strided_input = torch.nn.functional.pad( strided_input.unsqueeze(0), (0, padding_right), mode='constant', value=0).squeeze(0) # Compute energy after window function (not the raw one) if not raw_energy: signal_log_energy = _get_log_energy(strided_input, epsilon, energy_floor) # size (m) return strided_input, signal_log_energy def _subtract_column_mean(tensor: Tensor, subtract_mean: bool) -> Tensor: # subtracts the column mean of the tensor size (m, n) if subtract_mean=True # it returns size (m, n) if subtract_mean: col_means = torch.mean(tensor, dim=0).unsqueeze(0) tensor = tensor - col_means return tensor def spectrogram(waveform: Tensor, blackman_coeff: float = 0.42, channel: int = -1, dither: float = 0.0, energy_floor: float = 1.0, frame_length: float = 25.0, frame_shift: float = 10.0, min_duration: float = 0.0, preemphasis_coefficient: float = 0.97, raw_energy: bool = True, remove_dc_offset: bool = True, round_to_power_of_two: bool = True, sample_frequency: float = 16000.0, snip_edges: bool = True, subtract_mean: bool = False, window_type: str = POVEY) -> Tensor: r"""Create a spectrogram from a raw audio signal. This matches the input/output of Kaldi's compute-spectrogram-feats. Args: waveform (Tensor): Tensor of audio of size (c, n) where c is in the range [0,2) blackman_coeff (float, optional): Constant coefficient for generalized Blackman window. (Default: ``0.42``) channel (int, optional): Channel to extract (-1 -> expect mono, 0 -> left, 1 -> right) (Default: ``-1``) dither (float, optional): Dithering constant (0.0 means no dither). If you turn this off, you should set the energy_floor option, e.g. to 1.0 or 0.1 (Default: ``0.0``) energy_floor (float, optional): Floor on energy (absolute, not relative) in Spectrogram computation. Caution: this floor is applied to the zeroth component, representing the total signal energy. The floor on the individual spectrogram elements is fixed at std::numeric_limits<float>::epsilon(). (Default: ``1.0``) frame_length (float, optional): Frame length in milliseconds (Default: ``25.0``) frame_shift (float, optional): Frame shift in milliseconds (Default: ``10.0``) min_duration (float, optional): Minimum duration of segments to process (in seconds). (Default: ``0.0``) preemphasis_coefficient (float, optional): Coefficient for use in signal preemphasis (Default: ``0.97``) raw_energy (bool, optional): If True, compute energy before preemphasis and windowing (Default: ``True``) remove_dc_offset (bool, optional): Subtract mean from waveform on each frame (Default: ``True``) round_to_power_of_two (bool, optional): If True, round window size to power of two by zero-padding input to FFT. (Default: ``True``) sample_frequency (float, optional): Waveform data sample frequency (must match the waveform file, if specified there) (Default: ``16000.0``) snip_edges (bool, optional): If True, end effects will be handled by outputting only frames that completely fit in the file, and the number of frames depends on the frame_length. If False, the number of frames depends only on the frame_shift, and we reflect the data at the ends. (Default: ``True``) subtract_mean (bool, optional): Subtract mean of each feature file [CMS]; not recommended to do it this way. 
(Default: ``False``) window_type (str, optional): Type of window ('hamming'|'hanning'|'povey'|'rectangular'|'blackman') (Default: ``'povey'``) Returns: Tensor: A spectrogram identical to what Kaldi would output. The shape is (m, ``padded_window_size // 2 + 1``) where m is calculated in _get_strided """ device, dtype = waveform.device, waveform.dtype epsilon = _get_epsilon(device, dtype) waveform, window_shift, window_size, padded_window_size = _get_waveform_and_window_properties( waveform, channel, sample_frequency, frame_shift, frame_length, round_to_power_of_two, preemphasis_coefficient) if len(waveform) < min_duration * sample_frequency: # signal is too short return torch.empty(0) strided_input, signal_log_energy = _get_window( waveform, padded_window_size, window_size, window_shift, window_type, blackman_coeff, snip_edges, raw_energy, energy_floor, dither, remove_dc_offset, preemphasis_coefficient) # size (m, padded_window_size // 2 + 1, 2) fft = torch.fft.rfft(strided_input) # Convert the FFT into a power spectrum power_spectrum = torch.max(fft.abs().pow(2.), epsilon).log() # size (m, padded_window_size // 2 + 1) power_spectrum[:, 0] = signal_log_energy power_spectrum = _subtract_column_mean(power_spectrum, subtract_mean) return power_spectrum def inverse_mel_scale_scalar(mel_freq: float) -> float: return 700.0 * (math.exp(mel_freq / 1127.0) - 1.0) def inverse_mel_scale(mel_freq: Tensor) -> Tensor: return 700.0 * ((mel_freq / 1127.0).exp() - 1.0) def mel_scale_scalar(freq: float) -> float: return 1127.0 * math.log(1.0 + freq / 700.0) def mel_scale(freq: Tensor) -> Tensor: return 1127.0 * (1.0 + freq / 700.0).log() def vtln_warp_freq(vtln_low_cutoff: float, vtln_high_cutoff: float, low_freq: float, high_freq: float, vtln_warp_factor: float, freq: Tensor) -> Tensor: r"""This computes a VTLN warping function that is not the same as HTK's one, but has similar inputs (this function has the advantage of never producing empty bins). This function computes a warp function F(freq), defined between low_freq and high_freq inclusive, with the following properties: F(low_freq) == low_freq F(high_freq) == high_freq The function is continuous and piecewise linear with two inflection points. The lower inflection point (measured in terms of the unwarped frequency) is at frequency l, determined as described below. The higher inflection point is at a frequency h, determined as described below. If l <= f <= h, then F(f) = f/vtln_warp_factor. If the higher inflection point (measured in terms of the unwarped frequency) is at h, then max(h, F(h)) == vtln_high_cutoff. Since (by the last point) F(h) == h/vtln_warp_factor, then max(h, h/vtln_warp_factor) == vtln_high_cutoff, so h = vtln_high_cutoff / max(1, 1/vtln_warp_factor). = vtln_high_cutoff * min(1, vtln_warp_factor). 
If the lower inflection point (measured in terms of the unwarped frequency) is at l, then min(l, F(l)) == vtln_low_cutoff This implies that l = vtln_low_cutoff / min(1, 1/vtln_warp_factor) = vtln_low_cutoff * max(1, vtln_warp_factor) Args: vtln_low_cutoff (float): Lower frequency cutoffs for VTLN vtln_high_cutoff (float): Upper frequency cutoffs for VTLN low_freq (float): Lower frequency cutoffs in mel computation high_freq (float): Upper frequency cutoffs in mel computation vtln_warp_factor (float): Vtln warp factor freq (Tensor): given frequency in Hz Returns: Tensor: Freq after vtln warp """ assert vtln_low_cutoff > low_freq, 'be sure to set the vtln_low option higher than low_freq' assert vtln_high_cutoff < high_freq, 'be sure to set the vtln_high option lower than high_freq [or negative]' l = vtln_low_cutoff * max(1.0, vtln_warp_factor) h = vtln_high_cutoff * min(1.0, vtln_warp_factor) scale = 1.0 / vtln_warp_factor Fl = scale * l # F(l) Fh = scale * h # F(h) assert l > low_freq and h < high_freq # slope of left part of the 3-piece linear function scale_left = (Fl - low_freq) / (l - low_freq) # [slope of center part is just "scale"] # slope of right part of the 3-piece linear function scale_right = (high_freq - Fh) / (high_freq - h) res = torch.empty_like(freq) outside_low_high_freq = torch.lt(freq, low_freq) | torch.gt(freq, high_freq) # freq < low_freq || freq > high_freq before_l = torch.lt(freq, l) # freq < l before_h = torch.lt(freq, h) # freq < h after_h = torch.ge(freq, h) # freq >= h # order of operations matter here (since there is overlapping frequency regions) res[after_h] = high_freq + scale_right * (freq[after_h] - high_freq) res[before_h] = scale * freq[before_h] res[before_l] = low_freq + scale_left * (freq[before_l] - low_freq) res[outside_low_high_freq] = freq[outside_low_high_freq] return res def vtln_warp_mel_freq(vtln_low_cutoff: float, vtln_high_cutoff: float, low_freq, high_freq: float, vtln_warp_factor: float, mel_freq: Tensor) -> Tensor: r""" Args: vtln_low_cutoff (float): Lower frequency cutoffs for VTLN vtln_high_cutoff (float): Upper frequency cutoffs for VTLN low_freq (float): Lower frequency cutoffs in mel computation high_freq (float): Upper frequency cutoffs in mel computation vtln_warp_factor (float): Vtln warp factor mel_freq (Tensor): Given frequency in Mel Returns: Tensor: ``mel_freq`` after vtln warp """ return mel_scale(vtln_warp_freq(vtln_low_cutoff, vtln_high_cutoff, low_freq, high_freq, vtln_warp_factor, inverse_mel_scale(mel_freq))) def get_mel_banks(num_bins: int, window_length_padded: int, sample_freq: float, low_freq: float, high_freq: float, vtln_low: float, vtln_high: float, vtln_warp_factor: float) -> Tuple[Tensor, Tensor]: """ Returns: (Tensor, Tensor): The tuple consists of ``bins`` (which is melbank of size (``num_bins``, ``num_fft_bins``)) and ``center_freqs`` (which is center frequencies of bins of size (``num_bins``)). """ assert num_bins > 3, 'Must have at least 3 mel bins' assert window_length_padded % 2 == 0 num_fft_bins = window_length_padded / 2 nyquist = 0.5 * sample_freq if high_freq <= 0.0: high_freq += nyquist assert (0.0 <= low_freq < nyquist) and (0.0 < high_freq <= nyquist) and (low_freq < high_freq), \ ('Bad values in options: low-freq {} and high-freq {} vs. 
nyquist {}'.format(low_freq, high_freq, nyquist)) # fft-bin width [think of it as Nyquist-freq / half-window-length] fft_bin_width = sample_freq / window_length_padded mel_low_freq = mel_scale_scalar(low_freq) mel_high_freq = mel_scale_scalar(high_freq) # divide by num_bins+1 in next line because of end-effects where the bins # spread out to the sides. mel_freq_delta = (mel_high_freq - mel_low_freq) / (num_bins + 1) if vtln_high < 0.0: vtln_high += nyquist assert vtln_warp_factor == 1.0 or ((low_freq < vtln_low < high_freq) and (0.0 < vtln_high < high_freq) and (vtln_low < vtln_high)), \ ('Bad values in options: vtln-low {} and vtln-high {}, versus ' 'low-freq {} and high-freq {}'.format(vtln_low, vtln_high, low_freq, high_freq)) bin = torch.arange(num_bins).unsqueeze(1) left_mel = mel_low_freq + bin * mel_freq_delta # size(num_bins, 1) center_mel = mel_low_freq + (bin + 1.0) * mel_freq_delta # size(num_bins, 1) right_mel = mel_low_freq + (bin + 2.0) * mel_freq_delta # size(num_bins, 1) if vtln_warp_factor != 1.0: left_mel = vtln_warp_mel_freq(vtln_low, vtln_high, low_freq, high_freq, vtln_warp_factor, left_mel) center_mel = vtln_warp_mel_freq(vtln_low, vtln_high, low_freq, high_freq, vtln_warp_factor, center_mel) right_mel = vtln_warp_mel_freq(vtln_low, vtln_high, low_freq, high_freq, vtln_warp_factor, right_mel) center_freqs = inverse_mel_scale(center_mel) # size (num_bins) # size(1, num_fft_bins) mel = mel_scale(fft_bin_width * torch.arange(num_fft_bins)).unsqueeze(0) # size (num_bins, num_fft_bins) up_slope = (mel - left_mel) / (center_mel - left_mel) down_slope = (right_mel - mel) / (right_mel - center_mel) if vtln_warp_factor == 1.0: # left_mel < center_mel < right_mel so we can min the two slopes and clamp negative values bins = torch.max(torch.zeros(1), torch.min(up_slope, down_slope)) else: # warping can move the order of left_mel, center_mel, right_mel anywhere bins = torch.zeros_like(up_slope) up_idx = torch.gt(mel, left_mel) & torch.le(mel, center_mel) # left_mel < mel <= center_mel down_idx = torch.gt(mel, center_mel) & torch.lt(mel, right_mel) # center_mel < mel < right_mel bins[up_idx] = up_slope[up_idx] bins[down_idx] = down_slope[down_idx] return bins, center_freqs def fbank(waveform: Tensor, blackman_coeff: float = 0.42, channel: int = -1, dither: float = 0.0, energy_floor: float = 1.0, frame_length: float = 25.0, frame_shift: float = 10.0, high_freq: float = 0.0, htk_compat: bool = False, low_freq: float = 20.0, min_duration: float = 0.0, num_mel_bins: int = 23, preemphasis_coefficient: float = 0.97, raw_energy: bool = True, remove_dc_offset: bool = True, round_to_power_of_two: bool = True, sample_frequency: float = 16000.0, snip_edges: bool = True, subtract_mean: bool = False, use_energy: bool = False, use_log_fbank: bool = True, use_power: bool = True, vtln_high: float = -500.0, vtln_low: float = 100.0, vtln_warp: float = 1.0, window_type: str = POVEY) -> Tensor: r"""Create a fbank from a raw audio signal. This matches the input/output of Kaldi's compute-fbank-feats. Args: waveform (Tensor): Tensor of audio of size (c, n) where c is in the range [0,2) blackman_coeff (float, optional): Constant coefficient for generalized Blackman window. (Default: ``0.42``) channel (int, optional): Channel to extract (-1 -> expect mono, 0 -> left, 1 -> right) (Default: ``-1``) dither (float, optional): Dithering constant (0.0 means no dither). If you turn this off, you should set the energy_floor option, e.g. 
to 1.0 or 0.1 (Default: ``0.0``) energy_floor (float, optional): Floor on energy (absolute, not relative) in Spectrogram computation. Caution: this floor is applied to the zeroth component, representing the total signal energy. The floor on the individual spectrogram elements is fixed at std::numeric_limits<float>::epsilon(). (Default: ``1.0``) frame_length (float, optional): Frame length in milliseconds (Default: ``25.0``) frame_shift (float, optional): Frame shift in milliseconds (Default: ``10.0``) high_freq (float, optional): High cutoff frequency for mel bins (if <= 0, offset from Nyquist) (Default: ``0.0``) htk_compat (bool, optional): If true, put energy last. Warning: not sufficient to get HTK compatible features (need to change other parameters). (Default: ``False``) low_freq (float, optional): Low cutoff frequency for mel bins (Default: ``20.0``) min_duration (float, optional): Minimum duration of segments to process (in seconds). (Default: ``0.0``) num_mel_bins (int, optional): Number of triangular mel-frequency bins (Default: ``23``) preemphasis_coefficient (float, optional): Coefficient for use in signal preemphasis (Default: ``0.97``) raw_energy (bool, optional): If True, compute energy before preemphasis and windowing (Default: ``True``) remove_dc_offset (bool, optional): Subtract mean from waveform on each frame (Default: ``True``) round_to_power_of_two (bool, optional): If True, round window size to power of two by zero-padding input to FFT. (Default: ``True``) sample_frequency (float, optional): Waveform data sample frequency (must match the waveform file, if specified there) (Default: ``16000.0``) snip_edges (bool, optional): If True, end effects will be handled by outputting only frames that completely fit in the file, and the number of frames depends on the frame_length. If False, the number of frames depends only on the frame_shift, and we reflect the data at the ends. (Default: ``True``) subtract_mean (bool, optional): Subtract mean of each feature file [CMS]; not recommended to do it this way. (Default: ``False``) use_energy (bool, optional): Add an extra dimension with energy to the FBANK output. (Default: ``False``) use_log_fbank (bool, optional):If true, produce log-filterbank, else produce linear. (Default: ``True``) use_power (bool, optional): If true, use power, else use magnitude. (Default: ``True``) vtln_high (float, optional): High inflection point in piecewise linear VTLN warping function (if negative, offset from high-mel-freq (Default: ``-500.0``) vtln_low (float, optional): Low inflection point in piecewise linear VTLN warping function (Default: ``100.0``) vtln_warp (float, optional): Vtln warp factor (only applicable if vtln_map not specified) (Default: ``1.0``) window_type (str, optional): Type of window ('hamming'|'hanning'|'povey'|'rectangular'|'blackman') (Default: ``'povey'``) Returns: Tensor: A fbank identical to what Kaldi would output. 
The shape is (m, ``num_mel_bins + use_energy``) where m is calculated in _get_strided """ device, dtype = waveform.device, waveform.dtype waveform, window_shift, window_size, padded_window_size = _get_waveform_and_window_properties( waveform, channel, sample_frequency, frame_shift, frame_length, round_to_power_of_two, preemphasis_coefficient) if len(waveform) < min_duration * sample_frequency: # signal is too short return torch.empty(0, device=device, dtype=dtype) # strided_input, size (m, padded_window_size) and signal_log_energy, size (m) strided_input, signal_log_energy = _get_window( waveform, padded_window_size, window_size, window_shift, window_type, blackman_coeff, snip_edges, raw_energy, energy_floor, dither, remove_dc_offset, preemphasis_coefficient) # size (m, padded_window_size // 2 + 1) spectrum = torch.fft.rfft(strided_input).abs() if use_power: spectrum = spectrum.pow(2.) # size (num_mel_bins, padded_window_size // 2) mel_energies, _ = get_mel_banks(num_mel_bins, padded_window_size, sample_frequency, low_freq, high_freq, vtln_low, vtln_high, vtln_warp) mel_energies = mel_energies.to(device=device, dtype=dtype) # pad right column with zeros and add dimension, size (num_mel_bins, padded_window_size // 2 + 1) mel_energies = torch.nn.functional.pad(mel_energies, (0, 1), mode='constant', value=0) # sum with mel fiterbanks over the power spectrum, size (m, num_mel_bins) mel_energies = torch.mm(spectrum, mel_energies.T) if use_log_fbank: # avoid log of zero (which should be prevented anyway by dithering) mel_energies = torch.max(mel_energies, _get_epsilon(device, dtype)).log() # if use_energy then add it as the last column for htk_compat == true else first column if use_energy: signal_log_energy = signal_log_energy.unsqueeze(1) # size (m, 1) # returns size (m, num_mel_bins + 1) if htk_compat: mel_energies = torch.cat((mel_energies, signal_log_energy), dim=1) else: mel_energies = torch.cat((signal_log_energy, mel_energies), dim=1) mel_energies = _subtract_column_mean(mel_energies, subtract_mean) return mel_energies def _get_dct_matrix(num_ceps: int, num_mel_bins: int) -> Tensor: # returns a dct matrix of size (num_mel_bins, num_ceps) # size (num_mel_bins, num_mel_bins) dct_matrix = torchaudio.functional.create_dct(num_mel_bins, num_mel_bins, 'ortho') # kaldi expects the first cepstral to be weighted sum of factor sqrt(1/num_mel_bins) # this would be the first column in the dct_matrix for torchaudio as it expects a # right multiply (which would be the first column of the kaldi's dct_matrix as kaldi # expects a left multiply e.g. dct_matrix * vector). dct_matrix[:, 0] = math.sqrt(1 / float(num_mel_bins)) dct_matrix = dct_matrix[:, :num_ceps] return dct_matrix def _get_lifter_coeffs(num_ceps: int, cepstral_lifter: float) -> Tensor: # returns size (num_ceps) # Compute liftering coefficients (scaling on cepstral coeffs) # coeffs are numbered slightly differently from HTK: the zeroth index is C0, which is not affected. 
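    # i.e. lifter(i) = 1 + (cepstral_lifter / 2) * sin(pi * i / cepstral_lifter) for i in [0, num_ceps),
    # which is exactly what the next two lines compute.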
i = torch.arange(num_ceps) return 1.0 + 0.5 * cepstral_lifter * torch.sin(math.pi * i / cepstral_lifter) def mfcc( waveform: Tensor, blackman_coeff: float = 0.42, cepstral_lifter: float = 22.0, channel: int = -1, dither: float = 0.0, energy_floor: float = 1.0, frame_length: float = 25.0, frame_shift: float = 10.0, high_freq: float = 0.0, htk_compat: bool = False, low_freq: float = 20.0, num_ceps: int = 13, min_duration: float = 0.0, num_mel_bins: int = 23, preemphasis_coefficient: float = 0.97, raw_energy: bool = True, remove_dc_offset: bool = True, round_to_power_of_two: bool = True, sample_frequency: float = 16000.0, snip_edges: bool = True, subtract_mean: bool = False, use_energy: bool = False, vtln_high: float = -500.0, vtln_low: float = 100.0, vtln_warp: float = 1.0, window_type: str = POVEY) -> Tensor: r"""Create a mfcc from a raw audio signal. This matches the input/output of Kaldi's compute-mfcc-feats. Args: waveform (Tensor): Tensor of audio of size (c, n) where c is in the range [0,2) blackman_coeff (float, optional): Constant coefficient for generalized Blackman window. (Default: ``0.42``) cepstral_lifter (float, optional): Constant that controls scaling of MFCCs (Default: ``22.0``) channel (int, optional): Channel to extract (-1 -> expect mono, 0 -> left, 1 -> right) (Default: ``-1``) dither (float, optional): Dithering constant (0.0 means no dither). If you turn this off, you should set the energy_floor option, e.g. to 1.0 or 0.1 (Default: ``0.0``) energy_floor (float, optional): Floor on energy (absolute, not relative) in Spectrogram computation. Caution: this floor is applied to the zeroth component, representing the total signal energy. The floor on the individual spectrogram elements is fixed at std::numeric_limits<float>::epsilon(). (Default: ``1.0``) frame_length (float, optional): Frame length in milliseconds (Default: ``25.0``) frame_shift (float, optional): Frame shift in milliseconds (Default: ``10.0``) high_freq (float, optional): High cutoff frequency for mel bins (if <= 0, offset from Nyquist) (Default: ``0.0``) htk_compat (bool, optional): If true, put energy last. Warning: not sufficient to get HTK compatible features (need to change other parameters). (Default: ``False``) low_freq (float, optional): Low cutoff frequency for mel bins (Default: ``20.0``) num_ceps (int, optional): Number of cepstra in MFCC computation (including C0) (Default: ``13``) min_duration (float, optional): Minimum duration of segments to process (in seconds). (Default: ``0.0``) num_mel_bins (int, optional): Number of triangular mel-frequency bins (Default: ``23``) preemphasis_coefficient (float, optional): Coefficient for use in signal preemphasis (Default: ``0.97``) raw_energy (bool, optional): If True, compute energy before preemphasis and windowing (Default: ``True``) remove_dc_offset (bool, optional): Subtract mean from waveform on each frame (Default: ``True``) round_to_power_of_two (bool, optional): If True, round window size to power of two by zero-padding input to FFT. (Default: ``True``) sample_frequency (float, optional): Waveform data sample frequency (must match the waveform file, if specified there) (Default: ``16000.0``) snip_edges (bool, optional): If True, end effects will be handled by outputting only frames that completely fit in the file, and the number of frames depends on the frame_length. If False, the number of frames depends only on the frame_shift, and we reflect the data at the ends. 
(Default: ``True``) subtract_mean (bool, optional): Subtract mean of each feature file [CMS]; not recommended to do it this way. (Default: ``False``) use_energy (bool, optional): Add an extra dimension with energy to the FBANK output. (Default: ``False``) vtln_high (float, optional): High inflection point in piecewise linear VTLN warping function (if negative, offset from high-mel-freq (Default: ``-500.0``) vtln_low (float, optional): Low inflection point in piecewise linear VTLN warping function (Default: ``100.0``) vtln_warp (float, optional): Vtln warp factor (only applicable if vtln_map not specified) (Default: ``1.0``) window_type (str, optional): Type of window ('hamming'|'hanning'|'povey'|'rectangular'|'blackman') (Default: ``"povey"``) Returns: Tensor: A mfcc identical to what Kaldi would output. The shape is (m, ``num_ceps``) where m is calculated in _get_strided """ assert num_ceps <= num_mel_bins, 'num_ceps cannot be larger than num_mel_bins: %d vs %d' % (num_ceps, num_mel_bins) device, dtype = waveform.device, waveform.dtype # The mel_energies should not be squared (use_power=True), not have mean subtracted # (subtract_mean=False), and use log (use_log_fbank=True). # size (m, num_mel_bins + use_energy) feature = fbank(waveform=waveform, blackman_coeff=blackman_coeff, channel=channel, dither=dither, energy_floor=energy_floor, frame_length=frame_length, frame_shift=frame_shift, high_freq=high_freq, htk_compat=htk_compat, low_freq=low_freq, min_duration=min_duration, num_mel_bins=num_mel_bins, preemphasis_coefficient=preemphasis_coefficient, raw_energy=raw_energy, remove_dc_offset=remove_dc_offset, round_to_power_of_two=round_to_power_of_two, sample_frequency=sample_frequency, snip_edges=snip_edges, subtract_mean=False, use_energy=use_energy, use_log_fbank=True, use_power=True, vtln_high=vtln_high, vtln_low=vtln_low, vtln_warp=vtln_warp, window_type=window_type) if use_energy: # size (m) signal_log_energy = feature[:, num_mel_bins if htk_compat else 0] # offset is 0 if htk_compat==True else 1 mel_offset = int(not htk_compat) feature = feature[:, mel_offset:(num_mel_bins + mel_offset)] # size (num_mel_bins, num_ceps) dct_matrix = _get_dct_matrix(num_ceps, num_mel_bins).to(dtype=dtype, device=device) # size (m, num_ceps) feature = feature.matmul(dct_matrix) if cepstral_lifter != 0.0: # size (1, num_ceps) lifter_coeffs = _get_lifter_coeffs(num_ceps, cepstral_lifter).unsqueeze(0) feature *= lifter_coeffs.to(device=device, dtype=dtype) # if use_energy then replace the last column for htk_compat == true else first column if use_energy: feature[:, 0] = signal_log_energy if htk_compat: energy = feature[:, 0].unsqueeze(1) # size (m, 1) feature = feature[:, 1:] # size (m, num_ceps - 1) if not use_energy: # scale on C0 (actually removing a scale we previously added that's # part of one common definition of the cosine transform.) energy *= math.sqrt(2) feature = torch.cat((feature, energy), dim=1) feature = _subtract_column_mean(feature, subtract_mean) return feature
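# A minimal usage sketch of the Kaldi-compatible functions defined above
# (``spectrogram``, ``fbank``, ``mfcc``), assuming a hypothetical mono 16 kHz
# recording saved as ``speech.wav``:
#
#   >>> waveform, sample_rate = torchaudio.load("speech.wav")                 # size (1, n)
#   >>> spec = spectrogram(waveform, sample_frequency=sample_rate)            # size (m, padded_window_size // 2 + 1)
#   >>> fb = fbank(waveform, num_mel_bins=23, sample_frequency=sample_rate)   # size (m, 23)
#   >>> cep = mfcc(waveform, num_ceps=13, sample_frequency=sample_rate)       # size (m, 13)
#
# Each call returns one feature row per frame, mirroring Kaldi's
# compute-spectrogram-feats / compute-fbank-feats / compute-mfcc-feats as the
# docstrings above state.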
import os from typing import List, Tuple, Optional import torch import torchaudio from torchaudio._internal import module_utils as _mod_utils from torchaudio.utils.sox_utils import list_effects @_mod_utils.requires_sox() def init_sox_effects(): """Initialize resources required to use sox effects. Note: You do not need to call this function manually. It is called automatically. Once initialized, you do not need to call this function again across the multiple uses of sox effects though it is safe to do so as long as :func:`shutdown_sox_effects` is not called yet. Once :func:`shutdown_sox_effects` is called, you can no longer use SoX effects and initializing again will result in error. """ torch.ops.torchaudio.sox_effects_initialize_sox_effects() @_mod_utils.requires_sox() def shutdown_sox_effects(): """Clean up resources required to use sox effects. Note: You do not need to call this function manually. It is called automatically. It is safe to call this function multiple times. Once :py:func:`shutdown_sox_effects` is called, you can no longer use SoX effects and initializing again will result in error. """ torch.ops.torchaudio.sox_effects_shutdown_sox_effects() @_mod_utils.requires_sox() def effect_names() -> List[str]: """Gets list of valid sox effect names Returns: List[str]: list of available effect names. Example >>> torchaudio.sox_effects.effect_names() ['allpass', 'band', 'bandpass', ... ] """ return list(list_effects().keys()) @_mod_utils.requires_sox() def apply_effects_tensor( tensor: torch.Tensor, sample_rate: int, effects: List[List[str]], channels_first: bool = True, ) -> Tuple[torch.Tensor, int]: """Apply sox effects to given Tensor Note: This function only works on CPU Tensors. This function works in the way very similar to ``sox`` command, however there are slight differences. For example, ``sox`` command adds certain effects automatically (such as ``rate`` effect after ``speed`` and ``pitch`` and other effects), but this function does only applies the given effects. (Therefore, to actually apply ``speed`` effect, you also need to give ``rate`` effect with desired sampling rate.). Args: tensor (torch.Tensor): Input 2D CPU Tensor. sample_rate (int): Sample rate effects (List[List[str]]): List of effects. channels_first (bool, optional): Indicates if the input Tensor's dimension is `[channels, time]` or `[time, channels]` Returns: (Tensor, int): Resulting Tensor and sample rate. The resulting Tensor has the same ``dtype`` as the input Tensor, and the same channels order. The shape of the Tensor can be different based on the effects applied. Sample rate can also be different based on the effects applied. Example - Basic usage >>> >>> # Defines the effects to apply >>> effects = [ ... ['gain', '-n'], # normalises to 0dB ... ['pitch', '5'], # 5 cent pitch shift ... ['rate', '8000'], # resample to 8000 Hz ... ] >>> >>> # Generate pseudo wave: >>> # normalized, channels first, 2ch, sampling rate 16000, 1 second >>> sample_rate = 16000 >>> waveform = 2 * torch.rand([2, sample_rate * 1]) - 1 >>> waveform.shape torch.Size([2, 16000]) >>> waveform tensor([[ 0.3138, 0.7620, -0.9019, ..., -0.7495, -0.4935, 0.5442], [-0.0832, 0.0061, 0.8233, ..., -0.5176, -0.9140, -0.2434]]) >>> >>> # Apply effects >>> waveform, sample_rate = apply_effects_tensor( ... wave_form, sample_rate, effects, channels_first=True) >>> >>> # Check the result >>> # The new waveform is sampling rate 8000, 1 second. 
        >>> # normalization and channel order are preserved
        >>> waveform.shape
        torch.Size([2, 8000])
        >>> waveform
        tensor([[ 0.5054, -0.5518, -0.4800,  ..., -0.0076,  0.0096, -0.0110],
                [ 0.1331,  0.0436, -0.3783,  ..., -0.0035,  0.0012,  0.0008]])
        >>> sample_rate
        8000

    Example - TorchScript-able transform
        >>>
        >>> # Use `apply_effects_tensor` in `torch.nn.Module` and dump it to file,
        >>> # then run sox effect via TorchScript runtime.
        >>>
        >>> class SoxEffectTransform(torch.nn.Module):
        ...     effects: List[List[str]]
        ...
        ...     def __init__(self, effects: List[List[str]]):
        ...         super().__init__()
        ...         self.effects = effects
        ...
        ...     def forward(self, tensor: torch.Tensor, sample_rate: int):
        ...         return sox_effects.apply_effects_tensor(
        ...             tensor, sample_rate, self.effects)
        ...
        ...
        >>> # Create transform object
        >>> effects = [
        ...     ["lowpass", "-1", "300"],  # apply single-pole lowpass filter
        ...     ["rate", "8000"],  # change sample rate to 8000
        ... ]
        >>> transform = SoxEffectTransform(effects)
        >>>
        >>> # Dump it to file and load
        >>> path = 'sox_effect.zip'
        >>> torch.jit.script(transform).save(path)
        >>> transform = torch.jit.load(path)
        >>>
        >>> # Run transform
        >>> waveform, input_sample_rate = torchaudio.load("input.wav")
        >>> waveform, sample_rate = transform(waveform, input_sample_rate)
        >>> assert sample_rate == 8000
    """
    return torch.ops.torchaudio.sox_effects_apply_effects_tensor(
        tensor, sample_rate, effects, channels_first)


@_mod_utils.requires_sox()
def apply_effects_file(
        path: str,
        effects: List[List[str]],
        normalize: bool = True,
        channels_first: bool = True,
        format: Optional[str] = None,
) -> Tuple[torch.Tensor, int]:
    """Apply sox effects to the audio file and load the resulting data as Tensor

    Note:
        This function works in a way very similar to the ``sox`` command, however there are slight
        differences. For example, the ``sox`` command adds certain effects automatically (such as the
        ``rate`` effect after ``speed``, ``pitch``, etc.), but this function only applies the given
        effects. Therefore, to actually apply the ``speed`` effect, you also need to give the ``rate``
        effect with the desired sampling rate, because internally, the ``speed`` effect only alters the
        sampling rate and leaves the samples untouched.

    Args:
        path (path-like object or file-like object):
            Source of audio data. When the function is not compiled by TorchScript
            (e.g. ``torch.jit.script``), the following types are accepted:

                  * ``path-like``: file path
                  * ``file-like``: Object with ``read(size: int) -> bytes`` method,
                    which returns byte string of at most ``size`` length.

            When the function is compiled by TorchScript, only ``str`` type is allowed.

            Note: This argument is intentionally annotated as ``str`` only for
            TorchScript compiler compatibility.
        effects (List[List[str]]): List of effects.
        normalize (bool, optional):
            When ``True``, this function always returns ``float32``, and sample values are
            normalized to ``[-1.0, 1.0]``.
            If the input file is an integer WAV, giving ``False`` will change the resulting Tensor
            type to integer type. This argument has no effect for formats other
            than integer WAV type.
        channels_first (bool, optional): When True, the returned Tensor has dimension
            `[channel, time]`.
            Otherwise, the returned Tensor's dimension is `[time, channel]`.
        format (str or None, optional):
            Override the format detection with the given format.
            Providing the argument might help when libsox cannot infer the format
            from the header or extension.

    Returns:
        (Tensor, int): Resulting Tensor and sample rate.
        If ``normalize=True``, the resulting Tensor is always ``float32`` type.
If ``normalize=False`` and the input audio file is of integer WAV file, then the resulting Tensor has corresponding integer type. (Note 24 bit integer type is not supported) If ``channels_first=True``, the resulting Tensor has dimension `[channel, time]`, otherwise `[time, channel]`. Example - Basic usage >>> >>> # Defines the effects to apply >>> effects = [ ... ['gain', '-n'], # normalises to 0dB ... ['pitch', '5'], # 5 cent pitch shift ... ['rate', '8000'], # resample to 8000 Hz ... ] >>> >>> # Apply effects and load data with channels_first=True >>> waveform, sample_rate = apply_effects_file("data.wav", effects, channels_first=True) >>> >>> # Check the result >>> waveform.shape torch.Size([2, 8000]) >>> waveform tensor([[ 5.1151e-03, 1.8073e-02, 2.2188e-02, ..., 1.0431e-07, -1.4761e-07, 1.8114e-07], [-2.6924e-03, 2.1860e-03, 1.0650e-02, ..., 6.4122e-07, -5.6159e-07, 4.8103e-07]]) >>> sample_rate 8000 Example - Apply random speed perturbation to dataset >>> >>> # Load data from file, apply random speed perturbation >>> class RandomPerturbationFile(torch.utils.data.Dataset): ... \"\"\"Given flist, apply random speed perturbation ... ... Suppose all the input files are at least one second long. ... \"\"\" ... def __init__(self, flist: List[str], sample_rate: int): ... super().__init__() ... self.flist = flist ... self.sample_rate = sample_rate ... ... def __getitem__(self, index): ... speed = 0.5 + 1.5 * random.randn() ... effects = [ ... ['gain', '-n', '-10'], # apply 10 db attenuation ... ['remix', '-'], # merge all the channels ... ['speed', f'{speed:.5f}'], # duration is now 0.5 ~ 2.0 seconds. ... ['rate', f'{self.sample_rate}'], ... ['pad', '0', '1.5'], # add 1.5 seconds silence at the end ... ['trim', '0', '2'], # get the first 2 seconds ... ] ... waveform, _ = torchaudio.sox_effects.apply_effects_file( ... self.flist[index], effects) ... return waveform ... ... def __len__(self): ... return len(self.flist) ... >>> dataset = RandomPerturbationFile(file_list, sample_rate=8000) >>> loader = torch.utils.data.DataLoader(dataset, batch_size=32) >>> for batch in loader: >>> pass """ if not torch.jit.is_scripting(): if hasattr(path, 'read'): return torchaudio._torchaudio.apply_effects_fileobj( path, effects, normalize, channels_first, format) path = os.fspath(path) return torch.ops.torchaudio.sox_effects_apply_effects_file( path, effects, normalize, channels_first, format)
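# A minimal sketch, assuming a hypothetical ``example.wav`` and a sox-enabled
# build, of the file-like-object input that the ``path`` argument of
# ``apply_effects_file`` accepts (described in the docstring above but not
# illustrated there):
#
#   >>> import io
#   >>> effects = [['gain', '-n'], ['rate', '8000']]
#   >>> with open('example.wav', 'rb') as f:
#   ...     fileobj = io.BytesIO(f.read())
#   >>> waveform, sample_rate = apply_effects_file(fileobj, effects, format='wav')
#   >>> sample_rate
#   8000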
from torchaudio._internal import module_utils as _mod_utils from .sox_effects import ( init_sox_effects, shutdown_sox_effects, effect_names, apply_effects_tensor, apply_effects_file, ) if _mod_utils.is_sox_available(): import atexit init_sox_effects() atexit.register(shutdown_sox_effects) __all__ = [ 'init_sox_effects', 'shutdown_sox_effects', 'effect_names', 'apply_effects_tensor', 'apply_effects_file', ]
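# A short sketch of the package-level API re-exported above, assuming sox
# support is available (in which case the effects backend has already been
# initialized at import time, as done above):
#
#   >>> import torch
#   >>> import torchaudio.sox_effects as sox_effects
#   >>> names = sox_effects.effect_names()           # e.g. ['allpass', 'band', ...]
#   >>> waveform = 2 * torch.rand(2, 16000) - 1      # pseudo signal, 2 channels, 16 kHz
#   >>> out, sr = sox_effects.apply_effects_tensor(
#   ...     waveform, 16000, [['gain', '-n'], ['rate', '8000']])
#   >>> sr
#   8000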
import math import warnings from typing import Optional import torch from torch import Tensor def _dB2Linear(x: float) -> float: return math.exp(x * math.log(10) / 20.0) def _generate_wave_table( wave_type: str, data_type: str, table_size: int, min: float, max: float, phase: float, device: torch.device, ) -> Tensor: r"""A helper function for phaser. Generates a table with given parameters. Args: wave_type (str): SINE or TRIANGULAR data_type (str): desired data_type ( `INT` or `FLOAT` ) table_size (int): desired table size min (float): desired min value max (float): desired max value phase (float): desired phase device (torch.device): Torch device on which table must be generated Returns: Tensor: A 1D tensor with wave table values """ phase_offset = int(phase / math.pi / 2 * table_size + 0.5) t = torch.arange(table_size, device=device, dtype=torch.int32) point = (t + phase_offset) % table_size d = torch.zeros_like(point, device=device, dtype=torch.float64) if wave_type == "SINE": d = (torch.sin(point.to(torch.float64) / table_size * 2 * math.pi) + 1) / 2 elif wave_type == "TRIANGLE": d = point.to(torch.float64) * 2 / table_size value = torch.div(4 * point, table_size, rounding_mode='floor') d[value == 0] = d[value == 0] + 0.5 d[value == 1] = 1.5 - d[value == 1] d[value == 2] = 1.5 - d[value == 2] d[value == 3] = d[value == 3] - 1.5 d = d * (max - min) + min if data_type == "INT": mask = d < 0 d[mask] = d[mask] - 0.5 d[~mask] = d[~mask] + 0.5 d = d.to(torch.int32) elif data_type == "FLOAT": d = d.to(torch.float32) return d def allpass_biquad( waveform: Tensor, sample_rate: int, central_freq: float, Q: float = 0.707 ) -> Tensor: r"""Design two-pole all-pass filter. Similar to SoX implementation. Args: waveform(torch.Tensor): audio waveform of dimension of `(..., time)` sample_rate (int): sampling rate of the waveform, e.g. 44100 (Hz) central_freq (float or torch.Tensor): central frequency (in Hz) Q (float or torch.Tensor, optional): https://en.wikipedia.org/wiki/Q_factor (Default: ``0.707``) Returns: Tensor: Waveform of dimension of `(..., time)` Reference: - http://sox.sourceforge.net/sox.html - https://www.w3.org/2011/audio/audio-eq-cookbook.html#APF """ dtype = waveform.dtype device = waveform.device central_freq = torch.as_tensor(central_freq, dtype=dtype, device=device) Q = torch.as_tensor(Q, dtype=dtype, device=device) w0 = 2 * math.pi * central_freq / sample_rate alpha = torch.sin(w0) / 2 / Q b0 = 1 - alpha b1 = -2 * torch.cos(w0) b2 = 1 + alpha a0 = 1 + alpha a1 = -2 * torch.cos(w0) a2 = 1 - alpha return biquad(waveform, b0, b1, b2, a0, a1, a2) def band_biquad( waveform: Tensor, sample_rate: int, central_freq: float, Q: float = 0.707, noise: bool = False, ) -> Tensor: r"""Design two-pole band filter. Similar to SoX implementation. Args: waveform (Tensor): audio waveform of dimension of `(..., time)` sample_rate (int): sampling rate of the waveform, e.g. 44100 (Hz) central_freq (float or torch.Tensor): central frequency (in Hz) Q (float or torch.Tensor, optional): https://en.wikipedia.org/wiki/Q_factor (Default: ``0.707``). noise (bool, optional) : If ``True``, uses the alternate mode for un-pitched audio (e.g. percussion). If ``False``, uses mode oriented to pitched audio, i.e. voice, singing, or instrumental music (Default: ``False``). 
Returns: Tensor: Waveform of dimension of `(..., time)` Reference: - http://sox.sourceforge.net/sox.html - https://www.w3.org/2011/audio/audio-eq-cookbook.html#APF """ dtype = waveform.dtype device = waveform.device central_freq = torch.as_tensor(central_freq, dtype=dtype, device=device) Q = torch.as_tensor(Q, dtype=dtype, device=device) w0 = 2 * math.pi * central_freq / sample_rate bw_Hz = central_freq / Q a0 = 1.0 a2 = torch.exp(-2 * math.pi * bw_Hz / sample_rate) a1 = -4 * a2 / (1 + a2) * torch.cos(w0) b0 = torch.sqrt(1 - a1 * a1 / (4 * a2)) * (1 - a2) if noise: mult = torch.sqrt(((1 + a2) * (1 + a2) - a1 * a1) * (1 - a2) / (1 + a2)) / b0 b0 = mult * b0 b1 = 0.0 b2 = 0.0 return biquad(waveform, b0, b1, b2, a0, a1, a2) def bandpass_biquad( waveform: Tensor, sample_rate: int, central_freq: float, Q: float = 0.707, const_skirt_gain: bool = False, ) -> Tensor: r"""Design two-pole band-pass filter. Similar to SoX implementation. Args: waveform (Tensor): audio waveform of dimension of `(..., time)` sample_rate (int): sampling rate of the waveform, e.g. 44100 (Hz) central_freq (float or torch.Tensor): central frequency (in Hz) Q (float or torch.Tensor, optional): https://en.wikipedia.org/wiki/Q_factor (Default: ``0.707``) const_skirt_gain (bool, optional) : If ``True``, uses a constant skirt gain (peak gain = Q). If ``False``, uses a constant 0dB peak gain. (Default: ``False``) Returns: Tensor: Waveform of dimension of `(..., time)` Reference: - http://sox.sourceforge.net/sox.html - https://www.w3.org/2011/audio/audio-eq-cookbook.html#APF """ dtype = waveform.dtype device = waveform.device central_freq = torch.as_tensor(central_freq, dtype=dtype, device=device) Q = torch.as_tensor(Q, dtype=dtype, device=device) w0 = 2 * math.pi * central_freq / sample_rate alpha = torch.sin(w0) / 2 / Q temp = torch.sin(w0) / 2 if const_skirt_gain else alpha b0 = temp b1 = 0.0 b2 = -temp a0 = 1 + alpha a1 = -2 * torch.cos(w0) a2 = 1 - alpha return biquad(waveform, b0, b1, b2, a0, a1, a2) def bandreject_biquad( waveform: Tensor, sample_rate: int, central_freq: float, Q: float = 0.707 ) -> Tensor: r"""Design two-pole band-reject filter. Similar to SoX implementation. Args: waveform (Tensor): audio waveform of dimension of `(..., time)` sample_rate (int): sampling rate of the waveform, e.g. 44100 (Hz) central_freq (float or torch.Tensor): central frequency (in Hz) Q (float or torch.Tensor, optional): https://en.wikipedia.org/wiki/Q_factor (Default: ``0.707``) Returns: Tensor: Waveform of dimension of `(..., time)` Reference: - http://sox.sourceforge.net/sox.html - https://www.w3.org/2011/audio/audio-eq-cookbook.html#APF """ dtype = waveform.dtype device = waveform.device central_freq = torch.as_tensor(central_freq, dtype=dtype, device=device) Q = torch.as_tensor(Q, dtype=dtype, device=device) w0 = 2 * math.pi * central_freq / sample_rate alpha = torch.sin(w0) / 2 / Q b0 = 1.0 b1 = -2 * torch.cos(w0) b2 = 1.0 a0 = 1 + alpha a1 = -2 * torch.cos(w0) a2 = 1 - alpha return biquad(waveform, b0, b1, b2, a0, a1, a2) def bass_biquad( waveform: Tensor, sample_rate: int, gain: float, central_freq: float = 100, Q: float = 0.707, ) -> Tensor: r"""Design a bass tone-control effect. Similar to SoX implementation. Args: waveform (Tensor): audio waveform of dimension of `(..., time)` sample_rate (int): sampling rate of the waveform, e.g. 44100 (Hz) gain (float or torch.Tensor): desired gain at the boost (or attenuation) in dB. central_freq (float or torch.Tensor, optional): central frequency (in Hz). 
(Default: ``100``) Q (float or torch.Tensor, optional): https://en.wikipedia.org/wiki/Q_factor (Default: ``0.707``). Returns: Tensor: Waveform of dimension of `(..., time)` Reference: - http://sox.sourceforge.net/sox.html - https://www.w3.org/2011/audio/audio-eq-cookbook.html#APF """ dtype = waveform.dtype device = waveform.device central_freq = torch.as_tensor(central_freq, dtype=dtype, device=device) Q = torch.as_tensor(Q, dtype=dtype, device=device) gain = torch.as_tensor(gain, dtype=dtype, device=device) w0 = 2 * math.pi * central_freq / sample_rate alpha = torch.sin(w0) / 2 / Q A = torch.exp(gain / 40 * math.log(10)) temp1 = 2 * torch.sqrt(A) * alpha temp2 = (A - 1) * torch.cos(w0) temp3 = (A + 1) * torch.cos(w0) b0 = A * ((A + 1) - temp2 + temp1) b1 = 2 * A * ((A - 1) - temp3) b2 = A * ((A + 1) - temp2 - temp1) a0 = (A + 1) + temp2 + temp1 a1 = -2 * ((A - 1) + temp3) a2 = (A + 1) + temp2 - temp1 return biquad(waveform, b0 / a0, b1 / a0, b2 / a0, a0 / a0, a1 / a0, a2 / a0) def biquad( waveform: Tensor, b0: float, b1: float, b2: float, a0: float, a1: float, a2: float ) -> Tensor: r"""Perform a biquad filter of input tensor. Initial conditions set to 0. https://en.wikipedia.org/wiki/Digital_biquad_filter Args: waveform (Tensor): audio waveform of dimension of `(..., time)` b0 (float or torch.Tensor): numerator coefficient of current input, x[n] b1 (float or torch.Tensor): numerator coefficient of input one time step ago x[n-1] b2 (float or torch.Tensor): numerator coefficient of input two time steps ago x[n-2] a0 (float or torch.Tensor): denominator coefficient of current output y[n], typically 1 a1 (float or torch.Tensor): denominator coefficient of current output y[n-1] a2 (float or torch.Tensor): denominator coefficient of current output y[n-2] Returns: Tensor: Waveform with dimension of `(..., time)` """ device = waveform.device dtype = waveform.dtype b0 = torch.as_tensor(b0, dtype=dtype, device=device).view(1) b1 = torch.as_tensor(b1, dtype=dtype, device=device).view(1) b2 = torch.as_tensor(b2, dtype=dtype, device=device).view(1) a0 = torch.as_tensor(a0, dtype=dtype, device=device).view(1) a1 = torch.as_tensor(a1, dtype=dtype, device=device).view(1) a2 = torch.as_tensor(a2, dtype=dtype, device=device).view(1) output_waveform = lfilter( waveform, torch.cat([a0, a1, a2]), torch.cat([b0, b1, b2]), ) return output_waveform def contrast(waveform: Tensor, enhancement_amount: float = 75.0) -> Tensor: r"""Apply contrast effect. Similar to SoX implementation. Comparable with compression, this effect modifies an audio signal to make it sound louder Args: waveform (Tensor): audio waveform of dimension of `(..., time)` enhancement_amount (float, optional): controls the amount of the enhancement Allowed range of values for enhancement_amount : 0-100 Note that enhancement_amount = 0 still gives a significant contrast enhancement Returns: Tensor: Waveform of dimension of `(..., time)` Reference: - http://sox.sourceforge.net/sox.html """ if not 0 <= enhancement_amount <= 100: raise ValueError("Allowed range of values for enhancement_amount : 0-100") contrast = enhancement_amount / 750.0 temp1 = waveform * (math.pi / 2) temp2 = contrast * torch.sin(temp1 * 4) output_waveform = torch.sin(temp1 + temp2) return output_waveform def dcshift( waveform: Tensor, shift: float, limiter_gain: Optional[float] = None ) -> Tensor: r"""Apply a DC shift to the audio. Similar to SoX implementation. 
This can be useful to remove a DC offset (caused perhaps by a hardware problem in the recording chain) from the audio Args: waveform (Tensor): audio waveform of dimension of `(..., time)` shift (float): indicates the amount to shift the audio Allowed range of values for shift : -2.0 to +2.0 limiter_gain (float of None, optional): It is used only on peaks to prevent clipping It should have a value much less than 1 (e.g. 0.05 or 0.02) Returns: Tensor: Waveform of dimension of `(..., time)` Reference: - http://sox.sourceforge.net/sox.html """ output_waveform = waveform limiter_threshold = 0.0 if limiter_gain is not None: limiter_threshold = 1.0 - (abs(shift) - limiter_gain) if limiter_gain is not None and shift > 0: mask = waveform > limiter_threshold temp = ( (waveform[mask] - limiter_threshold) * limiter_gain / (1 - limiter_threshold) ) output_waveform[mask] = (temp + limiter_threshold + shift).clamp( max=limiter_threshold ) output_waveform[~mask] = (waveform[~mask] + shift).clamp(min=-1, max=1) elif limiter_gain is not None and shift < 0: mask = waveform < -limiter_threshold temp = ( (waveform[mask] + limiter_threshold) * limiter_gain / (1 - limiter_threshold) ) output_waveform[mask] = (temp - limiter_threshold + shift).clamp( min=-limiter_threshold ) output_waveform[~mask] = (waveform[~mask] + shift).clamp(min=-1, max=1) else: output_waveform = (waveform + shift).clamp(min=-1, max=1) return output_waveform def deemph_biquad(waveform: Tensor, sample_rate: int) -> Tensor: r"""Apply ISO 908 CD de-emphasis (shelving) IIR filter. Similar to SoX implementation. Args: waveform (Tensor): audio waveform of dimension of `(..., time)` sample_rate (int): sampling rate of the waveform, Allowed sample rate ``44100`` or ``48000`` Returns: Tensor: Waveform of dimension of `(..., time)` Reference: - http://sox.sourceforge.net/sox.html - https://www.w3.org/2011/audio/audio-eq-cookbook.html#APF """ if sample_rate == 44100: central_freq = 5283 width_slope = 0.4845 gain = -9.477 elif sample_rate == 48000: central_freq = 5356 width_slope = 0.479 gain = -9.62 else: raise ValueError("Sample rate must be 44100 (audio-CD) or 48000 (DAT)") w0 = 2 * math.pi * central_freq / sample_rate A = math.exp(gain / 40.0 * math.log(10)) alpha = math.sin(w0) / 2 * math.sqrt((A + 1 / A) * (1 / width_slope - 1) + 2) temp1 = 2 * math.sqrt(A) * alpha temp2 = (A - 1) * math.cos(w0) temp3 = (A + 1) * math.cos(w0) b0 = A * ((A + 1) + temp2 + temp1) b1 = -2 * A * ((A - 1) + temp3) b2 = A * ((A + 1) + temp2 - temp1) a0 = (A + 1) - temp2 + temp1 a1 = 2 * ((A - 1) - temp3) a2 = (A + 1) - temp2 - temp1 return biquad(waveform, b0, b1, b2, a0, a1, a2) def _add_noise_shaping(dithered_waveform: Tensor, waveform: Tensor) -> Tensor: r"""Noise shaping is calculated by error: error[n] = dithered[n] - original[n] noise_shaped_waveform[n] = dithered[n] + error[n-1] """ wf_shape = waveform.size() waveform = waveform.reshape(-1, wf_shape[-1]) dithered_shape = dithered_waveform.size() dithered_waveform = dithered_waveform.reshape(-1, dithered_shape[-1]) error = dithered_waveform - waveform # add error[n-1] to dithered_waveform[n], so offset the error by 1 index zeros = torch.zeros(1, dtype=error.dtype, device=error.device) for index in range(error.size()[0]): err = error[index] error_offset = torch.cat((zeros, err)) error[index] = error_offset[: waveform.size()[1]] noise_shaped = dithered_waveform + error return noise_shaped.reshape(dithered_shape[:-1] + noise_shaped.shape[-1:]) def _apply_probability_distribution( waveform: Tensor, density_function: 
str = "TPDF" ) -> Tensor: r"""Apply a probability distribution function on a waveform. Triangular probability density function (TPDF) dither noise has a triangular distribution; values in the center of the range have a higher probability of occurring. Rectangular probability density function (RPDF) dither noise has a uniform distribution; any value in the specified range has the same probability of occurring. Gaussian probability density function (GPDF) has a normal distribution. The relationship of probabilities of results follows a bell-shaped, or Gaussian curve, typical of dither generated by analog sources. Args: waveform (Tensor): Tensor of audio of dimension (..., time) density_function (str, optional): The density function of a continuous random variable (Default: ``"TPDF"``) Options: Triangular Probability Density Function - `TPDF` Rectangular Probability Density Function - `RPDF` Gaussian Probability Density Function - `GPDF` Returns: Tensor: waveform dithered with TPDF """ # pack batch shape = waveform.size() waveform = waveform.reshape(-1, shape[-1]) channel_size = waveform.size()[0] - 1 time_size = waveform.size()[-1] - 1 random_channel = ( int( torch.randint( channel_size, [ 1, ], ).item() ) if channel_size > 0 else 0 ) random_time = ( int( torch.randint( time_size, [ 1, ], ).item() ) if time_size > 0 else 0 ) number_of_bits = 16 up_scaling = 2 ** (number_of_bits - 1) - 2 signal_scaled = waveform * up_scaling down_scaling = 2 ** (number_of_bits - 1) signal_scaled_dis = waveform if density_function == "RPDF": RPDF = waveform[random_channel][random_time] - 0.5 signal_scaled_dis = signal_scaled + RPDF elif density_function == "GPDF": # TODO Replace by distribution code once # https://github.com/pytorch/pytorch/issues/29843 is resolved # gaussian = torch.distributions.normal.Normal(torch.mean(waveform, -1), 1).sample() num_rand_variables = 6 gaussian = waveform[random_channel][random_time] for ws in num_rand_variables * [time_size]: rand_chan = int( torch.randint( channel_size, [ 1, ], ).item() ) gaussian += waveform[rand_chan][ int( torch.randint( ws, [ 1, ], ).item() ) ] signal_scaled_dis = signal_scaled + gaussian else: # dtype needed for https://github.com/pytorch/pytorch/issues/32358 TPDF = torch.bartlett_window( time_size + 1, dtype=signal_scaled.dtype, device=signal_scaled.device ) TPDF = TPDF.repeat((channel_size + 1), 1) signal_scaled_dis = signal_scaled + TPDF quantised_signal_scaled = torch.round(signal_scaled_dis) quantised_signal = quantised_signal_scaled / down_scaling # unpack batch return quantised_signal.reshape(shape[:-1] + quantised_signal.shape[-1:]) def dither( waveform: Tensor, density_function: str = "TPDF", noise_shaping: bool = False ) -> Tensor: r"""Dither increases the perceived dynamic range of audio stored at a particular bit-depth by eliminating nonlinear truncation distortion (i.e. adding minimally perceived noise to mask distortion caused by quantization). Args: waveform (Tensor): Tensor of audio of dimension (..., time) density_function (str, optional): The density function of a continuous random variable. One of ``"TPDF"`` (Triangular Probability Density Function), ``"RPDF"`` (Rectangular Probability Density Function) or ``"GPDF"`` (Gaussian Probability Density Function) (Default: ``"TPDF"``). 
noise_shaping (bool, optional): a filtering process that shapes the spectral energy of quantisation error (Default: ``False``) Returns: Tensor: waveform dithered """ dithered = _apply_probability_distribution( waveform, density_function=density_function ) if noise_shaping: return _add_noise_shaping(dithered, waveform) else: return dithered def equalizer_biquad( waveform: Tensor, sample_rate: int, center_freq: float, gain: float, Q: float = 0.707, ) -> Tensor: r"""Design biquad peaking equalizer filter and perform filtering. Similar to SoX implementation. Args: waveform (Tensor): audio waveform of dimension of `(..., time)` sample_rate (int): sampling rate of the waveform, e.g. 44100 (Hz) center_freq (float): filter's central frequency gain (float or torch.Tensor): desired gain at the boost (or attenuation) in dB Q (float or torch.Tensor, optional): https://en.wikipedia.org/wiki/Q_factor (Default: ``0.707``) Returns: Tensor: Waveform of dimension of `(..., time)` """ dtype = waveform.dtype device = waveform.device center_freq = torch.as_tensor(center_freq, dtype=dtype, device=device) Q = torch.as_tensor(Q, dtype=dtype, device=device) gain = torch.as_tensor(gain, dtype=dtype, device=device) w0 = 2 * math.pi * center_freq / sample_rate A = torch.exp(gain / 40.0 * math.log(10)) alpha = torch.sin(w0) / 2 / Q b0 = 1 + alpha * A b1 = -2 * torch.cos(w0) b2 = 1 - alpha * A a0 = 1 + alpha / A a1 = -2 * torch.cos(w0) a2 = 1 - alpha / A return biquad(waveform, b0, b1, b2, a0, a1, a2) def filtfilt( waveform: Tensor, a_coeffs: Tensor, b_coeffs: Tensor, clamp: bool = True, ) -> Tensor: r"""Apply an IIR filter forward and backward to a waveform. Inspired by https://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.filtfilt.html Args: waveform (Tensor): audio waveform of dimension of `(..., time)`. Must be normalized to -1 to 1. a_coeffs (Tensor): denominator coefficients of difference equation of dimension of either 1D with shape `(num_order + 1)` or 2D with shape `(num_filters, num_order + 1)`. Lower delay coefficients are first, e.g. ``[a0, a1, a2, ...]``. Must be same size as b_coeffs (pad with 0's as necessary). b_coeffs (Tensor): numerator coefficients of difference equation of dimension of either 1D with shape `(num_order + 1)` or 2D with shape `(num_filters, num_order + 1)`. Lower delay coefficients are first, e.g. ``[b0, b1, b2, ...]``. Must be same size as a_coeffs (pad with 0's as necessary). clamp (bool, optional): If ``True``, clamp the output signal to be in the range [-1, 1] (Default: ``True``) Returns: Tensor: Waveform with dimension of either `(..., num_filters, time)` if ``a_coeffs`` and ``b_coeffs`` are 2D Tensors, or `(..., time)` otherwise. """ forward_filtered = lfilter(waveform, a_coeffs, b_coeffs, clamp=False, batching=True) backward_filtered = lfilter( forward_filtered.flip(-1), a_coeffs, b_coeffs, clamp=clamp, batching=True, ).flip(-1) return backward_filtered def flanger( waveform: Tensor, sample_rate: int, delay: float = 0.0, depth: float = 2.0, regen: float = 0.0, width: float = 71.0, speed: float = 0.5, phase: float = 25.0, modulation: str = "sinusoidal", interpolation: str = "linear", ) -> Tensor: r"""Apply a flanger effect to the audio. Similar to SoX implementation. Args: waveform (Tensor): audio waveform of dimension of `(..., channel, time)` . Max 4 channels allowed sample_rate (int): sampling rate of the waveform, e.g. 
44100 (Hz) delay (float, optional): desired delay in milliseconds(ms) Allowed range of values are 0 to 30 depth (float, optional): desired delay depth in milliseconds(ms) Allowed range of values are 0 to 10 regen (float, optional): desired regen(feedback gain) in dB Allowed range of values are -95 to 95 width (float, optional): desired width(delay gain) in dB Allowed range of values are 0 to 100 speed (float, optional): modulation speed in Hz Allowed range of values are 0.1 to 10 phase (float, optional): percentage phase-shift for multi-channel Allowed range of values are 0 to 100 modulation (str, optional): Use either "sinusoidal" or "triangular" modulation. (Default: ``sinusoidal``) interpolation (str, optional): Use either "linear" or "quadratic" for delay-line interpolation. (Default: ``linear``) Returns: Tensor: Waveform of dimension of `(..., channel, time)` Reference: - http://sox.sourceforge.net/sox.html - Scott Lehman, `Effects Explained`_, .. _Effects Explained: https://web.archive.org/web/20051125072557/http://www.harmony-central.com/Effects/effects-explained.html """ if modulation not in ("sinusoidal", "triangular"): raise ValueError("Only 'sinusoidal' or 'triangular' modulation allowed") if interpolation not in ("linear", "quadratic"): raise ValueError("Only 'linear' or 'quadratic' interpolation allowed") actual_shape = waveform.shape device, dtype = waveform.device, waveform.dtype if actual_shape[-2] > 4: raise ValueError("Max 4 channels allowed") # convert to 3D (batch, channels, time) waveform = waveform.view(-1, actual_shape[-2], actual_shape[-1]) # Scaling feedback_gain = regen / 100 delay_gain = width / 100 channel_phase = phase / 100 delay_min = delay / 1000 delay_depth = depth / 1000 n_channels = waveform.shape[-2] if modulation == "sinusoidal": wave_type = "SINE" else: wave_type = "TRIANGLE" # Balance output: in_gain = 1.0 / (1 + delay_gain) delay_gain = delay_gain / (1 + delay_gain) # Balance feedback loop: delay_gain = delay_gain * (1 - abs(feedback_gain)) delay_buf_length = int((delay_min + delay_depth) * sample_rate + 0.5) delay_buf_length = delay_buf_length + 2 delay_bufs = torch.zeros( waveform.shape[0], n_channels, delay_buf_length, dtype=dtype, device=device ) delay_last = torch.zeros(waveform.shape[0], n_channels, dtype=dtype, device=device) lfo_length = int(sample_rate / speed) table_min = math.floor(delay_min * sample_rate + 0.5) table_max = delay_buf_length - 2.0 lfo = _generate_wave_table( wave_type=wave_type, data_type="FLOAT", table_size=lfo_length, min=float(table_min), max=float(table_max), phase=3 * math.pi / 2, device=device, ) output_waveform = torch.zeros_like(waveform, dtype=dtype, device=device) delay_buf_pos = 0 lfo_pos = 0 channel_idxs = torch.arange(0, n_channels, device=device) for i in range(waveform.shape[-1]): delay_buf_pos = (delay_buf_pos + delay_buf_length - 1) % delay_buf_length cur_channel_phase = (channel_idxs * lfo_length * channel_phase + 0.5).to( torch.int64 ) delay_tensor = lfo[(lfo_pos + cur_channel_phase) % lfo_length] frac_delay = torch.frac(delay_tensor) delay_tensor = torch.floor(delay_tensor) int_delay = delay_tensor.to(torch.int64) temp = waveform[:, :, i] delay_bufs[:, :, delay_buf_pos] = temp + delay_last * feedback_gain delayed_0 = delay_bufs[ :, channel_idxs, (delay_buf_pos + int_delay) % delay_buf_length ] int_delay = int_delay + 1 delayed_1 = delay_bufs[ :, channel_idxs, (delay_buf_pos + int_delay) % delay_buf_length ] int_delay = int_delay + 1 if interpolation == "linear": delayed = delayed_0 + (delayed_1 - 
delayed_0) * frac_delay else: delayed_2 = delay_bufs[ :, channel_idxs, (delay_buf_pos + int_delay) % delay_buf_length ] int_delay = int_delay + 1 delayed_2 = delayed_2 - delayed_0 delayed_1 = delayed_1 - delayed_0 a = delayed_2 * 0.5 - delayed_1 b = delayed_1 * 2 - delayed_2 * 0.5 delayed = delayed_0 + (a * frac_delay + b) * frac_delay delay_last = delayed output_waveform[:, :, i] = waveform[:, :, i] * in_gain + delayed * delay_gain lfo_pos = (lfo_pos + 1) % lfo_length return output_waveform.clamp(min=-1, max=1).view(actual_shape) def gain(waveform: Tensor, gain_db: float = 1.0) -> Tensor: r"""Apply amplification or attenuation to the whole waveform. Args: waveform (Tensor): Tensor of audio of dimension (..., time). gain_db (float, optional) Gain adjustment in decibels (dB) (Default: ``1.0``). Returns: Tensor: the whole waveform amplified by gain_db. """ if gain_db == 0: return waveform ratio = 10 ** (gain_db / 20) return waveform * ratio def highpass_biquad( waveform: Tensor, sample_rate: int, cutoff_freq: float, Q: float = 0.707 ) -> Tensor: r"""Design biquad highpass filter and perform filtering. Similar to SoX implementation. Args: waveform (Tensor): audio waveform of dimension of `(..., time)` sample_rate (int): sampling rate of the waveform, e.g. 44100 (Hz) cutoff_freq (float or torch.Tensor): filter cutoff frequency Q (float or torch.Tensor, optional): https://en.wikipedia.org/wiki/Q_factor (Default: ``0.707``) Returns: Tensor: Waveform dimension of `(..., time)` """ dtype = waveform.dtype device = waveform.device cutoff_freq = torch.as_tensor(cutoff_freq, dtype=dtype, device=device) Q = torch.as_tensor(Q, dtype=dtype, device=device) w0 = 2 * math.pi * cutoff_freq / sample_rate alpha = torch.sin(w0) / 2.0 / Q b0 = (1 + torch.cos(w0)) / 2 b1 = -1 - torch.cos(w0) b2 = b0 a0 = 1 + alpha a1 = -2 * torch.cos(w0) a2 = 1 - alpha return biquad(waveform, b0, b1, b2, a0, a1, a2) def _lfilter_core_generic_loop(input_signal_windows: Tensor, a_coeffs_flipped: Tensor, padded_output_waveform: Tensor): n_order = a_coeffs_flipped.size(1) a_coeffs_flipped = a_coeffs_flipped.unsqueeze(2) for i_sample, o0 in enumerate(input_signal_windows.permute(2, 0, 1)): windowed_output_signal = padded_output_waveform[ :, :, i_sample:i_sample + n_order ] o0 -= (windowed_output_signal.transpose(0, 1) @ a_coeffs_flipped)[..., 0].t() padded_output_waveform[:, :, i_sample + n_order - 1] = o0 try: _lfilter_core_cpu_loop = torch.ops.torchaudio._lfilter_core_loop except RuntimeError as err: assert str(err) == 'No such operator torchaudio::_lfilter_core_loop' _lfilter_core_cpu_loop = _lfilter_core_generic_loop def _lfilter_core( waveform: Tensor, a_coeffs: Tensor, b_coeffs: Tensor, ) -> Tensor: assert a_coeffs.size() == b_coeffs.size() assert len(waveform.size()) == 3 assert waveform.device == a_coeffs.device assert b_coeffs.device == a_coeffs.device n_batch, n_channel, n_sample = waveform.size() n_order = a_coeffs.size(1) assert n_order > 0 # Pad the input and create output padded_waveform = torch.nn.functional.pad(waveform, [n_order - 1, 0]) padded_output_waveform = torch.zeros_like(padded_waveform) # Set up the coefficients matrix # Flip coefficients' order a_coeffs_flipped = a_coeffs.flip(1) b_coeffs_flipped = b_coeffs.flip(1) # calculate windowed_input_signal in parallel using convolution input_signal_windows = torch.nn.functional.conv1d( padded_waveform, b_coeffs_flipped.unsqueeze(1), groups=n_channel ) input_signal_windows.div_(a_coeffs[:, :1]) a_coeffs_flipped.div_(a_coeffs[:, :1]) if input_signal_windows.device == 
torch.device('cpu') and\ a_coeffs_flipped.device == torch.device('cpu') and\ padded_output_waveform.device == torch.device('cpu'): _lfilter_core_cpu_loop(input_signal_windows, a_coeffs_flipped, padded_output_waveform) else: _lfilter_core_generic_loop(input_signal_windows, a_coeffs_flipped, padded_output_waveform) output = padded_output_waveform[:, :, n_order - 1:] return output try: _lfilter = torch.ops.torchaudio._lfilter except RuntimeError as err: assert str(err) == 'No such operator torchaudio::_lfilter' _lfilter = _lfilter_core def lfilter( waveform: Tensor, a_coeffs: Tensor, b_coeffs: Tensor, clamp: bool = True, batching: bool = True ) -> Tensor: r"""Perform an IIR filter by evaluating difference equation. Note: To avoid numerical problems, small filter order is preferred. Using double precision could also minimize numerical precision errors. Args: waveform (Tensor): audio waveform of dimension of `(..., time)`. Must be normalized to -1 to 1. a_coeffs (Tensor): denominator coefficients of difference equation of dimension of either 1D with shape `(num_order + 1)` or 2D with shape `(num_filters, num_order + 1)`. Lower delays coefficients are first, e.g. ``[a0, a1, a2, ...]``. Must be same size as b_coeffs (pad with 0's as necessary). b_coeffs (Tensor): numerator coefficients of difference equation of dimension of either 1D with shape `(num_order + 1)` or 2D with shape `(num_filters, num_order + 1)`. Lower delays coefficients are first, e.g. ``[b0, b1, b2, ...]``. Must be same size as a_coeffs (pad with 0's as necessary). clamp (bool, optional): If ``True``, clamp the output signal to be in the range [-1, 1] (Default: ``True``) batching (bool, optional): Effective only when coefficients are 2D. If ``True``, then waveform should be at least 2D, and the size of second axis from last should equals to ``num_filters``. The output can be expressed as ``output[..., i, :] = lfilter(waveform[..., i, :], a_coeffs[i], b_coeffs[i], clamp=clamp, batching=False)``. (Default: ``True``) Returns: Tensor: Waveform with dimension of either `(..., num_filters, time)` if ``a_coeffs`` and ``b_coeffs`` are 2D Tensors, or `(..., time)` otherwise. """ assert a_coeffs.size() == b_coeffs.size() assert a_coeffs.ndim <= 2 if a_coeffs.ndim > 1: if batching: assert waveform.ndim > 1 assert waveform.shape[-2] == a_coeffs.shape[0] else: waveform = torch.stack([waveform] * a_coeffs.shape[0], -2) else: a_coeffs = a_coeffs.unsqueeze(0) b_coeffs = b_coeffs.unsqueeze(0) # pack batch shape = waveform.size() waveform = waveform.reshape(-1, a_coeffs.shape[0], shape[-1]) output = _lfilter(waveform, a_coeffs, b_coeffs) if clamp: output = torch.clamp(output, min=-1.0, max=1.0) # unpack batch output = output.reshape(shape[:-1] + output.shape[-1:]) return output def lowpass_biquad( waveform: Tensor, sample_rate: int, cutoff_freq: float, Q: float = 0.707 ) -> Tensor: r"""Design biquad lowpass filter and perform filtering. Similar to SoX implementation. Args: waveform (torch.Tensor): audio waveform of dimension of `(..., time)` sample_rate (int): sampling rate of the waveform, e.g. 
44100 (Hz) cutoff_freq (float or torch.Tensor): filter cutoff frequency Q (float or torch.Tensor, optional): https://en.wikipedia.org/wiki/Q_factor (Default: ``0.707``) Returns: Tensor: Waveform of dimension of `(..., time)` """ dtype = waveform.dtype device = waveform.device cutoff_freq = torch.as_tensor(cutoff_freq, dtype=dtype, device=device) Q = torch.as_tensor(Q, dtype=dtype, device=device) w0 = 2 * math.pi * cutoff_freq / sample_rate alpha = torch.sin(w0) / 2 / Q b0 = (1 - torch.cos(w0)) / 2 b1 = 1 - torch.cos(w0) b2 = b0 a0 = 1 + alpha a1 = -2 * torch.cos(w0) a2 = 1 - alpha return biquad(waveform, b0, b1, b2, a0, a1, a2) def _overdrive_core_loop_generic( waveform: Tensor, temp: Tensor, last_in: Tensor, last_out: Tensor, output_waveform: Tensor ): for i in range(waveform.shape[-1]): last_out = temp[:, i] - last_in + 0.995 * last_out last_in = temp[:, i] output_waveform[:, i] = waveform[:, i] * 0.5 + last_out * 0.75 try: _overdrive_core_loop_cpu = torch.ops.torchaudio._overdrive_core_loop except RuntimeError as err: assert str(err) == 'No such operator torchaudio::_overdrive_core_loop' _overdrive_core_loop_cpu = _overdrive_core_loop_generic def overdrive(waveform: Tensor, gain: float = 20, colour: float = 20) -> Tensor: r"""Apply an overdrive effect to the audio. Similar to SoX implementation. This effect applies a non-linear distortion to the audio signal. Args: waveform (Tensor): audio waveform of dimension of `(..., time)` gain (float, optional): desired gain at the boost (or attenuation) in dB. Allowed range of values is 0 to 100 colour (float, optional): controls the amount of even harmonic content in the over-driven output. Allowed range of values is 0 to 100 Returns: Tensor: Waveform of dimension of `(..., time)` Reference: - http://sox.sourceforge.net/sox.html """ actual_shape = waveform.shape device, dtype = waveform.device, waveform.dtype # convert to 2D (..,time) waveform = waveform.view(-1, actual_shape[-1]) gain = _dB2Linear(gain) colour = colour / 200 last_in = torch.zeros(waveform.shape[:-1], dtype=dtype, device=device) last_out = torch.zeros(waveform.shape[:-1], dtype=dtype, device=device) temp = waveform * gain + colour mask1 = temp < -1 temp[mask1] = torch.tensor(-2.0 / 3.0, dtype=dtype, device=device) # Wrapping the constant with Tensor is required for Torchscript mask2 = temp > 1 temp[mask2] = torch.tensor(2.0 / 3.0, dtype=dtype, device=device) mask3 = ~mask1 & ~mask2 temp[mask3] = temp[mask3] - (temp[mask3] ** 3) * (1.0 / 3) output_waveform = torch.zeros_like(waveform, dtype=dtype, device=device) # Uses CPU optimized loop function if available for CPU device if device == torch.device('cpu'): _overdrive_core_loop_cpu(waveform, temp, last_in, last_out, output_waveform) else: _overdrive_core_loop_generic(waveform, temp, last_in, last_out, output_waveform) return output_waveform.clamp(min=-1, max=1).view(actual_shape) def phaser( waveform: Tensor, sample_rate: int, gain_in: float = 0.4, gain_out: float = 0.74, delay_ms: float = 3.0, decay: float = 0.4, mod_speed: float = 0.5, sinusoidal: bool = True, ) -> Tensor: r"""Apply a phasing effect to the audio. Similar to SoX implementation. Args: waveform (Tensor): audio waveform of dimension of `(..., time)` sample_rate (int): sampling rate of the waveform, e.g. 
44100 (Hz) gain_in (float, optional): desired input gain at the boost (or attenuation) in dB Allowed range of values are 0 to 1 gain_out (float, optional): desired output gain at the boost (or attenuation) in dB Allowed range of values are 0 to 1e9 delay_ms (float, optional): desired delay in milliseconds Allowed range of values are 0 to 5.0 decay (float, optional): desired decay relative to gain-in Allowed range of values are 0 to 0.99 mod_speed (float, optional): modulation speed in Hz Allowed range of values are 0.1 to 2 sinusoidal (bool, optional): If ``True``, uses sinusoidal modulation (preferable for multiple instruments) If ``False``, uses triangular modulation (gives single instruments a sharper phasing effect) (Default: ``True``) Returns: Tensor: Waveform of dimension of `(..., time)` Reference: - http://sox.sourceforge.net/sox.html - Scott Lehman, `Effects Explained`_. .. _Effects Explained: https://web.archive.org/web/20051125072557/http://www.harmony-central.com/Effects/effects-explained.html """ actual_shape = waveform.shape device, dtype = waveform.device, waveform.dtype # convert to 2D (channels,time) waveform = waveform.view(-1, actual_shape[-1]) delay_buf_len = int((delay_ms * 0.001 * sample_rate) + 0.5) delay_buf = torch.zeros( waveform.shape[0], delay_buf_len, dtype=dtype, device=device ) mod_buf_len = int(sample_rate / mod_speed + 0.5) if sinusoidal: wave_type = "SINE" else: wave_type = "TRIANGLE" mod_buf = _generate_wave_table( wave_type=wave_type, data_type="INT", table_size=mod_buf_len, min=1.0, max=float(delay_buf_len), phase=math.pi / 2, device=device, ) delay_pos = 0 mod_pos = 0 output_waveform_pre_gain_list = [] waveform = waveform * gain_in delay_buf = delay_buf * decay waveform_list = [waveform[:, i] for i in range(waveform.size(1))] delay_buf_list = [delay_buf[:, i] for i in range(delay_buf.size(1))] mod_buf_list = [mod_buf[i] for i in range(mod_buf.size(0))] for i in range(waveform.shape[-1]): idx = int((delay_pos + mod_buf_list[mod_pos]) % delay_buf_len) mod_pos = (mod_pos + 1) % mod_buf_len delay_pos = (delay_pos + 1) % delay_buf_len temp = (waveform_list[i]) + (delay_buf_list[idx]) delay_buf_list[delay_pos] = temp * decay output_waveform_pre_gain_list.append(temp) output_waveform = torch.stack(output_waveform_pre_gain_list, dim=1).to( dtype=dtype, device=device ) output_waveform.mul_(gain_out) return output_waveform.clamp(min=-1, max=1).view(actual_shape) def riaa_biquad(waveform: Tensor, sample_rate: int) -> Tensor: r"""Apply RIAA vinyl playback equalization. Similar to SoX implementation. Args: waveform (Tensor): audio waveform of dimension of `(..., time)` sample_rate (int): sampling rate of the waveform, e.g. 44100 (Hz). 
Allowed sample rates in Hz : ``44100``,``48000``,``88200``,``96000`` Returns: Tensor: Waveform of dimension of `(..., time)` Reference: - http://sox.sourceforge.net/sox.html - https://www.w3.org/2011/audio/audio-eq-cookbook.html#APF """ if sample_rate == 44100: zeros = [-0.2014898, 0.9233820] poles = [0.7083149, 0.9924091] elif sample_rate == 48000: zeros = [-0.1766069, 0.9321590] poles = [0.7396325, 0.9931330] elif sample_rate == 88200: zeros = [-0.1168735, 0.9648312] poles = [0.8590646, 0.9964002] elif sample_rate == 96000: zeros = [-0.1141486, 0.9676817] poles = [0.8699137, 0.9966946] else: raise ValueError("Sample rate must be 44.1k, 48k, 88.2k, or 96k") # polynomial coefficients with roots zeros[0] and zeros[1] b0 = 1.0 b1 = -(zeros[0] + zeros[1]) b2 = zeros[0] * zeros[1] # polynomial coefficients with roots poles[0] and poles[1] a0 = 1.0 a1 = -(poles[0] + poles[1]) a2 = poles[0] * poles[1] # Normalize to 0dB at 1kHz y = 2 * math.pi * 1000 / sample_rate b_re = b0 + b1 * math.cos(-y) + b2 * math.cos(-2 * y) a_re = a0 + a1 * math.cos(-y) + a2 * math.cos(-2 * y) b_im = b1 * math.sin(-y) + b2 * math.sin(-2 * y) a_im = a1 * math.sin(-y) + a2 * math.sin(-2 * y) g = 1 / math.sqrt((b_re ** 2 + b_im ** 2) / (a_re ** 2 + a_im ** 2)) b0 *= g b1 *= g b2 *= g return biquad(waveform, b0, b1, b2, a0, a1, a2) def treble_biquad( waveform: Tensor, sample_rate: int, gain: float, central_freq: float = 3000, Q: float = 0.707, ) -> Tensor: r"""Design a treble tone-control effect. Similar to SoX implementation. Args: waveform (Tensor): audio waveform of dimension of `(..., time)` sample_rate (int): sampling rate of the waveform, e.g. 44100 (Hz) gain (float or torch.Tensor): desired gain at the boost (or attenuation) in dB. central_freq (float or torch.Tensor, optional): central frequency (in Hz). (Default: ``3000``) Q (float or torch.Tensor, optional): https://en.wikipedia.org/wiki/Q_factor (Default: ``0.707``). 
Returns: Tensor: Waveform of dimension of `(..., time)` Reference: - http://sox.sourceforge.net/sox.html - https://www.w3.org/2011/audio/audio-eq-cookbook.html#APF """ dtype = waveform.dtype device = waveform.device central_freq = torch.as_tensor(central_freq, dtype=dtype, device=device) Q = torch.as_tensor(Q, dtype=dtype, device=device) gain = torch.as_tensor(gain, dtype=dtype, device=device) w0 = 2 * math.pi * central_freq / sample_rate alpha = torch.sin(w0) / 2 / Q A = torch.exp(gain / 40 * math.log(10)) temp1 = 2 * torch.sqrt(A) * alpha temp2 = (A - 1) * torch.cos(w0) temp3 = (A + 1) * torch.cos(w0) b0 = A * ((A + 1) + temp2 + temp1) b1 = -2 * A * ((A - 1) + temp3) b2 = A * ((A + 1) + temp2 - temp1) a0 = (A + 1) - temp2 + temp1 a1 = 2 * ((A - 1) - temp3) a2 = (A + 1) - temp2 - temp1 return biquad(waveform, b0, b1, b2, a0, a1, a2) def _measure( measure_len_ws: int, samples: Tensor, spectrum: Tensor, noise_spectrum: Tensor, spectrum_window: Tensor, spectrum_start: int, spectrum_end: int, cepstrum_window: Tensor, cepstrum_start: int, cepstrum_end: int, noise_reduction_amount: float, measure_smooth_time_mult: float, noise_up_time_mult: float, noise_down_time_mult: float, index_ns: int, boot_count: int, ) -> float: assert spectrum.size()[-1] == noise_spectrum.size()[-1] samplesLen_ns = samples.size()[-1] dft_len_ws = spectrum.size()[-1] dftBuf = torch.zeros(dft_len_ws) _index_ns = torch.tensor( [index_ns] + [(index_ns + i) % samplesLen_ns for i in range(1, measure_len_ws)] ) dftBuf[:measure_len_ws] = samples[_index_ns] * spectrum_window[:measure_len_ws] # memset(c->dftBuf + i, 0, (p->dft_len_ws - i) * sizeof(*c->dftBuf)); dftBuf[measure_len_ws:dft_len_ws].zero_() # lsx_safe_rdft((int)p->dft_len_ws, 1, c->dftBuf); _dftBuf = torch.fft.rfft(dftBuf) # memset(c->dftBuf, 0, p->spectrum_start * sizeof(*c->dftBuf)); _dftBuf[:spectrum_start].zero_() mult: float = ( boot_count / (1.0 + boot_count) if boot_count >= 0 else measure_smooth_time_mult ) _d = _dftBuf[spectrum_start:spectrum_end].abs() spectrum[spectrum_start:spectrum_end].mul_(mult).add_(_d * (1 - mult)) _d = spectrum[spectrum_start:spectrum_end] ** 2 _zeros = torch.zeros(spectrum_end - spectrum_start) _mult = ( _zeros if boot_count >= 0 else torch.where( _d > noise_spectrum[spectrum_start:spectrum_end], torch.tensor(noise_up_time_mult), # if torch.tensor(noise_down_time_mult), # else ) ) noise_spectrum[spectrum_start:spectrum_end].mul_(_mult).add_(_d * (1 - _mult)) _d = torch.sqrt( torch.max( _zeros, _d - noise_reduction_amount * noise_spectrum[spectrum_start:spectrum_end], ) ) _cepstrum_Buf: Tensor = torch.zeros(dft_len_ws >> 1) _cepstrum_Buf[spectrum_start:spectrum_end] = _d * cepstrum_window _cepstrum_Buf[spectrum_end:dft_len_ws >> 1].zero_() # lsx_safe_rdft((int)p->dft_len_ws >> 1, 1, c->dftBuf); _cepstrum_Buf = torch.fft.rfft(_cepstrum_Buf) result: float = float( torch.sum(_cepstrum_Buf[cepstrum_start:cepstrum_end].abs().pow(2)) ) result = ( math.log(result / (cepstrum_end - cepstrum_start)) if result > 0 else -math.inf ) return max(0, 21 + result) def vad( waveform: Tensor, sample_rate: int, trigger_level: float = 7.0, trigger_time: float = 0.25, search_time: float = 1.0, allowed_gap: float = 0.25, pre_trigger_time: float = 0.0, # Fine-tuning parameters boot_time: float = 0.35, noise_up_time: float = 0.1, noise_down_time: float = 0.01, noise_reduction_amount: float = 1.35, measure_freq: float = 20.0, measure_duration: Optional[float] = None, measure_smooth_time: float = 0.4, hp_filter_freq: float = 50.0, lp_filter_freq: float = 
6000.0, hp_lifter_freq: float = 150.0, lp_lifter_freq: float = 2000.0, ) -> Tensor: r"""Voice Activity Detector. Similar to SoX implementation. Attempts to trim silence and quiet background sounds from the ends of recordings of speech. The algorithm currently uses a simple cepstral power measurement to detect voice, so may be fooled by other things, especially music. The effect can trim only from the front of the audio, so in order to trim from the back, the reverse effect must also be used. Args: waveform (Tensor): Tensor of audio of dimension `(channels, time)` or `(time)`. Tensor of shape `(channels, time)` is treated as a multi-channel recording of the same event and the resulting output will be trimmed to the earliest voice activity in any channel. sample_rate (int): Sample rate of audio signal. trigger_level (float, optional): The measurement level used to trigger activity detection. This may need to be changed depending on the noise level, signal level, and other characteristics of the input audio. (Default: 7.0) trigger_time (float, optional): The time constant (in seconds) used to help ignore short bursts of sound. (Default: 0.25) search_time (float, optional): The amount of audio (in seconds) to search for quieter/shorter bursts of audio to include prior to the detected trigger point. (Default: 1.0) allowed_gap (float, optional): The allowed gap (in seconds) between quieter/shorter bursts of audio to include prior to the detected trigger point. (Default: 0.25) pre_trigger_time (float, optional): The amount of audio (in seconds) to preserve before the trigger point and any found quieter/shorter bursts. (Default: 0.0) boot_time (float, optional): The algorithm (internally) uses adaptive noise estimation/reduction in order to detect the start of the wanted audio. This option sets the time for the initial noise estimate. (Default: 0.35) noise_up_time (float, optional): Time constant used by the adaptive noise estimator for when the noise level is increasing. (Default: 0.1) noise_down_time (float, optional): Time constant used by the adaptive noise estimator for when the noise level is decreasing. (Default: 0.01) noise_reduction_amount (float, optional): Amount of noise reduction to use in the detection algorithm (e.g. 0, 0.5, ...). (Default: 1.35) measure_freq (float, optional): Frequency of the algorithm’s processing/measurements. (Default: 20.0) measure_duration (float, optional): Measurement duration. (Default: Twice the measurement period; i.e. with overlap.) measure_smooth_time (float, optional): Time constant used to smooth spectral measurements. (Default: 0.4) hp_filter_freq (float, optional): "Brick-wall" frequency of high-pass filter applied at the input to the detector algorithm. (Default: 50.0) lp_filter_freq (float, optional): "Brick-wall" frequency of low-pass filter applied at the input to the detector algorithm. (Default: 6000.0) hp_lifter_freq (float, optional): "Brick-wall" frequency of high-pass lifter used in the detector algorithm. (Default: 150.0) lp_lifter_freq (float, optional): "Brick-wall" frequency of low-pass lifter used in the detector algorithm. (Default: 2000.0) Returns: Tensor: Tensor of audio of dimension `(..., time)`. Reference: - http://sox.sourceforge.net/sox.html """ if waveform.ndim > 2: warnings.warn( "Expected input tensor dimension of 1 for single channel" f" or 2 for multi-channel. Got {waveform.ndim} instead. " "Batch semantics is not supported. 
" "Please refer to https://github.com/pytorch/audio/issues/1348" " and https://github.com/pytorch/audio/issues/1468." ) measure_duration: float = ( 2.0 / measure_freq if measure_duration is None else measure_duration ) measure_len_ws = int(sample_rate * measure_duration + 0.5) measure_len_ns = measure_len_ws # for (dft_len_ws = 16; dft_len_ws < measure_len_ws; dft_len_ws <<= 1); dft_len_ws = 16 while dft_len_ws < measure_len_ws: dft_len_ws *= 2 measure_period_ns = int(sample_rate / measure_freq + 0.5) measures_len = math.ceil(search_time * measure_freq) search_pre_trigger_len_ns = measures_len * measure_period_ns gap_len = int(allowed_gap * measure_freq + 0.5) fixed_pre_trigger_len_ns = int(pre_trigger_time * sample_rate + 0.5) samplesLen_ns = ( fixed_pre_trigger_len_ns + search_pre_trigger_len_ns + measure_len_ns ) spectrum_window = torch.zeros(measure_len_ws) for i in range(measure_len_ws): # sox.h:741 define SOX_SAMPLE_MIN (sox_sample_t)SOX_INT_MIN(32) spectrum_window[i] = 2.0 / math.sqrt(float(measure_len_ws)) # lsx_apply_hann(spectrum_window, (int)measure_len_ws); spectrum_window *= torch.hann_window(measure_len_ws, dtype=torch.float) spectrum_start: int = int(hp_filter_freq / sample_rate * dft_len_ws + 0.5) spectrum_start: int = max(spectrum_start, 1) spectrum_end: int = int(lp_filter_freq / sample_rate * dft_len_ws + 0.5) spectrum_end: int = min(spectrum_end, dft_len_ws // 2) cepstrum_window = torch.zeros(spectrum_end - spectrum_start) for i in range(spectrum_end - spectrum_start): cepstrum_window[i] = 2.0 / math.sqrt(float(spectrum_end) - spectrum_start) # lsx_apply_hann(cepstrum_window,(int)(spectrum_end - spectrum_start)); cepstrum_window *= torch.hann_window( spectrum_end - spectrum_start, dtype=torch.float ) cepstrum_start = math.ceil(sample_rate * 0.5 / lp_lifter_freq) cepstrum_end = math.floor(sample_rate * 0.5 / hp_lifter_freq) cepstrum_end = min(cepstrum_end, dft_len_ws // 4) assert cepstrum_end > cepstrum_start noise_up_time_mult = math.exp(-1.0 / (noise_up_time * measure_freq)) noise_down_time_mult = math.exp(-1.0 / (noise_down_time * measure_freq)) measure_smooth_time_mult = math.exp(-1.0 / (measure_smooth_time * measure_freq)) trigger_meas_time_mult = math.exp(-1.0 / (trigger_time * measure_freq)) boot_count_max = int(boot_time * measure_freq - 0.5) measure_timer_ns = measure_len_ns boot_count = measures_index = flushedLen_ns = samplesIndex_ns = 0 # pack batch shape = waveform.size() waveform = waveform.view(-1, shape[-1]) n_channels, ilen = waveform.size() mean_meas = torch.zeros(n_channels) samples = torch.zeros(n_channels, samplesLen_ns) spectrum = torch.zeros(n_channels, dft_len_ws) noise_spectrum = torch.zeros(n_channels, dft_len_ws) measures = torch.zeros(n_channels, measures_len) has_triggered: bool = False num_measures_to_flush: int = 0 pos: int = 0 while pos < ilen and not has_triggered: measure_timer_ns -= 1 for i in range(n_channels): samples[i, samplesIndex_ns] = waveform[i, pos] # if (!p->measure_timer_ns) { if measure_timer_ns == 0: index_ns: int = ( samplesIndex_ns + samplesLen_ns - measure_len_ns ) % samplesLen_ns meas: float = _measure( measure_len_ws=measure_len_ws, samples=samples[i], spectrum=spectrum[i], noise_spectrum=noise_spectrum[i], spectrum_window=spectrum_window, spectrum_start=spectrum_start, spectrum_end=spectrum_end, cepstrum_window=cepstrum_window, cepstrum_start=cepstrum_start, cepstrum_end=cepstrum_end, noise_reduction_amount=noise_reduction_amount, measure_smooth_time_mult=measure_smooth_time_mult, 
noise_up_time_mult=noise_up_time_mult, noise_down_time_mult=noise_down_time_mult, index_ns=index_ns, boot_count=boot_count, ) measures[i, measures_index] = meas mean_meas[i] = mean_meas[i] * trigger_meas_time_mult + meas * ( 1.0 - trigger_meas_time_mult ) has_triggered = has_triggered or (mean_meas[i] >= trigger_level) if has_triggered: n: int = measures_len k: int = measures_index jTrigger: int = n jZero: int = n j: int = 0 for j in range(n): if (measures[i, k] >= trigger_level) and ( j <= jTrigger + gap_len ): jZero = jTrigger = j elif (measures[i, k] == 0) and (jTrigger >= jZero): jZero = j k = (k + n - 1) % n j = min(j, jZero) # num_measures_to_flush = range_limit(j, num_measures_to_flush, n); num_measures_to_flush = min(max(num_measures_to_flush, j), n) # end if has_triggered # end if (measure_timer_ns == 0): # end for samplesIndex_ns += 1 pos += 1 # end while if samplesIndex_ns == samplesLen_ns: samplesIndex_ns = 0 if measure_timer_ns == 0: measure_timer_ns = measure_period_ns measures_index += 1 measures_index = measures_index % measures_len if boot_count >= 0: boot_count = -1 if boot_count == boot_count_max else boot_count + 1 if has_triggered: flushedLen_ns = (measures_len - num_measures_to_flush) * measure_period_ns samplesIndex_ns = (samplesIndex_ns + flushedLen_ns) % samplesLen_ns res = waveform[:, pos - samplesLen_ns + flushedLen_ns:] # unpack batch return res.view(shape[:-1] + res.shape[-1:])
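# Usage sketch: a minimal, hypothetical example of how the filtering routines defined
# above compose (biquad low-pass design, zero-phase IIR filtering, and silence trimming).
# It assumes these functions are re-exported via torchaudio.functional, as the package
# __init__ that follows suggests; the sample rate, tensor shapes, and the example
# filter coefficients are illustrative only, not values prescribed by the library.

import torch
import torchaudio.functional as F

sample_rate = 16000
waveform = torch.rand(1, sample_rate) * 2 - 1  # 1 channel, 1 second of noise in [-1, 1]

# Biquad low-pass at 300 Hz; internally this builds (b0, b1, b2, a0, a1, a2)
# and dispatches to biquad()/lfilter().
lowpassed = F.lowpass_biquad(waveform, sample_rate, cutoff_freq=300.0, Q=0.707)

# Zero-phase filtering: the same difference-equation coefficients applied forward
# and backward. a_coeffs and b_coeffs must have matching sizes (pad with zeros).
b_coeffs = torch.tensor([0.25, 0.25, 0.0])
a_coeffs = torch.tensor([1.0, -0.5, 0.0])
zero_phase = F.filtfilt(waveform, a_coeffs, b_coeffs)

# Trim silence and quiet background from the front of the recording.
trimmed = F.vad(lowpassed, sample_rate)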
from .functional import ( amplitude_to_DB, compute_deltas, compute_kaldi_pitch, create_dct, melscale_fbanks, linear_fbanks, DB_to_amplitude, detect_pitch_frequency, inverse_spectrogram, griffinlim, mask_along_axis, mask_along_axis_iid, mu_law_encoding, mu_law_decoding, phase_vocoder, sliding_window_cmn, spectrogram, spectral_centroid, apply_codec, resample, edit_distance, pitch_shift, rnnt_loss, ) from .filtering import ( allpass_biquad, band_biquad, bandpass_biquad, bandreject_biquad, bass_biquad, biquad, contrast, dither, dcshift, deemph_biquad, equalizer_biquad, filtfilt, flanger, gain, highpass_biquad, lfilter, lowpass_biquad, overdrive, phaser, riaa_biquad, treble_biquad, vad, ) __all__ = [ 'amplitude_to_DB', 'compute_deltas', 'compute_kaldi_pitch', 'create_dct', 'melscale_fbanks', 'linear_fbanks', 'DB_to_amplitude', 'detect_pitch_frequency', 'griffinlim', 'mask_along_axis', 'mask_along_axis_iid', 'mu_law_encoding', 'mu_law_decoding', 'phase_vocoder', 'sliding_window_cmn', 'spectrogram', 'inverse_spectrogram', 'spectral_centroid', 'allpass_biquad', 'band_biquad', 'bandpass_biquad', 'bandreject_biquad', 'bass_biquad', 'biquad', 'contrast', 'dither', 'dcshift', 'deemph_biquad', 'equalizer_biquad', 'filtfilt', 'flanger', 'gain', 'highpass_biquad', 'lfilter', 'lowpass_biquad', 'overdrive', 'phaser', 'riaa_biquad', 'treble_biquad', 'vad', 'apply_codec', 'resample', 'edit_distance', 'pitch_shift', 'rnnt_loss', ]
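# Usage sketch: a minimal, hypothetical round trip through the spectrogram utilities
# defined in the module that follows: power spectrogram -> decibel scaling -> Griffin-Lim
# reconstruction. It assumes the torchaudio.functional namespace assembled above; the
# STFT parameters, dB settings, and waveform length are illustrative only.

import torch
import torchaudio.functional as F

n_fft, hop_length, win_length = 400, 200, 400
window = torch.hann_window(win_length)
waveform = torch.rand(1, 16000) * 2 - 1  # 1 channel, 1 second at a nominal 16 kHz

# Power spectrogram of shape (channel, n_fft // 2 + 1, frames).
spec = F.spectrogram(
    waveform, pad=0, window=window, n_fft=n_fft, hop_length=hop_length,
    win_length=win_length, power=2.0, normalized=False,
)

# Convert to decibels for inspection (multiplier=10 for a power spectrogram,
# db_multiplier=0 corresponds to a reference value of 1.0).
spec_db = F.amplitude_to_DB(spec, multiplier=10.0, amin=1e-10, db_multiplier=0.0, top_db=80.0)

# Estimate a time-domain signal back from the magnitude-only spectrogram.
reconstructed = F.griffinlim(
    spec, window=window, n_fft=n_fft, hop_length=hop_length, win_length=win_length,
    power=2.0, n_iter=32, momentum=0.99, length=waveform.size(-1), rand_init=True,
)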
# -*- coding: utf-8 -*- from collections.abc import Sequence import io import math import warnings from typing import Optional, Tuple import torch from torch import Tensor from torchaudio._internal import module_utils as _mod_utils import torchaudio __all__ = [ "spectrogram", "inverse_spectrogram", "griffinlim", "amplitude_to_DB", "DB_to_amplitude", "compute_deltas", "compute_kaldi_pitch", "melscale_fbanks", "linear_fbanks", "create_dct", "compute_deltas", "detect_pitch_frequency", "DB_to_amplitude", "mu_law_encoding", "mu_law_decoding", "phase_vocoder", 'mask_along_axis', 'mask_along_axis_iid', 'sliding_window_cmn', "spectral_centroid", "apply_codec", "resample", "edit_distance", "pitch_shift", "rnnt_loss", ] def spectrogram( waveform: Tensor, pad: int, window: Tensor, n_fft: int, hop_length: int, win_length: int, power: Optional[float], normalized: bool, center: bool = True, pad_mode: str = "reflect", onesided: bool = True, return_complex: Optional[bool] = None, ) -> Tensor: r"""Create a spectrogram or a batch of spectrograms from a raw audio signal. The spectrogram can be either magnitude-only or complex. Args: waveform (Tensor): Tensor of audio of dimension `(..., time)` pad (int): Two sided padding of signal window (Tensor): Window tensor that is applied/multiplied to each frame/window n_fft (int): Size of FFT hop_length (int): Length of hop between STFT windows win_length (int): Window size power (float or None): Exponent for the magnitude spectrogram, (must be > 0) e.g., 1 for energy, 2 for power, etc. If None, then the complex spectrum is returned instead. normalized (bool): Whether to normalize by magnitude after stft center (bool, optional): whether to pad :attr:`waveform` on both sides so that the :math:`t`-th frame is centered at time :math:`t \times \text{hop\_length}`. Default: ``True`` pad_mode (string, optional): controls the padding method used when :attr:`center` is ``True``. Default: ``"reflect"`` onesided (bool, optional): controls whether to return half of results to avoid redundancy. Default: ``True`` return_complex (bool, optional): Deprecated and not used. Returns: Tensor: Dimension `(..., freq, time)`, freq is ``n_fft // 2 + 1`` and ``n_fft`` is the number of Fourier bins, and time is the number of window hops (n_frame). """ if return_complex is not None: warnings.warn( "`return_complex` argument is now deprecated and is not effective." "`torchaudio.functional.spectrogram(power=None)` always returns a tensor with " "complex dtype. Please remove the argument in the function call." 
) if pad > 0: # TODO add "with torch.no_grad():" back when JIT supports it waveform = torch.nn.functional.pad(waveform, (pad, pad), "constant") # pack batch shape = waveform.size() waveform = waveform.reshape(-1, shape[-1]) # default values are consistent with librosa.core.spectrum._spectrogram spec_f = torch.stft( input=waveform, n_fft=n_fft, hop_length=hop_length, win_length=win_length, window=window, center=center, pad_mode=pad_mode, normalized=False, onesided=onesided, return_complex=True, ) # unpack batch spec_f = spec_f.reshape(shape[:-1] + spec_f.shape[-2:]) if normalized: spec_f /= window.pow(2.).sum().sqrt() if power is not None: if power == 1.0: return spec_f.abs() return spec_f.abs().pow(power) return spec_f def inverse_spectrogram( spectrogram: Tensor, length: Optional[int], pad: int, window: Tensor, n_fft: int, hop_length: int, win_length: int, normalized: bool, center: bool = True, pad_mode: str = "reflect", onesided: bool = True, ) -> Tensor: r"""Create an inverse spectrogram or a batch of inverse spectrograms from the provided complex-valued spectrogram. Args: spectrogram (Tensor): Complex tensor of audio of dimension (..., freq, time). length (int or None): The output length of the waveform. pad (int): Two sided padding of signal. It is only effective when ``length`` is provided. window (Tensor): Window tensor that is applied/multiplied to each frame/window n_fft (int): Size of FFT hop_length (int): Length of hop between STFT windows win_length (int): Window size normalized (bool): Whether the stft output was normalized by magnitude center (bool, optional): whether the waveform was padded on both sides so that the :math:`t`-th frame is centered at time :math:`t \times \text{hop\_length}`. Default: ``True`` pad_mode (string, optional): controls the padding method used when :attr:`center` is ``True``. This parameter is provided for compatibility with the spectrogram function and is not used. Default: ``"reflect"`` onesided (bool, optional): controls whether spectrogram was done in onesided mode. Default: ``True`` Returns: Tensor: Dimension `(..., time)`. Least squares estimation of the original signal. """ if not spectrogram.is_complex(): raise ValueError("Expected `spectrogram` to be complex dtype.") if normalized: spectrogram = spectrogram * window.pow(2.).sum().sqrt() # pack batch shape = spectrogram.size() spectrogram = spectrogram.reshape(-1, shape[-2], shape[-1]) # default values are consistent with librosa.core.spectrum._spectrogram waveform = torch.istft( input=spectrogram, n_fft=n_fft, hop_length=hop_length, win_length=win_length, window=window, center=center, normalized=False, onesided=onesided, length=length + 2 * pad if length is not None else None, return_complex=False, ) if length is not None and pad > 0: # remove padding from front and back waveform = waveform[:, pad:-pad] # unpack batch waveform = waveform.reshape(shape[:-2] + waveform.shape[-1:]) return waveform def _get_complex_dtype(real_dtype: torch.dtype): if real_dtype == torch.double: return torch.cdouble if real_dtype == torch.float: return torch.cfloat if real_dtype == torch.half: return torch.complex32 raise ValueError(f'Unexpected dtype {real_dtype}') def griffinlim( specgram: Tensor, window: Tensor, n_fft: int, hop_length: int, win_length: int, power: float, n_iter: int, momentum: float, length: Optional[int], rand_init: bool ) -> Tensor: r"""Compute waveform from a linear scale magnitude spectrogram using the Griffin-Lim transformation. 
Implementation ported from *librosa* [:footcite:`brian_mcfee-proc-scipy-2015`], *A fast Griffin-Lim algorithm* [:footcite:`6701851`] and *Signal estimation from modified short-time Fourier transform* [:footcite:`1172092`]. Args: specgram (Tensor): A magnitude-only STFT spectrogram of dimension `(..., freq, frames)` where freq is ``n_fft // 2 + 1``. window (Tensor): Window tensor that is applied/multiplied to each frame/window n_fft (int): Size of FFT, creates ``n_fft // 2 + 1`` bins hop_length (int): Length of hop between STFT windows. ( Default: ``win_length // 2``) win_length (int): Window size. (Default: ``n_fft``) power (float): Exponent for the magnitude spectrogram, (must be > 0) e.g., 1 for energy, 2 for power, etc. n_iter (int): Number of iteration for phase recovery process. momentum (float): The momentum parameter for fast Griffin-Lim. Setting this to 0 recovers the original Griffin-Lim method. Values near 1 can lead to faster convergence, but above 1 may not converge. length (int or None): Array length of the expected output. rand_init (bool): Initializes phase randomly if True, to zero otherwise. Returns: Tensor: waveform of `(..., time)`, where time equals the ``length`` parameter if given. """ assert momentum < 1, 'momentum={} > 1 can be unstable'.format(momentum) assert momentum >= 0, 'momentum={} < 0'.format(momentum) # pack batch shape = specgram.size() specgram = specgram.reshape([-1] + list(shape[-2:])) specgram = specgram.pow(1 / power) # initialize the phase if rand_init: angles = torch.rand( specgram.size(), dtype=_get_complex_dtype(specgram.dtype), device=specgram.device) else: angles = torch.full( specgram.size(), 1, dtype=_get_complex_dtype(specgram.dtype), device=specgram.device) # And initialize the previous iterate to 0 tprev = torch.tensor(0., dtype=specgram.dtype, device=specgram.device) for _ in range(n_iter): # Invert with our current estimate of the phases inverse = torch.istft(specgram * angles, n_fft=n_fft, hop_length=hop_length, win_length=win_length, window=window, length=length) # Rebuild the spectrogram rebuilt = torch.stft( input=inverse, n_fft=n_fft, hop_length=hop_length, win_length=win_length, window=window, center=True, pad_mode='reflect', normalized=False, onesided=True, return_complex=True, ) # Update our phase estimates angles = rebuilt if momentum: angles = angles - tprev.mul_(momentum / (1 + momentum)) angles = angles.div(angles.abs().add(1e-16)) # Store the previous iterate tprev = rebuilt # Return the final phase estimates waveform = torch.istft(specgram * angles, n_fft=n_fft, hop_length=hop_length, win_length=win_length, window=window, length=length) # unpack batch waveform = waveform.reshape(shape[:-2] + waveform.shape[-1:]) return waveform def amplitude_to_DB( x: Tensor, multiplier: float, amin: float, db_multiplier: float, top_db: Optional[float] = None ) -> Tensor: r"""Turn a spectrogram from the power/amplitude scale to the decibel scale. The output of each tensor in a batch depends on the maximum value of that tensor, and so may return different values for an audio clip split into snippets vs. a full clip. Args: x (Tensor): Input spectrogram(s) before being converted to decibel scale. Input should take the form `(..., freq, time)`. Batched inputs should include a channel dimension and have the form `(batch, channel, freq, time)`. multiplier (float): Use 10. for power and 20. 
for amplitude amin (float): Number to clamp ``x`` db_multiplier (float): Log10(max(reference value and amin)) top_db (float or None, optional): Minimum negative cut-off in decibels. A reasonable number is 80. (Default: ``None``) Returns: Tensor: Output tensor in decibel scale """ x_db = multiplier * torch.log10(torch.clamp(x, min=amin)) x_db -= multiplier * db_multiplier if top_db is not None: # Expand batch shape = x_db.size() packed_channels = shape[-3] if x_db.dim() > 2 else 1 x_db = x_db.reshape(-1, packed_channels, shape[-2], shape[-1]) x_db = torch.max(x_db, (x_db.amax(dim=(-3, -2, -1)) - top_db).view(-1, 1, 1, 1)) # Repack batch x_db = x_db.reshape(shape) return x_db def DB_to_amplitude( x: Tensor, ref: float, power: float ) -> Tensor: r"""Turn a tensor from the decibel scale to the power/amplitude scale. Args: x (Tensor): Input tensor before being converted to power/amplitude scale. ref (float): Reference which the output will be scaled by. power (float): If power equals 1, will compute DB to power. If 0.5, will compute DB to amplitude. Returns: Tensor: Output tensor in power/amplitude scale. """ return ref * torch.pow(torch.pow(10.0, 0.1 * x), power) def _hz_to_mel(freq: float, mel_scale: str = "htk") -> float: r"""Convert Hz to Mels. Args: freqs (float): Frequencies in Hz mel_scale (str, optional): Scale to use: ``htk`` or ``slaney``. (Default: ``htk``) Returns: mels (float): Frequency in Mels """ if mel_scale not in ['slaney', 'htk']: raise ValueError('mel_scale should be one of "htk" or "slaney".') if mel_scale == "htk": return 2595.0 * math.log10(1.0 + (freq / 700.0)) # Fill in the linear part f_min = 0.0 f_sp = 200.0 / 3 mels = (freq - f_min) / f_sp # Fill in the log-scale part min_log_hz = 1000.0 min_log_mel = (min_log_hz - f_min) / f_sp logstep = math.log(6.4) / 27.0 if freq >= min_log_hz: mels = min_log_mel + math.log(freq / min_log_hz) / logstep return mels def _mel_to_hz(mels: Tensor, mel_scale: str = "htk") -> Tensor: """Convert mel bin numbers to frequencies. Args: mels (Tensor): Mel frequencies mel_scale (str, optional): Scale to use: ``htk`` or ``slaney``. (Default: ``htk``) Returns: freqs (Tensor): Mels converted in Hz """ if mel_scale not in ['slaney', 'htk']: raise ValueError('mel_scale should be one of "htk" or "slaney".') if mel_scale == "htk": return 700.0 * (10.0**(mels / 2595.0) - 1.0) # Fill in the linear scale f_min = 0.0 f_sp = 200.0 / 3 freqs = f_min + f_sp * mels # And now the nonlinear scale min_log_hz = 1000.0 min_log_mel = (min_log_hz - f_min) / f_sp logstep = math.log(6.4) / 27.0 log_t = (mels >= min_log_mel) freqs[log_t] = min_log_hz * torch.exp(logstep * (mels[log_t] - min_log_mel)) return freqs def _create_triangular_filterbank( all_freqs: Tensor, f_pts: Tensor, ) -> Tensor: """Create a triangular filter bank. Args: all_freqs (Tensor): STFT freq points of size (`n_freqs`). f_pts (Tensor): Filter mid points of size (`n_filter`). Returns: fb (Tensor): The filter bank of size (`n_freqs`, `n_filter`). 
""" # Adopted from Librosa # calculate the difference between each filter mid point and each stft freq point in hertz f_diff = f_pts[1:] - f_pts[:-1] # (n_filter + 1) slopes = f_pts.unsqueeze(0) - all_freqs.unsqueeze(1) # (n_freqs, n_filter + 2) # create overlapping triangles zero = torch.zeros(1) down_slopes = (-1.0 * slopes[:, :-2]) / f_diff[:-1] # (n_freqs, n_filter) up_slopes = slopes[:, 2:] / f_diff[1:] # (n_freqs, n_filter) fb = torch.max(zero, torch.min(down_slopes, up_slopes)) return fb def melscale_fbanks( n_freqs: int, f_min: float, f_max: float, n_mels: int, sample_rate: int, norm: Optional[str] = None, mel_scale: str = "htk", ) -> Tensor: r"""Create a frequency bin conversion matrix. Note: For the sake of the numerical compatibility with librosa, not all the coefficients in the resulting filter bank has magnitude of 1. .. image:: https://download.pytorch.org/torchaudio/doc-assets/mel_fbanks.png :alt: Visualization of generated filter bank Args: n_freqs (int): Number of frequencies to highlight/apply f_min (float): Minimum frequency (Hz) f_max (float): Maximum frequency (Hz) n_mels (int): Number of mel filterbanks sample_rate (int): Sample rate of the audio waveform norm (str or None, optional): If 'slaney', divide the triangular mel weights by the width of the mel band (area normalization). (Default: ``None``) mel_scale (str, optional): Scale to use: ``htk`` or ``slaney``. (Default: ``htk``) Returns: Tensor: Triangular filter banks (fb matrix) of size (``n_freqs``, ``n_mels``) meaning number of frequencies to highlight/apply to x the number of filterbanks. Each column is a filterbank so that assuming there is a matrix A of size (..., ``n_freqs``), the applied result would be ``A * melscale_fbanks(A.size(-1), ...)``. """ if norm is not None and norm != "slaney": raise ValueError("norm must be one of None or 'slaney'") # freq bins all_freqs = torch.linspace(0, sample_rate // 2, n_freqs) # calculate mel freq bins m_min = _hz_to_mel(f_min, mel_scale=mel_scale) m_max = _hz_to_mel(f_max, mel_scale=mel_scale) m_pts = torch.linspace(m_min, m_max, n_mels + 2) f_pts = _mel_to_hz(m_pts, mel_scale=mel_scale) # create filterbank fb = _create_triangular_filterbank(all_freqs, f_pts) if norm is not None and norm == "slaney": # Slaney-style mel is scaled to be approx constant energy per channel enorm = 2.0 / (f_pts[2:n_mels + 2] - f_pts[:n_mels]) fb *= enorm.unsqueeze(0) if (fb.max(dim=0).values == 0.).any(): warnings.warn( "At least one mel filterbank has all zero values. " f"The value for `n_mels` ({n_mels}) may be set too high. " f"Or, the value for `n_freqs` ({n_freqs}) may be set too low." ) return fb def linear_fbanks( n_freqs: int, f_min: float, f_max: float, n_filter: int, sample_rate: int, ) -> Tensor: r"""Creates a linear triangular filterbank. Note: For the sake of the numerical compatibility with librosa, not all the coefficients in the resulting filter bank has magnitude of 1. .. image:: https://download.pytorch.org/torchaudio/doc-assets/lin_fbanks.png :alt: Visualization of generated filter bank Args: n_freqs (int): Number of frequencies to highlight/apply f_min (float): Minimum frequency (Hz) f_max (float): Maximum frequency (Hz) n_filter (int): Number of (linear) triangular filter sample_rate (int): Sample rate of the audio waveform Returns: Tensor: Triangular filter banks (fb matrix) of size (``n_freqs``, ``n_filter``) meaning number of frequencies to highlight/apply to x the number of filterbanks. 
Each column is a filterbank so that assuming there is a matrix A of size (..., ``n_freqs``), the applied result would be ``A * linear_fbanks(A.size(-1), ...)``. """ # freq bins all_freqs = torch.linspace(0, sample_rate // 2, n_freqs) # filter mid-points f_pts = torch.linspace(f_min, f_max, n_filter + 2) # create filterbank fb = _create_triangular_filterbank(all_freqs, f_pts) return fb def create_dct( n_mfcc: int, n_mels: int, norm: Optional[str] ) -> Tensor: r"""Create a DCT transformation matrix with shape (``n_mels``, ``n_mfcc``), normalized depending on norm. Args: n_mfcc (int): Number of mfc coefficients to retain n_mels (int): Number of mel filterbanks norm (str or None): Norm to use (either 'ortho' or None) Returns: Tensor: The transformation matrix, to be right-multiplied to row-wise data of size (``n_mels``, ``n_mfcc``). """ # http://en.wikipedia.org/wiki/Discrete_cosine_transform#DCT-II n = torch.arange(float(n_mels)) k = torch.arange(float(n_mfcc)).unsqueeze(1) dct = torch.cos(math.pi / float(n_mels) * (n + 0.5) * k) # size (n_mfcc, n_mels) if norm is None: dct *= 2.0 else: assert norm == "ortho" dct[0] *= 1.0 / math.sqrt(2.0) dct *= math.sqrt(2.0 / float(n_mels)) return dct.t() def mu_law_encoding( x: Tensor, quantization_channels: int ) -> Tensor: r"""Encode signal based on mu-law companding. For more info see the `Wikipedia Entry <https://en.wikipedia.org/wiki/%CE%9C-law_algorithm>`_ This algorithm assumes the signal has been scaled to between -1 and 1 and returns a signal encoded with values from 0 to quantization_channels - 1. Args: x (Tensor): Input tensor quantization_channels (int): Number of channels Returns: Tensor: Input after mu-law encoding """ mu = quantization_channels - 1.0 if not x.is_floating_point(): x = x.to(torch.float) mu = torch.tensor(mu, dtype=x.dtype) x_mu = torch.sign(x) * torch.log1p(mu * torch.abs(x)) / torch.log1p(mu) x_mu = ((x_mu + 1) / 2 * mu + 0.5).to(torch.int64) return x_mu def mu_law_decoding( x_mu: Tensor, quantization_channels: int ) -> Tensor: r"""Decode mu-law encoded signal. For more info see the `Wikipedia Entry <https://en.wikipedia.org/wiki/%CE%9C-law_algorithm>`_ This expects an input with values between 0 and quantization_channels - 1 and returns a signal scaled between -1 and 1. Args: x_mu (Tensor): Input tensor quantization_channels (int): Number of channels Returns: Tensor: Input after mu-law decoding """ mu = quantization_channels - 1.0 if not x_mu.is_floating_point(): x_mu = x_mu.to(torch.float) mu = torch.tensor(mu, dtype=x_mu.dtype) x = ((x_mu) / mu) * 2 - 1.0 x = torch.sign(x) * (torch.exp(torch.abs(x) * torch.log1p(mu)) - 1.0) / mu return x def phase_vocoder( complex_specgrams: Tensor, rate: float, phase_advance: Tensor ) -> Tensor: r"""Given a STFT tensor, speed up in time without modifying pitch by a factor of ``rate``. Args: complex_specgrams (Tensor): A tensor of dimension `(..., freq, num_frame)` with complex dtype. rate (float): Speed-up factor phase_advance (Tensor): Expected phase advance in each bin. Dimension of `(freq, 1)` Returns: Tensor: Stretched spectrogram. The resulting tensor is of the same dtype as the input spectrogram, but the number of frames is changed to ``ceil(num_frame / rate)``. 
Example >>> freq, hop_length = 1025, 512 >>> # (channel, freq, time) >>> complex_specgrams = torch.randn(2, freq, 300, dtype=torch.cfloat) >>> rate = 1.3 # Speed up by 30% >>> phase_advance = torch.linspace( >>> 0, math.pi * hop_length, freq)[..., None] >>> x = phase_vocoder(complex_specgrams, rate, phase_advance) >>> x.shape # with 231 == ceil(300 / 1.3) torch.Size([2, 1025, 231]) """ if rate == 1.0: return complex_specgrams # pack batch shape = complex_specgrams.size() complex_specgrams = complex_specgrams.reshape([-1] + list(shape[-2:])) # Figures out the corresponding real dtype, i.e. complex128 -> float64, complex64 -> float32 # Note torch.real is a view so it does not incur any memory copy. real_dtype = torch.real(complex_specgrams).dtype time_steps = torch.arange( 0, complex_specgrams.size(-1), rate, device=complex_specgrams.device, dtype=real_dtype) alphas = time_steps % 1.0 phase_0 = complex_specgrams[..., :1].angle() # Time Padding complex_specgrams = torch.nn.functional.pad(complex_specgrams, [0, 2]) # (new_bins, freq, 2) complex_specgrams_0 = complex_specgrams.index_select(-1, time_steps.long()) complex_specgrams_1 = complex_specgrams.index_select(-1, (time_steps + 1).long()) angle_0 = complex_specgrams_0.angle() angle_1 = complex_specgrams_1.angle() norm_0 = complex_specgrams_0.abs() norm_1 = complex_specgrams_1.abs() phase = angle_1 - angle_0 - phase_advance phase = phase - 2 * math.pi * torch.round(phase / (2 * math.pi)) # Compute Phase Accum phase = phase + phase_advance phase = torch.cat([phase_0, phase[..., :-1]], dim=-1) phase_acc = torch.cumsum(phase, -1) mag = alphas * norm_1 + (1 - alphas) * norm_0 complex_specgrams_stretch = torch.polar(mag, phase_acc) # unpack batch complex_specgrams_stretch = complex_specgrams_stretch.reshape(shape[:-2] + complex_specgrams_stretch.shape[1:]) return complex_specgrams_stretch def mask_along_axis_iid( specgrams: Tensor, mask_param: int, mask_value: float, axis: int ) -> Tensor: r""" Apply a mask along ``axis``. Mask will be applied from indices ``[v_0, v_0 + v)``, where ``v`` is sampled from ``uniform(0, mask_param)``, and ``v_0`` from ``uniform(0, max_v - v)``. Args: specgrams (Tensor): Real spectrograms `(batch, channel, freq, time)` mask_param (int): Number of columns to be masked will be uniformly sampled from [0, mask_param] mask_value (float): Value to assign to the masked columns axis (int): Axis to apply masking on (2 -> frequency, 3 -> time) Returns: Tensor: Masked spectrograms of dimensions `(batch, channel, freq, time)` """ if axis not in [2, 3]: raise ValueError('Only Frequency and Time masking are supported') device = specgrams.device dtype = specgrams.dtype value = torch.rand(specgrams.shape[:2], device=device, dtype=dtype) * mask_param min_value = torch.rand(specgrams.shape[:2], device=device, dtype=dtype) * (specgrams.size(axis) - value) # Create broadcastable mask mask_start = min_value[..., None, None] mask_end = (min_value + value)[..., None, None] mask = torch.arange(0, specgrams.size(axis), device=device, dtype=dtype) # Per batch example masking specgrams = specgrams.transpose(axis, -1) specgrams = specgrams.masked_fill((mask >= mask_start) & (mask < mask_end), mask_value) specgrams = specgrams.transpose(axis, -1) return specgrams def mask_along_axis( specgram: Tensor, mask_param: int, mask_value: float, axis: int ) -> Tensor: r""" Apply a mask along ``axis``. Mask will be applied from indices ``[v_0, v_0 + v)``, where ``v`` is sampled from ``uniform(0, mask_param)``, and ``v_0`` from ``uniform(0, max_v - v)``. 
All examples will have the same mask interval. Args: specgram (Tensor): Real spectrogram `(channel, freq, time)` mask_param (int): Number of columns to be masked will be uniformly sampled from [0, mask_param] mask_value (float): Value to assign to the masked columns axis (int): Axis to apply masking on (1 -> frequency, 2 -> time) Returns: Tensor: Masked spectrogram of dimensions `(channel, freq, time)` """ if axis not in [1, 2]: raise ValueError('Only Frequency and Time masking are supported') # pack batch shape = specgram.size() specgram = specgram.reshape([-1] + list(shape[-2:])) value = torch.rand(1) * mask_param min_value = torch.rand(1) * (specgram.size(axis) - value) mask_start = (min_value.long()).squeeze() mask_end = (min_value.long() + value.long()).squeeze() mask = torch.arange(0, specgram.shape[axis], device=specgram.device, dtype=specgram.dtype) mask = (mask >= mask_start) & (mask < mask_end) if axis == 1: mask = mask.unsqueeze(-1) assert mask_end - mask_start < mask_param specgram = specgram.masked_fill(mask, mask_value) # unpack batch specgram = specgram.reshape(shape[:-2] + specgram.shape[-2:]) return specgram def compute_deltas( specgram: Tensor, win_length: int = 5, mode: str = "replicate" ) -> Tensor: r"""Compute delta coefficients of a tensor, usually a spectrogram: .. math:: d_t = \frac{\sum_{n=1}^{\text{N}} n (c_{t+n} - c_{t-n})}{2 \sum_{n=1}^{\text{N}} n^2} where :math:`d_t` is the deltas at time :math:`t`, :math:`c_t` is the spectrogram coefficients at time :math:`t`, :math:`N` is ``(win_length-1)//2``. Args: specgram (Tensor): Tensor of audio of dimension `(..., freq, time)` win_length (int, optional): The window length used for computing delta (Default: ``5``) mode (str, optional): Mode parameter passed to padding (Default: ``"replicate"``) Returns: Tensor: Tensor of deltas of dimension `(..., freq, time)` Example >>> specgram = torch.randn(1, 40, 1000) >>> delta = compute_deltas(specgram) >>> delta2 = compute_deltas(delta) """ device = specgram.device dtype = specgram.dtype # pack batch shape = specgram.size() specgram = specgram.reshape(1, -1, shape[-1]) assert win_length >= 3 n = (win_length - 1) // 2 # twice sum of integer squared denom = n * (n + 1) * (2 * n + 1) / 3 specgram = torch.nn.functional.pad(specgram, (n, n), mode=mode) kernel = torch.arange(-n, n + 1, 1, device=device, dtype=dtype).repeat(specgram.shape[1], 1, 1) output = torch.nn.functional.conv1d(specgram, kernel, groups=specgram.shape[1]) / denom # unpack batch output = output.reshape(shape) return output def _compute_nccf( waveform: Tensor, sample_rate: int, frame_time: float, freq_low: int ) -> Tensor: r""" Compute Normalized Cross-Correlation Function (NCCF). .. math:: \phi_i(m) = \frac{\sum_{n=b_i}^{b_i + N-1} w(n) w(m+n)}{\sqrt{E(b_i) E(m+b_i)}}, where :math:`\phi_i(m)` is the NCCF at frame :math:`i` with lag :math:`m`, :math:`w` is the waveform, :math:`N` is the length of a frame, :math:`b_i` is the beginning of frame :math:`i`, :math:`E(j)` is the energy :math:`\sum_{n=j}^{j+N-1} w^2(n)`. 
""" EPSILON = 10 ** (-9) # Number of lags to check lags = int(math.ceil(sample_rate / freq_low)) frame_size = int(math.ceil(sample_rate * frame_time)) waveform_length = waveform.size()[-1] num_of_frames = int(math.ceil(waveform_length / frame_size)) p = lags + num_of_frames * frame_size - waveform_length waveform = torch.nn.functional.pad(waveform, (0, p)) # Compute lags output_lag = [] for lag in range(1, lags + 1): s1 = waveform[..., :-lag].unfold(-1, frame_size, frame_size)[..., :num_of_frames, :] s2 = waveform[..., lag:].unfold(-1, frame_size, frame_size)[..., :num_of_frames, :] output_frames = ( (s1 * s2).sum(-1) / (EPSILON + torch.norm(s1, p=2, dim=-1)).pow(2) / (EPSILON + torch.norm(s2, p=2, dim=-1)).pow(2) ) output_lag.append(output_frames.unsqueeze(-1)) nccf = torch.cat(output_lag, -1) return nccf def _combine_max( a: Tuple[Tensor, Tensor], b: Tuple[Tensor, Tensor], thresh: float = 0.99 ) -> Tuple[Tensor, Tensor]: """ Take value from first if bigger than a multiplicative factor of the second, elementwise. """ mask = (a[0] > thresh * b[0]) values = mask * a[0] + ~mask * b[0] indices = mask * a[1] + ~mask * b[1] return values, indices def _find_max_per_frame( nccf: Tensor, sample_rate: int, freq_high: int ) -> Tensor: r""" For each frame, take the highest value of NCCF, apply centered median smoothing, and convert to frequency. Note: If the max among all the lags is very close to the first half of lags, then the latter is taken. """ lag_min = int(math.ceil(sample_rate / freq_high)) # Find near enough max that is smallest best = torch.max(nccf[..., lag_min:], -1) half_size = nccf.shape[-1] // 2 half = torch.max(nccf[..., lag_min:half_size], -1) best = _combine_max(half, best) indices = best[1] # Add back minimal lag indices += lag_min # Add 1 empirical calibration offset indices += 1 return indices def _median_smoothing( indices: Tensor, win_length: int ) -> Tensor: r""" Apply median smoothing to the 1D tensor over the given window. """ # Centered windowed pad_length = (win_length - 1) // 2 # "replicate" padding in any dimension indices = torch.nn.functional.pad( indices, (pad_length, 0), mode="constant", value=0. ) indices[..., :pad_length] = torch.cat(pad_length * [indices[..., pad_length].unsqueeze(-1)], dim=-1) roll = indices.unfold(-1, win_length, 1) values, _ = torch.median(roll, -1) return values def detect_pitch_frequency( waveform: Tensor, sample_rate: int, frame_time: float = 10 ** (-2), win_length: int = 30, freq_low: int = 85, freq_high: int = 3400, ) -> Tensor: r"""Detect pitch frequency. It is implemented using normalized cross-correlation function and median smoothing. Args: waveform (Tensor): Tensor of audio of dimension `(..., freq, time)` sample_rate (int): The sample rate of the waveform (Hz) frame_time (float, optional): Duration of a frame (Default: ``10 ** (-2)``). win_length (int, optional): The window length for median smoothing (in number of frames) (Default: ``30``). freq_low (int, optional): Lowest frequency that can be detected (Hz) (Default: ``85``). freq_high (int, optional): Highest frequency that can be detected (Hz) (Default: ``3400``). 
Returns: Tensor: Tensor of freq of dimension `(..., frame)` """ # pack batch shape = list(waveform.size()) waveform = waveform.reshape([-1] + shape[-1:]) nccf = _compute_nccf(waveform, sample_rate, frame_time, freq_low) indices = _find_max_per_frame(nccf, sample_rate, freq_high) indices = _median_smoothing(indices, win_length) # Convert indices to frequency EPSILON = 10 ** (-9) freq = sample_rate / (EPSILON + indices.to(torch.float)) # unpack batch freq = freq.reshape(shape[:-1] + list(freq.shape[-1:])) return freq def sliding_window_cmn( specgram: Tensor, cmn_window: int = 600, min_cmn_window: int = 100, center: bool = False, norm_vars: bool = False, ) -> Tensor: r""" Apply sliding-window cepstral mean (and optionally variance) normalization per utterance. Args: specgram (Tensor): Tensor of spectrogram of dimension `(..., time, freq)` cmn_window (int, optional): Window in frames for running average CMN computation (int, default = 600) min_cmn_window (int, optional): Minimum CMN window used at start of decoding (adds latency only at start). Only applicable if center == false, ignored if center==true (int, default = 100) center (bool, optional): If true, use a window centered on the current frame (to the extent possible, modulo end effects). If false, window is to the left. (bool, default = false) norm_vars (bool, optional): If true, normalize variance to one. (bool, default = false) Returns: Tensor: Tensor matching input shape `(..., freq, time)` """ input_shape = specgram.shape num_frames, num_feats = input_shape[-2:] specgram = specgram.view(-1, num_frames, num_feats) num_channels = specgram.shape[0] dtype = specgram.dtype device = specgram.device last_window_start = last_window_end = -1 cur_sum = torch.zeros(num_channels, num_feats, dtype=dtype, device=device) cur_sumsq = torch.zeros(num_channels, num_feats, dtype=dtype, device=device) cmn_specgram = torch.zeros( num_channels, num_frames, num_feats, dtype=dtype, device=device) for t in range(num_frames): window_start = 0 window_end = 0 if center: window_start = t - cmn_window // 2 window_end = window_start + cmn_window else: window_start = t - cmn_window window_end = t + 1 if window_start < 0: window_end -= window_start window_start = 0 if not center: if window_end > t: window_end = max(t + 1, min_cmn_window) if window_end > num_frames: window_start -= (window_end - num_frames) window_end = num_frames if window_start < 0: window_start = 0 if last_window_start == -1: input_part = specgram[:, window_start: window_end - window_start, :] cur_sum += torch.sum(input_part, 1) if norm_vars: cur_sumsq += torch.cumsum(input_part ** 2, 1)[:, -1, :] else: if window_start > last_window_start: frame_to_remove = specgram[:, last_window_start, :] cur_sum -= frame_to_remove if norm_vars: cur_sumsq -= (frame_to_remove ** 2) if window_end > last_window_end: frame_to_add = specgram[:, last_window_end, :] cur_sum += frame_to_add if norm_vars: cur_sumsq += (frame_to_add ** 2) window_frames = window_end - window_start last_window_start = window_start last_window_end = window_end cmn_specgram[:, t, :] = specgram[:, t, :] - cur_sum / window_frames if norm_vars: if window_frames == 1: cmn_specgram[:, t, :] = torch.zeros( num_channels, num_feats, dtype=dtype, device=device) else: variance = cur_sumsq variance = variance / window_frames variance -= ((cur_sum ** 2) / (window_frames ** 2)) variance = torch.pow(variance, -0.5) cmn_specgram[:, t, :] *= variance cmn_specgram = cmn_specgram.view(input_shape[:-2] + (num_frames, num_feats)) if len(input_shape) == 2: 
cmn_specgram = cmn_specgram.squeeze(0) return cmn_specgram def spectral_centroid( waveform: Tensor, sample_rate: int, pad: int, window: Tensor, n_fft: int, hop_length: int, win_length: int, ) -> Tensor: r""" Compute the spectral centroid for each channel along the time axis. The spectral centroid is defined as the weighted average of the frequency values, weighted by their magnitude. Args: waveform (Tensor): Tensor of audio of dimension `(..., time)` sample_rate (int): Sample rate of the audio waveform pad (int): Two sided padding of signal window (Tensor): Window tensor that is applied/multiplied to each frame/window n_fft (int): Size of FFT hop_length (int): Length of hop between STFT windows win_length (int): Window size Returns: Tensor: Dimension `(..., time)` """ specgram = spectrogram(waveform, pad=pad, window=window, n_fft=n_fft, hop_length=hop_length, win_length=win_length, power=1., normalized=False) freqs = torch.linspace(0, sample_rate // 2, steps=1 + n_fft // 2, device=specgram.device).reshape((-1, 1)) freq_dim = -2 return (freqs * specgram).sum(dim=freq_dim) / specgram.sum(dim=freq_dim) @_mod_utils.requires_sox() def apply_codec( waveform: Tensor, sample_rate: int, format: str, channels_first: bool = True, compression: Optional[float] = None, encoding: Optional[str] = None, bits_per_sample: Optional[int] = None, ) -> Tensor: r""" Apply codecs as a form of augmentation. Args: waveform (Tensor): Audio data. Must be 2 dimensional. See also ```channels_first```. sample_rate (int): Sample rate of the audio waveform. format (str): File format. channels_first (bool, optional): When True, both the input and output Tensor have dimension `(channel, time)`. Otherwise, they have dimension `(time, channel)`. compression (float or None, optional): Used for formats other than WAV. For more details see :py:func:`torchaudio.backend.sox_io_backend.save`. encoding (str or None, optional): Changes the encoding for the supported formats. For more details see :py:func:`torchaudio.backend.sox_io_backend.save`. bits_per_sample (int or None, optional): Changes the bit depth for the supported formats. For more details see :py:func:`torchaudio.backend.sox_io_backend.save`. Returns: Tensor: Resulting Tensor. If ``channels_first=True``, it has `(channel, time)` else `(time, channel)`. """ bytes = io.BytesIO() torchaudio.backend.sox_io_backend.save(bytes, waveform, sample_rate, channels_first, compression, format, encoding, bits_per_sample ) bytes.seek(0) augmented, _ = torchaudio.sox_effects.sox_effects.apply_effects_file( bytes, effects=[["rate", f"{sample_rate}"]], channels_first=channels_first, format=format) return augmented @_mod_utils.requires_kaldi() def compute_kaldi_pitch( waveform: torch.Tensor, sample_rate: float, frame_length: float = 25.0, frame_shift: float = 10.0, min_f0: float = 50, max_f0: float = 400, soft_min_f0: float = 10.0, penalty_factor: float = 0.1, lowpass_cutoff: float = 1000, resample_frequency: float = 4000, delta_pitch: float = 0.005, nccf_ballast: float = 7000, lowpass_filter_width: int = 1, upsample_filter_width: int = 5, max_frames_latency: int = 0, frames_per_chunk: int = 0, simulate_first_pass_online: bool = False, recompute_frame: int = 500, snip_edges: bool = True, ) -> torch.Tensor: """Extract pitch based on method described in *A pitch extraction algorithm tuned for automatic speech recognition* [:footcite:`6854049`]. This function computes the equivalent of `compute-kaldi-pitch-feats` from Kaldi. Args: waveform (Tensor): The input waveform of shape `(..., time)`. 
sample_rate (float): Sample rate of `waveform`. frame_length (float, optional): Frame length in milliseconds. (default: 25.0) frame_shift (float, optional): Frame shift in milliseconds. (default: 10.0) min_f0 (float, optional): Minimum F0 to search for (Hz) (default: 50.0) max_f0 (float, optional): Maximum F0 to search for (Hz) (default: 400.0) soft_min_f0 (float, optional): Minimum f0, applied in soft way, must not exceed min-f0 (default: 10.0) penalty_factor (float, optional): Cost factor for FO change. (default: 0.1) lowpass_cutoff (float, optional): Cutoff frequency for LowPass filter (Hz) (default: 1000) resample_frequency (float, optional): Frequency that we down-sample the signal to. Must be more than twice lowpass-cutoff. (default: 4000) delta_pitch( float, optional): Smallest relative change in pitch that our algorithm measures. (default: 0.005) nccf_ballast (float, optional): Increasing this factor reduces NCCF for quiet frames (default: 7000) lowpass_filter_width (int, optional): Integer that determines filter width of lowpass filter, more gives sharper filter. (default: 1) upsample_filter_width (int, optional): Integer that determines filter width when upsampling NCCF. (default: 5) max_frames_latency (int, optional): Maximum number of frames of latency that we allow pitch tracking to introduce into the feature processing (affects output only if ``frames_per_chunk > 0`` and ``simulate_first_pass_online=True``) (default: 0) frames_per_chunk (int, optional): The number of frames used for energy normalization. (default: 0) simulate_first_pass_online (bool, optional): If true, the function will output features that correspond to what an online decoder would see in the first pass of decoding -- not the final version of the features, which is the default. (default: False) Relevant if ``frames_per_chunk > 0``. recompute_frame (int, optional): Only relevant for compatibility with online pitch extraction. A non-critical parameter; the frame at which we recompute some of the forward pointers, after revising our estimate of the signal energy. Relevant if ``frames_per_chunk > 0``. (default: 500) snip_edges (bool, optional): If this is set to false, the incomplete frames near the ending edge won't be snipped, so that the number of frames is the file size divided by the frame-shift. This makes different types of features give the same number of frames. (default: True) Returns: Tensor: Pitch feature. Shape: `(batch, frames 2)` where the last dimension corresponds to pitch and NCCF. """ shape = waveform.shape waveform = waveform.reshape(-1, shape[-1]) result = torch.ops.torchaudio.kaldi_ComputeKaldiPitch( waveform, sample_rate, frame_length, frame_shift, min_f0, max_f0, soft_min_f0, penalty_factor, lowpass_cutoff, resample_frequency, delta_pitch, nccf_ballast, lowpass_filter_width, upsample_filter_width, max_frames_latency, frames_per_chunk, simulate_first_pass_online, recompute_frame, snip_edges, ) result = result.reshape(shape[:-1] + result.shape[-2:]) return result def _get_sinc_resample_kernel( orig_freq: int, new_freq: int, gcd: int, lowpass_filter_width: int, rolloff: float, resampling_method: str, beta: Optional[float], device: torch.device = torch.device("cpu"), dtype: Optional[torch.dtype] = None): if not (int(orig_freq) == orig_freq and int(new_freq) == new_freq): raise Exception( "Frequencies must be of integer type to ensure quality resampling computation. 
" "To work around this, manually convert both frequencies to integer values " "that maintain their resampling rate ratio before passing them into the function. " "Example: To downsample a 44100 hz waveform by a factor of 8, use " "`orig_freq=8` and `new_freq=1` instead of `orig_freq=44100` and `new_freq=5512.5`. " "For more information, please refer to https://github.com/pytorch/audio/issues/1487." ) if resampling_method not in ['sinc_interpolation', 'kaiser_window']: raise ValueError('Invalid resampling method: {}'.format(resampling_method)) orig_freq = int(orig_freq) // gcd new_freq = int(new_freq) // gcd assert lowpass_filter_width > 0 kernels = [] base_freq = min(orig_freq, new_freq) # This will perform antialiasing filtering by removing the highest frequencies. # At first I thought I only needed this when downsampling, but when upsampling # you will get edge artifacts without this, as the edge is equivalent to zero padding, # which will add high freq artifacts. base_freq *= rolloff # The key idea of the algorithm is that x(t) can be exactly reconstructed from x[i] (tensor) # using the sinc interpolation formula: # x(t) = sum_i x[i] sinc(pi * orig_freq * (i / orig_freq - t)) # We can then sample the function x(t) with a different sample rate: # y[j] = x(j / new_freq) # or, # y[j] = sum_i x[i] sinc(pi * orig_freq * (i / orig_freq - j / new_freq)) # We see here that y[j] is the convolution of x[i] with a specific filter, for which # we take an FIR approximation, stopping when we see at least `lowpass_filter_width` zeros crossing. # But y[j+1] is going to have a different set of weights and so on, until y[j + new_freq]. # Indeed: # y[j + new_freq] = sum_i x[i] sinc(pi * orig_freq * ((i / orig_freq - (j + new_freq) / new_freq)) # = sum_i x[i] sinc(pi * orig_freq * ((i - orig_freq) / orig_freq - j / new_freq)) # = sum_i x[i + orig_freq] sinc(pi * orig_freq * (i / orig_freq - j / new_freq)) # so y[j+new_freq] uses the same filter as y[j], but on a shifted version of x by `orig_freq`. # This will explain the F.conv1d after, with a stride of orig_freq. width = math.ceil(lowpass_filter_width * orig_freq / base_freq) # If orig_freq is still big after GCD reduction, most filters will be very unbalanced, i.e., # they will have a lot of almost zero values to the left or to the right... # There is probably a way to evaluate those filters more efficiently, but this is kept for # future work. idx_dtype = dtype if dtype is not None else torch.float64 idx = torch.arange(-width, width + orig_freq, device=device, dtype=idx_dtype) for i in range(new_freq): t = (-i / new_freq + idx / orig_freq) * base_freq t = t.clamp_(-lowpass_filter_width, lowpass_filter_width) # we do not use built in torch windows here as we need to evaluate the window # at specific positions, not over a regular grid. 
if resampling_method == "sinc_interpolation": window = torch.cos(t * math.pi / lowpass_filter_width / 2)**2 else: # kaiser_window if beta is None: beta = 14.769656459379492 beta_tensor = torch.tensor(float(beta)) window = torch.i0(beta_tensor * torch.sqrt(1 - (t / lowpass_filter_width) ** 2)) / torch.i0(beta_tensor) t *= math.pi kernel = torch.where(t == 0, torch.tensor(1.).to(t), torch.sin(t) / t) kernel.mul_(window) kernels.append(kernel) scale = base_freq / orig_freq kernels = torch.stack(kernels).view(new_freq, 1, -1).mul_(scale) if dtype is None: kernels = kernels.to(dtype=torch.float32) return kernels, width def _apply_sinc_resample_kernel( waveform: Tensor, orig_freq: int, new_freq: int, gcd: int, kernel: Tensor, width: int, ): orig_freq = int(orig_freq) // gcd new_freq = int(new_freq) // gcd # pack batch shape = waveform.size() waveform = waveform.view(-1, shape[-1]) num_wavs, length = waveform.shape waveform = torch.nn.functional.pad(waveform, (width, width + orig_freq)) resampled = torch.nn.functional.conv1d(waveform[:, None], kernel, stride=orig_freq) resampled = resampled.transpose(1, 2).reshape(num_wavs, -1) target_length = int(math.ceil(new_freq * length / orig_freq)) resampled = resampled[..., :target_length] # unpack batch resampled = resampled.view(shape[:-1] + resampled.shape[-1:]) return resampled def resample( waveform: Tensor, orig_freq: int, new_freq: int, lowpass_filter_width: int = 6, rolloff: float = 0.99, resampling_method: str = "sinc_interpolation", beta: Optional[float] = None, ) -> Tensor: r"""Resamples the waveform at the new frequency using bandlimited interpolation. https://ccrma.stanford.edu/~jos/resample/Theory_Ideal_Bandlimited_Interpolation.html Note: ``transforms.Resample`` precomputes and reuses the resampling kernel, so using it will result in more efficient computation if resampling multiple waveforms with the same resampling parameters. Args: waveform (Tensor): The input signal of dimension `(..., time)` orig_freq (int): The original frequency of the signal new_freq (int): The desired frequency lowpass_filter_width (int, optional): Controls the sharpness of the filter, more == sharper but less efficient. (Default: ``6``) rolloff (float, optional): The roll-off frequency of the filter, as a fraction of the Nyquist. Lower values reduce anti-aliasing, but also reduce some of the highest frequencies. (Default: ``0.99``) resampling_method (str, optional): The resampling method to use. Options: [``sinc_interpolation``, ``kaiser_window``] (Default: ``'sinc_interpolation'``) beta (float or None, optional): The shape parameter used for kaiser window. Returns: Tensor: The waveform at the new frequency of dimension `(..., time).` """ assert orig_freq > 0.0 and new_freq > 0.0 if orig_freq == new_freq: return waveform gcd = math.gcd(int(orig_freq), int(new_freq)) kernel, width = _get_sinc_resample_kernel(orig_freq, new_freq, gcd, lowpass_filter_width, rolloff, resampling_method, beta, waveform.device, waveform.dtype) resampled = _apply_sinc_resample_kernel(waveform, orig_freq, new_freq, gcd, kernel, width) return resampled @torch.jit.unused def edit_distance(seq1: Sequence, seq2: Sequence) -> int: """ Calculate the word level edit (Levenshtein) distance between two sequences. The function computes an edit distance allowing deletion, insertion and substitution. The result is an integer. For most applications, the two input sequences should be the same type. 
If two strings are given, the output is the edit distance between the two strings (character edit distance). If two lists of strings are given, the output is the edit distance between sentences (word edit distance). Users may want to normalize the output by the length of the reference sequence. torchscipt is not supported for this function. Args: seq1 (Sequence): the first sequence to compare. seq2 (Sequence): the second sequence to compare. Returns: int: The distance between the first and second sequences. """ len_sent2 = len(seq2) dold = list(range(len_sent2 + 1)) dnew = [0 for _ in range(len_sent2 + 1)] for i in range(1, len(seq1) + 1): dnew[0] = i for j in range(1, len_sent2 + 1): if seq1[i - 1] == seq2[j - 1]: dnew[j] = dold[j - 1] else: substitution = dold[j - 1] + 1 insertion = dnew[j - 1] + 1 deletion = dold[j] + 1 dnew[j] = min(substitution, insertion, deletion) dnew, dold = dold, dnew return int(dold[-1]) def pitch_shift( waveform: Tensor, sample_rate: int, n_steps: int, bins_per_octave: int = 12, n_fft: int = 512, win_length: Optional[int] = None, hop_length: Optional[int] = None, window: Optional[Tensor] = None, ) -> Tensor: """ Shift the pitch of a waveform by ``n_steps`` steps. Args: waveform (Tensor): The input waveform of shape `(..., time)`. sample_rate (int): Sample rate of `waveform`. n_steps (int): The (fractional) steps to shift `waveform`. bins_per_octave (int, optional): The number of steps per octave (Default: ``12``). n_fft (int, optional): Size of FFT, creates ``n_fft // 2 + 1`` bins (Default: ``512``). win_length (int or None, optional): Window size. If None, then ``n_fft`` is used. (Default: ``None``). hop_length (int or None, optional): Length of hop between STFT windows. If None, then ``win_length // 4`` is used (Default: ``None``). window (Tensor or None, optional): Window tensor that is applied/multiplied to each frame/window. If None, then ``torch.hann_window(win_length)`` is used (Default: ``None``). Returns: Tensor: The pitch-shifted audio waveform of shape `(..., time)`. 
""" if hop_length is None: hop_length = n_fft // 4 if win_length is None: win_length = n_fft if window is None: window = torch.hann_window(window_length=win_length, device=waveform.device) # pack batch shape = waveform.size() waveform = waveform.reshape(-1, shape[-1]) ori_len = shape[-1] rate = 2.0 ** (-float(n_steps) / bins_per_octave) spec_f = torch.stft(input=waveform, n_fft=n_fft, hop_length=hop_length, win_length=win_length, window=window, center=True, pad_mode='reflect', normalized=False, onesided=True, return_complex=True) phase_advance = torch.linspace(0, math.pi * hop_length, spec_f.shape[-2], device=spec_f.device)[..., None] spec_stretch = phase_vocoder(spec_f, rate, phase_advance) len_stretch = int(round(ori_len / rate)) waveform_stretch = torch.istft(spec_stretch, n_fft=n_fft, hop_length=hop_length, win_length=win_length, window=window, length=len_stretch) waveform_shift = resample(waveform_stretch, int(sample_rate / rate), sample_rate) shift_len = waveform_shift.size()[-1] if shift_len > ori_len: waveform_shift = waveform_shift[..., :ori_len] else: waveform_shift = torch.nn.functional.pad(waveform_shift, [0, ori_len - shift_len]) # unpack batch waveform_shift = waveform_shift.view(shape[:-1] + waveform_shift.shape[-1:]) return waveform_shift def rnnt_loss( logits: Tensor, targets: Tensor, logit_lengths: Tensor, target_lengths: Tensor, blank: int = -1, clamp: float = -1, reduction: str = "mean", ): """Compute the RNN Transducer loss from *Sequence Transduction with Recurrent Neural Networks* [:footcite:`graves2012sequence`]. The RNN Transducer loss extends the CTC loss by defining a distribution over output sequences of all lengths, and by jointly modelling both input-output and output-output dependencies. Args: logits (Tensor): Tensor of dimension `(batch, max seq length, max target length + 1, class)` containing output from joiner targets (Tensor): Tensor of dimension `(batch, max target length)` containing targets with zero padded logit_lengths (Tensor): Tensor of dimension `(batch)` containing lengths of each sequence from encoder target_lengths (Tensor): Tensor of dimension `(batch)` containing lengths of targets for each sequence blank (int, optional): blank label (Default: ``-1``) clamp (float, optional): clamp for gradients (Default: ``-1``) reduction (string, optional): Specifies the reduction to apply to the output: ``'none'`` | ``'mean'`` | ``'sum'``. (Default: ``'mean'``) Returns: Tensor: Loss with the reduction option applied. If ``reduction`` is ``'none'``, then size `(batch)`, otherwise scalar. """ if reduction not in ['none', 'mean', 'sum']: raise ValueError("reduction should be one of 'none', 'mean', or 'sum'") if blank < 0: # reinterpret blank index if blank < 0. blank = logits.shape[-1] + blank costs, _ = torch.ops.torchaudio.rnnt_loss( logits=logits, targets=targets, logit_lengths=logit_lengths, target_lengths=target_lengths, blank=blank, clamp=clamp, ) if reduction == 'mean': return costs.mean() elif reduction == 'sum': return costs.sum() return costs
#!/usr/bin/env python3 """ This script should use a very simple, functional programming style. Avoid Jinja macros in favor of native Python functions. Don't go overboard on code generation; use Python only to generate content that can't be easily declared statically using CircleCI's YAML API. Data declarations (e.g. the nested loops for defining the configuration matrix) should be at the top of the file for easy updating. See this comment for design rationale: https://github.com/pytorch/vision/pull/1321#issuecomment-531033978 """ import jinja2 from jinja2 import select_autoescape import yaml import os.path PYTHON_VERSIONS = ["3.6", "3.7", "3.8", "3.9"] CU_VERSIONS_DICT = {"linux": ["cpu", "cu102", "cu111","cu113", "rocm4.1"], "windows": ["cpu", "cu113"], "macos": ["cpu"]} DOC_VERSION = ('linux', '3.8') def build_workflows(prefix='', upload=False, filter_branch=None, indentation=6): w = [] w += build_download_job(filter_branch) for btype in ["wheel", "conda"]: for os_type in ["linux", "macos", "windows"]: for python_version in PYTHON_VERSIONS: for cu_version in CU_VERSIONS_DICT[os_type]: fb = filter_branch if cu_version.startswith("rocm") and btype=="conda": continue if not fb and (os_type == 'linux' and btype == 'wheel' and python_version == '3.8' and cu_version == 'cpu'): # the fields must match the build_docs "requires" dependency fb = '/.*/' w += build_workflow_pair(btype, os_type, python_version, cu_version, fb, prefix, upload) if not filter_branch: # Build on every pull request, but upload only on nightly and tags w += build_doc_job('/.*/') w += upload_doc_job('nightly') w += docstring_parameters_sync_job(None) return indent(indentation, w) def build_download_job(filter_branch): job = { "name": "download_third_parties_nix", } if filter_branch: job["filters"] = gen_filter_branch_tree(filter_branch) return [{"download_third_parties_nix": job}] def build_workflow_pair(btype, os_type, python_version, cu_version, filter_branch, prefix='', upload=False): w = [] base_workflow_name = f"{prefix}binary_{os_type}_{btype}_py{python_version}_{cu_version}" w.append(generate_base_workflow(base_workflow_name, python_version, cu_version, filter_branch, os_type, btype)) if upload: w.append(generate_upload_workflow(base_workflow_name, filter_branch, os_type, btype, cu_version)) if filter_branch == 'nightly' and os_type != 'macos': pydistro = 'pip' if btype == 'wheel' else 'conda' w.append(generate_smoketest_workflow(pydistro, base_workflow_name, filter_branch, python_version, cu_version, os_type)) return w def build_doc_job(filter_branch): job = { "name": "build_docs", "python_version": "3.8", "requires": ["binary_linux_wheel_py3.8_cpu", ], } if filter_branch: job["filters"] = gen_filter_branch_tree(filter_branch) return [{"build_docs": job}] def upload_doc_job(filter_branch): job = { "name": "upload_docs", "context": "org-member", "python_version": "3.8", "requires": ["build_docs", ], } if filter_branch: job["filters"] = gen_filter_branch_tree(filter_branch) return [{"upload_docs": job}] def docstring_parameters_sync_job(filter_branch): job = { "name": "docstring_parameters_sync", "python_version": "3.8", "requires": ["binary_linux_wheel_py3.8_cpu", ], } if filter_branch: job["filters"] = gen_filter_branch_tree(filter_branch) return [{"docstring_parameters_sync": job}] def generate_base_workflow(base_workflow_name, python_version, cu_version, filter_branch, os_type, btype): d = { "name": base_workflow_name, "python_version": python_version, "cuda_version": cu_version, } if os_type in ['linux', 
'macos']: d['requires'] = ['download_third_parties_nix'] if btype == 'conda': d['conda_docker_image'] = f'pytorch/conda-builder:{cu_version.replace("cu1","cuda1")}' elif cu_version.startswith('cu'): d['wheel_docker_image'] = f'pytorch/manylinux-{cu_version.replace("cu1","cuda1")}' elif cu_version.startswith('rocm'): d["wheel_docker_image"] = f"pytorch/manylinux-rocm:{cu_version[len('rocm'):]}" if filter_branch: d["filters"] = gen_filter_branch_tree(filter_branch) return {f"binary_{os_type}_{btype}": d} def gen_filter_branch_tree(*branches): return { "branches": { "only": list(branches), }, "tags": { # Using a raw string here to avoid having to escape # anything "only": r"/v[0-9]+(\.[0-9]+)*-rc[0-9]+/" } } def generate_upload_workflow(base_workflow_name, filter_branch, os_type, btype, cu_version): d = { "name": "{base_workflow_name}_upload".format(base_workflow_name=base_workflow_name), "context": "org-member", "requires": [base_workflow_name], } if btype == 'wheel': d["subfolder"] = "" if os_type == 'macos' else cu_version + "/" if filter_branch: d["filters"] = gen_filter_branch_tree(filter_branch) return {"binary_{btype}_upload".format(btype=btype): d} def generate_smoketest_workflow(pydistro, base_workflow_name, filter_branch, python_version, cu_version, os_type): required_build_suffix = "_upload" required_build_name = base_workflow_name + required_build_suffix smoke_suffix = f"smoke_test_{pydistro}".format(pydistro=pydistro) d = { "name": f"{base_workflow_name}_{smoke_suffix}", "requires": [required_build_name], "python_version": python_version, "cuda_version": cu_version, } if filter_branch: d["filters"] = gen_filter_branch_tree(filter_branch) smoke_name = f"smoke_test_{os_type}_{pydistro}" if pydistro == "conda" and os_type == "linux" and cu_version != "cpu": smoke_name += "_gpu" return {smoke_name: d} def indent(indentation, data_list): return ("\n" + " " * indentation).join(yaml.dump(data_list).splitlines()) def unittest_workflows(indentation=6): jobs = [] jobs += build_download_job(None) for os_type in ["linux", "windows", "macos"]: for device_type in ["cpu", "gpu"]: if os_type == "macos" and device_type == "gpu": continue for i, python_version in enumerate(PYTHON_VERSIONS): job = { "name": f"unittest_{os_type}_{device_type}_py{python_version}", "python_version": python_version, "cuda_version": 'cpu' if device_type == "cpu" else "cu113", } if os_type != "windows": job['requires'] = ['download_third_parties_nix'] jobs.append({f"unittest_{os_type}_{device_type}": job}) if i == 0 and os_type == "linux" and device_type == "cpu": jobs.append({ "stylecheck": { "name": f"stylecheck_py{python_version}", "python_version": python_version, "cuda_version": "cpu", } }) return indent(indentation, jobs) if __name__ == "__main__": d = os.path.dirname(__file__) env = jinja2.Environment( loader=jinja2.FileSystemLoader(d), lstrip_blocks=True, autoescape=select_autoescape(enabled_extensions=('html', 'xml')), ) with open(os.path.join(d, 'config.yml'), 'w') as f: f.write(env.get_template('config.yml.in').render( build_workflows=build_workflows, unittest_workflows=unittest_workflows, )) f.write("\n")
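# ---------------------------------------------------------------------------
# Added note (illustration only): build_workflows() and unittest_workflows()
# are handed to the Jinja template above and are presumably invoked from
# config.yml.in, so each must return a string that is already indented to sit
# under the rendered file's "workflows:" section. indent() relies on yaml.dump
# emitting one block-style list item per job dict; for a hypothetical entry
#     [{"binary_linux_wheel": {"name": "binary_linux_wheel_py3.8_cpu"}}]
# yaml.dump produces
#     - binary_linux_wheel:
#         name: binary_linux_wheel_py3.8_cpu
# and indent(6, ...) re-joins those lines so that every line after the first
# gains six extra leading spaces, matching the template's nesting level.
# ---------------------------------------------------------------------------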
#!/usr/bin/env python """A wrapper script around clang-format, suitable for linting multiple files and to use for continuous integration. This is an alternative API for the clang-format command line. It runs over multiple files and directories in parallel. A diff output is produced and a sensible exit code is returned. """ import argparse import codecs import difflib import fnmatch import io import multiprocessing import os import signal import subprocess import sys import traceback from functools import partial try: from subprocess import DEVNULL # py3k except ImportError: DEVNULL = open(os.devnull, "wb") DEFAULT_EXTENSIONS = 'c,h,C,H,cpp,hpp,cc,hh,c++,h++,cxx,hxx,cu' class ExitStatus: SUCCESS = 0 DIFF = 1 TROUBLE = 2 def list_files(files, recursive=False, extensions=None, exclude=None): if extensions is None: extensions = [] if exclude is None: exclude = [] out = [] for file in files: if recursive and os.path.isdir(file): for dirpath, dnames, fnames in os.walk(file): fpaths = [os.path.join(dirpath, fname) for fname in fnames] for pattern in exclude: # os.walk() supports trimming down the dnames list # by modifying it in-place, # to avoid unnecessary directory listings. dnames[:] = [ x for x in dnames if not fnmatch.fnmatch(os.path.join(dirpath, x), pattern) ] fpaths = [ x for x in fpaths if not fnmatch.fnmatch(x, pattern) ] for f in fpaths: ext = os.path.splitext(f)[1][1:] if ext in extensions: out.append(f) else: out.append(file) return out def make_diff(file, original, reformatted): return list( difflib.unified_diff( original, reformatted, fromfile='{}\t(original)'.format(file), tofile='{}\t(reformatted)'.format(file), n=3)) class DiffError(Exception): def __init__(self, message, errs=None): super(DiffError, self).__init__(message) self.errs = errs or [] class UnexpectedError(Exception): def __init__(self, message, exc=None): super(UnexpectedError, self).__init__(message) self.formatted_traceback = traceback.format_exc() self.exc = exc def run_clang_format_diff_wrapper(args, file): try: ret = run_clang_format_diff(args, file) return ret except DiffError: raise except Exception as e: raise UnexpectedError('{}: {}: {}'.format(file, e.__class__.__name__, e), e) def run_clang_format_diff(args, file): try: with io.open(file, 'r', encoding='utf-8') as f: original = f.readlines() except IOError as exc: raise DiffError(str(exc)) invocation = [args.clang_format_executable, file] # Use of utf-8 to decode the process output. # # Hopefully, this is the correct thing to do. # # It's done due to the following assumptions (which may be incorrect): # - clang-format will returns the bytes read from the files as-is, # without conversion, and it is already assumed that the files use utf-8. # - if the diagnostics were internationalized, they would use utf-8: # > Adding Translations to Clang # > # > Not possible yet! # > Diagnostic strings should be written in UTF-8, # > the client can translate to the relevant code page if needed. # > Each translation completely replaces the format string # > for the diagnostic. 
# > -- http://clang.llvm.org/docs/InternalsManual.html#internals-diag-translation try: proc = subprocess.Popen( invocation, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True, encoding='utf-8') except OSError as exc: raise DiffError( "Command '{}' failed to start: {}".format( subprocess.list2cmdline(invocation), exc ) ) proc_stdout = proc.stdout proc_stderr = proc.stderr # hopefully the stderr pipe won't get full and block the process outs = list(proc_stdout.readlines()) errs = list(proc_stderr.readlines()) proc.wait() if proc.returncode: raise DiffError( "Command '{}' returned non-zero exit status {}".format( subprocess.list2cmdline(invocation), proc.returncode ), errs, ) return make_diff(file, original, outs), errs def bold_red(s): return '\x1b[1m\x1b[31m' + s + '\x1b[0m' def colorize(diff_lines): def bold(s): return '\x1b[1m' + s + '\x1b[0m' def cyan(s): return '\x1b[36m' + s + '\x1b[0m' def green(s): return '\x1b[32m' + s + '\x1b[0m' def red(s): return '\x1b[31m' + s + '\x1b[0m' for line in diff_lines: if line[:4] in ['--- ', '+++ ']: yield bold(line) elif line.startswith('@@ '): yield cyan(line) elif line.startswith('+'): yield green(line) elif line.startswith('-'): yield red(line) else: yield line def print_diff(diff_lines, use_color): if use_color: diff_lines = colorize(diff_lines) sys.stdout.writelines(diff_lines) def print_trouble(prog, message, use_colors): error_text = 'error:' if use_colors: error_text = bold_red(error_text) print("{}: {} {}".format(prog, error_text, message), file=sys.stderr) def main(): parser = argparse.ArgumentParser(description=__doc__) parser.add_argument( '--clang-format-executable', metavar='EXECUTABLE', help='path to the clang-format executable', default='clang-format') parser.add_argument( '--extensions', help='comma separated list of file extensions (default: {})'.format( DEFAULT_EXTENSIONS), default=DEFAULT_EXTENSIONS) parser.add_argument( '-r', '--recursive', action='store_true', help='run recursively over directories') parser.add_argument('files', metavar='file', nargs='+') parser.add_argument( '-q', '--quiet', action='store_true') parser.add_argument( '-j', metavar='N', type=int, default=0, help='run N clang-format jobs in parallel' ' (default number of cpus + 1)') parser.add_argument( '--color', default='auto', choices=['auto', 'always', 'never'], help='show colored diff (default: auto)') parser.add_argument( '-e', '--exclude', metavar='PATTERN', action='append', default=[], help='exclude paths matching the given glob-like pattern(s)' ' from recursive search') args = parser.parse_args() # use default signal handling, like diff return SIGINT value on ^C # https://bugs.python.org/issue14229#msg156446 signal.signal(signal.SIGINT, signal.SIG_DFL) try: signal.SIGPIPE except AttributeError: # compatibility, SIGPIPE does not exist on Windows pass else: signal.signal(signal.SIGPIPE, signal.SIG_DFL) colored_stdout = False colored_stderr = False if args.color == 'always': colored_stdout = True colored_stderr = True elif args.color == 'auto': colored_stdout = sys.stdout.isatty() colored_stderr = sys.stderr.isatty() version_invocation = [args.clang_format_executable, str("--version")] try: subprocess.check_call(version_invocation, stdout=DEVNULL) except subprocess.CalledProcessError as e: print_trouble(parser.prog, str(e), use_colors=colored_stderr) return ExitStatus.TROUBLE except OSError as e: print_trouble( parser.prog, "Command '{}' failed to start: {}".format( subprocess.list2cmdline(version_invocation), e ), 
use_colors=colored_stderr, ) return ExitStatus.TROUBLE retcode = ExitStatus.SUCCESS files = list_files( args.files, recursive=args.recursive, exclude=args.exclude, extensions=args.extensions.split(',')) if not files: return njobs = args.j if njobs == 0: njobs = multiprocessing.cpu_count() + 1 njobs = min(len(files), njobs) if njobs == 1: # execute directly instead of in a pool, # less overhead, simpler stacktraces it = (run_clang_format_diff_wrapper(args, file) for file in files) pool = None else: pool = multiprocessing.Pool(njobs) it = pool.imap_unordered( partial(run_clang_format_diff_wrapper, args), files) while True: try: outs, errs = next(it) except StopIteration: break except DiffError as e: print_trouble(parser.prog, str(e), use_colors=colored_stderr) retcode = ExitStatus.TROUBLE sys.stderr.writelines(e.errs) except UnexpectedError as e: print_trouble(parser.prog, str(e), use_colors=colored_stderr) sys.stderr.write(e.formatted_traceback) retcode = ExitStatus.TROUBLE # stop at the first unexpected error, # something could be very wrong, # don't process all files unnecessarily if pool: pool.terminate() break else: sys.stderr.writelines(errs) if outs == []: continue if not args.quiet: print_diff(outs, use_color=colored_stdout) if retcode == ExitStatus.SUCCESS: retcode = ExitStatus.DIFF return retcode if __name__ == '__main__': sys.exit(main())
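# ---------------------------------------------------------------------------
# Added sketch (not used by the script itself): the core idea above is
# "reformat in memory, diff against the file on disk, fail if the diff is
# non-empty". A condensed standalone version of that check; the script name
# and directories in the CI invocation below are illustrative assumptions.
# ---------------------------------------------------------------------------
def _has_format_diff(path, clang_format='clang-format'):
    with io.open(path, 'r', encoding='utf-8') as f:
        original = f.readlines()
    proc = subprocess.run(
        [clang_format, path],
        stdout=subprocess.PIPE,
        universal_newlines=True,
        check=True)
    reformatted = proc.stdout.splitlines(keepends=True)
    # unified_diff yields nothing when the two versions are identical
    return any(difflib.unified_diff(
        original, reformatted,
        fromfile='{}\t(original)'.format(path),
        tofile='{}\t(reformatted)'.format(path)))

# Typical CI usage of the full script might look like:
#   python run_clang_format.py -r --exclude 'third_party/*' src include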
import asyncio import aiohttp # type: ignore import math import os import datetime import re import boto3 # type: ignore import json import io import argparse import gzip import os from cryptography.hazmat.backends import default_backend import jwt import requests import time from typing import * BUCKET = os.getenv("bucket", "ossci-job-status") APP_ID = int(os.environ["app_id"]) # The private key needs to maintain its newlines, get it via # $ cat key.pem | tr '\n' '|' | pbcopy PRIVATE_KEY = os.environ["private_key"].replace("|", "\n") def app_headers() -> Dict[str, str]: cert_bytes = PRIVATE_KEY.encode() private_key = default_backend().load_pem_private_key(cert_bytes, None) # type: ignore time_since_epoch_in_seconds = int(time.time()) payload = { # issued at time "iat": time_since_epoch_in_seconds, # JWT expiration time (10 minute maximum) "exp": time_since_epoch_in_seconds + (10 * 60), # GitHub App's identifier "iss": APP_ID, } actual_jwt = jwt.encode(payload, private_key, algorithm="RS256") headers = { "Authorization": f"Bearer {actual_jwt}", "Accept": "application/vnd.github.machine-man-preview+json", } return headers def jprint(obj: Any) -> None: print(json.dumps(obj, indent=2)) def installation_id(user: str) -> int: r_bytes = requests.get( "https://api.github.com/app/installations", headers=app_headers() ) r = json.loads(r_bytes.content.decode()) for item in r: if item["account"]["login"] == user: return int(item["id"]) raise RuntimeError(f"User {user} not found in {r}") def user_token(user: str) -> str: """ Authorize this request with the GitHub app set by the 'app_id' and 'private_key' environment variables. 1. Get the installation ID for the user that has installed the app 2. Request a new token for that user 3. Return it so it can be used in future API requests """ # Hardcode the installation to PyTorch so we can always get a valid ID key id = installation_id("pytorch") url = f"https://api.github.com/app/installations/{id}/access_tokens" r_bytes = requests.post(url, headers=app_headers()) r = json.loads(r_bytes.content.decode()) token = str(r["token"]) return token if "AWS_KEY_ID" in os.environ and "AWS_SECRET_KEY" in os.environ: # Use keys for local development session = boto3.Session( aws_access_key_id=os.environ.get("AWS_KEY_ID"), aws_secret_access_key=os.environ.get("AWS_SECRET_KEY"), ) else: # In the Lambda, use permissions on the Lambda's role session = boto3.Session() s3 = session.resource("s3") def compress_query(query: str) -> str: query = query.replace("\n", "") query = re.sub("\s+", " ", query) return query def head_commit_query(user: str, repo: str, branches: List[str]) -> str: """ Fetch the head commit for a list of branches """ def branch_part(branch: str, num: int) -> str: return f""" r{num}: repository(name: "{repo}", owner: "{user}") {{ ref(qualifiedName:"refs/heads/{branch}") {{ name target {{ ... 
on Commit {{ oid }} }} }} }} """ parts = [branch_part(branch, i) for i, branch in enumerate(branches)] return "{" + "\n".join(parts) + "}" def extract_gha(suites: List[Dict[str, Any]]) -> List[Dict[str, str]]: jobs = [] for suite in suites: suite = suite["node"] if suite["workflowRun"] is None: # If no jobs were triggered this will be empty continue workflow = suite["workflowRun"]["workflow"]["name"] for run in suite["checkRuns"]["nodes"]: conclusion = run["conclusion"] if conclusion is None: if run["status"].lower() == "queued": conclusion = "queued" elif run["status"].lower() == "in_progress": conclusion = "pending" else: raise RuntimeError(f"unexpected run {run}") jobs.append( { "name": f"{workflow} / {run['name']}", "status": conclusion.lower(), "url": run["detailsUrl"], } ) return jobs def extract_status(contexts: List[Dict[str, Any]]) -> List[Dict[str, str]]: jobs = [] for context in contexts: jobs.append( { "name": context["context"], "status": context["state"].lower(), "url": context["targetUrl"], } ) return jobs def extract_jobs(raw_commits: List[Dict[str, Any]]) -> List[Dict[str, Any]]: commits = [] for raw_commit in raw_commits: if raw_commit["status"] is None: # Will be none if no non-GHA jobs were triggered status = [] else: status = extract_status(raw_commit["status"]["contexts"]) gha = extract_gha(raw_commit["checkSuites"]["edges"]) jobs = status + gha if raw_commit["author"]["user"] is None: author = raw_commit["author"]["name"] else: author = raw_commit["author"]["user"]["login"] commits.append( { "sha": raw_commit["oid"], "headline": raw_commit["messageHeadline"], "body": raw_commit["messageBody"], "author": author, "date": raw_commit["authoredDate"], "jobs": jobs, } ) return commits class BranchHandler: def __init__( self, gql: Any, user: str, repo: str, name: str, head: str, history_size: int, fetch_size: int, ): self.gql = gql self.user = user self.repo = repo self.name = name self.head = head self.fetch_size = fetch_size self.history_size = history_size def write_to_s3(self, data: Any) -> None: content = json.dumps(data, default=str) buf = io.BytesIO() gzipfile = gzip.GzipFile(fileobj=buf, mode="w") gzipfile.write(content.encode()) gzipfile.close() bucket = s3.Bucket(BUCKET) prefix = f"v6/{self.user}/{self.repo}/{self.name.replace('/', '_')}.json" bucket.put_object( Key=prefix, Body=buf.getvalue(), ContentType="application/json", ContentEncoding="gzip", Expires="0", ) print(f"Wrote {len(data)} commits from {self.name} to {prefix}") def query(self, offset: int) -> str: after = "" # The cursor for fetches are formatted like after: "<sha> <offset>", but # the first commit isn't included, so shift all the offsets and don't # use an "after" for the first batch if offset > 0: after = f', after: "{self.head} {offset - 1}"' return f""" {{ repository(name: "{self.repo}", owner: "{self.user}") {{ ref(qualifiedName:"refs/heads/{self.name}") {{ name target {{ ... 
on Commit {{ history(first:{self.fetch_size}{after}) {{ nodes {{ oid messageBody messageHeadline author {{ name user {{ login }} }} authoredDate checkSuites(first:100) {{ edges {{ node {{ checkRuns(first:100) {{ nodes {{ name status conclusion detailsUrl }} }} workflowRun {{ workflow {{ name }} }} }} }} }} status {{ contexts {{ context state targetUrl }} }} }} }} }} }} }} }} }} """ def check_response(self, gql_response: Any) -> None: # Just check that this path in the dict exists gql_response["data"]["repository"]["ref"]["target"]["history"]["nodes"] async def run(self) -> None: """ Fetch history for the branch (in batches) and merge them all together """ # GitHub's API errors out if you try to fetch too much data at once, so # split up the 100 commits into batches of 'self.fetch_size' fetches = math.ceil(self.history_size / self.fetch_size) async def fetch(i: int) -> Any: try: return await self.gql.query( self.query(offset=self.fetch_size * i), verify=self.check_response ) except Exception as e: print( f"Error: {e}\nFailed to fetch {self.user}/{self.repo}/{self.name} on batch {i} / {fetches}" ) return None coros = [fetch(i) for i in range(fetches)] result = await asyncio.gather(*coros) raw_commits = [] print(f"Parsing results {self.name}") # Merge all the batches for r in result: if r is None: continue try: commits_batch = r["data"]["repository"]["ref"]["target"]["history"][ "nodes" ] raw_commits += commits_batch except Exception as e: # Errors here are expected if the branch has less than HISTORY_SIZE # commits (GitHub will just time out). There's no easy way to find # this number ahead of time and avoid errors, but if we had that # then we could delete this try-catch. print(f"Error: Didn't find history in commit batch: {e}\n{r}") # Pull out the data and format it commits = extract_jobs(raw_commits) print(f"Writing results for {self.name} to S3") # Store gzip'ed data to S3 # print(len(commits)) # print(commits) self.write_to_s3(commits) class GraphQL: def __init__(self, session: aiohttp.ClientSession) -> None: self.session = session def log_rate_limit(self, headers: Any) -> None: remaining = headers.get("X-RateLimit-Remaining") used = headers.get("X-RateLimit-Used") total = headers.get("X-RateLimit-Limit") reset_timestamp = int(headers.get("X-RateLimit-Reset", 0)) # type: ignore reset = datetime.datetime.fromtimestamp(reset_timestamp).strftime( "%a, %d %b %Y %H:%M:%S" ) print( f"[rate limit] Used {used}, {remaining} / {total} remaining, reset at {reset}" ) async def query( self, query: str, verify: Optional[Callable[[Any], None]] = None, retries: int = 5, ) -> Any: """ Run an authenticated GraphQL query """ # Remove unnecessary white space query = compress_query(query) if retries <= 0: raise RuntimeError(f"Query {query[:100]} failed, no retries left") url = "https://api.github.com/graphql" try: async with self.session.post(url, json={"query": query}) as resp: self.log_rate_limit(resp.headers) r = await resp.json() if "data" not in r: raise RuntimeError(r) if verify is not None: verify(r) return r except Exception as e: print( f"Retrying query {query[:100]}, remaining attempts: {retries - 1}\n{e}" ) return await self.query(query, verify=verify, retries=retries - 1) async def main( user: str, repo: str, branches: List[str], history_size: int, fetch_size: int ) -> None: """ Grab a list of all the head commits for each branch, then fetch all the jobs for the last 'history_size' commits on that branch """ async with aiohttp.ClientSession( headers={ "Authorization": "token 
{}".format(user_token(user)), "Accept": "application/vnd.github.machine-man-preview+json", } ) as aiosession: gql = GraphQL(aiosession) print(f"Querying branches: {branches}") heads = await gql.query(head_commit_query(user, repo, branches)) handlers = [] for head in heads["data"].values(): sha = head["ref"]["target"]["oid"] branch = head["ref"]["name"] handlers.append( BranchHandler(gql, user, repo, branch, sha, history_size, fetch_size) ) await asyncio.gather(*[h.run() for h in handlers]) def lambda_handler(event: Any, context: Any) -> None: """ 'event' here is the payload configured from EventBridge (or set manually via environment variables) """ data: Dict[str, Any] = { "branches": None, "user": None, "repo": None, "history_size": None, "fetch_size": None, } for key in data.keys(): if key in os.environ: data[key] = os.environ[key] else: data[key] = event[key] if any(x is None for x in data.values()): raise RuntimeError( "Data missing from configuration, it must be set as an environment " f"variable or as the input JSON payload in the Lambda event:\n{data}" ) data["history_size"] = int(data["history_size"]) data["fetch_size"] = int(data["fetch_size"]) data["branches"] = data["branches"].split(",") # return asyncio.run(main(**data)) # if os.getenv("DEBUG", "0") == "1": # # For local development # lambda_handler( # { # "branches": "release/1.10", # "user": "pytorch", # "repo": "pytorch", # "history_size": 100, # "fetch_size": 10, # }, # None, # ) parser = argparse.ArgumentParser(description="Update JSON in S3 for a branch") parser.add_argument("--branch", required=True) parser.add_argument("--repo", required=True) parser.add_argument("--user", required=True) parser.add_argument("--fetch_size", default=10) parser.add_argument("--history_size", default=100) args = parser.parse_args() lambda_handler( { "branches": args.branch, "user": args.user, "repo": args.repo, "history_size": int(args.history_size), "fetch_size": int(args.fetch_size), }, None, )
#!/usr/bin/env python3 from pathlib import Path import jinja2 import os from dataclasses import dataclass from typing import Any REPO_ROOT = Path(__file__).resolve().parent.parent.parent GITHUB_DIR = REPO_ROOT / ".github" CRONS = { "5 minutes": "*/5 * * * *", "1 hour": "0 * * * *", } @dataclass class Branch: branch: str cron: str = CRONS["1 hour"] fetch_size: int = 4 history_size: int = 100 HUD_JOBS = { "pytorch": { "pytorch": [ Branch(branch="master", fetch_size=2, cron=CRONS["5 minutes"]), Branch(branch="nightly", fetch_size=2), Branch(branch="release/1.10", fetch_size=2), Branch(branch="viable/strict", fetch_size=2), ], "vision": [Branch(branch="main"), Branch(branch="release/0.11")], "audio": [Branch(branch="main"), Branch(branch="release/0.10")], "text": [Branch(branch="main"), Branch(branch="release/0.11")], "examples": [Branch(branch="master")], "tutorials": [Branch(branch="master")], "torchx": [Branch(branch="main")], }, "PyTorchLightning": {"pytorch-lightning": [Branch(branch="master")]}, } class CIWorkflow: name: str template: str def __init__(self, name: str, template: str, **kwargs: Any) -> None: self.name = name self.template = template for key, value in kwargs.items(): setattr(self, key, value) def generate_workflow_file(self, workflow_template: jinja2.Template) -> None: output_file_path = GITHUB_DIR / f"workflows/generated-{self.name}.yml" with open(output_file_path, "w") as output_file: filename = Path(workflow_template.filename).relative_to(REPO_ROOT) output_file.write("# @generated DO NOT EDIT MANUALLY\n") output_file.write(f"# Generated from {filename}\n") output_file.write(workflow_template.render(self.__dict__)) output_file.write("\n") print("Wrote", output_file_path.relative_to(REPO_ROOT)) WORKFLOWS = [] for user_name, repos in HUD_JOBS.items(): for repo_name, branches in repos.items(): for branch in branches: WORKFLOWS.append( CIWorkflow( template="update_github_status.yml.j2", repo=repo_name, user=user_name, branch=branch.branch, name=f"update-github-status-{user_name}-{repo_name}-{branch.branch.replace('/', '_')}", cron=branch.cron, fetch_size=branch.fetch_size, history_size=branch.history_size, ) ) if __name__ == "__main__": jinja_env = jinja2.Environment( variable_start_string="!{{", loader=jinja2.FileSystemLoader(str(GITHUB_DIR / "templates")), undefined=jinja2.StrictUndefined, ) # Delete the existing generated files first, this should align with .gitattributes file description. existing_workflows = GITHUB_DIR.glob("workflows/generated-*") for w in existing_workflows: try: os.remove(w) except Exception as e: print(f"Error occurred when deleting file {w}: {e}") for workflow in WORKFLOWS: template = jinja_env.get_template(workflow.template) workflow.generate_workflow_file(workflow_template=template)
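# ---------------------------------------------------------------------------
# Added sketch (never invoked): why the Jinja environment above overrides
# variable_start_string. GitHub Actions workflow YAML already uses ${{ ... }}
# expressions, so the .j2 templates presumably switch Jinja substitutions to
# !{{ ... }} to keep the two syntaxes from colliding. The template text below
# is made up for illustration and is not the real update_github_status.yml.j2.
# ---------------------------------------------------------------------------
def _delimiter_example() -> str:
    env = jinja2.Environment(
        variable_start_string="!{{",
        undefined=jinja2.StrictUndefined,
    )
    template = env.from_string(
        "name: update-github-status-!{{ user }}-!{{ repo }}\n"
        "on:\n"
        "  schedule:\n"
        "    - cron: '!{{ cron }}'\n"
        "# ${{ github.ref }} is left untouched for GitHub Actions to evaluate\n"
    )
    return template.render(user="pytorch", repo="audio", cron="0 * * * *")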
#!/usr/bin/env python3 # Copyright (c) Facebook, Inc. and its affiliates. # All rights reserved. # # This source code is licensed under the BSD-style license found in the # LICENSE file in the root directory of this source tree.
#!/usr/bin/env python3 # Copyright (c) Facebook, Inc. and its affiliates. # All rights reserved. # # This source code is licensed under the BSD-style license found in the # LICENSE file in the root directory of this source tree. import sys from setuptools import find_packages, setup def get_version(): return "0.2.3.dev0" if __name__ == "__main__": if sys.version_info < (3, 8): sys.exit("python >= 3.8 required for torchelastic") with open("README.md", encoding="utf8") as f: readme = f.read() with open("requirements.txt") as f: reqs = f.read() version = get_version() print("-- Building version: " + version) setup( # Metadata name="torchelastic", version=version, author="PyTorch Elastic Devs", author_email="[email protected]", description="PyTorch Elastic Training", long_description=readme, long_description_content_type="text/markdown", url="https://github.com/pytorch/elastic", license="BSD-3", keywords=["pytorch", "machine learning", "elastic", "distributed"], python_requires=">=3.8", install_requires=reqs.strip().split("\n"), include_package_data=True, packages=find_packages(exclude=("*.test", "aws*", "*.fb")), # PyPI package information. classifiers=[ "Development Status :: 4 - Beta", "Intended Audience :: Developers", "Intended Audience :: Science/Research", "License :: OSI Approved :: BSD License", "Programming Language :: Python :: 3", "Programming Language :: Python :: 3.8", "Topic :: System :: Distributed Computing", "Topic :: Scientific/Engineering :: Artificial Intelligence", ], )
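# Added note (illustrative, not part of the package metadata): a typical local
# development install of this package would be something like
#   pip install -e .
# run from the directory containing this setup.py, which also pulls in the
# pinned dependencies read from requirements.txt above.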
#!/usr/bin/env python3 from __future__ import absolute_import, division, print_function, unicode_literals import json import logging import os import os.path import shutil import subprocess import tarfile import textwrap import urllib.request import uuid import zipfile from os import walk from shutil import copyfile import yaml PETCTL_DIR = os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__))) logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO, ) logger = logging.getLogger(__name__) # Format a multiline command into a single line by trimming white spaces # and replacing newlines with spaces def format_command(cmd): return textwrap.dedent(cmd).strip().replace(os.linesep, " ") # This method runs all commands in a separate # process and returns the output def run_commands(cmds): set_kubeconfig_environment_var() for cmd in cmds: process = subprocess.run( cmd, shell=True, check=True, universal_newlines=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, env=os.environ, ) if process.stdout: logger.info(process.stdout) if process.stderr: logger.info(process.stderr) return process.stdout # Configures job yaml file based on user inputs def configure_yaml(args): SAMPLE_YAML_FILE = os.path.join(PETCTL_DIR, "config", "sample_specs.yaml") result_yaml_file = os.path.join(PETCTL_DIR, "config", "azure-pytorch-elastic.yaml") logger.info(f"Configuring job yaml {result_yaml_file}") with open(SAMPLE_YAML_FILE) as f: data = yaml.load(f) data["spec"]["parallelism"] = args.max_size data["spec"]["template"]["spec"]["containers"][0]["env"].extend( [ {"name": "JOB_ID", "value": str(uuid.uuid1()) + "_" + args.name}, {"name": "MIN_SIZE", "value": str(args.min_size)}, {"name": "MAX_SIZE", "value": str(args.max_size)}, ] ) yaml.dump(data, open(result_yaml_file, "w")) # Configures job yaml file based on user docker image def configure_yaml_storage(container_name): yaml_file = os.path.join(PETCTL_DIR, "config/azure-pytorch-elastic.yaml") logger.info(f"Configuring job yaml {yaml_file}") with open(yaml_file) as f: data = yaml.load(f) data["spec"]["template"]["spec"]["volumes"][0]["flexVolume"]["options"][ "container" ] = container_name yaml.dump(data, open(yaml_file, "w")) # Configures job yaml file based on user docker image def configure_yaml_docker(image_name): yaml_file = os.path.join(PETCTL_DIR, "config/azure-pytorch-elastic.yaml") logger.info(f"Configuring job yaml {yaml_file}") with open(yaml_file) as f: data = yaml.load(f) data["spec"]["template"]["spec"]["containers"][0]["image"] = image_name yaml.dump(data, open(yaml_file, "w")) # Configures kubernetes json file based on user inputs def configure_json(args): KUBERNETES_JSON_FILE = os.path.join(PETCTL_DIR, "config/kubernetes.json") result_json_file = os.path.join(PETCTL_DIR, "config/", "kubernetes.json") logger.info(f"Configuring kubernetes specs {result_json_file}") with open(KUBERNETES_JSON_FILE) as f: data = json.load(f) data["properties"]["masterProfile"]["count"] = 1 data["properties"]["agentPoolProfiles"][0]["count"] = args.min_size data["properties"]["masterProfile"]["vmSize"] = args.master_vm data["properties"]["agentPoolProfiles"][0]["vmSize"] = args.worker_vm json.dump(data, open(result_json_file, "w"), indent=4) # Download AKS engine installer script for Linux def download_aks_engine_script(): url = ( "https://raw.githubusercontent.com/Azure/aks-engine/master/scripts/get-akse.sh" ) urllib.request.urlretrieve(url, "config/get-akse.sh") 
logger.info("Downloading aks engine script.....") # Download AKS engine binary for Windows def download_aks_engine_script_for_windows(): print("Downloading aks engine binary.....") url = ( "https://github.com/Azure/aks-engine/releases" "/download/v0.47.0/aks-engine-v0.47.0-windows-amd64.zip" ) filename, _ = urllib.request.urlretrieve(url, "config/aks.zip") zip_file_object = zipfile.ZipFile(filename, "r") for name in zip_file_object.namelist(): if "aks-engine.exe" in name: zip_file_object.extract(name, "aks-engine") copyfile("aks-engine/" + name, "aks-engine.exe") break # Installs AKS engine from the script/binary def install_aks_engine(): if os.name == "nt": download_aks_engine_script_for_windows() else: download_aks_engine_script() commands = ["chmod 700 config/get-akse.sh", "./config/get-akse.sh"] run_commands(commands) # Download AzCopy script to upload to AzureBlobStorage def download_azcopy_script(): print("Downloading azcopy cli") url = "https://aka.ms/downloadazcopy-v10-linux" filename, _ = urllib.request.urlretrieve(url, "config/azcopy.tar.gz") tar_file_object = tarfile.open(filename, "r:gz") for member in tar_file_object.getmembers(): if member.isreg(): member.name = os.path.basename(member.name) if "azcopy" == member.name: tar_file_object.extract(member.name, ".") break # Download AzCopy script for windows def download_azcopy_script_for_windows(): url = "https://aka.ms/downloadazcopy-v10-windows" filename, _ = urllib.request.urlretrieve(url, "config/azcopy.zip") zip_file_object = zipfile.ZipFile(filename, "r") for member in zip_file_object.infolist(): if not member.is_dir(): member.filename = os.path.basename(member.filename) if "azcopy" in member.filename: zip_file_object.extract(member, ".") break """ Helper function to upload to AzureBlob storage based on Storage account, Storage container, SAS Token """ def upload_to_azure_blob(args): destination = ( f"https://{args.account_name}.blob.core.windows.net/" "{args.container_name}{args.sas_token}" ) if os.name == "nt": download_azcopy_script_for_windows() commands = [ format_command( f""" azcopy copy "{args.source_path}" "{destination}" --recursive=True""" ) ] run_commands(commands) else: download_azcopy_script() commands = [ format_command( f""" ./azcopy copy "{args.source_path}" "{destination}" --recursive=True""" ) ] run_commands(commands) configure_yaml_storage(args.container_name) """ Sets KUBECONFIG environment variable to the path to the json file generated """ def set_kubeconfig_environment_var(): if os.path.isdir("_output"): config_path = os.path.join( PETCTL_DIR, "_output", "azure-pytorch-elastic", "kubeconfig" ) logger.info(f"Reading KUBECONFIG environment variable from {config_path}") for files in walk(config_path): for f in files: if f and f[0].endswith(".json"): config_path = os.path.join(config_path, f[0]) if config_path.endswith(".json"): os.environ["KUBECONFIG"] = config_path logger.info( f"Setting KUBECONFIG env variable {os.environ.get('KUBECONFIG')}" ) # Create storage secret named 'pet-blob-secret' def create_storage_secrets(args): commands = [ format_command( f""" kubectl create secret generic pet-blob-secret --from-literal accountname={args.account_name} --from-literal accountkey={args.account_key} --type=azure/blobfuse""" ) ] run_commands(commands) # Install Azure blobfuse drivers def install_blobfuse_drivers(): commands = [ "kubectl apply -f " + "https://raw.githubusercontent.com/Azure/kubernetes-volume-drivers" + "/master/flexvolume/blobfuse/deployment/blobfuse-flexvol-installer-1.9.yaml" ] 
run_commands(commands) # Create docker image secrets given user inputs def create_docker_image_secret(args): configure_yaml_docker(args.image_name) commands = [ format_command( f""" kubectl create secret docker-registry pet-docker-secret --docker-server={args.server} --docker-username={args.username} --docker-password={args.password} --docker-email='[email protected]'""" ) ] run_commands(commands) logger.info("Docker image registered..") # Deploy AKS cluster def deploy_aks_cluster(args): logger.info("Started AKS cluster deployment. This will take some time .....") commands = [ format_command( f""" aks-engine deploy -f --subscription-id {args.subscription_id} --dns-prefix {args.dns_prefix} --resource-group {args.rg} --location {args.location} --api-model config/kubernetes.json --client-id {args.client_id} --client-secret {args.client_secret} --set servicePrincipalProfile.clientId={args.client_id} --set servicePrincipalProfile.secret={args.client_secret}""" ) ] run_commands(commands) # Scale the cluster up and down based on user input def scale_cluster(args): command = [ format_command( f""" aks-engine scale --subscription-id {args.subscription_id} --resource-group {args.rg} --client-id {args.client_id} --client-secret {args.client_secret} --location {args.location} --api-model _output/azure-pytorch-elastic/apimodel.json --new-node-count {args.new_node_count} --apiserver azure-pytorch-elastic.{4}.cloudapp.azure.com""" ) ] run_commands(command) def delete_resources_util(): commands = [ "kubectl config delete-cluster azure-pytorch-elastic", "kubectl delete secret pet-blob-secret", "kubectl delete namespace --all", ] run_commands(commands) if os.path.isdir("_output"): shutil.rmtree(os.path.join(PETCTL_DIR, "_output")) logger.info( ( "Deleted all resources," "please manually delete the AKS resources from the Azure Portal." ) )
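# ---------------------------------------------------------------------------
# Added sketch (never invoked): how format_command and run_commands above are
# meant to be combined. The kubectl invocation is illustrative only.
# ---------------------------------------------------------------------------
def _example_list_pods():
    cmd = format_command(
        """
        kubectl get pods
        --selector app=azure-pytorch-elastic
        --output wide"""
    )
    # format_command dedents the block, strips it and joins the lines with
    # spaces, so cmd is the single line:
    #   kubectl get pods --selector app=azure-pytorch-elastic --output wide
    run_commands([cmd])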
from __future__ import absolute_import, division, print_function, unicode_literals import util # Create a Kubernetes specs and YAML job file based on user inputs def configure(args): util.configure_yaml(args) util.configure_json(args) # Deploys a Kubernetes cluster def setup(args): # Install AKS Engine util.install_aks_engine() # Deploy an AKS cluster using kubernetes.json util.deploy_aks_cluster(args) # Upload code/data to Azure blob storage def upload_storage(args): util.upload_to_azure_blob(args) # Create Azure blob storage secret def storage_secret(args): util.create_storage_secrets(args) # Create docker image secrets def docker_secret(args): util.create_docker_image_secret(args) # Scale the cluster def scale_cluster(args): util.scale_cluster(args) # Submits your training job def run_job(args): util.install_blobfuse_drivers() commands = [ "kubectl delete -f config/azure-pytorch-elastic.yaml", "kubectl apply -f config/azure-pytorch-elastic.yaml", "kubectl describe pods", "kubectl get pods --selector app=azure-pytorch-elastic", ] util.run_commands(commands) # Check current status of your pods def check_status(): commands = [ "kubectl describe pods", "kubectl get pods --selector app=azure-pytorch-elastic", ] util.run_commands(commands) # Get logs of your job from each pod def get_logs(): util.run_commands(["kubectl logs --selector app=azure-pytorch-elastic "]) # Deletes secrets and cluster def delete_resources(): util.delete_resources_util() if __name__ == "__main__": parser = util.argparse.ArgumentParser() subparser = parser.add_subparsers( title="actions", description="setup | configure | run job", dest="command" ) # ---------------------------------- # # SETUP # # ---------------------------------- # parser_setup = subparser.add_parser( "setup", help="set up aks-engine, cluster and other dependencies" ) parser_setup.add_argument( "--dns_prefix", type=str, required=False, default="azure-pytorch-elastic", help="Dns prefix of the app", ) parser_setup.add_argument( "--subscription_id", type=str, required=True, help="Subscription id of the cluster", ) parser_setup.add_argument( "--rg", type=str, required=True, help="Resource group of the cluster" ) parser_setup.add_argument( "--location", type=str, required=True, help="Location of the cluster" ) parser_setup.add_argument( "--client_id", type=str, required=True, help="Service principal client id" ) parser_setup.add_argument( "--client_secret", type=str, required=True, help="Service Principal client secret", ) parser_setup.set_defaults(func=setup) # ---------------------------------- # # CONFIGURE JOB YAML # # ---------------------------------- # parser_configure = subparser.add_parser("configure", help="Generate yaml job file") parser_configure.add_argument("--name", required=True, help="config parameters") parser_configure.add_argument( "--min_size", type=int, required=False, help="minimum number of worker hosts to continue training", ) parser_configure.add_argument( "--max_size", type=int, required=False, help="maximum number of worker hosts to allow scaling out", ) parser_configure.add_argument( "--size", type=int, required=False, help="set size to automatically set min_size = max_size = size", ) parser_configure.add_argument( "--master_vm", type=str, required=False, default="Standard_DS1_v2", help="Azure VM instance for master node", ) parser_configure.add_argument( "--worker_vm", type=str, required=False, default="Standard_NC6s_v3", help="Azure VM instance for woker nodes", ) parser_configure.set_defaults(func=configure) # 
---------------------------------- # # UPLOAD STORAGE # # ---------------------------------- # parser_upload_storage = subparser.add_parser( "upload_storage", help="Upload to Azure Blob storage" ) parser_upload_storage.add_argument( "--account_name", type=str, required=True, help="Azure Blob storage Account name", ) parser_upload_storage.add_argument( "--container_name", type=str, required=True, help="Azure Blob storage container name", ) parser_upload_storage.add_argument( "--sas_token", type=str, required=True, help="Azure Blob storage SAS token" ) parser_upload_storage.add_argument( "--source_path", type=str, required=True, help="Path to local files" ) parser_upload_storage.set_defaults(func=upload_storage) # ---------------------------------- # # SETUP SECRETS # # ---------------------------------- # parser_storage_secret = subparser.add_parser( "storage_secret", help="Generate secret for Azure Blob storage" ) parser_storage_secret.add_argument( "--account_name", type=str, required=True, help="Azure Blob storage account name", ) parser_storage_secret.add_argument( "--account_key", type=str, required=True, help="Azure Blob storage account key" ) parser_storage_secret.set_defaults(func=storage_secret) parser_docker_secret = subparser.add_parser( "docker_secret", help="Generate secret for Docker Image" ) parser_docker_secret.add_argument( "--server", type=str, required=True, help="Docker server" ) parser_docker_secret.add_argument( "--username", type=str, required=True, help="Docker username" ) parser_docker_secret.add_argument( "--password", type=str, required=True, help="Docker password" ) parser_docker_secret.add_argument( "--image_name", type=str, required=True, help="Docker Imagename" ) parser_docker_secret.set_defaults(func=docker_secret) # ---------------------------------- # # RUN JOB # # ---------------------------------- # parser_run_job = subparser.add_parser("run_job", help="Run your training job") parser_run_job.set_defaults(func=run_job) # ---------------------------------- # # CHECK STATUS # # ---------------------------------- # parser_check_status = subparser.add_parser( "check_status", help="Check status of your jobs" ) parser_run_job.set_defaults(func=check_status) # ---------------------------------- # # DELETE RESOURCES # # ---------------------------------- # parser_delete_resources = subparser.add_parser( "delete_resources", help="Deletes the kubernetes cluster and all namespaces and secrets", ) parser_delete_resources.set_defaults(func=delete_resources) # ---------------------------------- # # GET LOGS # # ---------------------------------- # parser_get_logs = subparser.add_parser( "get_logs", help="Get logs from all your pods" ) parser_get_logs.set_defaults(func=get_logs) # ---------------------------------- # # SCALE CLUSTER # # ---------------------------------- # parser_scale = subparser.add_parser("scale", help="Scale up/down your cluster") parser_scale.add_argument( "--subscription_id", type=str, required=True, help="Subscription id of the cluster", ) parser_scale.add_argument( "--rg", type=str, required=True, help="Resource group of the cluster" ) parser_scale.add_argument( "--location", type=str, required=True, help="Location of the cluster" ) parser_scale.add_argument( "--client_id", type=str, required=True, help="Service principal client id" ) parser_scale.add_argument( "--client_secret", type=str, required=True, help="Service Principal client secret", ) parser_scale.add_argument( "--new_node_count", type=int, required=True, help="New node count to scale 
cluster to", ) parser_scale.set_defaults(func=util.scale_cluster) args = parser.parse_args() # ----- # Execution order: Configure --> Setup --> Run # ----- if args.command == "configure": configure(args) elif args.command == "setup": setup(args) elif args.command == "upload_storage": upload_storage(args) elif args.command == "storage_secret": storage_secret(args) elif args.command == "docker_secret": docker_secret(args) elif args.command == "run_job": run_job(args) elif args.command == "check_status": check_status() elif args.command == "delete_resources": delete_resources() elif args.command == "get_logs": get_logs() elif args.command == "scale": scale_cluster(args)
#!/usr/bin/env python3 # -*- coding: utf-8 -*- # # PyTorch documentation build configuration file, created by # sphinx-quickstart on Fri Dec 23 13:31:47 2016. # # This file is execfile()d with the current directory set to its # containing dir. # # Note that not all possible configuration values are present in this # autogenerated file. # # All configuration values have a default; values that are commented out # serve to show the default. import pytorch_sphinx_theme # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. # # import os # import sys # sys.path.insert(0, os.path.abspath('.')) from docutils import nodes from sphinx import addnodes from sphinx.util.docfields import TypedField # -- General configuration ------------------------------------------------ # If your documentation needs a minimal Sphinx version, state it here. # needs_sphinx = "1.6" # Add any Sphinx extension module names here, as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom # ones. extensions = [ "sphinx.ext.autodoc", "sphinx.ext.autosummary", "sphinx.ext.doctest", "sphinx.ext.intersphinx", "sphinx.ext.todo", "sphinx.ext.coverage", "sphinx.ext.napoleon", "sphinx.ext.viewcode", "sphinxcontrib.katex", "sphinx.ext.autosectionlabel", ] # katex options # # katex_options = r""" delimiters : [ {left: "$$", right: "$$", display: true}, {left: "\\(", right: "\\)", display: false}, {left: "\\[", right: "\\]", display: true} ] """ napoleon_use_ivar = True # Add any paths that contain templates here, relative to this directory. templates_path = ["_templates"] # The suffix(es) of source filenames. # You can specify multiple suffix as a list of string: # # source_suffix = ['.rst', '.md'] source_suffix = [".rst", ".md"] # The master toctree document. master_doc = "index" # General information about the project. project = "PyTorch/Elastic" copyright = "2020, PyTorch Elastic Contributors" author = "PyTorch Elastic Contributors" # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the # built documents. # # The short X.Y version. # TODO: change to [:2] at v1.0 version = "v0.2.3.dev0" # The full version, including alpha/beta/rc tags. # TODO: verify this works as expected release = "master" # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. # # This is also used if you do content translation via gettext catalogs. # Usually you set "language" from the command line for these cases. language = None # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. # This patterns also effect to html_static_path and html_extra_path exclude_patterns = [] # The name of the Pygments (syntax highlighting) style to use. pygments_style = "sphinx" # If true, `todo` and `todoList` produce output, else they produce nothing. todo_include_todos = True # -- Options for HTML output ---------------------------------------------- # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. # html_theme = "pytorch_sphinx_theme" html_theme_path = [pytorch_sphinx_theme.get_html_theme_path()] # Theme options are theme-specific and customize the look and feel of a theme # further. 
For a list of options available for each theme, see the # documentation. # html_theme_options = { "pytorch_project": "elastic", "collapse_navigation": False, "display_version": True, "logo_only": True, } html_logo = "_static/img/pytorch-logo-dark.svg" # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". html_static_path = ["_static"] def setup(app): # NOTE: in Sphinx 1.8+ `html_css_files` is an official configuration value # and can be moved outside of this function (and the setup(app) function # can be deleted). html_css_files = [ "https://cdn.jsdelivr.net/npm/[email protected]/dist/katex.min.css" ] # In Sphinx 1.8 it was renamed to `add_css_file`, 1.7 and prior it is # `add_stylesheet` (deprecated in 1.8). add_css = getattr( app, "add_css_file", getattr(app, "add_stylesheet", None) ) # noqa B009 for css_file in html_css_files: add_css(css_file) # -- Options for HTMLHelp output ------------------------------------------ # Output file base name for HTML help builder. htmlhelp_basename = "TorchElasticdoc" # -- Options for LaTeX output --------------------------------------------- latex_elements = { # The paper size ('letterpaper' or 'a4paper'). # # 'papersize': 'letterpaper', # The font size ('10pt', '11pt' or '12pt'). # # 'pointsize': '10pt', # Additional stuff for the LaTeX preamble. # # 'preamble': '', # Latex figure (float) alignment # # 'figure_align': 'htbp', } # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, # author, documentclass [howto, manual, or own class]). latex_documents = [ ( master_doc, "pytorch.tex", "Torchelastic Documentation", "Torch Contributors", "manual", ) ] # -- Options for manual page output --------------------------------------- # One entry per manual page. List of tuples # (source start file, name, description, authors, manual section). man_pages = [(master_doc, "Torchelastic", "Torchelastic Documentation", [author], 1)] # -- Options for Texinfo output ------------------------------------------- # Grouping the document tree into Texinfo files. List of tuples # (source start file, target name, title, author, # dir menu entry, description, category) texinfo_documents = [ ( master_doc, "Torchelastic", "Torchelastic Documentation", author, "Torchelastic", "PyTorch Elastic Training", "Miscellaneous", ) ] # Example configuration for intersphinx: refer to the Python standard library. intersphinx_mapping = { "python": ("https://docs.python.org/", None), "numpy": ("https://docs.scipy.org/doc/numpy/", None), "torch": ("https://pytorch.org/docs/stable/", None), } # -- A patch that prevents Sphinx from cross-referencing ivar tags ------- # See http://stackoverflow.com/a/41184353/3343043 def patched_make_field(self, types, domain, items, **kw): # `kw` catches `env=None` needed for newer sphinx while maintaining # backwards compatibility when passed along further down! 
def handle_item(fieldarg, content): par = nodes.paragraph() par += addnodes.literal_strong("", fieldarg) # Patch: this line added # par.extend(self.make_xrefs(self.rolename, domain, fieldarg, # addnodes.literal_strong)) if fieldarg in types: par += nodes.Text(" (") # NOTE: using .pop() here to prevent a single type node to be # inserted twice into the doctree, which leads to # inconsistencies later when references are resolved fieldtype = types.pop(fieldarg) if len(fieldtype) == 1 and isinstance(fieldtype[0], nodes.Text): typename = "".join(n.astext() for n in fieldtype) typename = typename.replace("int", "python:int") typename = typename.replace("long", "python:long") typename = typename.replace("float", "python:float") typename = typename.replace("type", "python:type") par.extend( self.make_xrefs( self.typerolename, domain, typename, addnodes.literal_emphasis, **kw, ) ) else: par += fieldtype par += nodes.Text(")") par += nodes.Text(" -- ") par += content return par fieldname = nodes.field_name("", self.label) if len(items) == 1 and self.can_collapse: fieldarg, content = items[0] bodynode = handle_item(fieldarg, content) else: bodynode = self.list_type() for fieldarg, content in items: bodynode += nodes.list_item("", handle_item(fieldarg, content)) fieldbody = nodes.field_body("", bodynode) return nodes.field("", fieldname, fieldbody) TypedField.make_field = patched_make_field
#!/usr/bin/env python3 # Copyright (c) Facebook, Inc. and its affiliates. # All rights reserved. # # This source code is licensed under the BSD-style license found in the # LICENSE file in the root directory of this source tree. """ For each rst file, generates a corresponding rst file that redirects http://pytorch.org/elastic/<version>/<file_name>.html to http://pytorch.org/elastic/latest/<file_name>.html """ import argparse import glob import os import sys import torchelastic def parse_args(args): parser = argparse.ArgumentParser() parser.add_argument( "--source_dir", required=True, help="directory where rst files are" ) parser.add_argument("--build_dir", required=True, help="directory to drop md files") return parser.parse_args(args[1:]) if __name__ == "__main__": args = parse_args(sys.argv) build_ver = torchelastic.__version__ source_dir = args.source_dir build_dir = args.build_dir print(f"Creating redirect files from source_dir: {source_dir} into {build_dir}") for rst_file in glob.glob(os.path.join(source_dir, "**/*.rst"), recursive=True): rst_relative_path = os.path.relpath(rst_file, source_dir) md_relative_path = os.path.splitext(rst_relative_path)[0] + ".md" html_relative_path = os.path.splitext(rst_relative_path)[0] + ".html" md_file = os.path.join(build_dir, md_relative_path) os.makedirs(os.path.dirname(md_file), exist_ok=True) print(f"Creating redirect md for {rst_relative_path} --> {md_file}") with open(md_file, "w") as f: f.write("---\n") f.write("layout: docs_redirect\n") f.write("title: PyTorch | Redirect\n") f.write(f'redirect_url: "/elastic/{build_ver}/{html_relative_path}"\n') f.write("---\n")
#!/usr/bin/env python3 import io import os import pprint import sys import torch.distributed as dist if __name__ == "__main__": env_dict = { k: os.environ[k] for k in ( "LOCAL_RANK", "RANK", "GROUP_RANK", "WORLD_SIZE", "MASTER_ADDR", "MASTER_PORT", "TORCHELASTIC_RESTART_COUNT", "TORCHELASTIC_MAX_RESTARTS", ) } with io.StringIO() as buff: print("======================================================", file=buff) print( f"Environment variables set by the agent on PID {os.getpid()}:", file=buff ) pprint.pprint(env_dict, stream=buff) print("======================================================", file=buff) print(buff.getvalue()) sys.stdout.flush() dist.init_process_group(backend="gloo") dist.barrier() print( ( f"On PID {os.getpid()}, after init process group, " f"rank={dist.get_rank()}, world_size = {dist.get_world_size()}\n" ) )
#!/usr/bin/env python3 # Copyright (c) Facebook, Inc. and its affiliates. # All rights reserved. # # This source code is licensed under the BSD-style license found in the # LICENSE file in the root directory of this source tree. r""" Source: `pytorch imagenet example <https://github.com/pytorch/examples/blob/master/imagenet/main.py>`_ # noqa B950 Modified and simplified to make the original pytorch example compatible with torchelastic.distributed.launch. Changes: 1. Removed ``rank``, ``gpu``, ``multiprocessing-distributed``, ``dist_url`` options. These are obsolete parameters when using ``torchelastic.distributed.launch``. 2. Removed ``seed``, ``evaluate``, ``pretrained`` options for simplicity. 3. Removed ``resume``, ``start-epoch`` options. Loads the most recent checkpoint by default. 4. ``batch-size`` is now per GPU (worker) batch size rather than for all GPUs. 5. Defaults ``workers`` (num data loader workers) to ``0``. Usage :: >>> python -m torchelastic.distributed.launch --nnodes=$NUM_NODES --nproc_per_node=$WORKERS_PER_NODE --rdzv_id=$JOB_ID --rdzv_backend=etcd --rdzv_endpoint=$ETCD_HOST:$ETCD_PORT main.py --arch resnet18 --epochs 20 --batch-size 32 <DATA_DIR> """ import argparse import io import os import shutil import time from contextlib import contextmanager from datetime import timedelta from typing import List, Tuple import numpy import torch import torch.backends.cudnn as cudnn import torch.distributed as dist import torch.nn as nn import torch.nn.parallel import torch.optim import torch.utils.data import torch.utils.data.distributed import torchvision.datasets as datasets import torchvision.models as models import torchvision.transforms as transforms from torch.distributed.elastic.utils.data import ElasticDistributedSampler from torch.nn.parallel import DistributedDataParallel from torch.optim import SGD from torch.utils.data import DataLoader model_names = sorted( name for name in models.__dict__ if name.islower() and not name.startswith("__") and callable(models.__dict__[name]) ) parser = argparse.ArgumentParser(description="PyTorch Elastic ImageNet Training") parser.add_argument("data", metavar="DIR", help="path to dataset") parser.add_argument( "-a", "--arch", metavar="ARCH", default="resnet18", choices=model_names, help="model architecture: " + " | ".join(model_names) + " (default: resnet18)", ) parser.add_argument( "-j", "--workers", default=0, type=int, metavar="N", help="number of data loading workers", ) parser.add_argument( "--epochs", default=90, type=int, metavar="N", help="number of total epochs to run" ) parser.add_argument( "-b", "--batch-size", default=32, type=int, metavar="N", help="mini-batch size (default: 32), per worker (GPU)", ) parser.add_argument( "--lr", "--learning-rate", default=0.1, type=float, metavar="LR", help="initial learning rate", dest="lr", ) parser.add_argument("--momentum", default=0.9, type=float, metavar="M", help="momentum") parser.add_argument( "--wd", "--weight-decay", default=1e-4, type=float, metavar="W", help="weight decay (default: 1e-4)", dest="weight_decay", ) parser.add_argument( "-p", "--print-freq", default=10, type=int, metavar="N", help="print frequency (default: 10)", ) parser.add_argument( "--dist-backend", default="nccl", choices=["nccl", "gloo"], type=str, help="distributed backend", ) parser.add_argument( "--checkpoint-file", default="/tmp/checkpoint.pth.tar", type=str, help="checkpoint file path, to load and save to", ) def main(): args = parser.parse_args() device_id = int(os.environ["LOCAL_RANK"]) 
torch.cuda.set_device(device_id) print(f"=> set cuda device = {device_id}") dist.init_process_group( backend=args.dist_backend, init_method="env://", timeout=timedelta(seconds=10) ) model, criterion, optimizer = initialize_model( args.arch, args.lr, args.momentum, args.weight_decay, device_id ) train_loader, val_loader = initialize_data_loader( args.data, args.batch_size, args.workers ) # resume from checkpoint if one exists; state = load_checkpoint( args.checkpoint_file, device_id, args.arch, model, optimizer ) start_epoch = state.epoch + 1 print(f"=> start_epoch: {start_epoch}, best_acc1: {state.best_acc1}") print_freq = args.print_freq for epoch in range(start_epoch, args.epochs): state.epoch = epoch train_loader.batch_sampler.sampler.set_epoch(epoch) adjust_learning_rate(optimizer, epoch, args.lr) # train for one epoch train(train_loader, model, criterion, optimizer, epoch, device_id, print_freq) # evaluate on validation set acc1 = validate(val_loader, model, criterion, device_id, print_freq) # remember best acc@1 and save checkpoint is_best = acc1 > state.best_acc1 state.best_acc1 = max(acc1, state.best_acc1) if device_id == 0: save_checkpoint(state, is_best, args.checkpoint_file) class State: """ Container for objects that we want to checkpoint. Represents the current "state" of the worker. This object is mutable. """ def __init__(self, arch, model, optimizer): self.epoch = -1 self.best_acc1 = 0 self.arch = arch self.model = model self.optimizer = optimizer def capture_snapshot(self): """ Essentially a ``serialize()`` function, returns the state as an object compatible with ``torch.save()``. The following should work :: snapshot = state_0.capture_snapshot() state_1.apply_snapshot(snapshot) assert state_0 == state_1 """ return { "epoch": self.epoch, "best_acc1": self.best_acc1, "arch": self.arch, "state_dict": self.model.state_dict(), "optimizer": self.optimizer.state_dict(), } def apply_snapshot(self, obj, device_id): """ The complimentary function of ``capture_snapshot()``. Applies the snapshot object that was returned by ``capture_snapshot()``. This function mutates this state object. """ self.epoch = obj["epoch"] self.best_acc1 = obj["best_acc1"] self.state_dict = obj["state_dict"] self.model.load_state_dict(obj["state_dict"]) self.optimizer.load_state_dict(obj["optimizer"]) def save(self, f): torch.save(self.capture_snapshot(), f) def load(self, f, device_id): # Map model to be loaded to specified single gpu. snapshot = torch.load(f, map_location=f"cuda:{device_id}") self.apply_snapshot(snapshot, device_id) def initialize_model( arch: str, lr: float, momentum: float, weight_decay: float, device_id: int ): print(f"=> creating model: {arch}") model = models.__dict__[arch]() # For multiprocessing distributed, DistributedDataParallel constructor # should always set the single device scope, otherwise, # DistributedDataParallel will use all available devices. 
model.cuda(device_id) cudnn.benchmark = True model = DistributedDataParallel(model, device_ids=[device_id]) # define loss function (criterion) and optimizer criterion = nn.CrossEntropyLoss().cuda(device_id) optimizer = SGD( model.parameters(), lr, momentum=momentum, weight_decay=weight_decay ) return model, criterion, optimizer def initialize_data_loader( data_dir, batch_size, num_data_workers ) -> Tuple[DataLoader, DataLoader]: traindir = os.path.join(data_dir, "train") valdir = os.path.join(data_dir, "val") normalize = transforms.Normalize( mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225] ) train_dataset = datasets.ImageFolder( traindir, transforms.Compose( [ transforms.RandomResizedCrop(224), transforms.RandomHorizontalFlip(), transforms.ToTensor(), normalize, ] ), ) train_sampler = ElasticDistributedSampler(train_dataset) train_loader = DataLoader( train_dataset, batch_size=batch_size, num_workers=num_data_workers, pin_memory=True, sampler=train_sampler, ) val_loader = DataLoader( datasets.ImageFolder( valdir, transforms.Compose( [ transforms.Resize(256), transforms.CenterCrop(224), transforms.ToTensor(), normalize, ] ), ), batch_size=batch_size, shuffle=False, num_workers=num_data_workers, pin_memory=True, ) return train_loader, val_loader def load_checkpoint( checkpoint_file: str, device_id: int, arch: str, model: DistributedDataParallel, optimizer, # SGD ) -> State: """ Loads a local checkpoint (if any). Otherwise, checks to see if any of the neighbors have a non-zero state. If so, restore the state from the rank that has the most up-to-date checkpoint. .. note:: when your job has access to a globally visible persistent storage (e.g. nfs mount, S3) you can simply have all workers load from the most recent checkpoint from such storage. Since this example is expected to run on vanilla hosts (with no shared storage) the checkpoints are written to local disk, hence we have the extra logic to broadcast the checkpoint from a surviving node. """ state = State(arch, model, optimizer) if os.path.isfile(checkpoint_file): print(f"=> loading checkpoint file: {checkpoint_file}") state.load(checkpoint_file, device_id) print(f"=> loaded checkpoint file: {checkpoint_file}") # logic below is unnecessary when the checkpoint is visible on all nodes! 
# create a temporary cpu pg to broadcast most up-to-date checkpoint with tmp_process_group(backend="gloo") as pg: rank = dist.get_rank(group=pg) # get rank that has the largest state.epoch epochs = torch.zeros(dist.get_world_size(), dtype=torch.int32) epochs[rank] = state.epoch dist.all_reduce(epochs, op=dist.ReduceOp.SUM, group=pg) t_max_epoch, t_max_rank = torch.max(epochs, dim=0) max_epoch = t_max_epoch.item() max_rank = t_max_rank.item() # max_epoch == -1 means no one has checkpointed return base state if max_epoch == -1: print(f"=> no workers have checkpoints, starting from epoch 0") return state # broadcast the state from max_rank (which has the most up-to-date state) # pickle the snapshot, convert it into a byte-blob tensor # then broadcast it, unpickle it and apply the snapshot print(f"=> using checkpoint from rank: {max_rank}, max_epoch: {max_epoch}") with io.BytesIO() as f: torch.save(state.capture_snapshot(), f) raw_blob = numpy.frombuffer(f.getvalue(), dtype=numpy.uint8) blob_len = torch.tensor(len(raw_blob)) dist.broadcast(blob_len, src=max_rank, group=pg) print(f"=> checkpoint broadcast size is: {blob_len}") if rank != max_rank: # pyre-fixme[6]: For 1st param expected `Union[List[int], Size, # typing.Tuple[int, ...]]` but got `Union[bool, float, int]`. blob = torch.zeros(blob_len.item(), dtype=torch.uint8) else: blob = torch.as_tensor(raw_blob, dtype=torch.uint8) dist.broadcast(blob, src=max_rank, group=pg) print(f"=> done broadcasting checkpoint") if rank != max_rank: with io.BytesIO(blob.numpy()) as f: snapshot = torch.load(f) state.apply_snapshot(snapshot, device_id) # wait till everyone has loaded the checkpoint dist.barrier(group=pg) print(f"=> done restoring from previous checkpoint") return state @contextmanager def tmp_process_group(backend): cpu_pg = dist.new_group(backend=backend) try: yield cpu_pg finally: dist.destroy_process_group(cpu_pg) def save_checkpoint(state: State, is_best: bool, filename: str): checkpoint_dir = os.path.dirname(filename) os.makedirs(checkpoint_dir, exist_ok=True) # save to tmp, then commit by moving the file in case the job # gets interrupted while writing the checkpoint tmp_filename = filename + ".tmp" torch.save(state.capture_snapshot(), tmp_filename) os.rename(tmp_filename, filename) print(f"=> saved checkpoint for epoch {state.epoch} at {filename}") if is_best: best = os.path.join(checkpoint_dir, "model_best.pth.tar") print(f"=> best model found at epoch {state.epoch} saving to {best}") shutil.copyfile(filename, best) def train( train_loader: DataLoader, model: DistributedDataParallel, criterion, # nn.CrossEntropyLoss optimizer, # SGD, epoch: int, device_id: int, print_freq: int, ): batch_time = AverageMeter("Time", ":6.3f") data_time = AverageMeter("Data", ":6.3f") losses = AverageMeter("Loss", ":.4e") top1 = AverageMeter("Acc@1", ":6.2f") top5 = AverageMeter("Acc@5", ":6.2f") progress = ProgressMeter( len(train_loader), [batch_time, data_time, losses, top1, top5], prefix="Epoch: [{}]".format(epoch), ) # switch to train mode model.train() end = time.time() for i, (images, target) in enumerate(train_loader): # measure data loading time data_time.update(time.time() - end) images = images.cuda(device_id, non_blocking=True) target = target.cuda(device_id, non_blocking=True) # compute output output = model(images) loss = criterion(output, target) # measure accuracy and record loss acc1, acc5 = accuracy(output, target, topk=(1, 5)) losses.update(loss.item(), images.size(0)) top1.update(acc1[0], images.size(0)) top5.update(acc5[0], 
images.size(0)) # compute gradient and do SGD step optimizer.zero_grad() loss.backward() optimizer.step() # measure elapsed time batch_time.update(time.time() - end) end = time.time() if i % print_freq == 0: progress.display(i) def validate( val_loader: DataLoader, model: DistributedDataParallel, criterion, # nn.CrossEntropyLoss device_id: int, print_freq: int, ): batch_time = AverageMeter("Time", ":6.3f") losses = AverageMeter("Loss", ":.4e") top1 = AverageMeter("Acc@1", ":6.2f") top5 = AverageMeter("Acc@5", ":6.2f") progress = ProgressMeter( len(val_loader), [batch_time, losses, top1, top5], prefix="Test: " ) # switch to evaluate mode model.eval() with torch.no_grad(): end = time.time() for i, (images, target) in enumerate(val_loader): if device_id is not None: images = images.cuda(device_id, non_blocking=True) target = target.cuda(device_id, non_blocking=True) # compute output output = model(images) loss = criterion(output, target) # measure accuracy and record loss acc1, acc5 = accuracy(output, target, topk=(1, 5)) losses.update(loss.item(), images.size(0)) top1.update(acc1[0], images.size(0)) top5.update(acc5[0], images.size(0)) # measure elapsed time batch_time.update(time.time() - end) end = time.time() if i % print_freq == 0: progress.display(i) # TODO: this should also be done with the ProgressMeter print( " * Acc@1 {top1.avg:.3f} Acc@5 {top5.avg:.3f}".format(top1=top1, top5=top5) ) return top1.avg class AverageMeter(object): """Computes and stores the average and current value""" def __init__(self, name: str, fmt: str = ":f"): self.name = name self.fmt = fmt self.reset() def reset(self) -> None: self.val = 0 self.avg = 0 self.sum = 0 self.count = 0 def update(self, val, n=1) -> None: self.val = val self.sum += val * n self.count += n self.avg = self.sum / self.count def __str__(self): fmtstr = "{name} {val" + self.fmt + "} ({avg" + self.fmt + "})" return fmtstr.format(**self.__dict__) class ProgressMeter(object): def __init__(self, num_batches: int, meters: List[AverageMeter], prefix: str = ""): self.batch_fmtstr = self._get_batch_fmtstr(num_batches) self.meters = meters self.prefix = prefix def display(self, batch: int) -> None: entries = [self.prefix + self.batch_fmtstr.format(batch)] entries += [str(meter) for meter in self.meters] print("\t".join(entries)) def _get_batch_fmtstr(self, num_batches: int) -> str: num_digits = len(str(num_batches // 1)) fmt = "{:" + str(num_digits) + "d}" return "[" + fmt + "/" + fmt.format(num_batches) + "]" def adjust_learning_rate(optimizer, epoch: int, lr: float) -> None: """ Sets the learning rate to the initial LR decayed by 10 every 30 epochs """ learning_rate = lr * (0.1 ** (epoch // 30)) for param_group in optimizer.param_groups: param_group["lr"] = learning_rate def accuracy(output, target, topk=(1,)): """ Computes the accuracy over the k top predictions for the specified values of k """ with torch.no_grad(): maxk = max(topk) batch_size = target.size(0) _, pred = output.topk(maxk, 1, True, True) pred = pred.t() correct = pred.eq(target.view(1, -1).expand_as(pred)) res = [] for k in topk: correct_k = correct[:k].reshape(1, -1).view(-1).float().sum(0, keepdim=True) res.append(correct_k.mul_(100.0 / batch_size)) return res if __name__ == "__main__": main()
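# ---------------------------------------------------------------------------
# Hedged sketch (not part of the original example): the snapshot round-trip
# that save_checkpoint/load_checkpoint build on, shown on CPU with a toy model
# so it runs without GPUs or a process group. Assumes the State class defined
# above is in scope; the toy model/optimizer are stand-ins, not the ImageNet
# ones used in main().
# ---------------------------------------------------------------------------
import torch.nn as nn
from torch.optim import SGD

model_a = nn.Linear(4, 2)
state_a = State("toy-linear", model_a, SGD(model_a.parameters(), lr=0.1))
state_a.epoch = 7
state_a.best_acc1 = 42.0

snapshot = state_a.capture_snapshot()          # plain dict, torch.save()-compatible

model_b = nn.Linear(4, 2)
state_b = State("toy-linear", model_b, SGD(model_b.parameters(), lr=0.1))
state_b.apply_snapshot(snapshot, device_id=0)  # device_id is unused on this CPU path

assert state_b.epoch == 7 and state_b.best_acc1 == 42.0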
#!/usr/bin/env python3

# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.

import sys
import time


def wait_for(msg, timeout: float = 300, interval: int = 1, print_spinner: bool = True):
    """
    Polls until ``timeout`` seconds have elapsed, sleeping ``interval`` seconds
    and yielding once per iteration. Raises ``RuntimeError`` on timeout.

    Usage::

        for _ in wait_for("asg to provision", timeout_sec, interval_sec):
            if check_condition():
                break
    """
    spin = ["-", "/", "|", "\\", "-", "/", "|", "\\"]
    idx = 0
    start = time.time()
    max_time = start + timeout
    while True:
        if print_spinner:
            elapsed = time.time() - start
            print(
                f"Waiting for {msg}"
                f" ({elapsed:03.0f}/{timeout:3.0f}s elapsed) {spin[idx]}\r",
                end="",
            )
            sys.stdout.flush()
            idx = (idx + 1) % len(spin)
        if time.time() >= max_time:
            raise RuntimeError(f"Timed out while waiting for: {msg}")
        else:
            time.sleep(interval)
            yield
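# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original module): polling a condition
# with wait_for. The "condition" here is just a timestamp check so the snippet
# terminates quickly; real callers poll AWS resources instead.
# ---------------------------------------------------------------------------
import time as _time

deadline = _time.time() + 3

for _ in wait_for("toy condition", timeout=10, interval=1, print_spinner=False):
    if _time.time() >= deadline:   # stand-in for e.g. "ASG reached target size"
        break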
#!/usr/bin/env python3 # Copyright (c) Facebook, Inc. and its affiliates. # All rights reserved. # # This source code is licensed under the BSD-style license found in the # LICENSE file in the root directory of this source tree. import getpass import logging import os import random import string from jinja2 import Template from util import wait_for log = logging.getLogger(__name__) class CloudFormation: def __init__(self, session): self._session = session self._cfn = session.client("cloudformation") def create_specs_file(self, specs_file, s3_bucket_name, efs_id): username = getpass.getuser() rand = "".join(random.choices(string.ascii_uppercase + string.digits, k=5)) hash = f"{username}-{rand}" stack_name = f"torchelastic-{hash}" this_dir = os.path.dirname(__file__) cfn_template = os.path.join(this_dir, "cfn/setup.yml") sample_specs = os.path.join(this_dir, "config/sample_specs.json") params = { "WorkerRoleName": f"torchelastic_worker_role-{hash}", "RendezvousRoleName": f"torchelastic_rendezvous_role-{hash}", } if s3_bucket_name: params["S3BucketName"] = s3_bucket_name if efs_id: params["EFSFileSystemId"] = efs_id self.create_stack(stack_name, cfn_template, **params) for _ in wait_for( f"cfn stack: {stack_name} to create", timeout=600, interval=2 ): status, outputs = self.describe_stack(stack_name) if status == "CREATE_COMPLETE": break elif status == "CREATE_FAILED" or status.startswith("ROLLBACK_"): # when stack creation fails cfn starts rolling the stack back raise RuntimeError( f"Error creating stack {stack_name}, status = {status}" ) outputs["User"] = username log.info(f"Writing specs file to: {specs_file}") with open(sample_specs) as f: specs_template = Template(f.read()) specs_template.stream(**outputs).dump(specs_file) def describe_stack(self, stack_name): describe_res = self._cfn.describe_stacks(StackName=stack_name) stacks = describe_res["Stacks"] if len(stacks) > 1: raise RuntimeError(f"Found more than one stack with name {stack_name}") stack_desc = stacks[0] status = stack_desc["StackStatus"] # cfn outputs an array of maps, each element in the array is # a single output of the form "{OutputKey: <key>, OutputValue: <value>}" # simplify to a map of <key>, <value> pairs outputs = {} if "Outputs" in stack_desc: for cfn_output in stack_desc["Outputs"]: key = cfn_output["OutputKey"] value = cfn_output["OutputValue"] outputs[key] = value return status, outputs def create_stack(self, stack_name, cfn_template, **params): log.info(f"Creating cloudformation stack with template: {cfn_template}") with open(cfn_template) as f: template_body = f.read() cfn_parameters = [] for key, value in params.items(): cfn_parameters.append({"ParameterKey": key, "ParameterValue": value}) res = self._cfn.create_stack( StackName=stack_name, TemplateBody=template_body, Capabilities=["CAPABILITY_NAMED_IAM"], Parameters=cfn_parameters, ) return res["StackId"]
#!/usr/bin/env python3 # Copyright (c) Facebook, Inc. and its affiliates. # All rights reserved. # # This source code is licensed under the BSD-style license found in the # LICENSE file in the root directory of this source tree.
#!/usr/bin/env python3 # Copyright (c) Facebook, Inc. and its affiliates. # All rights reserved. # # This source code is licensed under the BSD-style license found in the # LICENSE file in the root directory of this source tree. import argparse import getpass import json import logging import os import sys from os.path import expanduser from urllib.parse import urlparse import auth from autoscaling import AutoScalingGroup from cloudformation import CloudFormation from s3 import S3 log = logging.getLogger(__name__) PETCTL_DIR = os.path.join(expanduser("~"), ".petctl") PETCTL_CONFIG_FILE = os.path.join(PETCTL_DIR, "config") SPECS_FILE = os.path.join(PETCTL_DIR, "specs.json") def split_args(args, delimiter="--"): if delimiter in args: idx = args.index(delimiter) if idx == (len(args) - 1): return args, [] else: return args[0:idx], args[idx + 1 :] else: return args, [] def parse_arguments(args, **default_args): parser = argparse.ArgumentParser() parser.add_argument( "--specs_file", help="see https://github.com/pytorch/elastic/blob/master/aws/README.md#create-specs-file", # noqa B950 ) parser.set_defaults(**default_args) subparser = parser.add_subparsers( title="actions", description="run_job | kill_job", dest="command" ) # ----------------------------------------- # Run Job # ----------------------------------------- parser_run_job = subparser.add_parser( "run_job", help="runs a torchelastic job on asg" ) parser_run_job.add_argument("--name", required=True, help="name of the job") parser_run_job.add_argument( "--min_size", type=int, required=False, help="minimum number of worker hosts to continue training", ) parser_run_job.add_argument( "--max_size", type=int, required=False, help="maximum number of worker hosts to allow scaling out", ) parser_run_job.add_argument( "--size", type=int, required=True, help="number of worker hosts to start the job with", ) parser_run_job.add_argument( "--instance_type", required=False, help="Instance type to run the job on" ) parser_run_job.add_argument( dest="script_path", help="script or script dir path (e.g. ~/script.py, s3://..., docker://)", ) parser_run_job.set_defaults(func=run_job) # ----------------------------------------- # Kill Job # ----------------------------------------- parser_kill_job = subparser.add_parser( "kill_job", help="kills a torchelastic job on asg" ) parser_kill_job.add_argument(dest="job_name", help="name of the job to kill") parser_kill_job.set_defaults(func=kill_job) # ----------------------------------------- # List hosts in job # ----------------------------------------- parser_list_hosts = subparser.add_parser( "list_hosts", help="lists InService hosts in the job" ) parser_list_hosts.add_argument( dest="job_name", help="name of the job to list the hosts for" ) parser_list_hosts.set_defaults(func=list_hosts) # ----------------------------------------- # Upload script # ----------------------------------------- parser_upload = subparser.add_parser("upload", help="uploads the file/dir to s3") parser_upload.add_argument( dest="script_path", help="script or script dir path (e.g. 
~/script.py, s3://..., docker://)", ) parser_upload.add_argument( dest="s3_dest", help="s3 destination (default: s3://{s3_bucket}/{s3_prefix}/{USER}/scripts)", ) parser_upload.set_defaults(func=upload_script) # ----------------------------------------- # Configure # ----------------------------------------- subparser.add_parser("configure", help="configures petctl") # ----------------------------------------- # Setup # ----------------------------------------- parser_setup = subparser.add_parser( "setup", help="creates necessary aws resources and outputs a specs file" ) parser_setup.add_argument( "--region", default="us-west-2", help="aws region to setup on" ) parser_setup.add_argument( "--s3_bucket", help="s3 bucket to use for running petctl (if empty, one is created)", ) parser_setup.add_argument( "--efs_id", help="efs id to use, if empty, one is created" ) petctl_args, script_args = split_args(args[1:]) parsed = parser.parse_args(petctl_args) parsed.script_args = script_args return parsed def load_specs_json(file): log.info(f"Loading launch specs from: {args.specs_file}") with open(file) as f: return json.load(f) def run_job(session, specs_json, args): job_name = args.name script_args = args.script_args rdzv_specs = specs_json["rdzv"] worker_specs = specs_json["worker"] script_url = urlparse(args.script_path) scheme = script_url.scheme if scheme == "docker": # docker://tmp/script.py -> tmp/script.py (relative to working dir in docker) # docker:///tmp/script.py -> /tmp/script.py (absolute path in docker) script = script_url.netloc + script_url.path elif scheme == "s3": # fetch_and_run supports s3:// so just pass through script = args.script_path else: s3_bucket = worker_specs["s3_bucket"] s3_prefix = worker_specs["s3_prefix"] script = S3(session).cp(args.script_path, s3_bucket, f"{s3_prefix}/{job_name}") asg = AutoScalingGroup(session) rdzv_asg_name = f"{job_name}_rdzv" worker_asg_name = f"{job_name}_worker" # create a single node asg to host the etcd server for rendezvous etcd_server_hostname = asg.create_asg_sync(rdzv_asg_name, size=1, **rdzv_specs)[0] rdzv_endpoint = f"{etcd_server_hostname}:2379" # allow overriding instance types from cli if args.instance_type: worker_specs["instance_type"] = args.instance_type worker_specs["rdzv_endpoint"] = rdzv_endpoint worker_specs["job_name"] = job_name worker_specs["script"] = script worker_specs["args"] = " ".join(script_args) worker_specs["user"] = getpass.getuser() instance_type = worker_specs["instance_type"] script_args_str = worker_specs["args"] log.info( f"\n------------------------------------------------------------------\n" f"Starting job...\n" f" job name : {job_name}\n" f" instance type: {instance_type}\n" f" size : {args.size} (min={args.min_size}, max={args.max_size})\n" f" rdzv endpoint: {rdzv_endpoint}\n" f" cmd : {script}\n" f" cmd args : {script_args_str}\n" f"------------------------------------------------------------------\n" ) asg.create_asg( worker_asg_name, args.size, args.min_size, args.max_size, **worker_specs ) def kill_job(session, specs_json, args): job_name = args.job_name log.info(f"Killing job {job_name}") asg = AutoScalingGroup(session) asg.delete_asg(f"{job_name}_rdzv") asg.delete_asg(f"{job_name}_worker") def upload_script(session, specs_json, args): script_path = args.script_path s3_dest = args.s3_dest if not s3_dest: s3_bucket = specs_json["s3_bucket"] s3_prefix = os.path.join(specs_json["s3_prefix"], getpass.getuser()) else: s3_bucket = urlparse(s3_dest).netloc s3_prefix = urlparse(s3_dest).path.strip("/") 
log.info(f"Uploading: {script_path} to s3://{s3_bucket}/{s3_prefix}") s3 = S3(session) url = s3.cp(script_path, s3_bucket, s3_prefix) log.info(f"Finished uploading to: {url}") def list_hosts(session, specs_json, args): job_name = args.job_name asg = AutoScalingGroup(session) asgs = [f"{job_name}_rdzv", f"{job_name}_worker"] hosts = {} for asg_name in asgs: instance_ids, hostnames = asg.list_hostnames(asg_name) hosts[asg_name] = zip(instance_ids, hostnames) print(f"\n--------------------------------------------------------------") for asg_name in hosts: print(f"Hosts in {asg_name}:") for i, host in enumerate(hosts[asg_name], start=1): instance_id = host[0] public_dns = host[1] print(f" {i}) {instance_id} ({public_dns})") print(f"--------------------------------------------------------------") print("To SSH run:") print(f"\taws ssm start-session --target <instance_id>") print(f"--------------------------------------------------------------") def configure(args): """ Configures petctl. Writes a simple json config file indicating the specs file to use and the aws region to the petctl config directory (default ~/.petctl). Prompts the user to input the specs file location and aws region. """ while True: specs_file = input( "Absolute path to specs file (e.g. /home/${USER}/specs.json): " ) if os.path.isfile(specs_file): break print(f"[{specs_file}] does not exist! Provide an existing path") while True: region = input("Default aws region to use (e.g. us-west-2): ") if region: break print("AWS region cannot be empty!") write_config_file(region, specs_file) log.info(f"Configuration complete. petctl config file: {PETCTL_CONFIG_FILE}") def setup(args): """ Similar to config but creates AWS resources using cfn template and based on the cfn stack output, creates the specs file for the user, then writes petctl config. """ region = args.region s3_bucket_name = args.s3_bucket efs_id = args.efs_id os.makedirs(PETCTL_DIR, exist_ok=True) session = auth.get_session(region) cfn = CloudFormation(session) cfn.create_specs_file(SPECS_FILE, s3_bucket_name, efs_id) write_config_file(region, SPECS_FILE) log.info(f"Setup complete. petctl config file: {PETCTL_CONFIG_FILE}") def write_config_file(region, specs_file): petctl_config = {"specs_file": specs_file, "region": region} os.makedirs(PETCTL_DIR, exist_ok=True) with open(PETCTL_CONFIG_FILE, "w+") as f: json.dump(petctl_config, f, indent=4) def load_configuration(): if os.path.isfile(PETCTL_CONFIG_FILE): with open(PETCTL_CONFIG_FILE) as f: return json.load(f) else: return {} if __name__ == "__main__": logging.basicConfig( level=logging.INFO, format="[%(levelname)s] %(asctime)s %(module)s: %(message)s" ) petctl_configs = load_configuration() args = parse_arguments(sys.argv, **petctl_configs) if args.command == "setup": args = parse_arguments(sys.argv) setup(args) elif args.command == "configure": configure(args) else: log.info( f"{PETCTL_CONFIG_FILE} not found or is empty," f" consider running: petctl setup|configure" ) region = args.region specs_json = load_specs_json(args.specs_file) session = auth.get_session(region) args.func(session, specs_json, args)
#!/usr/bin/env python3 # Copyright (c) Facebook, Inc. and its affiliates. # All rights reserved. # # This source code is licensed under the BSD-style license found in the # LICENSE file in the root directory of this source tree. import logging import os import shutil import tarfile as tar import tempfile log = logging.getLogger(__name__) class S3: def __init__(self, session): self._session = session self._s3 = session.client("s3") def cp(self, target_path, bucket, key): """ Uploads target_path to s3://bucket/key. If the target_path is a file then uploads to s3://bucket/key/file_name, if the target_path is a directory, then a tarball is created with the contents of target_path and uploaded to s3://bucket/key/dir_name.tar.gz. The tar is created as if created by running the command: cd target_path && tar xzf /tmp/$(basename target_path).tar.gz * Returns the destination s3 url """ target_basename = os.path.basename(target_path) if os.path.isdir(target_path): tmpdir = tempfile.mkdtemp(prefix="petctl_") tar_basename = f"{target_basename}.tar.gz" tar_file = os.path.join(tmpdir, tar_basename) log.info(f"Compressing {target_path} into {tar_basename}") with tar.open(tar_file, "x:gz") as f: f.add(target_path, arcname="", recursive=True) dest_key = f"{key}/{tar_basename}" target_file = tar_file else: tmpdir = None dest_key = f"{key}/{target_basename}" target_file = target_path log.info(f"Uploading {target_file} to s3://{bucket}/{dest_key}") self._s3.upload_file(target_file, bucket, dest_key) if tmpdir: log.info(f"Deleting tmp dir: {tmpdir}") shutil.rmtree(tmpdir) return f"s3://{bucket}/{dest_key}"
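# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original module): uploading a script
# directory with S3.cp. The bucket, prefix, and local path are placeholders,
# and valid AWS credentials are required for the upload to actually succeed.
# ---------------------------------------------------------------------------
import boto3

session = boto3.session.Session(region_name="us-west-2")   # hypothetical region
s3_url = S3(session).cp("./my_training_script", "my-petctl-bucket", "scripts/demo")
# For a directory, cp tars it first, so s3_url would look like
# s3://my-petctl-bucket/scripts/demo/my_training_script.tar.gz
print(s3_url)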
#!/usr/bin/env python3 # Copyright (c) Facebook, Inc. and its affiliates. # All rights reserved. # # This source code is licensed under the BSD-style license found in the # LICENSE file in the root directory of this source tree. import logging import os from enum import Enum, unique from jinja2 import Template from util import wait_for log = logging.getLogger(__name__) @unique class Accelerator(Enum): NONE = 0 GPU = 1 @classmethod def get_accelerator(cls, instance_type): """ get_accelerator("p3.2xlarge") returns Accelerator.GPU get_accelerator("i3.xlarge") returns Accelerator.NONE """ instance_accelerators = { "g2": Accelerator.GPU, "g3": Accelerator.GPU, "g4": Accelerator.GPU, "p2": Accelerator.GPU, "p3": Accelerator.GPU, } instance_family = instance_type[0:2] return instance_accelerators.get(instance_family, Accelerator.NONE) @classmethod def from_str(cls, accelerator_str): """ returns the enum Accelerator value from a string representation """ accelerators = {"none": Accelerator.NONE, "gpu": Accelerator.GPU} return accelerators.get(accelerator_str.lower(), Accelerator.NONE) def describe(self): """ Returns a string representation of the enum. This method is intended to be used to label certain AWS resources in their descriptions/names for informative purposes e.g. launch template created for GPUs can be named as: torchelastic_gpu """ string_rep = {Accelerator.NONE.value(): "cpu", Accelerator.GPU.value(): "gpu"} return string_rep.get(self, "unknown_accelerator") class AutoScalingGroup: def __init__(self, session): self._session = session self._asg = session.client("autoscaling") self._ec2 = session.client("ec2") def get_user_data(self, user_data_template, **kwargs): if os.path.isabs(user_data_template): user_data_path = user_data_template else: user_data_path = os.path.join(os.path.dirname(__file__), user_data_template) with open(user_data_path) as f: user_data_template = Template(f.read()) user_data = user_data_template.render(**kwargs) return user_data def get_ami_id(self, accelerator): """ Use EKS optimized AMI since it has everything we need pre-installed """ eks_owner_id = "602401143452" eks_amis = { Accelerator.NONE: "amazon-eks-node-1.14-v20190927", Accelerator.GPU: "amazon-eks-gpu-node-1.14-v20190927", } res = self._ec2.describe_images( Filters=[ {"Name": "owner-id", "Values": [eks_owner_id]}, { "Name": "name", "Values": [eks_amis.get(accelerator, Accelerator.NONE)], }, ] ) images = res["Images"] assert ( len(images) == 1 ), f"Multiple EKS AMIs found for {self._session.aws_region()}" return images[0]["ImageId"] def create_launch_config( self, name, instance_type, instance_role, user_data_template, security_groups=None, accelerator="gpu", max_spot_price=None, ebs_volume_gb=128, **user_data_kwargs, ): req = { "LaunchConfigurationName": name, "InstanceType": instance_type, "IamInstanceProfile": instance_role, "ImageId": self.get_ami_id(Accelerator.from_str(accelerator)), "SecurityGroups": security_groups, "AssociatePublicIpAddress": True, "UserData": self.get_user_data(user_data_template, **user_data_kwargs), "BlockDeviceMappings": [ { "DeviceName": "/dev/xvda", "Ebs": { "VolumeSize": ebs_volume_gb, "VolumeType": "gp2", "DeleteOnTermination": True, }, } ], } if max_spot_price: req["SpotMaxPrice"] = str(max_spot_price) log.info(f"Creating launch config: {name}") self._asg.create_launch_configuration(**req) def describe_launch_config(self, name): res = self._asg.describe_launch_configurations(LaunchConfigurationNames=[name]) lcs = res["LaunchConfigurations"] return lcs[0] if len(lcs) 
== 1 else None def delete_launch_config(self, name): if self.describe_launch_config(name): log.info(f"Deleting asg launch config: {name}") self._asg.delete_launch_configuration(LaunchConfigurationName=name) def create_asg(self, name, size, min_size=None, max_size=None, **kwargs): """ Creates an asg. For specifications on kwargs see config/sample_specs.json """ if not min_size: min_size = size if not max_size: max_size = size assert min_size <= size <= max_size kwargs["size"] = size kwargs["min_size"] = min_size kwargs["max_size"] = max_size self.create_launch_config(name, **kwargs) log.info(f"Creating autoscaling group: {name}") self._asg.create_auto_scaling_group( AutoScalingGroupName=name, LaunchConfigurationName=name, VPCZoneIdentifier=",".join(kwargs["subnets"]), MinSize=min_size, MaxSize=max_size, DesiredCapacity=size, ) def create_asg_sync(self, name, size, min_size=None, max_size=None, **kwargs): self.create_asg(name, size, min_size, max_size, **kwargs) _, hostnames = self.get_hostnames(name, size) return hostnames def describe_asg(self, name): res = self._asg.describe_auto_scaling_groups(AutoScalingGroupNames=[name]) asgs = res["AutoScalingGroups"] num_asgs = len(asgs) return asgs[0] if num_asgs == 1 else None def delete_asg(self, name): if self.describe_asg(name): log.info(f"Deleting autoscaling group: {name}") self._asg.delete_auto_scaling_group( AutoScalingGroupName=name, ForceDelete=True ) for _ in wait_for(f"instances in {name} to terminate"): if not self.describe_asg(name): log.info(f"Deleted autoscaling group: {name}") break # launch config needs to be deleted after asg self.delete_launch_config(name) def list_hostnames(self, name): return self.get_hostnames(name, 1) def get_hostnames(self, name, size): """ Waits until the asg has at least <size> instances in "InService" state and returns their public dns names. """ for _ in wait_for(f"autoscaling group: {name} to reach size >= {size}"): asg_desc = self.describe_asg(name) if not asg_desc: return [] else: instances = asg_desc["Instances"] ready_instance_ids = [ e["InstanceId"] for e in instances if e["LifecycleState"] == "InService" ] if len(ready_instance_ids) >= size: paginator = self._ec2.get_paginator("describe_instances") hostnames = [] instance_ids = [] for e in paginator.paginate(InstanceIds=ready_instance_ids): for r in e["Reservations"]: for i in r["Instances"]: hostnames.append(i["PublicDnsName"]) instance_ids.append(i["InstanceId"]) return instance_ids, hostnames
#!/usr/bin/env python3 # Copyright (c) Facebook, Inc. and its affiliates. # All rights reserved. # # This source code is licensed under the BSD-style license found in the # LICENSE file in the root directory of this source tree. import abc import boto3 class AwsSessionProvider: """ Provides AWS credentials in the form of boto3 Session. This class may be sub-classed to provide custom methods of getting aws_access_key_id and aws_secret_access_key. Child classes are expected to provide overriding implementations of the three `_get_*` methods below. When used directly, it follows the default credential lookup chain as documented in: https://boto3.amazonaws.com/v1/documentation/api/latest/guide/configuration.html """ def get_session(self, region=None) -> boto3.Session: access_key = self._get_access_key() secret_key = self._get_secret_key() session_token = self._get_session_token() # either both access and secret keys are None # or both are not None; just check one to assume # the presence of the other if access_key is None: return boto3.session.Session() else: return boto3.session.Session( aws_access_key_id=access_key, aws_secret_access_key=secret_key, aws_session_token=session_token, region_name=region, ) def _get_access_key(self): """ Returns the aws_access_key_id. Override when sub-classing. """ return None def _get_secret_key(self): """ Returns the aws_secret_access_key. Override when sub-classing. """ return None def _get_session_token(self): """ Returns the aws_session_token. Override when sub-classing. """ return None
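# ---------------------------------------------------------------------------
# Hedged sketch (not part of the original module): a subclass that supplies
# static credentials by overriding the three _get_* hooks, as the class
# docstring above suggests. The key values are placeholders; in practice they
# would come from a secrets store rather than literals.
# ---------------------------------------------------------------------------
class StaticKeySessionProvider(AwsSessionProvider):
    def __init__(self, access_key, secret_key, session_token=None):
        self._access_key = access_key
        self._secret_key = secret_key
        self._session_token = session_token

    def _get_access_key(self):
        return self._access_key

    def _get_secret_key(self):
        return self._secret_key

    def _get_session_token(self):
        return self._session_token


# provider = StaticKeySessionProvider("AKIA_PLACEHOLDER", "PLACEHOLDER_SECRET")
# session = provider.get_session(region="us-west-2")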
#!/usr/bin/env python3 # Copyright (c) Facebook, Inc. and its affiliates. # All rights reserved. # # This source code is licensed under the BSD-style license found in the # LICENSE file in the root directory of this source tree. from .session import AwsSessionProvider def get_session(region): return AwsSessionProvider().get_session(region) try: from .static_init import * # noqa: F401 F403 except ModuleNotFoundError: pass
#!/usr/bin/env python3 # Copyright (c) Facebook, Inc. and its affiliates. # All rights reserved. # # This source code is licensed under the BSD-style license found in the # LICENSE file in the root directory of this source tree.
#!/usr/bin/env python3

# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.

from torch.distributed.launcher.api import (  # noqa F401
    elastic_launch,
    launch_agent,
    LaunchConfig,
)
#!/usr/bin/env python3

# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import os

os.environ["LOGLEVEL"] = "INFO"
# Since the logger is initialized during the import statement,
# the log level should be set first.
from torch.distributed.run import main as run_main


def main(args=None) -> None:
    run_main(args)


if __name__ == "__main__":
    main()
from subprocess import check_output, STDOUT, CalledProcessError
import sys

import pytest
import glob

PYTHON_CODE_DIR = "python_code"

ALL_FILES = glob.glob(PYTHON_CODE_DIR + "/*.py")


@pytest.mark.parametrize('file_path', ALL_FILES)
def test_run_file(file_path):
    if 'nvidia' in file_path:
        # FIXME: NVIDIA model checkpoints are on cuda
        pytest.skip("temporarily disabled")
    if 'pytorch_fairseq_translation' in file_path:
        pytest.skip("temporarily disabled")
    if 'ultralytics_yolov5' in file_path:
        # FIXME torch.nn.modules.module.ModuleAttributeError: 'autoShape' object has no attribute 'fuse'
        pytest.skip("temporarily disabled")
    if 'huggingface_pytorch-transformers' in file_path:
        # FIXME torch.nn.modules.module.ModuleAttributeError: 'autoShape' object has no attribute 'fuse'
        pytest.skip("temporarily disabled")
    if 'pytorch_fairseq_roberta' in file_path:
        pytest.skip("temporarily disabled")

    # We just run the python files in a separate sub-process. We really want a
    # subprocess here because otherwise we might run into package version
    # issues: imagine script A that needs torchvision 0.9 and script B that
    # needs torchvision 0.10. If script A is run prior to script B in the same
    # process, script B will still be run with torchvision 0.9 because the only
    # "import torchvision" statement that counts is the first one, and even
    # torch.hub sys.path shenanigans can do nothing about this. By creating
    # subprocesses we're sure that all file executions are fully independent.
    try:
        # This is inspired (and heavily simplified) from
        # https://github.com/cloudpipe/cloudpickle/blob/343da119685f622da2d1658ef7b3e2516a01817f/tests/testutils.py#L177
        out = check_output([sys.executable, file_path], stderr=STDOUT)
        print(out.decode())
    except CalledProcessError as e:
        raise RuntimeError(f"Script {file_path} errored with output:\n{e.output.decode()}")
valid_tags = ['vision', 'nlp', 'generative', 'audio', 'scriptable', ]
import argparse import os import glob from urllib.request import urlopen, HTTPError from tags import valid_tags import yaml import mistune class ValidMD: def __init__(self, filename): self.filename = filename self.required_user_fields = ['title', 'summary', 'image', 'author', 'tags', 'github-link', 'category'] self.optional_image_fields = ['featured_image_1', 'featured_image_2'] self.valid_tags = valid_tags self.valid_categories = ['researchers', 'developers'] self.required_sections = ['Model Description'] self.optional_demo_link = ['demo-model-link'] def validate_tags(self, tags): ''' Only allow tags in pre-defined set ''' for t in tags: if t not in self.valid_tags: raise ValueError( 'Tag {} is not valid in {}. Valid tag set is {}' .format(t, self.filename, self.valid_tags)) def validate_category(self, category): ''' Only allow categories in predefined set ''' if category not in self.valid_categories: raise ValueError( 'Category {} is not valid in {}. Choose from {}' .format(category, self.filename, self.valid_categories)) def validate_link(self, link): ''' Make sure the github repo exists ''' try: urlopen(link) except HTTPError: raise ValueError('{} is not valid url in {}' .format(link, self.filename)) def validate_image(self, image_name): ''' Make sure reference image exists in images/ ''' images = [os.path.basename(i) for i in glob.glob('images/*')]\ + ['pytorch-logo.png', 'no-image'] if image_name not in images: raise ValueError('Image {} referenced in {} not found in images/' .format(image_name, self.filename)) def validate_header(self, header): ''' Make sure the header is in the required format ''' assert header['layout'] == 'hub_detail' assert header['background-class'] == 'hub-background' assert header['body-class'] == 'hub' for field in self.required_user_fields: header[field] # assert that it exists self.validate_tags(header['tags']) self.validate_link(header['github-link']) self.validate_image(header['image']) self.validate_category(header['category']) for field in self.optional_demo_link: if field in header.keys(): self.validate_link(header[field]) for field in self.optional_image_fields: if field in header.keys(): self.validate_image(header[field]) for k in header.keys(): if not k.endswith('-link'): self.no_extra_colon(k, header[k]) def no_extra_colon(self, field, value): # Jekyll doesn't build with extra colon in these fields if ':' in str(value): raise ValueError('Remove extra \':\' in field {} with value {} in file {}' .format(field, value, self.filename)) def validate_markdown(self, markdown): m = mistune.create_markdown(renderer=mistune.AstRenderer()) for block in m(markdown): if block['type'] == 'heading': # we dont want colon after section names text_children = [c for c in block['children'] if c['type'] == 'text'] for c in text_children: assert not c['text'].endswith(':') if c['text'] in self.required_sections: self.required_sections.remove(c['text']) try: assert len(self.required_sections) == 0 except AssertionError as e: print("Missing required sections: {}".format(self.required_sections)) raise e def check_markdown_file(self): print('Checking {}...'.format(self.filename)) # separate header and markdown. 
# Then, check header and markdown separately

        header = []
        markdown = []
        header_read = False
        with open(self.filename, 'r') as f:
            for line in f:
                if line.startswith('---'):
                    header_read = not header_read
                    continue
                if header_read:
                    header += [line]
                else:
                    markdown += [line]

        # checks that the header is valid yaml
        header = yaml.safe_load(''.join(header))
        assert header, "Failed to parse a valid yaml header"
        self.validate_header(header)

        # check markdown
        markdown = "".join(markdown)
        self.validate_markdown(markdown)


def sanity_check():
    for f in glob.glob('*.md'):
        # Skip documentation
        if f in ('README.md', 'CONTRIBUTING.md', 'CODE_OF_CONDUCT.md'):
            continue
        ValidMD(f).check_markdown_file()


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('-f', '--file', default=None, help='filename')
    args = parser.parse_args()
    if args.file:
        ValidMD(args.file).check_markdown_file()
    else:
        sanity_check()
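# For reference, a hedged sketch of the kind of YAML front matter that
# ``ValidMD.validate_header`` (above) expects at the top of a hub markdown
# file, between two ``---`` lines. All field values below are placeholders
# chosen to satisfy the checks (e.g. ``vision`` is one of the valid tags and
# ``no-image`` is an accepted image value); real entries will differ.

import yaml

sample_header = """
layout: hub_detail
background-class: hub-background
body-class: hub
title: Example Model
summary: One-line description of the model
image: no-image
author: Example Author
tags: [vision]
github-link: https://github.com/pytorch/hub
category: researchers
"""

header = yaml.safe_load(sample_header)
assert header["layout"] == "hub_detail"
assert header["category"] in ("researchers", "developers")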
# Copyright (c) 2018-present, Facebook, Inc. # All rights reserved. # # This source code is licensed under the BSD-style license found in the # LICENSE file in the root directory of this source tree. import torch.nn as nn from elf.options import auto_import_options, PyOptionSpec from rlpytorch import Model from elfgames.go.multiple_prediction import MultiplePrediction class Model_Policy(Model): @classmethod def get_option_spec(cls): spec = PyOptionSpec() spec.addBoolOption( 'bn', 'toggles batch norm', True) spec.addBoolOption( 'leaky_relu', 'toggles leaky ReLU', True) spec.addIntOption( 'num_layer', 'number of layers', 39) spec.addIntOption( 'dim', 'model dimension', 128) return spec @auto_import_options def __init__(self, option_map, params): super().__init__(option_map, params) self.board_size = params["board_size"] self.num_future_actions = params["num_future_actions"] self.num_planes = params["num_planes"] # print("#future_action: " + str(self.num_future_actions)) # print("#num_planes: " + str(self.num_planes)) # Simple method. multiple conv layers. self.convs = [] self.convs_bn = [] last_planes = self.num_planes for i in range(self.options.num_layer): conv = nn.Conv2d(last_planes, self.options.dim, 3, padding=1) conv_bn = (nn.BatchNorm2d(self.options.dim) if self.options.bn else lambda x: x) setattr(self, "conv" + str(i), conv) self.convs.append(conv) setattr(self, "conv_bn" + str(i), conv_bn) self.convs_bn.append(conv_bn) last_planes = self.options.dim self.final_conv = nn.Conv2d( self.options.dim, self.num_future_actions, 3, padding=1) # Softmax as the final layer self.softmax = nn.Softmax(dim=1) self.relu = nn.LeakyReLU(0.1) if self.options.leaky_relu else nn.ReLU() def forward(self, x): s = self._var(x["s"]) for conv, conv_bn in zip(self.convs, self.convs_bn): s = conv_bn(self.relu(conv(s))) output = self.final_conv(s) pis = [] d = self.board_size * self.board_size for i in range(self.num_future_actions): pis.append(self.softmax(output[:, i].contiguous().view(-1, d))) return dict(pis=pis, pi=pis[0]) # Format: key, [model, method] Models = { "df_policy": [Model_Policy, MultiplePrediction] }
#!/usr/bin/env python # -*- coding: utf-8 -*- # Copyright (c) 2018-present, Facebook, Inc. # All rights reserved. # # This source code is licensed under the BSD-style license found in the # LICENSE file in the root directory of this source tree. import os from elf import GCWrapper, ContextArgs, MoreLabels from elf.options import auto_import_options, PyOptionSpec import _elfgames_go_inference as go # from server_addrs import addrs class Loader(object): @classmethod def get_option_spec(cls): spec = PyOptionSpec() spec.addStrOption( 'preload_sgf', 'TODO: fill this help message in', '') spec.addIntOption( 'preload_sgf_move_to', 'TODO: fill this help message in', -1) spec.addStrOption( 'mode', 'TODO: fill this help message in', "online") spec.addBoolOption( 'actor_only', 'TODO: fill this help message in', False) spec.addIntOption( 'num_reset_ranking', 'TODO: fill this help message in', 5000) spec.addBoolOption( 'verbose', 'TODO: fill this help message in', False) spec.addBoolOption( 'print_result', 'TODO: fill this help message in', False) spec.addIntOption( 'data_aug', 'specify data augumentation, 0-7, -1 mean random', -1) spec.addIntOption( 'num_games_per_thread', ('For offline mode, it is the number of concurrent games per ' 'thread, used to increase diversity of games; for selfplay mode, ' 'it is the number of games played at each thread, and after that ' 'we need to call restartAllGames() to resume.'), -1) spec.addIntOption( 'num_future_actions', 'TODO: fill this help message in', 1) spec.addIntOption( 'move_cutoff', 'Cutoff ply in replay', -1) spec.addStrOption( 'mode', 'TODO: fill this help message in', 'online') spec.addBoolOption( 'black_use_policy_network_only', 'TODO: fill this help message in', False) spec.addIntOption( 'ply_pass_enabled', 'TODO: fill this help message in', 0) spec.addBoolOption( 'use_mcts', 'TODO: fill this help message in', False) spec.addBoolOption( 'use_df_feature', 'TODO: fill this help message in', False) spec.addStrOption( 'dump_record_prefix', 'TODO: fill this help message in', '') spec.addFloatOption( 'resign_thres', 'TODO: fill this help message in', 0.0) spec.addBoolOption( 'following_pass', 'TODO: fill this help message in', False) spec.addIntOption( 'gpu', 'TODO: fill this help message in', -1) spec.addBoolOption( 'parameter_print', 'TODO: fill this help message in', True) spec.addIntOption( 'batchsize', 'batch size', 128) spec.addIntOption( 'batchsize2', 'batch size', -1) spec.addFloatOption( 'eval_winrate_thres', 'Win rate threshold for evalution', 0.55) spec.addIntOption( 'suicide_after_n_games', 'return after n games have finished, -1 means it never ends', -1) spec.merge(PyOptionSpec.fromClasses((ContextArgs, MoreLabels))) return spec @auto_import_options def __init__(self, option_map): self.context_args = ContextArgs(option_map) self.more_labels = MoreLabels(option_map) def _set_params(self): co = go.ContextOptions() self.context_args.initialize(co) co.job_id = os.environ.get("job_id", "local") if self.options.parameter_print: co.print() opt = go.GameOptions() opt.seed = 0 opt.mode = self.options.mode opt.use_mcts = self.options.use_mcts opt.use_df_feature = self.options.use_df_feature opt.dump_record_prefix = self.options.dump_record_prefix opt.verbose = self.options.verbose opt.black_use_policy_network_only = \ self.options.black_use_policy_network_only opt.data_aug = self.options.data_aug opt.ply_pass_enabled = self.options.ply_pass_enabled opt.num_reset_ranking = self.options.num_reset_ranking opt.move_cutoff = self.options.move_cutoff 
opt.num_games_per_thread = self.options.num_games_per_thread
        opt.following_pass = self.options.following_pass
        opt.resign_thres = self.options.resign_thres
        opt.preload_sgf = self.options.preload_sgf
        opt.preload_sgf_move_to = self.options.preload_sgf_move_to
        opt.print_result = self.options.print_result

        self.max_batchsize = max(
            self.options.batchsize, self.options.batchsize2) \
            if self.options.batchsize2 > 0 \
            else self.options.batchsize
        co.batchsize = self.max_batchsize

        GC = go.GameContext(co, opt)

        if self.options.parameter_print:
            print("**** Options ****")
            print(opt.info())
            print("*****************")
            print("Version: ", GC.ctx().version())

        return co, GC, opt

    def initialize(self):
        co, GC, opt = self._set_params()

        params = GC.getParams()

        if self.options.parameter_print:
            print("Mode: ", opt.mode)
            print("Num Actions: ", params["num_action"])

        desc = {}
        if self.options.mode == "online":
            desc["human_actor"] = dict(
                input=["s"],
                reply=["pi", "a", "V"],
                batchsize=1,
            )
            # Used for MCTS/Direct play.
            desc["actor_black"] = dict(
                input=["s"],
                reply=["pi", "V", "a", "rv"],
                timeout_usec=10,
                batchsize=co.mcts_options.num_rollouts_per_batch
            )
        else:
            raise ValueError("No such mode: " + self.options.mode)

        params.update(dict(
            num_group=1 if self.options.actor_only else 2,
            T=self.options.T,
        ))

        self.more_labels.add_labels(desc)
        return GCWrapper(
            GC,
            self.max_batchsize,
            desc,
            num_recv=2,
            gpu=(self.options.gpu
                 if (self.options.gpu is not None and self.options.gpu >= 0)
                 else None),
            use_numpy=False,
            params=params,
            verbose=self.options.parameter_print)
#!/usr/bin/env python # -*- coding: utf-8 -*- # Copyright (c) 2018-present, Facebook, Inc. # All rights reserved. # # This source code is licensed under the BSD-style license found in the # LICENSE file in the root directory of this source tree. import os from elf import GCWrapper, ContextArgs, MoreLabels from elf.options import auto_import_options, PyOptionSpec import _elfgames_go as go from server_addrs import addrs class Loader(object): @classmethod def get_option_spec(cls): spec = PyOptionSpec() spec.addStrOption( 'preload_sgf', 'TODO: fill this help message in', '') spec.addIntOption( 'preload_sgf_move_to', 'TODO: fill this help message in', -1) spec.addBoolOption( 'actor_only', 'TODO: fill this help message in', False) spec.addStrListOption( 'list_files', 'Provide a list of json files for offline training', []) spec.addIntOption( 'port', 'TODO: fill this help message in', 5556) spec.addStrOption( 'server_addr', 'TODO: fill this help message in', '') spec.addStrOption( 'server_id', 'TODO: fill this help message in', '') spec.addIntOption( 'q_min_size', 'TODO: fill this help message in', 10) spec.addIntOption( 'q_max_size', 'TODO: fill this help message in', 1000) spec.addIntOption( 'num_reader', 'TODO: fill this help message in', 50) spec.addIntOption( 'num_reset_ranking', 'TODO: fill this help message in', 5000) spec.addIntOption( 'client_max_delay_sec', 'Maximum amount of allowed delays in sec. If the client ' 'didn\'t respond after that, we think it is dead.', 1200) spec.addBoolOption( 'verbose', 'TODO: fill this help message in', False) spec.addBoolOption( 'keep_prev_selfplay', 'TODO: fill this help message in', False) spec.addBoolOption( 'print_result', 'TODO: fill this help message in', False) spec.addIntOption( 'data_aug', 'specify data augumentation, 0-7, -1 mean random', -1) spec.addIntOption( 'ratio_pre_moves', ('how many moves to perform in each thread, before we use the ' 'data to train the model'), 0) spec.addFloatOption( 'start_ratio_pre_moves', ('how many moves to perform in each thread, before we use the ' 'first sgf file to train the model'), 0.5) spec.addIntOption( 'num_games_per_thread', ('For offline mode, it is the number of concurrent games per ' 'thread, used to increase diversity of games; for selfplay mode, ' 'it is the number of games played at each thread, and after that ' 'we need to call restartAllGames() to resume.'), -1) spec.addIntOption( 'expected_num_clients', 'Expected number of clients', -1 ) spec.addIntOption( 'num_future_actions', 'TODO: fill this help message in', 1) spec.addIntOption( 'move_cutoff', 'Cutoff ply in replay', -1) spec.addStrOption( 'mode', 'TODO: fill this help message in', 'online') spec.addBoolOption( 'black_use_policy_network_only', 'TODO: fill this help message in', False) spec.addBoolOption( 'white_use_policy_network_only', 'TODO: fill this help message in', False) spec.addIntOption( 'ply_pass_enabled', 'TODO: fill this help message in', 0) spec.addBoolOption( 'use_mcts', 'TODO: fill this help message in', False) spec.addBoolOption( 'use_mcts_ai2', 'TODO: fill this help message in', False) spec.addFloatOption( 'white_puct', 'PUCT for white when it is > 0.0. If it is -1 then we use' 'the same puct for both side (specified by mcts_options).' 'A HACK to use different puct for different model. 
Should' 'be replaced by a more systematic approach.', -1.0) spec.addIntOption( 'white_mcts_rollout_per_batch', 'white mcts rollout per batch', -1) spec.addIntOption( 'white_mcts_rollout_per_thread', 'white mcts rollout per thread', -1) spec.addBoolOption( 'use_df_feature', 'TODO: fill this help message in', False) spec.addStrOption( 'dump_record_prefix', 'TODO: fill this help message in', '') spec.addIntOption( 'policy_distri_cutoff', 'TODO: fill this help message in', 0) spec.addFloatOption( 'resign_thres', 'TODO: fill this help message in', 0.0) spec.addBoolOption( 'following_pass', 'TODO: fill this help message in', False) spec.addIntOption( 'selfplay_timeout_usec', 'TODO: fill this help message in', 0) spec.addIntOption( 'gpu', 'TODO: fill this help message in', -1) spec.addBoolOption( 'policy_distri_training_for_all', 'TODO: fill this help message in', False) spec.addBoolOption( 'parameter_print', 'TODO: fill this help message in', True) spec.addIntOption( 'batchsize', 'batch size', 128) spec.addIntOption( 'batchsize2', 'batch size', -1) spec.addIntOption( 'T', 'number of timesteps', 6) spec.addIntOption( 'selfplay_init_num', ('Initial number of selfplay games to generate before training a ' 'new model'), 2000) spec.addIntOption( 'selfplay_update_num', ('Additional number of selfplay games to generate after a model ' 'is updated'), 1000) spec.addBoolOption( 'selfplay_async', ('Whether to use async mode in selfplay'), False) spec.addIntOption( 'eval_num_games', ('number of evaluation to be performed to decide whether a model ' 'is better than the other'), 400) spec.addFloatOption( 'eval_winrate_thres', 'Win rate threshold for evalution', 0.55) spec.addIntOption( 'eval_old_model', ('If specified, then we directly switch to evaluation mode ' 'between the loaded model and the old model specified by this ' 'switch'), -1) spec.addStrOption( 'eval_model_pair', ('If specified for df_selfplay.py, then the two models will be ' 'evaluated on this client'), '') spec.addStrOption( 'comment', 'Comment for this run', '') spec.addBoolOption( 'cheat_eval_new_model_wins_half', 'When enabled, in evaluation mode, when the game ' 'finishes, the player with the most recent model gets 100%% ' 'win rate half of the time.' 
'This is used to test the framework', False) spec.addBoolOption( 'cheat_selfplay_random_result', 'When enabled, in selfplay mode the result of the game is random' 'This is used to test the framework', False) spec.addIntOption( 'suicide_after_n_games', 'return after n games have finished, -1 means it never ends', -1) spec.merge(PyOptionSpec.fromClasses((ContextArgs, MoreLabels))) return spec @auto_import_options def __init__(self, option_map): self.context_args = ContextArgs(option_map) self.more_labels = MoreLabels(option_map) def _set_params(self): co = go.ContextOptions() self.context_args.initialize(co) co.job_id = os.environ.get("job_id", "local") if self.options.parameter_print: co.print() opt = go.GameOptions() opt.seed = 0 opt.list_files = self.options.list_files if self.options.server_addr: opt.server_addr = self.options.server_addr else: if self.options.server_id: opt.server_addr = addrs[self.options.server_id] opt.server_id = self.options.server_id else: opt.server_addr = "" opt.server_id = "" opt.port = self.options.port opt.mode = self.options.mode opt.use_mcts = self.options.use_mcts opt.use_mcts_ai2 = self.options.use_mcts_ai2 opt.use_df_feature = self.options.use_df_feature opt.dump_record_prefix = self.options.dump_record_prefix opt.policy_distri_training_for_all = \ self.options.policy_distri_training_for_all opt.verbose = self.options.verbose opt.black_use_policy_network_only = \ self.options.black_use_policy_network_only opt.white_use_policy_network_only = \ self.options.white_use_policy_network_only opt.data_aug = self.options.data_aug opt.ratio_pre_moves = self.options.ratio_pre_moves opt.q_min_size = self.options.q_min_size opt.q_max_size = self.options.q_max_size opt.num_reader = self.options.num_reader opt.start_ratio_pre_moves = self.options.start_ratio_pre_moves opt.ply_pass_enabled = self.options.ply_pass_enabled opt.num_future_actions = self.options.num_future_actions opt.num_reset_ranking = self.options.num_reset_ranking opt.move_cutoff = self.options.move_cutoff opt.policy_distri_cutoff = self.options.policy_distri_cutoff opt.num_games_per_thread = self.options.num_games_per_thread opt.following_pass = self.options.following_pass opt.resign_thres = self.options.resign_thres opt.preload_sgf = self.options.preload_sgf opt.preload_sgf_move_to = self.options.preload_sgf_move_to opt.keep_prev_selfplay = self.options.keep_prev_selfplay opt.expected_num_clients = self.options.expected_num_clients opt.white_puct = self.options.white_puct opt.white_mcts_rollout_per_batch = \ self.options.white_mcts_rollout_per_batch opt.white_mcts_rollout_per_thread = \ self.options.white_mcts_rollout_per_thread opt.client_max_delay_sec = self.options.client_max_delay_sec opt.print_result = self.options.print_result opt.selfplay_init_num = self.options.selfplay_init_num opt.selfplay_update_num = self.options.selfplay_update_num opt.selfplay_async = self.options.selfplay_async opt.eval_num_games = self.options.eval_num_games opt.eval_thres = self.options.eval_winrate_thres opt.cheat_eval_new_model_wins_half = \ self.options.cheat_eval_new_model_wins_half opt.cheat_selfplay_random_result = \ self.options.cheat_selfplay_random_result self.max_batchsize = max( self.options.batchsize, self.options.batchsize2) \ if self.options.batchsize2 > 0 \ else self.options.batchsize co.batchsize = self.max_batchsize GC = go.GameContext(co, opt) if self.options.parameter_print: print("**** Options ****") print(opt.info()) print("*****************") print("Version: ", GC.ctx().version()) return co, GC, 
opt

    def initialize(self):
        co, GC, opt = self._set_params()

        params = GC.getParams()

        if self.options.parameter_print:
            print("Mode: ", opt.mode)
            print("Num Actions: ", params["num_action"])

        desc = {}
        if self.options.mode == "online":
            desc["human_actor"] = dict(
                input=["s"],
                reply=["pi", "a", "V"],
                batchsize=1,
            )
            # Used for MCTS/Direct play.
            desc["actor_black"] = dict(
                input=["s"],
                reply=["pi", "V", "a", "rv"],
                timeout_usec=10,
                batchsize=co.mcts_options.num_rollouts_per_batch
            )
        elif self.options.mode == "selfplay":
            # Used for MCTS/Direct play.
            desc["actor_black"] = dict(
                input=["s"],
                reply=["pi", "V", "a", "rv"],
                batchsize=self.options.batchsize,
                timeout_usec=self.options.selfplay_timeout_usec,
            )
            desc["actor_white"] = dict(
                input=["s"],
                reply=["pi", "V", "a", "rv"],
                batchsize=self.options.batchsize2
                if self.options.batchsize2 > 0
                else self.options.batchsize,
                timeout_usec=self.options.selfplay_timeout_usec,
            )
            desc["game_end"] = dict(
                batchsize=1,
            )
            desc["game_start"] = dict(
                batchsize=1,
                input=["black_ver", "white_ver"],
                reply=None
            )
        elif self.options.mode == "train" or \
                self.options.mode == "offline_train":
            desc["train"] = dict(
                input=["s", "offline_a", "winner", "mcts_scores", "move_idx",
                       "selfplay_ver"],
                reply=None
            )
            desc["train_ctrl"] = dict(
                input=["selfplay_ver"],
                reply=None,
                batchsize=1
            )
        else:
            raise ValueError("No such mode: " + self.options.mode)

        params.update(dict(
            num_group=1 if self.options.actor_only else 2,
            T=self.options.T,
        ))

        self.more_labels.add_labels(desc)
        return GCWrapper(
            GC,
            self.max_batchsize,
            desc,
            num_recv=2,
            gpu=(self.options.gpu
                 if (self.options.gpu is not None and self.options.gpu >= 0)
                 else None),
            use_numpy=False,
            params=params,
            verbose=self.options.parameter_print)
# Copyright (c) 2018-present, Facebook, Inc. # All rights reserved. # # This source code is licensed under the BSD-style license found in the # LICENSE file in the root directory of this source tree. import torch.nn as nn from torch.autograd import Variable from elf.options import auto_import_options, PyOptionSpec from rlpytorch import add_err from rlpytorch.trainer import topk_accuracy class MultiplePrediction: @classmethod def get_option_spec(cls): spec = PyOptionSpec() spec.addBoolOption( 'multipred_backprop', 'Whether to backprop the total loss', True) return spec @auto_import_options def __init__(self, option_map): self.policy_loss = nn.NLLLoss().cuda() self.value_loss = nn.MSELoss().cuda() def update(self, mi, batch, stats): ''' Update given batch ''' # Current timestep. state_curr = mi["model"](batch) total_policy_loss = None eps = 1e-6 targets = batch["offline_a"] if "pis" not in state_curr: state_curr["pis"] = [state_curr["pi"]] for i, pred in enumerate(state_curr["pis"]): if i == 0: prec1, prec5 = topk_accuracy( pred.data, targets[:, i].contiguous(), topk=(1, 5)) stats["top1_acc"].feed(prec1[0]) stats["top5_acc"].feed(prec5[0]) # backward. loss = self.policy_loss( (pred + eps).log(), Variable(targets[:, i])) stats["loss" + str(i)].feed(loss.data[0]) total_policy_loss = add_err(total_policy_loss, loss / (i + 1)) total_value_loss = None if "V" in state_curr and "winner" in batch: total_value_loss = self.value_loss( state_curr["V"], Variable(batch["winner"])) stats["total_policy_loss"].feed(total_policy_loss.data[0]) if total_value_loss is not None: stats["total_value_loss"].feed(total_value_loss.data[0]) total_loss = total_policy_loss + total_value_loss else: total_loss = total_policy_loss stats["total_loss"].feed(total_loss.data[0]) if self.options.multipred_backprop: total_loss.backward()
# Copyright (c) 2018-present, Facebook, Inc. # All rights reserved. # # This source code is licensed under the BSD-style license found in the # LICENSE file in the root directory of this source tree. import torch.nn as nn from elf.options import auto_import_options, PyOptionSpec from rlpytorch import Model from elfgames.go.mcts_prediction import MCTSPrediction class Model_PolicyValue(Model): @classmethod def get_option_spec(cls): spec = PyOptionSpec() spec.addBoolOption( 'bn', 'toggles batch norm', True) spec.addBoolOption( 'leaky_relu', 'toggles leaky ReLU', True) spec.addIntOption( 'num_block', 'number of blocks', 20) spec.addIntOption( 'dim', 'model dimension', 128) return spec @auto_import_options def __init__(self, option_map, params): super().__init__(option_map, params) self.board_size = params["board_size"] self.num_future_actions = params["num_future_actions"] self.num_planes = params["num_planes"] # print("#future_action: " + str(self.num_future_actions)) # print("#num_planes: " + str(self.num_planes)) # Network structure of AlphaGo Zero # https://www.nature.com/nature/journal/v550/n7676/full/nature24270.html # Simple method. multiple conv layers. self.relu = nn.LeakyReLU(0.1) if self.options.leaky_relu else nn.ReLU() self.convs = [] last_planes = self.num_planes self.init_conv = self._conv_layer(last_planes) for i in range(self.options.num_block): conv_lower = self._conv_layer() conv_upper = self._conv_layer(relu=False) setattr(self, "conv_lower" + str(i), conv_lower) setattr(self, "conv_upper" + str(i), conv_upper) self.convs.append((conv_lower, conv_upper)) self.pi_final_conv = self._conv_layer(self.options.dim, 2, 1) self.value_final_conv = self._conv_layer(self.options.dim, 1, 1) d = self.board_size ** 2 self.pi_linear = nn.Linear(d * 2, d) self.value_linear1 = nn.Linear(d, 256) self.value_linear2 = nn.Linear(256, 1) # Softmax as the final layer self.logsoftmax = nn.LogSoftmax(dim=1) self.tanh = nn.Tanh() def _conv_layer( self, input_channel=None, output_channel=None, kernel=3, relu=True): if input_channel is None: input_channel = self.options.dim if output_channel is None: output_channel = self.options.dim layers = [] layers.append(nn.Conv2d( input_channel, output_channel, kernel, padding=(kernel // 2), )) if self.options.bn: layers.append(nn.BatchNorm2d(output_channel)) if relu: layers.append(self.relu) return nn.Sequential(*layers) def forward(self, x): s = self._var(x["s"]) s = self.init_conv(s) for conv_lower, conv_upper in self.convs: s1 = conv_lower(s) s1 = conv_upper(s1) s1 = s1 + s s = self.relu(s1) d = self.board_size ** 2 pi = self.pi_final_conv(s) pi = self.pi_linear(pi.view(-1, d * 2)) logpi = self.logsoftmax(pi) pi = logpi.exp() V = self.value_final_conv(s) V = self.relu(self.value_linear1(V.view(-1, d))) V = self.value_linear2(V) V = self.tanh(V) return dict(logpi=logpi, pi=pi, V=V) # Format: key, [model, method] Models = { "df": [Model_PolicyValue, MCTSPrediction] }
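# The residual blocks in ``Model_PolicyValue.forward`` above follow the usual
# AlphaGo Zero pattern: two conv layers, a skip connection, then ReLU. A
# standalone PyTorch sketch of one such block with assumed sizes (128 channels,
# 3x3 kernels, a 19x19 board), independent of the ELF option machinery:

import torch
import torch.nn as nn

dim = 128
conv_lower = nn.Sequential(
    nn.Conv2d(dim, dim, 3, padding=1), nn.BatchNorm2d(dim), nn.ReLU())
conv_upper = nn.Sequential(
    nn.Conv2d(dim, dim, 3, padding=1), nn.BatchNorm2d(dim))  # no ReLU before the skip

relu = nn.ReLU()
s = torch.randn(2, dim, 19, 19)   # dummy batch of board feature planes
s1 = conv_upper(conv_lower(s))
out = relu(s1 + s)                # skip connection, then ReLU
print(out.shape)                  # torch.Size([2, 128, 19, 19])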
# Copyright (c) 2018-present, Facebook, Inc. # All rights reserved. # # This source code is licensed under the BSD-style license found in the # LICENSE file in the root directory of this source tree. import os import torch import torch.nn as nn import torch.distributed as dist from elf.options import auto_import_options, PyOptionSpec from rlpytorch import Model from elfgames.go.mcts_prediction import MCTSPrediction from elfgames.go.multiple_prediction import MultiplePrediction class Block(Model): @classmethod def get_option_spec(cls): spec = PyOptionSpec() spec.addBoolOption( 'bn', 'toggles batch norm', True) spec.addBoolOption( 'leaky_relu', 'toggles leaky ReLU', False) spec.addFloatOption( 'bn_momentum', 'batch norm momentum (pytorch style)', 0.1) spec.addFloatOption( "bn_eps", "batch norm running vars eps", 1e-5) spec.addIntOption( 'dim', 'model dimension', 128) return spec @auto_import_options def __init__(self, option_map, params): super().__init__(option_map, params) self.relu = nn.LeakyReLU(0.1) if self.options.leaky_relu else nn.ReLU() self.conv_lower = self._conv_layer() self.conv_upper = self._conv_layer(relu=False) def _conv_layer( self, input_channel=None, output_channel=None, kernel=3, relu=True): if input_channel is None: input_channel = self.options.dim if output_channel is None: output_channel = self.options.dim layers = [] layers.append(nn.Conv2d( input_channel, output_channel, kernel, padding=(kernel // 2), )) if self.options.bn: layers.append( nn.BatchNorm2d(output_channel, momentum=(self.options.bn_momentum or None), eps=self.options.bn_eps)) if relu: layers.append(self.relu) return nn.Sequential(*layers) def forward(self, s): s1 = self.conv_lower(s) s1 = self.conv_upper(s1) s1 = s1 + s s = self.relu(s1) return s class GoResNet(Model): @classmethod def get_option_spec(cls): spec = PyOptionSpec() spec.addIntOption( 'num_block', 'number of resnet blocks', 20) spec.merge(Block.get_option_spec()) return spec @auto_import_options def __init__(self, option_map, params): super().__init__(option_map, params) self.blocks = [] for _ in range(self.options.num_block): self.blocks.append(Block(option_map, params)) self.resnet = nn.Sequential(*self.blocks) def forward(self, s): return self.resnet(s) class Model_PolicyValue(Model): @classmethod def get_option_spec(cls): spec = PyOptionSpec() spec.addBoolOption( 'bn', 'toggles batch norm', True) spec.addBoolOption( 'leaky_relu', 'toggles leaky ReLU', False) spec.addFloatOption( 'bn_momentum', 'batch norm momentum (pytorch style)', 0.1) spec.addIntOption( 'num_block', 'number of blocks', 20) spec.addIntOption( 'dim', 'model dimension', 128) spec.addBoolOption( 'use_data_parallel', 'TODO: fill this in', False) spec.addBoolOption( 'use_data_parallel_distributed', 'TODO: fill this in', False) spec.addIntOption( 'dist_rank', 'TODO: fill this in', -1) spec.addIntOption( 'dist_world_size', 'TODO: fill this in', -1) spec.addStrOption( 'dist_url', 'TODO: fill this in', '') spec.addIntOption( 'gpu', 'which gpu to use', -1) spec.merge(GoResNet.get_option_spec()) return spec @auto_import_options def __init__(self, option_map, params): super().__init__(option_map, params) self.board_size = params["board_size"] self.num_future_actions = params["num_future_actions"] self.num_planes = params["num_planes"] # print("#future_action: " + str(self.num_future_actions)) # print("#num_planes: " + str(self.num_planes)) # Network structure of AlphaGo Zero # https://www.nature.com/nature/journal/v550/n7676/full/nature24270.html # Simple method. 
multiple conv layers. self.relu = nn.LeakyReLU(0.1) if self.options.leaky_relu else nn.ReLU() last_planes = self.num_planes self.init_conv = self._conv_layer(last_planes) self.pi_final_conv = self._conv_layer(self.options.dim, 2, 1) self.value_final_conv = self._conv_layer(self.options.dim, 1, 1) d = self.board_size ** 2 # Plus 1 for pass. self.pi_linear = nn.Linear(d * 2, d + 1) self.value_linear1 = nn.Linear(d, 256) self.value_linear2 = nn.Linear(256, 1) # Softmax as the final layer self.logsoftmax = nn.LogSoftmax(dim=1) self.tanh = nn.Tanh() self.resnet = GoResNet(option_map, params) if torch.cuda.is_available() and self.options.gpu is not None: self.init_conv.cuda(self.options.gpu) self.resnet.cuda(self.options.gpu) if self.options.use_data_parallel: if self.options.gpu is not None: self.init_conv = nn.DataParallel( self.init_conv, output_device=self.options.gpu) self.resnet = nn.DataParallel( self.resnet, output_device=self.options.gpu) self._check_and_init_distributed_model() def _check_and_init_distributed_model(self): if not self.options.use_data_parallel_distributed: return if not dist.is_initialized(): world_size = self.options.dist_world_size url = self.options.dist_url rank = self.options.dist_rank # This is for SLURM's special use case if rank == -1: rank = int(os.environ.get("SLURM_NODEID")) print("=> Distributed training: world size: {}, rank: {}, URL: {}". format(world_size, rank, url)) dist.init_process_group(backend="nccl", init_method=url, rank=rank, world_size=world_size) # Initialize the distributed data parallel model master_gpu = self.options.gpu if master_gpu is None or master_gpu < 0: raise RuntimeError("Distributed training requires " "to put the model on the GPU, but the GPU is " "not given in the argument") # This is needed for distributed model since the distributed model # initialization will require the model be on the GPU, even though # the later code will put the same model on the GPU again with # self.options.gpu, so this should be ok # self.resnet.cuda(master_gpu) self.init_conv = nn.parallel.DistributedDataParallel( self.init_conv) self.resnet = nn.parallel.DistributedDataParallel( self.resnet) def _conv_layer( self, input_channel=None, output_channel=None, kernel=3, relu=True): if input_channel is None: input_channel = self.options.dim if output_channel is None: output_channel = self.options.dim layers = [] layers.append(nn.Conv2d( input_channel, output_channel, kernel, padding=(kernel // 2) )) if self.options.bn: layers.append( nn.BatchNorm2d(output_channel, momentum=(self.options.bn_momentum or None), eps=self.options.bn_eps)) if relu: layers.append(self.relu) return nn.Sequential(*layers) def prepare_cooldown(self): try: for module in self.modules(): if module.__class__.__name__.startswith('BatchNorm'): module.reset_running_stats() except Exception as e: print(e) print("The module doesn't have method 'reset_running_stats', " "skipping. Please set bn_momentum to 0.1" "(for cooldown = 50) in this case") def forward(self, x): s = self._var(x["s"]) s = self.init_conv(s) s = self.resnet(s) d = self.board_size ** 2 pi = self.pi_final_conv(s) pi = self.pi_linear(pi.view(-1, d * 2)) logpi = self.logsoftmax(pi) pi = logpi.exp() V = self.value_final_conv(s) V = self.relu(self.value_linear1(V.view(-1, d))) V = self.value_linear2(V) V = self.tanh(V) return dict(logpi=logpi, pi=pi, V=V) # Format: key, [model, method] Models = { "df_pred": [Model_PolicyValue, MultiplePrediction], "df_kl": [Model_PolicyValue, MCTSPrediction] }
# Copyright (c) 2018-present, Facebook, Inc. # All rights reserved. # # This source code is licensed under the BSD-style license found in the # LICENSE file in the root directory of this source tree. import torch.nn as nn from torch.autograd import Variable import elf.logging as logging from elf.options import auto_import_options, PyOptionSpec from rlpytorch.trainer.timer import RLTimer class MCTSPrediction(object): @classmethod def get_option_spec(cls): spec = PyOptionSpec() spec.addBoolOption( 'backprop', 'Whether to backprop the total loss', True) return spec @auto_import_options def __init__(self, option_map): self.policy_loss = nn.KLDivLoss().cuda() self.value_loss = nn.MSELoss().cuda() self.logger = logging.getIndexedLogger( 'elfgames.go.MCTSPrediction-', '') self.timer = RLTimer() def update(self, mi, batch, stats, use_cooldown=False, cooldown_count=0): ''' Update given batch ''' self.timer.restart() if use_cooldown: if cooldown_count == 0: mi['model'].prepare_cooldown() self.timer.record('prepare_cooldown') # Current timestep. state_curr = mi['model'](batch) self.timer.record('forward') if use_cooldown: self.logger.debug(self.timer.print(1)) return dict(backprop=False) targets = batch["mcts_scores"] logpi = state_curr["logpi"] pi = state_curr["pi"] # backward. # loss = self.policy_loss(logpi, Variable(targets)) * logpi.size(1) loss = - (logpi * Variable(targets) ).sum(dim=1).mean() # * logpi.size(1) stats["loss"].feed(float(loss)) total_policy_loss = loss entropy = (logpi * pi).sum() * -1 / logpi.size(0) stats["entropy"].feed(float(entropy)) stats["blackwin"].feed( float((batch["winner"] > 0.0).float().sum()) / batch["winner"].size(0)) total_value_loss = None if "V" in state_curr and "winner" in batch: total_value_loss = self.value_loss( state_curr["V"].squeeze(), Variable(batch["winner"])) stats["total_policy_loss"].feed(float(total_policy_loss)) if total_value_loss is not None: stats["total_value_loss"].feed(float(total_value_loss)) total_loss = total_policy_loss + total_value_loss else: total_loss = total_policy_loss stats["total_loss"].feed(float(total_loss)) self.timer.record('feed_stats') if self.options.backprop: total_loss.backward() self.timer.record('backward') self.logger.debug(self.timer.print(1)) return dict(backprop=True) else: self.logger.debug(self.timer.print(1)) return dict(backprop=False)
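# The policy term in ``MCTSPrediction.update`` above is a cross-entropy between
# the (normalized) MCTS visit distribution and the network's log-policy,
# i.e. ``-(targets * logpi).sum(dim=1).mean()``. A tiny self-contained sketch
# on made-up tensors, just to show the shapes and sign convention:

import torch

# Two board positions, three candidate moves each.
logits = torch.tensor([[2.0, 0.5, 0.1],
                       [0.2, 0.2, 3.0]])
logpi = torch.log_softmax(logits, dim=1)        # what the model's "logpi" head returns
mcts_scores = torch.tensor([[0.7, 0.2, 0.1],    # normalized visit counts (soft targets)
                            [0.1, 0.1, 0.8]])

loss = -(logpi * mcts_scores).sum(dim=1).mean()      # same form as the loss above
entropy = -(logpi * logpi.exp()).sum() / logpi.size(0)
print(float(loss), float(entropy))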
# Copyright (c) 2018-present, Facebook, Inc. # All rights reserved. # # This source code is licensed under the BSD-style license found in the # LICENSE file in the root directory of this source tree. from .model_base import Model from .model_loader import ModelLoader, load_env from .model_interface import ModelInterface from .sampler import Sampler from .methods import ActorCritic, RNNActorCritic from .runner import EvalIters, EvalItersBasic, SingleProcessRun from .trainer import Trainer, Evaluator, LSTMTrainer from .methods import add_err, PolicyGradient, DiscountedReward, ValueMatcher
# Copyright (c) 2018-present, Facebook, Inc. # All rights reserved. # # This source code is licensed under the BSD-style license found in the # LICENSE file in the root directory of this source tree. import os from collections import OrderedDict from copy import deepcopy from time import sleep import torch import torch.nn as nn from torch.autograd import Variable torch.backends.cudnn.benchmark = True class Model(nn.Module): ''' Base class for an RL model, it is a wrapper for ``nn.Module``''' def __init__(self, option_map, params): """Initialize model with ``args``. Set ``step`` to ``0`` and ``volatile`` to ```false``. ``step`` records the number of times the weight has been updated. ``volatile`` indicates that the Variable should be used in inference mode, i.e. don't save the history. """ super(Model, self).__init__() self.option_map = option_map self.params = params self.step = 0 self.volatile = False def clone(self, gpu=None): """Deep copy an existing model. ``options``, ``step`` and ``state_dict`` are copied. Args: gpu(int): gpu id to be put the model on Returns: Cloned model """ model = type(self)(self.option_map, self.params) model.load_state_dict(deepcopy(self.state_dict())) model.step = self.step if gpu is not None: model.cuda(gpu) return model def set_volatile(self, volatile): """Set model to ``volatile``. Args: volatile(bool): indicating that the Variable should be used in inference mode, i.e. don't save the history. """ self.volatile = volatile def _var(self, x): ''' Convert tensor x to a pytorch Variable. Returns: Variable for x ''' if not isinstance(x, Variable): return Variable(x, volatile=self.volatile) else: return x def before_update(self): """Customized operations for each model before update. To be extended. """ pass def save(self, filename, num_trial=10): """Save current model, step and args to ``filename`` Args: filename(str): filename to be saved. num_trial(int): maximum number of retries to save a model. """ # Avoid calling the constructor by doing self.clone() # deepcopy should do it state_dict = deepcopy(self).cpu().state_dict() # Note that the save might experience issues, so if we encounter # errors, try a few times and then give up. content = { 'state_dict': state_dict, 'step': self.step, 'options': vars(self.options), } for i in range(num_trial): try: torch.save(content, filename) return except BaseException: sleep(1) print( "Failed to save %s after %d trials, giving up ..." % (filename, num_trial)) def load( self, filename, omit_keys=[], replace_prefix=[], check_loaded_options=True): ''' Load current model, step and args from ``filename`` Args: filename(str): model filename to load from omit_keys(list): list of omitted keys. Sometimes model will have extra keys and weights (e.g. due to extra tasks during training). We should omit them; otherwise loading will not work. 
''' data = torch.load(filename) if isinstance(data, OrderedDict): self.load_state_dict(data) else: for k in omit_keys: del data["state_dict"][k + ".weight"] del data["state_dict"][k + ".bias"] sd = data["state_dict"] keys = list(sd.keys()) for key in keys: # Should be commented out for PyTorch > 0.40 # if key.endswith("num_batches_tracked"): # del sd[key] # continue for src, dst in replace_prefix: if key.startswith(src): # print(f"Src=\"{src}\", Dst=\"{dst}\"") sd[dst + key[len(src):]] = sd[key] del sd[key] self.load_state_dict(sd) self.step = data.get("step", 0) self.filename = os.path.realpath(data.get("filename", filename)) if check_loaded_options: # Ensure that for options defined in both the current model # options and the loaded model options, the values match between # current model and loaded model. loaded_options = data.get('options', {}) current_options = vars(self.options) for option_name in \ (set(loaded_options.keys()) & set(current_options.keys())): if loaded_options[option_name] != current_options[option_name]: raise ValueError( f'Discrepancy between current and loaded model ' f'parameter: {option_name} ' f'loaded: {loaded_options[option_name]}, ' f'current: {current_options[option_name]}' ) def load_from(self, model): ''' Load from an existing model. State is not deep copied. To deep copy the model, uss ``clone``. ''' if hasattr(model, 'option_map'): self.option_map = model.option_map if hasattr(model, 'params'): self.params = deepcopy(model.params) self.load_state_dict(model.state_dict()) self.step = model.step def inc_step(self): ''' increment the step. ``step`` records the number of times the weight has been updated.''' self.step += 1 def signature(self): '''Get model's signature. Returns: the model's signature string, specified by step. ''' return "Model[%d]" % self.step def prepare_cooldown(self): """Prepare for "cooldown" forward passes (useful for batchnorm).""" pass
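# Standalone illustration of what the ``replace_prefix`` argument of
# ``Model.load`` (above) does to a checkpoint's state dict: keys starting with
# a given prefix (e.g. ``module.`` from a model saved under nn.DataParallel)
# are renamed so they match the current model. Keys and values below are made
# up for illustration.

from collections import OrderedDict

sd = OrderedDict([("module.conv.weight", 1), ("module.conv.bias", 2)])
replace_prefix = [("module.", "")]

for key in list(sd.keys()):
    for src, dst in replace_prefix:
        if key.startswith(src):
            # Mirrors the rename loop inside Model.load().
            sd[dst + key[len(src):]] = sd.pop(key)

print(list(sd.keys()))  # ['conv.weight', 'conv.bias']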
# Copyright (c) 2018-present, Facebook, Inc. # All rights reserved. # # This source code is licensed under the BSD-style license found in the # LICENSE file in the root directory of this source tree. import importlib import pprint import random import time import torch import warnings from elf.options import import_options, PyOptionSpec from elf import logging from .model_interface import ModelInterface from .sampler import Sampler from .utils.fp16_utils import FP16Model def load_module(mod): """Load a python module.""" module = importlib.import_module(mod) print(module, mod) return module class ModelLoader(object): """Class to load a previously saved model.""" @classmethod def get_option_spec(cls, model_class=None, model_idx=None): spec = PyOptionSpec() spec.addStrOption( 'load', 'load model', '') spec.addStrListOption( 'onload', ('functions to call after loading. e.g., reset,zero_first_layer. ' 'These functions are specified in the model'), []) spec.addStrListOption( 'omit_keys', 'omitted keys when loading', []) spec.addStrListOption( 'replace_prefix', 'replace prefix', []) spec.addIntOption( 'gpu', 'which GPU to use', -1) spec.addBoolOption( 'check_loaded_options', 'Toggles consistency check of loaded vs. current model options.', True) spec.addBoolOption( 'use_fp16', 'use_fp16', False) spec.addFloatOption( 'load_model_sleep_interval', ('If zero, has no effect. If positive, then before loading the ' 'model, we will sleep for an interval of ' 'duration (secs) ~ Uniform[0, load_model_sleep_interval]'), 0.0) if model_class is not None and hasattr(model_class, 'get_option_spec'): spec.merge(model_class.get_option_spec()) idx_suffix = '' if model_idx is None else str(model_idx) spec.addPrefixSuffixToOptionNames('', idx_suffix) return spec def __init__(self, option_map, model_class, model_idx=None, logger=None): """Initialize ModelLoader. Loading will fail if extra keys are not put in ``omit_keys`` Args: model_class(class): class name of the model model_idx(int): index of the model to be loaded. There may be multiple models in an `ModelInterface` to load. """ import_options( self, option_map, self.get_option_spec(model_class, model_idx)) if logger is not None: self.logger = logger else: self.logger = logging.getIndexedLogger( 'rlpytorch.model_loader.ModelLoader-', f'-model_index{model_idx}') self.option_map_for_model = option_map.clone() self.model_class = model_class self.model_idx = model_idx self._on_get_args = lambda *args, **kwargs: None option_spec = self.get_option_spec(model_class, model_idx) option_names = set(option_spec.getOptionNames()) model_option_spec = model_class.get_option_spec() model_option_names = set(model_option_spec.getOptionNames()) # Here, the names in option_names are still possibly suffixed with # the model_idx. If so, we need to remove this suffix. model_options_to_load = {} for option_name in option_names: if model_idx is not None and option_name.endswith(str(model_idx)): # This is the name without the model_idx suffix orig_option_name = option_name[:-len(str(model_idx))] value = getattr(self.options, option_name) setattr(self.options, orig_option_name, value) delattr(self.options, option_name) if orig_option_name in model_option_names: model_options_to_load[orig_option_name] = value if model_options_to_load: self.option_map_for_model.loadOptionDict( model_options_to_load) def load_model(self, params): """Actually loads the model with initialized args. Call onload funtions if needed. Args: params(dict): additinoal parameters to be put into args. 
""" if self.options.load_model_sleep_interval > 1e-7: interval = random.random() * self.options.load_model_sleep_interval self.logger.info(f'Sleeping for {interval} seconds') time.sleep(interval + 1e-7) # Initialize models. model = self.model_class(self.option_map_for_model, params) if self.options.load: self.logger.info(f'Loading model from {self.options.load}') if self.options.omit_keys: self.logger.info(f'Omitting keys {self.options.omit_keys}') if self.options.replace_prefix: replace_prefix = [ item.split(",") for item in self.options.replace_prefix ] self.logger.info( f'replace_prefix for state dict: {replace_prefix}') else: replace_prefix = [] model.load( self.options.load, omit_keys=self.options.omit_keys, replace_prefix=replace_prefix, check_loaded_options=self.options.check_loaded_options) self.logger.info( f'Finished loading model from {self.options.load}') if self.options.onload: for func in self.options.onload: try: getattr(model, func)() self.logger.info('Called function {func!s} for model') except BaseException: self.logger.info('Calling function {func!s} failed!') raise if self.options.use_fp16: old_step = model.step model = FP16Model(self.option_map_for_model, params, model) model.step = old_step if torch.cuda.is_available() and \ self.options.gpu is not None and \ self.options.gpu >= 0: model.cuda(self.options.gpu) return model def _on_get_args(self, *args, **kwargs): warnings.warn( ('_on_get_args is deprecated, get rid of this as soon as old ' 'model files are no longer needed'), DeprecationWarning) def load_env( envs, num_models=None, overrides=None, additional_to_load=None): """Load envs. Envs will be specified as environment variables. Specifically, the environment variables ``game``, ``model_file`` and ``model`` are required. ``additional_to_load`` is a dict with the following format: {'variable_name': (option_spec, callable)} For each element in ``additional_to_load``, ``load_env`` will parse the ``option_spec``, pass the resulting option map to ``callable``, and store the result of ``callable`` in the return value (under the key ``name``). Returns: env: dict of ``game`` : game module ``method``: Learning method used ``model_loaders``: loaders for model """ logger = logging.getIndexedLogger('rlpytorch.model_loader.load_env', '') logger.info('Loading env') game_loader_class = load_module(envs["game"]).Loader model_file = load_module(envs["model_file"]) # TODO This is not good, need to fix. 
if len(model_file.Models[envs["model"]]) == 2: model_class, method_class = model_file.Models[envs["model"]] sampler_class = Sampler else: model_class, method_class, sampler_class = \ model_file.Models[envs["model"]] overrides = dict(overrides) if overrides else {} overrides.update(getattr(model_file, "Overrides", {})) option_spec = PyOptionSpec() option_spec.merge(PyOptionSpec.fromClasses(( logging.GlobalLoggingConfigurator, game_loader_class, method_class, sampler_class, ModelInterface, ))) if num_models is None: option_spec.merge(ModelLoader.get_option_spec(model_class)) else: for i in range(num_models): option_spec.merge( ModelLoader.get_option_spec(model_class, model_idx=i)) if additional_to_load: for additional_option_spec, _ in additional_to_load.values(): option_spec.merge(additional_option_spec) option_map = option_spec.parse(overrides=overrides) global_logger_configurator = logging.GlobalLoggingConfigurator(option_map) global_logger_configurator.configure() pretty_option_str = pprint.pformat(option_map.getOptionDict(), width=50) logger.info(f'Parsed options: {pretty_option_str}') game = game_loader_class(option_map) method = method_class(option_map) sampler = sampler_class(option_map) mi = ModelInterface(option_map) # You might want multiple models loaded. if num_models is None: model_loaders = [ModelLoader(option_map, model_class)] else: model_loaders = [ModelLoader(option_map, model_class, model_idx=i) for i in range(num_models)] env = dict( game=game, method=method, sampler=sampler, model_loaders=model_loaders, mi=mi, ) if additional_to_load: for name, (_, option_map_callable) in additional_to_load.items(): env[name] = option_map_callable(option_map) logger.info('Finished loading env') return env
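# A hedged sketch of how ``load_env`` (above) is typically driven. The module
# paths below are placeholders for whatever game loader / model file a concrete
# project provides (the ``Models`` dicts in the model files above map a key such
# as "df_pred" to a [model_class, method_class] pair). The call itself is left
# commented out because it parses options from the command line.

envs = {
    "game": "some_project.game",             # placeholder: module exposing a ``Loader`` class
    "model_file": "some_project.df_model",   # placeholder: module exposing a ``Models`` dict
    "model": "df_pred",                      # key into that ``Models`` dict
}

# env = load_env(envs, overrides={"gpu": 0})
# game = env["game"]                    # instantiated game loader
# method = env["method"]                # learning method instance
# mi, model_loaders = env["mi"], env["model_loaders"]
# model = model_loaders[0].load_model(params)   # params come from the game context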
# Copyright (c) 2018-present, Facebook, Inc. # All rights reserved. # # This source code is licensed under the BSD-style license found in the # LICENSE file in the root directory of this source tree. from collections import deque import torch import torch.cuda import torch.optim from elf.options import auto_import_options, PyOptionSpec # All model must provide .outputs and .preprocess # E.g., .outputs = { "Q" : self.final_linear_layer } # .preprocess = lambda self, x: downsample(x) class ModelInterface(object): """An interface for the model to receive intermediate results from forward passes.""" @classmethod def get_option_spec(cls): spec = PyOptionSpec() spec.addStrOption( 'opt_method', 'optimization method (adam or sgd)', 'adam') spec.addFloatOption( 'lr', 'learning rate', 1e-3) spec.addFloatOption( 'adam_eps', 'Adam epsilon', 1e-3) spec.addFloatOption( 'momentum', 'momentum parameter', 0.9) spec.addFloatOption( 'weight_decay', 'weight decay rate', 0.0) return spec @auto_import_options def __init__(self, option_map): """Initialization for models and optimizers. ``models`` is a dict that can contain multiple models in a single `ModelInterface`. For each model in ``models``, there is an optimizer in ``optimizers`` in correspondence, using ``torch.optim.Adam``. """ self.option_map = option_map self.models = {} self.old_models = deque() self.optimizers = {} def clone(self, gpu=None): """Clone the state for the model interface, including ``models`` and ``optimizers``. Args: gpu(int): gpu id to be put the model on Returns: cloned `ModelInterface`. """ mi = ModelInterface(self.option_map) for key, model in self.models.items(): mi.models[key] = model.clone(gpu=gpu) if key in self.optimizers: # Same parameters. mi.optimizers[key] = torch.optim.Adam( mi.models[key].parameters()) new_optim = mi.optimizers[key] old_optim = self.optimizers[key] new_optim_params = new_optim.param_groups[0] old_optim_params = old_optim.param_groups[0] # Copy the parameters. for k in new_optim_params.keys(): if k != "params": new_optim_params[k] = old_optim_params[k] # Copy the state ''' new_optim.state = { } for k, v in old_optim.state.items(): if isinstance(v, (int, float, str)): new_optim.state[k] = v else: new_optim.state[k] = v.clone() if gpu is not None: new_optim.state[k] = new_optim.state[k].cuda(gpu) ''' return mi def __contains__(self, key): return key in self.models def add_model( self, key, model, copy=False, cuda=False, gpu_id=None, opt=False, params={}): '''Add a model to `ModelInterface`. Args: key(str): key in ``self.models``. model(`Model`): the model to be added. copy(bool): indicate if the model needs to be deep copied. cuda(bool): indicate if model needs to be converted to cuda. gpu_id(int): gpu index. opt(bool): Whether you want your model to be optimized (weights to be updated). params(dict): an dict of parameters for optimizers. Returns: Raise exception if key is already in ``self.models``, None if model is successfully added. ''' if key in self.models: raise("ModelInterface: key[%s] is already present!" % key) # New model. 
if gpu_id is not None and gpu_id >= 0: with torch.cuda.device(gpu_id): self.models[key] = model.clone() if copy else model else: self.models[key] = model.clone() if copy else model if cuda: if gpu_id is not None and gpu_id >= 0: self.models[key].cuda(gpu_id) else: self.models[key].cuda() def set_default(params, ks, arg_ks=None): if arg_ks is None: arg_ks = [None] * len(ks) for k, arg_k in zip(ks, arg_ks): if arg_k is None: arg_k = k params[k] = params.get(k, getattr(self.options, arg_k)) curr_model = self.models[key] if opt or len(params) > 0: set_default( params, ["lr", "opt_method", "adam_eps", "momentum", "weight_decay"]) method = params["opt_method"] curr_model.train() if method == "adam": self.optimizers[key] = torch.optim.Adam( curr_model.parameters(), lr=params["lr"], betas=(0.9, 0.999), eps=params["adam_eps"], weight_decay=params["weight_decay"]) elif method == "sgd": self.optimizers[key] = torch.optim.SGD( curr_model.parameters(), lr=params["lr"], momentum=params["momentum"], weight_decay=params["weight_decay"]) else: raise ValueError( "Optimization method %s is not supported! " % params["opt_method"]) return True def update_model(self, key, model, save_old_model=False): ''' If the key is present, update an old model. Does not deep copy it. If the key is not present, add it (no deep copy). Args: key(str): the key in ``models`` to be updated model(`Model`): updated model ''' # print("Updating model " + key) if key not in self.models: self.add_model(key, model) return if save_old_model: self.old_models.append(self.models[key].clone().cpu()) if len(self.old_models) > 20: self.old_models.popleft() self.models[key].load_from(model) def remove_model(self, key): del self.models[key] if key in self.optimizers: del self.optimizers[key] def average_model(self, key, model): """Average the model params from ``self.models[key]`` and ``model``, and update to ``self.models[key]``. Args: key(str): the key in ``models`` model(Model): the model containing the parameters to update """ for param, other_param in zip( self.models[key].parameters(), model.parameters()): param.data += other_param.data.cuda(param.data.get_device()) param.data /= 2 def copy(self, dst_key, src_key): ''' Deep copy a model from src_key to dst_key in ``self.models`` Args: dst_key(str): destination key in ``self.models`` src_key(str): source key in ``self.models`` ''' assert dst_key in self.models, \ f'ModelInterface: dst_key = {dst_key} cannot be found' assert src_key in self.models, \ f'ModelInterface: src_key = {src_key} cannot be found' self.update_model(dst_key, self.models[src_key].clone()) ''' Usage: record = interface(input) Then record["Q"] will be the Q-function given the input. ''' def zero_grad(self): ''' Zero the gradient for all ``optimizers`` ''' for k, optimizer in self.optimizers.items(): optimizer.zero_grad() def update_weights(self): """For each optimizer, call before_update for all the models, then update the weights and increment the step for the model.""" for k, optimizer in self.optimizers.items(): self.models[k].before_update() optimizer.step() self.models[k].inc_step() def __getitem__(self, key): ''' Get an item associated with ``key`` from ``self.models``''' return self.models[key]
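# A hedged usage sketch for ``ModelInterface`` (above). ``option_map`` and
# ``my_model`` stand in for objects produced by the surrounding ELF option and
# model machinery, so the calls are left commented out; the method names and
# keyword arguments match the class as defined above.

# mi = ModelInterface(option_map)
# mi.add_model("model", my_model, opt=True)     # trained copy, gets an Adam/SGD optimizer
# mi.add_model("actor", my_model, copy=True)    # deep-copied inference model, no optimizer
#
# ... inside a training step ...
# mi.zero_grad()                                # zero all optimizers
# # (compute losses and call backward() on them)
# mi.update_weights()                           # before_update() + optimizer.step() + inc_step()
#
# mi.update_model("actor", mi["model"])         # sync the actor's weights from the trainer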
# Copyright (c) 2018-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.

from torch.autograd import Variable

from elf.options import auto_import_options, PyOptionSpec

from .utils import add_err
from .discounted_reward import DiscountedReward
from .policy_gradient import PolicyGradient
from .value_matcher import ValueMatcher


class RNNActorCritic(object):
    """RNN actor-critic model."""

    @classmethod
    def get_option_spec(cls):
        spec = PyOptionSpec()
        spec.addStrOption(
            'value_node',
            'name of the value node',
            'V')

        spec.merge(PyOptionSpec.fromClasses(
            (PolicyGradient, DiscountedReward, ValueMatcher)
        ))

        return spec

    @auto_import_options
    def __init__(self, option_map):
        """Initialization of RNNActorCritic component methods:

        `PolicyGradient`, `DiscountedReward`, and `ValueMatcher`.
        """
        self.discounted_reward = DiscountedReward()
        self.pg = PolicyGradient()
        self.value_matcher = ValueMatcher()

    def update(self, mi, batch, hiddens, stats):
        """RNN actor-critic model update. Feed stats for later summarization.

        Args:
            mi(`ModelInterface`): model interface used
            batch(dict): batch of data. Keys in a batch:
                ``a``: action,
                ``r``: immediate reward,
                ``terminal``: if game is terminated
            hiddens(tensor): initial hidden state of the recurrent model
            stats(`Stats`): Feed stats for later summarization.
        """
        m = mi["model"]
        value_node = self.options.value_node

        T = batch["a"].size(0)

        h = Variable(hiddens)
        hs = []
        ss = []

        # Forward pass: unroll the recurrent model and cache the hidden
        # states and outputs of every step.
        for t in range(0, T - 1):
            if t > 0:
                term = Variable(1.0 - batch["terminal"][t].float()).view(-1, 1)
                # Block gradients from flowing across terminal steps.
                # Bind ``term`` as a default argument so each hook uses the
                # mask of its own timestep rather than the last one computed
                # in the loop.
                h.register_hook(lambda grad, term=term: grad.mul(term))
            state_curr = m(batch.hist(t), h)
            h = m.transition(state_curr["h"], batch["a"][t])
            hs.append(h)
            ss.append(state_curr)

        R = ss[-1][value_node].squeeze().data
        self.discounted_reward.setR(R, stats)

        err = None

        # Backward pass: accumulate policy-gradient and value-matching
        # errors from the last step to the first.
        for t in range(T - 2, -1, -1):
            state_curr = ss[t]

            # go through the sample and get the rewards.
            bht = batch.hist(t)
            V = state_curr[value_node].squeeze()

            R = self.discounted_reward.feed(
                dict(r=batch["r"][t], terminal=batch["terminal"][t]),
                stats)

            err = add_err(
                err,
                self.pg.feed(
                    R - V.data,
                    state_curr,
                    bht,
                    stats,
                    old_pi_s=bht))
            err = add_err(err, self.value_matcher.feed(
                {value_node: V, "target": R}, stats))

        stats["cost"].feed(err.data[0] / (T - 1))
        err.backward()
# Copyright (c) 2018-present, Facebook, Inc. # All rights reserved. # # This source code is licensed under the BSD-style license found in the # LICENSE file in the root directory of this source tree. from elf.options import auto_import_options, PyOptionSpec from .policy_gradient import PolicyGradient from .discounted_reward import DiscountedReward from .value_matcher import ValueMatcher from .utils import add_err class ActorCritic(object): """An actor critic model.""" @classmethod def get_option_spec(cls): spec = PyOptionSpec() spec.addStrOption( 'value_node', 'name of the value node', 'V') spec.merge(PyOptionSpec.fromClasses( (PolicyGradient, DiscountedReward, ValueMatcher) )) return spec @auto_import_options def __init__(self, option_map): """Initialization of ActorCritic component methods: `PolicyGradient`, `DiscountedReward`, and `ValueMatcher`. """ self.discounted_reward = DiscountedReward() self.pg = PolicyGradient() self.value_matcher = ValueMatcher() def update(self, mi, batch, stats): """Actor critic model update. Feed stats for later summarization. Args: mi(`ModelInterface`): mode interface used batch(dict): batch of data. Keys in a batch: ``s``: state, ``r``: immediate reward, ``terminal``: if game is terminated stats(`Stats`): Feed stats for later summarization. """ m = mi["model"] value_node = self.options.value_node T = batch["s"].size(0) state_curr = m(batch.hist(T - 1)) self.discounted_reward.setR( state_curr[value_node].squeeze().data, stats) err = None for t in range(T - 2, -1, -1): bht = batch.hist(t) state_curr = m.forward(bht) # go through the sample and get the rewards. V = state_curr[value_node].squeeze() R = self.discounted_reward.feed( dict(r=batch["r"][t], terminal=batch["terminal"][t]), stats=stats) policy_err = self.pg.feed( R - V.data, state_curr, bht, stats, old_pi_s=bht) err = add_err(err, policy_err) err = add_err(err, self.value_matcher.feed( {value_node: V, "target": R}, stats)) stats["cost"].feed(err.data[0] / (T - 1)) err.backward()
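# Standalone toy sketch (the tiny random model and fake batch are assumptions,
# not ELF code) of the same backward pass ActorCritic.update runs above:
# bootstrap R from the value head at the last step, then for t = T-2 .. 0
# accumulate a policy-gradient term weighted by the advantage (R - V) and a
# SmoothL1 value loss toward R.
import torch
import torch.nn as nn
import torch.nn.functional as F

torch.manual_seed(0)
T, B, S, A = 4, 2, 3, 5
policy = nn.Sequential(nn.Linear(S, A), nn.LogSoftmax(dim=1))
value = nn.Linear(S, 1)
states = torch.randn(T, B, S)
actions = torch.randint(A, (T, B))
rewards = torch.randn(T, B)
discount = 0.99

R = value(states[T - 1]).squeeze(1).detach()   # bootstrap; no gradient through it
total_err = 0.0
for t in range(T - 2, -1, -1):
    R = rewards[t] + discount * R
    V = value(states[t]).squeeze(1)
    logpi = policy(states[t])
    adv = (R - V).detach()                     # advantage treated as a constant
    chosen = logpi.gather(1, actions[t].view(-1, 1)).squeeze(1)
    total_err = total_err - (chosen * adv).mean() + F.smooth_l1_loss(V, R)
total_err.backward()
print(policy[0].weight.grad.abs().sum().item() > 0)  # True: gradients flowed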
# Copyright (c) 2018-present, Facebook, Inc. # All rights reserved. # # This source code is licensed under the BSD-style license found in the # LICENSE file in the root directory of this source tree. import torch.nn as nn from torch.autograd import Variable from elf.options import auto_import_options, PyOptionSpec from .utils import average_norm_clip class ValueMatcher(object): @classmethod def get_option_spec(cls): spec = PyOptionSpec() spec.addFloatOption( 'grad_clip_norm', 'gradient norm clipping', 0.0) spec.addStrOption( 'value_node', 'name of the value node', 'V') return spec @auto_import_options def __init__(self, option_map): """Initialization of value matcher. Initialize value loss to be ``nn.SmoothL1Loss``. """ self.value_loss = nn.SmoothL1Loss().cuda() def _reg_backward(self, v): ''' Register the backward hook. Clip the gradient if necessary.''' grad_clip_norm = self.options.grad_clip_norm if grad_clip_norm > 1e-20: def bw_hook(grad_in): grad = grad_in.clone() if grad_clip_norm is not None: average_norm_clip(grad, grad_clip_norm) return grad v.register_hook(bw_hook) def feed(self, batch, stats): """ One iteration of value match. nabla_w Loss(V - target) Keys in a batch: ``V`` (variable): value ``target`` (tensor): target value. Inputs that are of type Variable can backpropagate. Feed to stats: predicted value and value error Returns: value_err """ V = batch[self.options.value_node] value_err = self.value_loss(V, Variable(batch["target"])) self._reg_backward(V) stats["predicted_" + self.options.value_node].feed(V.data[0]) stats[self.options.value_node + "_err"].feed(value_err.data[0]) return value_err
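# Standalone sketch (assumes PyTorch >= 0.4, where plain tensors take
# requires_grad; `clip_hook` is an illustrative name, not ELF code) of the
# pattern ValueMatcher._reg_backward uses above: register a hook on the value
# tensor so its gradient is rescaled before it reaches the parameters.
import torch
import torch.nn as nn


def clip_hook(grad, clip_val=0.01):
    norm = grad.norm()
    return grad * (clip_val / norm) if norm > clip_val else grad


if __name__ == "__main__":
    torch.manual_seed(0)
    V = torch.randn(8, requires_grad=True)
    V.register_hook(clip_hook)
    loss = nn.SmoothL1Loss()(V, torch.zeros(8))
    loss.backward()
    print(V.grad.norm().item() <= 0.01 + 1e-6)  # True: gradient was clipped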
# Copyright (c) 2018-present, Facebook, Inc. # All rights reserved. # # This source code is licensed under the BSD-style license found in the # LICENSE file in the root directory of this source tree. from .actor_critic import ActorCritic from .rnn_actor_critic import RNNActorCritic from .q_learning import Q_learning from .policy_gradient import PolicyGradient from .discounted_reward import DiscountedReward from .value_matcher import ValueMatcher from .utils import add_err
# Copyright (c) 2018-present, Facebook, Inc. # All rights reserved. # # This source code is licensed under the BSD-style license found in the # LICENSE file in the root directory of this source tree. from elf.options import auto_import_options, PyOptionSpec class DiscountedReward(object): @classmethod def get_option_spec(cls): spec = PyOptionSpec() spec.addFloatOption( 'discount', 'exponential discount rate', 0.99) return spec @auto_import_options def __init__(self, option_map): """Initialization of discounted reward.""" pass def setR(self, R, stats): """Set rewards and feed to stats.""" self.R = R stats["init_reward"].feed(R.mean()) def feed(self, batch, stats): """Update discounted reward and feed to stats. Keys in a batch: ``r`` (tensor): immediate reward. ``terminal`` (tensor): whether the current game has terminated. Feed to stats: immediate reward and accumulated reward """ r = batch["r"] term = batch["terminal"] # Compute the reward. self.R = self.R * self.options.discount + r # If we see any terminal signal, break the reward backpropagation # chain. for i, terminal in enumerate(term): if terminal: self.R[i] = r[i] stats["reward"].feed(r.mean()) stats["acc_reward"].feed(self.R.mean()) return self.R
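# Standalone sketch (the helper name and toy numbers are assumptions, not ELF
# code) of the recursion DiscountedReward.feed implements above, unrolled
# backwards over a short trajectory:
#   R_t = r_t + discount * R_{t+1}, reset to r_t wherever a game terminated.
import torch


def discounted_returns(rewards, terminals, bootstrap, discount=0.99):
    R = bootstrap.clone()
    out = []
    for r, term in zip(reversed(rewards), reversed(terminals)):
        R = R * discount + r
        R = torch.where(term, r, R)   # break the chain at terminal steps
        out.append(R.clone())
    return list(reversed(out))


if __name__ == "__main__":
    rewards = [torch.tensor([0.0, 1.0]), torch.tensor([1.0, 0.0])]
    terminals = [torch.tensor([False, False]), torch.tensor([True, False])]
    print(discounted_returns(rewards, terminals, torch.tensor([5.0, 5.0])))
    # The first env terminates at the last step, so its chain restarts from
    # its immediate reward 1.0 instead of the bootstrapped value.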
# Copyright (c) 2018-present, Facebook, Inc. # All rights reserved. # # This source code is licensed under the BSD-style license found in the # LICENSE file in the root directory of this source tree. def average_norm_clip(grad, clip_val): ''' Compute the norm and clip it if necessary. The first dimension will be batchsize. Args: grad(Tensor): the gradient clip_val(float): value to clip to ''' batchsize = grad.size(0) avg_l2_norm = 0.0 for i in range(batchsize): avg_l2_norm += grad[i].data.norm() avg_l2_norm /= batchsize if avg_l2_norm > clip_val: # print("l2_norm: %.5f clipped to %.5f" % (avg_l2_norm, clip_val)) grad *= clip_val / avg_l2_norm def accumulate(acc, new): ''' accumulate by the same key in a list of dicts Args: acc(dict): the dict to accumulate to new(dict): new dict entry Returns: A new dict containing the accumulated sums of each key. ''' ret = {k: new[k] if a is None else a + new[k] for k, a in acc.items() if k in new} ret.update({k: v for k, v in new.items() if not (k in acc)}) return ret def add_err(overall_err, new_err): ''' Add ``new_err`` to ``overall_err`` Args: overall_err(float): summed overall error new_err(float): new error ''' if overall_err is None: return new_err else: overall_err += new_err return overall_err def add_stats(stats, key, value): ''' Feed ``value`` to ``stats[key]``''' if stats: stats[key].feed(value) def check_terminals(has_terminal, batch): ''' Check if the environment sent a terminal signal ''' # Block backpropagation if we go pass a terminal node. for i, terminal in enumerate(batch["terminal"]): if terminal: has_terminal[i] = True def check_terminals_anyT(has_terminal, batch, T): ''' Check if any of ``batch[t], t <= T`` is terminal''' for t in range(T): check_terminals(has_terminal, batch[t])
# Copyright (c) 2018-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.

import torch
import torch.nn as nn
from torch.autograd import Variable

from elf.options import auto_import_options, PyOptionSpec

from .utils import accumulate, add_err, average_norm_clip


class PolicyGradient(object):
    @classmethod
    def get_option_spec(cls):
        spec = PyOptionSpec()
        spec.addFloatOption(
            'entropy_ratio',
            'the entropy ratio we put on PolicyGradient',
            0.01)
        spec.addFloatOption(
            'grad_clip_norm',
            'gradient norm clipping',
            0.0)
        spec.addFloatOption(
            'min_prob',
            'minimal probability used in training',
            1e-6)
        spec.addFloatOption(
            'ratio_clamp',
            'maximum importance sampling ratio',
            10.0)
        spec.addStrListOption(
            'policy_action_nodes',
            'comma-separated pairs of policy and action node names',
            ['pi,a'])

        return spec

    @auto_import_options
    def __init__(self, option_map):
        """Initialize the policy gradient.

        Specifically, initialize policy loss to be an ``nn.NLLLoss`` and
        parse ``policy_action_nodes``.
        """
        self.policy_loss = nn.NLLLoss().cuda()
        self.policy_action_nodes = []
        for node_specifier in self.options.policy_action_nodes:
            policy, _, action = node_specifier.partition(",")
            self.policy_action_nodes.append((policy, action))

    def _compute_one_policy_entropy_err(self, pi, a):
        """Compute policy error and entropy error for a single policy head.

        Uses ``min_prob`` to avoid ``NaN`` in logarithms.

        Returns:
            dict of
            ``logpi``: log policy
            ``policy_err``: policy error
            ``entropy_err``: entropy error
        """
        batchsize = a.size(0)

        # Add a small constant before the log to avoid log(0).
        logpi = (pi + self.options.min_prob).log()
        # NOTE: it seems logpi.clone() won't create a new hook list, so we
        # take the log a second time to get an independent copy for the
        # entropy term. See https://github.com/pytorch/pytorch/issues/2601
        logpi2 = (pi + self.options.min_prob).log()

        # Get policy. N * #num_actions
        policy_err = self.policy_loss(logpi, a)
        entropy_err = (logpi2 * pi).sum() / batchsize

        return dict(
            logpi=logpi, policy_err=policy_err, entropy_err=entropy_err)

    def _compute_policy_entropy_err(self, pi, a):
        """Compute policy error and entropy error for a batch.

        Uses ``min_prob`` to avoid ``NaN`` in logarithms.

        Returns:
            dict of
            ``logpi``: log policy
            ``policy_err``: policy error
            ``entropy_err``: entropy error
        """
        errs = {}
        if isinstance(pi, list):
            # Action map; we need to compute the error cell by cell.
            for i, pix in enumerate(pi):
                for j, pixy in enumerate(pix):
                    errs = accumulate(
                        errs,
                        self._compute_one_policy_entropy_err(
                            pixy, a[:, i, j])
                    )
        else:
            errs = self._compute_one_policy_entropy_err(pi, a)

        return errs

    def _reg_backward(self, v, pg_weights):
        """Register the backward hook. Clip the gradient if necessary."""
        def bw_hook(grad_in):
            grad_clip_norm = self.options.grad_clip_norm
            # this works only on pytorch 0.2.0
            grad = grad_in.mul(pg_weights.view(-1, 1))
            if grad_clip_norm > 1e-20:
                average_norm_clip(grad, grad_clip_norm)
            return grad
        v.register_hook(bw_hook)

    def feed(self, Q, pi_s, actions, stats, old_pi_s=dict()):
        """One iteration of policy gradient.

        rho nabla_w log p_w(a|s) Q + entropy_ratio * nabla H(pi(.|s))

        Args:
            Q(tensor): estimated return
            actions(tensor): action
            pi_s(variable): policy
            old_pi_s(tensor, optional): old policy, used to compute the
                                        importance factor.

        If you specify multiple policies, the log probs of these policies
        are added and their importance factors are multiplied.
        Feed to stats: policy error and nll error

        """
        # Set the per-sample policy-gradient weights beforehand; the
        # backward hooks registered below read them.
# Note that the samples we collect might be off-policy, so we need # to do importance sampling. pg_weights = Q.clone() policy_err = None entropy_err = None log_pi_s = [] for pi_node, a_node in self.policy_action_nodes: pi = pi_s[pi_node] a = actions[a_node].squeeze() if pi_node in old_pi_s: old_pi = old_pi_s[pi_node].squeeze() # Cap it. clamped_ratios = torch.clamp( pi.data.div(old_pi), max=self.options.ratio_clamp) coeff = clamped_ratios.gather(1, a.view(-1, 1)).squeeze() pg_weights.mul_(coeff) # There is another term (to compensate clamping), but we omit # it for now. # Compute policy gradient error: errs = self._compute_policy_entropy_err(pi, Variable(a)) policy_err = add_err(policy_err, errs["policy_err"]) entropy_err = add_err(entropy_err, errs["entropy_err"]) log_pi_s.append(errs["logpi"]) stats["nll_" + pi_node].feed(errs["policy_err"].data[0]) stats["entropy_" + pi_node].feed(errs["entropy_err"].data[0]) for log_pi in log_pi_s: self._reg_backward(log_pi, Variable(pg_weights)) if len(self.policy_action_nodes) > 1: stats["total_nll"].feed(policy_err.data[0]) stats["total_entropy"].feed(entropy_err.data[0]) return policy_err + entropy_err * self.options.entropy_ratio
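# Standalone sketch (toy numbers; not the ELF PolicyGradient) of the clamped
# importance weighting used above: ratio = pi(a|s) / old_pi(a|s), capped at
# ratio_clamp, gathered at the taken actions and multiplied into the
# per-sample policy-gradient weights.
import torch

pi = torch.tensor([[0.7, 0.3], [0.2, 0.8]])
old_pi = torch.tensor([[0.5, 0.5], [0.9, 0.1]])
a = torch.tensor([0, 1])
ratio_clamp = 10.0

ratios = torch.clamp(pi / old_pi, max=ratio_clamp)   # (B, #actions)
coeff = ratios.gather(1, a.view(-1, 1)).squeeze(1)   # ratio at the taken action
pg_weights = torch.tensor([1.0, -0.5]) * coeff       # e.g. advantages * ratio
print(coeff)       # tensor([1.4000, 8.0000])
print(pg_weights)  # tensor([ 1.4000, -4.0000])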
# Copyright (c) 2018-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.

import torch.nn as nn
from torch.autograd import Variable

from elf.options import auto_import_options, PyOptionSpec

from .discounted_reward import DiscountedReward
from .utils import add_err


class Q_learning(object):
    """A Q-learning model."""

    @classmethod
    def get_option_spec(cls):
        spec = PyOptionSpec()
        spec.addStrOption(
            'a_node',
            'action node',
            'a')
        spec.addStrOption(
            'q_node',
            'Q node',
            'Q')
        spec.merge(DiscountedReward.get_option_spec())
        return spec

    @auto_import_options
    def __init__(self, option_map):
        """Initialization of Q-learning."""
        self.discounted_reward = DiscountedReward(option_map)
        self.q_loss = nn.SmoothL1Loss().cuda()

    def update(self, mi, batch, stats):
        '''Q-learning model update. Feed stats for later summarization.

        Args:
            mi(`ModelInterface`): model interface used
            batch(dict): batch of data. Keys in a batch:
                ``s``: state,
                ``r``: immediate reward,
                ``terminal``: if game is terminated
            stats(`Stats`): Feed stats for later summarization.
        '''
        m = mi["model"]
        Q_node = self.options.q_node
        a_node = self.options.a_node

        T = batch["s"].size(0)

        state_curr = m(batch.hist(T - 1))
        Q = state_curr[Q_node].squeeze().data
        # Bootstrap with the greedy state value max_a Q(s, a).
        V = Q.max(1)[0]
        self.discounted_reward.setR(V, stats)

        err = None
        for t in range(T - 2, -1, -1):
            bht = batch.hist(t)
            state_curr = m.forward(bht)

            # go through the sample and get the rewards.
            Q = state_curr[Q_node].squeeze()
            a = state_curr[a_node].squeeze()

            R = self.discounted_reward.feed(
                dict(r=batch["r"][t], terminal=batch["terminal"][t]),
                stats=stats)

            # Match the Q value of the taken action against the target R.
            # Q: batchsize * #action.
            Q_sel = Q.gather(1, a.view(-1, 1)).squeeze()
            err = add_err(err, self.q_loss(Q_sel, Variable(R)))

        stats["cost"].feed(err.data[0] / (T - 1))
        err.backward()
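# Standalone toy sketch (a tiny hand-written Q table stands in for the model;
# not ELF code) of the one-step target used by Q_learning.update above:
# bootstrap with max_a Q at the last step, then regress Q(s_t, a_t) toward
# r_t + discount * R_{t+1} with a SmoothL1 loss.
import torch
import torch.nn as nn

Q_last = torch.tensor([[1.0, 3.0], [2.0, 0.5]])
R = Q_last.max(1)[0]                       # bootstrap state value per sample
r_t = torch.tensor([0.5, 1.0])
R = r_t + 0.99 * R                         # one-step discounted target

Q_t = torch.tensor([[2.0, 1.0], [0.0, 1.5]], requires_grad=True)
a_t = torch.tensor([0, 1])
Q_sel = Q_t.gather(1, a_t.view(-1, 1)).squeeze(1)   # Q of the taken actions
loss = nn.SmoothL1Loss()(Q_sel, R)
loss.backward()
print(loss.item(), Q_t.grad)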
# Copyright (c) 2018-present, Facebook, Inc. # All rights reserved. # # This source code is licensed under the BSD-style license found in the # LICENSE file in the root directory of this source tree. from .single_process import SingleProcessRun # from .multi_process import MultiProcessRun from .eval_iters import EvalIters, EvalItersBasic
# Copyright (c) 2018-present, Facebook, Inc. # All rights reserved. # # This source code is licensed under the BSD-style license found in the # LICENSE file in the root directory of this source tree. from elf.options import auto_import_options, PyOptionSpec from ..stats import Stats class EvalItersBasic(object): @classmethod def get_option_spec(cls): spec = PyOptionSpec() spec.addIntOption( 'num_eval', 'number of games to evaluate', 500) spec.addBoolOption( 'tqdm', 'toggle tqdm visualization', False) return spec @auto_import_options def __init__(self, option_map): """Initialization for Evaluation.""" self.count = 0 def add_count(self): self.count += 1 def update_count(self, n): self.count = n def iters(self): ''' loop through until we reach ``num_eval`` games. if use ``tqdm``, also visualize the progress bar. Print stats summary in the end. ''' if self.options.tqdm: import tqdm tq = tqdm.tqdm(total=self.options.num_eval, ncols=50) while self.count < self.options.num_eval: old_n = self.count yield old_n diff = self.count - old_n tq.update(diff) tq.close() else: while self.count < self.options.num_eval: yield self.count class EvalIters(object): @classmethod def get_option_spec(cls): spec = PyOptionSpec() spec.addIntOption( 'num_eval', 'number of games to evaluate', 500) spec.addBoolOption( 'tqdm', 'toggle tqdm visualization', False) return spec @auto_import_options def __init__(self, option_map): """Initialization for Evaluation.""" self.stats = Stats(option_map, "eval") self.eval_iter_basic = EvalItersBasic(option_map) def _on_get_args(self, _): self.stats.reset() def iters(self): ''' loop through until we reach ``num_eval`` games. if use ``tqdm``, also visualize the progress bar. Print stats summary in the end. ''' if self.options.tqdm: import tqdm tq = tqdm.tqdm(total=self.options.num_eval, ncols=50) while self.stats.count_completed() < self.options.num_eval: old_n = self.stats.count_completed() yield old_n diff = self.stats.count_completed() - old_n tq.update(diff) tq.close() else: while self.stats.count_completed() < self.options.num_eval: yield self.stats.count_completed() self.stats.print_summary()
# Copyright (c) 2018-present, Facebook, Inc. # All rights reserved. # # This source code is licensed under the BSD-style license found in the # LICENSE file in the root directory of this source tree. import tqdm from elf.options import auto_import_options, PyOptionSpec from .parameter_server import SharedData class MultiProcessRun(object): @classmethod def get_option_spec(cls): spec = PyOptionSpec() spec.addIntOption( 'num_minibatch', 'number of minibatches', 5000) spec.addIntOption( 'num_episode', 'number of episodes', 10000) spec.addIntOption( 'num_process', 'number of processes', 2) spec.addBoolOption( 'tqdm', 'toggle tqdm visualization', False) return spec @auto_import_options def __init__(self, option_map): """Initialization for MultiProcessRun.""" pass def setup(self, GC, mi, remote_init, remote_process, episode_start=None, episode_summary=None, args=None): ''' Setup for MultiProcessRun. Args: GC(`GameContext`): Game Context mi(`ModelInterface`): ModelInterface remote_init(func): Callbacks for remote initialization, used in SharedData remote_process(func): Callbacks for remote process, used in SharedData episode_start(func): operations to perform before each episode episode_summary(func): operations to summarize after each epidsode args(dict): Additional arguments for class `SharedData` ''' self.GC = GC self.episode_start = episode_start self.episode_summary = episode_summary self.remote_init = remote_init self.remote_process = remote_process self.shared_data = \ SharedData(self.options.num_process, mi, GC.inputs[1][0], cb_remote_initialize=remote_init, cb_remote_batch_process=remote_process, args=args) self.total_train_count = 0 self.success_train_count = 0 def _train(self, batch): # Send to remote for remote processing. # TODO Might have issues when batch is on GPU. self.total_train_count += 1 success = self.shared_data.send_batch(batch) if success: self.success_train_count += 1 def run(self): """Main training loop. Initialize Game Context and looping the required episodes. Call episode_start and episode_summary before and after each episode if necessary. Visualize with a progress bar if ``tqdm`` is set. Print training stats after each episode. In the end, print summary for game context and stop it. """ self.GC.reg_callback("train", self._train) self.GC.start() for k in range(self.options.num_episode): if self.episode_start is not None: self.episode_start(k) if self.options.tqdm: iterator = tqdm.trange(self.options.num_minibatch, ncols=50) else: iterator = range(self.options.num_minibatch) for i in iterator: self.GC.run() if self.episode_summary is not None: self.episode_summary(k) print( "Train stat: (%.2f) %d/%d" % (float( self.success_train_count) / self.total_train_count, self.success_train_count, self.total_train_count)) self.GC.stop()
# Copyright (c) 2018-present, Facebook, Inc. # All rights reserved. # # This source code is licensed under the BSD-style license found in the # LICENSE file in the root directory of this source tree. # XXX hack fix path import os import random import sys import torch.multiprocessing as _mp import utils_elf sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..', '..', 'elf')) mp = _mp.get_context('spawn') ''' Usage: In process main function, run the following and then you get a shared model. if rank == 0: model = build_model(with_cuda) else: model = None model = param_server.sync_model(rank, model) ''' class Cond: ''' Wrapper for `Condition` class from torch multiprocessing''' def __init__(self): self.cond = mp.Condition() def wait(self): self.cond.acquire() self.cond.wait() self.cond.release() def wait_noblock(self): self.cond.acquire() self.cond.wait(0) self.cond.release() def notify(self): self.cond.acquire() self.cond.notify() self.cond.release() class ParameterServer(object): ''' ParameterServer to handle updates in the model concurrently ''' def __init__(self, n_processes): ''' Initialization. Args: n_processes: number of processes. ''' self.queue = mp.Queue() self.n_processes = n_processes self.barrier = mp.Barrier(n_processes) # For update signal. self.send_done = Cond() self.recv_done = Cond() def __getstate__(self): return ( self.queue, self.barrier, self.n_processes, self.send_done, self.recv_done) def __setstate__(self, state): self.queue, self.barrier, self.n_processes, \ self.send_done, self.recv_done = \ state def server_send_model(self, mi): """Send the model to others and starts to wait. Finish waiting if all client receives the model. Args: mi(`ModelInterface`): model interface to send """ assert mi is not None for i in range(self.n_processes - 1): self.queue.put(mi) self._server_shared_mi = mi self.barrier.wait() def client_receive_model(self): """Receive model from the queue. Finish waiting if all client receives the model. Returns: `ModelInterface` shared in clients. """ mi = self.queue.get() # clone the gradients to break the sharing for _, model in mi.models.items(): for param in model.parameters(): if param.grad is not None: param._grad = param.grad.clone() self.barrier.wait() self._client_shared_mi = mi return self._client_shared_mi def server_update_model(self, key, new_mi, noblock=False): ''' Update shared model in the server, wait until all clients receive. Args: key(str): the key in ``models`` to update new_mi(`ModelInterface`): new model interface to update noblock(bool): indicates if updating models block other threads. Default is blocking. ''' # if recv is not done, skip it. if noblock: try: self.recv_done.wait_noblock() except BaseException: # The recv is not done yet. Cannot send. return False else: self.recv_done.wait() self._server_shared_mi.update_model(key, new_mi) # Then wait until other people have received. self.send_done.notify() return True def client_refresh_model(self, gpu=None, skip=False): ''' Clone updated shared model from the server. Args: gpu(int): gpu index skip(bool): if we skip this model. Will return ``None`` if set to ``True`` Returns: refreshed model. ''' # First wait until we are synced up. 
self.send_done.wait() if not skip: mi = self._client_shared_mi.clone(gpu=gpu) else: mi = None self.recv_done.notify() return mi class SharedData: def __init__(self, total_process, mi, batch_template, cb_remote_initialize=None, cb_remote_batch_process=None, args=None): ''' Initialize `SharedData` class with a few hooks Args: total_process: number of processes mi: ModelInterface batch_template: cb_remote_initialize: Callbacks for remote Initialization cb_remote_batch_process: Callbacks for remote process args: additional arguments ''' self.server = ParameterServer(total_process) self.cb_remote_initialize = cb_remote_initialize self.cb_remote_batch_process = cb_remote_batch_process self.args = args # def get_gpu_id(i): return i + 1 def get_gpu_id(i): return 0 # Share only training batches. shared_batches = [] cvs_send = [] cvs_recv = [] qs = [] for i in range(total_process - 1): # gpu_id = get_gpu_id(i) # shared_batches.append( # cpu2gpu(all_batches[train_idx][0], gpu=gpu_id)) shared_batches.append(utils_elf.pin_clone(batch_template)) qs.append(mp.Queue(1)) qs[-1].put(shared_batches[i]) cvs_send.append(Cond()) cvs_recv.append(Cond()) self.cvs_send = cvs_send self.cvs_recv = cvs_recv self.shared_batches = shared_batches self.qs = qs self.b = mp.Barrier(total_process) self.optimizers = [ mp.process( target=self.process_main, args=( i, get_gpu_id(i))) for i in range( total_process - 1)] for optimizer in self.optimizers: optimizer.start() # Wait until all models have received the shared memory. self.b.wait() self.server.server_send_model(mi) def process_main(self, i, gpu_id): ''' Main process. Transportation between cpu and gpu. Args: i(int): process id gpu_id(int): gpu id ''' batch = self.qs[i].get() self.b.wait() batch_gpu = utils_elf.cpu2gpu(batch, gpu=gpu_id) mi = self.server.client_receive_model() context = self.cb_remote_initialize(mi, gpu_id, self.args) print( "[%d] Context initialization completed, gpu_id = %d.. " % (i, gpu_id)) # Ready. self.cvs_send[i].notify() while True: self.cvs_recv[i].wait() utils_elf.transfer_cpu2gpu(batch, batch_gpu, non_blocking=True) self.cvs_send[i].notify() self.cb_remote_batch_process(context, batch_gpu) def send_batch(self, batch): ''' Send batch to a cpu process Args: batch(dict): batch data ''' process_idx = random.randint(0, len(self.shared_batches) - 1) try: self.cvs_send[process_idx].wait_noblock() utils_elf.transfer_cpu2cpu(batch, self.shared_batches[process_idx]) self.cvs_recv[process_idx].notify() return True except Exception as e: # print("Failed to send batch to %d" % process_idx) # print(type(e)) # print(e.args) # print(e) return False
# Copyright (c) 2018-present, Facebook, Inc. # All rights reserved. # # This source code is licensed under the BSD-style license found in the # LICENSE file in the root directory of this source tree. import threading from elf.options import auto_import_options, PyOptionSpec class SingleProcessRun(object): @classmethod def get_option_spec(cls): spec = PyOptionSpec() spec.addIntOption( 'num_minibatch', 'number of minibatches', 5000) spec.addIntOption( 'num_cooldown', 'Last #minibatches to refresh running mean/std for batchnorm ' 'in addition to the training stage', 0) spec.addIntOption( 'num_episode', 'number of episodes', 10000) spec.addBoolOption( 'tqdm', 'toggle tqdm visualization', False) return spec @auto_import_options def __init__(self, option_map): """Initialization for SingleProcessRun.""" pass def setup(self, GC, episode_start=None, episode_summary=None, after_start=None, before_stop=None): ''' Setup for SingleProcessRun. Args: GC(`GameContext`): Game Context episode_start(func): operations to perform before each episode episode_summary(func): operations to summarize after each episode after_start(func): operations called after GC.start() but before the main loop. ''' self.GC = GC self.episode_summary = episode_summary self.episode_start = episode_start self.after_start = after_start self.before_stop = before_stop def run(self): """Main training loop. Initialize Game Context and looping the required episodes. Call episode_start and episode_summary before and after each episode if necessary. Visualize with a progress bar if ``tqdm`` is set. Print training stats after each episode. In the end, print summary for game context and stop it. """ self.GC.start() if self.after_start is not None: self.after_start() for k in range(self.options.num_episode): if self.episode_start is not None: self.episode_start(k) if self.options.tqdm: import tqdm tq = tqdm.tqdm(total=self.options.num_minibatch, ncols=50) else: tq = None self.episode_counter = 0 while self.episode_counter < self.options.num_minibatch: old_counter = self.episode_counter # Make sure if the callback function in GC.run() change the # counter, then the set value will not be added by 1. self.GC.run() self.episode_counter += 1 diff = self.episode_counter - old_counter if tq is not None: if diff < 0: print(f'Diff negative: {old_counter} -> ' f'{self.episode_counter}') tq = tqdm.tqdm( total=self.options.num_minibatch, ncols=50) tq.update(self.episode_counter) else: tq.update(diff) if self.options.num_cooldown > 0: print(f'Starting {self.options.num_cooldown} cooldown passes') self.cooldown_counter = 0 while self.cooldown_counter < self.options.num_cooldown: self.GC.run( use_cooldown=True, cooldown_count=self.cooldown_counter) self.cooldown_counter += 1 if self.episode_summary is not None: self.episode_summary(k) if self.before_stop is not None: self.before_stop() self.GC.stop() def set_episode_counter(self, counter): self.episode_counter = counter def inc_episode_counter(self, delta): self.episode_counter += delta def run_multithread(self): ''' Start training in a multithreaded environment ''' def train_thread(): for i in range(self.options.num_episode): for k in range(self.options.num_minibatch): if self.episode_start is not None: self.episode_start(k) if k % 500 == 0: print( "Receive minibatch %d/%d" % (k, self.options.num_minibatch)) self.GC.runGroup("train") # Print something. self.episode_summary(i) def actor_thread(): while True: self.GC.runGroup("actor") self.GC.start() # Start the two threads. 
train_th = threading.Thread(target=train_thread) actor_th = threading.Thread(target=actor_thread) train_th.start() actor_th.start() train_th.join() actor_th.join()
# Copyright (c) 2018-present, Facebook, Inc. # All rights reserved. # # This source code is licensed under the BSD-style license found in the # LICENSE file in the root directory of this source tree. # From https://code.activestate.com/recipes/577504/ from __future__ import print_function from sys import getsizeof, stderr from itertools import chain from collections import deque try: from reprlib import repr except ImportError: pass def total_size(o, handlers={}, obj_handlers={}, verbose=False): """ Returns the approximate memory footprint an object and all of its contents. Automatically finds the contents of the following builtin containers and their subclasses: tuple, list, deque, dict, set and frozenset. To search other containers, add handlers to iterate over their contents: handlers = {SomeContainerClass: iter, OtherContainerClass: OtherContainerClass.get_elements} """ def dict_handler(d): return chain.from_iterable(d.items()) all_handlers = {tuple: iter, list: iter, deque: iter, dict: dict_handler, set: iter, frozenset: iter, } all_handlers.update(handlers) # user handlers take precedence seen = set() # track which object id's have already been seen # estimate sizeof object without __sizeof__ default_size = getsizeof(0) def sizeof(o): if id(o) in seen: # do not double count the same object return 0 seen.add(id(o)) s = 0 for typ, handler in obj_handlers.items(): if isinstance(o, typ): s = handler(o) break if s == 0: s = getsizeof(o, default_size) if verbose: print(s, type(o), repr(o), file=stderr) for typ, handler in all_handlers.items(): if isinstance(o, typ): s += sum(map(sizeof, handler(o))) break return s return sizeof(o) # Example call if __name__ == '__main__': d = dict(a=1, b=2, c=3, d=[4, 5, 6, 7], e='a string of chars') print(total_size(d, verbose=True))
# Copyright (c) 2018-present, Facebook, Inc. # All rights reserved. # # This source code is licensed under the BSD-style license found in the # LICENSE file in the root directory of this source tree. from .hist_states import HistState
# Copyright (c) 2018-present, Facebook, Inc. # All rights reserved. # # This source code is licensed under the BSD-style license found in the # LICENSE file in the root directory of this source tree. import math import queue from collections import defaultdict, Counter from datetime import datetime import numpy as np import torch import torch.multiprocessing as _mp import msgpack import msgpack_numpy from .size_utils import total_size mp = _mp.get_context('spawn') msgpack_numpy.patch() def dumps(obj): return msgpack.dumps(obj, use_bin_type=True) def loads(buf): return msgpack.loads(buf) def npimg_convert(img): img = img.astype('float32') / 255.0 img = np.transpose(img, (2, 0, 1)) return img def check_done_flag(done_flag): if done_flag is not None: with done_flag.get_lock(): return done_flag.value return False def islambda(v): def LAMBDA(): return 0 return isinstance(v, type(LAMBDA)) and v.__name__ == LAMBDA.__name__ def queue_get(q, done_flag=None, fail_comment=None): if done_flag is None: return q.get() done = False while not done: try: return q.get(True, 0.01) except queue.Empty: if fail_comment is not None: print(fail_comment) if check_done_flag(done_flag): done = True # Return return None def queue_put(q, item, done_flag=None, fail_comment=None): if done_flag is None: q.put(item) return True done = False while not done: try: q.put(item, True, 0.01) return True except queue.Full: if fail_comment is not None: print(fail_comment) if check_done_flag(done_flag): done = True return False class Switch: def __init__(self, val=True): self.val = mp.Value("b", val) self.lock = mp.Lock() def get(self): with self.lock: return self.val.value def set(self, v): with self.lock: self.val.value = v class Timer: def __init__(self): self.reset() def __call__(self, name): self.curr_name = name return self def __enter__(self): self.before[self.curr_name] = datetime.now() def __exit__(self, t, value, traceback): after = datetime.now() elapsed = (after - self.before[self.curr_name]).total_seconds() * 1000 self.records[self.curr_name][0] += elapsed self.records[self.curr_name][1] += 1 def summary(self): rets = [] for name, record in self.records.items(): cumtime, count = record aver_time = float(cumtime) / count rets.append("[%s] %.3f ms [%d]" % (name, aver_time, count)) return rets def reset(self): self.records = defaultdict(lambda: [0, 0]) self.before = {} class CategoryCounter: def __init__(self, name=None): self.name = name self.reset() def reset(self): self.counter = Counter() def feed(self, data): for v in data: self.counter[v] += 1 def summary(self, info=""): n = sum(self.counter.values()) prompt = "[%s] n = %d " % (info, n) if n > 0: return prompt + "\n" + \ "\n".join([" \"%s\": %d (%.2lf%%)" % (k, v, 100.0 * v / n) for k, v in self.counter.items()]) else: return prompt class DelayedStats: def __init__(self, prefix, max_delay=5): ''' self.entries[key][t] gives the value of key at time t ''' self.prefix = prefix self.max_delay = max_delay self.reset() def reset(self): # self.entries[key][t_id] -> value self.entries = defaultdict(dict) self.predicted_entries = [ defaultdict(dict) for i in range( self.max_delay)] self.baseline_entries = [ defaultdict(dict) for i in range( self.max_delay)] def feed(self, ts, ids, curr, pred_diff, curr_cb=None, diff_cb=None): """Check keys in curr and pred, if there is any key starts with 'fa_', collect them and compare against each other. 
For example (suppose we are at time t): num_unit_T2: predicted difference: curr["num_unit"] (at time t + 2) - curr["num_unit"] (at time t). """ # curr[key][i] -> value, ids[i] -> id for k, v in curr.items(): if not k.startswith(self.prefix): continue key = k[len(self.prefix):] history = self.entries[key] history.update({ str(t) + "_" + str(d): (v[i] if not curr_cb else curr_cb(v[i])) for i, (t, d) in enumerate(zip(ts, ids)) }) for k, v in pred_diff.items(): if not k.startswith(self.prefix): continue key = k[len(self.prefix):] idx = key.rfind("_") delay = int(key[idx + 2:]) if delay >= self.max_delay: continue key = key[:idx] # Save it history = self.predicted_entries[delay][key] history.update({ str(t + delay) + "_" + str(d): ( self.entries[key][str(t) + "_" + str(d)] + (v[i] if not diff_cb else diff_cb(v[i])) ) for i, (t, d) in enumerate(zip(ts, ids)) }) history2 = self.baseline_entries[delay][key] history2.update({ str(t + delay) + "_" + str(d): self.entries[key][str(t) + "_" + str(d)] for t, d in zip(ts, ids) }) def _compare_history(self, h1, h2): summation = 0 counter = 0 # h1[t_id] -> val for t_id, v1 in h1.items(): if not (t_id in h2): continue v2 = h2[t_id] summation += (v1 - v2) ** 2 counter += 1 return summation / (counter + 1e-8), counter def summary(self, info=""): for k, v in self.entries.items(): for i in range(1, self.max_delay): # Difference avgMSE, counter = self._compare_history( self.predicted_entries[i][k], v) avgMSE_bl, counter = self._compare_history( self.baseline_entries[i][k], v) print( "[%s][%s_T%d] RMS: %.4lf, Baseline: %.4lf [cnt=%d]" % (info, k, i, math.sqrt(avgMSE), math.sqrt(avgMSE_bl), counter) ) def print_dict(prompt, d, func=str, tight=False): dem = ", " if tight else "\n" print(prompt, end='') if not tight: print("") print(dem.join(["%s: %s" % (k, func(d[k])) for k in sorted(d.keys())])) if not tight: print("") def print_dict2(prompt, d1, d2, func=lambda x, y: str(x) + "_" + str(y)): print(prompt) items = [] for k in sorted(d1.keys()): if not (k in d2): continue v1 = d1[k] v2 = d2[k] items.append("%s: %s" % (k, func(v1, v2))) print("\n".join(items)) print("") def is_viskey(k): return k.startswith("_") or k.startswith("fa_") def get_avg_str(l): return ", ".join(["[%d]: %.2lf [cnt=%d]" % (i, math.sqrt( v / (c + 1e-10)), c) for i, (v, c) in enumerate(zip(l[::2], l[1::2]))]) def get_avg_str2(l, l_bl): items = [] for i, (v1, c1, v2, c2) in enumerate( zip(l[::2], l[1::2], l_bl[::2], l_bl[1::2])): r1 = math.sqrt(v1 / (c1 + 1e-10)) r2 = math.sqrt(v2 / (c2 + 1e-10)) items.append("[%d]: %.2lf=%.2lf/%.2lf(%d)" % (i, r1 / (r2 + 1e-10), r1, r2, c1)) return ", ".join(items) class ForwardTracker: def __init__(self, max_delay=6): # prediction[key][t] -> value self.max_delay = max_delay self.sum_sqr_err = defaultdict(lambda: [0] * (2 * self.max_delay)) self.sum_sqr_err_bl = defaultdict(lambda: [0] * (2 * self.max_delay)) self.reset() def reset(self): self.prediction = defaultdict( lambda: defaultdict(lambda: { "pred": [0] * self.max_delay, "baseline": [0] * self.max_delay }) ) def feed(self, batch_states, curr_batch, forwarded): # Dump all entries with _, and with fa_ if curr_batch is None and forwarded is None: state_info = { k: v for k, v in batch_states[0].items() if is_viskey(k)} print_dict("[batch states]: ", state_info, tight=True) return batch_info = { k: v if isinstance( v, (int, float, str)) else v[0] for k, v in curr_batch.items() if is_viskey(k)} fd_info = {k: v.data[0] for k, v in forwarded.items() if is_viskey(k)} t0 = batch_info["_seq"] additional_info = {} 
used_fd_info = defaultdict(lambda: [0] * self.max_delay) for k, v in batch_info.items(): pred = self.prediction[k] # If there is prediction of the current value, also show them. if t0 in pred: cp = pred[t0] # Also compute th error. for delay, p in enumerate(cp["pred"]): self.sum_sqr_err[k][2 * delay] += (p - v) ** 2 self.sum_sqr_err[k][2 * delay + 1] += 1 for delay, p in enumerate(cp["baseline"]): self.sum_sqr_err_bl[k][2 * delay] += (p - v) ** 2 self.sum_sqr_err_bl[k][2 * delay + 1] += 1 additional_info[k + "_pred"] = ", ".join([ "[%d] %.2f" % (delay, p) for delay, p in enumerate(cp["pred"]) if delay != 0 ]) additional_info[k + "_bl"] = ", ".join([ "[%d] %.2f" % (delay, p) for delay, p in enumerate(cp["baseline"]) if delay != 0 ]) del pred[t0] for t in range(1, self.max_delay): k_f = k + "_T" + str(t) if not (k_f in fd_info): continue predictions = pred[t0 + t] predictions["pred"][t] = fd_info[k_f] + v predictions["baseline"][t] = v used_fd_info[k][t] = fd_info[k_f] batch_info.update(additional_info) used_fd_info = { k: ", ".join([ "[%d] %.2f" % (i, vv) for i, vv in enumerate(v) if i != 0 ]) for k, v in used_fd_info.items() } # print("--------------") # print_dict2("[statistics]:", self.sum_sqr_err, self.sum_sqr_err_bl, # func=get_avg_str2) # print_dict("[batch after _make_batch]: ", batch_info) # print_dict("[state_curr after forward]: ", used_fd_info) class SeqStats: def __init__(self, name="seq", seq_limits=None): # Stats. self.stats_seq = Counter() self.clear_stats() self.name = name if seq_limits is None: self.limits = [ 1, 100, 200, 300, 400, 500, 600, 700, 800, 900, 1000, 1200, 1400, 1600, 1800, 2000, 2500, 3000, 4000, 5000, float("inf")] else: self.limits = seq_limits if not np.isinf(self.limits[-1]): self.limits.append(float("inf")) def feed(self, seqs): for seq_num in seqs: bin_idx = None for i, limit in enumerate(self.limits[1:]): if int(seq_num) < limit: bin_idx = i break if seq_num > self.max_seq: self.max_seq = seq_num if seq_num < self.min_seq: self.min_seq = seq_num name = "[" + str(self.limits[bin_idx]) + ", " + \ str(self.limits[bin_idx + 1]) + ")" self.stats_seq[name] += 1 def print_stats(self, reset=False): total_counts = sum(self.stats_seq.values()) if total_counts > 0: print( "Distribution of %s [min = %d / max = %d / #count = %d]:" % (self.name, self.min_seq, self.max_seq, total_counts)) s = "" for r in sorted(self.stats_seq.keys(), key=lambda x: float(x.split(",")[0][1:])): s += "%s: %d [%.2lf%%]\n" % ( r, self.stats_seq[r], 100.0 * self.stats_seq[r] / total_counts) print(s) else: print( "Distribution of %s [#count = %d]:" % (self.name, total_counts)) if reset: self.clear_stats() def clear_stats(self): self.stats_seq.clear() self.max_seq = 0 self.min_seq = float('inf') def agent2sender(agent_name): return agent_name[:-5].encode('ascii') def sender2agent(sender, i): return sender + "-%04d" % i def npimgs2cudatensor(imgs): imgs = torch.from_numpy(imgs) imgs = imgs.float().div(255) imgs = imgs.transpose(0, 1).transpose(0, 2).contiguous() imgs.cuda() return imgs def print_binary(m): # Print a binary matrix. if len(m.size()) != 2: print("Err! cannot print matrix of size " + str(m.size())) return s = "" for i in range(m.size(0)): for j in range(m.size(2)): if m[i, j] != 0: s += "x" else: s += "." 
s += "\n" print(s) def get_total_size(o): def get_tensor_size(t): return t.numel() * t.element_size() tensor_objects = [ torch.ByteTensor, torch.FloatTensor, torch.DoubleTensor, torch.IntTensor, torch.LongTensor, torch.cuda.ByteTensor, torch.cuda.FloatTensor, torch.cuda.DoubleTensor, torch.cuda.IntTensor, torch.cuda.LongTensor, ] obj_handlers = {obj: get_tensor_size for obj in tensor_objects} return total_size(o, obj_handlers=obj_handlers)
# Copyright (c) 2018-present, Facebook, Inc. # All rights reserved. # # This source code is licensed under the BSD-style license found in the # LICENSE file in the root directory of this source tree. from collections import defaultdict, deque class HistState: def __init__(self, T, init_state_func=None): self.hs = defaultdict(lambda: deque()) self.T = T self.init_state_func = init_state_func def preprocess(self, ids, seqs): for id, seq in zip(ids, seqs): q = self.hs[id] if seq == 0: # clear the queue (which might contain old states of the last # game) q.clear() if self.init_state_func is not None: q.append(self.init_state_func()) def feed(self, ids, hiddens): ''' h[0] is the oldest element (left-most), h[-1] is the newest element (right-most) ''' for id, h in zip(ids, hiddens): q = self.hs[id] # Put the newest element from the right. q.append(h) # Pop the oldest element from the left. if len(q) > self.T: q.popleft() def _get_batch(self, ids, t, default=None): list_output = False if default is None: templ = self.hs[ids[0]][t] if isinstance(templ, (dict, list)): data = [] list_output = True else: data = templ.clone().resize_(len(ids), templ.size(0)) else: data = default.clone() for i, id in enumerate(ids): if id in self.hs: if not list_output: data[i, :] = self.hs[id][t] else: data.append(self.hs[id][t]) return data def newest(self, ids, t, default=None): return self._get_batch(ids, -t - 1, default=default) def oldest(self, ids, t, default=None): return self._get_batch(ids, t, default=default) def map(self, ids, func): hs = self.hs[ids[0]][0].clone() hs.resize_(len(ids), *list(hs.size())) for t in range(self.T): # Collect the data. for i, id in enumerate(ids): if t < len(self.hs[id]): hs[i, :] = self.hs[id][t] output = func(hs) # Update the state. for id, h in zip(ids, output): if t < len(self.hs[id]): self.hs[id][t] = h
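# Standalone toy sketch (plain Python; does not import the class above) of the
# per-id sliding history HistState maintains: a deque of the last T states per
# game id, appending on the right and dropping the oldest from the left.
from collections import defaultdict, deque

T = 3
hs = defaultdict(deque)
for state in range(10, 16):
    q = hs["game0"]
    q.append(state)        # newest on the right
    if len(q) > T:
        q.popleft()        # drop the oldest on the left
print(list(hs["game0"]))   # [13, 14, 15] -- only the newest T entries remain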
# Copyright (c) 2018-present, Facebook, Inc. # All rights reserved. # # This source code is licensed under the BSD-style license found in the # LICENSE file in the root directory of this source tree. import torch from rlpytorch import Model def apply_nonrecursive(module, fn): """Applies a given function only to parameters and buffers of a module. Adapted from torch.nn.Module._apply. """ for param in module._parameters.values(): if param is not None: # Tensors stored in modules are graph leaves, and we don't # want to create copy nodes, so we have to unpack the data. param.data = fn(param.data) if param._grad is not None: param._grad.data = fn(param._grad.data) for key, buf in module._buffers.items(): if buf is not None: module._buffers[key] = fn(buf) return module class FP16Model(Model): def __init__(self, option_map, params, model): super().__init__(option_map, params) self.model = model.float() for module in model.modules(): if not isinstance(module, torch.nn.modules.batchnorm._BatchNorm): apply_nonrecursive( module, lambda t: t.half() if t.is_floating_point() else t) def forward(self, input): fp16_input = input.half() return self.model(fp16_input)
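# Standalone sketch (toy module; assumes a recent PyTorch where .half() and
# dtype inspection behave as below) of the mixed conversion FP16Model performs
# above: cast floating-point parameters to half precision while leaving
# BatchNorm modules in float32.
import torch
import torch.nn as nn

net = nn.Sequential(nn.Linear(4, 4), nn.BatchNorm1d(4), nn.ReLU())
for module in net.modules():
    if not isinstance(module, nn.modules.batchnorm._BatchNorm):
        for p in module._parameters.values():
            if p is not None and p.data.is_floating_point():
                p.data = p.data.half()
print(net[0].weight.dtype, net[1].weight.dtype)  # torch.float16 torch.float32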