| """ | |
| Feature extractor class for Vits | |
| """ | |
| import copy | |
| from typing import Any, Dict, List, Optional, Tuple, Union | |
| import numpy as np | |
| from transformers import is_torch_available | |
| from transformers.audio_utils import mel_filter_bank | |
| from transformers.feature_extraction_sequence_utils import SequenceFeatureExtractor | |
| from transformers.feature_extraction_utils import BatchFeature | |
| from transformers.utils import TensorType, logging | |
| MAX_WAV_VALUE = 32768.0 | |
| if is_torch_available(): | |
| import torch | |
| logger = logging.get_logger(__name__) | |
class VitsFeatureExtractor(SequenceFeatureExtractor):
    r"""
    Constructs a Vits feature extractor.

    This feature extractor inherits from [`~feature_extraction_sequence_utils.SequenceFeatureExtractor`] which contains
    most of the main methods. Users should refer to this superclass for more information regarding those methods.

    This class extracts linear-scale and log-mel spectrograms from raw speech via a Short Time Fourier Transform,
    computed with PyTorch's `torch.stft`.

    Args:
        feature_size (`int`, defaults to 80):
            The feature dimension of the extracted features, i.e. the number of mel filters.
        sampling_rate (`int`, defaults to 16000):
            The sampling rate at which the audio files should be digitized, expressed in hertz (Hz).
        hop_length (`int`, defaults to 256):
            Number of audio samples between adjacent STFT windows used to obtain the mel-frequency coefficients.
        n_fft (`int`, defaults to 1024):
            Size of the Fourier transform.
        padding_value (`float`, *optional*, defaults to 0.0):
            Padding value used to pad the audio. Should correspond to silences.
        return_attention_mask (`bool`, *optional*, defaults to `False`):
            Whether to return the attention mask.

            [What are attention masks?](../glossary#attention-mask)

            <Tip>

            For Vits finetuning, `attention_mask` should always be passed for batched inference, to avoid subtle bugs.

            </Tip>
        max_wav_value (`float`, defaults to 32768.0):
            Maximum wav value. Used to normalize the input waveforms if `do_normalize=True` in the forward pass of this
            feature extractor.
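
    Example (a minimal usage sketch; the import path is assumed, as it depends on where this file lives):

    ```python
    >>> import numpy as np
    >>> from feature_extraction_vits import VitsFeatureExtractor  # hypothetical module path

    >>> feature_extractor = VitsFeatureExtractor()
    >>> audio = np.zeros(16000, dtype=np.float32)  # one second of placeholder silence
    >>> inputs = feature_extractor(audio, sampling_rate=16000, return_tensors="pt")
    >>> sorted(inputs.keys())
    ['attention_mask', 'input_features', 'mel_scaled_input_features']
    ```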
| """ | |

    model_input_names = ["input_features"]

    def __init__(
        self,
        feature_size=80,
        sampling_rate=16000,
        hop_length=256,
        n_fft=1024,
        padding_value=0.0,
        return_attention_mask=False,  # pad inputs to max length with silence token (zero) and no attention mask
        max_wav_value=32768.0,
        **kwargs,
    ):
        super().__init__(
            feature_size=feature_size,
            sampling_rate=sampling_rate,
            padding_value=padding_value,
            return_attention_mask=return_attention_mask,
            **kwargs,
        )
        self.n_fft = n_fft
        self.hop_length = hop_length
        self.sampling_rate = sampling_rate
        self.mel_filters = mel_filter_bank(
            num_frequency_bins=1 + n_fft // 2,
            num_mel_filters=feature_size,
            min_frequency=0.0,
            max_frequency=sampling_rate // 2,
            sampling_rate=sampling_rate,
            norm="slaney",
            mel_scale="slaney",
        )
        self.max_wav_value = max_wav_value

    def _torch_extract_fbank_features(self, waveform: "torch.Tensor") -> Tuple["torch.Tensor", "torch.Tensor"]:
        """
        Compute the linear-scale magnitude spectrogram and the log-mel spectrogram of the provided audio using the
        PyTorch STFT implementation. Expects a tensor of shape `(num_samples,)` or `(batch_size, num_samples)`.
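
        A rough shape sketch (illustrative, assuming the default config of `n_fft=1024`, `hop_length=256`,
        `feature_size=80`):

        ```python
        >>> import torch
        >>> feature_extractor = VitsFeatureExtractor()
        >>> waveform = torch.zeros(1, 16000)  # placeholder batch of one waveform
        >>> magnitudes, log_mel = feature_extractor._torch_extract_fbank_features(waveform)
        >>> magnitudes.shape, log_mel.shape
        (torch.Size([1, 513, 62]), torch.Size([1, 80, 62]))
        ```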
| """ | |
        if len(waveform.shape) == 1:
            waveform = waveform.unsqueeze(0)
        # reflect-pad so that the STFT frames stay aligned with the original samples
        waveform = torch.nn.functional.pad(
            waveform,
            (int((self.n_fft - self.hop_length) / 2), int((self.n_fft - self.hop_length) / 2)),
            mode="reflect",
        )
        window = torch.hann_window(self.n_fft).to(waveform.device)
        stft = torch.stft(
            waveform,
            self.n_fft,
            hop_length=self.hop_length,
            win_length=self.n_fft,
            window=window,
            center=False,
            pad_mode="reflect",
            normalized=False,
            onesided=True,
            return_complex=False,
        )
        # magnitude of the complex STFT output; the epsilon keeps the sqrt differentiable at zero
        magnitudes = torch.sqrt(stft.pow(2).sum(-1) + 1e-6)
        mel_filters = torch.from_numpy(self.mel_filters).type(torch.float32).to(waveform.device)
        # project linear-frequency bins onto the mel filter bank, then take the log
        mel_spec = mel_filters.T @ magnitudes
        log_spec = torch.clamp(mel_spec, min=1e-5).log()
        return magnitudes, log_spec

    def __call__(
        self,
        raw_speech: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]],
        truncation: bool = False,
        pad_to_multiple_of: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        return_attention_mask: Optional[bool] = True,
        padding: Union[bool, str] = True,
        max_length: Optional[int] = None,
        sampling_rate: Optional[int] = None,
        do_normalize: Optional[bool] = None,
        **kwargs,
    ) -> BatchFeature:
| """ | |
| Main method to featurize and prepare for the model one or several sequence(s). | |
| Args: | |
| raw_speech (`np.ndarray`, `List[float]`, `List[np.ndarray]`, `List[List[float]]`): | |
| The sequence or batch of sequences to be padded. Each sequence can be a numpy array, a list of float | |
| values, a list of numpy arrays or a list of list of float values. Must be mono channel audio, not | |
| stereo, i.e. single float per timestep. | |
| truncation (`bool`, *optional*, default to `False`): | |
| Activates truncation to cut input sequences longer than *max_length* to *max_length*. | |
| pad_to_multiple_of (`int`, *optional*, defaults to None): | |
| If set will pad the sequence to a multiple of the provided value. | |
| This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability | |
| `>= 7.5` (Volta), or on TPUs which benefit from having sequence lengths be a multiple of 128. | |
| return_tensors (`str` or [`~utils.TensorType`], *optional*): | |
| If set, will return tensors instead of list of python integers. Acceptable values are: | |
| - `'tf'`: Return TensorFlow `tf.constant` objects. | |
| - `'pt'`: Return PyTorch `torch.Tensor` objects. | |
| - `'np'`: Return Numpy `np.ndarray` objects. | |
| return_attention_mask (`bool`, *optional*, defaults to `True`): | |
| Whether to return the attention mask. If left to the default, will return the attention mask according | |
| to the specific feature_extractor's default. | |
| [What are attention masks?](../glossary#attention-mask) | |
| <Tip> | |
| For Vits finetuning, `attention_mask` should always be passed for batched inference, to avoid subtle | |
| bugs. | |
| </Tip> | |
| padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `True`): | |
| Select a strategy to pad the returned sequences (according to the model's padding side and padding | |
| index) among: | |
| - `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single | |
| sequence if provided). | |
| - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum | |
| acceptable input length for the model if that argument is not provided. | |
| - `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different | |
| lengths). | |
| max_length (`int`, *optional*): | |
| Maximum length of the returned list and optionally padding length (see above). | |
| sampling_rate (`int`, *optional*): | |
| The sampling rate at which the `raw_speech` input was sampled. It is strongly recommended to pass | |
| `sampling_rate` at the forward call to prevent silent errors and allow automatic speech recognition | |
| pipeline. | |
| do_normalize (`bool`, *optional*): | |
| Whether or not to divide the input waveform by `self.max_wav_value`. | |
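
        Example (a sketch of batched usage; waveform contents are placeholders, shapes assume the default config):

        ```python
        >>> import numpy as np
        >>> feature_extractor = VitsFeatureExtractor()
        >>> batch = [np.zeros(16000, dtype=np.float32), np.zeros(8000, dtype=np.float32)]
        >>> inputs = feature_extractor(batch, sampling_rate=16000, return_tensors="pt")
        >>> inputs["input_features"].shape  # (batch, frequency_bins, frames), padded to the longest input
        torch.Size([2, 513, 62])
        >>> inputs["mel_scaled_input_features"].shape  # (batch, n_mels, frames)
        torch.Size([2, 80, 62])
        ```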
| """ | |
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f"The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a"
                    f" sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input"
                    f" was sampled with {self.sampling_rate} and not {sampling_rate}."
                )
        else:
            logger.warning(
                "It is strongly recommended to pass the `sampling_rate` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug."
            )

        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f"Only mono-channel audio is supported for input to {self}")
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )

        if is_batched:
            raw_speech = [np.asarray([speech], dtype=np.float32).T for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float32)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float32)

        # always return batch
        if not is_batched:
            raw_speech = [np.asarray([raw_speech]).T]
        if self.max_wav_value is not None and do_normalize:
            # `self.max_wav_value` is known to be non-None here, so normalize unconditionally
            raw_speech = [speech / self.max_wav_value for speech in raw_speech]

        batched_speech = BatchFeature({"input_features": raw_speech})
        # convert into correct format for padding
        padded_inputs = self.pad(
            batched_speech,
            padding=padding,
            max_length=max_length,
            truncation=truncation,
            pad_to_multiple_of=pad_to_multiple_of,
            return_attention_mask=return_attention_mask or do_normalize,
            return_tensors="pt",
        )
        # make sure the padded waveforms are in tensor format before feature extraction
        if isinstance(padded_inputs.get("input_features"), list):
            input_features = torch.tensor(padded_inputs.get("input_features")).transpose(1, 2).transpose(0, 1)
        else:
            input_features = padded_inputs.get("input_features").clone().detach().transpose(1, 2).transpose(0, 1)

        magnitudes, mel_scaled_input_features = self._torch_extract_fbank_features(input_features[0])

        padded_inputs["input_features"] = magnitudes
        padded_inputs["mel_scaled_input_features"] = mel_scaled_input_features
        if return_attention_mask:
            # downsample the sample-level attention mask by `hop_length` so it has one entry per STFT frame
            padded_inputs["attention_mask"] = padded_inputs["attention_mask"][:, :: self.hop_length]

        if return_tensors is not None:
            padded_inputs = padded_inputs.convert_to_tensors(return_tensors)

        return padded_inputs

    def to_dict(self) -> Dict[str, Any]:
        """
        Serializes this instance to a Python dictionary.

        Returns:
            `Dict[str, Any]`: Dictionary of all the attributes that make up this configuration instance.
        """
        output = copy.deepcopy(self.__dict__)
        output["feature_extractor_type"] = self.__class__.__name__
        if "mel_filters" in output:
            del output["mel_filters"]
        return output