# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import yaml
import os
from typing import Dict
from typing import List
import numpy as np
from dlrm.data.defaults import CATEGORICAL_CHANNEL, NUMERICAL_CHANNEL, LABEL_CHANNEL, \
TRAIN_MAPPING, TEST_MAPPING, \
TYPE_SELECTOR, FEATURES_SELECTOR, FILES_SELECTOR, CARDINALITY_SELECTOR, DTYPE_SELECTOR, \
SPLIT_BINARY, \
get_categorical_feature_type
""" For performance reasons, numerical features are required to appear in the same order
in both source_spec and channel_spec.
For more detailed requirements, see the check_feature_spec method"""
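# Illustrative example (not part of the original file): a minimal feature_spec.yaml layout
# that check_feature_spec below accepts. Feature names, file names and the cardinality are
# placeholders; note that the numerical features appear in the same order in channel_spec
# and in their source_spec chunk, as required above.
#
#   channel_spec:
#     label: [label]
#     numerical: [num_0, num_1]
#     categorical: [cat_0.bin]
#   feature_spec:
#     label: {dtype: bool}
#     num_0: {dtype: float16}
#     num_1: {dtype: float16}
#     cat_0.bin: {dtype: int32, cardinality: 7912889}
#   source_spec:
#     train:
#     - {type: split_binary, features: [num_0, num_1], files: [train/numerical.bin]}
#     - {type: split_binary, features: [label], files: [train/label.bin]}
#     - {type: split_binary, features: [cat_0.bin], files: [train/cat_0.bin]}
#     test:
#     - ...  # same chunk structure as train, pointing at the test files
#   metadata: {}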
class FeatureSpec:
def __init__(self, feature_spec=None, source_spec=None, channel_spec=None, metadata=None, base_directory=None):
self.feature_spec: Dict = feature_spec if feature_spec is not None else {}
self.source_spec: Dict = source_spec if source_spec is not None else {}
self.channel_spec: Dict = channel_spec if channel_spec is not None else {}
self.metadata: Dict = metadata if metadata is not None else {}
self.base_directory: str = base_directory
@classmethod
def from_yaml(cls, path):
with open(path, 'r') as feature_spec_file:
base_directory = os.path.dirname(path)
feature_spec = yaml.safe_load(feature_spec_file)
return cls.from_dict(feature_spec, base_directory=base_directory)
@classmethod
def from_dict(cls, source_dict, base_directory):
return cls(base_directory=base_directory, **source_dict)
def to_dict(self) -> Dict:
attributes_to_dump = ['feature_spec', 'source_spec', 'channel_spec', 'metadata']
return {attr: self.__dict__[attr] for attr in attributes_to_dump}
def to_string(self):
return yaml.dump(self.to_dict())
def to_yaml(self, output_path=None):
if not output_path:
output_path = self.base_directory + '/feature_spec.yaml'
with open(output_path, 'w') as output_file:
print(yaml.dump(self.to_dict()), file=output_file)
def get_number_of_numerical_features(self) -> int:
numerical_features = self.channel_spec[NUMERICAL_CHANNEL]
return len(numerical_features)
def cat_positions_to_names(self, positions: List[int]):
# Ordering needs to correspond to the one in get_categorical_sizes()
feature_names = self.get_categorical_feature_names()
return [feature_names[i] for i in positions]
def get_categorical_feature_names(self):
""" Provides the categorical feature names. The returned order should me maintained."""
return self.channel_spec[CATEGORICAL_CHANNEL]
def get_categorical_sizes(self) -> List[int]:
"""For a given feature spec, this function is expected to return the sizes in the order corresponding to the
order in the channel_spec section """
categorical_features = self.get_categorical_feature_names()
cardinalities = [self.feature_spec[feature_name][CARDINALITY_SELECTOR] for feature_name in
categorical_features]
return cardinalities
def check_feature_spec(self):
# TODO check if cardinality fits in dtype, check if base directory is set
# TODO split into two checking general and model specific requirements
# check that mappings are the ones expected
mapping_name_list = list(self.source_spec.keys())
assert sorted(mapping_name_list) == sorted([TEST_MAPPING, TRAIN_MAPPING])
# check that channels are the ones expected
channel_name_list = list(self.channel_spec.keys())
assert sorted(channel_name_list) == sorted([CATEGORICAL_CHANNEL, NUMERICAL_CHANNEL, LABEL_CHANNEL])
categorical_features_list = self.channel_spec[CATEGORICAL_CHANNEL]
numerical_features_list = self.channel_spec[NUMERICAL_CHANNEL]
label_features_list = self.channel_spec[LABEL_CHANNEL]
set_of_categorical_features = set(categorical_features_list)
set_of_numerical_features = set(numerical_features_list)
# check that exactly one label feature is selected
assert len(label_features_list) == 1
label_feature_name = label_features_list[0]
# check that lists in channel spec contain unique names
assert sorted(list(set_of_categorical_features)) == sorted(categorical_features_list)
assert sorted(list(set_of_numerical_features)) == sorted(numerical_features_list)
# check that all features used in channel spec are exactly ones defined in feature_spec
feature_spec_features = list(self.feature_spec.keys())
channel_spec_features = list(set.union(set_of_categorical_features,
set_of_numerical_features,
{label_feature_name}))
assert sorted(feature_spec_features) == sorted(channel_spec_features)
# check that correct dtypes are provided for all features
for feature_dict in self.feature_spec.values():
assert DTYPE_SELECTOR in feature_dict
try:
np.dtype(feature_dict[DTYPE_SELECTOR])
except TypeError:
assert False, "Type not understood by numpy"
# check that categorical features have cardinality provided
for feature_name, feature_dict in self.feature_spec.items():
if feature_name in set_of_categorical_features:
assert CARDINALITY_SELECTOR in feature_dict
assert isinstance(feature_dict[CARDINALITY_SELECTOR], int)
for mapping_name in [TRAIN_MAPPING, TEST_MAPPING]:
mapping = self.source_spec[mapping_name]
mapping_features = set()
for chunk in mapping:
# check that chunk has the correct type
assert chunk[TYPE_SELECTOR] == SPLIT_BINARY
contained_features = chunk[FEATURES_SELECTOR]
containing_files = chunk[FILES_SELECTOR]
# check that features are unique in mapping
for feature in contained_features:
assert feature not in mapping_features
mapping_features.add(feature)
                # check that the chunk contains at least one feature
assert len(contained_features) >= 1
                # check that the chunk has exactly one file
assert len(containing_files) == 1
first_feature = contained_features[0]
if first_feature in set_of_categorical_features:
# check that each categorical feature is in a different file
assert len(contained_features) == 1
elif first_feature in set_of_numerical_features:
# check that numerical features are all in one chunk
assert sorted(contained_features) == sorted(numerical_features_list)
# check that ordering is exactly same as in channel spec - required for performance
assert contained_features == numerical_features_list
# check numerical dtype
for feature in contained_features:
assert np.dtype(self.feature_spec[feature][DTYPE_SELECTOR]) == np.float16
elif first_feature == label_feature_name:
# check that label feature is in a separate file
assert len(contained_features) == 1
# check label dtype
                    assert np.dtype(self.feature_spec[first_feature][DTYPE_SELECTOR]) == np.bool_
else:
assert False, "Feature of unknown type"
# check that all features appeared in mapping
assert sorted(mapping_features) == sorted(feature_spec_features)
@staticmethod
def get_default_feature_spec(number_of_numerical_features, categorical_feature_cardinalities):
numerical_feature_fstring = "num_{}"
categorical_feature_fstring = "cat_{}.bin"
label_feature_name = "label"
numerical_file_name = "numerical.bin"
categorical_file_fstring = "{}" # TODO remove .bin from feature name, add to file name
label_file_name = "label.bin"
number_of_categorical_features = len(categorical_feature_cardinalities)
numerical_feature_names = [numerical_feature_fstring.format(i) for i in range(number_of_numerical_features)]
categorical_feature_names = [categorical_feature_fstring.format(i) for i in
range(number_of_categorical_features)]
cat_feature_types = [get_categorical_feature_type(int(cat_size)) for cat_size in
categorical_feature_cardinalities]
feature_dict = {f_name: {DTYPE_SELECTOR: str(np.dtype(f_type)), CARDINALITY_SELECTOR: f_size}
for f_name, f_type, f_size in
zip(categorical_feature_names, cat_feature_types, categorical_feature_cardinalities)}
for f_name in numerical_feature_names:
feature_dict[f_name] = {DTYPE_SELECTOR: str(np.dtype(np.float16))}
        feature_dict[label_feature_name] = {DTYPE_SELECTOR: str(np.dtype(np.bool_))}
channel_spec = {CATEGORICAL_CHANNEL: categorical_feature_names,
NUMERICAL_CHANNEL: numerical_feature_names,
LABEL_CHANNEL: [label_feature_name]}
source_spec = {}
for filename in (TRAIN_MAPPING, TEST_MAPPING):
source_spec[filename] = []
dst_folder = filename
numerical_file_path = os.path.join(dst_folder, numerical_file_name)
source_spec[filename].append({TYPE_SELECTOR: SPLIT_BINARY,
FEATURES_SELECTOR: numerical_feature_names,
FILES_SELECTOR: [numerical_file_path]})
label_file_path = os.path.join(dst_folder, label_file_name)
source_spec[filename].append({TYPE_SELECTOR: SPLIT_BINARY,
FEATURES_SELECTOR: [label_feature_name],
FILES_SELECTOR: [label_file_path]})
for feature_name in categorical_feature_names:
categorical_file_name = categorical_file_fstring.format(feature_name)
categorical_file_path = os.path.join(dst_folder, categorical_file_name)
source_spec[filename].append({TYPE_SELECTOR: SPLIT_BINARY,
FEATURES_SELECTOR: [feature_name],
FILES_SELECTOR: [categorical_file_path]})
return FeatureSpec(feature_spec=feature_dict, source_spec=source_spec, channel_spec=channel_spec, metadata={})
def get_mapping_paths(self, mapping_name: str):
label_feature_name = self.channel_spec[LABEL_CHANNEL][0]
set_of_categorical_features = set(self.channel_spec[CATEGORICAL_CHANNEL])
set_of_numerical_features = set(self.channel_spec[NUMERICAL_CHANNEL])
label_path = None
numerical_path = None
categorical_paths = dict()
for chunk in self.source_spec[mapping_name]:
local_path = os.path.join(self.base_directory, chunk[FILES_SELECTOR][0])
if chunk[FEATURES_SELECTOR][0] in set_of_numerical_features:
numerical_path = local_path
elif chunk[FEATURES_SELECTOR][0] in set_of_categorical_features:
local_feature = chunk[FEATURES_SELECTOR][0]
categorical_paths[local_feature] = local_path
elif chunk[FEATURES_SELECTOR][0] == label_feature_name:
label_path = local_path
return label_path, numerical_path, categorical_paths
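# Usage sketch (not part of the original file). The path below is an assumption; any
# directory containing a valid feature_spec.yaml works the same way.
if __name__ == "__main__":
    spec = FeatureSpec.from_yaml("/data/dlrm/binary_dataset/feature_spec.yaml")
    spec.check_feature_spec()  # raises AssertionError if the spec is malformed
    print("numerical features:", spec.get_number_of_numerical_features())
    print("categorical cardinalities:", spec.get_categorical_sizes())
    print("train files:", spec.get_mapping_paths(TRAIN_MAPPING))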
| DeepLearningExamples-master | PyTorch/Recommendation/DLRM/dlrm/data/feature_spec.py |
# Copyright (c) 2021 NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import concurrent.futures
import math
import os
import queue
import torch
import numpy as np
from torch.utils.data import Dataset
from typing import Optional, Sequence, Tuple, List
from dlrm.data.defaults import CATEGORICAL_CHANNEL, NUMERICAL_CHANNEL, LABEL_CHANNEL, \
DTYPE_SELECTOR, FEATURES_SELECTOR, FILES_SELECTOR
from dlrm.data.feature_spec import FeatureSpec
class SyntheticDataset(Dataset):
"""Synthetic dataset version of criteo dataset."""
def __init__(
self,
num_entries: int,
device: str = 'cuda',
batch_size: int = 32768,
numerical_features: Optional[int] = None,
categorical_feature_sizes: Optional[Sequence[int]] = None # features are returned in this order
):
cat_features_count = len(categorical_feature_sizes) if categorical_feature_sizes is not None else 0
num_features_count = numerical_features if numerical_features is not None else 0
self._batches_per_epoch = math.ceil(num_entries / batch_size)
self._num_tensor = torch.rand(size=(batch_size, num_features_count), device=device, dtype=torch.float32) \
if num_features_count > 0 else None
self._label_tensor = torch.randint(low=0, high=2, size=(batch_size,), device=device, dtype=torch.float32)
self._cat_tensor = torch.cat(
[torch.randint(low=0, high=cardinality, size=(batch_size, 1), device=device, dtype=torch.long)
for cardinality in categorical_feature_sizes], dim=1) if cat_features_count > 0 else None
def __len__(self):
return self._batches_per_epoch
def __getitem__(self, idx: int):
if idx >= self._batches_per_epoch:
raise IndexError()
return self._num_tensor, self._cat_tensor, self._label_tensor
class ParametricDataset(Dataset):
def __init__(
self,
feature_spec: FeatureSpec,
mapping: str,
batch_size: int = 1,
numerical_features_enabled: bool = False,
categorical_features_to_read: List[str] = None, # This parameter dictates order of returned features
prefetch_depth: int = 10,
drop_last_batch: bool = False,
**kwargs
):
self._feature_spec = feature_spec
self._batch_size = batch_size
self._mapping = mapping
feature_spec.check_feature_spec()
categorical_features = feature_spec.channel_spec[CATEGORICAL_CHANNEL]
numerical_features = feature_spec.channel_spec[NUMERICAL_CHANNEL]
label_features = feature_spec.channel_spec[LABEL_CHANNEL]
set_of_categorical_features = set(categorical_features)
set_of_numerical_features = set(numerical_features)
set_of_label_features = set(label_features)
set_of_categoricals_to_read = set(categorical_features_to_read)
bytes_per_feature = {feature_name: np.dtype(feature_spec.feature_spec[feature_name][DTYPE_SELECTOR]).itemsize
for feature_name in feature_spec.feature_spec.keys()}
self._numerical_features_file = None
self._label_file = None
self._numerical_bytes_per_batch = bytes_per_feature[numerical_features[0]] * \
len(numerical_features) * batch_size
        self._label_bytes_per_batch = np.dtype(np.bool_).itemsize * batch_size
self._number_of_numerical_features = len(numerical_features)
chosen_mapping = feature_spec.source_spec[mapping]
categorical_feature_files = {}
root_path = feature_spec.base_directory
number_of_batches = None
for chunk in chosen_mapping:
contained_features = chunk[FEATURES_SELECTOR]
containing_file = chunk[FILES_SELECTOR][0]
first_feature = contained_features[0]
if first_feature in set_of_categorical_features:
# Load categorical
if first_feature not in set_of_categoricals_to_read:
continue # skip chunk
path_to_open = os.path.join(root_path, containing_file)
cat_file = os.open(path_to_open, os.O_RDONLY)
bytes_per_batch = bytes_per_feature[first_feature] * self._batch_size
batch_num_float = os.fstat(cat_file).st_size / bytes_per_batch
categorical_feature_files[first_feature] = cat_file
elif first_feature in set_of_numerical_features:
# Load numerical
if not numerical_features_enabled:
continue # skip chunk
path_to_open = os.path.join(root_path, containing_file)
self._numerical_features_file = os.open(path_to_open, os.O_RDONLY)
batch_num_float = os.fstat(self._numerical_features_file).st_size / self._numerical_bytes_per_batch
elif first_feature in set_of_label_features:
# Load label
path_to_open = os.path.join(root_path, containing_file)
self._label_file = os.open(path_to_open, os.O_RDONLY)
batch_num_float = os.fstat(self._label_file).st_size / self._label_bytes_per_batch
else:
raise ValueError("Unknown chunk type")
local_number_of_batches = math.ceil(batch_num_float) if not drop_last_batch else math.floor(batch_num_float)
if number_of_batches is not None:
if local_number_of_batches != number_of_batches:
raise ValueError("Size mismatch in data files")
else:
number_of_batches = local_number_of_batches
self._categorical_features_files = None
if len(categorical_features_to_read) > 0:
self._categorical_features_files = [categorical_feature_files[feature] for feature in
categorical_features_to_read]
self._categorical_bytes_per_batch = [bytes_per_feature[feature] * self._batch_size for feature in
categorical_features_to_read]
self._categorical_types = [feature_spec.feature_spec[feature][DTYPE_SELECTOR] for feature in
categorical_features_to_read]
self._num_entries = number_of_batches
self._prefetch_depth = min(prefetch_depth, self._num_entries)
self._prefetch_queue = queue.Queue()
self._executor = concurrent.futures.ThreadPoolExecutor(max_workers=1)
def __len__(self):
return self._num_entries
def __getitem__(self, idx: int):
""" Numerical features are returned in the order they appear in the channel spec section
For performance reasons, this is required to be the order they are saved in, as specified
by the relevant chunk in source spec.
Categorical features are returned in the order they appear in the channel spec section """
if idx >= self._num_entries:
raise IndexError()
if self._prefetch_depth <= 1:
return self._get_item(idx)
# At the start, fill up the prefetching queue
if idx == 0:
for i in range(self._prefetch_depth):
self._prefetch_queue.put(self._executor.submit(self._get_item, (i)))
# Extend the prefetching window by one if not at the end of the dataset
if idx < self._num_entries - self._prefetch_depth:
self._prefetch_queue.put(self._executor.submit(self._get_item, (idx + self._prefetch_depth)))
return self._prefetch_queue.get().result()
def _get_item(self, idx: int) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[torch.Tensor]]:
click = self._get_label(idx)
numerical_features = self._get_numerical_features(idx)
categorical_features = self._get_categorical_features(idx)
return numerical_features, categorical_features, click
def _get_label(self, idx: int) -> torch.Tensor:
raw_label_data = os.pread(self._label_file, self._label_bytes_per_batch,
idx * self._label_bytes_per_batch)
        array = np.frombuffer(raw_label_data, dtype=np.bool_)
return torch.from_numpy(array).to(torch.float32)
def _get_numerical_features(self, idx: int) -> Optional[torch.Tensor]:
if self._numerical_features_file is None:
return None
raw_numerical_data = os.pread(self._numerical_features_file, self._numerical_bytes_per_batch,
idx * self._numerical_bytes_per_batch)
array = np.frombuffer(raw_numerical_data, dtype=np.float16)
return torch.from_numpy(array).view(-1, self._number_of_numerical_features)
def _get_categorical_features(self, idx: int) -> Optional[torch.Tensor]:
if self._categorical_features_files is None:
return None
categorical_features = []
for cat_bytes, cat_type, cat_file in zip(self._categorical_bytes_per_batch,
self._categorical_types,
self._categorical_features_files):
raw_cat_data = os.pread(cat_file, cat_bytes, idx * cat_bytes)
array = np.frombuffer(raw_cat_data, dtype=cat_type)
tensor = torch.from_numpy(array).unsqueeze(1).to(torch.long)
categorical_features.append(tensor)
return torch.cat(categorical_features, dim=1)
def __del__(self):
data_files = [self._label_file, self._numerical_features_file]
if self._categorical_features_files is not None:
data_files += self._categorical_features_files
for data_file in data_files:
if data_file is not None:
os.close(data_file)
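# Usage sketch (not part of the original file). The path is an assumption; "train" is the
# value of TRAIN_MAPPING from dlrm.data.defaults.
if __name__ == "__main__":
    spec = FeatureSpec.from_yaml("/data/dlrm/binary_dataset/feature_spec.yaml")
    dataset = ParametricDataset(
        feature_spec=spec,
        mapping="train",
        batch_size=2048,
        numerical_features_enabled=True,
        categorical_features_to_read=spec.get_categorical_feature_names(),
    )
    numerical, categorical, label = dataset[0]  # each index yields one whole batch
    print(numerical.shape, categorical.shape, label.shape)

    # SyntheticDataset needs no files at all - handy for benchmarking without real data.
    synthetic = SyntheticDataset(num_entries=2 ** 15, batch_size=2048,
                                 numerical_features=spec.get_number_of_numerical_features(),
                                 categorical_feature_sizes=spec.get_categorical_sizes())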
| DeepLearningExamples-master | PyTorch/Recommendation/DLRM/dlrm/data/datasets.py |
| DeepLearningExamples-master | PyTorch/Recommendation/DLRM/dlrm/data/__init__.py |
# Copyright (c) 2021 NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import functools
from typing import Tuple, Optional, Callable, Dict
import torch
from torch.utils.data import Dataset, Sampler, RandomSampler
from dlrm.data.datasets import SyntheticDataset, ParametricDataset
from dlrm.data.defaults import TEST_MAPPING, TRAIN_MAPPING
from dlrm.data.feature_spec import FeatureSpec
from dlrm.data.samplers import RandomDistributedSampler
from dlrm.data.utils import collate_split_tensors
from dlrm.utils.distributed import is_distributed, get_rank
class DatasetFactory:
def __init__(self, flags, device_mapping: Optional[Dict] = None):
self._flags = flags
self._device_mapping = device_mapping
def create_collate_fn(self) -> Optional[Callable]:
raise NotImplementedError()
def create_datasets(self) -> Tuple[Dataset, Dataset]:
raise NotImplementedError()
def create_sampler(self, dataset: Dataset) -> Optional[Sampler]:
return RandomDistributedSampler(dataset) if is_distributed() else RandomSampler(dataset)
def create_data_loader(
self,
dataset,
collate_fn: Optional[Callable] = None,
sampler: Optional[Sampler] = None):
return torch.utils.data.DataLoader(
dataset, collate_fn=collate_fn, sampler=sampler, batch_size=None,
num_workers=0, pin_memory=False
)
class SyntheticGpuDatasetFactory(DatasetFactory):
def __init__(self, flags, local_numerical_features_num, local_categorical_feature_sizes):
self.local_numerical_features = local_numerical_features_num
self.local_categorical_features = local_categorical_feature_sizes
super().__init__(flags)
def create_collate_fn(self) -> Optional[Callable]:
return None
def create_sampler(self, dataset) -> Optional[Sampler]:
return None
def create_datasets(self) -> Tuple[Dataset, Dataset]:
flags = self._flags
dataset_train = SyntheticDataset(num_entries=flags.synthetic_dataset_num_entries,
batch_size=flags.batch_size,
numerical_features=self.local_numerical_features,
categorical_feature_sizes=self.local_categorical_features)
dataset_test = SyntheticDataset(num_entries=flags.synthetic_dataset_num_entries,
batch_size=flags.test_batch_size,
numerical_features=self.local_numerical_features,
categorical_feature_sizes=self.local_categorical_features)
return dataset_train, dataset_test
class ParametricDatasetFactory(DatasetFactory):
def __init__(self, flags, feature_spec: FeatureSpec, numerical_features_enabled, categorical_features_to_read):
super().__init__(flags)
self._base_device = flags.base_device
self._train_batch_size = flags.batch_size
self._test_batch_size = flags.test_batch_size
self._feature_spec = feature_spec
self._numerical_features_enabled = numerical_features_enabled
self._categorical_features_to_read = categorical_features_to_read
def create_collate_fn(self):
orig_stream = torch.cuda.current_stream() if self._base_device == 'cuda' else None
return functools.partial(
collate_split_tensors,
device=self._base_device,
orig_stream=orig_stream,
numerical_type=torch.float32
)
def create_datasets(self) -> Tuple[Dataset, Dataset]:
# prefetching is currently unsupported if using the batch-wise shuffle
prefetch_depth = 0 if self._flags.shuffle_batch_order else 10
dataset_train = ParametricDataset(
feature_spec=self._feature_spec,
mapping=TRAIN_MAPPING,
batch_size=self._train_batch_size,
numerical_features_enabled=self._numerical_features_enabled,
categorical_features_to_read=self._categorical_features_to_read,
prefetch_depth=prefetch_depth
)
dataset_test = ParametricDataset(
feature_spec=self._feature_spec,
mapping=TEST_MAPPING,
batch_size=self._test_batch_size,
numerical_features_enabled=self._numerical_features_enabled,
categorical_features_to_read=self._categorical_features_to_read,
prefetch_depth=prefetch_depth
)
return dataset_train, dataset_test
def create_dataset_factory(flags, feature_spec: FeatureSpec, device_mapping: Optional[dict] = None) -> DatasetFactory:
"""
By default each dataset can be used in single GPU or distributed setting - please keep that in mind when adding
new datasets. Distributed case requires selection of categorical features provided in `device_mapping`
(see `DatasetFactory#create_collate_fn`).
    :param flags: parsed command-line flags
:param device_mapping: dict, information about model bottom mlp and embeddings devices assignment
    :return: a DatasetFactory subclass matching flags.dataset_type
"""
dataset_type = flags.dataset_type
num_numerical_features = feature_spec.get_number_of_numerical_features()
if is_distributed() or device_mapping:
assert device_mapping is not None, "Distributed dataset requires information about model device mapping."
rank = get_rank()
local_categorical_positions = device_mapping["embedding"][rank]
numerical_features_enabled = device_mapping["bottom_mlp"] == rank
else:
local_categorical_positions = list(range(len(feature_spec.get_categorical_feature_names())))
numerical_features_enabled = True
if dataset_type == "parametric":
local_categorical_names = feature_spec.cat_positions_to_names(local_categorical_positions)
return ParametricDatasetFactory(flags=flags, feature_spec=feature_spec,
numerical_features_enabled=numerical_features_enabled,
categorical_features_to_read=local_categorical_names
)
if dataset_type == "synthetic_gpu":
local_numerical_features = num_numerical_features if numerical_features_enabled else 0
world_categorical_sizes = feature_spec.get_categorical_sizes()
local_categorical_sizes = [world_categorical_sizes[i] for i in local_categorical_positions]
return SyntheticGpuDatasetFactory(flags, local_numerical_features_num=local_numerical_features,
local_categorical_feature_sizes=local_categorical_sizes)
raise NotImplementedError(f"unknown dataset type: {dataset_type}")
| DeepLearningExamples-master | PyTorch/Recommendation/DLRM/dlrm/data/factories.py |
# Copyright (c) 2021 NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from typing import Tuple, Optional, List
import numpy as np
import torch
from torch import Tensor
from torch.cuda import Stream
from torch.utils.data import Dataset, DataLoader
import tqdm
from dlrm.data.defaults import TRAIN_MAPPING, TEST_MAPPING, DTYPE_SELECTOR
from dlrm.data.feature_spec import FeatureSpec
def collate_split_tensors(
tensors: Tuple[Tensor, Tensor, Tensor],
device: str,
orig_stream: Stream,
numerical_type: torch.dtype = torch.float32
):
tensors = [tensor.to(device, non_blocking=True) if tensor is not None else None for tensor in
tensors]
if device == 'cuda':
for tensor in tensors:
if tensor is not None:
tensor.record_stream(orig_stream)
numerical_features, categorical_features, click = tensors
if numerical_features is not None:
numerical_features = numerical_features.to(numerical_type)
return numerical_features, categorical_features, click
def collate_array(
array: np.array,
device: str,
orig_stream: Stream,
num_numerical_features: int,
selected_categorical_features: Optional[Tensor] = None
):
# numerical features are encoded as float32
numerical_features = array[:, 1:1 + num_numerical_features].view(dtype=np.float32)
numerical_features = torch.from_numpy(numerical_features)
categorical_features = torch.from_numpy(array[:, 1 + num_numerical_features:])
click = torch.from_numpy(array[:, 0])
categorical_features = categorical_features.to(device, non_blocking=True).to(torch.long)
numerical_features = numerical_features.to(device, non_blocking=True)
click = click.to(torch.float32).to(device, non_blocking=True)
if selected_categorical_features is not None:
categorical_features = categorical_features[:, selected_categorical_features]
if device == 'cuda':
numerical_features.record_stream(orig_stream)
categorical_features.record_stream(orig_stream)
click.record_stream(orig_stream)
return numerical_features, categorical_features, click
def write_dataset_to_disk(dataset_train: Dataset, dataset_test: Dataset, feature_spec: FeatureSpec,
saving_batch_size=512) -> None:
feature_spec.check_feature_spec() # We rely on the feature spec being properly formatted
categorical_features_list = feature_spec.get_categorical_feature_names()
categorical_features_types = [feature_spec.feature_spec[feature_name][DTYPE_SELECTOR]
for feature_name in categorical_features_list]
number_of_numerical_features = feature_spec.get_number_of_numerical_features()
number_of_categorical_features = len(categorical_features_list)
for mapping_name, dataset in zip((TRAIN_MAPPING, TEST_MAPPING),
(dataset_train, dataset_test)):
file_streams = []
label_path, numerical_path, categorical_paths = feature_spec.get_mapping_paths(mapping_name)
try:
os.makedirs(os.path.dirname(numerical_path), exist_ok=True)
numerical_f = open(numerical_path, "wb+")
file_streams.append(numerical_f)
os.makedirs(os.path.dirname(label_path), exist_ok=True)
label_f = open(label_path, 'wb+')
file_streams.append(label_f)
categorical_fs = []
for feature_name in categorical_features_list:
local_path = categorical_paths[feature_name]
os.makedirs(os.path.dirname(local_path), exist_ok=True)
fs = open(local_path, 'wb+')
categorical_fs.append(fs)
file_streams.append(fs)
for numerical, categorical, label in tqdm.tqdm(
DataLoader(dataset, saving_batch_size),
desc=mapping_name + " dataset saving",
unit_scale=saving_batch_size
):
assert (numerical.shape[-1] == number_of_numerical_features)
assert (categorical.shape[-1] == number_of_categorical_features)
numerical_f.write(numerical.to(torch.float16).cpu().numpy().tobytes())
label_f.write(label.to(torch.bool).cpu().numpy().tobytes())
for cat_idx, cat_feature_type in enumerate(categorical_features_types):
categorical_fs[cat_idx].write(
categorical[:, :, cat_idx].cpu().numpy().astype(cat_feature_type).tobytes())
finally:
for stream in file_streams:
stream.close()
feature_spec.to_yaml()
def prefetcher(load_iterator, prefetch_stream):
def _prefetch():
with torch.cuda.stream(prefetch_stream):
try:
data_batch = next(load_iterator)
except StopIteration:
return None
return data_batch
next_data_batch = _prefetch()
while next_data_batch is not None:
torch.cuda.current_stream().wait_stream(prefetch_stream)
data_batch = next_data_batch
next_data_batch = _prefetch()
yield data_batch
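# Usage sketch for prefetcher (not part of the original file): overlap loading of the next
# batch with compute by issuing it on a side CUDA stream.
#     prefetch_stream = torch.cuda.Stream()
#     for numerical, categorical, click in prefetcher(iter(data_loader), prefetch_stream):
#         ...  # forward/backward runs on the default stream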
def get_embedding_sizes(fspec: FeatureSpec, max_table_size: Optional[int]) -> List[int]:
if max_table_size is not None:
return [min(s, max_table_size) for s in fspec.get_categorical_sizes()]
else:
return fspec.get_categorical_sizes()
| DeepLearningExamples-master | PyTorch/Recommendation/DLRM/dlrm/data/utils.py |
# Copyright (c) 2021 NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
CATEGORICAL_CHANNEL = "categorical"
NUMERICAL_CHANNEL = "numerical"
LABEL_CHANNEL = "label"
SPLIT_BINARY = "split_binary"
TRAIN_MAPPING = "train"
TEST_MAPPING = "test"
TYPE_SELECTOR = "type"
FEATURES_SELECTOR = "features"
FILES_SELECTOR = "files"
DTYPE_SELECTOR = "dtype"
CARDINALITY_SELECTOR = "cardinality"
def get_categorical_feature_type(size: int):
"""This function works both when max value and cardinality is passed.
Consistency by the user is required"""
types = (np.int8, np.int16, np.int32)
for numpy_type in types:
if size < np.iinfo(numpy_type).max:
return numpy_type
raise RuntimeError(f"Categorical feature of size {size} is too big for defined types")
| DeepLearningExamples-master | PyTorch/Recommendation/DLRM/dlrm/data/defaults.py |
# Copyright (c) 2021 NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import torch
from torch.utils.data import RandomSampler
from dlrm.utils.distributed import get_local_rank
class RandomDistributedSampler(RandomSampler):
_SAMPLE_FILE = "/tmp/dlrm_training_sample.npy"
def __iter__(self):
"""
        To guarantee that all ranks use the same permutation, it is generated on rank 0
        and synced to the other ranks by writing it to disk.
"""
if get_local_rank() == 0:
np.save(self._SAMPLE_FILE, np.array(list(super().__iter__())))
torch.distributed.barrier()
sample = np.load(self._SAMPLE_FILE)
return iter(sample)
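# Usage sketch (not part of the original file): this sampler is meant to be passed to a
# DataLoader in multi-GPU runs, as DatasetFactory.create_sampler does in factories.py:
#     sampler = RandomDistributedSampler(dataset) if is_distributed() else RandomSampler(dataset)
#     loader = torch.utils.data.DataLoader(dataset, sampler=sampler, batch_size=None)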
| DeepLearningExamples-master | PyTorch/Recommendation/DLRM/dlrm/data/samplers.py |
# Copyright (c) 2018, deepakn94, codyaustun, robieta. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# -----------------------------------------------------------------------
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
import os
from feature_spec import FeatureSpec
from neumf_constants import USER_CHANNEL_NAME, ITEM_CHANNEL_NAME, LABEL_CHANNEL_NAME, TEST_SAMPLES_PER_SERIES
class TorchTensorDataset:
""" Warning! This dataset/loader uses torch.load. Torch.load implicitly uses pickle. Pickle is insecure.
It is trivial to achieve arbitrary code execution using a prepared pickle payload. Only unpickle data you trust."""
def __init__(self, feature_spec: FeatureSpec, mapping_name: str, args):
self.local_rank = args.local_rank
self.mapping_name = mapping_name
self.features = dict()
self.feature_spec = feature_spec
self._load_features()
def _load_features(self):
chunks = self.feature_spec.source_spec[self.mapping_name]
for chunk in chunks:
assert chunk['type'] == 'torch_tensor', "Only torch_tensor files supported in this loader"
files_list = chunk['files']
assert len(files_list) == 1, "Only one file per chunk supported in this loader"
file_relative_path = files_list[0]
path_to_load = os.path.join(self.feature_spec.base_directory, file_relative_path)
chunk_data = torch.load(path_to_load, map_location=torch.device('cuda:{}'.format(self.local_rank)))
running_pos = 0
for feature_name in chunk['features']:
next_running_pos = running_pos + 1
feature_data = chunk_data[:, running_pos:next_running_pos]
# This is needed because slicing instead of indexing keeps the data 2-dimensional
feature_data = feature_data.reshape(-1, 1)
running_pos = next_running_pos
self.features[feature_name] = feature_data
class TestDataLoader:
def __init__(self, dataset: TorchTensorDataset, args):
self.dataset = dataset
self.feature_spec = dataset.feature_spec
self.channel_spec = self.feature_spec.channel_spec
self.samples_in_series = self.feature_spec.metadata[TEST_SAMPLES_PER_SERIES]
self.raw_dataset_length = None # First feature loaded sets this. Total length before splitting across cards
self.data = dict()
self.world_size = args.world_size
self.local_rank = args.local_rank
self.batch_size = args.valid_batch_size
self._build_channel_dict()
self._deduplication_augmentation()
self._split_between_devices()
self._split_into_batches()
def _build_channel_dict(self):
for channel_name, channel_features in self.channel_spec.items():
channel_tensors = dict()
for feature_name in channel_features:
channel_tensors[feature_name] = self.dataset.features[feature_name]
if not self.raw_dataset_length:
self.raw_dataset_length = channel_tensors[feature_name].shape[0]
else:
assert self.raw_dataset_length == channel_tensors[feature_name].shape[0]
self.data[channel_name] = channel_tensors
def _deduplication_augmentation(self):
# Augmentation
# This adds a duplication mask tensor.
# This is here to exactly replicate the MLPerf training regime. Moving this deduplication to the candidate item
# generation stage increases the real diversity of the candidates, which makes the ranking task harder
# and results in a drop in HR@10 of approx 0.01. This has been deemed unacceptable (May 2021).
# We need the duplication mask to determine if a given item should be skipped during ranking
# If an item with label 1 is duplicated in the sampled ones, we need to be careful to not mark the one with
# label 1 as a duplicate. If an item appears repeatedly only with label 1, no duplicates are marked.
# To easily compute candidates, we sort the items. This will impact the distribution of examples between
# devices, but should not influence the numerics or performance meaningfully.
# We need to assure that the positive item, which we don't want to mark as a duplicate, appears first.
# We do this by adding labels as a secondary factor
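        # Worked example of the sort key used below: items [3, 5, 3] with labels [1, 0, 0]
        # give sorting_weights [2.5, 5.0, 3.0], so the sorted order is (3, label 1),
        # (3, label 0), (5, label 0) and the duplication mask becomes [False, True, False]:
        # the positive 3 comes first and only its negative copy is marked as a duplicate.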
# Reshape the tensors to have items for a given user in a single row
user_feature_name = self.channel_spec[USER_CHANNEL_NAME][0]
item_feature_name = self.channel_spec[ITEM_CHANNEL_NAME][0]
label_feature_name = self.channel_spec[LABEL_CHANNEL_NAME][0]
self.ignore_mask_channel_name = 'mask_ch'
self.ignore_mask_feature_name = 'mask'
items = self.data[ITEM_CHANNEL_NAME][item_feature_name].view(-1, self.samples_in_series)
users = self.data[USER_CHANNEL_NAME][user_feature_name].view(-1, self.samples_in_series)
labels = self.data[LABEL_CHANNEL_NAME][label_feature_name].view(-1, self.samples_in_series)
sorting_weights = items.float() - labels.float() * 0.5
_, indices = torch.sort(sorting_weights)
# The gather reorders according to the indices decided by the sort above
sorted_items = torch.gather(items, 1, indices)
sorted_labels = torch.gather(labels, 1, indices)
sorted_users = torch.gather(users, 1, indices)
dup_mask = sorted_items[:, 0:-1] == sorted_items[:, 1:] # This says if a given item is equal to the next one
dup_mask = dup_mask.type(torch.bool)
# The first item for a given user can never be a duplicate:
dup_mask = torch.cat((torch.zeros_like(dup_mask[:, 0:1]), dup_mask), dim=1)
# Reshape them back
self.data[ITEM_CHANNEL_NAME][item_feature_name] = sorted_items.view(-1, 1)
self.data[USER_CHANNEL_NAME][user_feature_name] = sorted_users.view(-1, 1)
self.data[LABEL_CHANNEL_NAME][label_feature_name] = sorted_labels.view(-1, 1)
self.data[self.ignore_mask_channel_name] = dict()
self.data[self.ignore_mask_channel_name][self.ignore_mask_feature_name] = dup_mask.view(-1, 1)
def _split_between_devices(self):
if self.world_size > 1:
# DO NOT REPLACE WITH torch.chunk (number of returned chunks can silently be lower than requested).
# It would break compatibility with small datasets.
num_test_cases = self.raw_dataset_length / self.samples_in_series
smaller_batch = (int(num_test_cases // self.world_size)) * self.samples_in_series
bigger_batch = smaller_batch + self.samples_in_series
remainder = int(num_test_cases % self.world_size)
samples_per_card = [bigger_batch] * remainder + [smaller_batch] * (self.world_size - remainder)
for channel_name, channel_dict in self.data.items():
for feature_name, feature_tensor in channel_dict.items():
channel_dict[feature_name] = \
channel_dict[feature_name].split(samples_per_card)[self.local_rank]
def _split_into_batches(self):
self.batches = None
# This is the structure of each batch, waiting to be copied and filled in with data
for channel_name, channel_dict in self.data.items():
for feature_name, feature_tensor in channel_dict.items():
feature_batches = feature_tensor.view(-1).split(self.batch_size)
if not self.batches:
self.batches = list(
{channel_name: dict() for channel_name in self.data.keys()} for _ in feature_batches)
for pos, feature_batch_data in enumerate(feature_batches):
self.batches[pos][channel_name][feature_name] = feature_batch_data
def get_epoch_data(self):
return self.batches
def get_ignore_mask(self):
return self.data[self.ignore_mask_channel_name][self.ignore_mask_feature_name]
class TrainDataloader:
def __init__(self, dataset: TorchTensorDataset, args):
self.dataset = dataset
self.local_rank = args.local_rank
if args.distributed:
self.local_batch = args.batch_size // args.world_size
else:
self.local_batch = args.batch_size
self.feature_spec = dataset.feature_spec
self.channel_spec = self.feature_spec.channel_spec
self.negative_samples = args.negative_samples
self.data = dict()
self.raw_dataset_length = None # first feature loaded sets this
self._build_channel_dict()
self.length_after_augmentation = self.raw_dataset_length * (self.negative_samples + 1)
samples_per_worker = self.length_after_augmentation / args.world_size
self.samples_begin = int(samples_per_worker * args.local_rank)
self.samples_end = int(samples_per_worker * (args.local_rank + 1))
def _build_channel_dict(self):
for channel_name, channel_features in self.channel_spec.items():
channel_tensors = dict()
for feature_name in channel_features:
channel_tensors[feature_name] = self.dataset.features[feature_name]
if not self.raw_dataset_length:
self.raw_dataset_length = channel_tensors[feature_name].shape[0]
else:
assert self.raw_dataset_length == channel_tensors[feature_name].shape[0]
self.data[channel_name] = channel_tensors
def get_epoch_data(self):
        # Augment by appending args.negative_samples copies of the original set, now with random items and negative labels
augmented_data = {channel_name: dict() for channel_name in self.data.keys()}
user_feature_name = self.channel_spec[USER_CHANNEL_NAME][0]
item_feature_name = self.channel_spec[ITEM_CHANNEL_NAME][0]
label_feature_name = self.channel_spec[LABEL_CHANNEL_NAME][0]
# USER
user_tensor = self.data[USER_CHANNEL_NAME][user_feature_name]
neg_users = user_tensor.repeat(self.negative_samples, 1)
augmented_users = torch.cat((user_tensor, neg_users))
augmented_data[USER_CHANNEL_NAME][user_feature_name] = augmented_users
del neg_users
# ITEM
item_tensor = self.data[ITEM_CHANNEL_NAME][item_feature_name]
neg_items = torch.empty_like(item_tensor).repeat(self.negative_samples, 1) \
.random_(0, self.feature_spec.feature_spec[item_feature_name]['cardinality'])
augmented_items = torch.cat((item_tensor, neg_items))
augmented_data[ITEM_CHANNEL_NAME][item_feature_name] = augmented_items
del neg_items
# LABEL
label_tensor = self.data[LABEL_CHANNEL_NAME][label_feature_name]
neg_label = torch.zeros_like(label_tensor, dtype=torch.float32).repeat(self.negative_samples, 1)
augmented_labels = torch.cat((label_tensor, neg_label))
del neg_label
augmented_data[LABEL_CHANNEL_NAME][label_feature_name] = augmented_labels
# Labels are not shuffled between cards.
# This replicates previous behaviour.
epoch_indices = torch.randperm(self.samples_end - self.samples_begin, device='cuda:{}'.format(self.local_rank))
epoch_indices += self.samples_begin
batches = None
for channel_name, channel_dict in augmented_data.items():
for feature_name, feature_tensor in channel_dict.items():
# the last batch will almost certainly be smaller, drop it
                # Warning: this may not work correctly if there is only one batch
feature_batches = feature_tensor.view(-1)[epoch_indices].split(self.local_batch)[:-1]
if not batches:
batches = list({channel_name: dict() for channel_name in self.data.keys()} for _ in feature_batches)
for pos, feature_batch_data in enumerate(feature_batches):
batches[pos][channel_name][feature_name] = feature_batch_data
return batches
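# Usage sketch (not part of the original file); it mirrors how ncf.py drives these loaders.
# `args` is the parsed command-line namespace and the path is an assumption.
#     feature_spec = FeatureSpec.from_yaml("/data/ncf/feature_spec.yaml")
#     train_set = TorchTensorDataset(feature_spec, mapping_name='train', args=args)
#     train_loader = TrainDataloader(train_set, args)
#     for batch in train_loader.get_epoch_data():
#         users = batch[USER_CHANNEL_NAME]   # dict: feature name -> tensor for this batch
#         items = batch[ITEM_CHANNEL_NAME]
#         labels = batch[LABEL_CHANNEL_NAME]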
| DeepLearningExamples-master | PyTorch/Recommendation/NCF/dataloading.py |
# Copyright (c) 2018, deepakn94, codyaustun, robieta. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# -----------------------------------------------------------------------
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch.jit
from apex.optimizers import FusedAdam
import os
import math
import time
import numpy as np
from argparse import ArgumentParser
import torch
import torch.nn as nn
import utils
import dataloading
from neumf import NeuMF
from feature_spec import FeatureSpec
from neumf_constants import USER_CHANNEL_NAME, ITEM_CHANNEL_NAME, LABEL_CHANNEL_NAME
import dllogger
def synchronized_timestamp():
torch.cuda.synchronize()
return time.time()
def parse_args():
parser = ArgumentParser(description="Train a Neural Collaborative"
" Filtering model")
parser.add_argument('--data', type=str,
help='Path to the directory containing the feature specification yaml')
parser.add_argument('--feature_spec_file', type=str, default='feature_spec.yaml',
help='Name of the feature specification file or path relative to the data directory.')
parser.add_argument('-e', '--epochs', type=int, default=30,
help='Number of epochs for training')
parser.add_argument('-b', '--batch_size', type=int, default=2 ** 20,
help='Number of examples for each iteration. This will be divided by the number of devices')
parser.add_argument('--valid_batch_size', type=int, default=2 ** 20,
help='Number of examples in each validation chunk. This will be the maximum size of a batch '
'on each device.')
parser.add_argument('-f', '--factors', type=int, default=64,
help='Number of predictive factors')
parser.add_argument('--layers', nargs='+', type=int,
default=[256, 256, 128, 64],
help='Sizes of hidden layers for MLP')
parser.add_argument('-n', '--negative_samples', type=int, default=4,
help='Number of negative examples per interaction')
parser.add_argument('-l', '--learning_rate', type=float, default=0.0045,
help='Learning rate for optimizer')
parser.add_argument('-k', '--topk', type=int, default=10,
help='Rank for test examples to be considered a hit')
parser.add_argument('--seed', '-s', type=int, default=None,
help='Manually set random seed for torch')
parser.add_argument('--threshold', '-t', type=float, default=1.0,
help='Stop training early at threshold')
parser.add_argument('--beta1', '-b1', type=float, default=0.25,
help='Beta1 for Adam')
parser.add_argument('--beta2', '-b2', type=float, default=0.5,
                        help='Beta2 for Adam')
parser.add_argument('--eps', type=float, default=1e-8,
help='Epsilon for Adam')
parser.add_argument('--dropout', type=float, default=0.5,
help='Dropout probability, if equal to 0 will not use dropout at all')
parser.add_argument('--checkpoint_dir', default='', type=str,
help='Path to the directory storing the checkpoint file, '
'passing an empty path disables checkpoint saving')
parser.add_argument('--load_checkpoint_path', default=None, type=str,
help='Path to the checkpoint file to be loaded before training/evaluation')
parser.add_argument('--mode', choices=['train', 'test'], default='train', type=str,
help='Passing "test" will only run a single evaluation; '
'otherwise, full training will be performed')
parser.add_argument('--grads_accumulated', default=1, type=int,
help='Number of gradients to accumulate before performing an optimization step')
parser.add_argument('--amp', action='store_true', help='Enable mixed precision training')
parser.add_argument('--log_path', default='log.json', type=str,
help='Path for the JSON training log')
return parser.parse_args()
def init_distributed(args):
args.world_size = int(os.environ.get('WORLD_SIZE', default=1))
args.distributed = args.world_size > 1
if args.distributed:
args.local_rank = int(os.environ['LOCAL_RANK'])
'''
Set cuda device so everything is done on the right GPU.
THIS MUST BE DONE AS SOON AS POSSIBLE.
'''
torch.cuda.set_device(args.local_rank)
'''Initialize distributed communication'''
torch.distributed.init_process_group(backend='nccl',
init_method='env://')
else:
args.local_rank = 0
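# Launch sketch (not part of the original file): init_distributed relies on the WORLD_SIZE
# and LOCAL_RANK environment variables that torchrun sets, e.g.
#     torchrun --nproc_per_node=8 ncf.py --data /data/ncf/cache/ml-25m   # data path is an assumption
# Running plain `python ncf.py --data ...` falls back to single-GPU mode.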
def val_epoch(model, dataloader: dataloading.TestDataLoader, k, distributed=False, world_size=1):
model.eval()
user_feature_name = dataloader.channel_spec[USER_CHANNEL_NAME][0]
item_feature_name = dataloader.channel_spec[ITEM_CHANNEL_NAME][0]
label_feature_name = dataloader.channel_spec[LABEL_CHANNEL_NAME][0]
with torch.no_grad():
p = []
labels_list = []
losses = []
for batch_dict in dataloader.get_epoch_data():
user_batch = batch_dict[USER_CHANNEL_NAME][user_feature_name]
item_batch = batch_dict[ITEM_CHANNEL_NAME][item_feature_name]
label_batch = batch_dict[LABEL_CHANNEL_NAME][label_feature_name]
prediction_batch = model(user_batch, item_batch, sigmoid=True).detach()
loss_batch = torch.nn.functional.binary_cross_entropy(input=prediction_batch.reshape([-1]),
target=label_batch)
losses.append(loss_batch)
p.append(prediction_batch)
labels_list.append(label_batch)
ignore_mask = dataloader.get_ignore_mask().view(-1, dataloader.samples_in_series)
ratings = torch.cat(p).view(-1, dataloader.samples_in_series)
ratings[ignore_mask] = -1
labels = torch.cat(labels_list).view(-1, dataloader.samples_in_series)
del p, labels_list
top_indices = torch.topk(ratings, k)[1]
# Positive items are always first in a given series
labels_of_selected = torch.gather(labels, 1, top_indices)
ifzero = (labels_of_selected == 1)
hits = ifzero.sum()
ndcg = (math.log(2) / (torch.nonzero(ifzero)[:, 1].view(-1).to(torch.float) + 2).log_()).sum()
total_validation_loss = torch.mean(torch.stack(losses, dim=0))
# torch.nonzero may cause host-device synchronization
if distributed:
torch.distributed.all_reduce(hits, op=torch.distributed.ReduceOp.SUM)
torch.distributed.all_reduce(ndcg, op=torch.distributed.ReduceOp.SUM)
torch.distributed.all_reduce(total_validation_loss, op=torch.distributed.ReduceOp.SUM)
total_validation_loss = total_validation_loss / world_size
num_test_cases = dataloader.raw_dataset_length / dataloader.samples_in_series
hr = hits.item() / num_test_cases
ndcg = ndcg.item() / num_test_cases
model.train()
return hr, ndcg, total_validation_loss
def main():
args = parse_args()
init_distributed(args)
if args.local_rank == 0:
dllogger.init(backends=[dllogger.JSONStreamBackend(verbosity=dllogger.Verbosity.VERBOSE,
filename=args.log_path),
dllogger.StdOutBackend(verbosity=dllogger.Verbosity.VERBOSE)])
else:
dllogger.init(backends=[])
dllogger.metadata('train_throughput', {"name": 'train_throughput', 'unit': 'samples/s', 'format': ":.3e"})
dllogger.metadata('best_train_throughput', {'unit': 'samples/s'})
dllogger.metadata('mean_train_throughput', {'unit': 'samples/s'})
dllogger.metadata('eval_throughput', {"name": 'eval_throughput', 'unit': 'samples/s', 'format': ":.3e"})
dllogger.metadata('best_eval_throughput', {'unit': 'samples/s'})
dllogger.metadata('mean_eval_throughput', {'unit': 'samples/s'})
dllogger.metadata('train_epoch_time', {"name": 'train_epoch_time', 'unit': 's', 'format': ":.3f"})
dllogger.metadata('validation_epoch_time', {"name": 'validation_epoch_time', 'unit': 's', 'format': ":.3f"})
dllogger.metadata('time_to_target', {'unit': 's'})
dllogger.metadata('time_to_best_model', {'unit': 's'})
dllogger.metadata('hr@10', {"name": 'hr@10', 'unit': None, 'format': ":.5f"})
dllogger.metadata('best_accuracy', {'unit': None})
dllogger.metadata('best_epoch', {'unit': None})
dllogger.metadata('validation_loss', {"name": 'validation_loss', 'unit': None, 'format': ":.5f"})
dllogger.metadata('train_loss', {"name": 'train_loss', 'unit': None, 'format': ":.5f"})
dllogger.log(data=vars(args), step='PARAMETER')
if args.seed is not None:
torch.manual_seed(args.seed)
if not os.path.exists(args.checkpoint_dir) and args.checkpoint_dir:
print("Saving results to {}".format(args.checkpoint_dir))
os.makedirs(args.checkpoint_dir, exist_ok=True)
# sync workers before timing
if args.distributed:
torch.distributed.broadcast(torch.tensor([1], device="cuda"), 0)
torch.cuda.synchronize()
main_start_time = synchronized_timestamp()
feature_spec_path = os.path.join(args.data, args.feature_spec_file)
feature_spec = FeatureSpec.from_yaml(feature_spec_path)
trainset = dataloading.TorchTensorDataset(feature_spec, mapping_name='train', args=args)
testset = dataloading.TorchTensorDataset(feature_spec, mapping_name='test', args=args)
train_loader = dataloading.TrainDataloader(trainset, args)
test_loader = dataloading.TestDataLoader(testset, args)
# make pytorch memory behavior more consistent later
torch.cuda.empty_cache()
# Create model
user_feature_name = feature_spec.channel_spec[USER_CHANNEL_NAME][0]
item_feature_name = feature_spec.channel_spec[ITEM_CHANNEL_NAME][0]
label_feature_name = feature_spec.channel_spec[LABEL_CHANNEL_NAME][0]
model = NeuMF(nb_users=feature_spec.feature_spec[user_feature_name]['cardinality'],
nb_items=feature_spec.feature_spec[item_feature_name]['cardinality'],
mf_dim=args.factors,
mlp_layer_sizes=args.layers,
dropout=args.dropout)
optimizer = FusedAdam(model.parameters(), lr=args.learning_rate,
betas=(args.beta1, args.beta2), eps=args.eps)
criterion = nn.BCEWithLogitsLoss(reduction='none') # use torch.mean() with dim later to avoid copy to host
# Move model and loss to GPU
model = model.cuda()
criterion = criterion.cuda()
if args.distributed:
model = torch.nn.parallel.DistributedDataParallel(model)
local_batch = args.batch_size // args.world_size
traced_criterion = torch.jit.trace(criterion.forward,
(torch.rand(local_batch, 1), torch.rand(local_batch, 1)))
print(model)
print("{} parameters".format(utils.count_parameters(model)))
if args.load_checkpoint_path:
state_dict = torch.load(args.load_checkpoint_path)
state_dict = {k.replace('module.', ''): v for k, v in state_dict.items()}
model.load_state_dict(state_dict)
if args.mode == 'test':
start = synchronized_timestamp()
hr, ndcg, val_loss = val_epoch(model, test_loader, args.topk,
distributed=args.distributed, world_size=args.world_size)
val_time = synchronized_timestamp() - start
eval_size = test_loader.raw_dataset_length
eval_throughput = eval_size / val_time
dllogger.log(step=tuple(), data={'best_eval_throughput': eval_throughput,
'hr@10': hr,
'validation_loss': float(val_loss.item())})
return
# this should always be overridden if hr>0.
# It is theoretically possible for the hit rate to be zero in the first epoch, which would result in referring
# to an uninitialized variable.
max_hr = 0
best_epoch = 0
best_model_timestamp = synchronized_timestamp()
train_throughputs, eval_throughputs = [], []
scaler = torch.cuda.amp.GradScaler(enabled=args.amp)
for epoch in range(args.epochs):
begin = synchronized_timestamp()
batch_dict_list = train_loader.get_epoch_data()
num_batches = len(batch_dict_list)
for i in range(num_batches // args.grads_accumulated):
for j in range(args.grads_accumulated):
batch_idx = (args.grads_accumulated * i) + j
batch_dict = batch_dict_list[batch_idx]
user_features = batch_dict[USER_CHANNEL_NAME]
item_features = batch_dict[ITEM_CHANNEL_NAME]
user_batch = user_features[user_feature_name]
item_batch = item_features[item_feature_name]
label_features = batch_dict[LABEL_CHANNEL_NAME]
label_batch = label_features[label_feature_name]
with torch.cuda.amp.autocast(enabled=args.amp):
outputs = model(user_batch, item_batch)
loss = traced_criterion(outputs, label_batch.view(-1, 1))
loss = torch.mean(loss.float().view(-1), 0)
scaler.scale(loss).backward()
scaler.step(optimizer)
scaler.update()
for p in model.parameters():
p.grad = None
del batch_dict_list
train_time = synchronized_timestamp() - begin
begin = synchronized_timestamp()
epoch_samples = train_loader.length_after_augmentation
train_throughput = epoch_samples / train_time
train_throughputs.append(train_throughput)
hr, ndcg, val_loss = val_epoch(model, test_loader, args.topk,
distributed=args.distributed, world_size=args.world_size)
val_time = synchronized_timestamp() - begin
eval_size = test_loader.raw_dataset_length
eval_throughput = eval_size / val_time
eval_throughputs.append(eval_throughput)
if args.distributed:
torch.distributed.all_reduce(loss, op=torch.distributed.ReduceOp.SUM)
loss = loss / args.world_size
dllogger.log(step=(epoch,),
data={'train_throughput': train_throughput,
'hr@10': hr,
'train_epoch_time': train_time,
'validation_epoch_time': val_time,
'eval_throughput': eval_throughput,
'validation_loss': float(val_loss.item()),
'train_loss': float(loss.item())})
if hr > max_hr and args.local_rank == 0:
max_hr = hr
best_epoch = epoch
print("New best hr!")
if args.checkpoint_dir:
save_checkpoint_path = os.path.join(args.checkpoint_dir, 'model.pth')
print("Saving the model to: ", save_checkpoint_path)
torch.save(model.state_dict(), save_checkpoint_path)
best_model_timestamp = synchronized_timestamp()
if args.threshold is not None:
if hr >= args.threshold:
print("Hit threshold of {}".format(args.threshold))
break
if args.local_rank == 0:
dllogger.log(data={'best_train_throughput': max(train_throughputs),
'best_eval_throughput': max(eval_throughputs),
'mean_train_throughput': np.mean(train_throughputs),
'mean_eval_throughput': np.mean(eval_throughputs),
'best_accuracy': max_hr,
'best_epoch': best_epoch,
'time_to_target': synchronized_timestamp() - main_start_time,
'time_to_best_model': best_model_timestamp - main_start_time,
'validation_loss': float(val_loss.item()),
'train_loss': float(loss.item())},
step=tuple())
if __name__ == '__main__':
main()
| DeepLearningExamples-master | PyTorch/Recommendation/NCF/ncf.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from feature_spec import FeatureSpec
from neumf_constants import TEST_SAMPLES_PER_SERIES
from dataloading import TorchTensorDataset
import torch
import os
import sys
def test_matches_template(path, template_path):
loaded_featurespec_string = FeatureSpec.from_yaml(path).to_string()
loaded_template_string = FeatureSpec.from_yaml(template_path).to_string()
assert loaded_template_string == loaded_featurespec_string
def mock_args():
class Obj:
pass
args = Obj()
args.__dict__['local_rank'] = 0
return args
def test_dtypes(path):
loaded_featurespec = FeatureSpec.from_yaml(path)
features = loaded_featurespec.feature_spec
declared_dtypes = {name: data['dtype'] for name, data in features.items()}
source_spec = loaded_featurespec.source_spec
for mapping in source_spec.values():
for chunk in mapping:
chunk_dtype = None
for present_feature in chunk['features']:
assert present_feature in declared_dtypes, "unknown feature in mapping"
# Check declared type
feature_dtype = declared_dtypes[present_feature]
if chunk_dtype is None:
chunk_dtype = feature_dtype
else:
assert chunk_dtype == feature_dtype
path_to_load = os.path.join(loaded_featurespec.base_directory, chunk['files'][0])
loaded_data = torch.load(path_to_load)
assert str(loaded_data.dtype) == chunk_dtype
def test_cardinalities(path):
loaded_featurespec = FeatureSpec.from_yaml(path)
features = loaded_featurespec.feature_spec
declared_cardinalities = {name: data['cardinality'] for name, data in features.items() if 'cardinality' in data}
source_spec = loaded_featurespec.source_spec
for mapping_name, mapping in source_spec.items():
dataset = TorchTensorDataset(loaded_featurespec, mapping_name, mock_args())
for feature_name, cardinality in declared_cardinalities.items():
feature_data = dataset.features[feature_name]
biggest_num = feature_data.max().item()
assert biggest_num < cardinality
def test_samples_in_test_series(path):
loaded_featurespec = FeatureSpec.from_yaml(path)
series_length = loaded_featurespec.metadata[TEST_SAMPLES_PER_SERIES]
dataset = TorchTensorDataset(loaded_featurespec, 'test', mock_args())
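    # Each user contributes one series of series_length consecutive samples (1 positive + N negatives),
    # so every test feature's length must be divisible by series_length.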
for feature in dataset.features.values():
assert len(feature) % series_length == 0
if __name__ == '__main__':
tested_spec = sys.argv[1]
template = sys.argv[2]
test_cardinalities(tested_spec)
test_dtypes(tested_spec)
test_samples_in_test_series(tested_spec)
test_matches_template(tested_spec, template)
| DeepLearningExamples-master | PyTorch/Recommendation/NCF/test_featurespec_correctness.py |
# Copyright (c) 2018, deepakn94, codyaustun, robieta. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# -----------------------------------------------------------------------
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from argparse import ArgumentParser
import pandas as pd
from load import implicit_load
from feature_spec import FeatureSpec
from neumf_constants import USER_CHANNEL_NAME, ITEM_CHANNEL_NAME, LABEL_CHANNEL_NAME, TEST_SAMPLES_PER_SERIES
import torch
import os
import tqdm
TEST_1 = 'test_data_1.pt'
TEST_0 = 'test_data_0.pt'
TRAIN_1 = 'train_data_1.pt'
TRAIN_0 = 'train_data_0.pt'
USER_COLUMN = 'user_id'
ITEM_COLUMN = 'item_id'
def parse_args():
parser = ArgumentParser()
parser.add_argument('--path', type=str, default='/data/ml-20m/ratings.csv',
help='Path to reviews CSV file from MovieLens')
parser.add_argument('--output', type=str, default='/data',
help='Output directory for train and test files')
parser.add_argument('--valid_negative', type=int, default=100,
help='Number of negative samples for each positive test example')
parser.add_argument('--seed', '-s', type=int, default=1,
help='Manually set random seed for torch')
return parser.parse_args()
class _TestNegSampler:
def __init__(self, train_ratings, nb_neg):
self.nb_neg = nb_neg
self.nb_users = int(train_ratings[:, 0].max()) + 1
self.nb_items = int(train_ratings[:, 1].max()) + 1
        # compute unique (user, item) ids so membership can be checked quickly with a hash set
ids = (train_ratings[:, 0] * self.nb_items) + train_ratings[:, 1]
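        # Since item ids are < nb_items, this encoding is unique per (user, item) pair,
        # e.g. with nb_items=1000 the pair (user=2, item=7) maps to 2 * 1000 + 7 = 2007.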
self.set = set(ids)
def generate(self, batch_size=128 * 1024):
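        # Draw nb_neg negative items for every user: candidate items are sampled in large random
        # batches and rejected if the (user, item) pair appears in the training set.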
users = torch.arange(0, self.nb_users).reshape([1, -1]).repeat([self.nb_neg, 1]).transpose(0, 1).reshape(-1)
items = [-1] * len(users)
random_items = torch.LongTensor(batch_size).random_(0, self.nb_items).tolist()
print('Generating validation negatives...')
for idx, u in enumerate(tqdm.tqdm(users.tolist())):
if not random_items:
random_items = torch.LongTensor(batch_size).random_(0, self.nb_items).tolist()
j = random_items.pop()
while u * self.nb_items + j in self.set:
if not random_items:
random_items = torch.LongTensor(batch_size).random_(0, self.nb_items).tolist()
j = random_items.pop()
items[idx] = j
items = torch.LongTensor(items)
return items
def save_feature_spec(user_cardinality, item_cardinality, dtypes, test_negative_samples, output_path,
user_feature_name='user',
item_feature_name='item',
label_feature_name='label'):
feature_spec = {
user_feature_name: {
'dtype': dtypes[user_feature_name],
'cardinality': int(user_cardinality)
},
item_feature_name: {
'dtype': dtypes[item_feature_name],
'cardinality': int(item_cardinality)
},
label_feature_name: {
'dtype': dtypes[label_feature_name],
}
}
metadata = {
TEST_SAMPLES_PER_SERIES: test_negative_samples + 1
}
train_mapping = [
{
'type': 'torch_tensor',
'features': [
user_feature_name,
item_feature_name
],
'files': [TRAIN_0]
},
{
'type': 'torch_tensor',
'features': [
label_feature_name
],
'files': [TRAIN_1]
}
]
test_mapping = [
{
'type': 'torch_tensor',
'features': [
user_feature_name,
item_feature_name
],
'files': [TEST_0],
},
{
'type': 'torch_tensor',
'features': [
label_feature_name
],
'files': [TEST_1],
}
]
channel_spec = {
USER_CHANNEL_NAME: [user_feature_name],
ITEM_CHANNEL_NAME: [item_feature_name],
LABEL_CHANNEL_NAME: [label_feature_name]
}
source_spec = {'train': train_mapping, 'test': test_mapping}
feature_spec = FeatureSpec(feature_spec=feature_spec, metadata=metadata, source_spec=source_spec,
channel_spec=channel_spec, base_directory="")
feature_spec.to_yaml(output_path=output_path)
def main():
args = parse_args()
if args.seed is not None:
torch.manual_seed(args.seed)
print("Loading raw data from {}".format(args.path))
df = implicit_load(args.path, sort=False)
print("Mapping original user and item IDs to new sequential IDs")
df[USER_COLUMN] = pd.factorize(df[USER_COLUMN])[0]
df[ITEM_COLUMN] = pd.factorize(df[ITEM_COLUMN])[0]
user_cardinality = df[USER_COLUMN].max() + 1
item_cardinality = df[ITEM_COLUMN].max() + 1
# Need to sort before popping to get last item
df.sort_values(by='timestamp', inplace=True)
# clean up data
del df['rating'], df['timestamp']
df = df.drop_duplicates() # assuming it keeps order
# Test set is the last interaction for a given user
grouped_sorted = df.groupby(USER_COLUMN, group_keys=False)
test_data = grouped_sorted.tail(1).sort_values(by=USER_COLUMN)
# Train set is all interactions but the last one
train_data = grouped_sorted.apply(lambda x: x.iloc[:-1])
sampler = _TestNegSampler(train_data.values, args.valid_negative)
test_negs = sampler.generate().cuda()
test_negs = test_negs.reshape(-1, args.valid_negative)
# Reshape train set into user,item,label tabular and save
train_ratings = torch.from_numpy(train_data.values).cuda()
train_labels = torch.ones_like(train_ratings[:, 0:1], dtype=torch.float32)
torch.save(train_ratings, os.path.join(args.output, TRAIN_0))
torch.save(train_labels, os.path.join(args.output, TRAIN_1))
# Reshape test set into user,item,label tabular and save
# All users have the same number of items, items for a given user appear consecutively
test_ratings = torch.from_numpy(test_data.values).cuda()
test_users_pos = test_ratings[:, 0:1] # slicing instead of indexing to keep dimensions
test_items_pos = test_ratings[:, 1:2]
test_users = test_users_pos.repeat_interleave(args.valid_negative + 1, dim=0)
test_items = torch.cat((test_items_pos.reshape(-1, 1), test_negs), dim=1).reshape(-1, 1)
positive_labels = torch.ones_like(test_users_pos, dtype=torch.float32)
negative_labels = torch.zeros_like(test_users_pos, dtype=torch.float32).repeat(1, args.valid_negative)
test_labels = torch.cat((positive_labels, negative_labels), dim=1).reshape(-1, 1)
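    # Per-user layout after reshaping (valid_negative + 1 consecutive rows):
    #   row 0:                  (user, held-out positive item), label 1.0
    #   rows 1..valid_negative: (user, sampled negative item),  label 0.0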
dtypes = {'user': str(test_users.dtype), 'item': str(test_items.dtype), 'label': str(test_labels.dtype)}
test_tensor = torch.cat((test_users, test_items), dim=1)
torch.save(test_tensor, os.path.join(args.output, TEST_0))
torch.save(test_labels, os.path.join(args.output, TEST_1))
save_feature_spec(user_cardinality=user_cardinality, item_cardinality=item_cardinality, dtypes=dtypes,
test_negative_samples=args.valid_negative, output_path=args.output + '/feature_spec.yaml')
if __name__ == '__main__':
main()
| DeepLearningExamples-master | PyTorch/Recommendation/NCF/convert.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import yaml
import os
from typing import List, Dict
class FeatureSpec:
def __init__(self, feature_spec, source_spec, channel_spec, metadata, base_directory):
self.feature_spec: Dict = feature_spec
self.source_spec: Dict = source_spec
self.channel_spec: Dict = channel_spec
self.metadata: Dict = metadata
self.base_directory: str = base_directory
@classmethod
def from_yaml(cls, path):
with open(path, 'r') as feature_spec_file:
base_directory = os.path.dirname(path)
feature_spec = yaml.safe_load(feature_spec_file)
return cls.from_dict(feature_spec, base_directory=base_directory)
@classmethod
def from_dict(cls, source_dict, base_directory):
return cls(base_directory=base_directory, **source_dict)
def to_dict(self) -> Dict:
attributes_to_dump = ['feature_spec', 'source_spec', 'channel_spec', 'metadata']
return {attr: self.__dict__[attr] for attr in attributes_to_dump}
def to_string(self):
return yaml.dump(self.to_dict())
def to_yaml(self, output_path=None):
if not output_path:
output_path = self.base_directory + '/feature_spec.yaml'
with open(output_path, 'w') as output_file:
print(yaml.dump(self.to_dict()), file=output_file)
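# Illustrative round trip (sketch; the path below is hypothetical):
#   spec = FeatureSpec.from_yaml('/data/feature_spec.yaml')
#   user_feature = spec.channel_spec['user_ch'][0]  # first feature bound to the user channel
#   spec.to_yaml('/data/feature_spec_copy.yaml')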
| DeepLearningExamples-master | PyTorch/Recommendation/NCF/feature_spec.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from argparse import ArgumentParser
import os
import torch
import pandas as pd
from feature_spec import FeatureSpec
from neumf_constants import USER_CHANNEL_NAME, ITEM_CHANNEL_NAME, LABEL_CHANNEL_NAME
def parse_args():
parser = ArgumentParser()
parser.add_argument('--path', type=str, default='',
help='Path to input data directory')
parser.add_argument('--feature_spec_in', type=str, default='feature_spec.yaml',
help='Name of the input feature specification file, or path relative to data directory.')
parser.add_argument('--output', type=str, default='/data',
help='Path to output data directory')
parser.add_argument('--feature_spec_out', type=str, default='feature_spec.yaml',
help='Name of the output feature specification file, or path relative to data directory.')
return parser.parse_args()
def main():
args = parse_args()
args_output = args.output
args_path = args.path
args_feature_spec_in = args.feature_spec_in
args_feature_spec_out = args.feature_spec_out
feature_spec_path = os.path.join(args_path, args_feature_spec_in)
feature_spec = FeatureSpec.from_yaml(feature_spec_path)
# Only three features are transcoded - this is NCF specific
user_feature_name = feature_spec.channel_spec[USER_CHANNEL_NAME][0]
item_feature_name = feature_spec.channel_spec[ITEM_CHANNEL_NAME][0]
label_feature_name = feature_spec.channel_spec[LABEL_CHANNEL_NAME][0]
categorical_features = [user_feature_name, item_feature_name]
found_cardinalities = {f: 0 for f in categorical_features}
new_source_spec = {}
for mapping_name, mapping in feature_spec.source_spec.items():
# Load all chunks and link into one df
chunk_dfs = []
for chunk in mapping:
assert chunk['type'] == 'csv', "Only csv files supported in this transcoder"
file_dfs = []
for file in chunk['files']:
path_to_load = os.path.join(feature_spec.base_directory, file)
file_dfs.append(pd.read_csv(path_to_load, header=None))
chunk_df = pd.concat(file_dfs, ignore_index=True)
chunk_df.columns = chunk['features']
chunk_df.reset_index(drop=True, inplace=True)
chunk_dfs.append(chunk_df)
mapping_df = pd.concat(chunk_dfs, axis=1) # This takes care of making sure feature names are unique
for feature in categorical_features:
mapping_cardinality = mapping_df[feature].max() + 1
previous_cardinality = found_cardinalities[feature]
found_cardinalities[feature] = max(previous_cardinality, mapping_cardinality)
# We group together users and items, while separating labels. This is because of the target dtypes: ids are int,
# while labels are float to compute loss.
ints_tensor = torch.from_numpy(mapping_df[[user_feature_name, item_feature_name]].values).long()
ints_file = f"{mapping_name}_data_0.pt"
ints_chunk = {"type": "torch_tensor",
"features": [user_feature_name, item_feature_name],
"files": [ints_file]}
torch.save(ints_tensor, os.path.join(args_output, ints_file))
floats_tensor = torch.from_numpy(mapping_df[[label_feature_name]].values).float()
floats_file = f"{mapping_name}_data_1.pt"
floats_chunk = {"type": "torch_tensor",
"features": [label_feature_name],
"files": [floats_file]}
torch.save(floats_tensor, os.path.join(args_output, floats_file))
new_source_spec[mapping_name] = [ints_chunk, floats_chunk]
for feature in categorical_features:
found_cardinality = found_cardinalities[feature]
declared_cardinality = feature_spec.feature_spec[feature].get('cardinality', 'auto')
if declared_cardinality != "auto":
declared = int(declared_cardinality)
            assert declared >= found_cardinality, "Specified cardinality conflicts with the data"
found_cardinalities[feature] = declared
new_inner_feature_spec = {
user_feature_name: {
"dtype": "torch.int64",
"cardinality": int(found_cardinalities[user_feature_name])
},
item_feature_name: {
"dtype": "torch.int64",
"cardinality": int(found_cardinalities[item_feature_name])
},
label_feature_name: {
"dtype": "torch.float32"
}
}
new_feature_spec = FeatureSpec(feature_spec=new_inner_feature_spec,
source_spec=new_source_spec,
channel_spec=feature_spec.channel_spec,
metadata=feature_spec.metadata,
base_directory="")
feature_spec_save_path = os.path.join(args_output, args_feature_spec_out)
new_feature_spec.to_yaml(output_path=feature_spec_save_path)
if __name__ == '__main__':
main()
| DeepLearningExamples-master | PyTorch/Recommendation/NCF/transcode.py |
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from functools import reduce
def count_parameters(model):
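    # Total number of scalar weights: the product of each parameter tensor's dimensions,
    # summed over all parameters of the model.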
c = map(lambda p: reduce(lambda x, y: x * y, p.size()), model.parameters())
return sum(c)
def save_result(result, path):
write_heading = not os.path.exists(path)
with open(path, mode='a') as out:
if write_heading:
out.write(",".join([str(k) for k, v in result.items()]) + '\n')
out.write(",".join([str(v) for k, v in result.items()]) + '\n')
| DeepLearningExamples-master | PyTorch/Recommendation/NCF/utils.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
USER_CHANNEL_NAME = 'user_ch'
ITEM_CHANNEL_NAME = 'item_ch'
LABEL_CHANNEL_NAME = 'label_ch'
TEST_SAMPLES_PER_SERIES = 'test_samples_per_series'
| DeepLearningExamples-master | PyTorch/Recommendation/NCF/neumf_constants.py |
#
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch.jit
import time
from argparse import ArgumentParser
import numpy as np
import torch
from neumf import NeuMF
import dllogger
def parse_args():
parser = ArgumentParser(description="Benchmark inference performance of the NCF model")
parser.add_argument('--load_checkpoint_path', default=None, type=str,
help='Path to the checkpoint file to be loaded before training/evaluation')
parser.add_argument('--n_users', default=138493, type=int,
help='Number of users. Defaults to the number of users in the ml-20m dataset after preprocessing')
parser.add_argument('--n_items', default=26744, type=int,
                        help='Number of items. Defaults to the number of items in the ml-20m dataset after preprocessing')
parser.add_argument('-f', '--factors', type=int, default=64,
help='Number of predictive factors')
parser.add_argument('--dropout', type=float, default=0.5,
help='Dropout probability, if equal to 0 will not use dropout at all')
parser.add_argument('--layers', nargs='+', type=int,
default=[256, 256, 128, 64],
help='Sizes of hidden layers for MLP')
parser.add_argument('--batch_sizes', default='1,4,16,64,256,1024,4096,16384,65536,262144,1048576', type=str,
help='A list of comma-separated batch size values to benchmark')
parser.add_argument('--num_batches', default=200, type=int,
help='Number of batches for which to measure latency and throughput')
parser.add_argument('--fp16', action='store_true', help='Cast the model to FP16 precision', default=False)
parser.add_argument('--log_path', default='log.json', type=str,
help='Path for the JSON training log')
return parser.parse_args()
def main():
args = parse_args()
dllogger.init(backends=[dllogger.JSONStreamBackend(verbosity=dllogger.Verbosity.VERBOSE,
filename=args.log_path),
dllogger.StdOutBackend(verbosity=dllogger.Verbosity.VERBOSE)])
dllogger.log(data=vars(args), step='PARAMETER')
model = NeuMF(nb_users=args.n_users, nb_items=args.n_items, mf_dim=args.factors,
mlp_layer_sizes=args.layers, dropout=args.dropout)
model = model.cuda()
if args.load_checkpoint_path:
state_dict = torch.load(args.load_checkpoint_path)
model.load_state_dict(state_dict)
if args.fp16:
model.half()
model.eval()
batch_sizes = args.batch_sizes.split(',')
batch_sizes = [int(s) for s in batch_sizes]
result_data = {}
for batch_size in batch_sizes:
print('benchmarking batch size: ', batch_size)
users = torch.cuda.LongTensor(batch_size).random_(0, args.n_users)
items = torch.cuda.LongTensor(batch_size).random_(0, args.n_items)
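        # Random indices are used because the model's latency is essentially independent
        # of which user/item ids are looked up in the embedding tables.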
latencies = []
for i in range(args.num_batches):
torch.cuda.synchronize()
start = time.time()
_ = model(users, items, sigmoid=True)
torch.cuda.synchronize()
end_time = time.time()
if i < 10: # warmup iterations
continue
latencies.append(end_time - start)
result_data[f'batch_{batch_size}_mean_throughput'] = batch_size / np.mean(latencies)
result_data[f'batch_{batch_size}_mean_latency'] = np.mean(latencies)
result_data[f'batch_{batch_size}_p90_latency'] = np.percentile(latencies, 90)
result_data[f'batch_{batch_size}_p95_latency'] = np.percentile(latencies, 95)
result_data[f'batch_{batch_size}_p99_latency'] = np.percentile(latencies, 99)
for batch_size in batch_sizes:
dllogger.metadata(f'batch_{batch_size}_mean_throughput', {'unit': 'samples/s'})
for p in ['mean', 'p90', 'p95', 'p99']:
dllogger.metadata(f'batch_{batch_size}_{p}_latency', {'unit': 's'})
dllogger.log(data=result_data, step=tuple())
dllogger.flush()
return
if __name__ == '__main__':
main()
| DeepLearningExamples-master | PyTorch/Recommendation/NCF/inference.py |
# Copyright (c) 2018, deepakn94. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# -----------------------------------------------------------------------
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections import namedtuple
import pandas as pd
RatingData = namedtuple('RatingData',
['items', 'users', 'ratings', 'min_date', 'max_date'])
def describe_ratings(ratings):
info = RatingData(items=len(ratings['item_id'].unique()),
users=len(ratings['user_id'].unique()),
ratings=len(ratings),
min_date=ratings['timestamp'].min(),
max_date=ratings['timestamp'].max())
print("{ratings} ratings on {items} items from {users} users"
" from {min_date} to {max_date}"
.format(**(info._asdict())))
return info
def process_movielens(ratings, sort=True):
ratings['timestamp'] = pd.to_datetime(ratings['timestamp'], unit='s')
if sort:
ratings.sort_values(by='timestamp', inplace=True)
describe_ratings(ratings)
return ratings
def load_ml_100k(filename, sort=True):
names = ['user_id', 'item_id', 'rating', 'timestamp']
ratings = pd.read_csv(filename, sep='\t', names=names)
return process_movielens(ratings, sort=sort)
def load_ml_1m(filename, sort=True):
names = ['user_id', 'item_id', 'rating', 'timestamp']
ratings = pd.read_csv(filename, sep='::', names=names, engine='python')
return process_movielens(ratings, sort=sort)
def load_ml_10m(filename, sort=True):
names = ['user_id', 'item_id', 'rating', 'timestamp']
ratings = pd.read_csv(filename, sep='::', names=names, engine='python')
return process_movielens(ratings, sort=sort)
def load_ml_20m(filename, sort=True):
ratings = pd.read_csv(filename)
ratings['timestamp'] = pd.to_datetime(ratings['timestamp'], unit='s')
names = {'userId': 'user_id', 'movieId': 'item_id'}
ratings.rename(columns=names, inplace=True)
return process_movielens(ratings, sort=sort)
def load_unknown(filename, sort=True):
names = ['user_id', 'item_id', 'timestamp']
ratings = pd.read_csv(filename, names=names, header=0, engine='python')
ratings['rating'] = 5
return process_movielens(ratings, sort=sort)
DATASETS = [k.replace('load_', '') for k in locals().keys() if "load_" in k]
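# DATASETS is derived from the loader names defined above, e.g. 'load_ml_20m' -> 'ml_20m';
# get_dataset_name() matches these substrings against the normalized input filename.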
def get_dataset_name(filename):
for dataset in DATASETS:
if dataset in filename.replace('-', '_').lower():
return dataset
    print("Unknown dataset. Expecting columns `user_id`, `item_id`, and `timestamp`")
return "unknown"
def implicit_load(filename, sort=True):
func = globals()["load_" + get_dataset_name(filename)]
return func(filename, sort=sort)
| DeepLearningExamples-master | PyTorch/Recommendation/NCF/load.py |
# Copyright (c) 2018, deepakn94, codyaustun, robieta. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# -----------------------------------------------------------------------
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from argparse import ArgumentParser
import pandas as pd
import numpy as np
from load import implicit_load
from convert import save_feature_spec, _TestNegSampler, TEST_0, TEST_1, TRAIN_0, TRAIN_1
import torch
import os
USER_COLUMN = 'user_id'
ITEM_COLUMN = 'item_id'
def parse_args():
parser = ArgumentParser()
parser.add_argument('--path', type=str, default='/data/ml-20m/ratings.csv',
help='Path to reviews CSV file from MovieLens')
parser.add_argument('--output', type=str, default='/data',
help='Output directory for train and test files')
parser.add_argument('--valid_negative', type=int, default=100,
help='Number of negative samples for each positive test example')
parser.add_argument('--seed', '-s', type=int, default=1,
help='Manually set random seed for torch')
    parser.add_argument('--test', type=str, help='Modification to apply to the generated dataset (e.g. less_user, more_item, more_pos, other_names)')
return parser.parse_args()
def main():
args = parse_args()
if args.seed is not None:
torch.manual_seed(args.seed)
print("Loading raw data from {}".format(args.path))
df = implicit_load(args.path, sort=False)
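    # Optional perturbations selected with --test: drop the last 100 users/items (less_user, less_item),
    # append a 20% sample with shifted ids (more_user, more_item), duplicate or drop roughly half of the
    # positive test interactions (more_pos, less_pos), or emit renamed features (other_names).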
if args.test == 'less_user':
to_drop = set(list(df[USER_COLUMN].unique())[-100:])
df = df[~df[USER_COLUMN].isin(to_drop)]
if args.test == 'less_item':
to_drop = set(list(df[ITEM_COLUMN].unique())[-100:])
df = df[~df[ITEM_COLUMN].isin(to_drop)]
if args.test == 'more_user':
sample = df.sample(frac=0.2).copy()
sample[USER_COLUMN] = sample[USER_COLUMN] + 10000000
df = df.append(sample)
users = df[USER_COLUMN]
df = df[users.isin(users[users.duplicated(keep=False)])] # make sure something remains in the train set
if args.test == 'more_item':
sample = df.sample(frac=0.2).copy()
sample[ITEM_COLUMN] = sample[ITEM_COLUMN] + 10000000
df = df.append(sample)
print("Mapping original user and item IDs to new sequential IDs")
df[USER_COLUMN] = pd.factorize(df[USER_COLUMN])[0]
df[ITEM_COLUMN] = pd.factorize(df[ITEM_COLUMN])[0]
user_cardinality = df[USER_COLUMN].max() + 1
item_cardinality = df[ITEM_COLUMN].max() + 1
# Need to sort before popping to get last item
df.sort_values(by='timestamp', inplace=True)
# clean up data
del df['rating'], df['timestamp']
df = df.drop_duplicates() # assuming it keeps order
# Test set is the last interaction for a given user
grouped_sorted = df.groupby(USER_COLUMN, group_keys=False)
test_data = grouped_sorted.tail(1).sort_values(by=USER_COLUMN)
# Train set is all interactions but the last one
train_data = grouped_sorted.apply(lambda x: x.iloc[:-1])
sampler = _TestNegSampler(train_data.values, args.valid_negative)
test_negs = sampler.generate().cuda()
if args.valid_negative > 0:
test_negs = test_negs.reshape(-1, args.valid_negative)
else:
test_negs = test_negs.reshape(test_data.shape[0], 0)
if args.test == 'more_pos':
mask = np.random.rand(len(test_data)) < 0.5
sample = test_data[mask].copy()
sample[ITEM_COLUMN] = sample[ITEM_COLUMN] + 5
test_data = test_data.append(sample)
test_negs_copy = test_negs[mask]
test_negs = torch.cat((test_negs, test_negs_copy), dim=0)
if args.test == 'less_pos':
mask = np.random.rand(len(test_data)) < 0.5
test_data = test_data[mask]
test_negs = test_negs[mask]
# Reshape train set into user,item,label tabular and save
train_ratings = torch.from_numpy(train_data.values).cuda()
train_labels = torch.ones_like(train_ratings[:, 0:1], dtype=torch.float32)
torch.save(train_ratings, os.path.join(args.output, TRAIN_0))
torch.save(train_labels, os.path.join(args.output, TRAIN_1))
# Reshape test set into user,item,label tabular and save
# All users have the same number of items, items for a given user appear consecutively
test_ratings = torch.from_numpy(test_data.values).cuda()
test_users_pos = test_ratings[:, 0:1] # slicing instead of indexing to keep dimensions
test_items_pos = test_ratings[:, 1:2]
test_users = test_users_pos.repeat_interleave(args.valid_negative + 1, dim=0)
test_items = torch.cat((test_items_pos.reshape(-1, 1), test_negs), dim=1).reshape(-1, 1)
positive_labels = torch.ones_like(test_users_pos, dtype=torch.float32)
negative_labels = torch.zeros_like(test_users_pos, dtype=torch.float32).repeat(1, args.valid_negative)
test_labels = torch.cat((positive_labels, negative_labels), dim=1).reshape(-1, 1)
dtypes = {'user': str(test_users.dtype), 'item': str(test_items.dtype), 'label': str(test_labels.dtype)}
test_tensor = torch.cat((test_users, test_items), dim=1)
torch.save(test_tensor, os.path.join(args.output, TEST_0))
torch.save(test_labels, os.path.join(args.output, TEST_1))
if args.test == 'other_names':
dtypes = {'user_2': str(test_users.dtype),
'item_2': str(test_items.dtype),
'label_2': str(test_labels.dtype)}
save_feature_spec(user_cardinality=user_cardinality, item_cardinality=item_cardinality, dtypes=dtypes,
test_negative_samples=args.valid_negative, output_path=args.output + '/feature_spec.yaml',
user_feature_name='user_2',
item_feature_name='item_2',
label_feature_name='label_2')
else:
save_feature_spec(user_cardinality=user_cardinality, item_cardinality=item_cardinality, dtypes=dtypes,
test_negative_samples=args.valid_negative, output_path=args.output + '/feature_spec.yaml')
if __name__ == '__main__':
main()
| DeepLearningExamples-master | PyTorch/Recommendation/NCF/convert_test.py |
# Copyright (c) 2018, deepakn94, robieta. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# -----------------------------------------------------------------------
#
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import torch
import torch.nn as nn
import sys
from os.path import abspath, join, dirname
class NeuMF(nn.Module):
def __init__(self, nb_users, nb_items,
mf_dim, mlp_layer_sizes, dropout=0):
if mlp_layer_sizes[0] % 2 != 0:
            raise RuntimeError('mlp_layer_sizes[0] must be even, got {}'.format(mlp_layer_sizes[0]))
super(NeuMF, self).__init__()
nb_mlp_layers = len(mlp_layer_sizes)
self.mf_user_embed = nn.Embedding(nb_users, mf_dim)
self.mf_item_embed = nn.Embedding(nb_items, mf_dim)
self.mlp_user_embed = nn.Embedding(nb_users, mlp_layer_sizes[0] // 2)
self.mlp_item_embed = nn.Embedding(nb_items, mlp_layer_sizes[0] // 2)
self.dropout = dropout
self.mlp = nn.ModuleList()
for i in range(1, nb_mlp_layers):
self.mlp.extend([nn.Linear(mlp_layer_sizes[i - 1], mlp_layer_sizes[i])]) # noqa: E501
self.final = nn.Linear(mlp_layer_sizes[-1] + mf_dim, 1)
self.mf_user_embed.weight.data.normal_(0., 0.01)
self.mf_item_embed.weight.data.normal_(0., 0.01)
self.mlp_user_embed.weight.data.normal_(0., 0.01)
self.mlp_item_embed.weight.data.normal_(0., 0.01)
def glorot_uniform(layer):
fan_in, fan_out = layer.in_features, layer.out_features
limit = np.sqrt(6. / (fan_in + fan_out))
layer.weight.data.uniform_(-limit, limit)
def lecunn_uniform(layer):
fan_in, fan_out = layer.in_features, layer.out_features # noqa: F841, E501
limit = np.sqrt(3. / fan_in)
layer.weight.data.uniform_(-limit, limit)
for layer in self.mlp:
if type(layer) != nn.Linear:
continue
glorot_uniform(layer)
lecunn_uniform(self.final)
def forward(self, user, item, sigmoid=False):
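        # NeuMF combines two branches:
        #   GMF: element-wise product of the user and item MF embeddings
        #   MLP: concatenated user/item MLP embeddings passed through the hidden layers
        # Their outputs are concatenated and projected to a single logit; sigmoid=True converts
        # it to a probability (training uses BCEWithLogitsLoss on the raw logit).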
xmfu = self.mf_user_embed(user)
xmfi = self.mf_item_embed(item)
xmf = xmfu * xmfi
xmlpu = self.mlp_user_embed(user)
xmlpi = self.mlp_item_embed(item)
xmlp = torch.cat((xmlpu, xmlpi), dim=1)
for i, layer in enumerate(self.mlp):
xmlp = layer(xmlp)
xmlp = nn.functional.relu(xmlp)
if self.dropout != 0:
xmlp = nn.functional.dropout(xmlp, p=self.dropout, training=self.training)
x = torch.cat((xmf, xmlp), dim=1)
x = self.final(x)
if sigmoid:
x = torch.sigmoid(x)
return x
| DeepLearningExamples-master | PyTorch/Recommendation/NCF/neumf.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import matplotlib.pyplot as plt
def get_curve(filename):
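    # Each DLLogger line is the literal prefix 'DLLL ' followed by a JSON payload;
    # strip the prefix and collect the hr@10 value wherever it was logged.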
hrs = []
with open(filename, 'r') as opened:
for line in opened.readlines():
d = json.loads(line[len("DLLL "):])
try:
hrs.append(d["data"]["hr@10"])
except KeyError:
pass
return hrs
a100 = "runs/pytorch_ncf_A100-SXM4-40GBx{numgpus}gpus_{precision}_{num_run}.json"
v16 = "runs/pytorch_ncf_Tesla V100-SXM2-16GBx{numgpus}gpus_{precision}_{num_run}.json"
v32 = "runs/pytorch_ncf_Tesla V100-SXM2-32GBx{numgpus}gpus_{precision}_{num_run}.json"
dgx2 = "runs/pytorch_ncf_Tesla V100-SXM3-32GBx{numgpus}gpus_{precision}_{num_run}.json"
fp32 = "FP32"
amp = "Mixed (AMP)"
tf32 = "TF32"
def get_accs(arch, numgpu, prec):
data = [get_curve(arch.format(numgpus=numgpu, num_run=num_run, precision=prec)) for num_run in range(1, 21)]
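    # All 20 runs are parsed, but only the first run's validation curve is returned for plotting.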
return data[0]
def get_plots():
archs = [dgx2, a100]
titles = ["DGX2 32GB", "DGX A100 40GB"]
fullprecs = [fp32, tf32]
halfprecs = [amp, amp]
gpuranges = [(1, 8, 16), (1, 8)]
fig, axs = plt.subplots(1, 2, sharey=True, figsize=(10, 5))
plt.subplots_adjust(hspace=0.5)
for x, prec in enumerate([fullprecs, halfprecs]):
for i, arch in enumerate(archs):
for numgpu in gpuranges[i]:
d = get_accs(arch, numgpu, prec[i])
axs[x].plot(range(len(d)), d, label=f"{titles[i]} x {numgpu} {prec[i]}")
axs[x].legend()
#plt.show()
plt.savefig("val_curves.png")
if __name__ == "__main__":
get_plots()
| DeepLearningExamples-master | PyTorch/Recommendation/NCF/qa/generate_validation_curves.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import tabulate
archs = ["a100", "v100"]
precs = ["full", "half"]
for arch in archs:
for prec in precs:
filename = f"inference/{arch}_{prec}.log"
with open(filename) as opened:
line = opened.readlines()[-1]
log = json.loads(line[len("DLLL "):])['data']
print(log)
batch_sizes = [1024, 4096, 16384, 65536, 262144, 1048576]
t_avg = "batch_{}_mean_throughput"
l_mean = "batch_{}_mean_latency"
l_90 = "batch_{}_p90_latency"
l_95 = "batch_{}_p95_latency"
l_99 = "batch_{}_p99_latency"
headers = ["Batch size", "Throughput Avg", "Latency Avg", "Latency 90%", "Latency 95%", "Latency 99%"]
table = []
for bsize in batch_sizes:
table.append([bsize,
"{:3.3f}".format(log[t_avg.format(bsize)]),
"{:.6f}".format(log[l_mean.format(bsize)]),
"{:.6f}".format(log[l_90.format(bsize)]),
"{:.6f}".format(log[l_95.format(bsize)]),
"{:.6f}".format(log[l_99.format(bsize)])])
print(filename)
print(tabulate.tabulate(table, headers, tablefmt='pipe'))
| DeepLearningExamples-master | PyTorch/Recommendation/NCF/qa/inference_table.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import matplotlib.pyplot as plt
def get_training_data(filename):
with open(filename, 'r') as opened:
line = opened.readlines()[-1]
json_content = line[len("DLLL "):]
data = json.loads(json_content)["data"]
with open(filename, 'r') as opened:
for line in opened.readlines():
d = json.loads(line[len("DLLL "):])
if d.get("step", "") == "PARAMETER":
data['batch_size'] = d["data"]["batch_size"]
return data
a100 = "runs/pytorch_ncf_A100-SXM4-40GBx{numgpus}gpus_{precision}_{num_run}.json"
v16 = "runs/pytorch_ncf_Tesla V100-SXM2-16GBx{numgpus}gpus_{precision}_{num_run}.json"
v32 = "runs/pytorch_ncf_Tesla V100-SXM2-32GBx{numgpus}gpus_{precision}_{num_run}.json"
dgx2 = "runs/pytorch_ncf_Tesla V100-SXM3-32GBx{numgpus}gpus_{precision}_{num_run}.json"
fp32 = "FP32"
amp = "Mixed (AMP)"
tf32 = "TF32"
def get_accs(arch, numgpu, prec):
data = [get_training_data(arch.format(numgpus=numgpu, num_run=num_run, precision=prec)) for num_run in range(1, 21)]
accs = [d["best_accuracy"] for d in data]
return accs
def get_plots():
archs = [dgx2, a100]
gpuranges = [(1, 8, 16), (1, 8)]
titles = ["DGX2 32GB", "DGX A100 40GB"]
fullprecs = [fp32, tf32]
fig, axs = plt.subplots(2, 3, sharey=True, figsize=(8, 8))
plt.subplots_adjust(hspace=0.5)
for x, arch in enumerate(archs):
gpurange = gpuranges[x]
for y, gpu in enumerate(gpurange):
f_data = get_accs(arch, gpu, fullprecs[x])
h_data = get_accs(arch, gpu, amp)
axs[x, y].boxplot([f_data, h_data])
axs[x, y].set_xticklabels([fullprecs[x], amp])
axs[x, y].set_title(f"{gpu} GPUs" if gpu > 1 else "1 GPU")
axs[x, 0].set_ylabel(titles[x])
fig.delaxes(axs[1, 2])
# plt.show()
plt.savefig("box_plots.png")
if __name__ == "__main__":
get_plots()
| DeepLearningExamples-master | PyTorch/Recommendation/NCF/qa/generate_boxplots.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import tabulate
import numpy as np
def get_training_data(filename):
with open(filename, 'r') as opened:
line = opened.readlines()[-1]
json_content = line[len("DLLL "):]
data = json.loads(json_content)["data"]
with open(filename, 'r') as opened:
for line in opened.readlines():
d = json.loads(line[len("DLLL "):])
if d.get("step", "") == "PARAMETER":
data['batch_size'] = d["data"]["batch_size"]
return data
a100 = "runs/pytorch_ncf_A100-SXM4-40GBx{numgpus}gpus_{precision}_{num_run}.json"
v16 = "runs/pytorch_ncf_Tesla V100-SXM2-16GBx{numgpus}gpus_{precision}_{num_run}.json"
v32 = "runs/pytorch_ncf_Tesla V100-SXM2-32GBx{numgpus}gpus_{precision}_{num_run}.json"
dgx2 = "runs/pytorch_ncf_Tesla V100-SXM3-32GBx{numgpus}gpus_{precision}_{num_run}.json"
fp32 = "FP32"
amp = "Mixed (AMP)"
tf32 = "TF32"
first = a100.format(numgpus=1, precision=fp32, num_run=1)
timevar = 'time_to_target' #"time_to_best_model"
def get_acc_table(arch, numgpus, fullprec):
    headers = ["GPUs",
               "Batch size / GPU",
               f"Accuracy - {fullprec}",
               "Accuracy - mixed precision",
               f"Time to train - {fullprec}",
               "Time to train - mixed precision",
               f"Time to train speedup ({fullprec} to mixed precision)"]
table = []
for numgpus in numgpus:
data_full = [get_training_data(arch.format(numgpus=numgpus, num_run=num_run, precision=fullprec)) for num_run in range(1, 21)]
data_mixed = [get_training_data(arch.format(numgpus=numgpus, num_run=num_run, precision=amp)) for num_run in range(1, 21)]
bsize = data_full[0]['batch_size']/numgpus
accs_full = np.mean([d["best_accuracy"] for d in data_full])
accs_mixed = np.mean([d["best_accuracy"] for d in data_mixed])
time_full = np.mean([d[timevar] for d in data_full])
time_mixed = np.mean([d[timevar] for d in data_mixed])
speedup = time_full / time_mixed
row = [numgpus, int(bsize),
"{:.6f}".format(accs_full),
"{:.6f}".format(accs_mixed),
"{:.6f}".format(time_full),
"{:.6f}".format(time_mixed),
"{:.2f}".format(speedup)]
table.append(row)
print(tabulate.tabulate(table, headers, tablefmt='pipe'))
def get_perf_table(arch, numgpus, fullprec):
headers = ["GPUs",
"Batch size / GPU",
f"Throughput - {fullprec} (samples/s)",
"Throughput - mixed precision (samples/s)",
f"Throughput speedup ({fullprec} to mixed precision)",
f"Strong scaling - {fullprec}",
"Strong scaling - mixed precision",
]
table = []
base_full = None
base_mixed = None
for numgpus in numgpus:
data_full = [get_training_data(arch.format(numgpus=numgpus, num_run=num_run, precision=fullprec)) for num_run in range(1, 21)]
data_mixed = [get_training_data(arch.format(numgpus=numgpus, num_run=num_run, precision=amp)) for num_run in range(1, 21)]
bsize = data_full[0]['batch_size']/numgpus
_full = np.mean([d["best_train_throughput"] for d in data_full])
_mixed = np.mean([d["best_train_throughput"] for d in data_mixed])
if numgpus == 1:
base_full = _full
base_mixed = _mixed
scaling_full = _full/ base_full
scaling_mixed = _mixed / base_mixed
time_mixed = np.mean([d[timevar] for d in data_mixed])
speedup = _full / _mixed
row = [numgpus, int(bsize),
"{:.2f}M".format(_full / 10**6),
"{:.2f}M".format(_mixed / 10**6),
"{:.2f}".format(speedup),
"{:.2f}".format(scaling_full),
"{:.2f}".format(scaling_mixed)]
table.append(row)
print(tabulate.tabulate(table, headers, tablefmt='pipe'))
#get_acc_table(a100, (1, 8), tf32)
#get_acc_table(v16, (1, 8), fp32)
#get_acc_table(v32, (1, 8), fp32)
#get_acc_table(dgx2, (1, 8, 16), fp32)
#get_perf_table(a100, (1, 8), tf32)
#get_perf_table(v16, (1, 8), fp32)
#get_perf_table(v32, (1, 8), fp32)
#get_perf_table(dgx2, (1, 8, 16), fp32) | DeepLearningExamples-master | PyTorch/Recommendation/NCF/qa/generate_tables.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import common.filter_warnings
import argparse
import copy
import io
import os
import sys
import random
from functools import partial
from itertools import cycle, islice
from pathlib import Path
import torch
import numpy as np
from contextlib import suppress as empty_context
from torch.nn.parallel import DistributedDataParallel
import wav2vec2.arg_parser
from common import tb_dllogger as logger
from common.dataset import adjust_max_tokens, get_batch_iterator
from common.fairseq.data import Dictionary
from common.fairseq.dist import ModuleProxyWrapper
from common.fairseq.utils import multiply_grads
from common.helpers import (Checkpointer, num_weights, to_gpu,
init_multi_tensor_ema, apply_multi_tensor_ema)
from common.optimizers import get_optimizer, lr_exp_policy, lr_poly_policy
from common.utils import print_once, set_torch_seed, setup_distributed
from wav2vec2.criterion import Wav2vecCriterion, CTCCriterion
from wav2vec2.logging import init_logger, W2v2Metrics, W2v2FineTuningMetrics
from wav2vec2.utils import build_model, load_dataset
@torch.no_grad()
def validate(epoch, step, valid_loader, model, ema_model, criterion,
val_metrics, val_ema_metrics, world_size, fp16, bf16):
val_losses = []
val_wer = []
for model, metrics, scope in [(model, val_metrics, 'val'),
(ema_model, val_ema_metrics, 'val_ema')]:
if model is None:
continue
model.eval()
criterion.eval()
metrics._start_accumulating(None, True, scope=scope)
output_keys = None
assert len(valid_loader) > 1, (
'Validation needs at least 2 iterations to handle empty batches.')
for batch in valid_loader:
is_empty_batch = len(batch) == 0
if not is_empty_batch:
to_gpu(batch, fp16=fp16, bf16=bf16)
loss, _, logging_output = criterion(model, batch)
if output_keys is None:
output_keys = logging_output.keys()
else:
assert output_keys is not None, (
f'Invalid iters num: {len(valid_loader)}')
logging_output = {k: 0 for k in output_keys}
logging_output['ignore'] = int(is_empty_batch)
metrics.log_scalars(logging_output)
metrics.all_reduce(world_size)
metrics.accumulate()
metrics.finish_val(scope=scope)
logger.log(() if epoch is None else (epoch,), metrics, scope=scope,
tb_iter=step)
val_losses.append(metrics.metrics[scope]['loss'])
if 'wer' in metrics.metrics[scope]:
val_wer.append(metrics.metrics[scope]['wer'])
model.train()
criterion.train()
return val_losses, val_wer
def main():
parser = argparse.ArgumentParser(
description='wav2vec 2.0 Deep Learning Example')
wav2vec2.arg_parser.populate(parser)
args = parser.parse_args()
assert not args.bf16 or args.fp32_pos_conv, (
"bfloat16 requires casting positional convolutions to float32")
if args.mode == 'finetune':
wav2vec2.utils.update_args_for_finetuning(args, args.w2v_path)
head = lambda list_: list_[0] # fairseq compat, scalars wrapped w/ lists
args.lr = head(args.lr)
args.update_freq = head(args.update_freq)
assert(torch.cuda.is_available())
torch.backends.cudnn.benchmark = args.cudnn_benchmark
world_size = setup_distributed(args.local_rank)
args.world_size = world_size # For FP16Optimizer
print_once(f"World size: {world_size}")
assert args.seed is not None, (
"Random seed is used to ensure same model weights across all devices. "
"To allow None, draw a seed and synchronize across devices")
set_torch_seed(args.seed + args.local_rank)
np.random.seed(args.seed + args.local_rank)
random.seed(args.seed + args.local_rank)
pre_training = (args.mode == 'pretrain')
checkpointer = Checkpointer(args, 'wav2vec2')
if not pre_training:
assert args.labels or checkpointer.last_state, \
"Supply output labels or resume from a checkpoint."
if checkpointer.last_state is not None:
f = io.StringIO(checkpointer.last_state["output_labels"])
else:
f = open(Path(args.data, f"dict.{args.labels}.txt"))
target_dictionary = Dictionary.load(f)
f.seek(0)
checkpointer.output_labels = f.read()
f.close()
Metrics = W2v2FineTuningMetrics
criterion = CTCCriterion(target_dictionary, post_process='letter')
else:
target_dictionary = None
Metrics = W2v2Metrics
criterion = Wav2vecCriterion(args)
kw = {'benchmark_epochs': args.benchmark_epochs_num, 'cuda': not args.cpu}
metrics = Metrics(**kw)
val_metrics = Metrics(scopes=['val'], **kw)
val_ema_metrics = Metrics(scopes=['val_ema'], **kw)
init_logger(args.output_dir, args.log_file, args.ema)
logger.log_parameters(vars(args), tb_subset='train')
assert args.update_freq >= 1
model, seq_gen, tokenizer = build_model(args, args.mode, target_dictionary)
model.cuda()
print_once(f'Model size: {num_weights(model) / 10 ** 6:.1f}M params\n')
print_once('Setting up datasets...')
train_dataset = load_dataset(args.train_subset, args, target_dictionary,
with_labels=not pre_training, training=True)
valid_dataset = load_dataset(args.valid_subset, args, target_dictionary,
with_labels=not pre_training, training=False)
# Future-proof for adoption of native AMP
scaler = torch.cuda.amp.GradScaler(enabled=False)
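    # With enabled=False, GradScaler.scale(), .step() and .update() are effectively pass-throughs
    # (no loss scaling is applied), keeping the training loop ready for native AMP.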
lr_kw = {'initial_lr_scale': args.initial_lr_scale,
'final_lr_scale': args.final_lr_scale,
'warmup_steps': args.warmup_updates,
'hold_steps': args.hold_updates,
'num_steps': args.max_update,
'lr': args.lr}
if args.lr_policy == 'poly':
adjust_lr = partial(lr_poly_policy, power=args.lr_poly_power, **lr_kw)
elif args.lr_policy == 'exp':
adjust_lr = partial(lr_exp_policy, decay=args.lr_exp_decay, **lr_kw)
else:
raise ValueError
assert args.fp16 + args.bf16 <= 1, (
"Select a single mechanism for mixed precision training.")
checkpointer.maybe_load_state(model=model)
if args.bf16:
model.to(dtype=torch.bfloat16)
if args.fp16:
model.half()
if (args.fp16 or args.bf16) and args.fp32_pos_conv:
w2v = model.w2v_encoder.w2v_model if args.mode == 'finetune' else model
w2v.encoder.pos_conv.to(dtype=torch.float32)
multi_gpu = world_size > 1
if multi_gpu:
model = DistributedDataParallel(model, device_ids=[args.local_rank],
output_device=args.local_rank,
find_unused_parameters=True)
model = ModuleProxyWrapper(model)
args.bf16_disable_loss_scaler = False # TODO Add support in the future
optim = get_optimizer(model, args)
adjust_lr(1, optim)
if args.ema > 0.0:
raise NotImplementedError(
"EMA disabled, see https://github.com/pytorch/pytorch/issues/28594"
)
else:
ema_model = None
train_state = {'step': 0, 'epoch': 1, 'best_val_loss': float('inf'),
'best_val_wer': float('inf')}
checkpointer.maybe_load_state(ema_model=ema_model, optimizer=optim,
scaler=scaler, train_state=train_state)
shard_id = int(os.getenv("RANK", args.local_rank))
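    # Each rank reads its own shard of the dataset: num_shards equals the world size
    # and shard_id is this rank's global index.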
train_loader, sampler = get_batch_iterator(
train_dataset,
True,
max_tokens=args.max_tokens,
max_sentences=args.batch_size,
max_positions=(args.max_tokens, args.max_tokens),
ignore_invalid_inputs=True,
required_batch_size_multiple=args.required_batch_size_multiple,
seed=args.seed,
num_shards=world_size,
shard_id=shard_id,
num_workers=args.num_workers,
num_concat_batches=args.num_concat_batches)
valid_loader, _ = get_batch_iterator(
valid_dataset,
False,
max_tokens=args.max_tokens_valid,
max_sentences=args.batch_size_valid,
max_positions=(sys.maxsize, sys.maxsize),
ignore_invalid_inputs=args.skip_invalid_size_inputs_valid_test,
required_batch_size_multiple=args.required_batch_size_multiple,
seed=args.seed,
num_shards=world_size,
shard_id=shard_id,
num_workers=args.num_workers,
num_concat_batches=args.num_concat_batches)
steps_per_epoch = len(train_loader) // args.update_freq
checkpointer.maybe_load_state(train_loader=train_loader)
checkpointer.last_state = None
print_once(model)
model.train()
step, epoch = train_state['step'], train_state['epoch']
start_step = step
start_epoch = epoch
while step < args.max_update: # training loop
set_torch_seed(args.seed + step) # reproducibility after resuming
metrics.start_epoch(epoch)
sampler.set_epoch(epoch)
optim.zero_grad()
itr = islice(train_loader, steps_per_epoch * args.update_freq)
for batch, accum_batches in zip(itr, cycle(range(args.update_freq))):
if accum_batches == 0:
step += 1
model.set_num_updates(step)
metrics.start_iter(accum_batches)
to_gpu(batch, fp16=args.fp16, bf16=args.bf16)
# use context manager to prevent redundant sync of gradients
if (multi_gpu and accum_batches + 1 < args.update_freq):
ctx = model.no_sync()
else:
ctx = empty_context()
with ctx:
loss, _, logging_output = criterion(model, batch)
if args.fp16 or args.bf16:
optim.backward(loss)
else:
scaler.scale(loss).backward()
# at this point, loss is scaled by loss_scale
# and averaged over different devices (because of DDP) (*)
metrics.log_scalars(logging_output)
if (accum_batches + 1) % args.update_freq == 0:
metrics.all_reduce(world_size)
                # scale gradients by world_size (to restore the sum of
                # gradients - see (*)) and divide by sample_size to average
                # over tokens
grads_mult_factor = world_size / metrics.partials['sample_size']
if args.optimizer == 'adam' and not (args.fp16 or args.bf16):
# adam and non-amp optimizer - can use 'scale' kwarg for step
# and defer grad multiplication
pass
elif args.fp16 or args.bf16:
optim.multiply_grads(grads_mult_factor)
else:
multiply_grads(optim, grads_mult_factor)
try:
if args.fp16 or args.bf16:
# calculate grad norm, maybe clip
grad_norm = optim.clip_grad_norm(args.clip_norm)
if args.optimizer == 'adam' and not (args.fp16 or args.bf16):
scaler.step(optim, scale=1. / grads_mult_factor)
else:
scaler.step(optim)
scaler.update()
model.set_num_updates(step)
except OverflowError as e:
print_once(f"Grad overflow, ignoring grad. {str(e)}")
grad_norm = torch.tensor(0.0).cuda()
optim.zero_grad()
if args.ema > 0.0:
apply_multi_tensor_ema(args.ema, *mt_ema_params)
if args.fp16 or args.bf16:
metrics['loss_scale'] = optim.scaler.loss_scale
metrics['lr'] = optim.param_groups[0]['lr']
metrics.accumulate()
metrics.finish_iter()
if step % args.log_frequency == 0:
metrics.finish_logging_interval()
epoch_step = step % steps_per_epoch or steps_per_epoch
logger.log((epoch, epoch_step, steps_per_epoch),
metrics, scope='train', tb_iter=step)
adjust_lr(step, optim)
if step >= args.max_update:
break
            # NOTE this will break when resuming training on a different dataset
assert step <= steps_per_epoch * epoch
# end of iter
metrics.finish_epoch()
logger.log((epoch,), metrics, scope='train_avg', flush_log=True,
tb_iter=step)
print_once('Validating...')
val_losses, val_wer = validate(
epoch, step, valid_loader, model, ema_model, criterion,
val_metrics, val_ema_metrics, world_size, args.fp16, args.bf16)
# save best ckpt based on non-EMA val results
checkpointer.maybe_save(model, ema_model, optim, scaler, train_state,
step, epoch, val_losses, val_wer, args)
if 0 < args.epochs_this_job <= epoch + 1 - start_epoch:
print_once(f'Reached {args.epochs_this_job} epochs in this run.')
break
if step >= args.max_update:
print_once(f'Reached {step} total updates.')
break
epoch += 1 # end of epoch
# finished training
if step > start_step:
logger.log((), metrics, scope='train_benchmark')
logger.log((), val_metrics, scope='val')
logger.log((), val_ema_metrics, scope='val_ema', flush_log=True)
print_once(f'Finished after reaching update {step}.')
if __name__ == "__main__":
main()
| DeepLearningExamples-master | PyTorch/SpeechRecognition/wav2vec2/train.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import io
import math
import os
import random
import time
import warnings
from argparse import ArgumentParser
from heapq import nlargest
from itertools import chain, repeat
from pathlib import Path
from tqdm import tqdm
import dllogger
import numpy as np
import torch
import torch.distributed as distrib
from dllogger import JSONStreamBackend, StdOutBackend, Verbosity
import wav2vec2.arg_parser
import wav2vec2.utils
import common.fairseq.utils as utils
from common.fairseq.data import Dictionary
from common.helpers import (gather_predictions, gather_transcripts,
load_wrapped_state, process_evaluation_epoch)
from common.tb_dllogger import stdout_metric_format, unique_log_fpath
from common.utils import print_once
from torch.utils.data import DataLoader, DistributedSampler
from wav2vec2.logging import init_infer_metadata
def durs_to_percentiles(durations, ratios):
durations = np.asarray(durations) * 1000 # in ms
latency = durations
latency = latency[5:]
mean_latency = np.mean(latency)
latency_worst = nlargest(math.ceil((1 - min(ratios)) * len(latency)),
latency)
latency_ranges = get_percentile(ratios, latency_worst, len(latency))
latency_ranges[0.5] = mean_latency
return latency_ranges
def get_percentile(ratios, arr, nsamples):
res = {}
for a in ratios:
idx = max(int(nsamples * (1 - a)), 0)
res[a] = arr[idx]
return res
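# Illustrative note (not part of the original script): the first 5
# measurements are dropped as warm-up, the mean latency is stored under the
# 0.5 key, and each ratio r in e.g. (0.9, 0.95, 0.99) indexes into the
# `nlargest` slice, so r = 0.99 picks a value close to the worst observed
# latency.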
def fp_convert_batch(batch, precision):
dt = {'fp32': torch.float32, 'fp16': torch.half,
'bf16': torch.bfloat16}[precision]
def maybe_cast(t):
if t.dtype is torch.float32:
return t.to(dtype=dt)
return t
return utils.apply_to_sample(maybe_cast, batch)
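# Note (added for clarity): only float32 tensors are cast, so integer fields
# of the batch (ids, lengths, targets) keep their original dtypes.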
def main():
parser = ArgumentParser(description='wav2vec2.0 inference')
wav2vec2.arg_parser.populate_infer(parser)
args = parser.parse_args()
ckpt = torch.load(args.w2v_path, map_location=torch.device("cpu"))
train_args = wav2vec2.utils.get_ckpt_args(ckpt)
is_nv_ckpt = "mode" in train_args
if is_nv_ckpt:
print("Loaded a model trained with NVIDIA DLE")
args.fp32_pos_conv = train_args.get("fp32_pos_conv",
args.fp16 or args.bf16)
args.fp32_conv_norms = train_args.get("fp32_conv_norms", args.fp16)
else:
args.fp32_pos_conv = args.fp16
args.fp32_conv_norms = args.fp16
args.fp32_pos_conv = True
args.fp32_conv_norms = True
log_fpath = args.log_file or str(Path(args.output_dir, 'nvlog_infer.json'))
dllogger.init(backends=[
JSONStreamBackend(Verbosity.DEFAULT, log_fpath, append=True),
JSONStreamBackend(Verbosity.DEFAULT, unique_log_fpath(log_fpath)),
StdOutBackend(Verbosity.VERBOSE, metric_format=stdout_metric_format)
])
[dllogger.log("PARAMETER", {k: v}) for k, v in vars(args).items()]
init_infer_metadata()
if ((train_args.get("fp16", False) or train_args.get("amp", False))
and args.bf16):
warnings.warn('Using FP16 ckpts in BF16 precision.')
if train_args.get("bf16", False) and args.fp16:
warnings.warn('Using BF16 ckpts in FP16 precision.')
# load output labels - either from a file, or stored inside an nv ckpt
assert args.labels_path is not None or is_nv_ckpt
if args.labels_path is None:
f = io.StringIO(ckpt["output_labels"])
else:
f = open(args.labels_path)
target_dictionary = Dictionary.load(f)
f.close()
w2v_path_for_args = args.w2v_path_for_args or args.w2v_path
wav2vec2.utils.update_args_for_finetuning(args, w2v_path_for_args)
# "default" GroupNorm might leak padding
args.masked_feature_extractor = True
if args.torchscript:
from common.fairseq.modules import layer_norm
layer_norm.TORCHSCRIPT = True
model, *_ = wav2vec2.utils.build_model(args, "infer", target_dictionary)
load_wrapped_state(model, ckpt["model"])
model.w2v_encoder.w2v_model.remove_conv_wn()
model.w2v_encoder.w2v_model.feature_extractor.forward = \
model.w2v_encoder.w2v_model.feature_extractor.masked_forward
model.w2v_encoder.forward = model.w2v_encoder.infer
model.w2v_encoder.w2v_model.forward = model.w2v_encoder.w2v_model.infer
if args.cpu:
device = torch.device('cpu')
else:
assert torch.cuda.is_available()
device = torch.device('cuda')
torch.backends.cudnn.benchmark = args.cudnn_benchmark
if args.seed is not None:
torch.manual_seed(args.seed + args.local_rank)
np.random.seed(args.seed + args.local_rank)
random.seed(args.seed + args.local_rank)
# set up distributed training
multi_gpu = not args.cpu and int(os.environ.get('WORLD_SIZE', 1)) > 1
if multi_gpu:
torch.cuda.set_device(args.local_rank)
distrib.init_process_group(backend='nccl', init_method='env://')
print_once(f'Inference with {distrib.get_world_size()} GPUs')
measure_perf = args.steps > 0
# Compliance with fairseq dataloader
assert args.batch_size is not None
args.min_sample_size = None
args.max_sample_size = None
if args.transcribe_wav or args.transcribe_filelist:
assert args.max_duration is None and not measure_perf
assert not (args.transcribe_wav and args.transcribe_filelist)
        assert args.labels is None, "Labels won't be used during transcribing"
assert not multi_gpu, (
"multigpu is currently supported only for WER/perf measurements")
if args.transcribe_wav:
dataset = wav2vec2.utils.single_audio_dataset(args.transcribe_wav,
args)
else:
dataset = wav2vec2.utils.load_dataset(args.transcribe_filelist,
args, target_dictionary)
data_loader = DataLoader(
dataset=dataset,
batch_size=args.batch_size,
shuffle=False,
collate_fn=dataset.collater,
num_workers=args.num_workers,
pin_memory=True,
persistent_workers=args.num_workers > 0,
drop_last=False,
)
else: # compute WER or measure perf
assert args.labels is not None or measure_perf
dataset = wav2vec2.utils.load_dataset(args.valid_subset, args,
target_dictionary,
with_labels=True)
sampler = DistributedSampler(
dataset,
shuffle=False,
drop_last=False
) if multi_gpu else None
data_loader = DataLoader(
dataset=dataset,
batch_size=args.batch_size,
sampler=sampler,
shuffle=False,
collate_fn=dataset.collater,
num_workers=args.num_workers,
pin_memory=True,
persistent_workers=args.num_workers > 0,
drop_last=(True if measure_perf else False),
)
model.to(device)
model.eval()
assert args.amp == args.fp16, 'During inference these are equivalent'
if args.fp16:
model = model.half()
if args.bf16:
model = model.to(dtype=torch.bfloat16)
if (args.fp16 or args.bf16) and args.fp32_pos_conv:
model.w2v_encoder.w2v_model.encoder.pos_conv.to(dtype=torch.float32)
if args.torchscript:
print("Attempting TorchScript export...")
model = torch.jit.script(model)
agg = {'txts': [], 'preds': [], 'logits': [], 'ids': []}
dur = {'data': [], 'dnn': [], 'data+dnn': []}
looped_loader = chain.from_iterable(repeat(data_loader))
sync = lambda: torch.cuda.synchronize() if device.type == 'cuda' else None
steps = args.steps + args.warmup_steps or len(data_loader)
desc = 'warmup' if args.warmup_steps > 0 else 'inference'
pbar = tqdm(looped_loader, initial=1, total=steps, desc=desc)
for it, batch in enumerate(pbar):
if it == args.warmup_steps:
pbar.set_description('inference')
batch = utils.move_to_cuda(batch)
sync()
t1 = time.time()
if args.fp16:
batch = fp_convert_batch(batch, 'fp16')
if args.bf16:
batch = fp_convert_batch(batch, 'bf16')
with torch.no_grad():
enc_out, padding_mask = model(batch["net_input"]["source"],
batch["net_input"]["padding_mask"])
logp = model.get_normalized_probs(enc_out,
padding_mask,
log_probs=True).contiguous()
# greedy decoding
preds = logp.argmax(dim=-1, keepdim=False).int()
sync()
t2 = time.time()
# burn-in period; wait for a new loader due to num_workers
if it >= 1 and (args.steps == 0 or it >= args.warmup_steps):
dur['data'].append(t1 - t0)
dur['dnn'].append(t2 - t1)
dur['data+dnn'].append(t2 - t0)
preds = preds.transpose(0, 1)
agg['preds'] += gather_predictions([preds],
target_dictionary,
blank_id=0)
agg['logits'].append(logp)
if 'target' in batch:
agg['txts'] += gather_transcripts([batch['target']],
[batch['target_lengths']],
target_dictionary)
if multi_gpu:
# ids are needed to remove duplicates in multi_gpu inference
agg['ids'] += batch['id'].tolist()
if it + 1 == steps:
break
sync()
t0 = time.time()
tdict = target_dictionary
agg['preds'] = [pred.replace(tdict[tdict.nspecial], ' ')
for pred in agg['preds']]
agg['txts'] = [txt.replace(tdict[tdict.nspecial], ' ')
for txt in agg['txts']]
# communicate the results
if args.transcribe_wav or args.transcribe_filelist:
for idx, p in enumerate(agg['preds']):
print_once(f'Prediction {idx + 1: >3}: {p}')
elif args.valid_subset and not measure_perf:
wer, _ = process_evaluation_epoch(agg)
if not multi_gpu or distrib.get_rank() == 0:
dllogger.log(step=(), data={'eval_wer': 100 * wer})
if args.save_predictions and (not multi_gpu or distrib.get_rank() == 0):
with open(args.save_predictions, 'w') as f:
f.write('\n'.join(agg['preds']))
if args.save_logits and (not multi_gpu or distrib.get_rank() == 0):
logits = torch.cat(agg['logits'], dim=0).cpu()
torch.save(logits, args.save_logits)
# report timings
if len(dur['data']) >= 20 and (not multi_gpu or distrib.get_rank() == 0):
ratios = [0.9, 0.95, 0.99]
for stage in dur:
lat = durs_to_percentiles(dur[stage], ratios)
for k in [0.99, 0.95, 0.9, 0.5]:
k_ = str(k).replace('.', '_')
dllogger.log(step=(), data={f'{stage}_latency_{k_}': lat[k]})
else:
print_once('Not enough samples to measure latencies.')
if __name__ == "__main__":
main()
| DeepLearningExamples-master | PyTorch/SpeechRecognition/wav2vec2/inference.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from os.path import commonpath, join, relpath
import sys
def load_tsv(fpath):
with open(fpath) as f:
return [l.split() for l in f]
tsvs = [load_tsv(tsv) for tsv in sys.argv[1:]]
root = commonpath([t[0][0] for t in tsvs])
tsvs = [[(relpath(join(lines[0][0], p), root), frames) for p, frames in lines[1:]]
for lines in tsvs]
print(root)
for lines in tsvs:
for line in lines:
print("\t".join(line))
| DeepLearningExamples-master | PyTorch/SpeechRecognition/wav2vec2/utils/combine_filelists.py |
#!/usr/bin/env python3
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import glob
import multiprocessing
import json
import pandas as pd
from preprocessing_utils import parallel_preprocess
parser = argparse.ArgumentParser(description='Preprocess LibriSpeech.')
parser.add_argument('--input_dir', type=str, required=True,
help='LibriSpeech collection input dir')
parser.add_argument('--dest_dir', type=str, required=True,
help='Output dir')
parser.add_argument('--output_json', type=str, default='./',
help='name of the output json file.')
parser.add_argument('-s', '--speed', type=float, nargs='*',
help='Speed perturbation ratio')
parser.add_argument('--target_sr', type=int, default=None,
help='Target sample rate. '
'defaults to the input sample rate')
parser.add_argument('--overwrite', action='store_true',
help='Overwrite file if exists')
parser.add_argument('--parallel', type=int, default=multiprocessing.cpu_count(),
                    help='Number of processes to use when processing audio files')
args = parser.parse_args()
args.input_dir = args.input_dir.rstrip('/')
args.dest_dir = args.dest_dir.rstrip('/')
def build_input_arr(input_dir):
txt_files = glob.glob(os.path.join(input_dir, '**', '*.trans.txt'),
recursive=True)
input_data = []
for txt_file in txt_files:
rel_path = os.path.relpath(txt_file, input_dir)
with open(txt_file) as fp:
for line in fp:
fname, _, transcript = line.partition(' ')
input_data.append(dict(input_relpath=os.path.dirname(rel_path),
input_fname=fname+'.flac',
transcript=transcript))
return input_data
print("[%s] Scaning input dir..." % args.output_json)
dataset = build_input_arr(input_dir=args.input_dir)
print("[%s] Converting audio files..." % args.output_json)
dataset = parallel_preprocess(dataset=dataset,
input_dir=args.input_dir,
dest_dir=args.dest_dir,
target_sr=args.target_sr,
speed=args.speed,
overwrite=args.overwrite,
parallel=args.parallel)
print("[%s] Generating json..." % args.output_json)
df = pd.DataFrame(dataset, dtype=object)
# Save JSON with plain Python; df.to_json() produces backslashes in file paths
dataset = df.to_dict(orient='records')
with open(args.output_json, 'w') as fp:
json.dump(dataset, fp, indent=2)
| DeepLearningExamples-master | PyTorch/SpeechRecognition/wav2vec2/utils/convert_librispeech.py |
#!/usr/bin/env python3
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import hashlib
import os
import requests
import tarfile
from tqdm import tqdm
urls = {
"dev-clean": ("http://www.openslr.org/resources/12/dev-clean.tar.gz", "42e2234ba48799c1f50f24a7926300a1"),
"dev-other": ("http://www.openslr.org/resources/12/dev-other.tar.gz", "c8d0bcc9cca99d4f8b62fcc847357931"),
"test-clean": ("http://www.openslr.org/resources/12/test-clean.tar.gz", "32fa31d27d2e1cad72775fee3f4849a9"),
"test-other": ("http://www.openslr.org/resources/12/test-other.tar.gz", "fb5a50374b501bb3bac4815ee91d3135"),
"train-clean-100": ("http://www.openslr.org/resources/12/train-clean-100.tar.gz", "2a93770f6d5c6c964bc36631d331a522"),
"train-clean-360": ("http://www.openslr.org/resources/12/train-clean-360.tar.gz", "c0e676e450a7ff2f54aeade5171606fa"),
"train-other-500": ("http://www.openslr.org/resources/12/train-other-500.tar.gz", "d1a0fd59409feb2c614ce4d30c387708"),
}
def download_file(url, dest_folder, fname, overwrite=False):
fpath = os.path.join(dest_folder, fname)
if os.path.isfile(fpath):
if overwrite:
print("Overwriting existing file")
else:
print("File exists, skipping download.")
return
tmp_fpath = fpath + '.tmp'
if not os.path.exists(os.path.dirname(tmp_fpath)):
os.makedirs(os.path.dirname(tmp_fpath))
r = requests.get(url, stream=True)
file_size = int(r.headers['Content-Length'])
chunk_size = 1024 * 1024 # 1MB
total_chunks = int(file_size / chunk_size)
with open(tmp_fpath, 'wb') as fp:
content_iterator = r.iter_content(chunk_size=chunk_size)
chunks = tqdm(content_iterator, total=total_chunks,
unit='MB', desc=fpath, leave=True)
for chunk in chunks:
fp.write(chunk)
os.rename(tmp_fpath, fpath)
def md5_checksum(fpath, target_hash):
file_hash = hashlib.md5()
with open(fpath, "rb") as fp:
for chunk in iter(lambda: fp.read(1024*1024), b""):
file_hash.update(chunk)
return file_hash.hexdigest() == target_hash
def extract(fpath, dest_folder):
if fpath.endswith('.tar.gz'):
mode = 'r:gz'
elif fpath.endswith('.tar'):
mode = 'r:'
else:
        raise IOError('fpath has unknown extension: %s' % fpath)
with tarfile.open(fpath, mode) as tar:
members = tar.getmembers()
for member in tqdm(iterable=members, total=len(members), leave=True):
tar.extract(path=dest_folder, member=member)
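# Example invocation (illustrative; paths are placeholders):
#   python download_librispeech.py /datasets/LibriSpeech_tars -e /datasets \
#       --subsets dev-clean test-clean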
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description='Download, verify and extract dataset files')
parser.add_argument('dest', type=str,
                        help='Download destination folder.')
parser.add_argument('-e', type=str, default=None,
                        help='Extraction destination folder. Defaults to the download folder if not provided')
parser.add_argument('--skip_download', action='store_true',
help='Skip downloading the files')
parser.add_argument('--skip_checksum', action='store_true',
help='Skip checksum')
parser.add_argument('--skip_extract', action='store_true',
help='Skip extracting files')
parser.add_argument('--subsets', type=str, nargs="+", choices=list(urls.keys()),
default=list(urls.keys()), help='Subsets to download')
args = parser.parse_args()
args.e = args.e or args.dest
print("\nNOTE: Depending on the selected subsets and connection bandwith "
"this process might take a few hours.\n")
for subset in args.subsets:
url, md5 = urls[subset]
if not args.skip_download:
fname = url.split('/')[-1]
print("Downloading %s:" % fname)
download_file(url=url, dest_folder=args.dest, fname=fname)
else:
print("Skipping file download")
if not args.skip_checksum:
fname = url.split('/')[-1]
fpath = os.path.join(args.dest, fname)
print("Verifing %s: " % fname, end='')
ret = md5_checksum(fpath=fpath, target_hash=md5)
print("Passed" if ret else "Failed")
else:
print("Skipping checksum")
if not args.skip_extract:
fname = url.split('/')[-1]
fpath = os.path.join(args.dest, fname)
print("Decompressing %s:" % fpath)
extract(fpath=fpath, dest_folder=args.e)
else:
print("Skipping file extraction")
| DeepLearningExamples-master | PyTorch/SpeechRecognition/wav2vec2/utils/download_librispeech.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections import Counter
import sys
in_ltr = sys.argv[1]
out_dict = sys.argv[2]
counter = Counter()
with open(in_ltr) as ltr:
for line in ltr:
counter.update(line[:-1].replace(" ", ""))
with open(out_dict, "w") as out:
for letter, cnt in counter.most_common():
out.write(f"{letter} {cnt}\n")
| DeepLearningExamples-master | PyTorch/SpeechRecognition/wav2vec2/utils/generate_dictionary.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
from pathlib import Path
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument(
'--manifest', type=Path, nargs='+',
help='w2v2 manifest files with <ID> <duration> on every line')
parser.add_argument(
'--alignments', type=Path,
help='CPC_audio alignments with <ID> <PHONE_ID_LIST> on every line')
parser.add_argument(
'--ids', type=Path,
help='List of IDs for this split (train/test, one per line)')
parser.add_argument(
'--out', type=Path,
help='Output manifest fpath')
args = parser.parse_args()
header = None
fpaths = {}
durs = {}
alis = {}
ids = []
out = []
for fpath in args.manifest:
print(f'Loading {fpath}')
with open(fpath) as f:
for i, line in enumerate(f):
if i == 0:
header = line.strip()
continue
fp, dur = line.split()
id = Path(fp).stem
fpaths[id] = fp
durs[id] = dur # int(dur)
with open(args.alignments) as f:
for line in f:
id, ph = line.strip().split(' ', 1)
alis[id] = ph
ids = [line.strip() for line in open(args.ids)]
for id in ids:
fp = fpaths[id]
d = durs[id]
a = alis[id]
out.append([fp, d, a])
with open(args.out.with_suffix('.tsv'), 'w') as f:
f.write(header + '\n')
for o in out:
f.write('\t'.join(o[:2]) + '\n')
with open(args.out.with_suffix('.ph'), 'w') as f:
for o in out:
f.write(o[2] + '\n')
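# Note (added for clarity): two line-aligned outputs are written - "<out>.tsv"
# with the manifest header plus <path, duration> rows, and "<out>.ph" with the
# corresponding phone-ID sequences.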
| DeepLearningExamples-master | PyTorch/SpeechRecognition/wav2vec2/utils/combine_w2v2_filelist_with_phone_alignments.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
from pathlib import Path
import soundfile
import tqdm
parser = argparse.ArgumentParser(description="Write .tsv dataset filelists")
parser.add_argument("dir", type=Path, help="Dataset directory")
parser.add_argument("output_tsv", type=Path, help="Output .tsv file path")
parser.add_argument("--extension", type=str, default="flac",
help="Find files with this extension")
args = parser.parse_args()
num_files = 0
print(f"Collecting .{args.extension} files in {args.dir} ...")
with open(args.output_tsv, "w") as f:
f.write(f"{args.dir}\n")
for fname in tqdm.tqdm(args.dir.rglob("*." + args.extension)):
num_frames = soundfile.info(fname).frames
f.write(f"{fname.relative_to(args.dir)}\t{num_frames}\n")
num_files += 1
print(f"Found {num_files} files for {args.output_tsv} .")
| DeepLearningExamples-master | PyTorch/SpeechRecognition/wav2vec2/utils/generate_filelist.py |
DeepLearningExamples-master | PyTorch/SpeechRecognition/wav2vec2/utils/__init__.py |
|
#!/usr/bin/env python3
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import multiprocessing
import functools
import sox
from tqdm import tqdm
def preprocess(data, input_dir, dest_dir, target_sr=None, speed=None,
overwrite=True):
speed = speed or []
speed.append(1)
    speed = list(set(speed)) # Make unique
input_fname = os.path.join(input_dir,
data['input_relpath'],
data['input_fname'])
input_sr = sox.file_info.sample_rate(input_fname)
target_sr = target_sr or input_sr
os.makedirs(os.path.join(dest_dir, data['input_relpath']), exist_ok=True)
output_dict = {}
output_dict['transcript'] = data['transcript'].lower().strip()
output_dict['files'] = []
fname = os.path.splitext(data['input_fname'])[0]
for s in speed:
output_fname = fname + '{}.wav'.format('' if s == 1 else '-{}'.format(s))
output_fpath = os.path.join(dest_dir,
data['input_relpath'],
output_fname)
if not os.path.exists(output_fpath) or overwrite:
cbn = sox.Transformer().speed(factor=s).convert(target_sr)
cbn.build(input_fname, output_fpath)
file_info = sox.file_info.info(output_fpath)
file_info['fname'] = os.path.join(os.path.basename(dest_dir),
data['input_relpath'],
output_fname)
file_info['speed'] = s
output_dict['files'].append(file_info)
if s == 1:
file_info = sox.file_info.info(output_fpath)
output_dict['original_duration'] = file_info['duration']
output_dict['original_num_samples'] = file_info['num_samples']
return output_dict
def parallel_preprocess(dataset, input_dir, dest_dir, target_sr, speed,
overwrite, parallel):
with multiprocessing.Pool(parallel) as p:
func = functools.partial(preprocess, input_dir=input_dir,
dest_dir=dest_dir, target_sr=target_sr,
speed=speed, overwrite=overwrite)
dataset = list(tqdm(p.imap(func, dataset), total=len(dataset)))
return dataset
| DeepLearningExamples-master | PyTorch/SpeechRecognition/wav2vec2/utils/preprocessing_utils.py |
#!/usr/bin/env python3
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import hashlib
import requests
import os
import tarfile
import tqdm
def download_file(url, dest_folder, fname, overwrite=False):
fpath = os.path.join(dest_folder, fname)
if os.path.isfile(fpath):
if overwrite:
print("Overwriting existing file")
else:
print("File exists, skipping download.")
return
tmp_fpath = fpath + '.tmp'
if not os.path.exists(os.path.dirname(tmp_fpath)):
os.makedirs(os.path.dirname(tmp_fpath))
r = requests.get(url, stream=True)
file_size = int(r.headers['Content-Length'])
chunk_size = 1024 * 1024 # 1MB
total_chunks = int(file_size / chunk_size)
with open(tmp_fpath, 'wb') as fp:
content_iterator = r.iter_content(chunk_size=chunk_size)
chunks = tqdm.tqdm(content_iterator, total=total_chunks,
unit='MB', desc=fpath, leave=True)
for chunk in chunks:
fp.write(chunk)
os.rename(tmp_fpath, fpath)
def md5_checksum(fpath, target_hash):
file_hash = hashlib.md5()
with open(fpath, "rb") as fp:
for chunk in iter(lambda: fp.read(1024*1024), b""):
file_hash.update(chunk)
return file_hash.hexdigest() == target_hash
def extract(fpath, dest_folder):
if fpath.endswith('.tar.gz'):
mode = 'r:gz'
elif fpath.endswith('.tar'):
mode = 'r:'
else:
        raise IOError('fpath has unknown extension: %s' % fpath)
with tarfile.open(fpath, mode) as tar:
members = tar.getmembers()
for member in tqdm.tqdm(iterable=members, total=len(members), leave=True):
tar.extract(path=dest_folder, member=member)
| DeepLearningExamples-master | PyTorch/SpeechRecognition/wav2vec2/utils/download_utils.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
from itertools import chain
from pathlib import Path
def load_lines(fpath):
with open(fpath) as f:
return [line for line in f]
parser = argparse.ArgumentParser()
parser.add_argument('ls_ft', type=Path,
help='Libri-light librispeech_finetuning dir')
parser.add_argument('ls_filelists', type=Path,
help='Directory with .tsv .wrd etc files for LibriSpeech full 960')
parser.add_argument('out', type=Path, help='Output directory')
args = parser.parse_args()
# Load LS
tsv = load_lines(args.ls_filelists / "train-full-960.tsv")
wrd = load_lines(args.ls_filelists / "train-full-960.wrd")
ltr = load_lines(args.ls_filelists / "train-full-960.ltr")
assert len(tsv) == len(wrd) + 1
assert len(ltr) == len(wrd)
files = {}
for path_frames, w, l in zip(tsv[1:], wrd, ltr):
path, _ = path_frames.split("\t")
key = Path(path).stem
files[key] = (path_frames, w, l)
print(f"Loaded {len(files)} entries from {args.ls_filelists}/train-full-960")
# Load LL-LS
files_1h = list((args.ls_ft / "1h").rglob("*.flac"))
files_9h = list((args.ls_ft / "9h").rglob("*.flac"))
print(f"Found {len(files_1h)} files in the 1h dataset")
print(f"Found {len(files_9h)} files in the 9h dataset")
for name, file_iter in [("train-1h", files_1h),
("train-10h", chain(files_1h, files_9h))]:
with open(args.out / f"{name}.tsv", "w") as ftsv, \
open(args.out / f"{name}.wrd", "w") as fwrd, \
open(args.out / f"{name}.ltr", "w") as fltr:
nframes = 0
ftsv.write(tsv[0])
for fpath in file_iter:
key = fpath.stem
t, w, l = files[key]
ftsv.write(t)
fwrd.write(w)
fltr.write(l)
nframes += int(t.split()[1])
print(f"Written {nframes} frames ({nframes / 16000 / 60 / 60:.2f} h at 16kHz)")
| DeepLearningExamples-master | PyTorch/SpeechRecognition/wav2vec2/utils/generate_1h_10h_datasets.py |
#!/usr/bin/env python3
#
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Helper script to generate letter (.ltr) and word (.wrd) label files for a flashlight (previously called wav2letter++) dataset
"""
import argparse
import os
def main():
parser = argparse.ArgumentParser()
parser.add_argument("tsv")
parser.add_argument("--output-dir", required=True)
parser.add_argument("--output-name", required=True)
args = parser.parse_args()
os.makedirs(args.output_dir, exist_ok=True)
transcriptions = {}
with open(args.tsv, "r") as tsv, open(
os.path.join(args.output_dir, args.output_name + ".ltr"), "w"
) as ltr_out, open(
os.path.join(args.output_dir, args.output_name + ".wrd"), "w"
) as wrd_out:
root = next(tsv).strip()
for line in tsv:
line = line.strip()
dir = os.path.dirname(line)
if dir not in transcriptions:
parts = dir.split(os.path.sep)
trans_path = f"{parts[-2]}-{parts[-1]}.trans.txt"
path = os.path.join(root, dir, trans_path)
assert os.path.exists(path), f"File {path} does not exist."
texts = {}
with open(path, "r") as trans_f:
for tline in trans_f:
items = tline.strip().split()
texts[items[0]] = " ".join(items[1:])
transcriptions[dir] = texts
part = os.path.basename(line).split(".")[0]
assert part in transcriptions[dir]
print(transcriptions[dir][part], file=wrd_out)
print(
" ".join(list(transcriptions[dir][part].replace(" ", "|"))) + " |",
file=ltr_out,
)
if __name__ == "__main__":
main()
| DeepLearningExamples-master | PyTorch/SpeechRecognition/wav2vec2/utils/libri_labels.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
from pathlib import Path
import dllogger
import torch.distributed as dist
from dllogger import StdOutBackend, JSONStreamBackend, Verbosity
from common import tb_dllogger
from common.metrics import MetricsAggregator
from common.tb_dllogger import (stdout_metric_format, stdout_step_format,
unique_log_fpath, TBLogger)
def init_logger(output_dir, log_file, ema_decay=0.0):
local_rank = 0 if not dist.is_initialized() else dist.get_rank()
if local_rank == 0:
Path(output_dir).mkdir(parents=False, exist_ok=True)
log_fpath = log_file or Path(output_dir, 'nvlog.json')
dllogger.init(backends=[
JSONStreamBackend(Verbosity.DEFAULT, log_fpath, append=True),
JSONStreamBackend(Verbosity.DEFAULT, unique_log_fpath(log_fpath)),
StdOutBackend(Verbosity.VERBOSE, step_format=stdout_step_format,
metric_format=stdout_metric_format)
])
init_train_metadata()
else:
dllogger.init(backends=[])
tb_train = ['train', 'train_avg']
tb_val = ['val']
tb_ema = [k + '_ema' for k in tb_val] if ema_decay > 0.0 else []
subset_names = {
'train': 'train_inner',
'train_avg': 'train',
'val': 'valid',
'val_ema': 'valid_ema',
}
enabled = (local_rank == 0)
tb_dllogger.tb_loggers = {
s: TBLogger(enabled, log_dir=output_dir, name=subset_names[s])
for s in tb_train + tb_val + tb_ema}
def init_train_metadata():
for id_, pref in [('train', ''), ('train_avg', 'avg train '),
('val', ' avg val '), ('val_ema', ' EMA val ')]:
dllogger.metadata(f"{id_}_loss",
{"name": f"{pref} loss", "format": ":>6.3f"})
dllogger.metadata(f"{id_}_accuracy",
{"name": f"{pref}acc", "format": ":>6.3f"})
dllogger.metadata(f"{id_}_prob_perplexity",
{"name": f"{pref}p pplx", "format": ":>6.3f"})
dllogger.metadata(f"{id_}_code_perplexity",
{"name": f"{pref}c pplx", "format": ":>6.3f"})
dllogger.metadata(f"{id_}_ntokens",
{"name": None, "unit": "tokens", "format": ":>8.0f"})
dllogger.metadata(f"{id_}_took",
{"name": "took", "unit": "s", "format": ":>3.2f"})
dllogger.metadata(f"{id_}_ntokens/s",
{"name": None, "unit": "tokens/s", "format": ":>8.2f"})
dllogger.metadata(f"{id_}_uer",
{"name": f"{pref} uer", "format": ":>6.2f"})
dllogger.metadata(f"{id_}_wer",
{"name": f"{pref} wer", "format": ":>6.2f"})
dllogger.metadata(f"{id_}_raw_wer",
{"name": f"{pref} raw wer", "format": ":>6.2f"})
dllogger.metadata(f"{id_}_lr",
{"name": "lr", "format": ":>3.2e"})
dllogger.metadata(f"{id_}_loss_scale",
{"name": "loss scale", "format": ":>3.2e"})
def init_infer_metadata():
for step in ['DNN', 'data+DNN', 'data']:
for c in [0.99, 0.95, 0.9, 0.5]:
cs = 'avg' if c == 0.5 else f'{int(100 * c)}%'
dllogger.metadata(f'{step.lower()}_latency_{c}',
{'name': f'{step} latency {cs}',
'format': ':>7.2f', 'unit': 'ms'})
dllogger.metadata(
'eval_wer', {'name': 'WER', 'format': ':>3.2f', 'unit': '%'})
class W2v2Metrics(MetricsAggregator):
def __init__(self, benchmark_epochs, scopes=('train', 'train_avg'), cuda=True):
super().__init__(
benchmark_epochs=benchmark_epochs,
benchmark_keys=('took', 'accuracy', 'loss', 'ntokens/s'),
scopes=scopes,
dllogger_keys=('loss', 'ntokens', 'accuracy', 'prob_perplexity',
'code_perplexity',
'took', 'loss_scale', 'lr', 'ntokens/s'),
reduce_mean=('temp', 'prob_perplexity', 'code_perplexity'),
reduce_last=('lr', 'loss_scale'),
cuda=cuda)
def accumulate(self, scopes=None):
if 'ignore' not in self.partials or self.partials['ignore'] == 0.0:
# compute_loss_and_accuracy
ntokens = self.partials['ntokens']
for k, v in self.partials.items():
if k.startswith('loss'):
self.partials[k] = v / ntokens / math.log(2) # as in fairseq
self['accuracy'] = (self.partials.pop('correct')
/ self.partials.pop('count'))
part_counts = self.partial_counts
assert part_counts['correct'] == part_counts['count'] == 1
super().accumulate(scopes=scopes)
def _finish_accumulating(self, scope='train'):
super()._finish_accumulating(scope=scope)
m = self.metrics[scope]
count = self.metric_counts[scope]
m['ntokens/s'] = m['ntokens'] * count['ntokens'] / m['took']
class W2v2FineTuningMetrics(MetricsAggregator):
def __init__(
self,
benchmark_epochs,
benchmark_keys=('took', 'accuracy', 'loss', 'ntokens/s'),
scopes=('train', 'train_avg'),
dllogger_keys=('loss', 'ntokens', 'accuracy', 'lr',
'prob_perplexity', 'took', 'ntokens/s', 'uer',
'wer', 'raw_wer'),
reduce_mean=('temp', 'prob_perplexity', 'code_perplexity'),
reduce_last=('lr',),
cuda=True):
super().__init__(
benchmark_epochs=benchmark_epochs, benchmark_keys=benchmark_keys,
scopes=scopes, dllogger_keys=dllogger_keys,
reduce_mean=reduce_mean, reduce_last=reduce_last, cuda=cuda)
def accumulate(self, scopes=None):
if 'ignore' not in self.partials or self.partials['ignore'] == 0.0:
# compute_loss_and_accuracy
nsentences = self.partials['nsentences']
for k, v in self.partials.items():
if k.startswith('loss'):
self.partials[k] = v / nsentences / math.log(2) # as in fairseq
super().accumulate(scopes=scopes)
def _finish_accumulating(self, scope='train'):
super()._finish_accumulating(scope=scope)
m = self.metrics[scope]
count = self.metric_counts[scope]
m['ntokens/s'] = m['ntokens'] * count['ntokens'] / m['took']
if 'c_errs' in m:
m['uer'] = 100 * m['c_errs'] / m['c_len']
if 'w_errs' in m:
m['wer'] = 100 * m['w_errs'] / m['w_len']
if 'wv_errs' in m:
m['raw_wer'] = 100 * m['wv_errs'] / m['w_len']
| DeepLearningExamples-master | PyTorch/SpeechRecognition/wav2vec2/wav2vec2/logging.py |
DeepLearningExamples-master | PyTorch/SpeechRecognition/wav2vec2/wav2vec2/__init__.py |
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import contextlib
import math
from typing import Dict, List, Optional, Tuple
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch import Tensor
from common import fairseq_fake_modules
from common.fairseq import utils
from common.fairseq.data.data_utils import compute_mask_indices
from common.fairseq.modules import (
Fp32GroupNorm,
Fp32LayerNorm,
Fp32MaskedGroupNorm,
GradMultiply,
GumbelVectorQuantizer,
LayerNorm,
MaskedGroupNorm,
MultiheadAttention,
SamePad,
TransposeLast,
)
from common.features import FilterbankFeatures
from common.helpers import load_wrapped_state
from common.pyt_mha import PytMultiheadAttention
from common.utils import print_once
class Fp32Conv1d(nn.Conv1d):
"""Casts to FP32. TorchScript ready, does not use inheritance.
Details: https://github.com/pytorch/pytorch/issues/42885 .
"""
def forward(self, x):
return F.conv1d(
x.float(), self.weight, bias=self.bias, stride=self.stride,
padding=self.padding, dilation=self.dilation, groups=self.groups
).to(dtype=x.dtype)
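# Illustrative sketch (not part of the original model code): Fp32Conv1d keeps
# its parameters in FP32 while activations may arrive in half precision; the
# output is cast back to the input dtype. The helper below only demonstrates
# the call pattern and is never used by the model.
def _fp32_conv1d_usage_example(x_half: Tensor) -> Tensor:
    conv = Fp32Conv1d(in_channels=80, out_channels=512, kernel_size=3)
    return conv(x_half)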
def init_bert_params(module):
"""
Initialize the weights specific to the BERT Model.
This overrides the default initializations depending on the specified arguments.
1. If normal_init_linear_weights is set then weights of linear
layer will be initialized using the normal distribution and
       bias will be set to the specified value.
2. If normal_init_embed_weights is set then weights of embedding
layer will be initialized using the normal distribution.
3. If normal_init_proj_weights is set then weights of
       in_project_weight for MultiHeadAttention are initialized using
the normal distribution (to be validated).
"""
def normal_(data):
# with FSDP, module params will be on CUDA, so we cast them back to CPU
# so that the RNG is consistent with and without FSDP
data.copy_(
data.cpu().normal_(mean=0.0, std=0.02).to(data.device)
)
if isinstance(module, nn.Linear):
normal_(module.weight.data)
if module.bias is not None:
module.bias.data.zero_()
if isinstance(module, nn.Embedding):
normal_(module.weight.data)
if module.padding_idx is not None:
module.weight.data[module.padding_idx].zero_()
if isinstance(module, MultiheadAttention):
normal_(module.q_proj.weight.data)
normal_(module.k_proj.weight.data)
normal_(module.v_proj.weight.data)
if isinstance(module, PytMultiheadAttention):
normal_(module.qkv.weight.data)
def Linear(in_features, out_features, bias=True):
m = nn.Linear(in_features, out_features, bias)
nn.init.xavier_uniform_(m.weight)
if bias:
nn.init.constant_(m.bias, 0.0)
return m
class MaskedBlock(nn.Module):
def __init__(self, *args):
super().__init__()
self.conv = args[0]
self.drop = args[1]
if len(args) == 4:
self.norm = args[2]
self.activation = args[3]
else:
self.norm = None
self.activation = args[2]
def hook(state_dict, prefix, *args, **kwargs):
"""Rename Blocks saved as nn.Sequential."""
new_sd = {}
for k, v in state_dict.items():
if not k.startswith(prefix):
new_sd[k] = v
else:
*pref, feat, conv, mod_num, layer_num, param = k.split(".")
assert feat == "feature_extractor" and conv == "conv_layers"
if layer_num == "0":
new_k = ".".join(pref + [feat, conv, mod_num, "conv", param])
elif layer_num == "2":
new_k = ".".join(pref + [feat, conv, mod_num, "norm", param])
else:
raise ValueError
print(f"Rename {k} --> {new_k}")
new_sd[new_k] = v
state_dict.clear()
state_dict.update(new_sd)
self._register_load_state_dict_pre_hook(hook)
def forward(self, x: Tensor, x_lens: Tensor):
x = self.drop(self.conv(x))
x_lens = (x_lens - self.conv.kernel_size[0]) / self.conv.stride[0] + 1
x_lens = torch.floor(x_lens).long()
if self.norm is not None:
if isinstance(self.norm, nn.Sequential):
                # LayerNorm wrapped in nn.Sequential
raise ValueError("LayerNorm does not require masking")
else:
x = self.norm(x, x_lens)
return self.activation(x), x_lens
class Wav2Vec2Model(nn.Module):
def __init__(self, cfg):
super().__init__()
self.cfg = cfg
self.use_spectrogram_features = cfg.use_spectrogram_features
if self.use_spectrogram_features:
self.spec_feature_extractor = FilterbankFeatures(
frame_stacking=cfg.spectrogram_feature_stacking,
frame_subsampling=cfg.spectrogram_feature_subsampling,
window_size=cfg.spectrogram_window_size,
window_stride=cfg.spectrogram_window_stride,
n_filt=cfg.spectrogram_n_filt).cuda()
            self.feature_extractor = None
self.spec_feature_extractor.eval()
self.embed = self.spec_feature_extractor.output_dim()
else:
feature_enc_layers = eval(cfg.conv_feature_layers)
self.embed = feature_enc_layers[-1][0]
self.spec_feature_extractor = None
self.feature_extractor = ConvFeatureExtractionModel(
conv_layers=feature_enc_layers,
dropout=0.0,
mode=cfg.extractor_mode,
conv_bias=cfg.conv_bias,
fp32_norms=cfg.fp32_conv_norms,
masked=getattr(cfg, 'masked_feature_extractor', False),
)
self.post_extract_proj = (
nn.Linear(self.embed, cfg.encoder_embed_dim)
if self.embed != cfg.encoder_embed_dim and not cfg.quantize_input
else None
)
self.mask_prob = cfg.mask_prob
self.mask_selection = cfg.mask_selection
self.mask_other = cfg.mask_other
self.mask_length = cfg.mask_length
self.no_mask_overlap = cfg.no_mask_overlap
self.mask_min_space = cfg.mask_min_space
self.mask_channel_prob = cfg.mask_channel_prob
self.mask_channel_before = cfg.mask_channel_before
self.mask_channel_selection = cfg.mask_channel_selection
self.mask_channel_other = cfg.mask_channel_other
self.mask_channel_length = cfg.mask_channel_length
self.no_mask_channel_overlap = cfg.no_mask_channel_overlap
self.mask_channel_min_space = cfg.mask_channel_min_space
self.dropout_input = nn.Dropout(cfg.dropout_input)
self.dropout_features = nn.Dropout(cfg.dropout_features)
self.feature_grad_mult = cfg.feature_grad_mult
self.quantizer = None
self.input_quantizer = None
self.n_negatives = cfg.num_negatives
self.cross_sample_negatives = cfg.cross_sample_negatives
self.codebook_negatives = cfg.codebook_negatives
self.negatives_from_everywhere = cfg.negatives_from_everywhere
self.logit_temp = cfg.logit_temp
self.fp32_cosine_sim = cfg.fp32_cosine_sim
final_dim = cfg.final_dim if cfg.final_dim > 0 else cfg.encoder_embed_dim
if cfg.quantize_targets:
vq_dim = cfg.latent_dim if cfg.latent_dim > 0 else final_dim
self.quantizer = GumbelVectorQuantizer(
dim=self.embed,
num_vars=cfg.latent_vars,
temp=cfg.latent_temp,
groups=cfg.latent_groups,
combine_groups=False,
vq_dim=vq_dim,
time_first=True,
weight_proj_depth=cfg.quantizer_depth,
weight_proj_factor=cfg.quantizer_factor,
)
self.project_q = nn.Linear(vq_dim, final_dim)
else:
self.project_q = nn.Linear(self.embed, final_dim)
if cfg.quantize_input:
if cfg.same_quantizer and self.quantizer is not None:
vq_dim = final_dim
self.input_quantizer = self.quantizer
else:
vq_dim = cfg.latent_dim if cfg.latent_dim > 0 else cfg.encoder_embed_dim
self.input_quantizer = GumbelVectorQuantizer(
dim=self.embed,
num_vars=cfg.latent_vars,
temp=cfg.latent_temp,
groups=cfg.latent_groups,
combine_groups=False,
vq_dim=vq_dim,
time_first=True,
weight_proj_depth=cfg.quantizer_depth,
weight_proj_factor=cfg.quantizer_factor,
)
self.project_inp = nn.Linear(vq_dim, cfg.encoder_embed_dim)
self.mask_emb = nn.Parameter(
torch.FloatTensor(cfg.encoder_embed_dim).uniform_()
)
self.encoder = TransformerEncoder(cfg)
self.layer_norm = LayerNorm(self.embed)
self.target_glu = None
if cfg.target_glu:
self.target_glu = nn.Sequential(
nn.Linear(final_dim, final_dim * 2), nn.GLU()
)
self.final_proj = nn.Linear(cfg.encoder_embed_dim, final_dim)
self.conv_cfg_list = eval(self.cfg.conv_feature_layers)
def apply_mask(
self,
x,
padding_mask,
mask_indices=None,
mask_channel_indices=None,
):
B, T, C = x.shape
if self.mask_channel_prob > 0 and self.mask_channel_before:
mask_channel_indices = compute_mask_indices(
(B, C),
None,
self.mask_channel_prob,
self.mask_channel_length,
self.mask_channel_selection,
self.mask_channel_other,
no_overlap=self.no_mask_channel_overlap,
min_space=self.mask_channel_min_space,
)
mask_channel_indices = (
torch.from_numpy(mask_channel_indices)
.to(x.device)
.unsqueeze(1)
.expand(-1, T, -1)
)
x[mask_channel_indices] = 0
if self.mask_prob > 0:
if mask_indices is None:
mask_indices = compute_mask_indices(
(B, T),
padding_mask,
self.mask_prob,
self.mask_length,
self.mask_selection,
self.mask_other,
min_masks=2,
no_overlap=self.no_mask_overlap,
min_space=self.mask_min_space,
require_same_masks=True,
mask_dropout=0.0,
)
mask_indices = torch.from_numpy(mask_indices).to(x.device)
x[mask_indices] = self.mask_emb
else:
mask_indices = None
if self.mask_channel_prob > 0 and not self.mask_channel_before:
if mask_channel_indices is None:
mask_channel_indices = compute_mask_indices(
(B, C),
None,
self.mask_channel_prob,
self.mask_channel_length,
self.mask_channel_selection,
self.mask_channel_other,
no_overlap=self.no_mask_channel_overlap,
min_space=self.mask_channel_min_space,
)
mask_channel_indices = (
torch.from_numpy(mask_channel_indices)
.to(x.device)
.unsqueeze(1)
.expand(-1, T, -1)
)
x[mask_channel_indices] = 0
return x, mask_indices
def sample_negatives(self, y, num, padding_count=None):
if self.n_negatives == 0 and self.cross_sample_negatives == 0:
return y.new(0)
bsz, tsz, fsz = y.shape
y = y.view(-1, fsz) # BTC => (BxT)C
cross_high = tsz * bsz
high = tsz - (padding_count or 0)
with torch.no_grad():
assert high > 1, f"{bsz,tsz,fsz}"
if self.n_negatives > 0:
tszs = torch.arange(num, device=y.device).unsqueeze(-1)
tszs = tszs.expand(-1, self.n_negatives).flatten()
neg_idxs = torch.randint(
low=0, high=high - 1, size=(bsz, self.n_negatives * num),
device=y.device
)
neg_idxs[neg_idxs >= tszs] += 1
if self.cross_sample_negatives > 0:
tszs = torch.arange(num, device=y.device).unsqueeze(-1)
tszs = tszs.expand(-1, self.cross_sample_negatives).flatten()
cross_neg_idxs = torch.randint(
low=0,
high=cross_high - 1,
size=(bsz, self.cross_sample_negatives * num),
device=y.device
)
cross_neg_idxs[cross_neg_idxs >= tszs] += 1
if self.n_negatives > 0:
for i in range(1, bsz):
neg_idxs[i] += i * high
else:
neg_idxs = cross_neg_idxs
if self.cross_sample_negatives > 0 and self.n_negatives > 0:
neg_idxs = torch.cat([neg_idxs, cross_neg_idxs], dim=1)
negs = y[neg_idxs.view(-1)]
negs = negs.view(
bsz, num, self.n_negatives + self.cross_sample_negatives, fsz
).permute(
2, 0, 1, 3
) # to NxBxTxC
return negs, neg_idxs
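    # Note (added for clarity): negatives are other (quantized) time steps of
    # the same utterance (plus other utterances when cross_sample_negatives
    # > 0); the "+= 1" shift above skips the positive's own time step when
    # sampling within the utterance.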
def compute_preds(self, x, y, negatives):
neg_is_pos = (y == negatives).all(-1)
y = y.unsqueeze(0)
targets = torch.cat([y, negatives], dim=0)
if self.fp32_cosine_sim:
logits = torch.cosine_similarity(x.float(), targets.float(),
dim=-1).type_as(x)
else:
logits = torch.cosine_similarity(x, targets, dim=-1)
logits = logits / self.logit_temp
if neg_is_pos.any():
if not hasattr(self, "_inftensor"):
self._inftensor = float("-inf")
logits[1:][neg_is_pos] = self._inftensor
return logits
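    # Shape note (added for clarity): `logits` is
    # (1 + num_negatives, batch, masked_steps); index 0 along the first
    # dimension is the positive target, and negatives identical to the
    # positive are masked to -inf.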
def _conv_out_length(self, input_length: torch.Tensor, kernel_size: int, stride: int):
return torch.floor((input_length - kernel_size) / stride + 1)
def _get_feat_extract_output_lengths(self, input_lengths: torch.LongTensor):
"""
Computes the output length of the convolutional layers
"""
for i in range(len(self.conv_cfg_list)):
input_lengths = self._conv_out_length(
input_lengths,
self.conv_cfg_list[i][1],
self.conv_cfg_list[i][2]
)
return input_lengths.to(torch.long)
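    # Worked example (illustrative, assuming the common wav2vec 2.0 stack
    # [(512,10,5)] + [(512,3,2)]*4 + [(512,2,2)]*2): a 1 s, 16 kHz input of
    # 16000 samples shrinks to 3199 -> 1599 -> 799 -> 399 -> 199 -> 99 -> 49
    # frames, i.e. roughly one output frame every 20 ms.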
def infer(self, source: Tensor, padding_mask: Tensor):
"""Forward method for (masked) inference."""
input_lengths = (1 - padding_mask.long()).sum(-1)
output_lengths = self._get_feat_extract_output_lengths(input_lengths)
features, _ = self.feature_extractor.masked_forward(source, input_lengths)
features = features.transpose(1, 2)
features = self.layer_norm(features)
padding_mask = torch.zeros(
features.shape[:2], dtype=features.dtype, device=features.device
)
        # these two operations make sure that all values
# before the output lengths indices are attended to
padding_mask[
(
torch.arange(padding_mask.shape[0], device=padding_mask.device),
output_lengths - 1,
)
] = 1
padding_mask = (1 - padding_mask.flip([-1]).cumsum(-1).flip([-1])) == 1
if self.post_extract_proj is not None:
features = self.post_extract_proj(features)
x = self.dropout_input(features)
x, _ = self.encoder(x, padding_mask=padding_mask)
return x, padding_mask
def forward(
self,
source,
padding_mask: Optional[Tensor] = None,
mask=True,
features_only=False,
layer=-1,
mask_indices=None,
mask_channel_indices=None,
padding_count=None,
sub_batch_sizes=None,
sub_batch_lens=None,
):
masked_inference = self.feature_extractor.masked
if self.spec_feature_extractor is not None:
if padding_mask is not None and padding_mask.any():
input_lengths = (1 - padding_mask.long()).sum(-1)
else:
input_lengths = (torch.zeros(source.size(0)) + source.size(1)).cuda()
features, output_lengths = self.spec_feature_extractor(source, input_lengths)
output_lengths = output_lengths.to(torch.long)
else:
if self.training and self.feature_grad_mult > 0:
features = self.feature_extractor(source)
if self.feature_grad_mult != 1.0:
features = GradMultiply.apply(features, self.feature_grad_mult)
else:
with torch.no_grad():
if masked_inference:
input_lengths = (1 - padding_mask.long()).sum(-1)
features, _ = self.feature_extractor.masked_forward(source, input_lengths)
else:
features = self.feature_extractor(source)
if masked_inference or (padding_mask is not None and padding_mask.any()):
input_lengths = (1 - padding_mask.long()).sum(-1)
# apply conv formula to get real output_lengths
output_lengths = self._get_feat_extract_output_lengths(input_lengths)
else:
output_lengths = None
features_pen = features.float().pow(2).mean()
features = features.transpose(1, 2)
features = self.layer_norm(features)
unmasked_features = features.clone()
if output_lengths is not None:
padding_mask = torch.zeros(
features.shape[:2], dtype=features.dtype, device=features.device
)
            # these two operations make sure that all values
# before the output lengths indices are attended to
padding_mask[
(
torch.arange(padding_mask.shape[0], device=padding_mask.device),
output_lengths - 1,
)
] = 1
padding_mask = (1 - padding_mask.flip([-1]).cumsum(-1).flip([-1])) == 1
else:
padding_mask = None
if self.post_extract_proj is not None:
features = self.post_extract_proj(features)
features = self.dropout_input(features)
unmasked_features = self.dropout_features(unmasked_features)
num_vars = None
code_ppl = None
prob_ppl = None
curr_temp = None
if self.input_quantizer:
q = self.input_quantizer(features, produce_targets=False)
features = q["x"]
num_vars = q["num_vars"]
code_ppl = q["code_perplexity"]
prob_ppl = q["prob_perplexity"]
curr_temp = q["temp"]
features = self.project_inp(features)
split_accumulation = sub_batch_sizes is not None and sub_batch_sizes.size(0) > 1
if split_accumulation:
assert sub_batch_sizes is not None
assert self.quantizer is not None
assert not self.negatives_from_everywhere
assert self.codebook_negatives == 0
assert self.target_glu is None
assert mask_indices is None
assert mask_channel_indices is None
assert mask
split_sizes = sub_batch_sizes.tolist()
sub_x, sub_y, sub_mask_indices, sub_negs = [], [], [], []
for s, e in zip(np.cumsum(split_sizes) - split_sizes, np.cumsum(split_sizes)):
x_, mask_indices_ = self.apply_mask(
features[s:e],
padding_mask[s:e] if padding_mask is not None else None,
)
sub_x.append(x_)
sub_mask_indices.append(mask_indices_)
y_ = unmasked_features[s:e][mask_indices_].view(
e-s, -1, unmasked_features.size(-1)
)
q_ = self.quantizer(y_, produce_targets=False)
y_ = q_["x"]
y_ = self.project_q(y_)
negs_, _ = self.sample_negatives(
y_,
y_.size(1),
padding_count=padding_count,
)
sub_y.append(y_)
sub_negs.append(negs_)
x = torch.cat(sub_x, dim=0)
mask_indices = torch.cat(sub_mask_indices, dim=0)
x, layer_results = self.encoder(x, padding_mask=padding_mask, layer=layer)
if features_only:
return {
"x": x,
"padding_mask": padding_mask,
"features": unmasked_features,
"layer_results": layer_results,
}
x = x[mask_indices] # .view(x.size(0), -1, x.size(-1))
x = self.final_proj(x)
            # split x back into per-sub-batch chunks that line up with
            # sub_y / sub_mask_indices / sub_negs
sub_x2 = []
offset = 0
for y_, mask_inds_, negs_ in zip(sub_y, sub_mask_indices, sub_negs):
sz = mask_inds_.sum()
x_ = x[offset:offset+sz].view(mask_inds_.size(0), -1, x.size(-1))
x_ = self.compute_preds(x_, y_, negs_)
sub_x2.append(x_)
offset += sz
x = torch.cat([x_.view(x_.size(0), 1, -1) for x_ in sub_x2], dim=2)
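            # each sub-batch's predictions are flattened and concatenated along
            # the last dim, so get_logits()/get_targets() see one contiguous
            # set of (1 + num_negatives)-way scores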
result = {
"x": x,
"padding_mask": padding_mask,
"features_pen": features_pen,
}
            # TODO Reassemble q stats; currently using the last chunk's stats
q = q_
if q["prob_perplexity"] is not None:
result["prob_perplexity"] = q["prob_perplexity"]
result["code_perplexity"] = q["code_perplexity"]
result["num_vars"] = q["num_vars"]
result["temp"] = q["temp"]
return result
# End split_accumulation ----------------------------------------------
if mask:
x, mask_indices = self.apply_mask(
features,
padding_mask,
mask_indices=mask_indices,
mask_channel_indices=mask_channel_indices,
)
if mask_indices is not None:
y = unmasked_features[mask_indices].view(
unmasked_features.size(0), -1, unmasked_features.size(-1)
)
else:
y = unmasked_features
else:
x = features
y = unmasked_features
mask_indices = None
x, layer_results = self.encoder(x, padding_mask=padding_mask, layer=layer)
if features_only:
return {
"x": x,
"padding_mask": padding_mask,
"features": unmasked_features,
"layer_results": layer_results,
}
if self.quantizer:
q = self.quantizer(y, produce_targets=False)
y = q["x"]
num_vars = q["num_vars"]
code_ppl = q["code_perplexity"]
prob_ppl = q["prob_perplexity"]
curr_temp = q["temp"]
y = self.project_q(y)
if self.negatives_from_everywhere:
neg_cands = self.quantizer(unmasked_features, produce_targets=False)[
"x"
]
negs, _ = self.sample_negatives(
neg_cands,
y.size(1),
padding_count=padding_count,
)
negs = self.project_q(negs)
else:
negs, _ = self.sample_negatives(
y,
y.size(1),
padding_count=padding_count,
)
if self.codebook_negatives > 0:
cb_negs = self.quantizer.sample_from_codebook(
y.size(0) * y.size(1), self.codebook_negatives
)
cb_negs = cb_negs.view(
self.codebook_negatives, y.size(0), y.size(1), -1
                ) # order doesn't matter
cb_negs = self.project_q(cb_negs)
negs = torch.cat([negs, cb_negs], dim=0)
else:
y = self.project_q(y)
if self.negatives_from_everywhere:
negs, _ = self.sample_negatives(
unmasked_features,
y.size(1),
padding_count=padding_count,
)
negs = self.project_q(negs)
else:
negs, _ = self.sample_negatives(
y,
y.size(1),
padding_count=padding_count,
)
x = x[mask_indices].view(x.size(0), -1, x.size(-1))
if self.target_glu:
y = self.target_glu(y)
negs = self.target_glu(negs)
x = self.final_proj(x)
x = self.compute_preds(x, y, negs)
result = {
"x": x,
"padding_mask": padding_mask,
"features_pen": features_pen,
}
if prob_ppl is not None:
result["prob_perplexity"] = prob_ppl
result["code_perplexity"] = code_ppl
result["num_vars"] = num_vars
result["temp"] = curr_temp
return result
def quantize(self, x):
assert self.quantizer is not None
x = self.feature_extractor(x)
x = x.transpose(1, 2)
x = self.layer_norm(x)
return self.quantizer.forward_idx(x)
def extract_features(self, source, padding_mask, mask=False, layer=-1):
res = self.forward(
source, padding_mask, mask=mask, features_only=True, layer=layer
)
return res
def get_logits(self, net_output):
logits = net_output["x"]
logits = logits.transpose(0, 2)
logits = logits.reshape(-1, logits.size(-1))
return logits
def get_targets(self, sample, net_output, expand_steps=True):
x = net_output["x"]
return x.new_zeros(x.size(1) * x.size(2), dtype=torch.long)
def get_extra_losses(self, net_output):
pen = []
if "prob_perplexity" in net_output:
pen.append(
(net_output["num_vars"] - net_output["prob_perplexity"])
/ net_output["num_vars"]
)
if "features_pen" in net_output:
pen.append(net_output["features_pen"])
return pen
def remove_pretraining_modules(self):
self.quantizer = None
self.project_q = None
self.target_glu = None
self.final_proj = None
def get_normalized_probs(
self,
net_output: Tuple[Tensor, Optional[Dict[str, List[Optional[Tensor]]]]],
log_probs: bool,
sample: Optional[Dict[str, Tensor]] = None,
):
"""Get normalized probabilities (or log probs) from a net's output."""
return self.get_normalized_probs_scriptable(net_output, log_probs, sample)
    # TorchScript doesn't support super(), so a scriptable subclass can't call
    # the base class implementation directly. The current workaround is to add
    # a helper function with a different name and call that helper from the
    # scriptable subclass.
def get_normalized_probs_scriptable(
self,
net_output: Tuple[Tensor, Optional[Dict[str, List[Optional[Tensor]]]]],
log_probs: bool,
sample: Optional[Dict[str, Tensor]] = None,
):
"""Scriptable helper function for get_normalized_probs in ~BaseFairseqModel"""
if hasattr(self, "decoder"):
return self.decoder.get_normalized_probs(net_output, log_probs, sample)
elif torch.is_tensor(net_output):
# syntactic sugar for simple models which don't have a decoder
# (e.g., the classification tutorial)
logits = net_output.float()
if log_probs:
return F.log_softmax(logits, dim=-1)
else:
return F.softmax(logits, dim=-1)
raise NotImplementedError
def max_positions(self):
"""Maximum length supported by the model."""
return None
def load_state_dict(
self,
state_dict,
strict=True,
):
"""Copies parameters and buffers from *state_dict* into this module and
its descendants.
Overrides the method in :class:`nn.Module`. Compared with that method
this additionally "upgrades" *state_dicts* from old checkpoints.
"""
self.upgrade_state_dict(state_dict)
new_state_dict = state_dict
return super().load_state_dict(new_state_dict, strict)
def upgrade_state_dict(self, state_dict):
"""Upgrade old state dicts to work with newer code."""
self.upgrade_state_dict_named(state_dict, "")
def upgrade_state_dict_named(self, state_dict, name):
"""Upgrade old state dicts to work with newer code.
Args:
state_dict (dict): state dictionary to upgrade, in place
name (str): the state dict key corresponding to the current module
"""
assert state_dict is not None
def do_upgrade(m, prefix):
if len(prefix) > 0:
prefix += "."
for n, c in m.named_children():
name = prefix + n
if hasattr(c, "upgrade_state_dict_named"):
c.upgrade_state_dict_named(state_dict, name)
elif hasattr(c, "upgrade_state_dict"):
c.upgrade_state_dict(state_dict)
do_upgrade(c, name)
do_upgrade(self, name)
def set_num_updates(self, num_updates):
"""State from trainer to pass along to model at every update."""
for m in self.modules():
if hasattr(m, "set_num_updates") and m != self:
m.set_num_updates(num_updates)
def prepare_for_inference_(self, cfg):
"""Prepare model for inference."""
kwargs = {}
kwargs["beamable_mm_beam_size"] = (
None
if getattr(cfg.generation, "no_beamable_mm", False)
else getattr(cfg.generation, "beam", 5)
)
kwargs["need_attn"] = getattr(cfg.generation, "print_alignment", False)
if getattr(cfg.generation, "retain_dropout", False):
kwargs["retain_dropout"] = cfg.generation.retain_dropout
kwargs["retain_dropout_modules"] = cfg.generation.retain_dropout_modules
self.make_generation_fast_(**kwargs)
def make_generation_fast_(self, **kwargs):
"""
Legacy entry point to optimize model for faster generation.
Prefer prepare_for_inference_.
"""
if self._is_generation_fast:
return # only apply once
self._is_generation_fast = True
# remove weight norm from all modules in the network
def apply_remove_weight_norm(module):
try:
nn.utils.remove_weight_norm(module)
except (AttributeError, ValueError): # this module didn't have weight norm
return
self.apply(apply_remove_weight_norm)
def train(mode=True):
if mode:
raise RuntimeError("cannot train after make_generation_fast")
# this model should no longer be used for training
self.eval()
self.train = train
def prepare_for_onnx_export_(self, **kwargs):
"""Make model exportable via ONNX trace."""
seen = set()
def apply_prepare_for_onnx_export_(module):
if (
module != self
and hasattr(module, "prepare_for_onnx_export_")
and module not in seen
):
seen.add(module)
module.prepare_for_onnx_export_(**kwargs)
self.apply(apply_prepare_for_onnx_export_)
def remove_conv_wn(self):
nn.utils.remove_weight_norm(self.encoder.pos_conv[0])
def apply_conv_wn(self):
nn.utils.weight_norm(self.encoder.pos_conv[0], name="weight", dim=2)
class ConvFeatureExtractionModel(nn.Module):
def __init__(
self,
conv_layers: List[Tuple[int, int, int]],
dropout: float = 0.0,
mode: str = "default",
conv_bias: bool = False,
fp32_norms: bool = True,
masked: bool = False,
):
super().__init__()
assert mode in {"default", "layer_norm"}
self.mode = mode
self.masked = masked
LayerNorm_ = Fp32LayerNorm if fp32_norms else nn.LayerNorm
if masked and mode == "default":
Block_ = MaskedBlock
GroupNorm_ = Fp32MaskedGroupNorm if fp32_norms else MaskedGroupNorm
else:
Block_ = nn.Sequential
GroupNorm_ = Fp32GroupNorm if fp32_norms else nn.GroupNorm
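        # MaskedBlock carries per-sample lengths through each conv layer so
        # masked_forward can (presumably) keep padded frames out of the group
        # norm statistics during masked inference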
def block(
n_in,
n_out,
k,
stride,
is_layer_norm=False,
is_group_norm=False,
conv_bias=False,
):
assert not (is_layer_norm and is_group_norm), (
"layer norm and group norm are mutually exclusive")
def make_conv():
conv = nn.Conv1d(n_in, n_out, k, stride=stride, bias=conv_bias)
nn.init.kaiming_normal_(conv.weight)
return conv
def make_norm():
if is_group_norm:
l = GroupNorm_(dim, dim, affine=True)
elif is_layer_norm:
l = nn.Sequential(TransposeLast(),
LayerNorm_(dim, elementwise_affine=True),
TransposeLast())
return l
has_norm = is_layer_norm or is_group_norm
return Block_(make_conv(),
nn.Dropout(p=dropout),
*([make_norm()] if has_norm else []),
nn.GELU())
in_d = 1
self.conv_layers = nn.ModuleList()
for i, cl in enumerate(conv_layers):
assert len(cl) == 3, "invalid conv definition: " + str(cl)
(dim, k, stride) = cl
self.conv_layers.append(
block(
in_d,
dim,
k,
stride,
is_layer_norm=mode == "layer_norm",
is_group_norm=mode == "default" and i == 0,
conv_bias=conv_bias,
)
)
in_d = dim
def forward(self, x):
# BxT -> BxCxT
x = x.unsqueeze(1)
for conv in self.conv_layers:
x = conv(x)
return x
def masked_forward(self, x: Tensor, x_lens: Tensor):
# BxT -> BxCxT
x = x.unsqueeze(1)
for conv in self.conv_layers:
x, x_lens = conv(x, x_lens)
return x, x_lens
class Upsampler(nn.Module):
def __init__(self, emb_dim, factor, mode="linear"):
super().__init__()
assert mode in ("linear", "naive")
self.factor = factor
if mode == "linear":
self.linear = nn.Linear(emb_dim, emb_dim * factor)
else:
self.linear = None
def forward(self, x):
if self.linear is not None:
# T x B x C -> B x T x C
x = x.transpose(0, 1)
x = self.linear(x)
x = x.reshape(x.size(0), x.size(1) * self.factor, -1)
x = x.transpose(0, 1)
else:
x = x.repeat_interleave(self.factor, dim=0)
return x
class Downsampler(nn.Module):
def __init__(self, emb_dim, factor, mode="linear"):
super().__init__()
assert mode in ("linear", "naive")
self.factor = factor
if mode == "linear":
self.linear = nn.Linear(emb_dim * factor, emb_dim)
else:
self.linear = None
def forward(self, x):
if self.linear is not None:
# T x B x C -> B x T x C
x = x.transpose(0, 1)
B, T, C = x.size()
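            # fold `factor` consecutive frames into the channel dimension and
            # project back down to emb_dim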
x = x.reshape(B, T // self.factor, C * self.factor)
x = self.linear(x)
x = x.transpose(0, 1)
else:
# T x B x C -> B x C x T
x = x.permute(1, 2, 0)
x = F.avg_pool1d(x, kernel_size=self.factor, stride=self.factor)
x = x.permute(2, 0, 1)
return x
class TransformerEncoder(nn.Module):
def __init__(self, args):
super().__init__()
self.dropout = args.dropout
self.embedding_dim = args.encoder_embed_dim
PosConv = Fp32Conv1d if args.fp32_pos_conv else nn.Conv1d
self.pos_conv = PosConv(
self.embedding_dim,
self.embedding_dim,
kernel_size=args.conv_pos,
padding=args.conv_pos // 2,
groups=args.conv_pos_groups,
)
dropout = 0
std = math.sqrt((4 * (1.0 - dropout)) / (args.conv_pos * self.embedding_dim))
nn.init.normal_(self.pos_conv.weight, mean=0, std=std)
nn.init.constant_(self.pos_conv.bias, 0)
self.pos_conv = nn.utils.weight_norm(self.pos_conv, name="weight", dim=2)
self.pos_conv = nn.Sequential(self.pos_conv, SamePad(args.conv_pos), nn.GELU())
def create_decoder_layers(n_layers):
return nn.ModuleList([
TransformerSentenceEncoderLayer(
embedding_dim=self.embedding_dim,
ffn_embedding_dim=args.encoder_ffn_embed_dim,
num_attention_heads=args.encoder_attention_heads,
dropout=self.dropout,
attention_dropout=args.attention_dropout,
activation_dropout=args.activation_dropout,
activation_fn=args.activation_fn,
layer_norm_first=args.layer_norm_first,
rotary_embeddings=args.rotary_embeddings,
mha=args.mha,
fp32_transformer_layernorm=args.fp32_transformer_layernorm,
fp32_mha_softmax=args.fp32_mha_softmax,
)
for _ in range(n_layers)
])
if args.hourglass_transformer:
n_pre, (n_hourglass, self.shorten_factor), n_post = eval(
args.hourglass_transformer)
self.layers = create_decoder_layers(n_pre)
self.hourglass_layers = create_decoder_layers(n_hourglass)
self.post_layers = create_decoder_layers(n_post)
assert args.hourglass_resample in ['linear', 'naive']
            # otherwise we would want to resample before merging results
assert not args.layer_norm_first
kw = {'emb_dim': self.embedding_dim, 'factor': self.shorten_factor,
'mode': args.hourglass_resample}
self.upsample_layer = Upsampler(**kw)
self.downsample_layer = Downsampler(**kw)
else:
self.layers = create_decoder_layers(args.encoder_layers)
self.hourglass_layers = None
self.post_layers = None
self.layer_norm_first = args.layer_norm_first
self.layer_norm = LayerNorm(self.embedding_dim)
self.layerdrop = args.encoder_layerdrop
self.apply(init_bert_params)
def forward(self, x: Tensor, padding_mask: Optional[Tensor] = None,
layer: int = -1):
x, layer_results = self.extract_features(x, padding_mask, layer)
if self.layer_norm_first and layer == -1:
x = self.layer_norm(x)
return x, layer_results
def process_layers(self, x: Tensor, padding_mask: Optional[Tensor],
tgt_layer: int = -1):
for i, layer in enumerate(self.layers):
if not self.training or (torch.rand(1) > self.layerdrop):
x, _ = layer(x, self_attn_padding_mask=padding_mask,
need_weights=False)
if i == tgt_layer:
return x
return x
    def process_hourglass_layers(self, x: Tensor,
                                 padding_mask: Optional[Tensor],
                                 tgt_layer: int = -1):
for i, layer in enumerate(self.hourglass_layers):
if not self.training or (torch.rand(1) > self.layerdrop):
x, _ = layer(x, self_attn_padding_mask=padding_mask,
need_weights=False)
if i == tgt_layer:
return x
return x
def process_post_layers(self, x: Tensor, padding_mask: Optional[Tensor],
tgt_layer: int = -1):
if self.post_layers is None:
return x
else:
for i, layer in enumerate(self.post_layers):
if not self.training or (torch.rand(1) > self.layerdrop):
x, _ = layer(x, self_attn_padding_mask=padding_mask,
need_weights=False)
if i == tgt_layer:
return x
return x
def extract_features(self, x: Tensor, padding_mask: Optional[Tensor] = None,
tgt_layer: int = -1):
if padding_mask is not None:
x[padding_mask] = 0
x_conv = self.pos_conv(x.transpose(1, 2))
x_conv = x_conv.transpose(1, 2)
x = x + x_conv
if not self.layer_norm_first:
x = self.layer_norm(x)
x = F.dropout(x, p=self.dropout, training=self.training)
# B x T x C -> T x B x C
x = x.transpose(0, 1)
if self.hourglass_layers is not None:
            # we don't want to take outputs from inside the hourglass
            # as they are shortened and different
n_layers_before_upsampling = (len(self.layers) # pre layers
+ len(self.hourglass_layers))
assert tgt_layer == -1 or tgt_layer >= n_layers_before_upsampling
if tgt_layer is not None:
tgt_layer = tgt_layer - n_layers_before_upsampling
x = self.process_layers(x, padding_mask)
res = x
hourglass_pad_mask = padding_mask
diff = ((self.shorten_factor - x.size(0) % self.shorten_factor)
% self.shorten_factor)
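            # pad the time dimension up to a multiple of shorten_factor so it
            # can be downsampled and later upsampled back to the original size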
if diff != 0:
x = torch.cat([x, x.new_zeros(diff, x.size(1), x.size(2))])
if hourglass_pad_mask is not None:
if diff != 0:
hourglass_pad_mask = torch.cat([
hourglass_pad_mask,
x.new_ones(hourglass_pad_mask.size(0), diff)
], dim=1)
hourglass_pad_mask = (F.avg_pool1d(
hourglass_pad_mask.unsqueeze(0).float(),
self.shorten_factor,
self.shorten_factor
).int() > 0).squeeze(0)
x = self.downsample_layer(x)
x = self.process_hourglass_layers(x, hourglass_pad_mask)
x = self.upsample_layer(x)
if diff != 0:
x = x[:-diff]
x = x + res
x = self.process_post_layers(x, padding_mask, tgt_layer)
else:
x = self.process_layers(x, padding_mask, tgt_layer)
# T x B x C -> B x T x C
return x.transpose(0, 1), []
def max_positions(self):
"""Maximum output length supported by the encoder."""
return self.args.max_positions
def upgrade_state_dict_named(self, state_dict, name):
"""Upgrade a (possibly old) state dict for new versions of fairseq."""
return state_dict
class TransformerSentenceEncoderLayer(nn.Module):
"""
Implements a Transformer Encoder Layer used in BERT/XLM style pre-trained
models.
"""
def __init__(
self,
        embedding_dim: int = 768,
        ffn_embedding_dim: int = 3072,
        num_attention_heads: int = 8,
dropout: float = 0.1,
attention_dropout: float = 0.1,
activation_dropout: float = 0.1,
activation_fn: str = "relu",
layer_norm_first: bool = False,
rotary_embeddings: bool = False,
mha: str = 'fairseq',
fp32_transformer_layernorm: bool = False,
fp32_mha_softmax: bool = False,
) -> None:
assert not fp32_mha_softmax, "Support for FP32 MHA Softmax disabled"
super().__init__()
# Initialize parameters
self.embedding_dim = embedding_dim
self.dropout = dropout
self.activation_dropout = activation_dropout
# Initialize blocks
self.activation_fn = utils.get_activation_fn(activation_fn)
MHA = {'fairseq': MultiheadAttention,
'pyt': PytMultiheadAttention}[mha]
self.self_attn = MHA(
self.embedding_dim,
num_attention_heads,
dropout=attention_dropout,
self_attention=True,
rotary_embeddings=rotary_embeddings
)
self.dropout1 = nn.Dropout(dropout)
self.dropout2 = nn.Dropout(self.activation_dropout)
self.dropout3 = nn.Dropout(dropout)
self.layer_norm_first = layer_norm_first
LN = Fp32LayerNorm if fp32_transformer_layernorm else LayerNorm
# layer norm associated with the self attention layer
self.self_attn_layer_norm = LN(self.embedding_dim)
self.fc1 = nn.Linear(self.embedding_dim, ffn_embedding_dim)
self.fc2 = nn.Linear(ffn_embedding_dim, self.embedding_dim)
# layer norm associated with the position wise feed-forward NN
self.final_layer_norm = LN(self.embedding_dim)
def forward(
self,
x: torch.Tensor,
self_attn_mask: Optional[Tensor] = None,
self_attn_padding_mask: Optional[Tensor] = None,
need_weights: bool = False,
):
"""
LayerNorm is applied either before or after the self-attention/ffn
        modules, similar to the original Transformer implementation.
"""
residual = x
if self.layer_norm_first:
x = self.self_attn_layer_norm(x)
x, attn = self.self_attn(
query=x,
key=x,
value=x,
key_padding_mask=self_attn_padding_mask,
attn_mask=self_attn_mask,
)
x = self.dropout1(x)
x = residual + x
residual = x
x = self.final_layer_norm(x)
x = self.activation_fn(self.fc1(x))
x = self.dropout2(x)
x = self.fc2(x)
x = self.dropout3(x)
x = residual + x
else:
x, attn = self.self_attn(
query=x,
key=x,
value=x,
key_padding_mask=self_attn_padding_mask,
)
x = self.dropout1(x)
x = residual + x
x = self.self_attn_layer_norm(x)
residual = x
x = self.activation_fn(self.fc1(x))
x = self.dropout2(x)
x = self.fc2(x)
x = self.dropout3(x)
x = residual + x
x = self.final_layer_norm(x)
return x, attn
class Wav2VecEncoder(nn.Module):
def __init__(self, cfg, init_state_dict=None, output_size=None):
super().__init__()
self.apply_mask = cfg.apply_mask
self.w2v_model = Wav2Vec2Model(cfg)
if init_state_dict is not None:
load_wrapped_state(self.w2v_model, init_state_dict)
self.w2v_model.remove_pretraining_modules()
d = cfg.encoder_embed_dim
self.final_dropout = nn.Dropout(cfg.final_dropout)
self.freeze_finetune_updates = cfg.freeze_finetune_updates
self.num_updates = 0
tgt_d = None
self.proj = None
if output_size is not None:
tgt_d = output_size
elif getattr(cfg, "decoder_embed_dim", d) != d:
tgt_d = cfg.decoder_embed_dim
if tgt_d is not None:
self.proj = Linear(d, tgt_d)
def set_num_updates(self, num_updates):
"""Set the number of parameters updates."""
self.num_updates = num_updates
def extract_features(self, source, padding_mask, layer):
assert not self.training
with torch.no_grad():
out = self.w2v_model.extract_features(
source=source, padding_mask=padding_mask, mask=False,
layer=layer)
return out
def infer(self, source: Tensor, padding_mask: Optional[Tensor],
tbc: bool = True):
assert padding_mask is not None
x, padding_mask = self.w2v_model.infer(source, padding_mask)
if tbc:
# BTC -> TBC
x = x.transpose(0, 1)
x = self.final_dropout(x)
if self.proj is not None:
x = self.proj(x)
return x, padding_mask
def forward(self, source: Tensor, padding_mask: Optional[Tensor],
tbc: bool = True):
ft = self.freeze_finetune_updates <= self.num_updates
with torch.no_grad() if not ft else contextlib.ExitStack():
res = self.w2v_model.extract_features(
source=source,
padding_mask=padding_mask,
mask=self.apply_mask and self.training
)
x = res["x"]
padding_mask = res["padding_mask"]
layer_results = res["layer_results"]
if tbc:
# BTC -> TBC
x = x.transpose(0, 1)
x = self.final_dropout(x)
if self.proj is not None:
x = self.proj(x)
return {
"encoder_out": x, # T x B x C
"encoder_padding_mask": padding_mask.transpose(0, 1)
if padding_mask is not None
else None, # T x B
"padding_mask": padding_mask,
"layer_results": layer_results,
}
def reorder_encoder_out(self, encoder_out, new_order):
if encoder_out["encoder_out"] is not None:
encoder_out["encoder_out"] = encoder_out["encoder_out"].index_select(
1, new_order
)
if encoder_out["encoder_padding_mask"] is not None:
encoder_out["encoder_padding_mask"] = encoder_out[
"encoder_padding_mask"
].index_select(0, new_order)
return encoder_out
def max_positions(self):
"""Maximum input length supported by the encoder."""
return None
def upgrade_state_dict_named(self, state_dict, name):
return state_dict
class Wav2VecCtc(nn.Module):
def __init__(self, cfg, w2v_encoder):
super().__init__()
self.cfg = cfg
self.w2v_encoder = w2v_encoder
self.blank_weight = cfg.blank_weight
self.blank_mode = cfg.blank_mode
def upgrade_state_dict_named(self, state_dict, name):
super().upgrade_state_dict_named(state_dict, name)
return state_dict
@torch.jit.export
def get_logits(self, logits: Tensor, padding_mask: Optional[Tensor], normalize: bool = False):
if self.blank_weight != 0:
if self.blank_mode == "add":
logits[..., 0] += self.blank_weight
elif self.blank_mode == "set":
logits[..., 0] = self.blank_weight
else:
raise ValueError(f"invalid blank mode {self.blank_mode}")
if padding_mask is not None and padding_mask.any():
num_classes = logits.size(-1)
masking_tensor = torch.full((num_classes,), float("-inf"),
dtype=logits.dtype, device=logits.device)
masking_tensor[0] = 0
logits[padding_mask.T] = masking_tensor
if normalize:
logits = F.log_softmax(logits.float(), dim=-1)
return logits
@torch.jit.export
def get_normalized_probs(self, logits: Tensor, padding_mask: Optional[Tensor], log_probs: bool):
"""Get normalized probabilities (or log probs) from a net's output."""
logits = self.get_logits(logits, padding_mask, normalize=False)
if log_probs:
return F.log_softmax(logits.float(), dim=-1)
else:
return F.softmax(logits.float(), dim=-1)
def forward(self, source: Tensor, padding_mask: Optional[Tensor],
tbc: bool = True):
return self.w2v_encoder(source, padding_mask, tbc)
def set_num_updates(self, num_updates):
"""Set the number of parameters updates."""
self.w2v_encoder.set_num_updates(num_updates)
| DeepLearningExamples-master | PyTorch/SpeechRecognition/wav2vec2/wav2vec2/model.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
def populate(parser):
choices = ["pretrain", "finetune"]
parser.add_argument("mode", help="Training mode", choices=choices)
mode = parser.parse_args([a for a in sys.argv[1:] if a in choices]).mode
if mode == "pretrain":
populate_pretraining(parser)
else:
populate_finetuning(parser)
populate_common(parser)
return parser
def populate_infer(parser):
populate_finetuning(parser)
populate_common(parser)
_populate_infer(parser)
return parser
def populate_common(parser):
train = parser.add_argument_group("training setup")
train.add_argument("--epochs_this_job", default=0, type=int,
help="Run for a number of epochs and exit")
train.add_argument("--cudnn_benchmark", action="store_true",
help="Enable cudnn benchmark")
train.add_argument("--local_rank", default=os.getenv("LOCAL_RANK", 0),
type=int, help="GPU id used for distributed training")
optim = parser.add_argument_group("optimization setup")
optim.add_argument("--optimizer", default="adam", type=str,
help="Optimization algorithm")
optim.add_argument("--ema", type=float, default=0.0,
help="Discount factor for EMA of model weights")
io = parser.add_argument_group("feature and checkpointing setup")
io.add_argument("--log_frequency", default=1, type=int,
help="Number of steps between printing training stats")
io.add_argument("--output_dir", type=str, required=True,
help="Directory for logs and checkpoints")
io.add_argument("--log_file", type=str, default=None,
help="Path to save the training logfile.")
io.add_argument("--benchmark_epochs_num", type=int, default=3,
help="Number of last epochs to calculate throughput stats")
ckpt = parser.add_argument_group("checkpoint")
ckpt.add_argument("--no_save", action="store_true",
help="Don't save models or checkpoints")
ckpt.add_argument("--resume", action="store_true",
help="Try to resume from last saved checkpoint")
ckpt.add_argument("--ckpt", default=None, type=str,
help="Path to a checkpoint for resuming training")
ckpt.add_argument("--save_frequency", default=10, type=int,
help="Checkpoint saving frequency in epochs")
ckpt.add_argument("--keep_milestones", default=[100, 200, 300, 400],
type=int, nargs="+",
help="Milestone checkpoints to keep from removing")
# io.add_argument("--save_best_from", default=380, type=int,
# help="Epoch on which to begin tracking best checkpoint (dev WER)")
common = parser.add_argument_group("common")
common.add_argument("--seed", type=int, default=1,
help="Pseudo random number generator seed")
common.add_argument("--cpu", action="store_true",
help="Use CPU instead of CUDA")
common.add_argument("--amp", action="store_true",
help="Use automatic mixed precision")
common.add_argument("--fp16", action="store_true",
help="If fp16 is being used")
common.add_argument("--bf16", action="store_true",
help="Train in bfloat16 precision")
common.add_argument("--min_loss_scale", type=float, default=0.0001,
help="Minimum FP16/AMP loss scale, after which "
"training is stopped")
common.add_argument("--fp16_init_scale", type=int, default=128,
help="Default FP16 loss scale")
common.add_argument("--fp32_transformer_layernorm", action="store_true",
help="Calculate MHA LayerNorms in full precision")
common.add_argument("--fp32_mha_softmax", action="store_true",
help="Calculate multi-head attention to FP32")
common.add_argument("--fp32_cosine_sim", action="store_true",
help="Calculate cosine similarity in FP32")
common.add_argument("--fp32_pos_conv", action="store_true",
help="Calculate positional conv in FP32")
common.add_argument("--fp32_conv_norms", action="store_true",
help="Calculate normalization in conv layers in FP32")
common.add_argument("--mha", type=str, default="fairseq",
choices=["fairseq", "pyt"], help="MHA implementation")
common.add_argument("--num_concat_batches", type=int, default=1)
dataset = parser.add_argument_group("dataset")
dataset.add_argument("--num_workers", type=int, default=6,
help="How many subprocesses to use for data loading")
dataset.add_argument("--skip_invalid_size_inputs_valid_test",
action="store_true",
help="Ignore too long or too short lines in valid and"
" test set")
dataset.add_argument("--max_tokens", type=int, default=1400000,
help="Maximum number of tokens in a batch")
dataset.add_argument("--max_tokens_valid", type=int, default=1400000,
help="Maximum number of tokens in a validation batch "
"(defaults to --max-tokens)")
dataset.add_argument("--required_batch_size_multiple", type=int, default=8,
help="Batch size will be a multiplier of this value")
dataset.add_argument("--required_seq_len_multiple", type=int, default=2,
help="Pad the input to encoder such that the sequence"
" length is divisible by multiple")
dataset.add_argument("--train_subset", type=str, default="train",
help="Data subset to use for training (e.g. train, "
"valid, test)")
dataset.add_argument("--valid_subset", type=str, default="valid",
help="Comma separated list of data subsets to use for"
" validation (e.g. train, valid, test)")
dataset.add_argument("--batch_size", type=int, default=None,
help="Number of examples in a batch")
dataset.add_argument("--batch_size_valid", type=int, default=None,
help="Batch size of the validation batch (defaults "
"to --batch-size)")
task = parser.add_argument_group("task")
task.add_argument("--data", type=str,
default="/workspace/fairseq/librispeech",
help="Path to data directory")
task.add_argument("--sample_rate", type=int, default=16000,
help="Target sample rate. audio files will be up/down "
"sampled to this rate")
task.add_argument("--enable_padding", action="store_true",
help="Pad shorter samples instead of cropping")
task.add_argument("--min_sample_size", type=int, default=None,
help="Min sample size to crop to for batching")
task.add_argument("--max_sample_size", type=int, default=None,
help="Max sample size to crop to for batching")
task.add_argument("--num_batch_buckets", type=int, default=0,
help="If >0, then bucket source and target lengths into "
"N buckets and pad accordingly; this is useful on "
"TPUs to minimize the number of compilations")
opt = parser.add_argument_group("optimization & optimizer")
opt.add_argument("--max_update", type=int, default=400000,
help="Force stop training at specified update")
opt.add_argument("--update_freq", type=int, nargs="+", default=[64],
help="Accumulate grads and update params every N batches")
opt.add_argument("--lr", type=float, nargs="+", default=[0.0005],
help="Max learning rate, must be more than cfg.min_lr")
opt.add_argument("--adam_betas", type=float, nargs="+", default=[0.9, 0.98],
help="Betas for Adam optimizer")
opt.add_argument("--adam_eps", type=float, default=1e-06,
help="Epsilon for Adam optimizer")
opt.add_argument("--weight_decay", type=float, default=0.01,
help="Weight decay")
opt.add_argument("--clip_norm", type=float, default=0.0,
help="Clip threshold of gradients")
sched = parser.add_argument_group("lr_scheduler")
sched.add_argument("--lr_policy", type=str, default="poly",
choices=["poly", "exp"], help="LR decay policy")
sched.add_argument("--warmup_updates", type=int, default=32000,
help="Warmup the learning rate linearly for the first "
"N updates")
sched.add_argument("--hold_updates", type=int, default=0,
help="The number of updates with const learning rate")
sched.add_argument("--initial_lr_scale", type=float, default=0.0,
help="Initial learning rate scale")
sched.add_argument("--final_lr_scale", type=float, default=0.0,
help="Final learning rate scale")
sched.add_argument("--lr_poly_power", type=float, default=1.0,
help="Poly lr policy policy power")
sched.add_argument("--lr_exp_decay", type=float, default=None,
help="Exp lr policy decay factor")
drop = parser.add_argument_group("dropout")
drop.add_argument("--dropout", type=float, default=0.1,
help="Dropout probability for the transformer")
drop.add_argument("--attention_dropout", type=float, default=0.0,
help="Dropout probability for attention weights")
drop.add_argument("--activation_dropout", type=float, default=0.0,
help="Dropout probability after activation in FFN")
drop.add_argument("--dropout_input", type=float, default=0.1,
help="Dropout to apply to the input (after feat extr)")
drop.add_argument("--dropout_features", type=float, default=0.1,
help="Dropout to apply to the features (after feat extr)")
mask = parser.add_argument_group("input masking")
mask.add_argument("--apply_mask", action="store_true",
help="Apply masking during fine-tuning")
mask.add_argument("--mask_length", type=int, default=10,
help="Repeat the mask indices multiple times")
mask.add_argument("--mask_prob", type=float, default=0.5,
help="Probability of replacing a token with mask "
"(normalized by length)")
mask.add_argument("--require_same_masks", type=bool, default=True,
help="Whether to number of masked timesteps must be the"
" same across all examples in a batch")
mask.add_argument("--mask_selection", default="static",
choices=["static", "uniform", "normal", "poisson"],
help="How to choose masks")
mask.add_argument("--mask_other", type=float, default=0,
help="Secondary mask argument (used for more complex "
"distributions), see help in compute_mask_indices")
mask.add_argument("--no_mask_overlap", type=bool, default=False,
help="Whether to allow masks to overlap")
mask.add_argument("--mask_min_space", type=int, default=1,
help="Min space between spans (if no overlap is enabled)")
mask.add_argument("--mask_channel_length", type=int, default=10,
help="Length of the mask for features (channels)")
mask.add_argument("--mask_channel_prob", type=float, default=0.0,
help="Probability of replacing a feature with 0")
mask.add_argument("--mask_channel_before", type=bool, default=False,
help="Apply channel-masking before frequency-masking")
mask.add_argument("--mask_channel_selection", default="static",
choices=["static", "uniform", "normal", "poisson"],
help="How to choose mask length for channel masking")
mask.add_argument("--mask_channel_other", type=float, default=0,
help="Secondary mask argument (used for more complex "
"distributions), see help in compute_mask_indicesh")
mask.add_argument("--no_mask_channel_overlap", type=bool, default=False,
help="Whether to allow channel masks to overlap")
mask.add_argument("--mask_channel_min_space", type=int, default=1,
help="Min space between spans (if no overlap is enabled)")
parser.add_argument("--feature_grad_mult", type=float, default=0.1,
help="Reset feature grad mult in wav2vec 2.0 to this")
# NOTE In Fairseq this is called `--layerdrop` in fine-tuning yamls
parser.add_argument("--encoder_layerdrop", type=float, default=0.05,
help="Probability of dropping a layer in wav2vec 2.0")
mask.add_argument("--mask_dropout", type=float, default=0.0,
help="Percent of masks to unmask for each sample")
def populate_finetuning(parser):
"""Args for fine-tuning, absent from pre-trained ckpts."""
ft = parser.add_argument_group("supervised fine-tuning")
ft.add_argument("--final_dropout", type=float, default=0.0,
help="Dropout after transformer and before final proj")
ft.add_argument("--w2v_path", type=str, default=None,
help="Path to wav2vec 2.0 model")
ft.add_argument("--blank_weight", type=float, default=0)
ft.add_argument("--blank_mode", type=str, default="add")
ft.add_argument("--labels", type=str, default="ltr",
help="Extension of the label file to load for fine-tuning")
ft.add_argument("--freeze_finetune_updates", type=int, default=0,
help="Don't finetune wav2vec for this many updates")
def populate_pretraining(parser):
"""During fine-tuning these parameters will be loaded from a ckpt."""
model = parser.add_argument_group("model")
model.add_argument("--extractor_mode", type=str, default="default",
help="Mode for feature extractor. default has a single "
"group norm with d groups in the first conv block,"
" whereas layer_norm has layer norms in every "
"block (meant to use with normalize=True)")
model.add_argument("--encoder_layers", type=int, default=12,
help="Num encoder layers in the transformer")
model.add_argument("--encoder_embed_dim", type=int, default=768,
help="Encoder embedding dimension")
model.add_argument("--encoder_ffn_embed_dim", type=int, default=3072,
help="Encoder embedding dimension for FFN")
model.add_argument("--encoder_attention_heads", type=int, default=12,
help="Num encoder attention heads")
model.add_argument("--activation_fn", type=str, default="gelu",
help="Activation function to use")
model.add_argument("--final_dim", type=int, default=256,
help="Project final representations and targets to this"
" many dimensions. set to encoder_embed_dim "
"is <= 0")
model.add_argument("--layer_norm_first", action="store_true",
help="Apply layernorm first in the transformer")
model.add_argument("--conv_feature_layers", type=str,
default="[(512,10,5)]+[(512,3,2)]*4+[(512,2,2)]+[(512,2,2)]",
help="String describing convolutional feature "
"extraction layers in form of a python list that "
"contains [(dim, kernel_size, stride), ...]")
model.add_argument("--conv_bias", action="store_true",
help="Include bias in conv encoder")
model.add_argument("--logit_temp", type=float, default=0.1,
help="Temperature to divide logits by")
model.add_argument("--quantize_targets", action="store_true",
help="Use quantized targets")
model.add_argument("--quantize_input", action="store_true",
help="Use quantized inputs")
model.add_argument("--target_glu", action="store_true",
help="Adds projection + glu to targets")
model.add_argument("--quantizer_depth", type=int, default=1,
help="Number of quantizer layers")
model.add_argument("--quantizer_factor", type=int, default=3,
help="Dimensionality increase for inner quantizer "
"layers (if depth > 1)")
model.add_argument("--latent_vars", type=int, default=320,
help="Number of latent variables V in each group of the"
" codebook")
model.add_argument("--latent_groups", type=int, default=2,
help="Number of groups G of latent variables in the "
"codebook")
model.add_argument("--latent_dim", type=int, default=0,
help="If > 0, uses this dimensionality for latent var"
"iables. otherwise uses final_dim / latent_groups")
model.add_argument("--num_negatives", type=int, default=100,
help="Num of sampled negatives")
model.add_argument("--negatives_from_everywhere", action="store_true",
help="Sample negatives from everywhere, not just masked"
" states")
model.add_argument("--cross_sample_negatives", type=int, default=0,
help="Num of cross sampled negatives")
model.add_argument("--codebook_negatives", type=int, default=0,
help="Number of negative examples codebook")
model.add_argument("--conv_pos", type=int, default=128,
help="Number of filters for convolutional positional "
"embeddings")
model.add_argument("--conv_pos_groups", type=int, default=16,
help="Number of groups for convolutional positional "
"embedding")
model.add_argument("--latent_temp", type=float, nargs="+",
default=[2.0, 0.5, 0.999995],
help="Legacy (to be removed)")
model.add_argument("--normalize", action="store_true",
help="If set, normalizes input to have 0 mean and unit "
"variance")
parser.add_argument("--log_keys", type=str, nargs="*",
default=["prob_perplexity", "code_perplexity", "temp"],
help="Additional output keys to log")
crit = parser.add_argument_group("criterion")
crit.add_argument("--infonce", action="store_true",
help="If set, uses cross entropy instead of binary cross"
" entropy (i.e. InfoNCE loss)")
crit.add_argument("--loss_weights", type=float, nargs="*",
default=[0.1, 10.0], help="Weights for the loss terms")
joc = parser.add_argument_group("joc experimental")
joc.add_argument("--use_spectrogram_features", action="store_true",
help="Train on input spectrograms")
joc.add_argument("--rotary_embeddings", action="store_true",
help="Use rotarty embeddings for Transformer layers")
joc.add_argument("--hourglass_transformer", type=str, default=None,
help="Specify the number of layers and shorteining, e.g.,"
" [n_pre,(n_hourglass, shorten_factor),n_post]")
joc.add_argument("--hourglass_resample", type=str, default="naive",
help="Method of up/downsampling in the hourglass model")
joc.add_argument("--spectrogram_feature_stacking", type=int, default=1)
joc.add_argument("--spectrogram_feature_subsampling", type=int, default=1)
joc.add_argument("--spectrogram_window_size", type=float, default=0.02)
joc.add_argument("--spectrogram_window_stride", type=float, default=0.01)
joc.add_argument("--spectrogram_n_filt", type=int, default=80)
return parser
def _populate_infer(parser):
# Fine-tuning only
infer = parser.add_argument_group("inference")
infer.add_argument("--steps", default=0, type=int,
help="Eval this many steps for every worker")
infer.add_argument("--warmup_steps", default=0, type=int,
help="Burn-in period before measuring latencies")
infer.add_argument("--labels_path", type=str, default=None,
help="Path to output labels file, e.g., dict.ltr.txt")
infer.add_argument("--save_predictions", type=str, default=None,
help="Save predictions in text form at this location")
infer.add_argument("--save_logits", default=None, type=str,
help="Save output logits under specified path")
infer.add_argument("--transcribe_wav", type=str,
help="Path to a single .wav file (16KHz)")
infer.add_argument("--transcribe_filelist", type=str,
help="Path to a filelist with one .wav path per line")
infer.add_argument("--torchscript", action="store_true",
help="Evaluate with a TorchScripted model")
infer.add_argument("--w2v_path_for_args", type=str, default=None,
help="Args to build model for inference (weights will "
"be loaded from --w2v_path)")
| DeepLearningExamples-master | PyTorch/SpeechRecognition/wav2vec2/wav2vec2/arg_parser.py |
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the LICENSE file in
# the root directory of this source tree. An additional grant of patent rights
# can be found in the PATENTS file in the same directory.
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from functools import reduce
from pathlib import Path
import torch
import wav2vec2.arg_parser
from common.fairseq.data import AddTargetDataset, FileAudioDataset
from common.utils import AttrDict, print_once
from wav2vec2.model import Wav2Vec2Model, Wav2VecEncoder, Wav2VecCtc
blank_symbol = "<s>" # for CTC
# Supervised CTC training
class LabelEncoder(object):
def __init__(self, dictionary):
self.dictionary = dictionary
def __call__(self, label):
return self.dictionary.encode_line(
label, append_eos=False, add_if_not_exist=False
)
# For frame-wise phoneme labels
class PhoneLabelEncoder:
def __call__(self, label):
return torch.IntTensor([int(id) for id in label.split()])
def load_dataset(split, args, target_dictionary=None, with_labels=False,
training=True):
dataset = FileAudioDataset(
manifest_path=Path(args.data, f'{split}.tsv'),
sample_rate=args.sample_rate,
min_sample_size=args.min_sample_size if training else None,
max_sample_size=args.max_sample_size if training else None,
pad=(hasattr(args, 'labels') or args.enable_padding),
normalize=args.normalize,
num_buckets=args.num_batch_buckets,
compute_mask_indices=False,
repeat_to_refsize=(args.num_concat_batches > 1),
)
if with_labels:
        assert hasattr(args, 'labels') and args.labels
skip_inds = getattr(dataset, "skipped_indices", set())
with open(Path(args.data, f"{split}.{args.labels}")) as f:
labels = [line for i, line in enumerate(f) if i not in skip_inds]
assert len(labels) == len(dataset), (
f"labels length ({len(labels)}) and dataset length "
f"({len(dataset)}) do not match"
)
dataset = AddTargetDataset(
dataset,
labels,
pad=target_dictionary.pad(),
eos=target_dictionary.eos(),
batch_targets=True,
process_label=LabelEncoder(target_dictionary),
add_to_input=False
)
return dataset
def load_phone_classification_dataset(split, args):
assert not args.labels
manifest_path = os.path.join(args.data, "{}.tsv".format(split))
dataset = FileAudioDataset(
manifest_path=manifest_path,
sample_rate=args.sample_rate,
max_sample_size=args.max_sample_size,
min_sample_size=args.min_sample_size,
pad=args.labels is not None or args.enable_padding,
normalize=args.normalize,
num_buckets=args.num_batch_buckets,
compute_mask_indices=False,
)
return dataset
def _prune_infer_state_dict_prefix(state_dict,
prefix='w2v_encoder.w2v_model.'):
pref_len = len(prefix)
return {
(k[pref_len:] if k.startswith(prefix) else k): v
for k, v in state_dict.items()
}
def build_model(args, mode='pretrain', target_dictionary=None):
cfg = AttrDict(vars(args))
if mode == 'pretrain':
assert target_dictionary is None
model = Wav2Vec2Model(cfg)
elif mode == 'finetune':
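        # load pretrained wav2vec 2.0 weights into the encoder, drop its
        # pretraining heads, and attach a CTC projection sized to the
        # target dictionary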
state = torch.load(args.w2v_path, map_location='cpu')['model']
enc = Wav2VecEncoder(cfg, state, output_size=len(target_dictionary))
model = Wav2VecCtc(cfg, enc)
elif mode == 'infer':
enc = Wav2VecEncoder(cfg, None, output_size=len(target_dictionary))
model = Wav2VecCtc(cfg, enc)
else:
raise ValueError
sequence_generator = None
tokenizer = None
actualized_cfg = getattr(model, "cfg", None)
if actualized_cfg is not None and "w2v_args" in actualized_cfg:
cfg.w2v_args = actualized_cfg.w2v_args
return model, sequence_generator, tokenizer
def build_phone_classification_model(args):
model = Wav2VecEncoder(args)
actualized_cfg = getattr(model, "cfg", None)
if actualized_cfg is not None:
if "w2v_args" in actualized_cfg:
raise NotImplementedError
return model
def get_ckpt_args(ckpt):
"""Return a dictionary of args saved inside a ckpt.
Handles old and new Fairseq ckpts, Nvidia DLE ckpts.
"""
if "cfg" in ckpt:
import omegaconf
w2v_args = omegaconf.OmegaConf.to_container(ckpt["cfg"])
# Flatten nested dicts (hopefully matching keys have same values)
w2v_args = reduce(lambda a, b: {**(a or {}), **(b or {})},
w2v_args.values())
else: # Legacy checkpoints
w2v_args = ckpt["args"]
if type(w2v_args) is argparse.Namespace:
w2v_args = vars(w2v_args)
return w2v_args
def update_args_for_finetuning(args, w2v_path_for_args):
w2v_args = get_ckpt_args(torch.load(w2v_path_for_args, map_location="cpu"))
pretrain_parser = argparse.ArgumentParser()
wav2vec2.arg_parser.populate_pretraining(pretrain_parser)
my_args = vars(pretrain_parser.parse_args([]))
for arg in my_args:
if arg in w2v_args and my_args[arg] != w2v_args[arg]:
fname = Path(args.w2v_path).name
print_once(f'Setting from {fname}: {arg}={w2v_args[arg]}',
local_rank=args.local_rank)
setattr(args, arg, w2v_args[arg])
else:
setattr(args, arg, my_args[arg])
| DeepLearningExamples-master | PyTorch/SpeechRecognition/wav2vec2/wav2vec2/utils.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import editdistance
import torch
import torch.nn.functional as F
from torch.nn.modules.loss import _Loss
from common.fairseq import utils
from common.fairseq.data.data_utils import post_process
from common.utils import AttrDict
class Wav2vecCriterion(_Loss):
def __init__(self, args):
super().__init__(args)
self.infonce = args.infonce
self.loss_weights = args.loss_weights
self.log_keys = [] if args.log_keys is None else args.log_keys
def forward(self, model, sample, reduce=True):
"""Compute the loss for the given sample.
Returns a tuple with three elements:
1) the loss
2) the sample size, which is used as the denominator for the gradient
3) logging outputs to display while training
"""
net_output = model(**sample["net_input"],
sub_batch_sizes=sample["sub_batch_sizes"],
sub_batch_lens=sample["sub_batch_lens"])
logits = model.get_logits(net_output).float()
target = model.get_targets(sample, net_output)
weights = None
if hasattr(model, "get_target_weights") and not self.infonce:
weights = model.get_target_weights(target, net_output)
if torch.is_tensor(weights):
weights = weights.float()
losses = []
reduction = "sum" if reduce else "none"
if self.infonce:
loss = F.cross_entropy(logits, target, reduction=reduction)
else:
loss = F.binary_cross_entropy_with_logits(
logits, target.float(), weights, reduction=reduction
)
if 'sample_size' in sample:
sample_size = sample['sample_size']
elif 'mask_indices' in sample['net_input']:
sample_size = sample['net_input']['mask_indices'].sum()
elif self.infonce:
sample_size = target.numel()
else:
sample_size = target.long().sum().item()
losses.append(loss.detach().clone())
if self.loss_weights is not None:
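            # extra losses (codebook diversity penalty and conv feature L2
            # penalty) are weighted and scaled by sample_size to match the
            # summed contrastive loss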
assert hasattr(model, "get_extra_losses")
extra_losses = model.get_extra_losses(net_output)
if torch.is_tensor(extra_losses):
extra_losses = [extra_losses]
if len(self.loss_weights) == 1 and len(extra_losses) != 1:
self.loss_weights = [self.loss_weights[0]] * len(extra_losses)
assert len(extra_losses) == len(self.loss_weights), \
f"{len(extra_losses)}, {len(self.loss_weights)}"
for p, coef in zip(extra_losses, self.loss_weights):
if coef != 0 and p is not None:
p = coef * p.float() * sample_size
loss += p
losses.append(p)
log_out = {
"loss": loss.item() if reduce else loss.detach(),
"ntokens": sample_size,
"nsentences": sample["id"].numel(),
"sample_size": sample_size,
}
for lk in self.log_keys:
# Only store "logits" and "target" for computing MAP and MAUC
# during validation
if lk == "logits":
if not self.training:
log_out["logits"] = logits.cpu().numpy()
elif lk == "target":
if not self.training:
# If the targets have been mixed with the predictions of
# teacher models, find the original targets
if hasattr(model, "get_original_targets"):
original_target = model.get_original_targets(
sample, net_output)
else:
original_target = target
log_out["target"] = original_target.cpu().numpy()
elif lk in net_output:
log_out[lk] = float(net_output[lk])
if len(losses) > 1:
for i, l in enumerate(losses):
log_out[f"loss_{i}"] = l.item()
if self.infonce:
with torch.no_grad():
if logits.numel() == 0:
corr = 0
count = 0
else:
assert logits.dim() > 1, logits.shape
max_ = logits.argmax(-1) == 0
min_ = logits.argmin(-1) == 0
both = max_ & min_
corr = max_.long().sum().item() - both.long().sum().item()
count = float(max_.numel())
log_out["correct"] = corr
log_out["count"] = count
return loss, sample_size, log_out
class CTCCriterion(_Loss):
def __init__(self, target_dictionary, blank_idx=0, pad_idx=1, eos_idx=2,
zero_infinity=True, sentence_avg=True, post_process='letter'):
super().__init__()
# keep all indexes for compatibility with fairseq
self.blank_idx = blank_idx
self.pad_idx = target_dictionary.pad()
self.eos_idx = target_dictionary.eos()
assert self.blank_idx != self.pad_idx != self.eos_idx
self.target_dictionary = target_dictionary
self.zero_infinity = zero_infinity
self.sentence_avg = sentence_avg
self.post_process = post_process
# currently we don't support decoders (e.g., KenLM)
self.w2l_decoder = None
def forward(self, model, sample, reduce=True):
net_out = model(**sample["net_input"])
logp = model.get_normalized_probs(
net_out["encoder_out"], net_out["padding_mask"], log_probs=True
).contiguous()
T, B, _ = logp.size()
if net_out["padding_mask"] is not None:
lens = (~net_out["padding_mask"]).long().sum(-1)
else:
lens = logp.new_full((B,), T, dtype=torch.long)
tgt = sample["target"]
pad_mask = (tgt != self.pad_idx) & (tgt != self.eos_idx)
tgt_flat = tgt.masked_select(pad_mask)
tgt_lens = sample["target_lengths"]
with torch.backends.cudnn.flags(enabled=False):
loss = F.ctc_loss(logp, tgt_flat, lens, tgt_lens,
blank=self.blank_idx, reduction="sum",
zero_infinity=self.zero_infinity)
log_out = {
"loss": utils.item(loss.data),
"ntokens": sample["ntokens"],
"nsentences": sample["id"].numel(),
"sample_size": B if self.sentence_avg else sample["ntokens"]
}
if not model.training:
log_out.update(self.calculate_wer(sample, logp, lens))
return loss, log_out['sample_size'], log_out
def calculate_wer(self, sample, logp, lens):
with torch.no_grad():
log = AttrDict({"wv_errs": 0, "w_errs": 0, "w_len": 0,
"c_errs": 0, "c_len": 0})
logp_t = logp.transpose(0, 1).float().contiguous().cpu()
tgt_labels = sample.get('target_label', sample['target'])
head = lambda l: None if l is None or len(l) < 1 else l[0]
for lp, L, tgt in zip(logp_t, lens, tgt_labels):
lp = lp[:L].unsqueeze(0)
if self.w2l_decoder is not None:
decoded = head(head(self.w2l_decoder.decode(lp)))
else:
decoded = None
mask = (tgt != self.pad_idx) & (tgt != self.eos_idx)
tgt_units = self.target_dictionary.string(tgt[mask])
tgt_units_arr = tgt[mask].tolist()
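                # greedy CTC decoding: argmax per frame, collapse repeats,
                # then drop blank tokens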
toks = lp.argmax(dim=-1).unique_consecutive()
pred_units_arr = toks[toks != self.blank_idx].tolist()
log.c_errs += editdistance.eval(pred_units_arr, tgt_units_arr)
log.c_len += len(tgt_units_arr)
tgt_words = post_process(tgt_units, self.post_process).split()
pred_units = self.target_dictionary.string(pred_units_arr)
pred_words_raw = post_process(pred_units,
self.post_process).split()
if decoded is not None and "words" in decoded:
pred_words = decoded["words"]
log.w_errs += editdistance.eval(pred_words, tgt_words)
log.wv_errs += editdistance.eval(pred_words_raw, tgt_words)
else:
dist = editdistance.eval(pred_words_raw, tgt_words)
log.w_errs += dist
log.wv_errs += dist
log.w_len += len(tgt_words)
return vars(log)
| DeepLearningExamples-master | PyTorch/SpeechRecognition/wav2vec2/wav2vec2/criterion.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import atexit
import glob
import os
import re
from pathlib import Path
import numpy as np
import torch
from torch.utils.tensorboard import SummaryWriter
import dllogger
tb_loggers = {}
class TBLogger:
"""
    The dummy scalars ('_' and '✕') stretch the screen with empty plots so
    the legend always fits for the other plots.
"""
def __init__(self, enabled, log_dir, name, interval=1, dummies=True):
self.enabled = enabled
self.interval = interval
self.cache = {}
if self.enabled:
self.summary_writer = SummaryWriter(
log_dir=os.path.join(log_dir, name),
flush_secs=120, max_queue=200)
atexit.register(self.summary_writer.close)
if dummies:
for key in ('_', '✕'):
self.summary_writer.add_scalar(key, 0.0, 1)
def log(self, step, data):
for k, v in data.items():
self.log_value(step, k, v.item() if type(v) is torch.Tensor else v)
def log_value(self, step, key, val, stat='mean'):
if self.enabled:
if key not in self.cache:
self.cache[key] = []
self.cache[key].append(val)
if len(self.cache[key]) == self.interval:
agg_val = getattr(np, stat)(self.cache[key])
self.summary_writer.add_scalar(key, agg_val, step)
del self.cache[key]
def log_grads(self, step, model):
if self.enabled:
norms = [p.grad.norm().item() for p in model.parameters()
if p.grad is not None]
for stat in ('max', 'min', 'mean'):
self.log_value(step, f'grad_{stat}', getattr(np, stat)(norms),
stat=stat)
def unique_log_fpath(fpath):
"""Have a unique log filename for every separate run"""
    log_num = max([0] + [int(re.search(r"\.(\d+)", Path(f).suffix).group(1))
for f in glob.glob(f"{fpath}.*")])
return f"{fpath}.{log_num + 1}"
def stdout_step_format(step):
if isinstance(step, str):
return step
fields = []
if len(step) > 0:
fields.append("epoch {:>4}".format(step[0]))
if len(step) > 1:
fields.append("iter {:>3}".format(step[1]))
if len(step) > 2:
fields[-1] += "/{}".format(step[2])
return " | ".join(fields)
def stdout_metric_format(metric, metadata, value):
name = metadata.get("name", metric + " : ")
unit = metadata.get("unit", None)
fmt = f'{{{metadata.get("format", "")}}}'
fields = [name, fmt.format(value) if value is not None else value, unit]
fields = [f for f in fields if f is not None]
return "| " + " ".join(fields)
def log(when, metrics={}, scope='train', flush_log=False, tb_iter=None):
dllogger.log(when, data=metrics.get_metrics(scope, 'dll'))
if tb_iter is not None:
tb_loggers[scope].log(tb_iter, metrics.get_metrics(scope, 'tb'))
if flush_log:
flush()
def log_grads_tb(tb_total_steps, grads, tb_subset='train'):
tb_loggers[tb_subset].log_grads(tb_total_steps, grads)
def log_parameters(data, verbosity=0, tb_subset=None):
for k, v in data.items():
v = str(v) if isinstance(v, Path) else v
dllogger.log(step="PARAMETER", data={k: v}, verbosity=verbosity)
if tb_subset is not None and tb_loggers[tb_subset].enabled:
tb_data = {k: v for k, v in data.items()
if type(v) in (str, bool, int, float)}
tb_loggers[tb_subset].summary_writer.add_hparams(tb_data, {})
def flush():
dllogger.flush()
for tbl in tb_loggers.values():
if tbl.enabled:
tbl.summary_writer.flush()
| DeepLearningExamples-master | PyTorch/SpeechRecognition/wav2vec2/common/tb_dllogger.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''Fake fairseq.* modules allowing to torch.load fairseq checkpoints.'''
import sys
class Dummy:
pass
class FakeModule:
def __init__(self, classes=["AverageMeter", "TimeMeter", "StopwatchMeter"]):
[setattr(self, cls, Dummy) for cls in classes]
sys.modules["fairseq"] = Dummy()
sys.modules["fairseq.data"] = Dummy()
sys.modules["fairseq.data.dictionary"] = FakeModule(["Dictionary"])
sys.modules["fairseq.logging.meters"] = FakeModule()
sys.modules["fairseq.meters"] = FakeModule()
| DeepLearningExamples-master | PyTorch/SpeechRecognition/wav2vec2/common/fairseq_fake_modules.py |
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import time
from collections import defaultdict
from copy import copy
import numpy as np
import torch
from common.utils import all_reduce_cpu_scalars, print_once
def __levenshtein(a, b):
"""Calculates the Levenshtein distance between two sequences."""
n, m = len(a), len(b)
if n > m:
# Make sure n <= m, to use O(min(n,m)) space
a, b = b, a
n, m = m, n
current = list(range(n + 1))
for i in range(1, m + 1):
previous, current = current, [i] + [0] * n
for j in range(1, n + 1):
add, delete = previous[j] + 1, current[j - 1] + 1
change = previous[j - 1]
if a[j - 1] != b[i - 1]:
change = change + 1
current[j] = min(add, delete, change)
return current[n]
def word_error_rate(hypotheses, references):
"""Computes average Word Error Rate (WER) between two text lists."""
scores = 0
words = 0
len_diff = len(references) - len(hypotheses)
if len_diff > 0:
raise ValueError("Uneqal number of hypthoses and references: "
"{0} and {1}".format(len(hypotheses), len(references)))
elif len_diff < 0:
hypotheses = hypotheses[:len_diff]
for h, r in zip(hypotheses, references):
h_list = h.split()
r_list = r.split()
words += len(r_list)
scores += __levenshtein(h_list, r_list)
if words != 0:
wer = 1.0*scores/words
else:
wer = float('inf')
return wer, scores, words
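def _word_error_rate_sketch():
    # Minimal worked example for word_error_rate above; the transcripts are
    # illustrative. The first pair has one deleted word (out of 6 reference
    # words), the second one substitution (out of 2), giving WER = 2/8 = 0.25.
    hyps = ["the cat sat on mat", "hello word"]
    refs = ["the cat sat on the mat", "hello world"]
    wer, errs, n_words = word_error_rate(hyps, refs)
    assert (errs, n_words) == (2, 8)
    return wer  # 0.25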
class MetricsAggregator:
def __init__(self, scopes=('train', 'train_avg'),
dllogger_keys=(),
benchmark_keys=(),
benchmark_epochs=0,
reduce_mean=(),
reduce_last=(),
group_tb_entries=False,
cuda=True):
"""
Args:
scopes: possible scopes of metrics accumulation
            dllogger_keys: metrics to log with dllogger
benchmark_keys: metrics to log as benchmark metrics
benchmark_epochs: num of last epochs to benchmark
"""
super().__init__()
self.dll_keys = dllogger_keys
self.partials = defaultdict(float)
self.partial_counts = defaultdict(int)
self.accum_reductions = defaultdict(lambda: 'sum')
self.accum_reductions.update({k: 'mean' for k in reduce_mean})
self.accum_reductions.update({k: 'last' for k in reduce_last})
self.metrics = {scope: defaultdict(float) for scope in scopes}
self.metric_counts = {scope: defaultdict(int) for scope in scopes}
self.start_time = {scope: None for scope in scopes}
self.done_accumulating = {scope: True for scope in scopes}
self.benchmark_epochs = benchmark_epochs
self.metrics['train_benchmark'] = defaultdict(list)
self.benchmark_keys = benchmark_keys
self.scopes = scopes
self.group_tb_entries = group_tb_entries
self.cuda = cuda
def log_scalar(self, key, val, accum_reduction=None):
"""Main primitive for logging partial metrics from single batch.
        NOTE: Calling `log_scalar` with different `accum_reduction` values
        for the same `key` results in undefined behavior.
Args:
key: metric key
val: metric value
accum_reduction: defines how to accumulate given metric:
- 'sum': sums metrics across grad acc and devices batches
- 'mean': same as 'sum' but with averaging
- 'last': overwrites previous accumulated values. Useful for
logging metric once in a grad acc batch, e.g. learning rate.
If None, a default value is fetched from self.accum_reductions.
If not None, overwrites defaults in self.accum_reductions
"""
if accum_reduction is None:
accum_reduction = self.accum_reductions[key]
else:
self.accum_reductions[key] = accum_reduction
if accum_reduction == 'sum':
self.partials[key] += val
self.partial_counts[key] = 1
elif accum_reduction == 'mean':
self.partials[key] += val
self.partial_counts[key] += 1
elif accum_reduction == 'last':
self.partials[key] = val # overwrite accumulation
self.partial_counts[key] = 1
else:
raise ValueError(accum_reduction)
def log_scalars(self, scalars_dict, accum_reduction=None):
""" Log whole dict of metrics at once """
for k, v in scalars_dict.items():
self.log_scalar(k, v, accum_reduction)
def __setitem__(self, key, val):
""" Convenience logging method. Use sparingly (see NOTE below).
Uses 'last' aggregation and extracts tensors.
Example:
>>> metrics['lr'] = optim.param_groups[0]['lr']
NOTE: `metrics['lr'] = ...` is very different
from `metrics.partial['lr'] = ...`
"""
extract = lambda t: t.item() if type(t) is torch.Tensor else t
if type(val) is dict:
for k, v in val.items():
self.log_scalar(k, extract(v), 'last')
else:
self.log_scalar(key, extract(val), 'last')
def accumulate(self, scopes=None):
""" Accumulates partial metrics in metrics for given scopes.
Defines boundaries of accum_reduction in `log_scalar` method.
Intended to run after each gradient accumulation adjusted iteration.
"""
scopes = scopes if scopes is not None else self.scopes
for scope in scopes:
for k, v in self.partials.items():
self.metrics[scope][k] += v
self.metric_counts[scope][k] += self.partial_counts.get(k, 1)
self.partials.clear()
self.partial_counts.clear()
def all_reduce(self, world_size):
""" Reduce metrics across devices.
Currently assumes that all metrics are float scalars.
After reducing, `log_scalar` method with accumulation other than 'last'
shouldn't be called prior to calling `accumulate`.
"""
if world_size == 1:
return
self.partials = defaultdict(float,
all_reduce_cpu_scalars(self.partials))
for k, v in self.partials.items():
if self.accum_reductions[k] in ('mean', 'last'):
self.partial_counts[k] *= (world_size - self.partials.get('ignore', 0))
if self.partials.get('ignore', 0) > 0:
assert self.accum_reductions[k] == 'mean'
print_once(f'reducing with world size {world_size - self.partials.get("ignore", 0)}')
def start_iter(self, iter):
self._start_accumulating(iter, True, 'train')
def start_epoch(self, epoch):
if self.cuda:
torch.cuda.synchronize()
self._start_accumulating(epoch, True, 'train_avg')
def start_val(self):
if self.cuda:
torch.cuda.synchronize()
self._start_accumulating(None, True, 'val')
def finish_iter(self):
self._accumulate_time('train')
def finish_logging_interval(self):
self._finish_accumulating('train')
def finish_epoch(self):
if self.cuda:
torch.cuda.synchronize()
self._accumulate_time('train_avg')
self._finish_accumulating('train_avg')
metr = self.metrics['train_benchmark']
for k in self.benchmark_keys:
metr[k].append(self.metrics['train_avg'][k])
if len(metr[k]) > self.benchmark_epochs:
metr[k].pop(0)
def finish_val(self, scope='val'):
if self.cuda:
torch.cuda.synchronize()
self._accumulate_time(scope)
self._finish_accumulating(scope)
def get_metrics(self, scope='train', target='dll'):
if scope == 'train_benchmark':
metr = self.metrics[scope]
ret = {'train_avg_' + k: np.mean(v) for k, v in metr.items()}
ret['benchmark_epochs_num'] = len(list(metr.values())[0])
return ret
assert self.done_accumulating[scope]
ret = copy(self.metrics[scope])
if target == 'dll':
ret = {f'{scope}_{k}': v
for k, v in ret.items() if k in self.dll_keys}
elif target == 'tb' and self.group_tb_entries:
# Rename keys so they would group nicely inside TensorBoard
def split_key(k):
pos = k.rfind('_')
return k[:pos] + '/' + k[pos+1:] if pos >= 0 else k
ret = {split_key(k): v for k, v in ret.items()}
return ret
def _start_accumulating(self, step, start_timer=True, scope='train'):
del step # unused
assert not self.partials, 'metrics.accumulate call missed'
assert not self.partial_counts, 'metrics.accumulate call missed'
if self.done_accumulating[scope]:
self.metrics[scope].clear()
self.metric_counts[scope].clear()
if start_timer:
self.start_time[scope] = time.time()
self.done_accumulating[scope] = False
def _finish_accumulating(self, scope='train'):
assert not self.done_accumulating[scope]
metr = self.metrics[scope]
counts = self.metric_counts[scope]
for k, v in metr.items():
metr[k] = v / counts[k]
self.done_accumulating[scope] = True
def _accumulate_time(self, scope='train'):
assert not self.done_accumulating[scope]
took = time.time() - self.start_time[scope]
self.start_time[scope] = None
self.metrics[scope]['took'] += took
self.metric_counts[scope]['took'] = 1 # not +=
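def _metrics_aggregator_sketch():
    # Minimal single-GPU usage sketch of MetricsAggregator above; the keys and
    # values are illustrative and cuda=False avoids device synchronization.
    m = MetricsAggregator(scopes=('train', 'train_avg'),
                          dllogger_keys=('loss', 'took'),
                          reduce_mean=('loss',), cuda=False)
    m.start_epoch(0)
    m.start_iter(0)
    m.log_scalar('loss', 1.5)         # uses the 'mean' reduction set above
    m.log_scalar('lr', 1e-4, 'last')  # overwrite-style accumulation
    m.all_reduce(world_size=1)        # no-op on a single device
    m.accumulate()
    m.finish_iter()
    m.finish_logging_interval()
    return m.get_metrics('train', target='dll')  # {'train_loss': 1.5, 'train_took': ...}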
| DeepLearningExamples-master | PyTorch/SpeechRecognition/wav2vec2/common/metrics.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import warnings
# NGC 22.04-py3 container (PyTorch 1.12.0a0+bd13bc6)
warnings.filterwarnings(
"ignore",
message='positional arguments and argument "destination" are deprecated.'
' nn.Module.state_dict will not accept them in the future.')
# NGC ~22.05-py3
warnings.filterwarnings(
"ignore", message="pyprof will be removed by the end of June, 2022")
# 22.08-py3 RC
warnings.filterwarnings(
"ignore",
message="is_namedtuple is deprecated, please use the python checks")
| DeepLearningExamples-master | PyTorch/SpeechRecognition/wav2vec2/common/filter_warnings.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections import defaultdict
import torch
import torch.nn as nn
import torch.nn.functional as F
from common.fairseq.modules.multihead_attention import RotaryEmbedding
def mha_state_dict_to_fairseq(sd):
"""Concatenate q, k, v matrices and load as usual."""
new_sd = {}
qkv = defaultdict(dict)
for key, val in sd.items():
fields = key.split('.')
if len(fields) < 2:
continue
prefix = '.'.join(fields[:-2] + [""])
module, param = fields[-2:]
if module in ['q_proj', 'k_proj', 'v_proj']:
qkv[prefix][module + '.' + param] = val
else:
new_sd[key] = val
for prefix, param_dict in qkv.items():
# Stitch qkv params together
assert len(param_dict) == 6
new_sd[f"{prefix}qkv.weight"] = torch.cat(
[param_dict[f"{k}_proj.weight"] for k in ["q", "k", "v"]], dim=0)
new_sd[f"{prefix}qkv.bias"] = torch.cat(
[param_dict[f"{k}_proj.bias"] for k in ["q", "k", "v"]], dim=0)
return new_sd
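def _qkv_fuse_sketch():
    # Minimal sketch of the conversion above: separate q/k/v projection
    # parameters are concatenated into the fused `qkv` layout used by
    # PytMultiheadAttention below. The "attn." prefix is illustrative.
    dim = 4
    sd = {}
    for name in ("q_proj", "k_proj", "v_proj"):
        sd[f"attn.{name}.weight"] = torch.randn(dim, dim)
        sd[f"attn.{name}.bias"] = torch.randn(dim)
    fused = mha_state_dict_to_fairseq(sd)
    assert fused["attn.qkv.weight"].shape == (3 * dim, dim)
    assert fused["attn.qkv.bias"].shape == (3 * dim,)
    return fused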
class PytMultiheadAttention(nn.Module):
"""Drop-in replacement for Fairseq MHA.
Calls torch.nn.functional with combined qkv.
"""
def __init__(
self,
embed_dim,
num_heads,
dropout=0.0,
bias=True,
self_attention=True,
rotary_embeddings=False,
):
super().__init__()
assert self_attention
assert not rotary_embeddings, "Not yet supported"
self.embed_dim = embed_dim
self.num_heads = num_heads
self.rotary_embeddings = rotary_embeddings
if self.rotary_embeddings:
self.rotary_freq = RotaryEmbedding(embed_dim)
self.head_dim = embed_dim // num_heads
assert (
self.head_dim * num_heads == self.embed_dim
), "embed_dim must be divisible by num_heads"
self.qkv = nn.Linear(embed_dim, 3 * num_heads * self.head_dim,
bias=bias)
self.dropatt = nn.Dropout(dropout)
self.out_proj = nn.Linear(num_heads * self.head_dim, embed_dim,
bias=bias)
self.reset_parameters()
def hook(state_dict, prefix, *args, **kwargs):
this_keys = {k for k in state_dict.keys() if k.startswith(prefix)}
new_sd = {k: v for k, v in state_dict.items() if k in this_keys}
for k in this_keys:
del state_dict[k]
state_dict.update(mha_state_dict_to_fairseq(new_sd))
self._register_load_state_dict_pre_hook(hook)
def forward(self, query, key=None, value=None, key_padding_mask=None,
attn_mask=None):
return F.multi_head_attention_forward(
query,
key,
value,
self.embed_dim,
self.num_heads,
self.qkv.weight,
self.qkv.bias,
None,
None,
False,
self.dropatt.p,
self.out_proj.weight,
self.out_proj.bias,
training=self.training,
key_padding_mask=key_padding_mask,
need_weights=False,
attn_mask=attn_mask,
average_attn_weights=False,
)
def state_dict(self, *args, destination=None, prefix='', keep_vars=False):
"""Split q, k, v matrices for bwd compatibility with Fairseq."""
sd = super().state_dict(*args, destination, prefix, keep_vars)
for key in list(sd.keys()):
if not (key.endswith(".qkv.weight") or key.endswith(".qkv.bias")):
continue
*pref, qkv, param = key.split(".")
pref = ".".join(pref)
assert qkv == "qkv"
q, k, v = torch.chunk(sd.pop(key), 3, dim=0)
sd[f"{pref}.q_proj.{param}"] = q
sd[f"{pref}.k_proj.{param}"] = k
sd[f"{pref}.v_proj.{param}"] = v
return sd
def reset_parameters(self):
# Init as in Fairseq with qkv_same_dim=True and separate qkv projs
t = self.qkv.weight.size(0) // 3
nn.init.xavier_uniform_(self.qkv.weight[0*t:1*t], gain=1 / (2 ** 0.5))
nn.init.xavier_uniform_(self.qkv.weight[1*t:2*t], gain=1 / (2 ** 0.5))
nn.init.xavier_uniform_(self.qkv.weight[2*t:3*t], gain=1 / (2 ** 0.5))
nn.init.xavier_uniform_(self.out_proj.weight)
if self.out_proj.bias is not None:
nn.init.constant_(self.out_proj.bias, 0.0)
class Fp32Softmax(nn.Softmax):
def forward(self, x):
return F.softmax(x.float(), dim=self.dim).type_as(x)
class SlowMultiHeadAttention(nn.Module):
"""Drop-in replacement for Fairseq MHA."""
def __init__(self,
embed_dim,
num_heads,
dropout=0.0,
bias=True,
self_attention=True,
rotary_embeddings=None,
fp32_softmax=False,
):
super().__init__()
n_head = num_heads
d_model = embed_dim
d_head = embed_dim // n_head
dropatt = dropout
pre_lnorm = False
assert self_attention
assert rotary_embeddings is None, "Rotary embs not yet supported"
self.embed_dim = embed_dim
self.num_heads = num_heads
self.n_head = n_head
self.d_model = d_model
self.d_head = d_head
self.scale = 1 / (d_head ** 0.5)
self.pre_lnorm = pre_lnorm
self.qkv = nn.Linear(d_model, 3 * n_head * d_head, bias=bias)
self.dropatt = nn.Dropout(dropatt)
self.proj = nn.Linear(n_head * d_head, d_model, bias=bias)
self.layer_norm = nn.LayerNorm(d_model, elementwise_affine=False)
self.softmax = Fp32Softmax(dim=2) if fp32_softmax else nn.Softmax(dim=2)
def state_dict(self):
"""Convert QKV to be compatible with Fairseq"""
sd = super().state_dict()
ret = {}
for key, val in sd.items():
fields = key.split('.')
if len(fields) < 2:
continue
prefix = '.'.join(fields[:-2] + [""])
module, param = fields[-2:]
if module == 'qkv':
q, k, v = torch.chunk(val, 3, dim=0)
ret[f"{prefix}q_proj.{param}"] = q
ret[f"{prefix}k_proj.{param}"] = k
ret[f"{prefix}v_proj.{param}"] = v
else:
ret[key] = val
return ret
def load_state_dict(self, sd):
from collections import defaultdict
ret = {}
qkv = defaultdict(dict)
for key, val in sd.items():
fields = key.split('.')
if len(fields) < 2:
continue
prefix = '.'.join(fields[:-2] + [""])
module, param = fields[-2:]
if module in ['q_proj', 'k_proj', 'v_proj']:
qkv[prefix][module + '.' + param] = val
else:
ret[key] = val
for prefix, param_dict in qkv.items():
# Stitch qkv params together
assert len(param_dict) == 6
ret[f"{prefix}qkv.weight"] = torch.cat(
[param_dict[f"{k}_proj.weight"] for k in ["q", "k", "v"]],
dim=0)
ret[f"{prefix}qkv.bias"] = torch.cat(
[param_dict[f"{k}_proj.bias"] for k in ["q", "k", "v"]],
dim=0)
super().load_state_dict(ret)
def forward(self, inp, attn_mask=None):
inp = inp.permute(1, 0, 2) # (T, B, H) -> (B, T, H)
if self.pre_lnorm:
inp = self.layer_norm(inp)
n_head, d_head = self.n_head, self.d_head
head_q, head_k, head_v = torch.chunk(self.qkv(inp), 3, dim=2)
head_q = head_q.view(inp.size(0), inp.size(1), n_head, d_head)
head_k = head_k.view(inp.size(0), inp.size(1), n_head, d_head)
head_v = head_v.view(inp.size(0), inp.size(1), n_head, d_head)
q = head_q.permute(2, 0, 1, 3).reshape(-1, inp.size(1), d_head)
k = head_k.permute(2, 0, 1, 3).reshape(-1, inp.size(1), d_head)
v = head_v.permute(2, 0, 1, 3).reshape(-1, inp.size(1), d_head)
attn_score = torch.bmm(q, k.transpose(1, 2))
attn_score.mul_(self.scale)
if attn_mask is not None:
attn_mask = attn_mask.unsqueeze(1).to(attn_score.dtype)
attn_mask = attn_mask.repeat(n_head, attn_mask.size(2), 1)
attn_score.masked_fill_(attn_mask.to(torch.bool), -float('inf'))
attn_prob = self.softmax(attn_score)
attn_prob = self.dropatt(attn_prob)
attn_vec = torch.bmm(attn_prob, v)
attn_vec = attn_vec.view(n_head, inp.size(0), inp.size(1), d_head)
attn_vec = attn_vec.permute(1, 2, 0, 3).contiguous().view(
inp.size(0), inp.size(1), n_head * d_head)
output = self.proj(attn_vec)
return output.permute(1, 0, 2) # (B, T, H) -> (T, B, H)
| DeepLearningExamples-master | PyTorch/SpeechRecognition/wav2vec2/common/pyt_mha.py |
DeepLearningExamples-master | PyTorch/SpeechRecognition/wav2vec2/common/__init__.py |
|
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
import random
import librosa
import torch
import torch.nn as nn
class BaseFeatures(nn.Module):
"""Base class for GPU accelerated audio preprocessing."""
__constants__ = ["pad_align", "pad_to_max_duration", "max_len"]
def __init__(self, pad_align, pad_to_max_duration, max_duration,
sample_rate, window_size, window_stride, spec_augment=None,
cutout_augment=None):
super(BaseFeatures, self).__init__()
self.pad_align = pad_align
self.pad_to_max_duration = pad_to_max_duration
self.win_length = int(sample_rate * window_size) # frame size
self.hop_length = int(sample_rate * window_stride)
# Calculate maximum sequence length (# frames)
if pad_to_max_duration:
self.max_len = 1 + math.ceil(
(max_duration * sample_rate - self.win_length) / self.hop_length
)
if spec_augment is not None:
self.spec_augment = SpecAugment(**spec_augment)
else:
self.spec_augment = None
if cutout_augment is not None:
self.cutout_augment = CutoutAugment(**cutout_augment)
else:
self.cutout_augment = None
@torch.no_grad()
def calculate_features(self, audio, audio_lens):
return audio, audio_lens
def __call__(self, audio, audio_lens):
dtype = audio.dtype
audio = audio.float()
feat, feat_lens = self.calculate_features(audio, audio_lens)
feat = self.apply_padding(feat)
if self.cutout_augment is not None:
feat = self.cutout_augment(feat)
if self.spec_augment is not None:
feat = self.spec_augment(feat)
feat = feat.to(dtype)
return feat, feat_lens
def apply_padding(self, x):
if self.pad_to_max_duration:
x_size = max(x.size(-1), self.max_len)
else:
x_size = x.size(-1)
if self.pad_align > 0:
pad_amt = x_size % self.pad_align
else:
pad_amt = 0
padded_len = x_size + (self.pad_align - pad_amt if pad_amt > 0 else 0)
return nn.functional.pad(x, (0, padded_len - x.size(-1)))
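def _pad_align_sketch():
    # Minimal sketch of the padding rule in apply_padding above: with
    # pad_align=16 and no max-duration padding, a 100-frame batch is padded
    # up to 112 frames. The constructor arguments are illustrative.
    feats = BaseFeatures(pad_align=16, pad_to_max_duration=False,
                         max_duration=float('inf'), sample_rate=16000,
                         window_size=0.02, window_stride=0.01)
    x = torch.zeros(2, 80, 100)
    return feats.apply_padding(x).shape  # torch.Size([2, 80, 112])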
class SpecAugment(nn.Module):
"""Spec augment. refer to https://arxiv.org/abs/1904.08779
"""
def __init__(self, freq_masks=0, min_freq=0, max_freq=10, time_masks=0,
min_time=0, max_time=10):
super(SpecAugment, self).__init__()
assert 0 <= min_freq <= max_freq
assert 0 <= min_time <= max_time
self.freq_masks = freq_masks
self.min_freq = min_freq
self.max_freq = max_freq
self.time_masks = time_masks
self.min_time = min_time
self.max_time = max_time
@torch.no_grad()
def forward(self, x):
sh = x.shape
mask = torch.zeros(x.shape, dtype=torch.bool, device=x.device)
for idx in range(sh[0]):
for _ in range(self.freq_masks):
w = torch.randint(self.min_freq, self.max_freq + 1, size=(1,)).item()
f0 = torch.randint(0, max(1, sh[1] - w), size=(1,))
mask[idx, f0:f0+w] = 1
for _ in range(self.time_masks):
w = torch.randint(self.min_time, self.max_time + 1, size=(1,)).item()
t0 = torch.randint(0, max(1, sh[2] - w), size=(1,))
mask[idx, :, t0:t0+w] = 1
return x.masked_fill(mask, 0)
class CutoutAugment(nn.Module):
"""Cutout. refer to https://arxiv.org/pdf/1708.04552.pdf
"""
def __init__(self, masks=0, min_freq=20, max_freq=20, min_time=5, max_time=5):
super(CutoutAugment, self).__init__()
assert 0 <= min_freq <= max_freq
assert 0 <= min_time <= max_time
self.masks = masks
self.min_freq = min_freq
self.max_freq = max_freq
self.min_time = min_time
self.max_time = max_time
@torch.no_grad()
def forward(self, x):
sh = x.shape
mask = torch.zeros(x.shape, dtype=torch.bool, device=x.device)
for idx in range(sh[0]):
for i in range(self.masks):
w = torch.randint(self.min_freq, self.max_freq + 1, size=(1,)).item()
h = torch.randint(self.min_time, self.max_time + 1, size=(1,)).item()
f0 = int(random.uniform(0, sh[1] - w))
t0 = int(random.uniform(0, sh[2] - h))
mask[idx, f0:f0+w, t0:t0+h] = 1
return x.masked_fill(mask, 0)
@torch.jit.script
def normalize_batch(x, seq_len, normalize_type: str):
if normalize_type == "per_feature":
x_mean = torch.zeros((seq_len.shape[0], x.shape[1]), dtype=x.dtype,
device=x.device)
x_std = torch.zeros((seq_len.shape[0], x.shape[1]), dtype=x.dtype,
device=x.device)
for i in range(x.shape[0]):
x_mean[i, :] = x[i, :, :seq_len[i]].mean(dim=1)
x_std[i, :] = x[i, :, :seq_len[i]].std(dim=1)
# make sure x_std is not zero
x_std += 1e-5
return (x - x_mean.unsqueeze(2)) / x_std.unsqueeze(2)
elif normalize_type == "all_features":
x_mean = torch.zeros(seq_len.shape, dtype=x.dtype, device=x.device)
x_std = torch.zeros(seq_len.shape, dtype=x.dtype, device=x.device)
for i in range(x.shape[0]):
x_mean[i] = x[i, :, :int(seq_len[i])].mean()
x_std[i] = x[i, :, :int(seq_len[i])].std()
# make sure x_std is not zero
x_std += 1e-5
return (x - x_mean.view(-1, 1, 1)) / x_std.view(-1, 1, 1)
else:
return x
@torch.jit.script
def stack_subsample_frames(x, x_lens, stacking: int = 1, subsampling: int = 1):
""" Stacks frames together across feature dim, and then subsamples
input is batch_size, feature_dim, num_frames
output is batch_size, feature_dim * stacking, num_frames / subsampling
"""
seq = [x]
for n in range(1, stacking):
tmp = torch.zeros_like(x)
tmp[:, :, :-n] = x[:, :, n:]
seq.append(tmp)
x = torch.cat(seq, dim=1)[:, :, ::subsampling]
if subsampling > 1:
x_lens = torch.ceil(x_lens.float() / subsampling).int()
if x.size(2) > x_lens.max().item():
assert abs(x.size(2) - x_lens.max().item()) <= 1
x = x[:,:,:x_lens.max().item()]
return x, x_lens
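def _stack_subsample_sketch():
    # Minimal shape check for stack_subsample_frames above: stacking=2
    # doubles the feature dimension and subsampling=2 roughly halves the
    # number of frames. The sizes below are illustrative.
    feats = torch.randn(3, 80, 100)                 # (batch, n_filt, frames)
    lens = torch.tensor([100, 90, 60], dtype=torch.int32)
    out, out_lens = stack_subsample_frames(feats, lens, stacking=2,
                                           subsampling=2)
    assert out.shape[1] == 160                      # 80 * stacking
    assert out.shape[2] == out_lens.max().item()    # 50 frames
    return out.shape, out_lens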
class FilterbankFeatures(BaseFeatures):
# For JIT, https://pytorch.org/docs/stable/jit.html#python-defined-constants
__constants__ = ["dither", "preemph", "n_fft", "hop_length", "win_length",
"log", "frame_stacking", "frame_subsampling", "normalize"]
# torchscript: "center" removed due to a bug
def __init__(self, spec_augment=None, cutout_augment=None,
sample_rate=16000, window_size=0.02, window_stride=0.01,
window="hann", normalize="per_feature", n_fft=512,
preemph=0.97, n_filt=80, lowfreq=0, highfreq=None, log=True,
dither=1e-5, pad_align=16, pad_to_max_duration=False,
max_duration=float('inf'), frame_stacking=1,
frame_subsampling=1):
super(FilterbankFeatures, self).__init__(
pad_align=pad_align, pad_to_max_duration=pad_to_max_duration,
max_duration=max_duration, sample_rate=sample_rate,
window_size=window_size, window_stride=window_stride,
spec_augment=spec_augment, cutout_augment=cutout_augment)
torch_windows = {
'hann': torch.hann_window,
'hamming': torch.hamming_window,
'blackman': torch.blackman_window,
'bartlett': torch.bartlett_window,
'none': None,
}
self.n_fft = n_fft or 2 ** math.ceil(math.log2(self.win_length))
self.normalize = normalize
self.log = log
#TORCHSCRIPT: Check whether or not we need this
self.dither = dither
self.frame_stacking = frame_stacking
self.frame_subsampling = frame_subsampling
self.n_filt = n_filt
self.preemph = preemph
highfreq = highfreq or sample_rate / 2
window_fn = torch_windows.get(window, None)
window_tensor = window_fn(self.win_length,
periodic=False) if window_fn else None
filterbanks = torch.tensor(
librosa.filters.mel(sample_rate, self.n_fft, n_mels=n_filt,
fmin=lowfreq, fmax=highfreq),
dtype=torch.float).unsqueeze(0)
# torchscript
self.register_buffer("fb", filterbanks)
self.register_buffer("window", window_tensor)
def output_dim(self):
return self.n_filt * self.frame_stacking
def get_seq_len(self, seq_len):
return torch.ceil(seq_len.to(dtype=torch.float) / self.hop_length).to(
dtype=torch.int)
# TORCHSCRIPT: center removed due to bug
def stft(self, x):
spec = torch.stft(x, n_fft=self.n_fft, hop_length=self.hop_length,
win_length=self.win_length,
window=self.window.to(dtype=torch.float),
return_complex=True)
return torch.view_as_real(spec)
@torch.no_grad()
def calculate_features(self, x, x_lens):
dtype = x.dtype
x_lens = self.get_seq_len(x_lens)
# dither
if self.dither > 0:
x += self.dither * torch.randn_like(x)
# do preemphasis
if self.preemph is not None:
            x = torch.cat(
                (x[:, 0].unsqueeze(1),
                 x[:, 1:] - self.preemph * x[:, :-1]), dim=1)
x = self.stft(x)
# get power spectrum
x = x.pow(2).sum(-1)
# dot with filterbank energies
x = torch.matmul(self.fb.to(x.dtype), x)
# log features if required
if self.log:
x = torch.log(x + 1e-20)
# normalize if required
x = normalize_batch(x, x_lens, normalize_type=self.normalize)
if self.frame_stacking > 1 or self.frame_subsampling > 1:
x, x_lens = stack_subsample_frames(x, x_lens, self.frame_stacking,
self.frame_subsampling)
# mask to zero any values beyond x_lens in batch,
# pad to multiple of `pad_align` (for efficiency)
max_len = x.size(-1)
mask = torch.arange(max_len, dtype=x_lens.dtype, device=x.device)
mask = mask.expand(x.size(0), max_len) >= x_lens.unsqueeze(1)
x = x.masked_fill(mask.unsqueeze(1), 0)
# TORCHSCRIPT: Is this del important? It breaks scripting
# del mask
return x.to(dtype), x_lens
| DeepLearningExamples-master | PyTorch/SpeechRecognition/wav2vec2/common/features.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
from common.fairseq.optim.adam import FairseqAdam
from common.fairseq.optim.fp16_optimizer import FP16Optimizer
from common.fairseq.optim.fused_adam import get_fused_adam_class
from common.utils import print_once
def lr_poly_policy(step, optimizer, lr, initial_lr_scale=0.0,
final_lr_scale=0.0, warmup_steps=1000, hold_steps=0,
num_steps=None, power=1.0):
"""Polynomial decay LR policy with an optional hold period."""
assert step >= 1
assert num_steps is not None
assert power is not None
start_lr = initial_lr_scale * lr
end_lr = final_lr_scale * lr
if step <= warmup_steps:
new_lr = start_lr + (step) / warmup_steps * (lr - start_lr)
elif step <= warmup_steps + hold_steps:
new_lr = lr
elif warmup_steps + hold_steps < step <= num_steps:
remain = 1 - (step - warmup_steps) / (num_steps - warmup_steps)
new_lr = (lr - end_lr) * remain ** power + end_lr
else:
new_lr = end_lr
for param_group in optimizer.param_groups:
param_group['lr'] = new_lr
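def _lr_poly_policy_sketch():
    # Minimal sketch of driving the policy above with a bare SGD optimizer;
    # the schedule values are illustrative. The LR ramps up over 5 warmup
    # steps, holds for 2 steps, then decays towards final_lr_scale * lr.
    import torch
    opt = torch.optim.SGD([torch.nn.Parameter(torch.zeros(1))], lr=0.0)
    lrs = []
    for step in range(1, 11):
        lr_poly_policy(step, opt, lr=1e-3, warmup_steps=5, hold_steps=2,
                       num_steps=10, power=1.0)
        lrs.append(opt.param_groups[0]['lr'])
    return lrs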
def lr_exp_policy(step, optimizer, initial_lr_scale, lr, final_lr_scale=0.0,
warmup_steps=1000, hold_steps=0, num_steps=float('inf'),
decay=None):
"""Exponential LR policy with an optional hold period.
    If the `decay` factor is not supplied, it is calculated to reach `end_lr`
    in `num_steps` steps, where `end_lr = final_lr_scale * lr`.
    Args:
        num_steps (int): Limits the number of decay steps.
        end_lr (float): The lowest possible LR, equal to `final_lr_scale * lr`.
        decay (float or None): Decay factor; if None, it will be derived
            from `num_steps` and `end_lr`.
"""
assert step >= 1
start_lr = initial_lr_scale * lr
end_lr = final_lr_scale * lr
if decay is None:
assert not math.isinf(num_steps) and end_lr > 0.0
decay_steps = num_steps - warmup_steps - hold_steps
decay = math.log(end_lr / lr) / decay_steps
else:
decay = math.log(decay)
if step <= warmup_steps:
new_lr = start_lr + (step) / warmup_steps * (lr - start_lr)
elif step <= warmup_steps + hold_steps:
new_lr = lr
else:
a = math.exp(decay * (min(step, num_steps) - warmup_steps - hold_steps))
new_lr = max(a * lr, end_lr)
for param_group in optimizer.param_groups:
param_group['lr'] = new_lr
def get_optimizer(model, args):
kw = {'lr': args.lr, 'weight_decay': args.weight_decay}
if args.optimizer == 'adam' and (args.fp16 or args.bf16):
print_once('WARNING: Using Fairseq FP16Optimizer')
# based on fairseq.optim.FP16Optimizer.build_optimizer
flatten = True # not args.fp16_no_flatten_grads
args.betas = args.adam_betas
args.eps = args.adam_eps
params = list(filter(lambda p: p.requires_grad, model.parameters()))
fp32_params = FP16Optimizer.build_fp32_params(args, params,
flatten=flatten)
# based on fairseq.optim.build_optimizer
def build_optimizer(cfg, params, *extra_args, **extra_kwargs):
if all(isinstance(p, dict) for p in params):
params = [t for p in params for t in p.values()]
params = list(filter(lambda p: p.requires_grad, params))
return FairseqAdam(cfg, params, *extra_args, **extra_kwargs)
if flatten:
fp32_optimizer = build_optimizer(args, [fp32_params])
else:
fp32_optimizer = build_optimizer(args, fp32_params)
if flatten and not fp32_optimizer.supports_flat_params:
raise RuntimeError(
f"chosen optimizer {fp32_optimizer.__class__.__name__} does "
"not support flat params, please set --fp16-no-flatten-grads"
)
kwargs = {}
optimizer = FP16Optimizer(args, params, fp32_optimizer, fp32_params,
**kwargs)
elif args.optimizer == 'adam' and not (args.fp16 or args.bf16):
print_once('WARNING: Using FusedAdam instead of Adam')
kw.update({'betas': args.adam_betas, 'eps': args.adam_eps})
fused_adam_cls = get_fused_adam_class()
optimizer = fused_adam_cls(model.parameters(), **kw)
else:
raise ValueError(f'Invalid optimizer "{args.optimizer}"')
return optimizer
| DeepLearningExamples-master | PyTorch/SpeechRecognition/wav2vec2/common/optimizers.py |
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import numpy as np
from torch.utils.data import DataLoader
from common.fairseq.data import data_utils
from common.helpers import print_once
from common.sampler import DistributedIndicesSampler
def adjust_max_tokens(train_dataset, world_size, args):
def get_steps_per_epoch(world_size, max_tokens, update_freq):
train_loader, sampler = get_batch_iterator(
train_dataset,
True,
max_tokens=max_tokens,
max_sentences=args.batch_size,
max_positions=(max_tokens, max_tokens),
ignore_invalid_inputs=True,
required_batch_size_multiple=args.required_batch_size_multiple,
seed=args.seed,
num_shards=world_size,
shard_id=0,
num_workers=args.num_workers)
steps_per_epoch = len(train_loader) // update_freq
return steps_per_epoch
steps_ref = get_steps_per_epoch(args.ref_world_size, args.ref_max_tokens, 1)
min_ = args.ref_max_tokens // 20
max_ = args.ref_max_tokens * 20
prev_max_tokens = 0
align_to = 1000
while min_ < max_:
max_tokens = (max_ + min_) // 2 // align_to * align_to # try to round
if max_tokens == prev_max_tokens:
break
prev_max_tokens = max_tokens
steps = get_steps_per_epoch(world_size, max_tokens, args.update_freq)
print_once(f"max_tokens={max_tokens} yields {steps} steps "
f"(adjusting for {steps_ref}).")
if steps == steps_ref:
break
elif steps > steps_ref:
min_ = max_tokens
else:
max_ = max_tokens
args.max_tokens = max_tokens
args.max_tokens_valid = max_tokens
def filter_indices_by_size(
indices, dataset, max_positions=None, ignore_invalid_inputs=False
):
"""
Filter examples that are too large
Args:
indices (np.array): original array of sample indices
dataset (~fairseq.data.FairseqDataset): dataset to batch
max_positions (optional): max sentence length supported by the
model (default: None).
ignore_invalid_inputs (bool, optional): don't raise Exception for
sentences that are too long (default: False).
Returns:
np.array: array of filtered sample indices
"""
indices, ignored = dataset.filter_indices_by_size(indices, max_positions)
# TODO: consider removing this function. If `len(ignored) > 0`,
# an error is raised in fairseq dataset code, both in sup and unsup case
if len(ignored) > 0:
if not ignore_invalid_inputs:
raise Exception(
(
"Size of sample #{} is invalid (={}) since max_positions={}, "
"skip this example with --skip-invalid-size-inputs-valid-test"
).format(ignored[0], dataset.size(ignored[0]), max_positions)
)
print(
(
"WARNING: {:,} samples have invalid sizes and will be skipped, "
"max_positions={}, first few sample ids={}"
).format(len(ignored), max_positions, ignored[:10])
)
return indices
def get_batch_iterator(
dataset,
training,
max_tokens=None,
max_sentences=None,
max_positions=None,
ignore_invalid_inputs=False,
required_batch_size_multiple=1,
seed=1,
num_shards=1,
shard_id=0,
num_workers=0,
num_concat_batches=1,
):
# get indices ordered by example size
with data_utils.numpy_seed(seed):
indices = dataset.ordered_indices()
# filter examples that are too large
if max_positions is not None:
indices = filter_indices_by_size(
indices, dataset, max_positions, ignore_invalid_inputs)
# create mini-batches with given size constraints
batch_inds, non_grouped_batch_inds = dataset.batch_by_size(
indices,
max_tokens=max_tokens,
max_sentences=max_sentences,
required_batch_size_multiple=required_batch_size_multiple,
num_concat_batches=num_concat_batches,
)
batch_ids = copy.deepcopy(non_grouped_batch_inds)
[bi.fill(i) for i, bi in enumerate(batch_ids)]
inds_ids = zip(np.concatenate(batch_inds), np.concatenate(batch_ids))
dataset.batch_ids = {idx: batch_idx for idx, batch_idx in inds_ids}
# Batches are already specified, now we just need to shuffle them
batch_ind_sampler = DistributedIndicesSampler(batch_inds, shuffle=training,
num_replicas=num_shards,
rank=shard_id, seed=seed,
drop_last=training,
fillvalue=[])
loader = DataLoader(
dataset=dataset,
collate_fn=dataset.collater,
batch_sampler=batch_ind_sampler,
num_workers=num_workers,
pin_memory=True,
persistent_workers=num_workers > 0,
)
return loader, batch_ind_sampler
| DeepLearningExamples-master | PyTorch/SpeechRecognition/wav2vec2/common/dataset.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import torch
import torch.distributed as dist
def print_once(*msg, local_rank=0):
"""Single stdout print with multiple processes."""
if dist.is_initialized():
if dist.get_rank() == 0:
print(*msg)
elif int(os.environ.get('WORLD_SIZE', 1)) == 1:
print(*msg)
elif int(os.environ.get('RANK', 0)) == 0 and local_rank == 0:
print(*msg)
class AttrDict(dict):
def __init__(self, *args, **kwargs):
super(AttrDict, self).__init__(*args, **kwargs)
self.__dict__ = self
def set_torch_seed(seed):
torch.manual_seed(seed)
if torch.cuda.is_available():
torch.cuda.manual_seed(seed)
def reduce_tensor(tensor, world_size, mean=True):
if world_size == 1:
return tensor
rt = tensor.clone()
dist.all_reduce(rt, op=dist.ReduceOp.SUM)
if mean:
rt = rt.true_divide(world_size)
return rt
def all_reduce_cpu_scalars(data, device=torch.device('cuda')):
data_keys = list(data.keys())
data_vals = list(data.values())
tensor_vals = torch.tensor(data_vals, dtype=torch.double, device=device)
dist.all_reduce(tensor_vals, op=dist.ReduceOp.SUM)
data_vals = tensor_vals.cpu().numpy()
return dict(zip(data_keys, data_vals))
def setup_distributed(local_rank):
multi_gpu = int(os.environ.get('WORLD_SIZE', 1)) > 1
if multi_gpu:
torch.cuda.set_device(local_rank)
dist.init_process_group(backend='nccl', init_method='env://')
world_size = dist.get_world_size()
print_once(f'Distributed training with {world_size} GPUs\n')
else:
world_size = 1
return world_size
| DeepLearningExamples-master | PyTorch/SpeechRecognition/wav2vec2/common/utils.py |
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TypeVar, List
import torch
import numpy as np
from torch.utils.data import (RandomSampler, Sampler,
DistributedSampler as TorchDistributedSampler)
from common.fairseq.data import data_utils
T = TypeVar('T')
class DistributedSampler(Sampler):
def __init__(self, dataset, batch_size, world_size, rank):
"""
Constructor for the DistributedSampler.
:param dataset: dataset
:param batch_size: local batch size
:param world_size: number of distributed workers
:param rank: rank of the current process
"""
self.dataset = dataset
self.world_size = world_size
self.rank = rank
self.epoch = 0
self.batch_size = batch_size
self.global_batch_size = batch_size * world_size
self.data_len = len(self.dataset)
self.num_samples = self.data_len // self.global_batch_size \
* self.global_batch_size
def distribute_batches(self, indices):
"""
Assigns batches to workers.
Consecutive ranks are getting consecutive batches.
:param indices: torch.tensor with batch indices
"""
assert len(indices) == self.num_samples
indices = indices.view(-1, self.batch_size)
indices = indices[self.rank::self.world_size].contiguous()
indices = indices.view(-1)
indices = indices.tolist()
assert len(indices) == self.num_samples // self.world_size
return indices
def reshuffle_batches(self, indices, rng):
"""
Permutes global batches
:param indices: torch.tensor with batch indices
:param rng: instance of torch.Generator
"""
indices = indices.view(-1, self.global_batch_size)
num_batches = indices.shape[0]
order = torch.randperm(num_batches, generator=rng)
indices = indices[order, :]
indices = indices.view(-1)
return indices
def __iter__(self):
g = torch.Generator()
g.manual_seed(self.epoch)
# generate permutation
indices = torch.randperm(self.data_len, generator=g)
# make indices evenly divisible by (batch_size * world_size)
indices = indices[:self.num_samples]
# assign batches to workers
indices = self.distribute_batches(indices)
return iter(indices)
def set_epoch(self, epoch):
"""
Sets current epoch index.
Epoch index is used to seed RNG in __iter__() function.
:param epoch: index of current epoch
"""
self.epoch = epoch
def __len__(self):
return self.num_samples // self.world_size
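def _distribute_batches_sketch():
    # Minimal sketch of batch-to-rank assignment in DistributedSampler above:
    # with batch_size=2 and world_size=2, consecutive local batches go to
    # consecutive ranks. The toy dataset and indices are illustrative.
    data = list(range(8))
    s0 = DistributedSampler(data, batch_size=2, world_size=2, rank=0)
    s1 = DistributedSampler(data, batch_size=2, world_size=2, rank=1)
    idx = torch.arange(8)
    assert s0.distribute_batches(idx) == [0, 1, 4, 5]
    assert s1.distribute_batches(idx) == [2, 3, 6, 7]
    return s0.distribute_batches(idx), s1.distribute_batches(idx)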
class BucketingSampler(DistributedSampler):
def __init__(self, dataset, batch_size, num_buckets, world_size, rank):
"""
Bucketing sampler with approx. equally-sized buckets.
:param dataset: dataset
:param batch_size: local batch size
:param num_buckets: number of buckets
:param world_size: number of distributed workers
:param rank: rank of the current process
"""
super().__init__(dataset, batch_size, world_size, rank)
self.num_buckets = num_buckets
len_ids = np.argsort([sample['duration']
for sample in dataset.samples])
self.buckets = [torch.from_numpy(t)
for t in np.array_split(len_ids, num_buckets)]
def __iter__(self):
g = torch.Generator()
g.manual_seed(self.epoch)
global_bsz = self.global_batch_size
indices = []
for bid in range(self.num_buckets):
# random shuffle within current bucket
perm = torch.randperm(len(self.buckets[bid]), generator=g)
bucket_indices = self.buckets[bid][perm]
# add samples from current bucket to indices for current epoch
indices.append(bucket_indices)
indices = torch.cat(indices)
# make indices evenly divisible by global batch size
length = len(indices) // global_bsz * global_bsz
indices = indices[:length]
assert len(indices) % self.global_batch_size == 0
# perform global reshuffle of all global batches
indices = self.reshuffle_batches(indices, g)
# distribute batches to individual workers
indices = self.distribute_batches(indices)
return iter(indices)
class DistributedIndicesSampler(TorchDistributedSampler):
""" DistributedSampler operating on indices.
Differences wrt. DistributedSampler:
1) use Numpy RNG instead of PyTorch RNG
2) treat `self.dataset` as indices - DistributedSampler assumes indices
are determined with `range(len(self.dataset))`
3) if `drop_last` is False, pad indices with `fillvalue`
or don't pad at all if `fillvalue` is None (useful for validation)
"""
def __init__(self, *args, fillvalue=None, **kwargs):
super().__init__(*args, **kwargs)
self.fillvalue = fillvalue
if not self.drop_last and self.fillvalue is None:
self.total_size = len(self.dataset)
# possibly different num_samples for each device,
# this will work with DDP only for validation
self.num_samples = len(range(self.rank, self.total_size,
self.num_replicas))
def __iter__(self):
indices = list(self.dataset)
if self.shuffle:
# deterministically shuffle based on epoch and seed
with data_utils.numpy_seed(self.seed + self.epoch):
np.random.shuffle(indices)
if not self.drop_last:
if self.fillvalue is not None:
# add extra samples to make it evenly divisible
padding_size = self.total_size - len(indices)
indices += [self.fillvalue] * padding_size
else:
# remove tail of data to make it evenly divisible.
indices = indices[:self.total_size]
assert len(indices) == self.total_size
# subsample
indices = indices[self.rank:self.total_size:self.num_replicas]
assert len(indices) == self.num_samples
return iter(indices)
class RandomSeedableSampler(RandomSampler):
def __init__(self, *args, generator=None, seed=0, **kwargs):
if generator is None:
generator = torch.Generator()
if seed is not None:
generator.manual_seed(seed)
super().__init__(*args, generator=generator, **kwargs)
self.epoch = 0
self.seed = seed
def __iter__(self):
self.generator.manual_seed(self.seed + self.epoch)
return super().__iter__()
def set_epoch(self, epoch: int) -> None:
""" Allows reproducibility after resuming training. """
self.epoch = epoch
class IndexMappingSampler(Sampler[T]):
""" Transforms index-based sampler to arbitrary one, e.g. batch-based. """
def __init__(self, indices_map: List[T], base_sampler: Sampler[int]):
super().__init__(indices_map)
self.base_sampler = base_sampler
self.indices_map = indices_map
assert len(self.base_sampler) <= len(indices_map)
def __iter__(self):
return map(lambda ind: self.indices_map[ind], iter(self.base_sampler))
def __len__(self):
return len(self.base_sampler)
def set_epoch(self, epoch: int) -> None:
""" Allows reproducibility after resuming training. """
self.base_sampler.set_epoch(epoch)
| DeepLearningExamples-master | PyTorch/SpeechRecognition/wav2vec2/common/sampler.py |
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import re
from collections import OrderedDict
from pathlib import Path
import amp_C
import numpy as np
import torch
import torch.distributed as dist
from .metrics import word_error_rate
from common.utils import print_once
def to_gpu(batch, fp16=False, bf16=False):
assert not (fp16 and bf16)
for k, v in batch['net_input'].items():
if fp16 and v.dtype is torch.float:
batch['net_input'][k] = v.cuda(non_blocking=True).half()
elif bf16 and v.dtype is torch.float:
batch['net_input'][k] = v.cuda(non_blocking=True).to(dtype=torch.bfloat16)
else:
batch['net_input'][k] = v.cuda(non_blocking=True)
def init_multi_tensor_ema(model, ema_model):
model_weights = list(model.state_dict().values())
ema_model_weights = list(ema_model.state_dict().values())
ema_overflow_buf = torch.cuda.IntTensor([0])
return model_weights, ema_model_weights, ema_overflow_buf
def apply_multi_tensor_ema(decay, model_weights, ema_model_weights,
overflow_buf):
amp_C.multi_tensor_axpby(
65536, overflow_buf,
[ema_model_weights, model_weights, ema_model_weights],
decay, 1-decay, -1)
def apply_ema(model, ema_model, decay, patch_conv_wn=False):
if not decay:
return
if patch_conv_wn:
torch.nn.utils.remove_weight_norm(model.encoder.pos_conv[0])
sd = getattr(model, 'module', model).state_dict()
for k, v in ema_model.state_dict().items():
v.copy_(decay * v + (1 - decay) * sd[k])
if patch_conv_wn:
torch.nn.utils.weight_norm(
model.encoder.pos_conv[0], name="weight", dim=2)
def add_ctc_blank(symbols):
return symbols + ['<BLANK>']
def ctc_decoder_predictions_tensor(tensor, labels, blank_id=None):
"""
Takes output of greedy ctc decoder and performs ctc decoding algorithm to
remove duplicates and special symbol. Returns prediction
Args:
        tensor: model output tensor with greedy predictions
        labels: A list of labels
        blank_id: index of the CTC blank (defaults to the last label)
Returns:
prediction
"""
if blank_id is None:
blank_id = len(labels) - 1
hypotheses = []
labels_map = {i: labels[i] for i in range(len(labels))}
prediction_cpu_tensor = tensor.long().cpu()
# iterate over batch
for prediction in prediction_cpu_tensor:
prediction = prediction.numpy().tolist()
# CTC decoding procedure
decoded_prediction = []
previous = blank_id
for p in prediction:
if (p != previous or previous == blank_id) and p != blank_id:
decoded_prediction.append(p)
previous = p
hypothesis = ''.join([labels_map[c] for c in decoded_prediction])
hypotheses.append(hypothesis)
return hypotheses
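def _ctc_decode_sketch():
    # Minimal worked example for the greedy CTC decoding above: repeated
    # symbols are collapsed and the blank (last label by default) is dropped.
    # The label set and prediction are illustrative.
    labels = ['a', 'b', 'c', '<BLANK>']
    preds = torch.tensor([[0, 0, 3, 1, 1, 3, 2]])   # 'a a _ b b _ c'
    return ctc_decoder_predictions_tensor(preds, labels)  # ['abc']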
def greedy_wer(preds, tgt, tgt_lens, labels):
"""
    Takes the output of a greedy CTC decoder, removes duplicates and the
    special blank symbol, and computes the WER against the reference
    transcripts.
    Args:
        preds: tensor with greedy predictions
        tgt: tensor with reference transcripts
        tgt_lens: tensor with reference transcript lengths
        labels: A list of labels
    Returns:
        word error rate, first hypothesis, first reference
"""
with torch.no_grad():
references = gather_transcripts([tgt], [tgt_lens], labels)
hypotheses = ctc_decoder_predictions_tensor(preds, labels)
wer, _, _ = word_error_rate(hypotheses, references)
return wer, hypotheses[0], references[0]
def gather_losses(losses_list):
return [torch.mean(torch.stack(losses_list))]
def gather_predictions(predictions_list, labels, blank_id=None):
results = []
for prediction in predictions_list:
results += ctc_decoder_predictions_tensor(prediction, labels=labels,
blank_id=blank_id)
return results
def gather_transcripts(transcript_list, transcript_len_list, labels):
results = []
labels_map = {i: labels[i] for i in range(len(labels))}
# iterate over workers
for txt, lens in zip(transcript_list, transcript_len_list):
for t, l in zip(txt.long().cpu(), lens.long().cpu()):
t = list(t.numpy())
results.append(''.join([labels_map[c] for c in t[:l]]))
return results
def process_evaluation_batch(tensors, global_vars, labels):
"""
Processes results of an iteration and saves it in global_vars
Args:
tensors: dictionary with results of an evaluation iteration,
e.g., loss, predictions, transcript, and output
global_vars: dictionary where processes results of iteration are saved
labels: A list of labels
"""
for kv, v in tensors.items():
if kv.startswith('loss'):
global_vars['EvalLoss'] += gather_losses(v)
elif kv.startswith('predictions'):
global_vars['preds'] += gather_predictions(v, labels)
elif kv.startswith('transcript_length'):
transcript_len_list = v
elif kv.startswith('transcript'):
transcript_list = v
elif kv.startswith('output'):
global_vars['logits'] += v
global_vars['txts'] += gather_transcripts(
transcript_list, transcript_len_list, labels)
def process_evaluation_epoch(aggregates):
"""
Processes results from each worker and combine to final result.
Args:
aggregates: dictionary containing information of entire evaluation
Return:
wer: final word error rate
loss: final loss
"""
if 'losses' in aggregates:
eloss = torch.mean(torch.stack(aggregates['losses'])).item()
else:
eloss = None
hypotheses = aggregates['preds']
references = aggregates['txts']
ids = aggregates['ids']
wer, scores, num_words = word_error_rate(hypotheses, references)
multi_gpu = dist.is_initialized()
if multi_gpu:
if eloss is not None:
eloss /= dist.get_world_size()
eloss_tensor = torch.tensor(eloss).cuda()
dist.all_reduce(eloss_tensor)
eloss = eloss_tensor.item()
scores_tensor = torch.tensor(scores).cuda().unsqueeze(-1)
num_words_tensor = torch.tensor(num_words).cuda().unsqueeze(-1)
ids_tensor = torch.tensor(ids).cuda().unsqueeze(-1)
result_tensor = torch.cat(
[scores_tensor, num_words_tensor, ids_tensor], dim=-1)
result_tensor_list = [torch.zeros_like(result_tensor)
for i in range(dist.get_world_size())]
dist.all_gather(result_tensor_list, result_tensor)
if dist.get_rank() == 0:
agg_results = torch.cat(result_tensor_list, dim=0)
agg_ids = set()
agg_score, agg_num_words = 0, 0
for x in agg_results.cpu().numpy():
score, num_words, sample_id = x
if sample_id in agg_ids:
continue
else:
agg_ids.add(sample_id)
agg_score += score
agg_num_words += num_words
wer = 1.0 * agg_score / agg_num_words
return wer, eloss
def num_weights(module):
return sum(p.numel() for p in module.parameters() if p.requires_grad)
def load_wrapped_state(model, state_dict, strict=True):
if model is None:
return
unwrap_ddp = lambda model: getattr(model, 'module', model)
state_dict = {k.replace('module.', ''): v for k, v in state_dict.items()}
unwrap_ddp(unwrap_ddp(model)).load_state_dict(state_dict, strict=strict)
class Checkpointer:
def __init__(self, args, model_name):
self.no_save = args.no_save
self.save_dir = args.output_dir
self.keep_milestones = args.keep_milestones
self.model_name = model_name
self.output_labels = None # for supervised training
pattern = f'{self.model_name}_update*.pt'
        tracked = [(int(re.search(r'update(\d+)\.pt', str(f)).group(1)), f)
for f in Path(args.output_dir).rglob(pattern)]
self.tracked = OrderedDict(sorted(tracked, key=lambda t: t[0]))
fpath = (self.last_checkpoint() if args.resume else None) or args.ckpt
if fpath is not None:
print_once(f'Loading model from {fpath}')
self.last_state = torch.load(fpath, map_location="cpu")
else:
self.last_state = None
def maybe_save(self, model, ema_model, optimizer, scaler, train_state,
step, epoch, val_losses, val_wer, args):
"""Saves model checkpoint for inference/resuming training.
Args:
model: the model, optionally wrapped by DistributedDataParallel
ema_model: model with averaged weights, can be None
optimizer: optimizer
epoch (int): epoch during which the model is saved
step (int): number of steps since beginning of training
            scaler: gradient scaler for mixed precision, can be None
            train_state (dict): tracks the best validation loss/WER so far
            val_losses (list): validation losses; element 0 is compared
                against the best value recorded in train_state
            val_wer (list): validation WERs; element 0 is compared
                against the best value recorded in train_state
            args: namespace with training arguments
"""
if epoch == 0 or args.no_save:
return
if args.local_rank != 0 or int(os.environ.get('RANK', 0)) != 0:
return
if args.mode == "finetune":
is_best_ckpt = val_wer[0] < train_state["best_val_wer"]
elif args.mode == "pretrain":
is_best_ckpt = val_losses[0] < train_state["best_val_loss"]
if not is_best_ckpt and epoch % args.save_frequency != 0:
return
unwrap_ = lambda model: getattr(model, 'module', model)
unwrap_ddp = lambda model: unwrap_(unwrap_(model))
state_dict = lambda m: m.state_dict() if m is not None else None
type_name = lambda m: None if m is None else type(m).__name__
val_wer = val_wer or [float("inf")] # wer absent in pretraining
train_state.update({
'optimizer_type': type_name(optimizer),
'scaler_type': type_name(scaler),
'step': step,
'epoch': epoch + 1, # fairseq compat; restart at the next epoch
'best_val_wer': min(val_wer[0], train_state["best_val_wer"]),
'best_val_loss': min(val_losses[0], train_state['best_val_loss']),
})
state = {
'args': args.__dict__,
'model': state_dict(unwrap_ddp(model)),
'ema_model': state_dict(unwrap_ddp(ema_model)),
'optimizer': state_dict(optimizer),
'scaler': state_dict(scaler),
'train_state': train_state,
**({'output_labels': self.output_labels} if self.output_labels else {}),
}
if is_best_ckpt:
fpath = Path(self.save_dir, f"{self.model_name}_best.pt")
print_once(f"Saving {fpath}...")
torch.save(state, fpath)
fpath = Path(self.save_dir, f"{self.model_name}_update{step}.pt")
print_once(f"Saving {fpath}...")
torch.save(state, fpath)
# keep checkpoints with steps closest to milestones
for_keeps = set()
if len(self.tracked) > 0:
tracked = np.array(list(self.tracked.keys()))
for milestone in self.keep_milestones:
st = tracked[np.argmin(np.abs(tracked - milestone))]
for_keeps.add(st)
# remove old checkpoints; keep milestones and the last two
self.tracked[step] = fpath
        for old_step in set(list(self.tracked)[:-2]) - for_keeps:
            try:
                os.remove(self.tracked[old_step])
            except OSError:
                pass
            del self.tracked[old_step]
def maybe_load_state(self, model=None, ema_model=None, optimizer=None,
scaler=None, train_state=None, train_loader=None):
if self.last_state is None:
return
if model is not None:
load_wrapped_state(model, self.last_state['model'])
if ema_model is not None:
            if self.last_state.get('ema_model', None) is not None:
load_wrapped_state(ema_model, self.last_state['ema_model'])
else:
print_once('WARNING: EMA weights not found in the ckpt.')
print_once('WARNING: Initializing EMA model with main model.')
# https://github.com/pytorch/pytorch/issues/28594
model.remove_conv_wn()
load_wrapped_state(ema_model, model.state_dict())
model.apply_conv_wn()
if optimizer is not None:
if 'last_optimizer_state' in self.last_state:
optimizer.load_state_dict(
self.last_state['last_optimizer_state'])
elif 'optimizer' in self.last_state:
optimizer.load_state_dict(self.last_state['optimizer'])
else:
raise ValueError('Optimizer state not found')
if scaler is not None:
if 'scaler' in self.last_state:
scaler.load_state_dict(self.last_state['scaler'])
elif 'amp' in self.last_state:
scaler.load_state_dict(self.last_state['amp'])
else:
raise ValueError('Scaler state not found')
if train_state is not None:
if 'train_state' in self.last_state:
train_state.update(self.last_state['train_state'])
if 'extra_state' in self.last_state:
extra_state = self.last_state['extra_state']
train_state.update({
'epoch': extra_state['train_iterator']['epoch'],
'best_val_loss': extra_state['best']
})
if 'optimizer_history' in extra_state:
                    train_state['step'] = (
                        extra_state['optimizer_history'][-1]['num_updates'])
if train_loader is not None and 'extra_state' in self.last_state:
state = self.last_state['extra_state']['train_iterator']
train_loader.load_state_dict(state)
def last_checkpoint(self):
tracked = list(self.tracked.values())
for fpath in reversed(tracked):
try:
torch.load(fpath, map_location='cpu')
return fpath
            except Exception:
print_once(f'Checkpoint {fpath} appears corrupted.')
return None
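# Hypothetical usage sketch (an addition, not part of the original file):
# num_weights() counts trainable scalars and load_wrapped_state() strips any
# 'module.' DDP prefixes before loading, so a plain state_dict round-trips.
if __name__ == '__main__':
    import torch.nn as nn
    demo_net = nn.Linear(4, 2)
    assert num_weights(demo_net) == 4 * 2 + 2  # weights + biases
    load_wrapped_state(demo_net, demo_net.state_dict())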
| DeepLearningExamples-master | PyTorch/SpeechRecognition/wav2vec2/common/helpers.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import typing as tp
def _safe_readline(fd) -> str:
pos = fd.tell()
while True:
try:
return fd.readline()
except UnicodeDecodeError:
pos -= 1
fd.seek(pos) # search where this character begins
def find_offsets(filename: str, num_chunks: int) -> tp.List[int]:
"""
given a file and a number of chuncks, find the offsets in the file
to be able to chunk around full lines.
"""
with open(filename, "r", encoding="utf-8") as f:
size = os.fstat(f.fileno()).st_size
chunk_size = size // num_chunks
offsets = [0 for _ in range(num_chunks + 1)]
for i in range(1, num_chunks):
f.seek(chunk_size * i)
_safe_readline(f)
offsets[i] = f.tell()
offsets[-1] = size
return offsets
class ChunkLineIterator:
"""
Iterator to properly iterate over lines of a file chunck.
"""
def __init__(self, fd, start_offset: int, end_offset: int):
self._fd = fd
self._start_offset = start_offset
self._end_offset = end_offset
def __iter__(self) -> tp.Iterable[str]:
self._fd.seek(self._start_offset)
# next(f) breaks f.tell(), hence readline() must be used
line = _safe_readline(self._fd)
while line:
pos = self._fd.tell()
            # f.tell() does not always return the byte position in the file;
            # sometimes it jumps to a very large number. A normal read is
            # unlikely to move the position from end_offset to beyond
            # end_offset + 2**32 bytes (4 GB), so this window check is
            # unlikely to be broken by the non-deterministic behavior of
            # f.tell().
if (
self._end_offset > 0
and pos > self._end_offset
and pos < self._end_offset + 2 ** 32
):
break
yield line
line = self._fd.readline()
class Chunker:
"""
contextmanager to read a chunck of a file line by line.
"""
def __init__(self, path: str, start_offset: int, end_offset: int):
self.path = path
self.start_offset = start_offset
self.end_offset = end_offset
def __enter__(self) -> ChunkLineIterator:
self.fd = open(self.path, "r", encoding="utf-8")
return ChunkLineIterator(self.fd, self.start_offset, self.end_offset)
def __exit__(self, exc_type, exc_val, exc_tb) -> None:
self.fd.close()
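# Illustrative usage sketch (an addition, not part of the upstream module):
# find_offsets() returns num_chunks + 1 byte boundaries aligned on line
# starts, and Chunker reads one of those ranges, so that every line of the
# file is seen by exactly one chunk.
if __name__ == "__main__":
    import tempfile
    with tempfile.NamedTemporaryFile("w", suffix=".txt", delete=False) as tmp:
        tmp.writelines(f"line {i}\n" for i in range(100))
        demo_path = tmp.name
    offsets = find_offsets(demo_path, num_chunks=4)
    total_lines = 0
    for start, end in zip(offsets, offsets[1:]):
        with Chunker(demo_path, start, end) as lines:
            total_lines += sum(1 for _ in lines)
    assert total_lines == 100  # each line counted exactly once
    os.remove(demo_path)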
| DeepLearningExamples-master | PyTorch/SpeechRecognition/wav2vec2/common/fairseq/file_chunker_utils.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
SPACE_NORMALIZER = re.compile(r"\s+")
def tokenize_line(line):
line = SPACE_NORMALIZER.sub(" ", line)
line = line.strip()
return line.split()
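# Quick illustrative check (an addition, not in the upstream file): runs of
# whitespace are collapsed and surrounding blanks stripped before splitting.
if __name__ == "__main__":
    assert tokenize_line("  hello   world \n") == ["hello", "world"]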
| DeepLearningExamples-master | PyTorch/SpeechRecognition/wav2vec2/common/fairseq/tokenizer.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import uuid
from typing import Dict, Optional
from torch import Tensor
class FairseqIncrementalState(object):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.init_incremental_state()
def init_incremental_state(self):
self._incremental_state_id = str(uuid.uuid4())
def _get_full_incremental_state_key(self, key: str) -> str:
return "{}.{}".format(self._incremental_state_id, key)
def get_incremental_state(
self,
incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]],
key: str,
) -> Optional[Dict[str, Optional[Tensor]]]:
"""Helper for getting incremental state for an nn.Module."""
full_key = self._get_full_incremental_state_key(key)
if incremental_state is None or full_key not in incremental_state:
return None
return incremental_state[full_key]
def set_incremental_state(
self,
incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]],
key: str,
value: Dict[str, Optional[Tensor]],
) -> Optional[Dict[str, Dict[str, Optional[Tensor]]]]:
"""Helper for setting incremental state for an nn.Module."""
if incremental_state is not None:
full_key = self._get_full_incremental_state_key(key)
incremental_state[full_key] = value
return incremental_state
def with_incremental_state(cls):
cls.__bases__ = (FairseqIncrementalState,) + tuple(
b for b in cls.__bases__ if b != FairseqIncrementalState
)
return cls
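# Hypothetical usage sketch (an addition, not part of the upstream module):
# the decorator mixes FairseqIncrementalState into a module class, so
# per-instance tensors can be cached in a shared incremental_state dict
# under a unique per-instance key prefix.
if __name__ == "__main__":
    import torch
    @with_incremental_state
    class DummyLayer(torch.nn.Module):
        def __init__(self):
            super().__init__()
    layer = DummyLayer()
    cache: Dict[str, Dict[str, Optional[Tensor]]] = {}
    layer.set_incremental_state(cache, "prev_key", {"k": torch.zeros(2, 3)})
    assert layer.get_incremental_state(cache, "prev_key")["k"].shape == (2, 3)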
| DeepLearningExamples-master | PyTorch/SpeechRecognition/wav2vec2/common/fairseq/incremental_decoding_utils.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import warnings
from typing import Callable, List
import torch
import torch.nn.functional as F
MANIFOLD_PATH_SEP = "|"
def split_paths(paths: str, separator=os.pathsep) -> List[str]:
return (
paths.split(separator) if "://" not in paths else paths.split(MANIFOLD_PATH_SEP)
)
def get_activation_fn(activation: str) -> Callable:
"""Returns the activation function corresponding to `activation`"""
from .modules import gelu, gelu_accurate
if activation == "relu":
return F.relu
elif activation == "gelu":
return gelu
elif activation == "gelu_fast":
warnings.warn(
"--activation-fn=gelu_fast has been renamed to gelu_accurate"
)
return gelu_accurate
elif activation == "gelu_accurate":
return gelu_accurate
elif activation == "tanh":
return torch.tanh
elif activation == "linear":
return lambda x: x
else:
raise RuntimeError("--activation-fn {} not supported".format(activation))
def index_put(tensor, indices, value):
tensor[indices] = value
return tensor
def item(tensor):
if hasattr(tensor, "item"):
return tensor.item()
if hasattr(tensor, "__getitem__"):
return tensor[0]
return tensor
def softmax(x, dim: int, onnx_trace: bool = False):
if onnx_trace:
return F.softmax(x.float(), dim=dim)
else:
return F.softmax(x, dim=dim, dtype=torch.float32)
def multiply_grads(optimizer, c):
"""Multiplies grads by a constant *c*."""
for param_group in optimizer.param_groups:
for p in param_group["params"]:
if p.grad is not None:
if torch.is_tensor(c):
c = c.to(p.grad.device)
p.grad.data.mul_(c)
def apply_to_sample(f, sample):
if hasattr(sample, "__len__") and len(sample) == 0:
return {}
def _apply(x):
if torch.is_tensor(x):
return f(x)
elif isinstance(x, dict):
return {key: _apply(value) for key, value in x.items()}
elif isinstance(x, list):
return [_apply(x) for x in x]
elif isinstance(x, tuple):
return tuple(_apply(x) for x in x)
elif isinstance(x, set):
return {_apply(x) for x in x}
else:
return x
return _apply(sample)
def move_to_cuda(sample, device=None):
device = device or torch.cuda.current_device()
def _move_to_cuda(tensor):
# non_blocking is ignored if tensor is not pinned, so we can always set
# to True (see github.com/PyTorchLightning/pytorch-lightning/issues/620)
return tensor.to(device=device, non_blocking=True)
return apply_to_sample(_move_to_cuda, sample)
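# Small illustrative sketch (an addition, not part of the upstream module):
# apply_to_sample() recurses through nested dicts/lists/tuples and applies
# the given function to every tensor, leaving non-tensor leaves untouched.
if __name__ == "__main__":
    demo_sample = {"net_input": [torch.ones(2), (torch.zeros(1), "pad")], "id": 7}
    halved = apply_to_sample(lambda t: t * 0.5, demo_sample)
    assert halved["net_input"][0].tolist() == [0.5, 0.5]
    assert halved["net_input"][1][1] == "pad" and halved["id"] == 7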
| DeepLearningExamples-master | PyTorch/SpeechRecognition/wav2vec2/common/fairseq/utils.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import os
import shutil
from typing import List, Optional
logger = logging.getLogger(__file__)
try:
from iopath.common.file_io import g_pathmgr as IOPathManager
try:
# [FB only - for now] AWS PathHandler for PathManager
from .fb_pathhandlers import S3PathHandler
IOPathManager.register_handler(S3PathHandler())
except KeyError:
logging.warning("S3PathHandler already registered.")
except ImportError:
logging.debug(
"S3PathHandler couldn't be imported. Either missing fb-only files, or boto3 module."
)
except ImportError:
IOPathManager = None
class PathManager:
"""
Wrapper for insulating OSS I/O (using Python builtin operations) from
iopath's PathManager abstraction (for transparently handling various
internal backends).
"""
@staticmethod
def open(
path: str,
mode: str = "r",
buffering: int = -1,
encoding: Optional[str] = None,
errors: Optional[str] = None,
newline: Optional[str] = None,
):
if IOPathManager:
return IOPathManager.open(
path=path,
mode=mode,
buffering=buffering,
encoding=encoding,
errors=errors,
newline=newline,
)
return open(
path,
mode=mode,
buffering=buffering,
encoding=encoding,
errors=errors,
newline=newline,
)
@staticmethod
def copy(src_path: str, dst_path: str, overwrite: bool = False) -> bool:
if IOPathManager:
return IOPathManager.copy(
src_path=src_path, dst_path=dst_path, overwrite=overwrite
)
return shutil.copyfile(src_path, dst_path)
@staticmethod
def get_local_path(path: str, **kwargs) -> str:
if IOPathManager:
return IOPathManager.get_local_path(path, **kwargs)
return path
@staticmethod
def exists(path: str) -> bool:
if IOPathManager:
return IOPathManager.exists(path)
return os.path.exists(path)
@staticmethod
def isfile(path: str) -> bool:
if IOPathManager:
return IOPathManager.isfile(path)
return os.path.isfile(path)
@staticmethod
def ls(path: str) -> List[str]:
if IOPathManager:
return IOPathManager.ls(path)
return os.listdir(path)
@staticmethod
def mkdirs(path: str) -> None:
if IOPathManager:
return IOPathManager.mkdirs(path)
os.makedirs(path, exist_ok=True)
@staticmethod
def rm(path: str) -> None:
if IOPathManager:
return IOPathManager.rm(path)
os.remove(path)
@staticmethod
def chmod(path: str, mode: int) -> None:
if not PathManager.path_requires_pathmanager(path):
os.chmod(path, mode)
@staticmethod
def register_handler(handler) -> None:
if IOPathManager:
return IOPathManager.register_handler(handler=handler)
@staticmethod
def copy_from_local(
local_path: str, dst_path: str, overwrite: bool = False, **kwargs
) -> None:
if IOPathManager:
return IOPathManager.copy_from_local(
local_path=local_path, dst_path=dst_path, overwrite=overwrite, **kwargs
)
return shutil.copyfile(local_path, dst_path)
@staticmethod
def path_requires_pathmanager(path: str) -> bool:
"""Do we require PathManager to access given path?"""
if IOPathManager:
for p in IOPathManager._path_handlers.keys():
if path.startswith(p):
return True
return False
@staticmethod
def supports_rename(path: str) -> bool:
# PathManager doesn't yet support renames
return not PathManager.path_requires_pathmanager(path)
@staticmethod
def rename(src: str, dst: str):
os.rename(src, dst)
"""
ioPath async PathManager methods:
"""
@staticmethod
def opena(
path: str,
mode: str = "r",
buffering: int = -1,
encoding: Optional[str] = None,
errors: Optional[str] = None,
newline: Optional[str] = None,
):
"""
Return file descriptor with asynchronous write operations.
"""
global IOPathManager
if not IOPathManager:
logging.info("ioPath is initializing PathManager.")
try:
from iopath.common.file_io import PathManager
IOPathManager = PathManager()
except Exception:
logging.exception("Failed to initialize ioPath PathManager object.")
return IOPathManager.opena(
path=path,
mode=mode,
buffering=buffering,
encoding=encoding,
errors=errors,
newline=newline,
)
@staticmethod
def async_close() -> bool:
"""
Wait for files to be written and clean up asynchronous PathManager.
NOTE: `PathManager.async_close()` must be called at the end of any
script that uses `PathManager.opena(...)`.
"""
global IOPathManager
if IOPathManager:
return IOPathManager.async_close()
return False
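# Minimal usage sketch (an addition): without iopath installed, PathManager
# transparently falls back to the plain builtin/os/shutil implementations,
# so local filesystem paths keep working unchanged.
if __name__ == "__main__":
    import tempfile
    demo_dir = tempfile.mkdtemp()
    demo_file = os.path.join(demo_dir, "note.txt")
    with PathManager.open(demo_file, "w") as f:
        f.write("hello\n")
    assert PathManager.isfile(demo_file)
    assert "note.txt" in PathManager.ls(demo_dir)
    PathManager.rm(demo_file)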
| DeepLearningExamples-master | PyTorch/SpeechRecognition/wav2vec2/common/fairseq/file_io.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
class ModuleProxyWrapper(torch.nn.Module):
"""
Wrap a DistributedDataParallel module and forward requests for missing
attributes to the module wrapped by DDP (the twice-wrapped module).
Also forward calls to :func:`state_dict` and :func:`load_state_dict`.
Usage::
module.xyz = "hello world"
wrapped_module = DistributedDataParallel(module, **ddp_args)
wrapped_module = ModuleProxyWrapper(wrapped_module)
assert wrapped_module.xyz == "hello world"
assert wrapped_module.state_dict().keys() == module.state_dict().keys()
Args:
module (nn.Module): module to wrap
"""
def __init__(self, module: torch.nn.Module):
super().__init__()
assert hasattr(module, "module"), \
"ModuleProxyWrapper expects input to wrap another module"
self.module = module
def __getattr__(self, name):
"""Forward missing attributes to twice-wrapped module."""
try:
# defer to nn.Module's logic
return super().__getattr__(name)
except AttributeError:
try:
# forward to the once-wrapped module
return getattr(self.module, name)
except AttributeError:
# forward to the twice-wrapped module
return getattr(self.module.module, name)
def state_dict(self, *args, **kwargs):
"""Forward to the twice-wrapped module."""
return self.module.module.state_dict(*args, **kwargs)
def load_state_dict(self, *args, **kwargs):
"""Forward to the twice-wrapped module."""
return self.module.module.load_state_dict(*args, **kwargs)
def forward(self, *args, **kwargs):
return self.module(*args, **kwargs)
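# Runnable sketch (an addition): DistributedDataParallel is stood in for by a
# trivial wrapper exposing a ``module`` attribute, which is all that
# ModuleProxyWrapper needs to forward attribute and state_dict access.
if __name__ == "__main__":
    class _FakeDDP(torch.nn.Module):
        def __init__(self, module):
            super().__init__()
            self.module = module
        def forward(self, *args, **kwargs):
            return self.module(*args, **kwargs)
    inner = torch.nn.Linear(3, 3)
    inner.xyz = "hello world"
    proxied = ModuleProxyWrapper(_FakeDDP(inner))
    assert proxied.xyz == "hello world"
    assert proxied.state_dict().keys() == inner.state_dict().keys()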
| DeepLearningExamples-master | PyTorch/SpeechRecognition/wav2vec2/common/fairseq/dist.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import types
import torch
def get_fused_adam_class():
"""
Look for the FusedAdam optimizer from apex. We first try to load the
"contrib" interface, which is a bit faster than the main interface,
but is technically deprecated.
"""
try:
# The "deprecated" interface in recent versions of apex is a bit
# faster than the main interface, since we don't use the apex
# optimizer. This can be installed by passing the
# `--deprecated_fused_adam` option when building apex.
global fused_adam_cuda
import importlib
fused_adam_cuda = importlib.import_module("fused_adam_cuda")
return FusedAdamV1
except ImportError:
try:
# fallback to the newer interface
from apex.optimizers import FusedAdam as _FusedAdam # noqa
from apex.multi_tensor_apply import multi_tensor_applier
if multi_tensor_applier.available:
return FusedAdamV2
except ImportError:
pass
return None
class FusedAdamV1(torch.optim.Optimizer):
"""
Implements Adam algorithm. Currently GPU-only. Requires Apex to be installed via
``python setup.py install --cuda_ext --cpp_ext``.
It has been proposed in `Adam: A Method for Stochastic Optimization`_.
Compared to the original version in Apex, the fairseq version casts grads
and params to FP32 internally to support ``--memory-efficient-fp16``.
Args:
params (iterable): iterable of parameters to optimize or dicts defining
parameter groups.
lr (float, optional): learning rate. (default: 1e-3)
betas (Tuple[float, float], optional): coefficients used for computing
running averages of gradient and its square. (default: (0.9, 0.999))
eps (float, optional): term added to the denominator to improve
numerical stability. (default: 1e-8)
weight_decay (float, optional): weight decay (L2 penalty) (default: 0)
amsgrad (boolean, optional): whether to use the AMSGrad variant of this
algorithm from the paper `On the Convergence of Adam and Beyond`_
(default: False) NOT SUPPORTED in FusedAdam!
eps_inside_sqrt (boolean, optional): in the 'update parameters' step,
adds eps to the bias-corrected second moment estimate before
evaluating square root instead of adding it to the square root of
second moment estimate as in the original paper. (default: False)
.. _Adam: A Method for Stochastic Optimization:
https://arxiv.org/abs/1412.6980
.. _On the Convergence of Adam and Beyond:
https://openreview.net/forum?id=ryQu7f-RZ
"""
def __init__(
self,
params,
lr=1e-3,
bias_correction=True,
betas=(0.9, 0.999),
eps=1e-8,
eps_inside_sqrt=False,
weight_decay=0.0,
max_grad_norm=0.0,
amsgrad=False,
):
global fused_adam_cuda
import importlib
fused_adam_cuda = importlib.import_module("fused_adam_cuda")
if amsgrad:
raise RuntimeError("FusedAdam does not support the AMSGrad variant.")
defaults = {
"lr": lr,
"bias_correction": bias_correction,
"betas": betas,
"eps": eps,
"weight_decay": weight_decay,
"max_grad_norm": max_grad_norm,
}
super().__init__(params, defaults)
self.eps_mode = 0 if eps_inside_sqrt else 1
@property
def supports_memory_efficient_fp16(self):
return True
@property
def supports_flat_params(self):
return True
@property
def supports_step_with_scale(self):
return True
def step(self, closure=None, grads=None, scale=1.0, grad_norms=None):
"""Performs a single optimization step.
Args:
closure (callable, optional): A closure that reevaluates the model
and returns the loss.
grads (list of tensors, optional): weight gradient to use for the
optimizer update. If gradients have type torch.half, parameters
are expected to be in type torch.float. (default: None)
output params (list of tensors, optional): A reduced precision copy
of the updated weights written out in addition to the regular
updated weights. Have to be of same type as gradients. (default: None)
scale (float, optional): factor to divide gradient tensor values
by before applying to weights. (default: 1)
"""
loss = None
if closure is not None:
loss = closure()
if grads is None:
grads_group = [None] * len(self.param_groups)
# backward compatibility
# assuming a list/generator of parameter means single group
elif isinstance(grads, types.GeneratorType):
grads_group = [grads]
elif type(grads[0]) != list:
grads_group = [grads]
else:
grads_group = grads
if grad_norms is None:
grad_norms = [None] * len(self.param_groups)
for group, grads_this_group, grad_norm in zip(
self.param_groups, grads_group, grad_norms
):
if grads_this_group is None:
grads_this_group = [None] * len(group["params"])
# compute combined scale factor for this group
combined_scale = scale
if group.get("max_grad_norm", 0) > 0:
# norm is in fact norm*scale
clip = ((grad_norm / scale) + 1e-6) / group["max_grad_norm"]
if clip > 1:
combined_scale = clip * scale
bias_correction = 1 if group.get("bias_correction", 1) else 0
for p, grad in zip(group["params"], grads_this_group):
# note: p.grad should not ever be set for correct
# operation of mixed precision optimizer that sometimes
# sends None gradients
if p.grad is None and grad is None:
continue
if grad is None:
grad = p.grad.data
if grad.is_sparse:
raise RuntimeError(
"FusedAdam does not support sparse gradients, "
"please consider SparseAdam instead"
)
p_data_fp32 = p.data.float()
state = self.state[p]
# State initialization
if len(state) == 0:
state["step"] = 0
# Exponential moving average of gradient values
state["exp_avg"] = torch.zeros_like(p_data_fp32)
# Exponential moving average of squared gradient values
state["exp_avg_sq"] = torch.zeros_like(p_data_fp32)
else:
state["exp_avg"] = state["exp_avg"].to(p_data_fp32)
state["exp_avg_sq"] = state["exp_avg_sq"].to(p_data_fp32)
exp_avg = state["exp_avg"]
exp_avg_sq = state["exp_avg_sq"]
beta1, beta2 = group["betas"]
state["step"] += 1
out_p = p.data
with torch.cuda.device(p.device):
fused_adam_cuda.adam(
p_data_fp32,
out_p,
exp_avg,
exp_avg_sq,
grad,
group["lr"],
beta1,
beta2,
group["eps"],
combined_scale,
state["step"],
self.eps_mode,
bias_correction,
group["weight_decay"],
)
return loss
try:
from apex.optimizers import FusedAdam
from apex.multi_tensor_apply import multi_tensor_applier
class FusedAdamV2(FusedAdam):
"""
Compared to the original version in Apex, the fairseq version casts grads
and params to FP32 internally to support ``--memory-efficient-fp16``.
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
if not hasattr(self, "multi_tensor_adam"):
raise Exception(
"Apex installation is outdated. Please install an updated version of apex."
)
@property
def supports_memory_efficient_fp16(self):
return True
@property
def supports_flat_params(self):
return True
def step(
self,
closure=None,
grads=None,
output_params=None,
scale=None,
grad_norms=None,
):
"""Performs a single optimization step."""
loss = None
if closure is not None:
loss = closure()
for group in self.param_groups:
bias_correction = 1 if group["bias_correction"] else 0
beta1, beta2 = group["betas"]
# assume same step across group now to simplify things
# per parameter step can be easily support by making it tensor, or pass list into kernel
if "step" in group:
group["step"] += 1
else:
group["step"] = 1
# create lists for multi-tensor apply
g_16, p_16, orig_p_16, m_16, v_16 = [], [], [], [], []
g_32, p_32, m_32, v_32 = [], [], [], []
for p in group["params"]:
if p.grad is None:
continue
if p.grad.data.is_sparse:
raise RuntimeError(
"FusedAdam does not support sparse gradients, "
"please consider SparseAdam instead"
)
state = self.state[p]
# State initialization
if len(state) == 0:
# Exponential moving average of gradient values
state["exp_avg"] = torch.zeros_like(p.data, dtype=torch.float)
# Exponential moving average of squared gradient values
state["exp_avg_sq"] = torch.zeros_like(
p.data, dtype=torch.float
)
else:
state["exp_avg"] = state["exp_avg"].to(
device=p.data.device, dtype=torch.float
)
state["exp_avg_sq"] = state["exp_avg_sq"].to(
device=p.data.device, dtype=torch.float
)
if p.dtype == torch.float16:
g_16.append(p.grad.data.float())
p_16.append(p.data.float())
orig_p_16.append(p.data)
m_16.append(state["exp_avg"])
v_16.append(state["exp_avg_sq"])
elif p.dtype == torch.float32:
g_32.append(p.grad.data)
p_32.append(p.data)
m_32.append(state["exp_avg"])
v_32.append(state["exp_avg_sq"])
else:
raise RuntimeError("FusedAdam only support fp16 and fp32.")
with torch.cuda.device(p.device):
if len(g_16) > 0:
multi_tensor_applier(
self.multi_tensor_adam,
self._dummy_overflow_buf,
[g_16, p_16, m_16, v_16],
group["lr"],
beta1,
beta2,
group["eps"],
group["step"],
self.adam_w_mode,
bias_correction,
group["weight_decay"],
)
for orig_p, p in zip(orig_p_16, p_16):
orig_p.copy_(p.data)
if len(g_32) > 0:
multi_tensor_applier(
self.multi_tensor_adam,
self._dummy_overflow_buf,
[g_32, p_32, m_32, v_32],
group["lr"],
beta1,
beta2,
group["eps"],
group["step"],
self.adam_w_mode,
bias_correction,
group["weight_decay"],
)
return loss
except ImportError:
pass
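# Minimal selection sketch (an addition): use the fused apex kernel when it
# is importable and a GPU is present, otherwise fall back to torch.optim.Adam.
if __name__ == "__main__":
    demo_params = [torch.nn.Parameter(torch.zeros(2))]
    fused_cls = get_fused_adam_class()
    if fused_cls is not None and torch.cuda.is_available():
        demo_opt = fused_cls(demo_params, lr=1e-3)
    else:
        demo_opt = torch.optim.Adam(demo_params, lr=1e-3)
    print(type(demo_opt).__name__)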
| DeepLearningExamples-master | PyTorch/SpeechRecognition/wav2vec2/common/fairseq/optim/fused_adam.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
class DynamicLossScaler(object):
def __init__(
self,
init_scale=2.0**15,
scale_factor=2.0,
scale_window=2000,
tolerance=0.0,
threshold=None,
min_loss_scale=1e-4,
):
self.loss_scale = init_scale
self.scale_factor = scale_factor
self.scale_window = scale_window
self.tolerance = tolerance
self.threshold = threshold
self._iter = 0
self._last_overflow_iter = -1
self._last_rescale_iter = -1
self._overflows_since_rescale = 0
self.min_loss_scale = min_loss_scale
def scale(self, outputs):
return self.loss_scale * outputs
def update(self):
if (self._iter - self._last_overflow_iter) % self.scale_window == 0:
self.loss_scale *= self.scale_factor
self._last_rescale_iter = self._iter
self._iter += 1
def _decrease_loss_scale(self):
self.loss_scale /= self.scale_factor
if self.threshold is not None:
self.loss_scale = max(self.loss_scale, self.threshold)
def check_overflow(self, grad_norm):
# detect inf and nan
if grad_norm == float("inf") or grad_norm != grad_norm:
            # overflow has occurred
prev_scale = self.loss_scale
iter_since_rescale = self._iter - self._last_rescale_iter
self._last_overflow_iter = self._iter
self._overflows_since_rescale += 1
pct_overflow = self._overflows_since_rescale / float(iter_since_rescale)
if pct_overflow >= self.tolerance:
self._decrease_loss_scale()
self._last_rescale_iter = self._iter
self._overflows_since_rescale = 0
if self.loss_scale <= self.min_loss_scale:
# Use FloatingPointError as an uncommon error that parent
# functions can safely catch to stop training.
self.loss_scale = prev_scale
raise FloatingPointError(
(
"Minimum loss scale reached ({}). Your loss is probably exploding. "
"Try lowering the learning rate, using gradient clipping or "
"increasing the batch size."
).format(self.min_loss_scale)
)
self._iter += 1
raise OverflowError("setting loss scale to: " + str(self.loss_scale))
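# Illustrative sketch (an addition): a finite gradient norm passes through
# check_overflow() untouched, while an inf/NaN norm halves the scale and
# raises OverflowError, which the training loop is expected to catch in
# order to skip the offending step.
if __name__ == "__main__":
    scaler = DynamicLossScaler(init_scale=2.0**7, scale_window=4)
    scaler.check_overflow(1.0)  # finite -> no exception, scale unchanged
    try:
        scaler.check_overflow(float("inf"))
    except OverflowError:
        pass
    assert scaler.loss_scale == 2.0**6  # halved after the overflow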
| DeepLearningExamples-master | PyTorch/SpeechRecognition/wav2vec2/common/fairseq/optim/dynamic_loss_scaler.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import warnings
from collections import defaultdict
import torch
from common.fairseq.optim.dynamic_loss_scaler import DynamicLossScaler
@torch.no_grad()
def clip_grad_norm_(params, max_norm, aggregate_norm_fn=None) -> torch.Tensor:
def grad_exists(p):
return p is not None and getattr(p, "grad", None) is not None
if isinstance(params, torch.Tensor):
params = [params]
params = list(params)
grads = [
p.grad.detach() for p in params if grad_exists(p) and not hasattr(p, "expert")
]
expert_grads = [
p.grad.detach() for p in params if grad_exists(p) and hasattr(p, "expert")
]
if len(grads) == 0:
if len(params) > 0:
return params[0].new_tensor(0.0)
else:
return torch.tensor(0.0)
if len(grads) == 1:
total_norm = torch.norm(grads[0], p=2, dtype=torch.float32)
else:
        # The upstream code referenced apex's fused multi_tensor_l2norm kernel
        # here without importing it; guard the import so this path is taken
        # only when apex is available and all grads live on the same GPU.
        try:
            import amp_C
            from apex.multi_tensor_apply import multi_tensor_applier
            multi_tensor_l2norm_available = True
        except ImportError:
            multi_tensor_l2norm_available = False
        if (multi_tensor_l2norm_available and grads[0].is_cuda
                and all(g.device == grads[0].device for g in grads)):
            has_inf = torch.zeros((1, 1), dtype=torch.int,
                                  device=grads[0].device)
            with torch.cuda.device(grads[0].device):
                norm = multi_tensor_applier(
                    amp_C.multi_tensor_l2norm, has_inf, [grads], False)
            total_norm = norm[0]
else:
if torch.cuda.is_available():
warnings.warn(
"amp_C fused kernels unavailable, disabling multi_tensor_l2norm; "
"you may get better performance by installing NVIDIA's apex library"
)
device = torch.cuda.current_device()
elif grads[0].device.type == "xla":
device = grads[0].device
else:
device = torch.device("cpu")
total_norm = torch.norm(
torch.stack(
[torch.norm(g, p=2, dtype=torch.float32).to(device) for g in grads]
)
)
if aggregate_norm_fn is not None:
total_norm = aggregate_norm_fn(total_norm)
if max_norm > 0:
max_norm = float(max_norm)
clip_coef = (max_norm / (total_norm + 1e-6)).clamp_(max=1)
for g in grads + expert_grads:
g.mul_(clip_coef)
return total_norm
class FairseqOptimizer(object):
def __init__(self, cfg):
super().__init__()
self.cfg = cfg
@classmethod
def add_args(cls, parser):
"""Add optimizer-specific arguments to the parser."""
dc = getattr(cls, "__dataclass", None)
if dc is not None:
gen_parser_from_dataclass(parser, dc())
@property
def optimizer(self):
"""Return a torch.optim.optimizer.Optimizer instance."""
if not hasattr(self, "_optimizer"):
raise NotImplementedError
if not isinstance(self._optimizer, torch.optim.Optimizer):
raise ValueError("_optimizer must be an instance of torch.optim.Optimizer")
return self._optimizer
@optimizer.setter
def optimizer(self, optimizer):
"""Reset optimizer instance."""
if not hasattr(self, "_optimizer"):
raise NotImplementedError
if not isinstance(self._optimizer, torch.optim.Optimizer):
raise ValueError("_optimizer must be an instance of torch.optim.Optimizer")
self._optimizer = optimizer
@property
def optimizer_config(self):
"""
Return a kwarg dictionary that will be used to override optimizer
args stored in checkpoints. This allows us to load a checkpoint and
resume training using a different set of optimizer args, e.g., with a
different learning rate.
"""
raise NotImplementedError
@property
def params(self):
"""Return an iterable of the parameters held by the optimizer."""
for param_group in self.param_groups:
for p in param_group["params"]:
yield p
@property
def param_groups(self):
return self.optimizer.param_groups
def __getstate__(self):
return self._optimizer.__getstate__()
def get_lr(self):
"""Return the current learning rate."""
return self.param_groups[0]["lr"]
def set_lr(self, lr):
"""Set the learning rate."""
for param_group in self.param_groups:
param_group["lr"] = lr
def state_dict(self):
"""Return the optimizer's state dict."""
return self.optimizer.state_dict()
def load_state_dict(self, state_dict, optimizer_overrides=None):
"""Load an optimizer state dict.
In general we should prefer the configuration of the existing optimizer
instance (e.g., learning rate) over that found in the state_dict. This
allows us to resume training from a checkpoint using a new set of
optimizer args.
"""
self.optimizer.load_state_dict(state_dict)
if optimizer_overrides is not None and len(optimizer_overrides) > 0:
# override learning rate, momentum, etc. with latest values
for group in self.param_groups:
group.update(optimizer_overrides)
def backward(self, loss):
"""Computes the sum of gradients of the given tensor w.r.t. graph leaves."""
loss.backward()
def all_reduce_grads(self, module):
"""Manually all-reduce gradients (if required)."""
if hasattr(module, "all_reduce_grads"):
module.all_reduce_grads()
def multiply_grads(self, c):
"""Multiplies grads by a constant *c*."""
for p in self.params:
if p.grad is not None:
if torch.is_tensor(c):
c = c.to(p.grad.device)
p.grad.data.mul_(c)
def clip_grad_norm(self, max_norm, aggregate_norm_fn=None):
"""Clips gradient norm."""
return clip_grad_norm_(self.params, max_norm, aggregate_norm_fn)
def step(self, closure=None, scale=1.0, groups=None):
"""Performs a single optimization step."""
if self.supports_step_with_scale:
if self.supports_groups:
self.optimizer.step(closure, scale=scale, groups=groups)
else:
self.optimizer.step(closure, scale=scale)
else:
if scale != 1.0:
self.multiply_grads(1.0 / scale)
if self.supports_groups:
self.optimizer.step(closure, groups=groups)
else:
self.optimizer.step(closure)
def zero_grad(self):
"""Clears the gradients of all optimized parameters."""
for p in self.params:
p.grad = None
self.optimizer.zero_grad()
@property
def supports_memory_efficient_fp16(self):
if hasattr(self.optimizer, "supports_memory_efficient_fp16"):
return self.optimizer.supports_memory_efficient_fp16
return False
@property
def supports_step_with_scale(self):
if hasattr(self.optimizer, "supports_step_with_scale"):
return self.optimizer.supports_step_with_scale
return False
@property
def supports_groups(self):
if hasattr(self.optimizer, "supports_groups"):
return self.optimizer.supports_groups
return False
@property
def supports_flat_params(self):
"""
Whether the optimizer supports collapsing of the model
parameters/gradients into a single contiguous Tensor.
"""
if hasattr(self.optimizer, "supports_flat_params"):
return self.optimizer.supports_flat_params
return False
def broadcast_global_state_dict(self, state_dict):
"""
Broadcasts a global state dict to all ranks.
Useful for optimizers that shard state between ranks.
"""
if hasattr(self.optimizer, "broadcast_global_state_dict"):
return self.optimizer.broadcast_global_state_dict(state_dict)
else:
return state_dict
class _FP16OptimizerMixin(object):
def __init__(self, *args, **kwargs):
# forward __init__ call to the next class in mro(method resolution order)
super().__init__(*args, **kwargs)
self._multiply_factor = 1.0
@property
def has_flat_params(self):
return torch.is_tensor(self.fp32_params) or (
isinstance(self.fp32_params, dict)
and all(torch.is_tensor(t) for t in self.fp32_params.values())
)
@classmethod
def build_fp32_params(cls, args, params, flatten=True):
# create FP32 copy of parameters and grads
if flatten:
is_pipeline_parallel = getattr(
args, "pipeline_model_parallel", False
) and getattr(args, "distributed_no_spawn", False)
total_param_size = sum(p.data.numel() for p in params)
devices = [torch.cuda.current_device()]
if is_pipeline_parallel:
devices = list(set(args.pipeline_devices))
fp32_params = {}
for device in devices:
if is_pipeline_parallel:
device_param_size = sum(
p.data.numel() for p in params if p.device.index == device
)
device_params = [p for p in params if p.device.index == device]
else:
device_param_size = total_param_size
device_params = params
fp32_params[device] = (
device_params[0].new(0).float().new(device_param_size)
)
offset = 0
for p in device_params:
numel = p.data.numel()
fp32_params[device][offset : offset + numel].copy_(p.data.view(-1))
offset += numel
fp32_params[device] = torch.nn.Parameter(fp32_params[device])
fp32_params[device].grad = fp32_params[device].data.new(
device_param_size
)
return fp32_params
else:
fp32_params = []
for p in params:
p32 = torch.nn.Parameter(p.data.float())
if hasattr(p, 'expert'):
p32.expert = True
p32.grad = torch.zeros_like(p32.data)
if hasattr(p, "param_group"):
p32.param_group = p.param_group
fp32_params.append(p32)
return fp32_params
def state_dict(self):
"""Return the optimizer's state dict."""
state_dict = self.fp32_optimizer.state_dict()
if self.scaler is not None:
state_dict["loss_scale"] = self.scaler.loss_scale
return state_dict
def load_state_dict(self, state_dict, optimizer_overrides=None):
"""Load an optimizer state dict.
In general we should prefer the configuration of the existing optimizer
instance (e.g., learning rate) over that found in the state_dict. This
allows us to resume training from a checkpoint using a new set of
optimizer args.
"""
if "loss_scale" in state_dict and self.scaler is not None:
self.scaler.loss_scale = state_dict["loss_scale"]
self.fp32_optimizer.load_state_dict(state_dict, optimizer_overrides)
def backward(self, loss):
"""Computes the sum of gradients of the given tensor w.r.t. graph leaves.
Compared to :func:`fairseq.optim.FairseqOptimizer.backward`, this
function additionally dynamically scales the loss to avoid gradient
underflow.
"""
if self.scaler is not None:
loss = self.scaler.scale(loss)
loss.backward()
self._needs_sync = True
def _sync_fp16_grads_to_fp32(self):
if self._needs_sync:
# copy FP16 grads to FP32
if self.has_flat_params:
devices = list(self.fp32_params.keys())
device_params_dict = defaultdict(list)
for p in self.fp16_params:
if p.requires_grad:
device_params_dict[p.device.index].append(p)
for device in devices:
device_params = device_params_dict[device]
offset = 0
for p in device_params:
grad_data = (
p.grad.data
if p.grad is not None
else p.data.new_zeros(p.data.shape)
)
numel = grad_data.numel()
self.fp32_params[device].grad.data[
offset : offset + numel
].copy_(grad_data.view(-1))
offset += numel
else:
for p, p32 in zip(self.fp16_params, self.fp32_params):
if not p.requires_grad:
continue
if p.grad is not None:
if p32.grad is None:
p32.grad = p.grad.data.float()
else:
p32.grad.data.copy_(p.grad.data)
else:
p32.grad = torch.zeros_like(p.data, dtype=torch.float)
self._needs_sync = False
def _sync_fp32_params_to_fp16(self):
# copy FP32 params back into FP16 model
if self.has_flat_params:
devices = list(self.fp32_params.keys())
device_params_dict = defaultdict(list)
for p in self.fp16_params:
device_params_dict[p.device.index].append(p)
for device in devices:
device_params = device_params_dict[device]
offset = 0
for p in device_params:
numel = p.data.numel()
p.data.copy_(
self.fp32_params[device]
.data[offset : offset + numel]
.view_as(p.data)
)
offset += numel
else:
for p, p32 in zip(self.fp16_params, self.fp32_params):
if not p.requires_grad:
continue
p.data.copy_(p32.data)
def _unscale_grads(self):
self._sync_fp16_grads_to_fp32()
if (
# Skip the multiplication if it's a no-op (i.e., if _multiply_factor
# is 1.0). At the same time, we want to avoid the device-to-host
# transfer by comparing it to 1.0. Since _multiply_factor starts as
# a Python float, we roughly assume that if it's a tensor then it's
# probably not =1.0 anymore and we do the multiplication. Otherwise
# we can safely check the value without a D2H transfer.
torch.is_tensor(self._multiply_factor)
or self._multiply_factor != 1.0
):
self.fp32_optimizer.multiply_grads(self._multiply_factor)
self._multiply_factor = 1.0
def multiply_grads(self, c):
"""Multiplies grads by a constant ``c``."""
self._multiply_factor *= c
def clip_grad_norm(self, max_norm, aggregate_norm_fn=None):
"""Clips gradient norm and updates dynamic loss scaler."""
self._sync_fp16_grads_to_fp32()
grad_norm = self._multiply_factor * self.fp32_optimizer.clip_grad_norm(
0, aggregate_norm_fn
)
if self.scaler is not None:
if grad_norm > max_norm > 0.0:
self._multiply_factor *= max_norm / grad_norm
self.scaler.check_overflow(grad_norm)
elif max_norm > 0.0:
clip_coef = (max_norm / (grad_norm + 1e-6)).clamp_(max=1)
self._multiply_factor *= clip_coef
return grad_norm
def step(self, closure=None, groups=None):
"""Performs a single optimization step."""
self._sync_fp16_grads_to_fp32()
if getattr(self, "supports_step_with_scale", False):
self.fp32_optimizer.step(closure, scale=(1.0 / self._multiply_factor), groups=groups)
else:
self._unscale_grads()
self.fp32_optimizer.step(closure, groups=groups)
if self.scaler is not None:
self.scaler.update()
self._sync_fp32_params_to_fp16()
def zero_grad(self):
"""Clears the gradients of all optimized parameters."""
for p in self.fp16_params:
p.grad = None
if self.has_flat_params:
if torch.is_tensor(self.fp32_params):
self.fp32_params.grad.zero_()
elif isinstance(self.fp32_params, dict):
for fp32_params in self.fp32_params.values():
fp32_params.grad.zero_()
else:
raise RuntimeError("self.fp32_params must be a tensor or dict")
else:
for p32 in self.fp32_params:
if p32.grad is not None:
p32.grad.zero_()
self._needs_sync = False
if self.scaler is not None:
self._multiply_factor = 1.0 / float(self.scaler.loss_scale)
class FP16Optimizer(_FP16OptimizerMixin, FairseqOptimizer):
"""
Wrap an *optimizer* to support FP16 (mixed precision) training.
"""
def __init__(self, cfg, params, fp32_optimizer, fp32_params, **kwargs):
super().__init__(cfg.optimizer)
self.fp16_params = params
self.fp32_optimizer = fp32_optimizer
self.fp32_params = fp32_params
scale_window = int(2 ** 14 / cfg.world_size / cfg.update_freq)
if not (cfg.bf16 and cfg.bf16_disable_loss_scaler):
self.scaler = DynamicLossScaler(
init_scale=cfg.fp16_init_scale,
scale_window=scale_window,
tolerance=0.0,
threshold=None,
min_loss_scale=cfg.min_loss_scale,
)
else:
print('Disabled loss scaler.')
# disable loss scaling for bfloat16
self.scaler = None
@property
def optimizer(self):
return self.fp32_optimizer.optimizer
@optimizer.setter
def optimizer(self, optimizer):
self.fp32_optimizer.optimizer = optimizer
@property
def lr_scheduler(self):
return getattr(self.fp32_optimizer, "lr_scheduler", None)
@property
def optimizer_config(self):
return self.fp32_optimizer.optimizer_config
def get_lr(self):
return self.fp32_optimizer.get_lr()
def set_lr(self, lr):
self.fp32_optimizer.set_lr(lr)
def all_reduce_grads(self, module):
self.fp32_optimizer.all_reduce_grads(module)
@property
def supports_flat_params(self):
return self.fp32_optimizer.supports_flat_params
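# Quick sketch (an addition): clip_grad_norm_() returns the pre-clipping
# total norm and rescales gradients in place when that norm exceeds max_norm.
if __name__ == "__main__":
    w = torch.nn.Parameter(torch.ones(4))
    w.grad = torch.full((4,), 2.0)  # total L2 norm is 4.0
    total = clip_grad_norm_([w], max_norm=1.0)
    assert abs(total.item() - 4.0) < 1e-4
    assert abs(w.grad.norm().item() - 1.0) < 1e-3  # clipped down to max_norm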
| DeepLearningExamples-master | PyTorch/SpeechRecognition/wav2vec2/common/fairseq/optim/fp16_optimizer.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
from collections.abc import Collection
import torch
import torch.distributed as dist
import torch.optim
from common.fairseq.optim.fp16_optimizer import FairseqOptimizer
from common.fairseq.optim.fused_adam import get_fused_adam_class
class FairseqAdam(FairseqOptimizer):
"""Adam optimizer for fairseq.
Important note: this optimizer corresponds to the "AdamW" variant of
Adam in its weight decay behavior. As such, it is most closely
analogous to torch.optim.AdamW from PyTorch.
"""
def __init__(self, cfg, params):
super().__init__(cfg)
fused_adam_cls = get_fused_adam_class()
use_fused_adam = (
not getattr(cfg, "use_old_adam", False)
and fused_adam_cls is not None
and torch.cuda.is_available()
)
if use_fused_adam:
self._optimizer = fused_adam_cls(params, **self.optimizer_config)
else:
self._optimizer = Adam(params, **self.optimizer_config)
@property
def optimizer_config(self):
"""
Return a kwarg dictionary that will be used to override optimizer
args stored in checkpoints. This allows us to load a checkpoint and
resume training using a different set of optimizer args, e.g., with a
different learning rate.
"""
return {
"lr": self.cfg.lr[0]
if isinstance(self.cfg.lr, Collection)
else self.cfg.lr,
"betas": eval(self.cfg.adam_betas)
if isinstance(self.cfg.adam_betas, str)
else self.cfg.adam_betas,
"eps": self.cfg.adam_eps,
"weight_decay": self.cfg.weight_decay,
}
def average_params(self):
"""Reduce Params is only used during BMUF distributed training."""
state_dict = self.optimizer.state_dict()
total_gpus = float(dist.get_world_size())
for _, value in state_dict["state"].items():
value["exp_avg"] /= total_gpus
value["exp_avg_sq"] /= total_gpus
dist.all_reduce(value["exp_avg"], op=dist.ReduceOp.SUM)
dist.all_reduce(value["exp_avg_sq"], op=dist.ReduceOp.SUM)
class Adam(torch.optim.Optimizer):
r"""Implements Adam algorithm.
This implementation is modified from torch.optim.Adam based on:
`Fixed Weight Decay Regularization in Adam`
(see https://arxiv.org/abs/1711.05101)
It has been proposed in `Adam: A Method for Stochastic Optimization`_.
Args:
params (iterable): iterable of parameters to optimize or dicts defining
parameter groups
lr (float, optional): learning rate (default: 1e-3)
betas (Tuple[float, float], optional): coefficients used for computing
running averages of gradient and its square (default: (0.9, 0.999))
eps (float, optional): term added to the denominator to improve
numerical stability (default: 1e-8)
weight_decay (float, optional): weight decay (L2 penalty) (default: 0)
amsgrad (boolean, optional): whether to use the AMSGrad variant of this
algorithm from the paper `On the Convergence of Adam and Beyond`_
.. _Adam\: A Method for Stochastic Optimization:
https://arxiv.org/abs/1412.6980
.. _On the Convergence of Adam and Beyond:
https://openreview.net/forum?id=ryQu7f-RZ
"""
def __init__(
self,
params,
lr=1e-3,
betas=(0.9, 0.999),
eps=1e-8,
weight_decay=0,
amsgrad=False,
):
defaults = dict(
lr=lr, betas=betas, eps=eps, weight_decay=weight_decay, amsgrad=amsgrad
)
super(Adam, self).__init__(params, defaults)
@property
def supports_memory_efficient_fp16(self):
return True
@property
def supports_flat_params(self):
return True
def step(self, closure=None):
"""Performs a single optimization step.
Args:
closure (callable, optional): A closure that reevaluates the model
and returns the loss.
"""
loss = None
if closure is not None:
loss = closure()
for group in self.param_groups:
for p in group["params"]:
if p.grad is None:
continue
grad = p.grad.data
if grad.dtype in {torch.float16, torch.bfloat16}:
grad = grad.float()
if grad.is_sparse:
raise RuntimeError(
"Adam does not support sparse gradients, please consider SparseAdam instead"
)
amsgrad = group.get("amsgrad", False)
p_data_fp32 = p.data
if p.data.dtype in {torch.float16, torch.bfloat16}:
p_data_fp32 = p_data_fp32.float()
state = self.state[p]
# State initialization
if len(state) == 0:
state["step"] = 0
# Exponential moving average of gradient values
state["exp_avg"] = torch.zeros_like(p_data_fp32)
# Exponential moving average of squared gradient values
state["exp_avg_sq"] = torch.zeros_like(p_data_fp32)
if amsgrad:
# Maintains max of all exp. moving avg. of sq. grad. values
state["max_exp_avg_sq"] = torch.zeros_like(p_data_fp32)
else:
state["exp_avg"] = state["exp_avg"].to(p_data_fp32)
state["exp_avg_sq"] = state["exp_avg_sq"].to(p_data_fp32)
if amsgrad:
state["max_exp_avg_sq"] = state["max_exp_avg_sq"].to(
p_data_fp32
)
exp_avg, exp_avg_sq = state["exp_avg"], state["exp_avg_sq"]
if amsgrad:
max_exp_avg_sq = state["max_exp_avg_sq"]
beta1, beta2 = group["betas"]
state["step"] += 1
# Decay the first and second moment running average coefficient
exp_avg.mul_(beta1).add_(grad, alpha=1 - beta1)
exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1 - beta2)
if amsgrad:
# Maintains the maximum of all 2nd moment running avg. till now
torch.max(max_exp_avg_sq, exp_avg_sq, out=max_exp_avg_sq)
# Use the max. for normalizing running avg. of gradient
denom = max_exp_avg_sq.sqrt().add_(group["eps"])
else:
denom = exp_avg_sq.sqrt().add_(group["eps"])
bias_correction1 = 1 - beta1 ** state["step"]
bias_correction2 = 1 - beta2 ** state["step"]
step_size = group["lr"] * math.sqrt(bias_correction2) / bias_correction1
if group["weight_decay"] != 0:
p_data_fp32.add_(
p_data_fp32, alpha=-group["weight_decay"] * group["lr"]
)
p_data_fp32.addcdiv_(exp_avg, denom, value=-step_size)
if p.data.dtype in {torch.float16, torch.bfloat16}:
p.data.copy_(p_data_fp32)
return loss
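# Tiny convergence sketch (an addition): a few hundred Adam steps on a
# scalar quadratic drive the parameter close to its minimum at zero.
if __name__ == "__main__":
    x = torch.nn.Parameter(torch.tensor([5.0]))
    demo_opt = Adam([x], lr=0.1)
    for _ in range(200):
        demo_opt.zero_grad()
        loss = (x ** 2).sum()
        loss.backward()
        demo_opt.step()
    assert x.abs().item() < 0.5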
| DeepLearningExamples-master | PyTorch/SpeechRecognition/wav2vec2/common/fairseq/optim/adam.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from dataclasses import dataclass, field
from typing import Any, List
from collections.abc import Collection
from fairseq.dataclass import FairseqDataclass
from fairseq.optim import FairseqOptimizer, register_optimizer
from omegaconf import II, OmegaConf
@dataclass
class FairseqLambConfig(FairseqDataclass):
lamb_betas: Any = field(
default=(0.9, 0.999), metadata={"help": "betas for lamb optimizer"}
)
lamb_eps: float = field(
default=1e-8, metadata={"help": "epsilon for lamb optimizer"}
)
weight_decay: float = field(default=0.0, metadata={"help": "weight decay"})
lr: List[float] = II("optimization.lr")
@register_optimizer("lamb", dataclass=FairseqLambConfig)
class FairseqLAMB(FairseqOptimizer):
"""LAMB optimizer."""
def __init__(self, cfg: FairseqLambConfig, params):
super().__init__(cfg)
try:
from apex.optimizers import FusedLAMB
self._optimizer = FusedLAMB(params, **self.optimizer_config)
except ImportError:
raise ImportError("Please install apex to use LAMB optimizer")
@property
def optimizer_config(self):
"""
Return a kwarg dictionary that will be used to override optimizer
args stored in checkpoints. This allows us to load a checkpoint and
resume training using a different set of optimizer args, e.g., with a
different learning rate.
"""
return {
"lr": self.cfg.lr[0] if isinstance(self.cfg.lr, Collection) else self.cfg.lr,
"betas": eval(self.cfg.lamb_betas) if isinstance(self.cfg.lamb_betas, str)
else OmegaConf.to_container(self.cfg.lamb_betas),
"eps": self.cfg.lamb_eps,
"weight_decay": self.cfg.weight_decay,
}
@property
def supports_flat_params(self):
return False
| DeepLearningExamples-master | PyTorch/SpeechRecognition/wav2vec2/common/fairseq/optim/fused_lamb.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
from typing import Dict, Optional, Tuple
import torch
import torch.nn.functional as F
from torch import Tensor, nn
from torch.nn import Parameter
from common.fairseq import utils
from common.fairseq.incremental_decoding_utils import with_incremental_state
from .fairseq_dropout import FairseqDropout
from .quant_noise import quant_noise
class RotaryEmbedding(nn.Module):
def __init__(self, dim):
super().__init__()
inv_freq = 1. / (10000 ** (torch.arange(0, dim, 2).float() / dim))
self.register_buffer('inv_freq', inv_freq)
self.seq_len_cached = None
self.cos_cached = None
self.sin_cached = None
def forward(self, x, seq_dim=0):
seq_len = x.shape[seq_dim]
if seq_len != self.seq_len_cached:
self.seq_len_cached = seq_len
t = torch.arange(x.shape[seq_dim], device=x.device).type_as(self.inv_freq)
freqs = torch.einsum('i,j->ij', t, self.inv_freq)
emb = torch.cat((freqs, freqs), dim=-1).to(x.device)
self.cos_cached = emb.cos()[:, None, :]
self.sin_cached = emb.sin()[:, None, :]
return self.cos_cached, self.sin_cached
def rotate_half(x):
x1, x2 = x[..., :x.shape[-1] // 2], x[..., x.shape[-1] // 2:]
return torch.cat((-x2, x1), dim=x1.ndim - 1)
@torch.jit.script
def apply_rotary_pos_emb(x, cos, sin):
return (x * cos) + (rotate_half(x) * sin)
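def _rotary_embedding_example():
    # A minimal sketch (shapes illustrative) of how the helpers above combine:
    # cos/sin tables are built once per sequence length and applied to queries
    # and keys laid out as (seq_len, batch, dim).
    rotary = RotaryEmbedding(dim=64)
    q = torch.randn(10, 2, 64)                  # (seq_len, batch, embed_dim)
    k = torch.randn(10, 2, 64)
    cos, sin = rotary(q, seq_dim=0)             # each is (seq_len, 1, dim)
    q_rot = apply_rotary_pos_emb(q, cos, sin)
    k_rot = apply_rotary_pos_emb(k, cos, sin)
    return q_rot.shape, k_rot.shape             # both remain (10, 2, 64)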
@with_incremental_state
class MultiheadAttention(nn.Module):
"""Multi-headed attention.
See "Attention Is All You Need" for more details.
"""
def __init__(
self,
embed_dim,
num_heads,
kdim=None,
vdim=None,
dropout=0.0,
bias=True,
add_bias_kv=False,
add_zero_attn=False,
self_attention=False,
encoder_decoder_attention=False,
q_noise=0.0,
qn_block_size=8,
rotary_embeddings=False,
):
super().__init__()
self.embed_dim = embed_dim
self.kdim = kdim if kdim is not None else embed_dim
self.vdim = vdim if vdim is not None else embed_dim
self.qkv_same_dim = self.kdim == embed_dim and self.vdim == embed_dim
self.num_heads = num_heads
self.dropout_module = FairseqDropout(
dropout, module_name=self.__class__.__name__
)
self.rotary_embeddings = rotary_embeddings
if self.rotary_embeddings:
self.rotary_freq = RotaryEmbedding(embed_dim)
else:
self.rotary_freq = None
self.head_dim = embed_dim // num_heads
assert (
self.head_dim * num_heads == self.embed_dim
), "embed_dim must be divisible by num_heads"
self.scaling = self.head_dim ** -0.5
self.self_attention = self_attention
self.encoder_decoder_attention = encoder_decoder_attention
assert not self.self_attention or self.qkv_same_dim, (
"Self-attention requires query, key and " "value to be of the same size"
)
self.k_proj = quant_noise(
nn.Linear(self.kdim, embed_dim, bias=bias), q_noise, qn_block_size
)
self.v_proj = quant_noise(
nn.Linear(self.vdim, embed_dim, bias=bias), q_noise, qn_block_size
)
self.q_proj = quant_noise(
nn.Linear(embed_dim, embed_dim, bias=bias), q_noise, qn_block_size
)
self.out_proj = quant_noise(
nn.Linear(embed_dim, embed_dim, bias=bias), q_noise, qn_block_size
)
if add_bias_kv:
self.bias_k = Parameter(torch.Tensor(1, 1, embed_dim))
self.bias_v = Parameter(torch.Tensor(1, 1, embed_dim))
else:
self.bias_k = self.bias_v = None
self.add_zero_attn = add_zero_attn
self.reset_parameters()
self.onnx_trace = False
def prepare_for_onnx_export_(self):
self.onnx_trace = True
def reset_parameters(self):
if self.qkv_same_dim:
# Empirically observed the convergence to be much better with
# the scaled initialization
nn.init.xavier_uniform_(self.k_proj.weight, gain=1 / math.sqrt(2))
nn.init.xavier_uniform_(self.v_proj.weight, gain=1 / math.sqrt(2))
nn.init.xavier_uniform_(self.q_proj.weight, gain=1 / math.sqrt(2))
else:
nn.init.xavier_uniform_(self.k_proj.weight)
nn.init.xavier_uniform_(self.v_proj.weight)
nn.init.xavier_uniform_(self.q_proj.weight)
nn.init.xavier_uniform_(self.out_proj.weight)
if self.out_proj.bias is not None:
nn.init.constant_(self.out_proj.bias, 0.0)
if self.bias_k is not None:
nn.init.xavier_normal_(self.bias_k)
if self.bias_v is not None:
nn.init.xavier_normal_(self.bias_v)
def forward(
self,
query,
key: Optional[Tensor],
value: Optional[Tensor],
key_padding_mask: Optional[Tensor] = None,
incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]] = None,
need_weights: bool = True,
static_kv: bool = False,
attn_mask: Optional[Tensor] = None,
before_softmax: bool = False,
need_head_weights: bool = False,
) -> Tuple[Tensor, Optional[Tensor]]:
"""Input shape: Time x Batch x Channel
Args:
key_padding_mask (ByteTensor, optional): mask to exclude
keys that are pads, of shape `(batch, src_len)`, where
padding elements are indicated by 1s.
need_weights (bool, optional): return the attention weights,
                averaged over heads (default: True).
attn_mask (ByteTensor, optional): typically used to
implement causal attention, where the mask prevents the
attention from looking forward in time (default: None).
before_softmax (bool, optional): return the raw attention
weights and values before the attention softmax.
need_head_weights (bool, optional): return the attention
weights for each head. Implies *need_weights*. Default:
return the average attention weights over all heads.
"""
if need_head_weights:
need_weights = True
is_tpu = query.device.type == "xla"
tgt_len, bsz, embed_dim = query.size()
src_len = tgt_len
assert embed_dim == self.embed_dim, f"query dim {embed_dim} != {self.embed_dim}"
assert list(query.size()) == [tgt_len, bsz, embed_dim]
if key is not None:
src_len, key_bsz, _ = key.size()
if not torch.jit.is_scripting():
assert key_bsz == bsz
assert value is not None
                assert (src_len, bsz) == value.shape[:2]
if (
not self.rotary_embeddings
and not self.onnx_trace
and not is_tpu # don't use PyTorch version on TPUs
and incremental_state is None
and not static_kv
# A workaround for quantization to work. Otherwise JIT compilation
# treats bias in linear module as method.
and not torch.jit.is_scripting()
):
assert key is not None and value is not None
return F.multi_head_attention_forward(
query,
key,
value,
self.embed_dim,
self.num_heads,
torch.empty([0]),
torch.cat((self.q_proj.bias, self.k_proj.bias, self.v_proj.bias)),
self.bias_k,
self.bias_v,
self.add_zero_attn,
self.dropout_module.p,
self.out_proj.weight,
self.out_proj.bias,
self.training or self.dropout_module.apply_during_inference,
key_padding_mask,
need_weights,
attn_mask,
use_separate_proj_weight=True,
q_proj_weight=self.q_proj.weight,
k_proj_weight=self.k_proj.weight,
v_proj_weight=self.v_proj.weight,
)
if incremental_state is not None:
saved_state = self._get_input_buffer(incremental_state)
if saved_state is not None and "prev_key" in saved_state:
# previous time steps are cached - no need to recompute
# key and value if they are static
if static_kv:
assert self.encoder_decoder_attention and not self.self_attention
key = value = None
else:
saved_state = None
if self.self_attention:
# seq_len, batch_size, dim
q = self.q_proj(query)
k = self.k_proj(query)
v = self.v_proj(query)
if self.rotary_freq is not None:
cos, sin = self.rotary_freq(q)
q = apply_rotary_pos_emb(q, cos, sin)
k = apply_rotary_pos_emb(k, cos, sin)
elif self.encoder_decoder_attention:
# encoder-decoder attention
q = self.q_proj(query)
if key is None:
assert value is None
k = v = None
else:
k = self.k_proj(key)
v = self.v_proj(key)
else:
assert key is not None and value is not None
q = self.q_proj(query)
k = self.k_proj(key)
v = self.v_proj(value)
q *= self.scaling
if self.bias_k is not None:
assert self.bias_v is not None
k = torch.cat([k, self.bias_k.repeat(1, bsz, 1)])
v = torch.cat([v, self.bias_v.repeat(1, bsz, 1)])
if attn_mask is not None:
attn_mask = torch.cat(
[attn_mask, attn_mask.new_zeros(attn_mask.size(0), 1)], dim=1
)
if key_padding_mask is not None:
key_padding_mask = torch.cat(
[
key_padding_mask,
key_padding_mask.new_zeros(key_padding_mask.size(0), 1),
],
dim=1,
)
q = (
q.contiguous()
.view(tgt_len, bsz * self.num_heads, self.head_dim)
.transpose(0, 1)
)
if k is not None:
k = (
k.contiguous()
.view(-1, bsz * self.num_heads, self.head_dim)
.transpose(0, 1)
)
if v is not None:
v = (
v.contiguous()
.view(-1, bsz * self.num_heads, self.head_dim)
.transpose(0, 1)
)
if saved_state is not None:
# saved states are stored with shape (bsz, num_heads, seq_len, head_dim)
if "prev_key" in saved_state:
_prev_key = saved_state["prev_key"]
assert _prev_key is not None
prev_key = _prev_key.view(bsz * self.num_heads, -1, self.head_dim)
if static_kv:
k = prev_key
else:
assert k is not None
k = torch.cat([prev_key, k], dim=1)
src_len = k.size(1)
if "prev_value" in saved_state:
_prev_value = saved_state["prev_value"]
assert _prev_value is not None
prev_value = _prev_value.view(bsz * self.num_heads, -1, self.head_dim)
if static_kv:
v = prev_value
else:
assert v is not None
v = torch.cat([prev_value, v], dim=1)
prev_key_padding_mask: Optional[Tensor] = None
if "prev_key_padding_mask" in saved_state:
prev_key_padding_mask = saved_state["prev_key_padding_mask"]
assert k is not None and v is not None
key_padding_mask = MultiheadAttention._append_prev_key_padding_mask(
key_padding_mask=key_padding_mask,
prev_key_padding_mask=prev_key_padding_mask,
batch_size=bsz,
src_len=k.size(1),
static_kv=static_kv,
)
saved_state["prev_key"] = k.view(bsz, self.num_heads, -1, self.head_dim)
saved_state["prev_value"] = v.view(bsz, self.num_heads, -1, self.head_dim)
saved_state["prev_key_padding_mask"] = key_padding_mask
# In this branch incremental_state is never None
assert incremental_state is not None
incremental_state = self._set_input_buffer(incremental_state, saved_state)
assert k is not None
assert k.size(1) == src_len
# This is part of a workaround to get around fork/join parallelism
# not supporting Optional types.
if key_padding_mask is not None and key_padding_mask.dim() == 0:
key_padding_mask = None
if key_padding_mask is not None:
assert key_padding_mask.size(0) == bsz
assert key_padding_mask.size(1) == src_len
if self.add_zero_attn:
assert v is not None
src_len += 1
k = torch.cat([k, k.new_zeros((k.size(0), 1) + k.size()[2:])], dim=1)
v = torch.cat([v, v.new_zeros((v.size(0), 1) + v.size()[2:])], dim=1)
if attn_mask is not None:
attn_mask = torch.cat(
[attn_mask, attn_mask.new_zeros(attn_mask.size(0), 1)], dim=1
)
if key_padding_mask is not None:
key_padding_mask = torch.cat(
[
key_padding_mask,
torch.zeros(key_padding_mask.size(0), 1).type_as(
key_padding_mask
),
],
dim=1,
)
attn_weights = torch.bmm(q, k.transpose(1, 2))
attn_weights = self.apply_sparse_mask(attn_weights, tgt_len, src_len, bsz)
assert list(attn_weights.size()) == [bsz * self.num_heads, tgt_len, src_len]
if attn_mask is not None:
attn_mask = attn_mask.unsqueeze(0)
if self.onnx_trace:
attn_mask = attn_mask.repeat(attn_weights.size(0), 1, 1)
attn_weights += attn_mask
if key_padding_mask is not None:
# don't attend to padding symbols
attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len)
if not is_tpu:
attn_weights = attn_weights.masked_fill(
key_padding_mask.unsqueeze(1).unsqueeze(2).to(torch.bool),
float("-inf"),
)
else:
attn_weights = attn_weights.transpose(0, 2)
attn_weights = attn_weights.masked_fill(key_padding_mask, float("-inf"))
attn_weights = attn_weights.transpose(0, 2)
attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)
if before_softmax:
return attn_weights, v
attn_weights_float = utils.softmax(
attn_weights, dim=-1, onnx_trace=self.onnx_trace
)
attn_weights = attn_weights_float.type_as(attn_weights)
attn_probs = self.dropout_module(attn_weights)
assert v is not None
attn = torch.bmm(attn_probs, v)
assert list(attn.size()) == [bsz * self.num_heads, tgt_len, self.head_dim]
if self.onnx_trace and attn.size(1) == 1:
# when ONNX tracing a single decoder step (sequence length == 1)
# the transpose is a no-op copy before view, thus unnecessary
attn = attn.contiguous().view(tgt_len, bsz, embed_dim)
else:
attn = attn.transpose(0, 1).contiguous().view(tgt_len, bsz, embed_dim)
attn = self.out_proj(attn)
attn_weights: Optional[Tensor] = None
if need_weights:
attn_weights = attn_weights_float.view(
bsz, self.num_heads, tgt_len, src_len
).transpose(1, 0)
if not need_head_weights:
# average attention weights over heads
attn_weights = attn_weights.mean(dim=0)
return attn, attn_weights
@staticmethod
def _append_prev_key_padding_mask(
key_padding_mask: Optional[Tensor],
prev_key_padding_mask: Optional[Tensor],
batch_size: int,
src_len: int,
static_kv: bool,
) -> Optional[Tensor]:
# saved key padding masks have shape (bsz, seq_len)
if prev_key_padding_mask is not None and static_kv:
new_key_padding_mask = prev_key_padding_mask
elif prev_key_padding_mask is not None and key_padding_mask is not None:
new_key_padding_mask = torch.cat(
[prev_key_padding_mask.float(), key_padding_mask.float()], dim=1
)
# During incremental decoding, as the padding token enters and
# leaves the frame, there will be a time when prev or current
# is None
elif prev_key_padding_mask is not None:
if src_len > prev_key_padding_mask.size(1):
filler = torch.zeros(
(batch_size, src_len - prev_key_padding_mask.size(1)),
device=prev_key_padding_mask.device,
)
new_key_padding_mask = torch.cat(
[prev_key_padding_mask.float(), filler.float()], dim=1
)
else:
new_key_padding_mask = prev_key_padding_mask.float()
elif key_padding_mask is not None:
if src_len > key_padding_mask.size(1):
filler = torch.zeros(
(batch_size, src_len - key_padding_mask.size(1)),
device=key_padding_mask.device,
)
new_key_padding_mask = torch.cat(
[filler.float(), key_padding_mask.float()], dim=1
)
else:
new_key_padding_mask = key_padding_mask.float()
else:
new_key_padding_mask = prev_key_padding_mask
return new_key_padding_mask
@torch.jit.export
def reorder_incremental_state(
self,
incremental_state: Dict[str, Dict[str, Optional[Tensor]]],
new_order: Tensor,
):
"""Reorder buffered internal state (for incremental generation)."""
input_buffer = self._get_input_buffer(incremental_state)
if input_buffer is not None:
for k in input_buffer.keys():
input_buffer_k = input_buffer[k]
if input_buffer_k is not None:
if self.encoder_decoder_attention and input_buffer_k.size(
0
) == new_order.size(0):
break
input_buffer[k] = input_buffer_k.index_select(0, new_order)
incremental_state = self._set_input_buffer(incremental_state, input_buffer)
return incremental_state
def _get_input_buffer(
self, incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]]
) -> Dict[str, Optional[Tensor]]:
result = self.get_incremental_state(incremental_state, "attn_state")
if result is not None:
return result
else:
empty_result: Dict[str, Optional[Tensor]] = {}
return empty_result
def _set_input_buffer(
self,
incremental_state: Dict[str, Dict[str, Optional[Tensor]]],
buffer: Dict[str, Optional[Tensor]],
):
return self.set_incremental_state(incremental_state, "attn_state", buffer)
def apply_sparse_mask(self, attn_weights, tgt_len: int, src_len: int, bsz: int):
return attn_weights
def upgrade_state_dict_named(self, state_dict, name):
prefix = name + "." if name != "" else ""
items_to_add = {}
keys_to_remove = []
for k in state_dict.keys():
if k.endswith(prefix + "in_proj_weight"):
# in_proj_weight used to be q + k + v with same dimensions
dim = int(state_dict[k].shape[0] / 3)
items_to_add[prefix + "q_proj.weight"] = state_dict[k][:dim]
items_to_add[prefix + "k_proj.weight"] = state_dict[k][dim : 2 * dim]
items_to_add[prefix + "v_proj.weight"] = state_dict[k][2 * dim :]
keys_to_remove.append(k)
k_bias = prefix + "in_proj_bias"
if k_bias in state_dict.keys():
dim = int(state_dict[k].shape[0] / 3)
items_to_add[prefix + "q_proj.bias"] = state_dict[k_bias][:dim]
items_to_add[prefix + "k_proj.bias"] = state_dict[k_bias][
dim : 2 * dim
]
items_to_add[prefix + "v_proj.bias"] = state_dict[k_bias][2 * dim :]
keys_to_remove.append(prefix + "in_proj_bias")
for k in keys_to_remove:
del state_dict[k]
for key, value in items_to_add.items():
state_dict[key] = value
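def _multihead_attention_example():
    # A minimal self-attention sketch (sizes illustrative): fairseq attention
    # consumes (time, batch, channel) tensors; with default arguments and no
    # rotary embeddings the call is routed to F.multi_head_attention_forward.
    mha = MultiheadAttention(embed_dim=16, num_heads=4, self_attention=True)
    x = torch.randn(5, 2, 16)                   # (tgt_len, bsz, embed_dim)
    out, attn = mha(query=x, key=x, value=x)
    return out.shape, attn.shape                # out: (5, 2, 16); attn: head-averaged weights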
| DeepLearningExamples-master | PyTorch/SpeechRecognition/wav2vec2/common/fairseq/modules/multihead_attention.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
class GradMultiply(torch.autograd.Function):
@staticmethod
def forward(ctx, x, scale):
ctx.scale = scale
res = x.new(x)
return res
@staticmethod
def backward(ctx, grad):
return grad * ctx.scale, None
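def _grad_multiply_example():
    # A minimal sketch: the forward pass is an identity copy, while gradients
    # flowing back through GradMultiply are scaled (here by 0.1, an arbitrary
    # illustrative value, similar to how wav2vec 2.0 scales the feature
    # extractor gradients).
    x = torch.ones(3, requires_grad=True)
    y = GradMultiply.apply(x, 0.1)
    y.sum().backward()
    return x.grad                               # tensor([0.1000, 0.1000, 0.1000])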
| DeepLearningExamples-master | PyTorch/SpeechRecognition/wav2vec2/common/fairseq/modules/grad_multiply.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
See "Gaussian Error Linear Units (GELUs)" by Dan Hendrycks and Kevin Gimpel with
the corresponding GitHub repo: https://github.com/hendrycks/GELUs
"""
import math
import torch
import torch.nn as nn
def gelu_accurate(x):
if not hasattr(gelu_accurate, "_a"):
gelu_accurate._a = math.sqrt(2 / math.pi)
return (
0.5 * x * (1 + torch.tanh(gelu_accurate._a * (x + 0.044715 * torch.pow(x, 3))))
)
def gelu(x: torch.Tensor) -> torch.Tensor:
return torch.nn.functional.gelu(x.float()).type_as(x)
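def _gelu_example():
    # A minimal sketch: gelu_accurate is the tanh approximation, while gelu
    # delegates to the exact torch implementation in fp32; over a small range
    # the two agree to within roughly 1e-3.
    x = torch.linspace(-3.0, 3.0, steps=13)
    return (gelu(x) - gelu_accurate(x)).abs().max()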
| DeepLearningExamples-master | PyTorch/SpeechRecognition/wav2vec2/common/fairseq/modules/gelu.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""isort:skip_file"""
from .fairseq_dropout import FairseqDropout
from .fp32_group_norm import Fp32GroupNorm, Fp32MaskedGroupNorm, MaskedGroupNorm
from .gelu import gelu, gelu_accurate
from .grad_multiply import GradMultiply
from .gumbel_vector_quantizer import GumbelVectorQuantizer
from .layer_norm import Fp32LayerNorm, LayerNorm
from .multihead_attention import MultiheadAttention
from .same_pad import SamePad
from .transpose_last import TransposeLast
__all__ = [
"Fp32GroupNorm",
"Fp32LayerNorm",
"Fp32MaskedGroupNorm",
"MaskedGroupNorm",
"gelu",
"gelu_accurate",
"GradMultiply",
"GumbelVectorQuantizer",
"LayerNorm",
"MultiheadAttention",
"SamePad",
"TransposeLast",
]
| DeepLearningExamples-master | PyTorch/SpeechRecognition/wav2vec2/common/fairseq/modules/__init__.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
import torch.nn as nn
def quant_noise(module, p, block_size):
"""
Wraps modules and applies quantization noise to the weights for
subsequent quantization with Iterative Product Quantization as
described in "Training with Quantization Noise for Extreme Model Compression"
Args:
- module: nn.Module
- p: amount of Quantization Noise
- block_size: size of the blocks for subsequent quantization with iPQ
Remarks:
- Module weights must have the right sizes wrt the block size
- Only Linear, Embedding and Conv2d modules are supported for the moment
- For more detail on how to quantize by blocks with convolutional weights,
see "And the Bit Goes Down: Revisiting the Quantization of Neural Networks"
- We implement the simplest form of noise here as stated in the paper
which consists in randomly dropping blocks
"""
# if no quantization noise, don't register hook
if p <= 0:
return module
# supported modules
assert isinstance(module, (nn.Linear, nn.Embedding, nn.Conv2d))
# test whether module.weight has the right sizes wrt block_size
is_conv = module.weight.ndim == 4
# 2D matrix
if not is_conv:
assert (
module.weight.size(1) % block_size == 0
), "Input features must be a multiple of block sizes"
# 4D matrix
else:
# 1x1 convolutions
if module.kernel_size == (1, 1):
assert (
module.in_channels % block_size == 0
), "Input channels must be a multiple of block sizes"
# regular convolutions
else:
k = module.kernel_size[0] * module.kernel_size[1]
assert k % block_size == 0, "Kernel size must be a multiple of block size"
def _forward_pre_hook(mod, input):
# no noise for evaluation
if mod.training:
if not is_conv:
# gather weight and sizes
weight = mod.weight
in_features = weight.size(1)
out_features = weight.size(0)
# split weight matrix into blocks and randomly drop selected blocks
mask = torch.zeros(
in_features // block_size * out_features, device=weight.device
)
mask.bernoulli_(p)
mask = mask.repeat_interleave(block_size, -1).view(-1, in_features)
else:
# gather weight and sizes
weight = mod.weight
in_channels = mod.in_channels
out_channels = mod.out_channels
# split weight matrix into blocks and randomly drop selected blocks
if mod.kernel_size == (1, 1):
mask = torch.zeros(
int(in_channels // block_size * out_channels),
device=weight.device,
)
mask.bernoulli_(p)
mask = mask.repeat_interleave(block_size, -1).view(-1, in_channels)
else:
mask = torch.zeros(
weight.size(0), weight.size(1), device=weight.device
)
mask.bernoulli_(p)
mask = (
mask.unsqueeze(2)
.unsqueeze(3)
.repeat(1, 1, mod.kernel_size[0], mod.kernel_size[1])
)
# scale weights and apply mask
mask = mask.to(
torch.bool
) # x.bool() is not currently supported in TorchScript
s = 1 / (1 - p)
mod.weight.data = s * weight.masked_fill(mask, 0)
module.register_forward_pre_hook(_forward_pre_hook)
return module
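def _quant_noise_example():
    # A minimal sketch (p and block_size illustrative): wrap a Linear layer so
    # that, during training, random blocks of its weight matrix are dropped
    # (and the remainder rescaled) on every forward pass.
    layer = quant_noise(nn.Linear(16, 8), p=0.1, block_size=8)
    layer.train()
    x = torch.randn(4, 16)
    return layer(x).shape                       # (4, 8)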
| DeepLearningExamples-master | PyTorch/SpeechRecognition/wav2vec2/common/fairseq/modules/quant_noise.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Layer norm done in fp32 (for fp16 training)."""
import torch
import torch.nn as nn
import torch.nn.functional as F
class Fp32GroupNorm(nn.GroupNorm):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def forward(self, input):
output = F.group_norm(
input.float(),
self.num_groups,
self.weight.float() if self.weight is not None else None,
self.bias.float() if self.bias is not None else None,
self.eps,
)
return output.type_as(input)
class MaskedGroupNorm(nn.Module):
"""GroupNorm layer which skips padding.
    In the wav2vec 2.0 encoder, where the batch size is small and the time
    dimension is huge, this is nearly as fast as nn.GroupNorm.
Ready for TorchScript, favors composition over inheritance.
"""
def __init__(self, num_groups, num_channels, eps=1e-05, affine=True,
device=None, dtype=None):
assert num_groups == num_channels, (
"num_groups != num_channels not yet supported in MaskedGroupNorm")
super().__init__()
self._group_norm = nn.GroupNorm(num_groups, num_channels, eps=eps,
affine=affine, device=device,
dtype=dtype)
def forward(self, x, x_lens):
var = torch.zeros_like(x[:, :, 0])
mean = torch.zeros_like(x[:, :, 0])
for i in range(x.size(0)):
mean[i] = torch.mean(x[i, :, :x_lens[i]], dim=1)
var[i] = torch.var(x[i, :, :x_lens[i]], dim=1, unbiased=False)
out = (x - mean[:, :, None]) / torch.sqrt(var[:, :, None] + self._group_norm.eps)
if self._group_norm.affine:
return out * self._group_norm.weight[None, :, None] + self._group_norm.bias[None, :, None]
else:
return out
class Fp32MaskedGroupNorm(nn.Module):
"""GroupNorm layer which skips padding.
    In the wav2vec 2.0 encoder, where the batch size is small and the time
    dimension is huge, this is nearly as fast as nn.GroupNorm.
Ready for TorchScript, favors composition over inheritance.
"""
def __init__(self, num_groups, num_channels, eps=1e-05, affine=True,
device=None, dtype=None):
assert num_groups == num_channels, (
"num_groups != num_channels not yet supported in MaskedGroupNorm")
super().__init__()
self._group_norm = nn.GroupNorm(num_groups, num_channels, eps=eps,
affine=affine, device=device,
dtype=dtype)
def hook(state_dict, prefix, *args, **kwargs):
"""Renames keys from layers which used inheritance."""
new_sd = {}
for k, v in state_dict.items():
if not k.startswith(prefix):
new_sd[k] = v
else:
*pref, param = k.split(".")
new_k = ".".join(pref + ["_group_norm", param])
new_sd[new_k] = v
state_dict.clear()
state_dict.update(new_sd)
self._register_load_state_dict_pre_hook(hook)
def forward(self, x, x_lens):
return self._forward(
x.float(),
x_lens,
self._group_norm.weight.float() if self._group_norm.weight is not None else None,
self._group_norm.bias.float() if self._group_norm.bias is not None else None,
).type_as(x)
def _forward(self, x, x_lens, weight, bias):
var = torch.zeros_like(x[:, :, 0])
mean = torch.zeros_like(x[:, :, 0])
for i in range(x.size(0)):
mean[i] = torch.mean(x[i, :, :x_lens[i]], dim=1)
var[i] = torch.var(x[i, :, :x_lens[i]], dim=1, unbiased=False)
out = (x - mean[:, :, None]) / torch.sqrt(var[:, :, None] + self._group_norm.eps)
if self._group_norm.affine:
return out * weight[None, :, None] + bias[None, :, None]
else:
return out
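def _masked_group_norm_example():
    # A minimal sketch (sizes illustrative): with num_groups == num_channels
    # and no padding (x_lens spanning the full time dimension), MaskedGroupNorm
    # matches plain nn.GroupNorm.
    x = torch.randn(2, 4, 10)                   # (batch, channels, time)
    x_lens = torch.tensor([10, 10])
    masked = MaskedGroupNorm(num_groups=4, num_channels=4, affine=False)
    reference = nn.GroupNorm(num_groups=4, num_channels=4, affine=False)
    return torch.allclose(masked(x, x_lens), reference(x), atol=1e-5)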
| DeepLearningExamples-master | PyTorch/SpeechRecognition/wav2vec2/common/fairseq/modules/fp32_group_norm.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from typing import List, Optional
import torch.nn as nn
import torch.nn.functional as F
logger = logging.getLogger(__name__)
class FairseqDropout(nn.Module):
def __init__(self, p, module_name=None):
super().__init__()
self.p = p
self.module_name = module_name
self.apply_during_inference = False
def forward(self, x, inplace: bool = False):
if self.p > 0 and (self.training or self.apply_during_inference):
return F.dropout(x, p=self.p, training=True, inplace=inplace)
else:
return x
def make_generation_fast_(
self,
name: str,
retain_dropout: bool = False,
retain_dropout_modules: Optional[List[str]] = None,
**kwargs
):
if retain_dropout:
if retain_dropout_modules is not None and self.module_name is None:
logger.warning(
"Cannot enable dropout during inference for module {} "
"because module_name was not set".format(name)
)
elif (
retain_dropout_modules is None # if None, apply to all modules
or self.module_name in retain_dropout_modules
):
logger.info(
"Enabling dropout during inference for module: {}".format(name)
)
self.apply_during_inference = True
else:
logger.info("Disabling dropout for module: {}".format(name))
| DeepLearningExamples-master | PyTorch/SpeechRecognition/wav2vec2/common/fairseq/modules/fairseq_dropout.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
import torch.nn as nn
import torch.nn.functional as F
class GumbelVectorQuantizer(nn.Module):
def __init__(
self,
dim,
num_vars,
temp,
groups,
combine_groups,
vq_dim,
time_first,
activation=nn.GELU(),
weight_proj_depth=1,
weight_proj_factor=1,
):
"""Vector quantization using gumbel softmax
Args:
dim: input dimension (channels)
num_vars: number of quantized vectors per group
temp: temperature for training. this should be a tuple of 3 elements: (start, stop, decay factor)
groups: number of groups for vector quantization
combine_groups: whether to use the vectors for all groups
vq_dim: dimensionality of the resulting quantized vector
time_first: if true, expect input in BxTxC format, otherwise in BxCxT
activation: what activation to use (should be a module). this is only used if weight_proj_depth is > 1
weight_proj_depth: number of layers (with activation in between) to project input before computing logits
weight_proj_factor: this is used only if weight_proj_depth is > 1. scales the inner dimensionality of
projections by this factor
"""
super().__init__()
self.groups = groups
self.combine_groups = combine_groups
self.input_dim = dim
self.num_vars = num_vars
self.time_first = time_first
assert (
vq_dim % groups == 0
), f"dim {vq_dim} must be divisible by groups {groups} for concatenation"
var_dim = vq_dim // groups
num_groups = groups if not combine_groups else 1
self.vars = nn.Parameter(torch.FloatTensor(1, num_groups * num_vars, var_dim))
nn.init.uniform_(self.vars)
if weight_proj_depth > 1:
def block(input_dim, output_dim):
return nn.Sequential(nn.Linear(input_dim, output_dim), activation)
inner_dim = self.input_dim * weight_proj_factor
self.weight_proj = nn.Sequential(
*[
block(self.input_dim if i == 0 else inner_dim, inner_dim)
for i in range(weight_proj_depth - 1)
],
nn.Linear(inner_dim, groups * num_vars),
)
else:
self.weight_proj = nn.Linear(self.input_dim, groups * num_vars)
nn.init.normal_(self.weight_proj.weight, mean=0, std=1)
nn.init.zeros_(self.weight_proj.bias)
if isinstance(temp, str):
import ast
temp = ast.literal_eval(temp)
assert len(temp) == 3, f"{temp}, {len(temp)}"
self.max_temp, self.min_temp, self.temp_decay = temp
self.curr_temp = self.max_temp
self.codebook_indices = None
def set_num_updates(self, num_updates):
self.curr_temp = max(
self.max_temp * self.temp_decay ** num_updates, self.min_temp
)
def get_codebook_indices(self):
if self.codebook_indices is None:
from itertools import product
p = [range(self.num_vars)] * self.groups
inds = list(product(*p))
self.codebook_indices = torch.tensor(
inds, dtype=torch.long, device=self.vars.device
).flatten()
if not self.combine_groups:
self.codebook_indices = self.codebook_indices.view(
self.num_vars ** self.groups, -1
)
for b in range(1, self.groups):
self.codebook_indices[:, b] += self.num_vars * b
self.codebook_indices = self.codebook_indices.flatten()
return self.codebook_indices
def codebook(self):
indices = self.get_codebook_indices()
return (
self.vars.squeeze(0)
.index_select(0, indices)
.view(self.num_vars ** self.groups, -1)
)
def sample_from_codebook(self, b, n):
indices = self.get_codebook_indices()
indices = indices.view(-1, self.groups)
cb_size = indices.size(0)
assert (
n < cb_size
), f"sample size {n} is greater than size of codebook {cb_size}"
sample_idx = torch.randint(low=0, high=cb_size, size=(b * n,))
indices = indices[sample_idx]
z = self.vars.squeeze(0).index_select(0, indices.flatten()).view(b, n, -1)
return z
def to_codebook_index(self, indices):
res = indices.new_full(indices.shape[:-1], 0)
for i in range(self.groups):
exponent = self.groups - i - 1
res += indices[..., i] * (self.num_vars ** exponent)
return res
def forward_idx(self, x):
res = self.forward(x, produce_targets=True)
return res["x"], res["targets"]
def forward(self, x, produce_targets=False):
result = {"num_vars": self.num_vars * self.groups}
if not self.time_first:
x = x.transpose(1, 2)
bsz, tsz, fsz = x.shape
x = x.reshape(-1, fsz)
x = self.weight_proj(x)
x = x.view(bsz * tsz * self.groups, -1)
_, k = x.max(-1)
hard_x = (
x.new_zeros(*x.shape)
.scatter_(-1, k.view(-1, 1), 1.0)
.view(bsz * tsz, self.groups, -1)
)
hard_probs = torch.mean(hard_x.float(), dim=0)
result["code_perplexity"] = torch.exp(
-torch.sum(hard_probs * torch.log(hard_probs + 1e-7), dim=-1)
).sum()
avg_probs = torch.softmax(
x.view(bsz * tsz, self.groups, -1).float(), dim=-1
).mean(dim=0)
result["prob_perplexity"] = torch.exp(
-torch.sum(avg_probs * torch.log(avg_probs + 1e-7), dim=-1)
).sum()
result["temp"] = self.curr_temp
if self.training:
x = F.gumbel_softmax(x.float(), tau=self.curr_temp, hard=True).type_as(x)
else:
x = hard_x
x = x.view(bsz * tsz, -1)
vars = self.vars
if self.combine_groups:
vars = vars.repeat(1, self.groups, 1)
if produce_targets:
result["targets"] = (
x.view(bsz * tsz * self.groups, -1)
.argmax(dim=-1)
.view(bsz, tsz, self.groups)
.detach()
)
x = x.unsqueeze(-1) * vars
x = x.view(bsz * tsz, self.groups, self.num_vars, -1)
x = x.sum(-2)
x = x.view(bsz, tsz, -1)
if not self.time_first:
x = x.transpose(1, 2) # BTC -> BCT
result["x"] = x
return result
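def _gumbel_vq_example():
    # A minimal sketch of the quantizer in a wav2vec 2.0-style configuration
    # (all sizes illustrative): two groups, a small codebook per group, and
    # BxTxC inputs because time_first=True.
    vq = GumbelVectorQuantizer(
        dim=32, num_vars=8, temp=(2.0, 0.5, 0.999), groups=2,
        combine_groups=False, vq_dim=16, time_first=True,
    )
    x = torch.randn(4, 10, 32)                  # (batch, time, channels)
    out = vq(x, produce_targets=True)
    return out["x"].shape, out["targets"].shape  # (4, 10, 16), (4, 10, 2)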
| DeepLearningExamples-master | PyTorch/SpeechRecognition/wav2vec2/common/fairseq/modules/gumbel_vector_quantizer.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
transpose last 2 dimensions of the input
"""
import torch.nn as nn
class TransposeLast(nn.Module):
def __init__(self, deconstruct_idx=None):
super().__init__()
self.deconstruct_idx = deconstruct_idx
def forward(self, x):
if self.deconstruct_idx is not None:
x = x[self.deconstruct_idx]
return x.transpose(-2, -1)
| DeepLearningExamples-master | PyTorch/SpeechRecognition/wav2vec2/common/fairseq/modules/transpose_last.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
import torch.nn as nn
import torch.nn.functional as F
TORCHSCRIPT = False
try:
from apex.normalization import FusedLayerNorm as _FusedLayerNorm
has_fused_layernorm = True
class FusedLayerNorm(_FusedLayerNorm):
@torch.jit.unused
def forward(self, x):
if not x.is_cuda:
return super().forward(x)
else:
with torch.cuda.device(x.device):
return super().forward(x)
except ImportError:
has_fused_layernorm = False
def LayerNorm(normalized_shape, eps=1e-5, elementwise_affine=True, export=False):
if torch.jit.is_scripting() or TORCHSCRIPT:
export = True
if not export and torch.cuda.is_available() and has_fused_layernorm:
return FusedLayerNorm(normalized_shape, eps, elementwise_affine)
return torch.nn.LayerNorm(normalized_shape, eps, elementwise_affine)
class Fp32LayerNorm(nn.LayerNorm):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def forward(self, input):
output = F.layer_norm(
input.float(),
self.normalized_shape,
self.weight.float() if self.weight is not None else None,
self.bias.float() if self.bias is not None else None,
self.eps,
)
return output.type_as(input)
| DeepLearningExamples-master | PyTorch/SpeechRecognition/wav2vec2/common/fairseq/modules/layer_norm.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from torch import nn
class SamePad(nn.Module):
def __init__(self, kernel_size, causal=False):
super().__init__()
if causal:
self.remove = kernel_size - 1
else:
self.remove = 1 if kernel_size % 2 == 0 else 0
def forward(self, x):
if self.remove > 0:
x = x[:, :, : -self.remove]
return x
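def _same_pad_example():
    # A minimal sketch (sizes illustrative): an even kernel with
    # padding=kernel_size//2 yields one extra output frame; SamePad trims it
    # so the convolution output keeps the input length.
    import torch
    conv = nn.Conv1d(4, 4, kernel_size=2, padding=1)
    trim = SamePad(kernel_size=2)
    x = torch.randn(1, 4, 10)
    y = conv(x)
    return y.shape[-1], trim(y).shape[-1]       # 11, 10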
| DeepLearningExamples-master | PyTorch/SpeechRecognition/wav2vec2/common/fairseq/modules/same_pad.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from numba import jit
@jit(nopython=True)
def batch_by_size_vec(indices, num_tokens_vec, max_tokens, max_sentences, bsz_mult):
"""A numba version of cython batch_by_size_vec from data_utils_fast.pyx"""
indices_len = indices.shape[0]
batches_ends = np.zeros(indices_len, dtype=np.int32)
batches_ends_view = batches_ends
num_tokens_view = num_tokens_vec
pos = 0
new_batch_end = 0
new_batch_max_tokens = 0
new_batch_sentences = 0
new_batch_num_tokens = 0
overflow = False
size_matches_with_bsz_mult = False
batches_count = 0
batch_start = 0
tail_max_tokens = 0
batch_max_tokens = 0
for pos in range(indices_len):
# At every pos we keep stats about the last complete batch [batch_start:batch_end),
# and tail [batch_end:pos].
# 1) Every time when (batch + tail) forms a valid batch
# (according to max_tokens, max_sentences and bsz_mult) we append tail to batch.
# 2) When (batch+tail) violates max_tokens or max_sentences constraints
# we finalize running batch, and tail becomes a new batch.
# 3) There is a corner case when tail also violates constraints.
# In that situation [batch_end:pos-1] (tail without the current pos)
# gets added to the finalized batches, while [pos:pos] becomes a new tail.
#
# Important: For the sake of performance try to avoid using function calls within this loop.
tail_max_tokens = tail_max_tokens \
if tail_max_tokens > num_tokens_view[pos] \
else num_tokens_view[pos]
new_batch_end = pos + 1
new_batch_max_tokens = batch_max_tokens \
if batch_max_tokens > tail_max_tokens \
else tail_max_tokens
new_batch_sentences = new_batch_end - batch_start
new_batch_num_tokens = new_batch_sentences * new_batch_max_tokens
overflow = (new_batch_sentences > max_sentences > 0 or
new_batch_num_tokens > max_tokens > 0)
size_matches_with_bsz_mult = (new_batch_sentences < bsz_mult or
new_batch_sentences % bsz_mult == 0)
if overflow:
tail_num_tokens = tail_max_tokens * \
(new_batch_end - batches_ends_view[batches_count])
tail_overflow = tail_num_tokens > max_tokens > 0
# In case of a tail overflow finalize two batches
if tail_overflow:
batches_count += 1
batches_ends_view[batches_count] = pos
tail_max_tokens = num_tokens_view[pos]
batch_start = batches_ends_view[batches_count]
batches_count += 1
new_batch_max_tokens = tail_max_tokens
if overflow or size_matches_with_bsz_mult:
batches_ends_view[batches_count] = new_batch_end
batch_max_tokens = new_batch_max_tokens
tail_max_tokens = 0
if batches_ends_view[batches_count] != indices_len:
batches_count += 1
# Memory and time-efficient split
return np.split(indices, batches_ends[:batches_count])
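def _batch_by_size_vec_example():
    # A minimal sketch (values illustrative): six utterances batched under a
    # cap of max_tokens=10, where each batch is costed as
    # (number of sentences) * (longest sentence in the batch); the
    # max_sentences and bsz_mult constraints are effectively disabled here.
    indices = np.arange(6, dtype=np.int64)
    num_tokens = np.array([2, 3, 5, 5, 4, 1], dtype=np.int64)
    batches = batch_by_size_vec(indices, num_tokens, 10, -1, 1)
    return [b.tolist() for b in batches]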
| DeepLearningExamples-master | PyTorch/SpeechRecognition/wav2vec2/common/fairseq/data/data_utils_fast.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""isort:skip_file"""
from .add_target_dataset import AddTargetDataset, BaseWrapperDataset
from .audio.raw_audio_dataset import FileAudioDataset
from .dictionary import Dictionary
__all__ = [
"AddTargetDataset",
"Dictionary",
"FileAudioDataset",
]
| DeepLearningExamples-master | PyTorch/SpeechRecognition/wav2vec2/common/fairseq/data/__init__.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
try:
from collections.abc import Iterable
except ImportError:
from collections import Iterable
import contextlib
import itertools
import logging
import os
import re
import warnings
from typing import Optional, Tuple
import numpy as np
import torch
from common.fairseq import utils
from common.fairseq.data.data_utils_fast import batch_by_size_vec
from common.fairseq.file_io import PathManager
logger = logging.getLogger(__name__)
def infer_language_pair(path):
"""Infer language pair from filename: <split>.<lang1>-<lang2>.(...).idx"""
src, dst = None, None
for filename in PathManager.ls(path):
parts = filename.split(".")
if len(parts) >= 3 and len(parts[1].split("-")) == 2:
return parts[1].split("-")
return src, dst
def collate_tokens(
values,
pad_idx,
eos_idx=None,
left_pad=False,
move_eos_to_beginning=False,
pad_to_length=None,
pad_to_multiple=1,
pad_to_bsz=None,
):
"""Convert a list of 1d tensors into a padded 2d tensor."""
size = max(v.size(0) for v in values)
size = size if pad_to_length is None else max(size, pad_to_length)
if pad_to_multiple != 1 and size % pad_to_multiple != 0:
size = int(((size - 0.1) // pad_to_multiple + 1) * pad_to_multiple)
batch_size = len(values) if pad_to_bsz is None else max(len(values), pad_to_bsz)
res = values[0].new(batch_size, size).fill_(pad_idx)
def copy_tensor(src, dst):
assert dst.numel() == src.numel()
if move_eos_to_beginning:
if eos_idx is None:
# if no eos_idx is specified, then use the last token in src
dst[0] = src[-1]
else:
dst[0] = eos_idx
dst[1:] = src[:-1]
else:
dst.copy_(src)
for i, v in enumerate(values):
copy_tensor(v, res[i][size - len(v) :] if left_pad else res[i][: len(v)])
return res
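def _collate_tokens_example():
    # A minimal sketch (values illustrative): right-pad three variable-length
    # sequences with pad_idx=1 into a single (3, 4) batch.
    values = [torch.tensor([5, 6]), torch.tensor([7]), torch.tensor([8, 9, 10, 11])]
    return collate_tokens(values, pad_idx=1)
    # tensor([[ 5,  6,  1,  1],
    #         [ 7,  1,  1,  1],
    #         [ 8,  9, 10, 11]])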
def load_indexed_dataset(
path, dictionary=None, dataset_impl=None, combine=False, default="cached"
):
"""A helper function for loading indexed datasets.
Args:
path (str): path to indexed dataset (e.g., 'data-bin/train')
dictionary (~fairseq.data.Dictionary): data dictionary
dataset_impl (str, optional): which dataset implementation to use. If
not provided, it will be inferred automatically. For legacy indexed
data we use the 'cached' implementation by default.
combine (bool, optional): automatically load and combine multiple
datasets. For example, if *path* is 'data-bin/train', then we will
combine 'data-bin/train', 'data-bin/train1', ... and return a
single ConcatDataset instance.
"""
import fairseq.data.indexed_dataset as indexed_dataset
from fairseq.data.concat_dataset import ConcatDataset
datasets = []
for k in itertools.count():
path_k = path + (str(k) if k > 0 else "")
try:
path_k = indexed_dataset.get_indexed_dataset_to_local(path_k)
except Exception as e:
if "StorageException: [404] Path not found" in str(e):
logger.warning(f"path_k: {e} not found")
else:
raise e
dataset_impl_k = dataset_impl
if dataset_impl_k is None:
dataset_impl_k = indexed_dataset.infer_dataset_impl(path_k)
dataset = indexed_dataset.make_dataset(
path_k,
impl=dataset_impl_k or default,
fix_lua_indexing=True,
dictionary=dictionary,
)
if dataset is None:
break
logger.info("loaded {:,} examples from: {}".format(len(dataset), path_k))
datasets.append(dataset)
if not combine:
break
if len(datasets) == 0:
return None
elif len(datasets) == 1:
return datasets[0]
else:
return ConcatDataset(datasets)
@contextlib.contextmanager
def numpy_seed(seed, *addl_seeds):
"""Context manager which seeds the NumPy PRNG with the specified seed and
restores the state afterward"""
if seed is None:
yield
return
if len(addl_seeds) > 0:
seed = int(hash((seed, *addl_seeds)) % 1e6)
state = np.random.get_state()
np.random.seed(seed)
try:
yield
finally:
np.random.set_state(state)
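def _numpy_seed_example():
    # A minimal sketch: draws made inside the context manager are reproducible
    # for a given seed, and the global NumPy RNG state is restored afterwards.
    with numpy_seed(1234):
        first = np.random.randint(0, 100, size=3)
    with numpy_seed(1234):
        second = np.random.randint(0, 100, size=3)
    return np.array_equal(first, second)        # True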
def collect_filtered(function, iterable, filtered):
"""
Similar to :func:`filter` but collects filtered elements in ``filtered``.
Args:
function (callable): function that returns ``False`` for elements that
should be filtered
iterable (iterable): iterable to filter
filtered (list): list to store filtered elements
"""
for el in iterable:
if function(el):
yield el
else:
filtered.append(el)
def _filter_by_size_dynamic(indices, size_fn, max_positions, raise_exception=False):
def check_size(idx):
if isinstance(max_positions, float) or isinstance(max_positions, int):
return size_fn(idx) <= max_positions
elif isinstance(max_positions, dict):
idx_size = size_fn(idx)
assert isinstance(idx_size, dict)
intersect_keys = set(max_positions.keys()) & set(idx_size.keys())
return all(
all(
a is None or b is None or a <= b
for a, b in zip(idx_size[key], max_positions[key])
)
for key in intersect_keys
)
else:
# For MultiCorpusSampledDataset, will generalize it later
if not isinstance(size_fn(idx), Iterable):
return all(size_fn(idx) <= b for b in max_positions)
return all(
a is None or b is None or a <= b
for a, b in zip(size_fn(idx), max_positions)
)
ignored = []
itr = collect_filtered(check_size, indices, ignored)
indices = np.fromiter(itr, dtype=np.int64, count=-1)
return indices, ignored
def filter_by_size(indices, dataset, max_positions, raise_exception=False):
"""
[deprecated] Filter indices based on their size.
Use `FairseqDataset::filter_indices_by_size` instead.
Args:
indices (List[int]): ordered list of dataset indices
dataset (FairseqDataset): fairseq dataset instance
max_positions (tuple): filter elements larger than this size.
Comparisons are done component-wise.
raise_exception (bool, optional): if ``True``, raise an exception if
any elements are filtered (default: False).
"""
warnings.warn(
"data_utils.filter_by_size is deprecated. "
"Use `FairseqDataset::filter_indices_by_size` instead.",
stacklevel=2,
)
if isinstance(max_positions, float) or isinstance(max_positions, int):
if hasattr(dataset, "sizes") and isinstance(dataset.sizes, np.ndarray):
ignored = indices[dataset.sizes[indices] > max_positions].tolist()
indices = indices[dataset.sizes[indices] <= max_positions]
elif (
hasattr(dataset, "sizes")
and isinstance(dataset.sizes, list)
and len(dataset.sizes) == 1
):
ignored = indices[dataset.sizes[0][indices] > max_positions].tolist()
indices = indices[dataset.sizes[0][indices] <= max_positions]
else:
indices, ignored = _filter_by_size_dynamic(
indices, dataset.size, max_positions
)
else:
indices, ignored = _filter_by_size_dynamic(indices, dataset.size, max_positions)
if len(ignored) > 0 and raise_exception:
raise Exception(
(
"Size of sample #{} is invalid (={}) since max_positions={}, "
"skip this example with --skip-invalid-size-inputs-valid-test"
).format(ignored[0], dataset.size(ignored[0]), max_positions)
)
if len(ignored) > 0:
logger.warning(
(
"{} samples have invalid sizes and will be skipped, "
"max_positions={}, first few sample ids={}"
).format(len(ignored), max_positions, ignored[:10])
)
return indices
def filter_paired_dataset_indices_by_size(src_sizes, tgt_sizes, indices, max_sizes):
"""Filter a list of sample indices. Remove those that are longer
than specified in max_sizes.
Args:
indices (np.array): original array of sample indices
max_sizes (int or list[int] or tuple[int]): max sample size,
can be defined separately for src and tgt (then list or tuple)
Returns:
np.array: filtered sample array
list: list of removed indices
"""
if max_sizes is None:
return indices, []
if type(max_sizes) in (int, float):
max_src_size, max_tgt_size = max_sizes, max_sizes
else:
max_src_size, max_tgt_size = max_sizes
if tgt_sizes is None:
ignored = indices[src_sizes[indices] > max_src_size]
else:
ignored = indices[
(src_sizes[indices] > max_src_size) | (tgt_sizes[indices] > max_tgt_size)
]
if len(ignored) > 0:
if tgt_sizes is None:
indices = indices[src_sizes[indices] <= max_src_size]
else:
indices = indices[
(src_sizes[indices] <= max_src_size)
& (tgt_sizes[indices] <= max_tgt_size)
]
return indices, ignored.tolist()
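def _filter_paired_example():
    # A minimal sketch (values illustrative): drop indices whose source length
    # exceeds 5 or whose target length exceeds 6.
    src_sizes = np.array([3, 9, 4, 5])
    tgt_sizes = np.array([2, 2, 8, 6])
    kept, dropped = filter_paired_dataset_indices_by_size(
        src_sizes, tgt_sizes, np.arange(4), (5, 6)
    )
    return kept.tolist(), dropped               # [0, 3], [1, 2]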
def batch_by_size(
indices,
num_tokens_fn,
num_tokens_vec=None,
max_tokens=None,
max_sentences=None,
required_batch_size_multiple=1,
num_concat_batches=1,
):
"""
Yield mini-batches of indices bucketed by size. Batches may contain
sequences of different lengths.
Args:
indices (List[int]): ordered list of dataset indices
num_tokens_fn (callable): function that returns the number of tokens at
a given index
num_tokens_vec (List[int], optional): precomputed vector of the number
of tokens for each index in indices (to enable faster batch generation)
max_tokens (int, optional): max number of tokens in each batch
(default: None).
max_sentences (int, optional): max number of sentences in each
batch (default: None).
required_batch_size_multiple (int, optional): require batch size to
be less than N or a multiple of N (default: 1).
"""
# added int() to avoid TypeError: an integer is required
max_tokens = int(max_tokens) if max_tokens is not None else -1
max_sentences = max_sentences if max_sentences is not None else -1
bsz_mult = required_batch_size_multiple
if not isinstance(indices, np.ndarray):
indices = np.fromiter(indices, dtype=np.int64, count=-1)
    if num_tokens_vec is not None and not isinstance(num_tokens_vec, np.ndarray):
        num_tokens_vec = np.fromiter(num_tokens_vec, dtype=np.int64, count=-1)
    if num_tokens_vec is None:
        # No precomputed lengths were given; query num_tokens_fn per index.
        num_tokens_vec = np.zeros(indices.shape[0], dtype=np.int64)
        for pos in range(indices.shape[0]):
            num_tokens_vec[pos] = num_tokens_fn(indices[pos])
assert max_tokens <= 0 or np.max(num_tokens_vec) <= max_tokens, (
f"Sentences lengths should not exceed max_tokens={max_tokens}"
)
if indices.shape[0] == 0:
return []
batches = batch_by_size_vec(indices, num_tokens_vec, max_tokens,
max_sentences, bsz_mult)
if num_concat_batches > 1:
# Concatenate subsequent batches
ga = num_concat_batches
grouped = [batches[i*ga:(i+1)*ga] for i in range(len(batches) // ga)]
grouped_batches = [np.concatenate(g) for g in grouped]
return grouped_batches, batches
else:
return batches, batches
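def _batch_by_size_example():
    # A minimal sketch (values illustrative): lengths are obtained through
    # num_tokens_fn, batches are capped at a padded size of max_tokens=12
    # tokens, and with the default num_concat_batches=1 both returned values
    # are the same list of index arrays.
    lengths = [3, 4, 5, 6, 2, 7]
    grouped, raw = batch_by_size(
        range(len(lengths)),
        num_tokens_fn=lambda i: lengths[i],
        max_tokens=12,
    )
    return [b.tolist() for b in raw]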
def post_process(sentence: str, symbol: str):
if symbol == "sentencepiece":
sentence = sentence.replace(" ", "").replace("\u2581", " ").strip()
elif symbol == "wordpiece":
sentence = sentence.replace(" ", "").replace("_", " ").strip()
elif symbol == "letter":
sentence = sentence.replace(" ", "").replace("|", " ").strip()
elif symbol == "silence":
import re
sentence = sentence.replace("<SIL>", "")
sentence = re.sub(' +', ' ', sentence).strip()
elif symbol == "_EOW":
sentence = sentence.replace(" ", "").replace("_EOW", " ").strip()
elif symbol in {"subword_nmt", "@@ ", "@@"}:
if symbol == "subword_nmt":
symbol = "@@ "
sentence = (sentence + " ").replace(symbol, "").rstrip()
elif symbol == "none":
pass
elif symbol is not None:
raise NotImplementedError(f"Unknown post_process option: {symbol}")
return sentence
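def _post_process_example():
    # A minimal sketch: wav2vec 2.0 letter targets use "|" as the word
    # boundary, so "letter" post-processing removes the per-character spaces
    # and maps "|" back to spaces.
    return post_process("h e l l o | w o r l d |", "letter")    # "hello world"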
def compute_mask_indices(
shape: Tuple[int, int],
padding_mask: Optional[torch.Tensor],
mask_prob: float,
mask_length: int,
mask_type: str = "static",
mask_other: float = 0.0,
min_masks: int = 0,
no_overlap: bool = False,
min_space: int = 0,
require_same_masks: bool = True,
mask_dropout: float = 0.0,
) -> np.ndarray:
"""
Computes random mask spans for a given shape
Args:
        shape: the shape for which to compute masks.
should be of size 2 where first element is batch size and 2nd is timesteps
padding_mask: optional padding mask of the same size as shape, which will prevent masking padded elements
mask_prob: probability for each token to be chosen as start of the span to be masked. this will be multiplied by
number of timesteps divided by length of mask span to mask approximately this percentage of all elements.
however due to overlaps, the actual number will be smaller (unless no_overlap is True)
mask_type: how to compute mask lengths
static = fixed size
uniform = sample from uniform distribution [mask_other, mask_length*2]
normal = sample from normal distribution with mean mask_length and stdev mask_other. mask is min 1 element
            poisson = sample from a Poisson distribution with lambda = mask_length
min_masks: minimum number of masked spans
        no_overlap: if true, use an alternative recursive algorithm that prevents spans from overlapping
        min_space: only used if no_overlap is True; how many elements to keep unmasked between spans
        require_same_masks: if true, randomly drop masks until the same number of masks remains in each sample
        mask_dropout: randomly drop out this percentage of masks in each example
"""
assert require_same_masks
assert mask_dropout == 0.0
bsz, all_sz = shape
mask = np.full((bsz, all_sz), False)
all_num_mask = int(
# add a random number for probabilistic rounding
mask_prob * all_sz / float(mask_length)
+ np.random.rand()
)
all_num_mask = max(min_masks, all_num_mask)
min_len = float("inf")
mask_idcs = []
for i in range(bsz):
if padding_mask is not None:
sz = all_sz - padding_mask[i].long().sum().item()
num_mask = int(
# add a random number for probabilistic rounding
mask_prob * sz / float(mask_length)
+ np.random.rand()
)
num_mask = max(min_masks, num_mask)
else:
sz = all_sz
num_mask = all_num_mask
def get_lengths(num_mask):
if mask_type == "static":
lengths = np.full(num_mask, mask_length)
elif mask_type == "uniform":
lengths = np.random.randint(mask_other, mask_length * 2 + 1, size=num_mask)
elif mask_type == "normal":
lengths = np.random.normal(mask_length, mask_other, size=num_mask)
lengths = [max(1, int(round(x))) for x in lengths]
elif mask_type == "poisson":
lengths = np.random.poisson(mask_length, size=num_mask)
lengths = [int(round(x)) for x in lengths]
else:
raise ValueError("unknown mask selection " + mask_type)
return lengths
lengths = get_lengths(num_mask)
if sum(lengths) == 0:
lengths[0] = min(mask_length, sz - 1)
if no_overlap:
mask_idc = []
def arrange(s, e, length, keep_length):
span_start = np.random.randint(s, e - length)
mask_idc.extend(span_start + i for i in range(length))
new_parts = []
if span_start - s - min_space >= keep_length:
new_parts.append((s, span_start - min_space + 1))
if e - span_start - keep_length - min_space > keep_length:
new_parts.append((span_start + length + min_space, e))
return new_parts
parts = [(0, sz)]
min_length = min(lengths)
for length in sorted(lengths, reverse=True):
lens = np.fromiter(
(e - s if e - s >= length + min_space else 0 for s, e in parts),
                    np.int64,  # np.int was removed in NumPy 1.24; use a fixed-width dtype
)
l_sum = np.sum(lens)
if l_sum == 0:
break
probs = lens / np.sum(lens)
c = np.random.choice(len(parts), p=probs)
s, e = parts.pop(c)
parts.extend(arrange(s, e, length, min_length))
mask_idc = np.asarray(mask_idc)
mask_idc = np.unique(mask_idc[mask_idc < sz])
else:
min_len = min(lengths)
if sz - min_len <= num_mask:
min_len = sz - num_mask - 1
mask_idc = np.random.choice(sz - min_len, num_mask, replace=False)
mask_idc = np.asarray(
[
mask_idc[j] + offset
for j in range(len(mask_idc))
for offset in range(lengths[j])
]
)
mask_idcs.append(np.unique(mask_idc[mask_idc < sz]))
min_len = min([len(m) for m in mask_idcs])
for i, mask_idc in enumerate(mask_idcs):
if len(mask_idc) > min_len and require_same_masks:
mask_idc = np.random.choice(mask_idc, min_len, replace=False)
if mask_dropout > 0:
num_holes = np.rint(len(mask_idc) * mask_dropout).astype(int)
mask_idc = np.random.choice(
mask_idc, len(mask_idc) - num_holes, replace=False
)
mask[i, mask_idc] = True
return mask
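# Not part of the original module: an illustrative sketch of a typical call to
# compute_mask_indices; the shape and masking hyper-parameters are arbitrary
# example values.
def _demo_compute_mask_indices():
    batch_size, timesteps = 2, 100
    mask = compute_mask_indices(
        (batch_size, timesteps),
        padding_mask=None,
        mask_prob=0.65,
        mask_length=10,
        mask_type="static",
        min_masks=2,
    )
    # Boolean array of shape (batch_size, timesteps); True marks masked positions
    assert mask.shape == (batch_size, timesteps)
    return mask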
def get_mem_usage():
try:
import psutil
mb = 1024 * 1024
return f"used={psutil.virtual_memory().used / mb}Mb; avail={psutil.virtual_memory().available / mb}Mb"
except ImportError:
return "N/A"
def get_buckets(sizes, num_buckets):
buckets = np.unique(
np.percentile(
sizes,
np.linspace(0, 100, num_buckets + 1),
interpolation='lower',
)[1:]
)
return buckets
def get_bucketed_sizes(orig_sizes, buckets):
sizes = np.copy(orig_sizes)
assert np.min(sizes) >= 0
start_val = -1
for end_val in buckets:
mask = (sizes > start_val) & (sizes <= end_val)
sizes[mask] = end_val
start_val = end_val
return sizes
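# Not part of the original module: a small sketch of the bucketing helpers.
# The sizes below are made-up sample lengths.
def _demo_bucketing():
    sizes = np.array([10, 12, 35, 40, 41, 90])
    buckets = get_buckets(sizes, num_buckets=2)    # upper boundaries, e.g. [35, 90]
    bucketed = get_bucketed_sizes(sizes, buckets)  # each size rounded up to its boundary
    return buckets, bucketed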
def _find_extra_valid_paths(dataset_path: str) -> set:
paths = utils.split_paths(dataset_path)
all_valid_paths = set()
for sub_dir in paths:
contents = PathManager.ls(sub_dir)
valid_paths = [c for c in contents if re.match("valid*[0-9].*", c) is not None]
all_valid_paths |= {os.path.basename(p) for p in valid_paths}
# Remove .bin, .idx etc
roots = {os.path.splitext(p)[0] for p in all_valid_paths}
return roots
| DeepLearningExamples-master | PyTorch/SpeechRecognition/wav2vec2/common/fairseq/data/data_utils.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from collections import Counter
from multiprocessing import Pool
import torch
from common.fairseq import utils
from common.fairseq.data import data_utils
from common.fairseq.file_chunker_utils import Chunker, find_offsets
from common.fairseq.file_io import PathManager
from common.fairseq.tokenizer import tokenize_line
class Dictionary:
"""A mapping from symbols to consecutive integers"""
def __init__(
self,
*, # begin keyword-only arguments
bos="<s>",
pad="<pad>",
eos="</s>",
unk="<unk>",
extra_special_symbols=None,
):
self.bos_word, self.unk_word, self.pad_word, self.eos_word = bos, unk, pad, eos
self.symbols = []
self.count = []
self.indices = {}
self.bos_index = self.add_symbol(bos)
self.pad_index = self.add_symbol(pad)
self.eos_index = self.add_symbol(eos)
self.unk_index = self.add_symbol(unk)
if extra_special_symbols:
for s in extra_special_symbols:
self.add_symbol(s)
self.nspecial = len(self.symbols)
def __eq__(self, other):
return self.indices == other.indices
def __getitem__(self, idx):
if idx < len(self.symbols):
return self.symbols[idx]
return self.unk_word
def get_count(self, idx):
return self.count[idx]
def __len__(self):
"""Returns the number of symbols in the dictionary"""
return len(self.symbols)
def __contains__(self, sym):
return sym in self.indices
def index(self, sym):
"""Returns the index of the specified symbol"""
assert isinstance(sym, str)
if sym in self.indices:
return self.indices[sym]
return self.unk_index
def string(
self,
tensor,
bpe_symbol=None,
escape_unk=False,
extra_symbols_to_ignore=None,
unk_string=None,
include_eos=False,
separator=" ",
):
"""Helper for converting a tensor of token indices to a string.
Can optionally remove BPE symbols or escape <unk> words.
"""
if torch.is_tensor(tensor) and tensor.dim() == 2:
return "\n".join(
self.string(
t,
bpe_symbol,
escape_unk,
extra_symbols_to_ignore,
include_eos=include_eos,
)
for t in tensor
)
extra_symbols_to_ignore = set(extra_symbols_to_ignore or [])
extra_symbols_to_ignore.add(self.eos())
def token_string(i):
if i == self.unk():
if unk_string is not None:
return unk_string
else:
return self.unk_string(escape_unk)
else:
return self[i]
if hasattr(self, "bos_index"):
extra_symbols_to_ignore.add(self.bos())
sent = separator.join(
token_string(i)
for i in tensor
if utils.item(i) not in extra_symbols_to_ignore
)
return data_utils.post_process(sent, bpe_symbol)
def unk_string(self, escape=False):
"""Return unknown string, optionally escaped as: <<unk>>"""
if escape:
return "<{}>".format(self.unk_word)
else:
return self.unk_word
def add_symbol(self, word, n=1, overwrite=False):
"""Adds a word to the dictionary"""
if word in self.indices and not overwrite:
idx = self.indices[word]
self.count[idx] = self.count[idx] + n
return idx
else:
idx = len(self.symbols)
self.indices[word] = idx
self.symbols.append(word)
self.count.append(n)
return idx
def update(self, new_dict):
"""Updates counts from new dictionary."""
for word in new_dict.symbols:
idx2 = new_dict.indices[word]
if word in self.indices:
idx = self.indices[word]
self.count[idx] = self.count[idx] + new_dict.count[idx2]
else:
idx = len(self.symbols)
self.indices[word] = idx
self.symbols.append(word)
self.count.append(new_dict.count[idx2])
def finalize(self, threshold=-1, nwords=-1, padding_factor=8):
"""Sort symbols by frequency in descending order, ignoring special ones.
Args:
- threshold defines the minimum word count
- nwords defines the total number of words in the final dictionary,
including special symbols
- padding_factor can be used to pad the dictionary size to be a
multiple of 8, which is important on some hardware (e.g., Nvidia
Tensor Cores).
"""
if nwords <= 0:
nwords = len(self)
new_indices = dict(zip(self.symbols[: self.nspecial], range(self.nspecial)))
new_symbols = self.symbols[: self.nspecial]
new_count = self.count[: self.nspecial]
c = Counter(
dict(
sorted(zip(self.symbols[self.nspecial :], self.count[self.nspecial :]))
)
)
for symbol, count in c.most_common(nwords - self.nspecial):
if count >= threshold:
new_indices[symbol] = len(new_symbols)
new_symbols.append(symbol)
new_count.append(count)
else:
break
assert len(new_symbols) == len(new_indices)
self.count = list(new_count)
self.symbols = list(new_symbols)
self.indices = new_indices
self.pad_to_multiple_(padding_factor)
def pad_to_multiple_(self, padding_factor):
"""Pad Dictionary size to be a multiple of *padding_factor*."""
if padding_factor > 1:
i = 0
while len(self) % padding_factor != 0:
symbol = "madeupword{:04d}".format(i)
self.add_symbol(symbol, n=0)
i += 1
def bos(self):
"""Helper to get index of beginning-of-sentence symbol"""
return self.bos_index
def pad(self):
"""Helper to get index of pad symbol"""
return self.pad_index
def eos(self):
"""Helper to get index of end-of-sentence symbol"""
return self.eos_index
def unk(self):
"""Helper to get index of unk symbol"""
return self.unk_index
@classmethod
def load(cls, f):
"""Loads the dictionary from a text file with the format:
```
<symbol0> <count0>
<symbol1> <count1>
...
```
"""
d = cls()
d.add_from_file(f)
return d
def add_from_file(self, f):
"""
Loads a pre-existing dictionary from a text file and adds its symbols
to this instance.
"""
if isinstance(f, str):
try:
with open(PathManager.get_local_path(f), "r", encoding="utf-8") as fd:
self.add_from_file(fd)
except FileNotFoundError as fnfe:
raise fnfe
except UnicodeError:
raise Exception(
"Incorrect encoding detected in {}, please "
"rebuild the dataset".format(f)
)
return
lines = f.readlines()
indices_start_line = self._load_meta(lines)
for line in lines[indices_start_line:]:
try:
line, field = line.rstrip().rsplit(" ", 1)
if field == "#fairseq:overwrite":
overwrite = True
line, field = line.rsplit(" ", 1)
else:
overwrite = False
count = int(field)
word = line
if word in self and not overwrite:
raise RuntimeError(
"Duplicate word found when loading Dictionary: '{}'. "
"Duplicate words can overwrite earlier ones by adding the "
"#fairseq:overwrite flag at the end of the corresponding row "
"in the dictionary file. If using the Camembert model, please "
"download an updated copy of the model file.".format(word)
)
self.add_symbol(word, n=count, overwrite=overwrite)
except ValueError:
raise ValueError(
"Incorrect dictionary format, expected '<token> <cnt> [flags]'"
)
def _save(self, f, kv_iterator):
if isinstance(f, str):
PathManager.mkdirs(os.path.dirname(f))
with PathManager.open(f, "w", encoding="utf-8") as fd:
return self.save(fd)
for k, v in kv_iterator:
print("{} {}".format(k, v), file=f)
def _get_meta(self):
return [], []
def _load_meta(self, lines):
return 0
def save(self, f):
"""Stores dictionary into a text file"""
ex_keys, ex_vals = self._get_meta()
self._save(
f,
zip(
ex_keys + self.symbols[self.nspecial :],
ex_vals + self.count[self.nspecial :],
),
)
def dummy_sentence(self, length):
t = torch.Tensor(length).uniform_(self.nspecial + 1, len(self)).long()
t[-1] = self.eos()
return t
def encode_line(
self,
line,
line_tokenizer=tokenize_line,
add_if_not_exist=True,
consumer=None,
append_eos=True,
reverse_order=False,
) -> torch.IntTensor:
words = line_tokenizer(line)
if reverse_order:
words = list(reversed(words))
nwords = len(words)
ids = torch.IntTensor(nwords + 1 if append_eos else nwords)
for i, word in enumerate(words):
if add_if_not_exist:
idx = self.add_symbol(word)
else:
idx = self.index(word)
if consumer is not None:
consumer(word, idx)
ids[i] = idx
if append_eos:
ids[nwords] = self.eos_index
return ids
@staticmethod
def _add_file_to_dictionary_single_worker(
filename,
tokenize,
eos_word,
start_offset,
end_offset,
):
counter = Counter()
with Chunker(filename, start_offset, end_offset) as line_iterator:
for line in line_iterator:
for word in tokenize(line):
counter.update([word])
counter.update([eos_word])
return counter
@staticmethod
def add_file_to_dictionary(filename, dict, tokenize, num_workers):
def merge_result(counter):
for w, c in sorted(counter.items()):
dict.add_symbol(w, c)
local_file = PathManager.get_local_path(filename)
offsets = find_offsets(local_file, num_workers)
if num_workers > 1:
chunks = zip(offsets, offsets[1:])
pool = Pool(processes=num_workers)
results = []
for (start_offset, end_offset) in chunks:
results.append(
pool.apply_async(
Dictionary._add_file_to_dictionary_single_worker,
(
local_file,
tokenize,
dict.eos_word,
start_offset,
end_offset,
),
)
)
pool.close()
pool.join()
for r in results:
merge_result(r.get())
else:
merge_result(
Dictionary._add_file_to_dictionary_single_worker(
local_file, tokenize, dict.eos_word, offsets[0], offsets[1]
)
)
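# Not part of the original module: a minimal usage sketch that builds a tiny
# dictionary, encodes a line, and converts the indices back to a string.
def _demo_dictionary():
    d = Dictionary()
    for token in ["hello", "world"]:
        d.add_symbol(token)
    ids = d.encode_line("hello world", add_if_not_exist=False)  # appends </s>
    text = d.string(ids)  # special symbols are stripped by default
    assert text == "hello world"
    return d, ids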
| DeepLearningExamples-master | PyTorch/SpeechRecognition/wav2vec2/common/fairseq/data/dictionary.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from . import data_utils
class BaseWrapperDataset(torch.utils.data.Dataset):
def __init__(self, dataset):
super().__init__()
self.dataset = dataset
def __getitem__(self, index):
return self.dataset[index]
def __len__(self):
return len(self.dataset)
@property
def sizes(self):
return self.dataset.sizes
def num_tokens(self, index):
return self.dataset.num_tokens(index)
def size(self, index):
return self.dataset.size(index)
def ordered_indices(self):
return self.dataset.ordered_indices()
def batch_by_size(
self,
indices,
max_tokens=None,
max_sentences=None,
required_batch_size_multiple=1,
num_concat_batches=1,
):
return self.dataset.batch_by_size(
indices,
max_tokens=max_tokens,
max_sentences=max_sentences,
required_batch_size_multiple=required_batch_size_multiple,
num_concat_batches=num_concat_batches,
)
def filter_indices_by_size(self, indices, max_sizes):
return self.dataset.filter_indices_by_size(indices, max_sizes)
class AddTargetDataset(BaseWrapperDataset):
def __init__(
self,
dataset,
labels,
pad,
eos,
batch_targets,
process_label=None,
add_to_input=False,
):
super().__init__(dataset)
self.labels = labels
self.batch_targets = batch_targets
self.pad = pad
self.eos = eos
self.process_label = process_label
self.add_to_input = add_to_input
def get_label(self, index):
return (
self.labels[index]
if self.process_label is None
else self.process_label(self.labels[index])
)
def __getitem__(self, index):
item = self.dataset[index]
item["label"] = self.get_label(index)
return item
def size(self, index):
sz = self.dataset.size(index)
own_sz = len(self.get_label(index))
return (sz, own_sz)
def collater(self, samples):
collated = self.dataset.collater(samples)
if len(collated) == 0:
return collated
indices = set(collated["id"].tolist())
target = [s["label"] for s in samples if s["id"] in indices]
if self.batch_targets:
collated["target_lengths"] = torch.LongTensor([len(t) for t in target])
target = data_utils.collate_tokens(target, pad_idx=self.pad, left_pad=False)
collated["ntokens"] = collated["target_lengths"].sum().item()
else:
collated["ntokens"] = sum([len(t) for t in target])
collated["target"] = target
if self.add_to_input:
eos = target.new_full((target.size(0), 1), self.eos)
collated["target"] = torch.cat([target, eos], dim=-1).long()
collated["net_input"]["prev_output_tokens"] = torch.cat(
[eos, target], dim=-1
).long()
collated["ntokens"] += target.size(0)
return collated
def __setattr__(self, attr, val):
if attr == "batch_ids":
self.dataset.batch_ids = val
else:
super().__setattr__(attr, val)
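# Not part of the original module: a toy sketch showing how AddTargetDataset
# attaches labels to items from a base dataset. _ToyDataset is a made-up stub
# used only for illustration.
class _ToyDataset(torch.utils.data.Dataset):
    def __init__(self, tensors):
        self.tensors = tensors
        self.sizes = [len(t) for t in tensors]
    def __getitem__(self, index):
        return {"id": index, "source": self.tensors[index]}
    def __len__(self):
        return len(self.tensors)
    def size(self, index):
        return self.sizes[index]
def _demo_add_target_dataset():
    base = _ToyDataset([torch.zeros(4), torch.zeros(6)])
    labels = [[1, 2], [3, 4, 5]]
    ds = AddTargetDataset(base, labels, pad=0, eos=2, batch_targets=False)
    item = ds[0]        # {"id": 0, "source": tensor, "label": [1, 2]}
    sizes = ds.size(1)  # (source_size, label_size) == (6, 3)
    return item, sizes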
| DeepLearningExamples-master | PyTorch/SpeechRecognition/wav2vec2/common/fairseq/data/add_target_dataset.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pathlib import Path
from typing import BinaryIO, Optional, Tuple, Union, List
import numpy as np
import torch
SF_AUDIO_FILE_EXTENSIONS = {".wav", ".flac", ".ogg"}
FEATURE_OR_SF_AUDIO_FILE_EXTENSIONS = {".npy", ".wav", ".flac", ".ogg"}
def _convert_to_mono(
waveform: torch.FloatTensor, sample_rate: int
) -> torch.FloatTensor:
if waveform.shape[0] > 1:
try:
import torchaudio.sox_effects as ta_sox
except ImportError:
raise ImportError(
"Please install torchaudio to convert multi-channel audios"
)
effects = [['channels', '1']]
return ta_sox.apply_effects_tensor(waveform, sample_rate, effects)[0]
return waveform
def convert_to_mono(waveform: np.ndarray, sample_rate: int) -> np.ndarray:
if waveform.shape[0] > 1:
_waveform = torch.from_numpy(waveform)
return _convert_to_mono(_waveform, sample_rate).numpy()
return waveform
def get_waveform(
path_or_fp: Union[str, BinaryIO], normalization=True, mono=True,
frames=-1, start=0, always_2d=True
) -> Tuple[np.ndarray, int]:
"""Get the waveform and sample rate of a 16-bit WAV/FLAC/OGG Vorbis audio.
Args:
path_or_fp (str or BinaryIO): the path or file-like object
normalization (bool): Normalize values to [-1, 1] (Default: True)
mono (bool): convert multi-channel audio to mono-channel one
frames (int): the number of frames to read. (-1 for reading all)
start (int): Where to start reading. A negative value counts from the end.
always_2d (bool): always return 2D array even for mono-channel audios
Returns:
waveform (numpy.ndarray): 1D or 2D waveform (channels x length)
sample_rate (float): sample rate
"""
if isinstance(path_or_fp, str):
ext = Path(path_or_fp).suffix
if ext not in SF_AUDIO_FILE_EXTENSIONS:
raise ValueError(f"Unsupported audio format: {ext}")
try:
import soundfile as sf
except ImportError:
raise ImportError(
"Please install soundfile to load WAV/FLAC/OGG Vorbis audios"
)
waveform, sample_rate = sf.read(
path_or_fp, dtype="float32", always_2d=True, frames=frames, start=start
)
waveform = waveform.T # T x C -> C x T
if mono and waveform.shape[0] > 1:
waveform = convert_to_mono(waveform, sample_rate)
if not normalization:
waveform *= 2 ** 15 # denormalized to 16-bit signed integers
if not always_2d:
waveform = waveform.squeeze(axis=0)
return waveform, sample_rate
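# Not part of the original module: a usage sketch for get_waveform.
# "example.wav" is a hypothetical 16 kHz audio file that must exist (and
# soundfile must be installed) for this to run.
def _demo_get_waveform(path="example.wav"):
    waveform, sample_rate = get_waveform(path, normalization=True, mono=True)
    # waveform has shape (channels, num_samples) because always_2d=True
    return waveform.shape, sample_rate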
def _get_kaldi_fbank(
waveform: np.ndarray, sample_rate: int, n_bins=80
) -> Optional[np.ndarray]:
"""Get mel-filter bank features via PyKaldi."""
try:
from kaldi.feat.mel import MelBanksOptions
from kaldi.feat.fbank import FbankOptions, Fbank
from kaldi.feat.window import FrameExtractionOptions
from kaldi.matrix import Vector
mel_opts = MelBanksOptions()
mel_opts.num_bins = n_bins
frame_opts = FrameExtractionOptions()
frame_opts.samp_freq = sample_rate
opts = FbankOptions()
opts.mel_opts = mel_opts
opts.frame_opts = frame_opts
fbank = Fbank(opts=opts)
features = fbank.compute(Vector(waveform.squeeze()), 1.0).numpy()
return features
except ImportError:
return None
def _get_torchaudio_fbank(
waveform: np.ndarray, sample_rate, n_bins=80
) -> Optional[np.ndarray]:
"""Get mel-filter bank features via TorchAudio."""
try:
import torchaudio.compliance.kaldi as ta_kaldi
waveform = torch.from_numpy(waveform)
features = ta_kaldi.fbank(
waveform, num_mel_bins=n_bins, sample_frequency=sample_rate
)
return features.numpy()
except ImportError:
return None
def get_fbank(path_or_fp: Union[str, BinaryIO], n_bins=80) -> np.ndarray:
"""Get mel-filter bank features via PyKaldi or TorchAudio. Prefer PyKaldi
(faster CPP implementation) to TorchAudio (Python implementation). Note that
Kaldi/TorchAudio requires 16-bit signed integers as inputs and hence the
waveform should not be normalized."""
waveform, sample_rate = get_waveform(path_or_fp, normalization=False)
features = _get_kaldi_fbank(waveform, sample_rate, n_bins)
if features is None:
features = _get_torchaudio_fbank(waveform, sample_rate, n_bins)
if features is None:
raise ImportError(
"Please install pyKaldi or torchaudio to enable "
"online filterbank feature extraction"
)
return features
def is_npy_data(data: bytes) -> bool:
return data[0] == 147 and data[1] == 78
def is_sf_audio_data(data: bytes) -> bool:
is_wav = (data[0] == 82 and data[1] == 73 and data[2] == 70)
is_flac = (data[0] == 102 and data[1] == 76 and data[2] == 97)
is_ogg = (data[0] == 79 and data[1] == 103 and data[2] == 103)
return is_wav or is_flac or is_ogg
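# Not part of the original module: a small sketch of the magic-byte checks
# above, using hand-written header bytes for illustration.
def _demo_magic_bytes():
    assert is_sf_audio_data(b"RIFF....WAVEfmt ")   # WAV files start with "RIFF"
    assert is_npy_data(b"\x93NUMPY\x01\x00")       # .npy files start with "\x93NUMPY"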
def read_from_stored_zip(zip_path: str, offset: int, file_size: int) -> bytes:
with open(zip_path, "rb") as f:
f.seek(offset)
data = f.read(file_size)
return data
def parse_path(path: str) -> Tuple[str, List[int]]:
"""Parse data path which is either a path to
1. a .npy/.wav/.flac/.ogg file
2. a stored ZIP file with slicing info: "[zip_path]:[offset]:[length]"
Args:
path (str): the data path to parse
Returns:
file_path (str): the file path
slice_ptr (list of int): empty in case 1;
byte offset and length for the slice in case 2
"""
if Path(path).suffix in FEATURE_OR_SF_AUDIO_FILE_EXTENSIONS:
_path, slice_ptr = path, []
else:
_path, *slice_ptr = path.split(":")
if not Path(_path).is_file():
raise FileNotFoundError(f"File not found: {_path}")
assert len(slice_ptr) in {0, 2}, f"Invalid path: {path}"
slice_ptr = [int(i) for i in slice_ptr]
return _path, slice_ptr
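# Not part of the original module: a sketch of parse_path for the two supported
# path forms. The zipped form "archive.zip:<offset>:<length>" requires the
# archive to exist, so only the plain form is exercised here.
def _demo_parse_path():
    file_path, slice_ptr = parse_path("audio/sample.flac")
    assert file_path == "audio/sample.flac" and slice_ptr == []
    return file_path, slice_ptr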
| DeepLearningExamples-master | PyTorch/SpeechRecognition/wav2vec2/common/fairseq/data/audio/audio_utils.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import io
import logging
import os
import sys
from itertools import groupby
import numpy as np
import torch
import torch.nn.functional as F
import torch.utils
from common.fairseq.data import data_utils
from common.fairseq.data.audio.audio_utils import (
parse_path,
read_from_stored_zip,
is_sf_audio_data,
)
from common.fairseq.data.data_utils import (
compute_mask_indices,
get_bucketed_sizes,
get_buckets,
)
from common.utils import print_once
logger = logging.getLogger(__name__)
class RawAudioDataset(torch.utils.data.Dataset):
def __init__(
self,
sample_rate,
max_sample_size=None,
min_sample_size=0,
shuffle=True,
pad=False,
normalize=False,
compute_mask_indices=False,
**mask_compute_kwargs,
):
super().__init__()
self.sample_rate = sample_rate
self.sizes = []
self.max_sample_size = (
max_sample_size if max_sample_size is not None else sys.maxsize
)
self.min_sample_size = min_sample_size
self.pad = pad
self.shuffle = shuffle
self.normalize = normalize
self.compute_mask_indices = compute_mask_indices
if self.compute_mask_indices:
self.mask_compute_kwargs = mask_compute_kwargs
self._features_size_map = {}
self._C = mask_compute_kwargs["encoder_embed_dim"]
self._conv_feature_layers = eval(mask_compute_kwargs["conv_feature_layers"])
def __getitem__(self, index):
raise NotImplementedError()
def __len__(self):
return len(self.sizes)
def postprocess(self, feats, curr_sample_rate):
if feats.dim() == 2:
feats = feats.mean(-1)
if curr_sample_rate != self.sample_rate:
raise Exception(f"sample rate: {curr_sample_rate}, need {self.sample_rate}")
assert feats.dim() == 1, feats.dim()
if self.normalize:
with torch.no_grad():
feats = F.layer_norm(feats, feats.shape)
return feats
def crop_to_max_size(self, wav, target_size):
size = len(wav)
diff = size - target_size
if diff <= 0:
return wav
start = np.random.randint(0, diff + 1)
end = size - diff + start
return wav[start:end]
def _compute_mask_indices(self, dims, padding_mask):
B, T, C = dims
mask_indices, mask_channel_indices = None, None
if self.mask_compute_kwargs["mask_prob"] > 0:
mask_indices = compute_mask_indices(
(B, T),
padding_mask,
self.mask_compute_kwargs["mask_prob"],
self.mask_compute_kwargs["mask_length"],
self.mask_compute_kwargs["mask_selection"],
self.mask_compute_kwargs["mask_other"],
min_masks=2,
no_overlap=self.mask_compute_kwargs["no_mask_overlap"],
min_space=self.mask_compute_kwargs["mask_min_space"],
)
mask_indices = torch.from_numpy(mask_indices)
if self.mask_compute_kwargs["mask_channel_prob"] > 0:
mask_channel_indices = compute_mask_indices(
(B, C),
None,
self.mask_compute_kwargs["mask_channel_prob"],
self.mask_compute_kwargs["mask_channel_length"],
self.mask_compute_kwargs["mask_channel_selection"],
self.mask_compute_kwargs["mask_channel_other"],
no_overlap=self.mask_compute_kwargs["no_mask_channel_overlap"],
min_space=self.mask_compute_kwargs["mask_channel_min_space"],
)
mask_channel_indices = (
torch.from_numpy(mask_channel_indices).unsqueeze(1).expand(-1, T, -1)
)
return mask_indices, mask_channel_indices
@staticmethod
def _bucket_tensor(tensor, num_pad, value):
return F.pad(tensor, (0, num_pad), value=value)
def collater(self, samples):
samples = [s for s in samples if s["source"] is not None]
if len(samples) == 0:
return {}
sources = [s["source"] for s in samples]
sizes = [len(s) for s in sources]
if self.pad:
target_size = min(max(sizes), self.max_sample_size)
else:
target_size = min(min(sizes), self.max_sample_size)
input, out = {}, {}
if "batch_id" in samples[0]:
# The data for wav2vec 2.0 is sorted by len and cut into batches.
# We concat --num_concat_batches together to better utilize GPUs.
# Yet, we split them back to calculate masking, sample negatives,
# and calculate loss, as these ops are dependent on batch size.
# In order to split, we need to remember original (sub)batch ids.
batch_inds = [s['batch_id'] for s in samples]
sub_batch_lens = [len(list(b)) for _, b in groupby(batch_inds)]
starts_ends = np.cumsum([0] + sub_batch_lens)
target_sizes = np.array(
[min(max(sizes[s:e]), self.max_sample_size)
for s, e in zip(starts_ends[:-1], starts_ends[1:])]
)
out["sub_batch_sizes"] = torch.LongTensor(sub_batch_lens)
out["sub_batch_lens"] = torch.LongTensor(target_sizes)
collated_sources = sources[0].new_zeros(len(sources), target_size)
padding_mask = (
torch.BoolTensor(collated_sources.shape).fill_(False) if self.pad else None
)
for i, (source, size) in enumerate(zip(sources, sizes)):
diff = size - target_size
if diff == 0:
collated_sources[i] = source
elif diff > 0:
collated_sources[i] = self.crop_to_max_size(source, target_size)
else: # diff < 0:
assert self.pad
collated_sources[i] = torch.cat(
[source, source.new_full((-diff,), 0.0)]
)
padding_mask[i, diff:] = True
input["source"] = collated_sources
out["id"] = torch.LongTensor([s["id"] for s in samples])
if self.pad:
input["padding_mask"] = padding_mask
if hasattr(self, "num_buckets") and self.num_buckets > 0:
assert self.pad, "Cannot bucket without padding first."
bucket = max(self._bucketed_sizes[s["id"]] for s in samples)
num_pad = bucket - collated_sources.size(-1)
if num_pad:
input["source"] = self._bucket_tensor(collated_sources, num_pad, 0)
input["padding_mask"] = self._bucket_tensor(padding_mask, num_pad, True)
if self.compute_mask_indices:
B = input["source"].size(0)
T = self._get_mask_indices_dims(input["source"].size(-1))
padding_mask_reshaped = input["padding_mask"].clone()
extra = padding_mask_reshaped.size(1) % T
if extra > 0:
padding_mask_reshaped = padding_mask_reshaped[:, :-extra]
padding_mask_reshaped = padding_mask_reshaped.view(
padding_mask_reshaped.size(0), T, -1
)
padding_mask_reshaped = padding_mask_reshaped.all(-1)
input["padding_count"] = padding_mask_reshaped.sum(-1).max().item()
mask_indices, mask_channel_indices = self._compute_mask_indices(
(B, T, self._C),
padding_mask_reshaped,
)
input["mask_indices"] = mask_indices
input["mask_channel_indices"] = mask_channel_indices
out["sample_size"] = mask_indices.sum().item()
out["net_input"] = input
return out
def _get_mask_indices_dims(self, size, padding=0, dilation=1):
if size not in self._features_size_map:
L_in = size
for (_, kernel_size, stride) in self._conv_feature_layers:
L_out = L_in + 2 * padding - dilation * (kernel_size - 1) - 1
L_out = 1 + L_out // stride
L_in = L_out
self._features_size_map[size] = L_out
return self._features_size_map[size]
def num_tokens(self, index):
return self.size(index)
def size(self, index):
"""Return an example's size as a float or tuple. This value is used when
filtering a dataset with ``--max-positions``."""
if self.pad:
return self.sizes[index]
return min(self.sizes[index], self.max_sample_size)
def ordered_indices(self):
"""Return an ordered list of indices. Batches will be constructed based
on this order."""
if self.shuffle:
order = [np.random.permutation(len(self))]
order.append(
np.minimum(
np.array(self.sizes),
self.max_sample_size,
)
)
return np.lexsort(order)[::-1]
else:
return np.arange(len(self))
def set_bucket_info(self, num_buckets):
self.num_buckets = num_buckets
if self.num_buckets > 0:
self._collated_sizes = np.minimum(
np.array(self.sizes),
self.max_sample_size,
)
self.buckets = get_buckets(
self._collated_sizes,
self.num_buckets,
)
self._bucketed_sizes = get_bucketed_sizes(
self._collated_sizes, self.buckets
)
logger.info(
f"{len(self.buckets)} bucket(s) for the audio dataset: "
f"{self.buckets}"
)
def batch_by_size(
self,
indices,
max_tokens=None,
max_sentences=None,
required_batch_size_multiple=1,
num_concat_batches=1,
):
"""
Given an ordered set of indices, return batches according to
*max_tokens*, *max_sentences* and *required_batch_size_multiple*.
"""
from common.fairseq.data import data_utils
return data_utils.batch_by_size(
indices,
num_tokens_fn=self.num_tokens,
num_tokens_vec=None,
max_tokens=max_tokens,
max_sentences=max_sentences,
required_batch_size_multiple=required_batch_size_multiple,
num_concat_batches=num_concat_batches,
)
def filter_indices_by_size(self, indices, max_sizes):
"""
Filter a list of sample indices. Remove those that are longer than
specified in *max_sizes*.
        WARNING: do not update this method in place; override it in child classes instead
Args:
indices (np.array): original array of sample indices
max_sizes (int or list[int] or tuple[int]): max sample size,
can be defined separately for src and tgt (then list or tuple)
Returns:
np.array: filtered sample array
list: list of removed indices
"""
if isinstance(max_sizes, float) or isinstance(max_sizes, int):
if hasattr(self, "sizes") and isinstance(self.sizes, np.ndarray):
ignored = indices[self.sizes[indices] > max_sizes].tolist()
indices = indices[self.sizes[indices] <= max_sizes]
elif (
hasattr(self, "sizes")
and isinstance(self.sizes, list)
and len(self.sizes) == 1
):
ignored = indices[self.sizes[0][indices] > max_sizes].tolist()
indices = indices[self.sizes[0][indices] <= max_sizes]
else:
indices, ignored = data_utils._filter_by_size_dynamic(
indices, self.size, max_sizes
)
else:
indices, ignored = data_utils._filter_by_size_dynamic(
indices, self.size, max_sizes
)
return indices, ignored
class FileAudioDataset(RawAudioDataset):
def __init__(
self,
manifest_path,
sample_rate,
max_sample_size=None,
min_sample_size=0,
shuffle=True,
pad=False,
normalize=False,
num_buckets=0,
compute_mask_indices=False,
**mask_compute_kwargs,
):
super().__init__(
sample_rate=sample_rate,
max_sample_size=max_sample_size,
min_sample_size=min_sample_size,
shuffle=shuffle,
pad=pad,
normalize=normalize,
compute_mask_indices=compute_mask_indices,
**mask_compute_kwargs,
)
skipped = 0
self.fnames = []
sizes = []
self.skipped_indices = set()
with open(manifest_path, "r") as f:
self.root_dir = f.readline().strip()
for i, line in enumerate(f):
items = line.strip().split("\t")
assert len(items) == 2, line
sz = int(items[1])
if min_sample_size is not None and sz < min_sample_size:
skipped += 1
self.skipped_indices.add(i)
continue
self.fnames.append(items[0])
sizes.append(sz)
print_once(f"loaded {len(self.fnames)}, skipped {skipped} samples")
self.sizes = np.array(sizes, dtype=np.int64)
try:
import pyarrow
self.fnames = pyarrow.array(self.fnames)
        except Exception:
            logger.debug("Could not create a pyarrow array. "
                         "Please install pyarrow for better performance")
self.set_bucket_info(num_buckets)
def __getitem__(self, index):
import soundfile as sf
path_or_fp = os.path.join(self.root_dir, str(self.fnames[index]))
_path, slice_ptr = parse_path(path_or_fp)
if len(slice_ptr) == 2:
byte_data = read_from_stored_zip(_path, slice_ptr[0], slice_ptr[1])
assert is_sf_audio_data(byte_data)
path_or_fp = io.BytesIO(byte_data)
try:
wav, curr_sample_rate = sf.read(path_or_fp, dtype="float32")
except RuntimeError as e:
if not os.path.isfile(path_or_fp):
raise FileNotFoundError(path_or_fp)
else:
raise e
feats = torch.from_numpy(wav).float()
feats = self.postprocess(feats, curr_sample_rate)
ret = {"id": index, "source": feats}
if hasattr(self, 'batch_ids'):
ret['batch_id'] = self.batch_ids[index]
return ret
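# Not part of the original module: a usage sketch for FileAudioDataset.
# "train.tsv" is a hypothetical wav2vec 2.0 manifest whose first line is the
# audio root directory and whose remaining lines are "<relative_path>\t<num_samples>".
def _demo_file_audio_dataset(manifest="train.tsv"):
    dataset = FileAudioDataset(
        manifest_path=manifest,
        sample_rate=16000,
        max_sample_size=250000,
        min_sample_size=32000,
        pad=True,
        normalize=True,
    )
    indices = dataset.ordered_indices()                        # longest-first order
    batches, _ = dataset.batch_by_size(indices, max_tokens=1400000)
    return dataset.collater([dataset[i] for i in batches[0]])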
| DeepLearningExamples-master | PyTorch/SpeechRecognition/wav2vec2/common/fairseq/data/audio/raw_audio_dataset.py |
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import copy
import os
import random
import time
import torch
import amp_C
import numpy as np
import torch.distributed as dist
from apex.optimizers import FusedLAMB, FusedNovoGrad
from contextlib import suppress as empty_context
from common import helpers
from common.dali.data_loader import DaliDataLoader
from common.dataset import AudioDataset, get_data_loader
from common.features import BaseFeatures, FilterbankFeatures
from common.helpers import (Checkpointer, greedy_wer, num_weights, print_once,
process_evaluation_epoch)
from common.optimizers import AdamW, lr_policy, Novograd
from common.tb_dllogger import flush_log, init_log, log
from common.utils import BenchmarkStats
from quartznet import config
from quartznet.model import CTCLossNM, GreedyCTCDecoder, QuartzNet
def parse_args():
parser = argparse.ArgumentParser(description='QuartzNet')
training = parser.add_argument_group('training setup')
training.add_argument('--epochs', default=400, type=int,
help='Number of epochs for the entire training; influences the lr schedule')
training.add_argument("--warmup_epochs", default=0, type=int,
help='Initial epochs of increasing learning rate')
training.add_argument("--hold_epochs", default=0, type=int,
help='Constant max learning rate epochs after warmup')
training.add_argument('--epochs_this_job', default=0, type=int,
help=('Run for a number of epochs with no effect on the lr schedule.'
                                ' Useful for re-starting the training.'))
training.add_argument('--cudnn_benchmark', action='store_true', default=True,
help='Enable cudnn benchmark')
training.add_argument('--amp', '--fp16', action='store_true', default=False,
help='Use pytorch native mixed precision training')
training.add_argument('--seed', default=None, type=int, help='Random seed')
training.add_argument('--local_rank', '--local-rank', default=os.getenv('LOCAL_RANK', 0), type=int,
help='GPU id used for distributed training')
training.add_argument('--pre_allocate_range', default=None, type=int, nargs=2,
help='Warmup with batches of length [min, max] before training')
optim = parser.add_argument_group('optimization setup')
optim.add_argument('--gpu_batch_size', default=32, type=int,
help='Batch size for a single forward/backward pass. '
                            'The effective batch size is gpu_batch_size * grad_accumulation.')
optim.add_argument('--lr', default=1e-3, type=float,
help='Peak learning rate')
optim.add_argument("--min_lr", default=1e-5, type=float,
help='minimum learning rate')
optim.add_argument("--lr_policy", default='exponential', type=str,
choices=['exponential', 'legacy'], help='lr scheduler')
optim.add_argument("--lr_exp_gamma", default=0.99, type=float,
help='gamma factor for exponential lr scheduler')
optim.add_argument('--weight_decay', default=1e-3, type=float,
help='Weight decay for the optimizer')
optim.add_argument('--grad_accumulation', '--update-freq', default=1, type=int,
help='Number of accumulation steps')
optim.add_argument('--optimizer', default='novograd', type=str,
choices=['novograd', 'adamw', 'lamb98', 'fused_novograd'],
help='Optimization algorithm')
optim.add_argument('--ema', type=float, default=0.0,
help='Discount factor for exp averaging of model weights')
optim.add_argument('--multi_tensor_ema', action='store_true',
help='Use multi_tensor_apply for EMA')
io = parser.add_argument_group('feature and checkpointing setup')
io.add_argument('--dali_device', type=str, choices=['none', 'cpu', 'gpu'],
default='gpu', help='Use DALI pipeline for fast data processing')
io.add_argument('--resume', action='store_true',
help='Try to resume from last saved checkpoint.')
io.add_argument('--ckpt', default=None, type=str,
help='Path to a checkpoint for resuming training')
io.add_argument('--save_frequency', default=10, type=int,
help='Checkpoint saving frequency in epochs')
io.add_argument('--keep_milestones', default=[100, 200, 300], type=int, nargs='+',
help='Milestone checkpoints to keep from removing')
io.add_argument('--save_best_from', default=380, type=int,
help='Epoch on which to begin tracking best checkpoint (dev WER)')
io.add_argument('--eval_frequency', default=200, type=int,
help='Number of steps between evaluations on dev set')
io.add_argument('--log_frequency', default=25, type=int,
help='Number of steps between printing training stats')
io.add_argument('--prediction_frequency', default=100, type=int,
help='Number of steps between printing sample decodings')
io.add_argument('--model_config', type=str, required=True,
help='Path of the model configuration file')
io.add_argument('--train_manifests', type=str, required=True, nargs='+',
help='Paths of the training dataset manifest file')
io.add_argument('--val_manifests', type=str, required=True, nargs='+',
help='Paths of the evaluation datasets manifest files')
io.add_argument('--dataset_dir', required=True, type=str,
help='Root dir of dataset')
io.add_argument('--output_dir', type=str, required=True,
help='Directory for logs and checkpoints')
io.add_argument('--log_file', type=str, default=None,
help='Path to save the training logfile.')
io.add_argument('--benchmark_epochs_num', type=int, default=1,
help='Number of epochs accounted in final average throughput.')
io.add_argument('--override_config', type=str, action='append',
help='Overrides arbitrary config value.'
' Syntax: `--override_config nested.config.key=val`.')
return parser.parse_args()
def reduce_tensor(tensor, num_gpus):
rt = tensor.clone()
dist.all_reduce(rt, op=dist.ReduceOp.SUM)
return rt.true_divide(num_gpus)
def init_multi_tensor_ema(model, ema_model):
model_weights = list(model.state_dict().values())
ema_model_weights = list(ema_model.state_dict().values())
ema_overflow_buf = torch.cuda.IntTensor([0])
return model_weights, ema_model_weights, ema_overflow_buf
def apply_multi_tensor_ema(decay, model_weights, ema_model_weights, overflow_buf):
amp_C.multi_tensor_axpby(
65536, overflow_buf,
[ema_model_weights, model_weights, ema_model_weights],
decay, 1-decay, -1)
def apply_ema(model, ema_model, decay):
if not decay:
return
sd = getattr(model, 'module', model).state_dict()
for k, v in ema_model.state_dict().items():
v.copy_(decay * v + (1 - decay) * sd[k])
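# Not part of the original script: a small sketch of the EMA update semantics.
# The Linear layer is a stand-in model used only for illustration.
def _demo_apply_ema():
    model = torch.nn.Linear(4, 4)
    ema_model = copy.deepcopy(model)
    with torch.no_grad():
        model.weight.add_(1.0)      # pretend one training step changed the weights
    apply_ema(model, ema_model, decay=0.999)
    # EMA weights moved 0.1% of the way toward the updated model weights
    return ema_model.weight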
@torch.no_grad()
def evaluate(epoch, step, val_loader, val_feat_proc, labels, model,
ema_model, ctc_loss, greedy_decoder, use_amp, use_dali=False):
for model, subset in [(model, 'dev'), (ema_model, 'dev_ema')]:
if model is None:
continue
model.eval()
torch.cuda.synchronize()
start_time = time.time()
agg = {'losses': [], 'preds': [], 'txts': []}
for batch in val_loader:
if use_dali:
# with DALI, the data is already on GPU
feat, feat_lens, txt, txt_lens = batch
if val_feat_proc is not None:
feat, feat_lens = val_feat_proc(feat, feat_lens)
else:
batch = [t.cuda(non_blocking=True) for t in batch]
audio, audio_lens, txt, txt_lens = batch
feat, feat_lens = val_feat_proc(audio, audio_lens)
with torch.cuda.amp.autocast(enabled=use_amp):
log_probs, enc_lens = model(feat, feat_lens)
loss = ctc_loss(log_probs, txt, enc_lens, txt_lens)
pred = greedy_decoder(log_probs)
agg['losses'] += helpers.gather_losses([loss])
agg['preds'] += helpers.gather_predictions([pred], labels)
agg['txts'] += helpers.gather_transcripts([txt], [txt_lens], labels)
wer, loss = process_evaluation_epoch(agg)
torch.cuda.synchronize()
log(() if epoch is None else (epoch,),
step, subset, {'loss': loss, 'wer': 100.0 * wer,
'took': time.time() - start_time})
model.train()
return wer
def main():
args = parse_args()
assert(torch.cuda.is_available())
assert args.prediction_frequency % args.log_frequency == 0
torch.backends.cudnn.benchmark = args.cudnn_benchmark
# set up distributed training
multi_gpu = int(os.environ.get('WORLD_SIZE', 1)) > 1
if multi_gpu:
torch.cuda.set_device(args.local_rank)
dist.init_process_group(backend='nccl', init_method='env://')
world_size = dist.get_world_size()
print_once(f'Distributed training with {world_size} GPUs\n')
else:
world_size = 1
if args.seed is not None:
torch.manual_seed(args.seed + args.local_rank)
np.random.seed(args.seed + args.local_rank)
random.seed(args.seed + args.local_rank)
init_log(args)
cfg = config.load(args.model_config)
config.apply_config_overrides(cfg, args)
symbols = helpers.add_ctc_blank(cfg['labels'])
assert args.grad_accumulation >= 1
batch_size = args.gpu_batch_size
print_once('Setting up datasets...')
train_dataset_kw, train_features_kw = config.input(cfg, 'train')
val_dataset_kw, val_features_kw = config.input(cfg, 'val')
use_dali = args.dali_device in ('cpu', 'gpu')
if use_dali:
assert train_dataset_kw['ignore_offline_speed_perturbation'], \
"DALI doesn't support offline speed perturbation"
        # pad_to_max_duration is not supported by DALI - use simple padders instead
if train_features_kw['pad_to_max_duration']:
train_feat_proc = BaseFeatures(
pad_align=train_features_kw['pad_align'],
pad_to_max_duration=True,
max_duration=train_features_kw['max_duration'],
sample_rate=train_features_kw['sample_rate'],
window_size=train_features_kw['window_size'],
window_stride=train_features_kw['window_stride'])
train_features_kw['pad_to_max_duration'] = False
else:
train_feat_proc = None
if val_features_kw['pad_to_max_duration']:
val_feat_proc = BaseFeatures(
pad_align=val_features_kw['pad_align'],
pad_to_max_duration=True,
max_duration=val_features_kw['max_duration'],
sample_rate=val_features_kw['sample_rate'],
window_size=val_features_kw['window_size'],
window_stride=val_features_kw['window_stride'])
val_features_kw['pad_to_max_duration'] = False
else:
val_feat_proc = None
train_loader = DaliDataLoader(gpu_id=args.local_rank,
dataset_path=args.dataset_dir,
config_data=train_dataset_kw,
config_features=train_features_kw,
json_names=args.train_manifests,
batch_size=batch_size,
grad_accumulation_steps=args.grad_accumulation,
pipeline_type="train",
device_type=args.dali_device,
symbols=symbols)
val_loader = DaliDataLoader(gpu_id=args.local_rank,
dataset_path=args.dataset_dir,
config_data=val_dataset_kw,
config_features=val_features_kw,
json_names=args.val_manifests,
batch_size=batch_size,
pipeline_type="val",
device_type=args.dali_device,
symbols=symbols)
else:
train_dataset_kw, train_features_kw = config.input(cfg, 'train')
train_dataset = AudioDataset(args.dataset_dir,
args.train_manifests,
symbols,
**train_dataset_kw)
train_loader = get_data_loader(train_dataset,
batch_size,
multi_gpu=multi_gpu,
shuffle=True,
num_workers=4)
train_feat_proc = FilterbankFeatures(**train_features_kw)
val_dataset_kw, val_features_kw = config.input(cfg, 'val')
val_dataset = AudioDataset(args.dataset_dir,
args.val_manifests,
symbols,
**val_dataset_kw)
val_loader = get_data_loader(val_dataset,
batch_size,
multi_gpu=multi_gpu,
shuffle=False,
num_workers=4,
drop_last=False)
val_feat_proc = FilterbankFeatures(**val_features_kw)
dur = train_dataset.duration / 3600
dur_f = train_dataset.duration_filtered / 3600
nsampl = len(train_dataset)
print_once(f'Training samples: {nsampl} ({dur:.1f}h, '
f'filtered {dur_f:.1f}h)')
if train_feat_proc is not None:
train_feat_proc.cuda()
if val_feat_proc is not None:
val_feat_proc.cuda()
steps_per_epoch = len(train_loader) // args.grad_accumulation
# set up the model
model = QuartzNet(encoder_kw=config.encoder(cfg),
decoder_kw=config.decoder(cfg, n_classes=len(symbols)))
model.cuda()
ctc_loss = CTCLossNM(n_classes=len(symbols))
greedy_decoder = GreedyCTCDecoder()
print_once(f'Model size: {num_weights(model) / 10**6:.1f}M params\n')
# optimization
kw = {'lr': args.lr, 'weight_decay': args.weight_decay}
if args.optimizer == "novograd":
optimizer = Novograd(model.parameters(), **kw)
elif args.optimizer == "adamw":
optimizer = AdamW(model.parameters(), **kw)
elif args.optimizer == 'lamb98':
optimizer = FusedLAMB(model.parameters(), betas=(0.9, 0.98), eps=1e-9,
**kw)
elif args.optimizer == 'fused_novograd':
optimizer = FusedNovoGrad(model.parameters(), betas=(0.95, 0),
bias_correction=False, reg_inside_moment=True,
grad_averaging=False, **kw)
else:
raise ValueError(f'Invalid optimizer "{args.optimizer}"')
scaler = torch.cuda.amp.GradScaler(enabled=args.amp)
adjust_lr = lambda step, epoch, optimizer: lr_policy(
step, epoch, args.lr, optimizer, steps_per_epoch=steps_per_epoch,
warmup_epochs=args.warmup_epochs, hold_epochs=args.hold_epochs,
num_epochs=args.epochs, policy=args.lr_policy, min_lr=args.min_lr,
exp_gamma=args.lr_exp_gamma)
if args.ema > 0:
ema_model = copy.deepcopy(model)
else:
ema_model = None
if multi_gpu:
model = torch.nn.parallel.DistributedDataParallel(
model, device_ids=[args.local_rank], output_device=args.local_rank)
# load checkpoint
meta = {'best_wer': 10**6, 'start_epoch': 0}
checkpointer = Checkpointer(args.output_dir, 'QuartzNet',
args.keep_milestones)
if args.resume:
args.ckpt = checkpointer.last_checkpoint() or args.ckpt
if args.ckpt is not None:
checkpointer.load(args.ckpt, model, ema_model, optimizer, scaler, meta)
start_epoch = meta['start_epoch']
best_wer = meta['best_wer']
epoch = 1
step = start_epoch * steps_per_epoch + 1
# training loop
model.train()
if args.ema > 0.0:
mt_ema_params = init_multi_tensor_ema(model, ema_model)
# ema_model_weight_list, model_weight_list, overflow_buf_for_ema = ema_
# pre-allocate
if args.pre_allocate_range is not None:
n_feats = train_features_kw['n_filt']
pad_align = train_features_kw['pad_align']
a, b = args.pre_allocate_range
for n_frames in range(a, b + pad_align, pad_align):
print_once(f'Pre-allocation ({batch_size}x{n_feats}x{n_frames})...')
feat = torch.randn(batch_size, n_feats, n_frames, device='cuda')
feat_lens = torch.ones(batch_size, device='cuda').fill_(n_frames)
txt = torch.randint(high=len(symbols)-1, size=(batch_size, 100),
device='cuda')
txt_lens = torch.ones(batch_size, device='cuda').fill_(100)
with torch.cuda.amp.autocast(enabled=args.amp):
log_probs, enc_lens = model(feat, feat_lens)
del feat
loss = ctc_loss(log_probs, txt, enc_lens, txt_lens)
loss.backward()
model.zero_grad()
torch.cuda.empty_cache()
bmark_stats = BenchmarkStats()
for epoch in range(start_epoch + 1, args.epochs + 1):
if multi_gpu and not use_dali:
train_loader.sampler.set_epoch(epoch)
torch.cuda.synchronize()
epoch_start_time = time.time()
epoch_utts = 0
epoch_loss = 0
accumulated_batches = 0
for batch in train_loader:
if accumulated_batches == 0:
step_loss = 0
step_utts = 0
step_start_time = time.time()
if use_dali:
# with DALI, the data is already on GPU
feat, feat_lens, txt, txt_lens = batch
if train_feat_proc is not None:
feat, feat_lens = train_feat_proc(feat, feat_lens)
else:
batch = [t.cuda(non_blocking=True) for t in batch]
audio, audio_lens, txt, txt_lens = batch
feat, feat_lens = train_feat_proc(audio, audio_lens)
# Use context manager to prevent redundant accumulation of gradients
if (multi_gpu and accumulated_batches + 1 < args.grad_accumulation):
ctx = model.no_sync()
else:
ctx = empty_context()
with ctx:
with torch.cuda.amp.autocast(enabled=args.amp):
log_probs, enc_lens = model(feat, feat_lens)
loss = ctc_loss(log_probs, txt, enc_lens, txt_lens)
loss /= args.grad_accumulation
if multi_gpu:
reduced_loss = reduce_tensor(loss.data, world_size)
else:
reduced_loss = loss
if torch.isnan(reduced_loss).any():
print_once(f'WARNING: loss is NaN; skipping update')
continue
else:
step_loss += reduced_loss.item()
step_utts += batch[0].size(0) * world_size
epoch_utts += batch[0].size(0) * world_size
accumulated_batches += 1
scaler.scale(loss).backward()
if accumulated_batches % args.grad_accumulation == 0:
epoch_loss += step_loss
scaler.step(optimizer)
scaler.update()
adjust_lr(step, epoch, optimizer)
optimizer.zero_grad()
if args.ema > 0.0:
apply_multi_tensor_ema(args.ema, *mt_ema_params)
if step % args.log_frequency == 0:
preds = greedy_decoder(log_probs)
wer, pred_utt, ref = greedy_wer(preds, txt, txt_lens, symbols)
if step % args.prediction_frequency == 0:
print_once(f' Decoded: {pred_utt[:90]}')
print_once(f' Reference: {ref[:90]}')
step_time = time.time() - step_start_time
log((epoch, step % steps_per_epoch or steps_per_epoch, steps_per_epoch),
step, 'train',
{'loss': step_loss,
'wer': 100.0 * wer,
'throughput': step_utts / step_time,
'took': step_time,
'lrate': optimizer.param_groups[0]['lr']})
step_start_time = time.time()
if step % args.eval_frequency == 0:
wer = evaluate(epoch, step, val_loader, val_feat_proc,
symbols, model, ema_model, ctc_loss,
greedy_decoder, args.amp, use_dali)
if wer < best_wer and epoch >= args.save_best_from:
checkpointer.save(model, ema_model, optimizer, scaler,
epoch, step, best_wer, is_best=True)
best_wer = wer
step += 1
accumulated_batches = 0
# end of step
            # The DALI iterator needs to be exhausted;
            # if not using DALI, simulate drop_last=True with grad accumulation
if not use_dali and step > steps_per_epoch * epoch:
break
torch.cuda.synchronize()
epoch_time = time.time() - epoch_start_time
epoch_loss /= steps_per_epoch
log((epoch,), None, 'train_avg', {'throughput': epoch_utts / epoch_time,
'took': epoch_time,
'loss': epoch_loss})
bmark_stats.update(epoch_utts, epoch_time, epoch_loss)
if epoch % args.save_frequency == 0 or epoch in args.keep_milestones:
checkpointer.save(model, ema_model, optimizer, scaler, epoch, step,
best_wer)
if 0 < args.epochs_this_job <= epoch - start_epoch:
print_once(f'Finished after {args.epochs_this_job} epochs.')
break
# end of epoch
log((), None, 'train_avg', bmark_stats.get(args.benchmark_epochs_num))
evaluate(None, step, val_loader, val_feat_proc, symbols, model,
ema_model, ctc_loss, greedy_decoder, args.amp, use_dali)
if epoch == args.epochs:
checkpointer.save(model, ema_model, optimizer, scaler, epoch, step,
best_wer)
flush_log()
if __name__ == "__main__":
main()
| DeepLearningExamples-master | PyTorch/SpeechRecognition/QuartzNet/train.py |
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import math
import os
import random
import time
from heapq import nlargest
from itertools import chain, repeat
from pathlib import Path
from tqdm import tqdm
import dllogger
import torch
import numpy as np
import torch.distributed as distrib
from dllogger import JSONStreamBackend, StdOutBackend, Verbosity
from quartznet import config
from common import helpers
from common.dali.data_loader import DaliDataLoader
from common.dataset import (AudioDataset, FilelistDataset, get_data_loader,
SingleAudioDataset)
from common.features import BaseFeatures, FilterbankFeatures
from common.helpers import print_once, process_evaluation_epoch
from common.tb_dllogger import stdout_metric_format, unique_log_fpath
from nemo_dle_model_converter import load_nemo_ckpt
from quartznet.model import GreedyCTCDecoder, QuartzNet
def get_parser():
parser = argparse.ArgumentParser(description='QuartzNet inference')
parser.add_argument('--batch_size', default=16, type=int,
help='Data batch size')
parser.add_argument('--steps', default=0, type=int,
help='Eval this many steps for every worker')
parser.add_argument('--warmup_steps', default=0, type=int,
help='Burn-in period before measuring latencies')
parser.add_argument('--model_config', type=str, required=True,
help='Relative model config path given dataset folder')
parser.add_argument('--dataset_dir', type=str,
help='Absolute path to dataset folder')
parser.add_argument('--val_manifests', type=str, nargs='+',
help='Relative path to evaluation dataset manifest files')
parser.add_argument('--ckpt', default=None, type=str,
help='Path to model checkpoint')
parser.add_argument('--amp', '--fp16', action='store_true',
help='Use FP16 precision')
parser.add_argument('--cudnn_benchmark', action='store_true',
help='Enable cudnn benchmark')
parser.add_argument('--cpu', action='store_true',
help='Run inference on CPU')
parser.add_argument("--seed", default=None, type=int, help='Random seed')
parser.add_argument('--local_rank', default=os.getenv('LOCAL_RANK', 0),
type=int, help='GPU id used for distributed training')
io = parser.add_argument_group('feature and checkpointing setup')
io.add_argument('--dali_device', type=str, choices=['none', 'cpu', 'gpu'],
default='gpu', help='Use DALI pipeline for fast data processing')
io.add_argument('--save_predictions', type=str, default=None,
help='Save predictions in text form at this location')
io.add_argument('--save_logits', default=None, type=str,
help='Save output logits under specified path')
io.add_argument('--transcribe_wav', type=str,
help='Path to a single .wav file (16KHz)')
io.add_argument('--transcribe_filelist', type=str,
help='Path to a filelist with one .wav path per line')
io.add_argument('-o', '--output_dir', default='results/',
help='Output folder to save audio (file per phrase)')
io.add_argument('--log_file', type=str, default=None,
help='Path to a DLLogger log file')
io.add_argument('--ema', action='store_true',
help='Load averaged model weights')
io.add_argument('--torchscript', action='store_true',
help='Evaluate with a TorchScripted model')
io.add_argument('--torchscript_export', action='store_true',
help='Export the model with torch.jit to the output_dir')
io.add_argument('--override_config', type=str, action='append',
help='Overrides arbitrary config value.'
' Syntax: `--override_config nested.config.key=val`.')
return parser
def durs_to_percentiles(durations, ratios):
durations = np.asarray(durations) * 1000 # in ms
latency = durations
latency = latency[5:]
mean_latency = np.mean(latency)
latency_worst = nlargest(math.ceil((1 - min(ratios)) * len(latency)), latency)
latency_ranges = get_percentile(ratios, latency_worst, len(latency))
latency_ranges[0.5] = mean_latency
return latency_ranges
def get_percentile(ratios, arr, nsamples):
res = {}
for a in ratios:
idx = max(int(nsamples * (1 - a)), 0)
res[a] = arr[idx]
return res
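# Illustrative note (added, not part of the original file): durs_to_percentiles
# converts per-iteration durations to milliseconds, drops the first 5 warm-up
# samples and reports tail latencies: latency_ranges[0.99] approximates the
# 99th-percentile latency, latency_ranges[0.9] the 90th percentile, and
# latency_ranges[0.5] is replaced by the mean latency.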
def torchscript_export(data_loader, audio_processor, model, greedy_decoder,
output_dir, use_amp, use_conv_masks, model_config, device,
save):
audio_processor.to(device)
for batch in data_loader:
batch = [t.to(device, non_blocking=True) for t in batch]
audio, audio_len, _, _ = batch
feats, feat_lens = audio_processor(audio, audio_len)
break
print("\nExporting featurizer...")
print("\nNOTE: Dithering causes warnings about non-determinism.\n")
ts_feat = torch.jit.trace(audio_processor, (audio, audio_len))
print("\nExporting acoustic model...")
model(feats, feat_lens)
ts_acoustic = torch.jit.trace(model, (feats, feat_lens))
print("\nExporting decoder...")
log_probs = model(feats, feat_lens)
ts_decoder = torch.jit.script(greedy_decoder, log_probs)
print("\nJIT export complete.")
if save:
precision = "fp16" if use_amp else "fp32"
module_name = f'{os.path.basename(model_config)}_{precision}'
ts_feat.save(os.path.join(output_dir, module_name + "_feat.pt"))
ts_acoustic.save(os.path.join(output_dir, module_name + "_acoustic.pt"))
ts_decoder.save(os.path.join(output_dir, module_name + "_decoder.pt"))
return ts_feat, ts_acoustic, ts_decoder
def main():
parser = get_parser()
args = parser.parse_args()
log_fpath = args.log_file or str(Path(args.output_dir, 'nvlog_infer.json'))
dllogger.init(backends=[
JSONStreamBackend(Verbosity.DEFAULT, log_fpath, append=True),
JSONStreamBackend(Verbosity.DEFAULT, unique_log_fpath(log_fpath)),
StdOutBackend(Verbosity.VERBOSE, metric_format=stdout_metric_format)
])
[dllogger.log("PARAMETER", {k: v}) for k, v in vars(args).items()]
for step in ['DNN', 'data+DNN', 'data']:
for c in [0.99, 0.95, 0.9, 0.5]:
cs = 'avg' if c == 0.5 else f'{int(100*c)}%'
            dllogger.metadata(f'{step.lower()}_latency_' + str(c).replace('.', '_'),
{'name': f'{step} latency {cs}',
'format': ':>7.2f', 'unit': 'ms'})
dllogger.metadata(
'eval_wer', {'name': 'WER', 'format': ':>3.2f', 'unit': '%'})
if args.cpu:
device = torch.device('cpu')
else:
assert torch.cuda.is_available()
device = torch.device('cuda')
torch.backends.cudnn.benchmark = args.cudnn_benchmark
if args.seed is not None:
torch.manual_seed(args.seed + args.local_rank)
np.random.seed(args.seed + args.local_rank)
random.seed(args.seed + args.local_rank)
# set up distributed training
multi_gpu = not args.cpu and int(os.environ.get('WORLD_SIZE', 1)) > 1
if multi_gpu:
torch.cuda.set_device(args.local_rank)
distrib.init_process_group(backend='nccl', init_method='env://')
print_once(f'Inference with {distrib.get_world_size()} GPUs')
if args.ckpt is not None:
print(f'Loading the model from {args.ckpt} ...')
        print(f'{args.model_config} will be overridden.')
if args.ckpt.lower().endswith('.nemo'):
ckpt, cfg = load_nemo_ckpt(args.ckpt)
else:
cfg = config.load(args.model_config)
ckpt = torch.load(args.ckpt, map_location='cpu')
sd_key = 'ema_state_dict' if args.ema else 'state_dict'
if args.ema and 'ema_state_dict' not in ckpt:
print(f'WARNING: EMA weights are unavailable in {args.ckpt}.')
sd_key = 'state_dict'
state_dict = ckpt[sd_key]
else:
cfg = config.load(args.model_config)
state_dict = None
config.apply_config_overrides(cfg, args)
symbols = helpers.add_ctc_blank(cfg['labels'])
use_dali = args.dali_device in ('cpu', 'gpu')
dataset_kw, features_kw = config.input(cfg, 'val')
measure_perf = args.steps > 0
# dataset
if args.transcribe_wav or args.transcribe_filelist:
if use_dali:
print("DALI supported only with input .json files; disabling")
use_dali = False
assert not cfg['input_val']['audio_dataset'].get('pad_to_max_duration', False)
assert not (args.transcribe_wav and args.transcribe_filelist)
if args.transcribe_wav:
dataset = SingleAudioDataset(args.transcribe_wav)
else:
dataset = FilelistDataset(args.transcribe_filelist)
data_loader = get_data_loader(dataset,
batch_size=1,
multi_gpu=multi_gpu,
shuffle=False,
num_workers=0,
drop_last=(True if measure_perf else False))
_, features_kw = config.input(cfg, 'val')
feat_proc = FilterbankFeatures(**features_kw)
elif use_dali:
# pad_to_max_duration is not supported by DALI - have simple padders
if features_kw['pad_to_max_duration']:
feat_proc = BaseFeatures(
pad_align=features_kw['pad_align'],
pad_to_max_duration=True,
max_duration=features_kw['max_duration'],
sample_rate=features_kw['sample_rate'],
window_size=features_kw['window_size'],
window_stride=features_kw['window_stride'])
features_kw['pad_to_max_duration'] = False
else:
feat_proc = None
data_loader = DaliDataLoader(
gpu_id=args.local_rank or 0,
dataset_path=args.dataset_dir,
config_data=dataset_kw,
config_features=features_kw,
json_names=args.val_manifests,
batch_size=args.batch_size,
pipeline_type=("train" if measure_perf else "val"), # no drop_last
device_type=args.dali_device,
symbols=symbols)
else:
dataset = AudioDataset(args.dataset_dir,
args.val_manifests,
symbols,
**dataset_kw)
data_loader = get_data_loader(dataset,
args.batch_size,
multi_gpu=multi_gpu,
shuffle=False,
num_workers=4,
drop_last=False)
feat_proc = FilterbankFeatures(**features_kw)
model = QuartzNet(encoder_kw=config.encoder(cfg),
decoder_kw=config.decoder(cfg, n_classes=len(symbols)))
if state_dict is not None:
model.load_state_dict(state_dict, strict=True)
model.to(device)
model.eval()
if feat_proc is not None:
feat_proc.to(device)
feat_proc.eval()
if args.amp:
model = model.half()
if args.torchscript:
greedy_decoder = GreedyCTCDecoder()
feat_proc, model, greedy_decoder = torchscript_export(
data_loader, feat_proc, model, greedy_decoder, args.output_dir,
            use_amp=args.amp, use_conv_masks=True, model_config=args.model_config,
device=device, save=args.torchscript_export)
if multi_gpu:
model = torch.nn.parallel.DistributedDataParallel(
model, device_ids=[args.local_rank], output_device=args.local_rank)
agg = {'txts': [], 'preds': [], 'logits': []}
dur = {'data': [], 'dnn': [], 'data+dnn': []}
looped_loader = chain.from_iterable(repeat(data_loader))
greedy_decoder = GreedyCTCDecoder()
sync = lambda: torch.cuda.synchronize() if device.type == 'cuda' else None
steps = args.steps + args.warmup_steps or len(data_loader)
with torch.no_grad():
for it, batch in enumerate(tqdm(looped_loader, initial=1, total=steps)):
if use_dali:
feats, feat_lens, txt, txt_lens = batch
if feat_proc is not None:
feats, feat_lens = feat_proc(feats, feat_lens)
else:
batch = [t.to(device, non_blocking=True) for t in batch]
audio, audio_lens, txt, txt_lens = batch
feats, feat_lens = feat_proc(audio, audio_lens)
sync()
t1 = time.time()
if args.amp:
feats = feats.half()
if model.encoder.use_conv_masks:
log_probs, log_prob_lens = model(feats, feat_lens)
else:
log_probs = model(feats, feat_lens)
preds = greedy_decoder(log_probs)
sync()
t2 = time.time()
# burn-in period; wait for a new loader due to num_workers
if it >= 1 and (args.steps == 0 or it >= args.warmup_steps):
dur['data'].append(t1 - t0)
dur['dnn'].append(t2 - t1)
dur['data+dnn'].append(t2 - t0)
if txt is not None:
agg['txts'] += helpers.gather_transcripts([txt], [txt_lens],
symbols)
agg['preds'] += helpers.gather_predictions([preds], symbols)
agg['logits'].append(log_probs)
if it + 1 == steps:
break
sync()
t0 = time.time()
# communicate the results
if args.transcribe_wav:
for idx, p in enumerate(agg['preds']):
print_once(f'Prediction {idx+1: >3}: {p}')
elif args.transcribe_filelist:
pass
elif not multi_gpu or distrib.get_rank() == 0:
wer, _ = process_evaluation_epoch(agg)
dllogger.log(step=(), data={'eval_wer': 100 * wer})
if args.save_predictions:
with open(args.save_predictions, 'w') as f:
f.write('\n'.join(agg['preds']))
if args.save_logits:
logits = torch.cat(agg['logits'], dim=0).cpu()
torch.save(logits, args.save_logits)
# report timings
if len(dur['data']) >= 20:
ratios = [0.9, 0.95, 0.99]
for stage in dur:
lat = durs_to_percentiles(dur[stage], ratios)
for k in [0.99, 0.95, 0.9, 0.5]:
kk = str(k).replace('.', '_')
dllogger.log(step=(), data={f'{stage.lower()}_latency_{kk}': lat[k]})
else:
print_once('Not enough samples to measure latencies.')
if __name__ == "__main__":
main()
| DeepLearningExamples-master | PyTorch/SpeechRecognition/QuartzNet/inference.py |
import argparse
import io
import sys
import tarfile
from copy import deepcopy
from functools import reduce
from pathlib import Path
from subprocess import CalledProcessError, check_output
import torch
import yaml
import quartznet.config
from common import helpers
from common.features import FilterbankFeatures
from quartznet.config import load as load_yaml
from quartznet.model import QuartzNet, MaskedConv1d
# Corresponding DLE <-> NeMo config keys
cfg_key_map = {
("input_val", "audio_dataset", "sample_rate"): ("preprocessor", "sample_rate"),
("input_val", "filterbank_features", "dither"): ("preprocessor", "dither"),
("input_val", "filterbank_features", "frame_splicing"): ("preprocessor", "frame_splicing"),
("input_val", "filterbank_features", "n_fft"): ("preprocessor", "n_fft"),
("input_val", "filterbank_features", "n_filt"): ("preprocessor", "features"),
("input_val", "filterbank_features", "normalize"): ("preprocessor", "normalize"),
("input_val", "filterbank_features", "sample_rate"): ("preprocessor", "sample_rate"),
("input_val", "filterbank_features", "window"): ("preprocessor", "window"),
("input_val", "filterbank_features", "window_size"): ("preprocessor", "window_size"),
("input_val", "filterbank_features", "window_stride"): ("preprocessor", "window_stride"),
("labels",): ("decoder", "vocabulary"),
("quartznet", "decoder", "in_feats"): ("decoder", "feat_in"),
("quartznet", "encoder", "activation"): ("encoder", "activation"),
("quartznet", "encoder", "blocks"): ("encoder", "jasper"),
("quartznet", "encoder", "frame_splicing"): ("preprocessor", "frame_splicing"),
("quartznet", "encoder", "in_feats"): ("encoder", "feat_in"),
("quartznet", "encoder", "use_conv_masks"): ("encoder", "conv_mask"),
}
def load_nemo_ckpt(fpath):
"""Make a DeepLearningExamples state_dict and config from a .nemo file."""
try:
cmd = ['tar', 'Oxzf', fpath, './model_config.yaml']
nemo_cfg = yaml.safe_load(io.BytesIO(check_output(cmd)))
cmd = ['tar', 'Oxzf', fpath, './model_weights.ckpt']
ckpt = torch.load(io.BytesIO(check_output(cmd)), map_location="cpu")
except (FileNotFoundError, CalledProcessError):
print('WARNING: Could not uncompress with tar. '
'Falling back to the tarfile module (might take a few minutes).')
import tarfile
with tarfile.open(fpath, "r:gz") as tar:
f = tar.extractfile(tar.getmember("./model_config.yaml"))
nemo_cfg = yaml.safe_load(f)
f = tar.extractfile(tar.getmember("./model_weights.ckpt"))
ckpt = torch.load(f, map_location="cpu")
remap = lambda k: (k.replace("encoder.encoder", "encoder.layers")
.replace("decoder.decoder_layers", "decoder.layers")
.replace("conv.weight", "weight"))
dle_ckpt = {'state_dict': {remap(k): v for k, v in ckpt.items()
if "preproc" not in k}}
dle_cfg = config_from_nemo(nemo_cfg)
return dle_ckpt, dle_cfg
def save_nemo_ckpt(dle_ckpt, dle_cfg, dest_path):
"""Save a DeepLearningExamples model as a .nemo file."""
cfg = deepcopy(dle_cfg)
dle_ckpt = torch.load(dle_ckpt, map_location="cpu")["ema_state_dict"]
# Build a DLE model instance and fill with weights
symbols = helpers.add_ctc_blank(cfg['labels'])
enc_kw = quartznet.config.encoder(cfg)
dec_kw = quartznet.config.decoder(cfg, n_classes=len(symbols))
model = QuartzNet(enc_kw, dec_kw)
model.load_state_dict(dle_ckpt, strict=True)
    # Rename core modules, e.g., encoder.layers -> encoder.encoder
model.encoder._modules['encoder'] = model.encoder._modules.pop('layers')
model.decoder._modules['decoder_layers'] = model.decoder._modules.pop('layers')
# MaskedConv1d is made via composition in NeMo, and via inheritance in DLE
# Params for MaskedConv1d in NeMo have an additional '.conv.' infix
def rename_convs(module):
for name in list(module._modules.keys()):
submod = module._modules[name]
if isinstance(submod, MaskedConv1d):
module._modules[f'{name}.conv'] = module._modules.pop(name)
else:
rename_convs(submod)
rename_convs(model.encoder.encoder)
# Use FilterbankFeatures to calculate fbanks and store with model weights
feature_processor = FilterbankFeatures(
**dle_cfg['input_val']['filterbank_features'])
nemo_ckpt = model.state_dict()
nemo_ckpt["preprocessor.featurizer.fb"] = feature_processor.fb
nemo_ckpt["preprocessor.featurizer.window"] = feature_processor.window
nemo_cfg = config_to_nemo(dle_cfg)
# Prepare the directory for zipping
ckpt_files = dest_path / "ckpt_files"
ckpt_files.mkdir(exist_ok=True, parents=False)
with open(ckpt_files / "model_config.yaml", "w") as f:
yaml.dump(nemo_cfg, f)
torch.save(nemo_ckpt, ckpt_files / "model_weights.ckpt")
with tarfile.open(dest_path / "quartznet.nemo", "w:gz") as tar:
tar.add(ckpt_files, arcname="./")
def save_dle_ckpt(ckpt, cfg, dest_dir):
torch.save(ckpt, dest_dir / "model.pt")
with open(dest_dir / "model_config.yaml", "w") as f:
yaml.dump(cfg, f)
def set_nested_item(tgt, src, tgt_keys, src_keys):
"""Assigns nested dict keys, e.g., d1[a][b][c] = d2[e][f][g][h]."""
tgt_nested = reduce(lambda d, k: d[k], tgt_keys[:-1], tgt)
tgt_nested[tgt_keys[-1]] = reduce(lambda d, k: d[k], src_keys, src)
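# Illustrative example (added, not part of the original file):
#   tgt = {'a': {'b': {}}}; src = {'x': {'y': 5}}
#   set_nested_item(tgt, src, ('a', 'b', 'c'), ('x', 'y'))
#   # tgt is now {'a': {'b': {'c': 5}}}
# All keys of tgt_keys except the last, and all of src_keys, must already exist.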
def config_from_nemo(nemo_cfg):
"""Convert a DeepLearningExamples config to a NeMo format."""
dle_cfg = {
'name': 'QuartzNet',
'input_val': {
'audio_dataset': {
'normalize_transcripts': True,
},
'filterbank_features': {
'pad_align': 16,
},
},
'quartznet': {
'decoder': {},
'encoder': {},
},
}
for dle_keys, nemo_keys in cfg_key_map.items():
try:
set_nested_item(dle_cfg, nemo_cfg, dle_keys, nemo_keys)
except KeyError:
print(f'WARNING: Could not load config {nemo_keys} as {dle_keys}.')
    # mapping kernel_size is not expressible with cfg_key_map
for block in dle_cfg["quartznet"]["encoder"]["blocks"]:
block["kernel_size"] = block.pop("kernel")
return dle_cfg
def config_to_nemo(dle_cfg):
"""Convert a DeepLearningExamples config to a NeMo format."""
nemo_cfg = {
"target": "nemo.collections.asr.models.ctc_models.EncDecCTCModel",
"dropout": 0.0,
"preprocessor": {
"_target_": "nemo.collections.asr.modules.AudioToMelSpectrogramPreprocessor",
"stft_conv": False,
},
"encoder": {
"_target_": "nemo.collections.asr.modules.ConvASREncoder",
"jasper": {}
},
"decoder": {
"_target_": "nemo.collections.asr.modules.ConvASRDecoder",
},
}
for dle_keys, nemo_keys in cfg_key_map.items():
try:
set_nested_item(nemo_cfg, dle_cfg, nemo_keys, dle_keys)
except KeyError:
print(f"WARNING: Could not load config {dle_keys} as {nemo_keys}.")
nemo_cfg["sample_rate"] = nemo_cfg["preprocessor"]["sample_rate"]
nemo_cfg["repeat"] = nemo_cfg["encoder"]["jasper"][1]["repeat"]
nemo_cfg["separable"] = nemo_cfg["encoder"]["jasper"][1]["separable"]
nemo_cfg["labels"] = nemo_cfg["decoder"]["vocabulary"]
nemo_cfg["decoder"]["num_classes"] = len(nemo_cfg["decoder"]["vocabulary"])
    # mapping kernel_size is not expressible with cfg_key_map
for block in nemo_cfg["encoder"]["jasper"]:
if "kernel_size" in block:
block["kernel"] = block.pop("kernel_size")
return nemo_cfg
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="QuartzNet DLE <-> NeMo model converter.")
parser.add_argument("source_model", type=Path,
help="A DLE or NeMo QuartzNet model to be converted (.pt or .nemo, respectively)")
parser.add_argument("dest_dir", type=Path, help="Destination directory")
parser.add_argument("--dle_config_yaml", type=Path,
help="A DLE config .yaml file, required only to convert DLE -> NeMo")
args = parser.parse_args()
ext = args.source_model.suffix.lower()
if ext == ".nemo":
ckpt, cfg = load_nemo_ckpt(args.source_model)
save_dle_ckpt(ckpt, cfg, args.dest_dir)
elif ext == ".pt":
dle_cfg = load_yaml(args.dle_config_yaml)
save_nemo_ckpt(args.source_model, dle_cfg, args.dest_dir)
else:
raise ValueError(f"Unknown extension {ext}.")
    print('Converted successfully.')
| DeepLearningExamples-master | PyTorch/SpeechRecognition/QuartzNet/nemo_dle_model_converter.py |
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#!/usr/bin/env python
import argparse
import os
import glob
import multiprocessing
import json
import pandas as pd
from preprocessing_utils import parallel_preprocess
parser = argparse.ArgumentParser(description='Preprocess LibriSpeech.')
parser.add_argument('--input_dir', type=str, required=True,
help='LibriSpeech collection input dir')
parser.add_argument('--dest_dir', type=str, required=True,
help='Output dir')
parser.add_argument('--output_json', type=str, default='./',
help='name of the output json file.')
parser.add_argument('-s','--speed', type=float, nargs='*',
help='Speed perturbation ratio')
parser.add_argument('--target_sr', type=int, default=None,
help='Target sample rate. '
'defaults to the input sample rate')
parser.add_argument('--overwrite', action='store_true',
help='Overwrite file if exists')
parser.add_argument('--parallel', type=int, default=multiprocessing.cpu_count(),
help='Number of threads to use when processing audio files')
args = parser.parse_args()
args.input_dir = args.input_dir.rstrip('/')
args.dest_dir = args.dest_dir.rstrip('/')
def build_input_arr(input_dir):
txt_files = glob.glob(os.path.join(input_dir, '**', '*.trans.txt'),
recursive=True)
input_data = []
for txt_file in txt_files:
rel_path = os.path.relpath(txt_file, input_dir)
with open(txt_file) as fp:
for line in fp:
fname, _, transcript = line.partition(' ')
input_data.append(dict(input_relpath=os.path.dirname(rel_path),
input_fname=fname+'.flac',
transcript=transcript))
return input_data
print("[%s] Scaning input dir..." % args.output_json)
dataset = build_input_arr(input_dir=args.input_dir)
print("[%s] Converting audio files..." % args.output_json)
dataset = parallel_preprocess(dataset=dataset,
input_dir=args.input_dir,
dest_dir=args.dest_dir,
target_sr=args.target_sr,
speed=args.speed,
overwrite=args.overwrite,
parallel=args.parallel)
print("[%s] Generating json..." % args.output_json)
df = pd.DataFrame(dataset, dtype=object)
# Save json with python. df.to_json() produces backslashes in file paths
dataset = df.to_dict(orient='records')
with open(args.output_json, 'w') as fp:
json.dump(dataset, fp, indent=2)
| DeepLearningExamples-master | PyTorch/SpeechRecognition/QuartzNet/utils/convert_librispeech.py |
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#!/usr/bin/env python
import os
import argparse
import pandas as pd
from download_utils import download_file, md5_checksum, extract
parser = argparse.ArgumentParser(description='Download, verify and extract dataset files')
parser.add_argument('csv', type=str,
help='CSV file with urls and checksums to download.')
parser.add_argument('dest', type=str,
                    help='Download destination folder.')
parser.add_argument('-e', type=str, default=None,
                    help='Extraction destination folder. Defaults to download folder if not provided')
parser.add_argument('--skip_download', action='store_true',
help='Skip downloading the files')
parser.add_argument('--skip_checksum', action='store_true',
help='Skip checksum')
parser.add_argument('--skip_extract', action='store_true',
help='Skip extracting files')
args = parser.parse_args()
args.e = args.e or args.dest
df = pd.read_csv(args.csv, delimiter=',')
if not args.skip_download:
for url in df.url:
fname = url.split('/')[-1]
print("Downloading %s:" % fname)
download_file(url=url, dest_folder=args.dest, fname=fname)
else:
print("Skipping file download")
if not args.skip_checksum:
for index, row in df.iterrows():
url = row['url']
md5 = row['md5']
fname = url.split('/')[-1]
fpath = os.path.join(args.dest, fname)
print("Verifing %s: " % fname, end='')
ret = md5_checksum(fpath=fpath, target_hash=md5)
print("Passed" if ret else "Failed")
else:
print("Skipping checksum")
if not args.skip_extract:
for url in df.url:
fname = url.split('/')[-1]
fpath = os.path.join(args.dest, fname)
print("Decompressing %s:" % fpath)
extract(fpath=fpath, dest_folder=args.e)
else:
print("Skipping file extraction")
| DeepLearningExamples-master | PyTorch/SpeechRecognition/QuartzNet/utils/download_librispeech.py |
DeepLearningExamples-master | PyTorch/SpeechRecognition/QuartzNet/utils/__init__.py |
|
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#!/usr/bin/env python
import os
import multiprocessing
import functools
import sox
from tqdm import tqdm
def preprocess(data, input_dir, dest_dir, target_sr=None, speed=None,
overwrite=True):
speed = speed or []
speed.append(1)
    speed = list(set(speed)) # Make unique
input_fname = os.path.join(input_dir,
data['input_relpath'],
data['input_fname'])
input_sr = sox.file_info.sample_rate(input_fname)
target_sr = target_sr or input_sr
os.makedirs(os.path.join(dest_dir, data['input_relpath']), exist_ok=True)
output_dict = {}
output_dict['transcript'] = data['transcript'].lower().strip()
output_dict['files'] = []
fname = os.path.splitext(data['input_fname'])[0]
for s in speed:
output_fname = fname + '{}.wav'.format('' if s==1 else '-{}'.format(s))
output_fpath = os.path.join(dest_dir,
data['input_relpath'],
output_fname)
if not os.path.exists(output_fpath) or overwrite:
cbn = sox.Transformer().speed(factor=s).convert(target_sr)
cbn.build(input_fname, output_fpath)
file_info = sox.file_info.info(output_fpath)
file_info['fname'] = os.path.join(os.path.basename(dest_dir),
data['input_relpath'],
output_fname)
file_info['speed'] = s
output_dict['files'].append(file_info)
if s == 1:
file_info = sox.file_info.info(output_fpath)
output_dict['original_duration'] = file_info['duration']
output_dict['original_num_samples'] = file_info['num_samples']
return output_dict
def parallel_preprocess(dataset, input_dir, dest_dir, target_sr, speed, overwrite, parallel):
with multiprocessing.Pool(parallel) as p:
func = functools.partial(preprocess,
input_dir=input_dir, dest_dir=dest_dir,
target_sr=target_sr, speed=speed, overwrite=overwrite)
dataset = list(tqdm(p.imap(func, dataset), total=len(dataset)))
return dataset
| DeepLearningExamples-master | PyTorch/SpeechRecognition/QuartzNet/utils/preprocessing_utils.py |
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#!/usr/bin/env python
import hashlib
import requests
import os
import tarfile
import tqdm
def download_file(url, dest_folder, fname, overwrite=False):
fpath = os.path.join(dest_folder, fname)
if os.path.isfile(fpath):
if overwrite:
print("Overwriting existing file")
else:
print("File exists, skipping download.")
return
tmp_fpath = fpath + '.tmp'
if not os.path.exists(os.path.dirname(tmp_fpath)):
os.makedirs(os.path.dirname(tmp_fpath))
r = requests.get(url, stream=True)
file_size = int(r.headers['Content-Length'])
chunk_size = 1024 * 1024 # 1MB
total_chunks = int(file_size / chunk_size)
with open(tmp_fpath, 'wb') as fp:
content_iterator = r.iter_content(chunk_size=chunk_size)
chunks = tqdm.tqdm(content_iterator, total=total_chunks,
unit='MB', desc=fpath, leave=True)
for chunk in chunks:
fp.write(chunk)
os.rename(tmp_fpath, fpath)
def md5_checksum(fpath, target_hash):
file_hash = hashlib.md5()
with open(fpath, "rb") as fp:
for chunk in iter(lambda: fp.read(1024*1024), b""):
file_hash.update(chunk)
return file_hash.hexdigest() == target_hash
def extract(fpath, dest_folder):
if fpath.endswith('.tar.gz'):
mode = 'r:gz'
elif fpath.endswith('.tar'):
mode = 'r:'
else:
        raise IOError('fpath has unknown extension: %s' % fpath)
with tarfile.open(fpath, mode) as tar:
members = tar.getmembers()
for member in tqdm.tqdm(iterable=members, total=len(members), leave=True):
tar.extract(path=dest_folder, member=member)
| DeepLearningExamples-master | PyTorch/SpeechRecognition/QuartzNet/utils/download_utils.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import atexit
import glob
import os
import re
from pathlib import Path
import numpy as np
import torch
from torch.utils.tensorboard import SummaryWriter
import dllogger
from dllogger import StdOutBackend, JSONStreamBackend, Verbosity
tb_loggers = {}
class TBLogger:
"""
    Wraps TensorBoard's SummaryWriter and aggregates values before logging.

    dummies: log empty 'aaa' and 'zzz' plots to stretch the dashboard so the
    legend always fits for the other plots.
"""
def __init__(self, enabled, log_dir, name, interval=1, dummies=True):
self.enabled = enabled
self.interval = interval
self.cache = {}
if self.enabled:
self.summary_writer = SummaryWriter(
log_dir=os.path.join(log_dir, name),
flush_secs=120, max_queue=200)
atexit.register(self.summary_writer.close)
if dummies:
for key in ('aaa', 'zzz'):
self.summary_writer.add_scalar(key, 0.0, 1)
def log(self, step, data):
for k, v in data.items():
self.log_value(step, k, v.item() if type(v) is torch.Tensor else v)
def log_value(self, step, key, val, stat='mean'):
if self.enabled:
if key not in self.cache:
self.cache[key] = []
self.cache[key].append(val)
if len(self.cache[key]) == self.interval:
agg_val = getattr(np, stat)(self.cache[key])
self.summary_writer.add_scalar(key, agg_val, step)
del self.cache[key]
def log_grads(self, step, model):
if self.enabled:
norms = [p.grad.norm().item() for p in model.parameters()
if p.grad is not None]
for stat in ('max', 'min', 'mean'):
self.log_value(step, f'grad_{stat}', getattr(np, stat)(norms),
stat=stat)
def unique_log_fpath(fpath):
"""Have a unique log filename for every separate run"""
log_num = max([0] + [int(re.search("\.(\d+)", Path(f).suffix).group(1))
for f in glob.glob(f"{fpath}.*")])
return f"{fpath}.{log_num + 1}"
def stdout_step_format(step):
if isinstance(step, str):
return step
fields = []
if len(step) > 0:
fields.append("epoch {:>4}".format(step[0]))
if len(step) > 1:
fields.append("iter {:>4}".format(step[1]))
if len(step) > 2:
fields[-1] += "/{}".format(step[2])
return " | ".join(fields)
def stdout_metric_format(metric, metadata, value):
name = metadata.get("name", metric + " : ")
unit = metadata.get("unit", None)
format = f'{{{metadata.get("format", "")}}}'
fields = [name, format.format(value) if value is not None else value, unit]
fields = [f for f in fields if f is not None]
return "| " + " ".join(fields)
def init_log(args):
enabled = (args.local_rank == 0)
if enabled:
fpath = args.log_file or os.path.join(args.output_dir, 'nvlog.json')
backends = [
JSONStreamBackend(Verbosity.DEFAULT, fpath, append=True),
JSONStreamBackend(Verbosity.DEFAULT, unique_log_fpath(fpath)),
StdOutBackend(Verbosity.VERBOSE, step_format=stdout_step_format,
metric_format=stdout_metric_format)
]
else:
backends = []
dllogger.init(backends=backends)
dllogger.metadata("train_lrate", {"name": "lrate", "unit": None, "format": ":>3.2e"})
for id_, pref in [('train', ''), ('train_avg', 'avg train '),
('dev', ' avg dev '), ('dev_ema', ' EMA dev ')]:
dllogger.metadata(f"{id_}_loss",
{"name": f"{pref}loss", "unit": None, "format": ":>7.2f"})
dllogger.metadata(f"{id_}_wer",
{"name": f"{pref}wer", "unit": "%", "format": ":>6.2f"})
dllogger.metadata(f"{id_}_throughput",
{"name": f"{pref}utts/s", "unit": "samples/s", "format": ":>5.0f"})
dllogger.metadata(f"{id_}_took",
{"name": "took", "unit": "s", "format": ":>5.2f"})
tb_subsets = ['train', 'dev', 'dev_ema'] if args.ema else ['train', 'dev']
global tb_loggers
tb_loggers = {s: TBLogger(enabled, args.output_dir, name=s)
for s in tb_subsets}
log_parameters(vars(args), tb_subset='train')
def log(step, tb_total_steps=None, subset='train', data={}):
if tb_total_steps is not None:
tb_loggers[subset].log(tb_total_steps, data)
if subset != '':
data = {f'{subset}_{key}': val for key, val in data.items()}
dllogger.log(step, data=data)
def log_grads_tb(tb_total_steps, grads, tb_subset='train'):
tb_loggers[tb_subset].log_grads(tb_total_steps, grads)
def log_parameters(data, verbosity=0, tb_subset=None):
for k, v in data.items():
dllogger.log(step="PARAMETER", data={k: v}, verbosity=verbosity)
if tb_subset is not None and tb_loggers[tb_subset].enabled:
tb_data = {k: v for k, v in data.items()
if type(v) in (str, bool, int, float)}
tb_loggers[tb_subset].summary_writer.add_hparams(tb_data, {})
def flush_log():
dllogger.flush()
for tbl in tb_loggers.values():
if tbl.enabled:
tbl.summary_writer.flush()
| DeepLearningExamples-master | PyTorch/SpeechRecognition/QuartzNet/common/tb_dllogger.py |
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
def __levenshtein(a, b):
"""Calculates the Levenshtein distance between two sequences."""
n, m = len(a), len(b)
if n > m:
# Make sure n <= m, to use O(min(n,m)) space
a, b = b, a
n, m = m, n
current = list(range(n + 1))
for i in range(1, m + 1):
previous, current = current, [i] + [0] * n
for j in range(1, n + 1):
add, delete = previous[j] + 1, current[j - 1] + 1
change = previous[j - 1]
if a[j - 1] != b[i - 1]:
change = change + 1
current[j] = min(add, delete, change)
return current[n]
def word_error_rate(hypotheses, references):
"""Computes average Word Error Rate (WER) between two text lists."""
scores = 0
words = 0
len_diff = len(references) - len(hypotheses)
if len_diff > 0:
raise ValueError("Uneqal number of hypthoses and references: "
"{0} and {1}".format(len(hypotheses), len(references)))
elif len_diff < 0:
hypotheses = hypotheses[:len_diff]
for h, r in zip(hypotheses, references):
h_list = h.split()
r_list = r.split()
words += len(r_list)
scores += __levenshtein(h_list, r_list)
if words!=0:
wer = 1.0*scores/words
else:
wer = float('inf')
return wer, scores, words
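# Illustrative example (added, not part of the original file):
#   word_error_rate(hypotheses=["the cat sat"], references=["the dog sat"])
# returns (1/3, 1, 3): one substitution out of three reference words. Extra
# hypotheses beyond the number of references are truncated; fewer hypotheses
# than references raise a ValueError.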
| DeepLearningExamples-master | PyTorch/SpeechRecognition/QuartzNet/common/metrics.py |
# Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Mutes known and unrelated PyTorch warnings.
The warnings module keeps a list of filters. Importing it as late as possible
prevents its filters from being overridden.
"""
import warnings
# NGC 22.04-py3 container (PyTorch 1.12.0a0+bd13bc6)
warnings.filterwarnings(
"ignore",
message='positional arguments and argument "destination" are deprecated.'
' nn.Module.state_dict will not accept them in the future.')
# 22.08-py3 container
warnings.filterwarnings(
"ignore",
message="is_namedtuple is deprecated, please use the python checks")
| DeepLearningExamples-master | PyTorch/SpeechRecognition/QuartzNet/common/filter_warnings.py |
DeepLearningExamples-master | PyTorch/SpeechRecognition/QuartzNet/common/__init__.py |
|
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
import random
import librosa
import torch
import torch.nn as nn
class BaseFeatures(nn.Module):
"""Base class for GPU accelerated audio preprocessing."""
__constants__ = ["pad_align", "pad_to_max_duration", "max_len"]
def __init__(self, pad_align, pad_to_max_duration, max_duration,
sample_rate, window_size, window_stride, spec_augment=None,
cutout_augment=None):
super(BaseFeatures, self).__init__()
self.pad_align = pad_align
self.pad_to_max_duration = pad_to_max_duration
self.win_length = int(sample_rate * window_size) # frame size
self.hop_length = int(sample_rate * window_stride)
# Calculate maximum sequence length (# frames)
if pad_to_max_duration:
self.max_len = 1 + math.ceil(
(max_duration * sample_rate - self.win_length) / self.hop_length
)
if spec_augment is not None:
self.spec_augment = SpecAugment(**spec_augment)
else:
self.spec_augment = None
if cutout_augment is not None:
self.cutout_augment = CutoutAugment(**cutout_augment)
else:
self.cutout_augment = None
@torch.no_grad()
def calculate_features(self, audio, audio_lens):
return audio, audio_lens
def __call__(self, audio, audio_lens):
dtype = audio.dtype
audio = audio.float()
feat, feat_lens = self.calculate_features(audio, audio_lens)
feat = self.apply_padding(feat)
if self.cutout_augment is not None:
feat = self.cutout_augment(feat)
if self.spec_augment is not None:
feat = self.spec_augment(feat)
feat = feat.to(dtype)
return feat, feat_lens
def apply_padding(self, x):
if self.pad_to_max_duration:
x_size = max(x.size(-1), self.max_len)
else:
x_size = x.size(-1)
if self.pad_align > 0:
pad_amt = x_size % self.pad_align
else:
pad_amt = 0
padded_len = x_size + (self.pad_align - pad_amt if pad_amt > 0 else 0)
return nn.functional.pad(x, (0, padded_len - x.size(-1)))
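# Illustrative note (added, not part of the original file): with pad_align=16
# and pad_to_max_duration=False, a feature tensor whose last dimension is 100
# frames is zero-padded on the right to 112 frames (the next multiple of 16);
# with pad_to_max_duration=True it is first padded at least to self.max_len.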
class SpecAugment(nn.Module):
"""Spec augment. refer to https://arxiv.org/abs/1904.08779
"""
def __init__(self, freq_masks=0, min_freq=0, max_freq=10, time_masks=0,
min_time=0, max_time=10):
super(SpecAugment, self).__init__()
assert 0 <= min_freq <= max_freq
assert 0 <= min_time <= max_time
self.freq_masks = freq_masks
self.min_freq = min_freq
self.max_freq = max_freq
self.time_masks = time_masks
self.min_time = min_time
self.max_time = max_time
@torch.no_grad()
def forward(self, x):
sh = x.shape
mask = torch.zeros(x.shape, dtype=torch.bool, device=x.device)
for idx in range(sh[0]):
for _ in range(self.freq_masks):
w = torch.randint(self.min_freq, self.max_freq + 1, size=(1,)).item()
f0 = torch.randint(0, max(1, sh[1] - w), size=(1,))
mask[idx, f0:f0+w] = 1
for _ in range(self.time_masks):
w = torch.randint(self.min_time, self.max_time + 1, size=(1,)).item()
t0 = torch.randint(0, max(1, sh[2] - w), size=(1,))
mask[idx, :, t0:t0+w] = 1
return x.masked_fill(mask, 0)
class CutoutAugment(nn.Module):
"""Cutout. refer to https://arxiv.org/pdf/1708.04552.pdf
"""
def __init__(self, masks=0, min_freq=20, max_freq=20, min_time=5, max_time=5):
super(CutoutAugment, self).__init__()
assert 0 <= min_freq <= max_freq
assert 0 <= min_time <= max_time
self.masks = masks
self.min_freq = min_freq
self.max_freq = max_freq
self.min_time = min_time
self.max_time = max_time
@torch.no_grad()
def forward(self, x):
sh = x.shape
mask = torch.zeros(x.shape, dtype=torch.bool, device=x.device)
for idx in range(sh[0]):
for i in range(self.masks):
w = torch.randint(self.min_freq, self.max_freq + 1, size=(1,)).item()
h = torch.randint(self.min_time, self.max_time + 1, size=(1,)).item()
f0 = int(random.uniform(0, sh[1] - w))
t0 = int(random.uniform(0, sh[2] - h))
mask[idx, f0:f0+w, t0:t0+h] = 1
return x.masked_fill(mask, 0)
@torch.jit.script
def normalize_batch(x, seq_len, normalize_type: str):
# print ("normalize_batch: x, seq_len, shapes: ", x.shape, seq_len, seq_len.shape)
if normalize_type == "per_feature":
x_mean = torch.zeros((seq_len.shape[0], x.shape[1]), dtype=x.dtype,
device=x.device)
x_std = torch.zeros((seq_len.shape[0], x.shape[1]), dtype=x.dtype,
device=x.device)
for i in range(x.shape[0]):
x_mean[i, :] = x[i, :, :seq_len[i]].mean(dim=1)
x_std[i, :] = x[i, :, :seq_len[i]].std(dim=1)
# make sure x_std is not zero
x_std += 1e-5
return (x - x_mean.unsqueeze(2)) / x_std.unsqueeze(2)
elif normalize_type == "all_features":
x_mean = torch.zeros(seq_len.shape, dtype=x.dtype, device=x.device)
x_std = torch.zeros(seq_len.shape, dtype=x.dtype, device=x.device)
for i in range(x.shape[0]):
x_mean[i] = x[i, :, :int(seq_len[i])].mean()
x_std[i] = x[i, :, :int(seq_len[i])].std()
# make sure x_std is not zero
x_std += 1e-5
return (x - x_mean.view(-1, 1, 1)) / x_std.view(-1, 1, 1)
else:
return x
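# Illustrative note (added, not part of the original file): with
# normalize_type="per_feature", each utterance's filterbank channels are
# standardized with a mean and std computed only over its valid frames
# (frames beyond seq_len are ignored); "all_features" uses a single mean/std
# per utterance, and any other value returns x unchanged.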
@torch.jit.script
def splice_frames(x, frame_splicing: int):
""" Stacks frames together across feature dim
input is batch_size, feature_dim, num_frames
output is batch_size, feature_dim*frame_splicing, num_frames
"""
seq = [x]
    # TORCHSCRIPT: JIT doesn't like range(start, stop)
for n in range(frame_splicing - 1):
seq.append(torch.cat([x[:, :, :n + 1], x[:, :, n + 1:]], dim=2))
return torch.cat(seq, dim=1)
class FilterbankFeatures(BaseFeatures):
# For JIT, https://pytorch.org/docs/stable/jit.html#python-defined-constants
__constants__ = ["dither", "preemph", "n_fft", "hop_length", "win_length",
"log", "frame_splicing", "normalize"]
# torchscript: "center" removed due to a bug
def __init__(self, spec_augment=None, cutout_augment=None,
sample_rate=8000, window_size=0.02, window_stride=0.01,
window="hamming", normalize="per_feature", n_fft=None,
preemph=0.97, n_filt=64, lowfreq=0, highfreq=None, log=True,
dither=1e-5, pad_align=8, pad_to_max_duration=False,
max_duration=float('inf'), frame_splicing=1):
super(FilterbankFeatures, self).__init__(
pad_align=pad_align, pad_to_max_duration=pad_to_max_duration,
max_duration=max_duration, sample_rate=sample_rate,
window_size=window_size, window_stride=window_stride,
spec_augment=spec_augment, cutout_augment=cutout_augment)
torch_windows = {
'hann': torch.hann_window,
'hamming': torch.hamming_window,
'blackman': torch.blackman_window,
'bartlett': torch.bartlett_window,
'none': None,
}
self.n_fft = n_fft or 2 ** math.ceil(math.log2(self.win_length))
self.normalize = normalize
self.log = log
#TORCHSCRIPT: Check whether or not we need this
self.dither = dither
self.frame_splicing = frame_splicing
self.n_filt = n_filt
self.preemph = preemph
highfreq = highfreq or sample_rate / 2
window_fn = torch_windows.get(window, None)
window_tensor = window_fn(self.win_length,
periodic=False) if window_fn else None
filterbanks = torch.tensor(
librosa.filters.mel(sr=sample_rate, n_fft=self.n_fft, n_mels=n_filt,
fmin=lowfreq, fmax=highfreq),
dtype=torch.float).unsqueeze(0)
# torchscript
self.register_buffer("fb", filterbanks)
self.register_buffer("window", window_tensor)
def get_seq_len(self, seq_len):
return torch.ceil(seq_len.to(dtype=torch.float) / self.hop_length).to(
dtype=torch.int)
# TORCHSCRIPT: center removed due to bug
def stft(self, x):
spec = torch.stft(x, n_fft=self.n_fft, hop_length=self.hop_length,
win_length=self.win_length,
window=self.window.to(dtype=torch.float),
return_complex=True)
return torch.view_as_real(spec)
@torch.no_grad()
def calculate_features(self, x, seq_len):
dtype = x.dtype
seq_len = self.get_seq_len(seq_len)
# dither
if self.dither > 0:
x += self.dither * torch.randn_like(x)
# do preemphasis
if self.preemph is not None:
x = torch.cat(
(x[:, 0].unsqueeze(1), x[:, 1:] - self.preemph * x[:, :-1]), dim=1)
x = self.stft(x)
# get power spectrum
x = x.pow(2).sum(-1)
# dot with filterbank energies
x = torch.matmul(self.fb.to(x.dtype), x)
# log features if required
if self.log:
x = torch.log(x + 1e-20)
# frame splicing if required
if self.frame_splicing > 1:
raise ValueError('Frame splicing not supported')
# normalize if required
x = normalize_batch(x, seq_len, normalize_type=self.normalize)
# mask to zero any values beyond seq_len in batch,
# pad to multiple of `pad_align` (for efficiency)
max_len = x.size(-1)
mask = torch.arange(max_len, dtype=seq_len.dtype, device=x.device)
mask = mask.expand(x.size(0), max_len) >= seq_len.unsqueeze(1)
x = x.masked_fill(mask.unsqueeze(1), 0)
# TORCHSCRIPT: Is this del important? It breaks scripting
# del mask
return x.to(dtype), seq_len
| DeepLearningExamples-master | PyTorch/SpeechRecognition/QuartzNet/common/features.py |
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from torch.optim import Optimizer
import math
def lr_policy(step, epoch, initial_lr, optimizer, steps_per_epoch, warmup_epochs,
hold_epochs, num_epochs=None, policy='linear', min_lr=1e-5,
exp_gamma=None):
"""
    Learning rate schedule with linear warmup, an optional hold phase, and
    either a quadratic ('legacy') or per-epoch exponential decay; updates all
    parameter groups of `optimizer` in-place.
    Args:
        step / epoch: current iteration and epoch numbers
        initial_lr: base learning rate
        steps_per_epoch: number of iterations in an epoch
        warmup_epochs / hold_epochs: epochs of linear warmup / constant lr
        num_epochs: total number of epochs (required by the 'legacy' policy)
        policy: 'legacy' or 'exponential' (other values raise ValueError)
        min_lr: lower bound on the applied learning rate
        exp_gamma: per-epoch decay factor (required by 'exponential')
"""
warmup_steps = warmup_epochs * steps_per_epoch
hold_steps = hold_epochs * steps_per_epoch
if policy == 'legacy':
assert num_epochs is not None
tot_steps = num_epochs * steps_per_epoch
if step < warmup_steps:
a = (step + 1) / (warmup_steps + 1)
elif step < warmup_steps + hold_steps:
a = 1.0
else:
a = (((tot_steps - step)
/ (tot_steps - warmup_steps - hold_steps)) ** 2)
elif policy == 'exponential':
assert exp_gamma is not None
if step < warmup_steps:
a = (step + 1) / (warmup_steps + 1)
elif step < warmup_steps + hold_steps:
a = 1.0
else:
a = exp_gamma ** (epoch - warmup_epochs - hold_epochs)
else:
raise ValueError
new_lr = max(a * initial_lr, min_lr)
for param_group in optimizer.param_groups:
param_group['lr'] = new_lr
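# Illustrative example (added, not part of the original file): with
# policy='exponential', warmup_epochs=2, hold_epochs=140, initial_lr=1e-2 and
# exp_gamma=0.981, the learning rate ramps up linearly over the first two
# epochs, holds at 1e-2 for the next 140 epochs, and is then multiplied by
# 0.981 once per epoch, never dropping below min_lr.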
class AdamW(Optimizer):
"""Implements AdamW algorithm.
    It implements the decoupled weight decay proposed in
    `Decoupled Weight Decay Regularization` (https://arxiv.org/abs/1711.05101).
Arguments:
params (iterable): iterable of parameters to optimize or dicts defining
parameter groups
lr (float, optional): learning rate (default: 1e-3)
betas (Tuple[float, float], optional): coefficients used for computing
running averages of gradient and its square (default: (0.9, 0.999))
eps (float, optional): term added to the denominator to improve
numerical stability (default: 1e-8)
weight_decay (float, optional): weight decay (L2 penalty) (default: 0)
amsgrad (boolean, optional): whether to use the AMSGrad variant of this
algorithm from the paper `On the Convergence of Adam and Beyond`_
Adam: A Method for Stochastic Optimization:
https://arxiv.org/abs/1412.6980
On the Convergence of Adam and Beyond:
https://openreview.net/forum?id=ryQu7f-RZ
"""
def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8,
weight_decay=0, amsgrad=False):
if not 0.0 <= lr:
raise ValueError("Invalid learning rate: {}".format(lr))
if not 0.0 <= eps:
raise ValueError("Invalid epsilon value: {}".format(eps))
if not 0.0 <= betas[0] < 1.0:
raise ValueError("Invalid beta parameter at index 0: {}".format(betas[0]))
if not 0.0 <= betas[1] < 1.0:
raise ValueError("Invalid beta parameter at index 1: {}".format(betas[1]))
defaults = dict(lr=lr, betas=betas, eps=eps,
weight_decay=weight_decay, amsgrad=amsgrad)
super(AdamW, self).__init__(params, defaults)
def __setstate__(self, state):
super(AdamW, self).__setstate__(state)
for group in self.param_groups:
group.setdefault('amsgrad', False)
def step(self, closure=None):
"""Performs a single optimization step.
Arguments:
closure (callable, optional): A closure that reevaluates the model
and returns the loss.
"""
loss = None
if closure is not None:
loss = closure()
for group in self.param_groups:
for p in group['params']:
if p.grad is None:
continue
grad = p.grad.data
if grad.is_sparse:
raise RuntimeError('Adam does not support sparse gradients, please consider SparseAdam instead')
amsgrad = group['amsgrad']
state = self.state[p]
# State initialization
if len(state) == 0:
state['step'] = 0
# Exponential moving average of gradient values
state['exp_avg'] = torch.zeros_like(p.data)
# Exponential moving average of squared gradient values
state['exp_avg_sq'] = torch.zeros_like(p.data)
if amsgrad:
# Maintains max of all exp. moving avg. of sq. grad. values
state['max_exp_avg_sq'] = torch.zeros_like(p.data)
exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']
if amsgrad:
max_exp_avg_sq = state['max_exp_avg_sq']
beta1, beta2 = group['betas']
state['step'] += 1
# Decay the first and second moment running average coefficient
exp_avg.mul_(beta1).add_(grad, alpha=1 - beta1)
                exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1 - beta2)
if amsgrad:
# Maintains the maximum of all 2nd moment running avg. till now
torch.max(max_exp_avg_sq, exp_avg_sq, out=max_exp_avg_sq)
# Use the max. for normalizing running avg. of gradient
denom = max_exp_avg_sq.sqrt().add_(group['eps'])
else:
denom = exp_avg_sq.sqrt().add_(group['eps'])
bias_correction1 = 1 - beta1 ** state['step']
bias_correction2 = 1 - beta2 ** state['step']
step_size = group['lr'] * math.sqrt(bias_correction2) / bias_correction1
                p.data.add_(torch.mul(p.data, group['weight_decay']).addcdiv_(exp_avg, denom, value=1), alpha=-step_size)
return loss
class Novograd(Optimizer):
"""
Implements Novograd algorithm.
Args:
params (iterable): iterable of parameters to optimize or dicts defining
parameter groups
lr (float, optional): learning rate (default: 1e-3)
betas (Tuple[float, float], optional): coefficients used for computing
running averages of gradient and its square (default: (0.95, 0))
eps (float, optional): term added to the denominator to improve
numerical stability (default: 1e-8)
weight_decay (float, optional): weight decay (L2 penalty) (default: 0)
grad_averaging: gradient averaging
amsgrad (boolean, optional): whether to use the AMSGrad variant of this
algorithm from the paper `On the Convergence of Adam and Beyond`_
(default: False)
"""
def __init__(self, params, lr=1e-3, betas=(0.95, 0), eps=1e-8,
weight_decay=0, grad_averaging=False, amsgrad=False):
if not 0.0 <= lr:
raise ValueError("Invalid learning rate: {}".format(lr))
if not 0.0 <= eps:
raise ValueError("Invalid epsilon value: {}".format(eps))
if not 0.0 <= betas[0] < 1.0:
raise ValueError("Invalid beta parameter at index 0: {}".format(betas[0]))
if not 0.0 <= betas[1] < 1.0:
raise ValueError("Invalid beta parameter at index 1: {}".format(betas[1]))
defaults = dict(lr=lr, betas=betas, eps=eps,
weight_decay=weight_decay,
grad_averaging=grad_averaging,
amsgrad=amsgrad)
super(Novograd, self).__init__(params, defaults)
def __setstate__(self, state):
super(Novograd, self).__setstate__(state)
for group in self.param_groups:
group.setdefault('amsgrad', False)
def step(self, closure=None):
"""Performs a single optimization step.
Arguments:
closure (callable, optional): A closure that reevaluates the model
and returns the loss.
"""
loss = None
if closure is not None:
loss = closure()
for group in self.param_groups:
for p in group['params']:
if p.grad is None:
continue
grad = p.grad.data
if grad.is_sparse:
raise RuntimeError('Sparse gradients are not supported.')
amsgrad = group['amsgrad']
state = self.state[p]
# State initialization
if len(state) == 0:
state['step'] = 0
# Exponential moving average of gradient values
state['exp_avg'] = torch.zeros_like(p.data)
# Exponential moving average of squared gradient values
state['exp_avg_sq'] = torch.zeros([]).to(state['exp_avg'].device)
if amsgrad:
# Maintains max of all exp. moving avg. of sq. grad. values
state['max_exp_avg_sq'] = torch.zeros([]).to(state['exp_avg'].device)
exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']
if amsgrad:
max_exp_avg_sq = state['max_exp_avg_sq']
beta1, beta2 = group['betas']
state['step'] += 1
norm = torch.sum(torch.pow(grad, 2))
if exp_avg_sq == 0:
exp_avg_sq.copy_(norm)
else:
exp_avg_sq.mul_(beta2).add_(norm, alpha=1 - beta2)
if amsgrad:
# Maintains the maximum of all 2nd moment running avg. till now
torch.max(max_exp_avg_sq, exp_avg_sq, out=max_exp_avg_sq)
# Use the max. for normalizing running avg. of gradient
denom = max_exp_avg_sq.sqrt().add_(group['eps'])
else:
denom = exp_avg_sq.sqrt().add_(group['eps'])
grad.div_(denom)
if group['weight_decay'] != 0:
grad.add_(p.data, alpha=group['weight_decay'])
if group['grad_averaging']:
grad.mul_(1 - beta1)
exp_avg.mul_(beta1).add_(grad)
p.data.add_(exp_avg, alpha=-group['lr'])
return loss
| DeepLearningExamples-master | PyTorch/SpeechRecognition/QuartzNet/common/optimizers.py |
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
from pathlib import Path
import numpy as np
import torch
from torch.utils.data import Dataset, DataLoader
from torch.utils.data.distributed import DistributedSampler
from .audio import (audio_from_file, AudioSegment, GainPerturbation,
ShiftPerturbation, SpeedPerturbation)
from .text import _clean_text, punctuation_map
def normalize_string(s, labels, punct_map):
"""Normalizes string.
Example:
'call me at 8:00 pm!' -> 'call me at eight zero pm'
"""
labels = set(labels)
try:
text = _clean_text(s, ["english_cleaners"], punct_map).strip()
return ''.join([tok for tok in text if all(t in labels for t in tok)])
    except Exception:
print(f"WARNING: Normalizing failed: {s}")
return None
class FilelistDataset(Dataset):
def __init__(self, filelist_fpath):
self.samples = [line.strip() for line in open(filelist_fpath, 'r')]
def __len__(self):
return len(self.samples)
def __getitem__(self, index):
audio, audio_len = audio_from_file(self.samples[index])
return (audio.squeeze(0), audio_len, torch.LongTensor([0]),
torch.LongTensor([0]))
class SingleAudioDataset(FilelistDataset):
def __init__(self, audio_fpath):
self.samples = [audio_fpath]
class AudioDataset(Dataset):
def __init__(self, data_dir, manifest_fpaths, labels,
sample_rate=16000, min_duration=0.1, max_duration=float("inf"),
pad_to_max_duration=False, max_utts=0, normalize_transcripts=True,
sort_by_duration=False, trim_silence=False,
speed_perturbation=None, gain_perturbation=None,
shift_perturbation=None, ignore_offline_speed_perturbation=False):
"""Loads audio, transcript and durations listed in a .json file.
Args:
data_dir: absolute path to dataset folder
            manifest_fpaths: relative path(s) from the dataset folder
                to the manifest .json files. Can be comma-separated paths.
labels (str): all possible output symbols
min_duration (int): skip audio shorter than threshold
max_duration (int): skip audio longer than threshold
pad_to_max_duration (bool): pad all sequences to max_duration
max_utts (int): limit number of utterances
normalize_transcripts (bool): normalize transcript text
sort_by_duration (bool): sort sequences by increasing duration
trim_silence (bool): trim leading and trailing silence from audio
            ignore_offline_speed_perturbation (bool): discard precomputed
                speed-perturbed copies and keep only original-speed audio files
Returns:
tuple of Tensors
"""
self.data_dir = data_dir
self.labels = labels
self.labels_map = dict([(labels[i], i) for i in range(len(labels))])
self.punctuation_map = punctuation_map(labels)
self.blank_index = len(labels)
self.pad_to_max_duration = pad_to_max_duration
self.sort_by_duration = sort_by_duration
self.max_utts = max_utts
self.normalize_transcripts = normalize_transcripts
self.ignore_offline_speed_perturbation = ignore_offline_speed_perturbation
self.min_duration = min_duration
self.max_duration = max_duration
self.trim_silence = trim_silence
self.sample_rate = sample_rate
perturbations = []
if speed_perturbation is not None:
perturbations.append(SpeedPerturbation(**speed_perturbation))
if gain_perturbation is not None:
perturbations.append(GainPerturbation(**gain_perturbation))
if shift_perturbation is not None:
perturbations.append(ShiftPerturbation(**shift_perturbation))
self.perturbations = perturbations
self.max_duration = max_duration
self.samples = []
self.duration = 0.0
self.duration_filtered = 0.0
for fpath in manifest_fpaths:
self._load_json_manifest(fpath)
if sort_by_duration:
self.samples = sorted(self.samples, key=lambda s: s['duration'])
def __getitem__(self, index):
s = self.samples[index]
rn_indx = np.random.randint(len(s['audio_filepath']))
duration = s['audio_duration'][rn_indx] if 'audio_duration' in s else 0
offset = s.get('offset', 0)
segment = AudioSegment(
s['audio_filepath'][rn_indx], target_sr=self.sample_rate,
offset=offset, duration=duration, trim=self.trim_silence)
for p in self.perturbations:
p.maybe_apply(segment, self.sample_rate)
segment = torch.FloatTensor(segment.samples)
return (segment,
torch.tensor(segment.shape[0]).int(),
torch.tensor(s["transcript"]),
torch.tensor(len(s["transcript"])).int())
def __len__(self):
return len(self.samples)
def _load_json_manifest(self, fpath):
        with open(fpath, "r", encoding="utf-8") as manifest_file:
            manifest = json.load(manifest_file)
        for s in manifest:
if self.pad_to_max_duration and not self.ignore_offline_speed_perturbation:
# require all perturbed samples to be < self.max_duration
s_max_duration = max(f['duration'] for f in s['files'])
else:
                # otherwise we allow perturbed copies to exceed self.max_duration
s_max_duration = s['original_duration']
s['duration'] = s.pop('original_duration')
if not (self.min_duration <= s_max_duration <= self.max_duration):
self.duration_filtered += s['duration']
continue
# Prune and normalize according to transcript
tr = (s.get('transcript', None) or
self.load_transcript(s['text_filepath']))
if not isinstance(tr, str):
print(f'WARNING: Skipped sample (transcript not a str): {tr}.')
self.duration_filtered += s['duration']
continue
if self.normalize_transcripts:
tr = normalize_string(tr, self.labels, self.punctuation_map)
s["transcript"] = self.to_vocab_inds(tr)
files = s.pop('files')
if self.ignore_offline_speed_perturbation:
files = [f for f in files if f['speed'] == 1.0]
s['audio_duration'] = [f['duration'] for f in files]
s['audio_filepath'] = [str(Path(self.data_dir, f['fname']))
for f in files]
self.samples.append(s)
self.duration += s['duration']
if self.max_utts > 0 and len(self.samples) >= self.max_utts:
print(f'Reached max_utts={self.max_utts}. Finished parsing {fpath}.')
break
def load_transcript(self, transcript_path):
with open(transcript_path, 'r', encoding="utf-8") as transcript_file:
transcript = transcript_file.read().replace('\n', '')
return transcript
def to_vocab_inds(self, transcript):
chars = [self.labels_map.get(x, self.blank_index) for x in list(transcript)]
transcript = list(filter(lambda x: x != self.blank_index, chars))
return transcript
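# Illustrative sketch with a made-up label set; it mirrors the mapping done by
# labels_map / to_vocab_inds above rather than calling the dataset class.
def _example_vocab_mapping():
    """Characters outside the label set map to the blank index and are dropped."""
    labels = [' ', 'a', 'b', 'c']
    labels_map = {label: i for i, label in enumerate(labels)}
    blank_index = len(labels)  # 4
    chars = [labels_map.get(ch, blank_index) for ch in "ab cx"]
    # chars == [1, 2, 0, 3, 4]; filtering blanks yields [1, 2, 0, 3]
    return [c for c in chars if c != blank_index]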
def collate_fn(batch):
bs = len(batch)
max_len = lambda l, idx: max(el[idx].size(0) for el in l)
audio = torch.zeros(bs, max_len(batch, 0))
audio_lens = torch.zeros(bs, dtype=torch.int32)
transcript = torch.zeros(bs, max_len(batch, 2))
transcript_lens = torch.zeros(bs, dtype=torch.int32)
for i, sample in enumerate(batch):
audio[i].narrow(0, 0, sample[0].size(0)).copy_(sample[0])
audio_lens[i] = sample[1]
transcript[i].narrow(0, 0, sample[2].size(0)).copy_(sample[2])
transcript_lens[i] = sample[3]
return audio, audio_lens, transcript, transcript_lens
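# Illustrative usage sketch for collate_fn; the sample lengths and transcript
# indices below are made up.
def _example_collate():
    """Pad two variable-length dummy samples into a single zero-padded batch."""
    batch = [
        (torch.randn(16000), torch.tensor(16000, dtype=torch.int32),
         torch.tensor([1, 2, 3]), torch.tensor(3, dtype=torch.int32)),
        (torch.randn(8000), torch.tensor(8000, dtype=torch.int32),
         torch.tensor([4, 5]), torch.tensor(2, dtype=torch.int32)),
    ]
    audio, audio_lens, transcript, transcript_lens = collate_fn(batch)
    # audio: (2, 16000) zero-padded; transcript: (2, 3) zero-padded
    return audio, audio_lens, transcript, transcript_lens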
def get_data_loader(dataset, batch_size, multi_gpu=True, shuffle=True,
drop_last=True, num_workers=4):
kw = {'dataset': dataset, 'collate_fn': collate_fn,
'num_workers': num_workers, 'pin_memory': True}
if multi_gpu:
loader_shuffle = False
sampler = DistributedSampler(dataset, shuffle=shuffle)
else:
loader_shuffle = shuffle
sampler = None
return DataLoader(batch_size=batch_size, drop_last=drop_last,
sampler=sampler, shuffle=loader_shuffle, **kw)
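# Illustrative usage sketch; the filelist path, batch size and worker count are
# hypothetical, and multi_gpu is disabled so no process group is required.
def _example_data_loader(filelist_fpath):
    """Build a single-GPU DataLoader over a filelist dataset."""
    dataset = FilelistDataset(filelist_fpath)
    return get_data_loader(dataset, batch_size=8, multi_gpu=False,
                           shuffle=False, drop_last=False, num_workers=2)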
| DeepLearningExamples-master | PyTorch/SpeechRecognition/QuartzNet/common/dataset.py |
import numpy as np
class BenchmarkStats:
""" Tracks statistics used for benchmarking. """
def __init__(self):
self.utts = []
self.times = []
self.losses = []
def update(self, utts, times, losses):
self.utts.append(utts)
self.times.append(times)
self.losses.append(losses)
def get(self, n_epochs):
throughput = sum(self.utts[-n_epochs:]) / sum(self.times[-n_epochs:])
return {'throughput': throughput, 'benchmark_epochs_num': n_epochs,
'loss': np.mean(self.losses[-n_epochs:])}
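# Illustrative usage sketch with made-up numbers: one update() per epoch, then a
# summary over the last two epochs.
def _example_benchmark_stats():
    stats = BenchmarkStats()
    stats.update(utts=1024, times=12.5, losses=0.91)
    stats.update(utts=1024, times=12.1, losses=0.87)
    # -> {'throughput': ~83.3, 'benchmark_epochs_num': 2, 'loss': ~0.89}
    return stats.get(n_epochs=2)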
| DeepLearningExamples-master | PyTorch/SpeechRecognition/QuartzNet/common/utils.py |
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import random
import soundfile as sf
import librosa
import torch
import numpy as np
import sox
def audio_from_file(file_path, offset=0, duration=0, trim=False, target_sr=16000):
audio = AudioSegment(file_path, target_sr=target_sr, int_values=False,
offset=offset, duration=duration, trim=trim)
samples = torch.tensor(audio.samples, dtype=torch.float).cuda()
num_samples = torch.tensor(samples.shape[0]).int().cuda()
return (samples.unsqueeze(0), num_samples.unsqueeze(0))
class AudioSegment(object):
"""Monaural audio segment abstraction.
:param samples: Audio samples [num_samples x num_channels].
:type samples: ndarray.float32
:param sample_rate: Audio sample rate.
:type sample_rate: int
:raises TypeError: If the sample data type is not float or int.
"""
def __init__(self, filename, target_sr=None, int_values=False, offset=0,
duration=0, trim=False, trim_db=60):
"""Create audio segment from samples.
Samples are converted to float32 internally, with int scaled to [-1, 1].
Load a file supported by librosa and return as an AudioSegment.
:param filename: path of file to load
:param target_sr: the desired sample rate
:param int_values: if true, load samples as 32-bit integers
:param offset: offset in seconds when loading audio
:param duration: duration in seconds when loading audio
:return: numpy array of samples
"""
with sf.SoundFile(filename, 'r') as f:
dtype = 'int32' if int_values else 'float32'
sample_rate = f.samplerate
if offset > 0:
f.seek(int(offset * sample_rate))
if duration > 0:
samples = f.read(int(duration * sample_rate), dtype=dtype)
else:
samples = f.read(dtype=dtype)
samples = samples.transpose()
samples = self._convert_samples_to_float32(samples)
if target_sr is not None and target_sr != sample_rate:
samples = librosa.resample(samples, orig_sr=sample_rate,
target_sr=target_sr)
sample_rate = target_sr
if trim:
samples, _ = librosa.effects.trim(samples, top_db=trim_db)
self._samples = samples
self._sample_rate = sample_rate
if self._samples.ndim >= 2:
self._samples = np.mean(self._samples, 0)
def __eq__(self, other):
"""Return whether two objects are equal."""
if type(other) is not type(self):
return False
if self._sample_rate != other._sample_rate:
return False
if self._samples.shape != other._samples.shape:
return False
if np.any(self.samples != other._samples):
return False
return True
def __ne__(self, other):
"""Return whether two objects are unequal."""
return not self.__eq__(other)
def __str__(self):
"""Return human-readable representation of segment."""
return ("%s: num_samples=%d, sample_rate=%d, duration=%.2fsec, "
"rms=%.2fdB" % (type(self), self.num_samples, self.sample_rate,
self.duration, self.rms_db))
@staticmethod
def _convert_samples_to_float32(samples):
"""Convert sample type to float32.
Audio sample type is usually integer or float-point.
Integers will be scaled to [-1, 1] in float32.
"""
float32_samples = samples.astype('float32')
if samples.dtype in np.sctypes['int']:
bits = np.iinfo(samples.dtype).bits
float32_samples *= (1. / 2 ** (bits - 1))
elif samples.dtype in np.sctypes['float']:
pass
else:
raise TypeError("Unsupported sample type: %s." % samples.dtype)
return float32_samples
@property
def samples(self):
return self._samples.copy()
@property
def sample_rate(self):
return self._sample_rate
@property
def num_samples(self):
return self._samples.shape[0]
@property
def duration(self):
return self._samples.shape[0] / float(self._sample_rate)
@property
def rms_db(self):
mean_square = np.mean(self._samples ** 2)
return 10 * np.log10(mean_square)
def gain_db(self, gain):
self._samples *= 10. ** (gain / 20.)
def pad(self, pad_size, symmetric=False):
"""Add zero padding to the sample.
The pad size is given in number of samples. If symmetric=True,
`pad_size` will be added to both sides. If false, `pad_size` zeros
will be added only to the end.
"""
self._samples = np.pad(self._samples,
(pad_size if symmetric else 0, pad_size),
mode='constant')
def subsegment(self, start_time=None, end_time=None):
"""Cut the AudioSegment between given boundaries.
Note that this is an in-place transformation.
:param start_time: Beginning of subsegment in seconds.
:type start_time: float
:param end_time: End of subsegment in seconds.
:type end_time: float
:raise ValueError: If start_time or end_time is incorrectly set, e.g. out
of bounds in time.
"""
start_time = 0.0 if start_time is None else start_time
end_time = self.duration if end_time is None else end_time
if start_time < 0.0:
start_time = self.duration + start_time
if end_time < 0.0:
end_time = self.duration + end_time
if start_time < 0.0:
raise ValueError("The slice start position (%f s) is out of "
"bounds." % start_time)
if end_time < 0.0:
raise ValueError("The slice end position (%f s) is out of bounds." %
end_time)
if start_time > end_time:
raise ValueError("The slice start position (%f s) is later than "
"the end position (%f s)." % (start_time, end_time))
if end_time > self.duration:
raise ValueError("The slice end position (%f s) is out of bounds "
"(> %f s)" % (end_time, self.duration))
start_sample = int(round(start_time * self._sample_rate))
end_sample = int(round(end_time * self._sample_rate))
self._samples = self._samples[start_sample:end_sample]
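# Illustrative usage sketch over a hypothetical wav file path; the gain, slice
# boundaries and pad size are made-up values.
def _example_audio_segment(wav_path):
    """Load at 16 kHz, apply -3 dB gain, keep up to 1 s, then pad both sides."""
    segment = AudioSegment(wav_path, target_sr=16000, trim=False)
    segment.gain_db(-3.0)
    segment.subsegment(start_time=0.0, end_time=min(1.0, segment.duration))
    segment.pad(pad_size=160, symmetric=True)  # 10 ms of zeros on each side
    return segment.samples, segment.sample_rate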
class Perturbation:
def __init__(self, p=0.1, rng=None):
self.p = p
self._rng = random.Random() if rng is None else rng
def maybe_apply(self, segment, sample_rate=None):
if self._rng.random() < self.p:
self(segment, sample_rate)
class SpeedPerturbation(Perturbation):
def __init__(self, min_rate=0.85, max_rate=1.15, discrete=False, p=0.1, rng=None):
super(SpeedPerturbation, self).__init__(p, rng)
assert 0 < min_rate < max_rate
self.min_rate = min_rate
self.max_rate = max_rate
self.discrete = discrete
def __call__(self, data, sample_rate):
if self.discrete:
rate = np.random.choice([self.min_rate, None, self.max_rate])
else:
rate = self._rng.uniform(self.min_rate, self.max_rate)
if rate is not None:
data._samples = sox.Transformer().speed(factor=rate).build_array(
input_array=data._samples, sample_rate_in=sample_rate)
class GainPerturbation(Perturbation):
def __init__(self, min_gain_dbfs=-10, max_gain_dbfs=10, p=0.1, rng=None):
super(GainPerturbation, self).__init__(p, rng)
self._rng = random.Random() if rng is None else rng
self._min_gain_dbfs = min_gain_dbfs
self._max_gain_dbfs = max_gain_dbfs
def __call__(self, data, sample_rate=None):
del sample_rate
gain = self._rng.uniform(self._min_gain_dbfs, self._max_gain_dbfs)
data._samples = data._samples * (10. ** (gain / 20.))
class ShiftPerturbation(Perturbation):
def __init__(self, min_shift_ms=-5.0, max_shift_ms=5.0, p=0.1, rng=None):
super(ShiftPerturbation, self).__init__(p, rng)
self._min_shift_ms = min_shift_ms
self._max_shift_ms = max_shift_ms
def __call__(self, data, sample_rate):
shift_ms = self._rng.uniform(self._min_shift_ms, self._max_shift_ms)
if abs(shift_ms) / 1000 > data.duration:
# TODO: do something smarter than just ignore this condition
return
shift_samples = int(shift_ms * data.sample_rate // 1000)
# print("DEBUG: shift:", shift_samples)
if shift_samples < 0:
data._samples[-shift_samples:] = data._samples[:shift_samples]
data._samples[:-shift_samples] = 0
elif shift_samples > 0:
data._samples[:-shift_samples] = data._samples[shift_samples:]
data._samples[-shift_samples:] = 0
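# Illustrative usage sketch over a hypothetical wav file path; the perturbation
# ranges and probabilities are made up and mirror how AudioDataset applies them.
def _example_perturbations(wav_path):
    """Each perturbation fires independently with its own probability `p`."""
    segment = AudioSegment(wav_path, target_sr=16000)
    perturbations = [SpeedPerturbation(min_rate=0.9, max_rate=1.1, p=0.5),
                     GainPerturbation(min_gain_dbfs=-6, max_gain_dbfs=6, p=0.5),
                     ShiftPerturbation(min_shift_ms=-5.0, max_shift_ms=5.0, p=0.5)]
    for perturbation in perturbations:
        perturbation.maybe_apply(segment, sample_rate=16000)
    return segment.samples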
| DeepLearningExamples-master | PyTorch/SpeechRecognition/QuartzNet/common/audio.py |